| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths 6–201) | class_span (dict) | source (stringlengths 21–2.38M) | target (stringlengths 1–96) |
|---|---|---|---|---|---|
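Each row below pairs a Python class body whose name is masked as `____` (the `source` column) with the original class name (the `target` column); `class_span` presumably gives the character offsets of that class within the file at `path`. The snippet below is a minimal sketch of how such a row might be consumed; the row dict is copied from the first example in this preview (with the long `source` cell abbreviated), and no particular loading API is implied.

```python
# A minimal sketch, assuming each row is a plain dict keyed by the columns in the
# header above; values are copied from the first row of this preview, with the
# long `source` cell abbreviated.
row = {
    "language": "python",
    "repo": "EpistasisLab__tpot",
    "path": "tpot/builtin_modules/arithmetictransformer.py",
    "class_span": {"start": 13452, "end": 14082},  # assumed character offsets within `path`
    "source": "class ____(TransformerMixin, BaseEstimator):\n    ...",
    "target": "ZeroTransformer",
}

# `source` masks the class name with "____"; substituting `target` back in
# recovers the original class header.
reconstructed = row["source"].replace("____", row["target"], 1)
assert reconstructed.startswith("class ZeroTransformer(TransformerMixin, BaseEstimator):")

# The span gives the extent of the class definition in the original file.
span_length = row["class_span"]["end"] - row["class_span"]["start"]
print(f"class occupies {span_length} characters in {row['path']}")
```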
python
|
EpistasisLab__tpot
|
tpot/builtin_modules/arithmetictransformer.py
|
{
"start": 13452,
"end": 14082
}
|
class ____(TransformerMixin, BaseEstimator):
def __init__(self):
"""
A transformer that returns an array of zeros.
"""
pass
def fit(self, X, y=None):
return self
def transform(self, X):
transformed_X = np.array(self.transform_helper(np.array(X)))
if transformed_X.dtype != float:
transformed_X = transformed_X.astype(float)
return transformed_X
def transform_helper(self, X):
X = np.array(X)
if len(X.shape) == 1:
X = np.expand_dims(X,0)
return np.zeros((X.shape[0],1))
|
ZeroTransformer
|
python
|
django__django
|
tests/migrations/test_migrations_plan/0003_third.py
|
{
"start": 43,
"end": 449
}
|
class ____(migrations.Migration):
dependencies = [
("migrations", "0002_second"),
]
operations = [
migrations.CreateModel(
"Author",
[
("id", models.AutoField(primary_key=True)),
],
),
migrations.RunSQL(
["SELECT * FROM migrations_author"], ["SELECT * FROM migrations_book"]
),
]
|
Migration
|
python
|
gevent__gevent
|
src/gevent/testing/util.py
|
{
"start": 15061,
"end": 17572
}
|
class ____(object):
"""
Something that uses the ``examples/`` directory
from the root of the gevent distribution.
The `cwd` property is set to the root of the gevent distribution.
"""
#: Arguments to pass to the example file.
example_args = []
before_delay = 3
after_delay = 0.5
#: Path of the example Python file, relative to `cwd`
example = None # subclasses define this to be the path to the server.py
#: Keyword arguments to pass to the start or run method.
start_kwargs = None
def find_setup_py(self):
"Return the directory containing setup.py"
return search_for_setup_py(
a_file=__file__,
a_class=type(self)
)
@property
def cwd(self):
try:
root = self.find_setup_py()
except NoSetupPyFound as e:
raise unittest.SkipTest("Unable to locate file/dir to run: %s" % (e,))
return os.path.join(root, 'examples')
@property
def setenv(self):
"""
Returns a dictionary of environment variables to set for the
child in addition to (or replacing) the ones already in the
environment.
Since the child is run in `cwd`, relative paths in ``PYTHONPATH``
need to be converted to absolute paths.
"""
abs_pythonpath = absolute_pythonpath()
return {'PYTHONPATH': abs_pythonpath} if abs_pythonpath else None
def _start(self, meth):
if getattr(self, 'args', None):
raise AssertionError("Invalid test", self, self.args)
if getattr(self, 'server', None):
raise AssertionError("Invalid test", self, self.server)
try:
# These could be or are properties that can raise
server = self.example
server_dir = self.cwd
except NoSetupPyFound as e:
raise unittest.SkipTest("Unable to locate file/dir to run: %s" % (e,))
kwargs = self.start_kwargs or {}
setenv = self.setenv
if setenv:
if 'setenv' in kwargs:
kwargs['setenv'].update(setenv)
else:
kwargs['setenv'] = setenv
return meth(
[sys.executable, '-W', 'ignore', '-u', server] + self.example_args,
cwd=server_dir,
**kwargs
)
def start_example(self):
return self._start(meth=start)
def run_example(self):  # run() is a unittest method.
return self._start(meth=run)
|
ExampleMixin
|
python
|
fluentpython__example-code-2e
|
15-more-types/cafeteria/cafeteria.py
|
{
"start": 251,
"end": 436
}
|
class ____(Generic[T_co]):
def __init__(self, beverage: T_co) -> None:
self.beverage = beverage
def dispense(self) -> T_co:
return self.beverage
|
BeverageDispenser
|
python
|
apache__airflow
|
airflow-core/src/airflow/ti_deps/deps/ready_to_reschedule.py
|
{
"start": 1084,
"end": 3833
}
|
class ____(BaseTIDep):
"""Determines whether a task is ready to be rescheduled."""
NAME = "Ready To Reschedule"
IGNORABLE = True
IS_TASK_DEP = True
RESCHEDULEABLE_STATES = {TaskInstanceState.UP_FOR_RESCHEDULE, None}
@provide_session
def _get_dep_statuses(self, ti, session, dep_context):
"""
Determine whether a task is ready to be rescheduled.
Only tasks in NONE state with at least one row in the task_reschedule table are
handled by this dependency class; otherwise, this dependency is considered passed.
This dependency fails if the latest reschedule request's reschedule date is still
in the future.
"""
if (
# Mapped sensors don't have the reschedule property (it can only be calculated after unmapping),
# so we don't check them here. They are handled below by checking TaskReschedule instead.
ti.map_index < 0 and not getattr(ti.task, "reschedule", False)
):
yield self._passing_status(reason="Task is not in reschedule mode.")
return
if dep_context.ignore_in_reschedule_period:
yield self._passing_status(
reason="The context specified that being in a reschedule period was permitted."
)
return
if ti.state not in self.RESCHEDULEABLE_STATES:
yield self._passing_status(
reason="The task instance is not in State_UP_FOR_RESCHEDULE or NONE state."
)
return
next_reschedule_date = session.scalar(
TaskReschedule.stmt_for_task_instance(ti, descending=True)
.with_only_columns(TaskReschedule.reschedule_date)
.limit(1)
)
if not next_reschedule_date:
# Because mapped sensors don't have the reschedule property, here's the last resort
# and we need a slightly different passing reason
if ti.map_index >= 0:
yield self._passing_status(reason="The task is mapped and not in reschedule mode")
return
yield self._passing_status(reason="There is no reschedule request for this task instance.")
return
now = timezone.utcnow()
if now >= next_reschedule_date:
yield self._passing_status(reason="Task instance id ready for reschedule.")
return
yield self._failing_status(
reason=(
"Task is not ready for reschedule yet but will be rescheduled automatically. "
f"Current date is {now.isoformat()} and task will be "
f"rescheduled at {next_reschedule_date.isoformat()}."
)
)
|
ReadyToRescheduleDep
|
python
|
huggingface__transformers
|
src/transformers/models/perception_lm/modular_perception_lm.py
|
{
"start": 5768,
"end": 12779
}
|
class ____(LlavaModel):
_checkpoint_conversion_mapping = {}
def __init__(self, config: PerceptionLMConfig):
super().__init__(config)
self.vision_tower = AutoModel.from_config(config.vision_config)
self.multi_modal_projector = PerceptionLMMultiModalProjector(config)
self.language_model = AutoModel.from_config(config.text_config)
def get_image_features(
self,
pixel_values: torch.FloatTensor,
**kwargs,
):
"""
Obtains image last hidden states from the vision tower and apply multimodal projection.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_tiles, channels, height, width)`)
The tensors corresponding to the input images.
Returns:
image_features (`torch.Tensor`): Image feature tensor of shape `(num_tiles, num_patches, embed_dim)`.
"""
image_outputs = self.vision_tower(pixel_values.flatten(0, 1))
image_outputs = image_outputs.last_hidden_state
if self.config.vision_use_cls_token:
image_outputs = image_outputs[:, 1:, :]
image_features = self.multi_modal_projector(image_outputs)
return image_features
def get_placeholder_mask(
self,
input_ids: torch.LongTensor,
inputs_embeds: torch.FloatTensor,
image_features: Optional[torch.FloatTensor] = None,
video_features: Optional[torch.FloatTensor] = None,
):
"""
Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
equal to the length of multimodal features. If the lengths are different, an error is raised.
"""
if input_ids is None:
special_image_mask = inputs_embeds == self.get_input_embeddings()(
torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
)
special_image_mask = special_image_mask.all(-1)
special_video_mask = inputs_embeds == self.get_input_embeddings()(
torch.tensor(self.config.video_token_id, dtype=torch.long, device=inputs_embeds.device)
)
special_video_mask = special_video_mask.all(-1)
else:
special_image_mask = input_ids == self.config.image_token_id
special_video_mask = input_ids == self.config.video_token_id
n_image_tokens = special_image_mask.sum()
special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
if image_features is not None and inputs_embeds[special_image_mask].numel() != image_features.numel():
raise ValueError(
f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {image_features.size()[:-1].numel()}"
)
n_video_tokens = special_video_mask.sum()
special_video_mask = special_video_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
if video_features is not None and inputs_embeds[special_video_mask].numel() != video_features.numel():
raise ValueError(
f"Videos features and image tokens do not match: tokens: {n_video_tokens}, features {video_features.size()[:-1].numel()}"
)
return special_image_mask, special_video_mask
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
pixel_values_videos: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**lm_kwargs,
) -> Union[tuple, PerceptionLMModelOutputWithPast]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if (pixel_values is not None or pixel_values_videos is not None) and inputs_embeds is not None:
raise ValueError(
"You cannot specify both (pixel_values or pixel_values_videos) and inputs_embeds at the same time, and must specify either one"
)
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(input_ids)
image_features = None
if pixel_values is not None:
image_features = self.get_image_features(pixel_values=pixel_values)
image_features = image_features.to(inputs_embeds.device, dtype=inputs_embeds.dtype)
special_image_mask, _ = self.get_placeholder_mask(
input_ids, inputs_embeds=inputs_embeds, image_features=image_features
)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
video_features = None
if pixel_values_videos is not None:
video_features = self.get_image_features(pixel_values=pixel_values_videos)
video_features = video_features.to(inputs_embeds.device, dtype=inputs_embeds.dtype)
_, special_video_mask = self.get_placeholder_mask(
input_ids, inputs_embeds=inputs_embeds, video_features=video_features
)
inputs_embeds = inputs_embeds.masked_scatter(special_video_mask, video_features)
outputs = self.language_model(
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
cache_position=cache_position,
logits_to_keep=logits_to_keep,
**lm_kwargs,
)
return PerceptionLMModelOutputWithPast(
last_hidden_state=outputs.last_hidden_state,
hidden_states=outputs.hidden_states,
past_key_values=outputs.past_key_values,
attentions=outputs.attentions,
image_hidden_states=image_features if pixel_values is not None else None,
video_hidden_states=(video_features if pixel_values_videos is not None else None),
)
@auto_docstring
|
PerceptionLMModel
|
python
|
neetcode-gh__leetcode
|
python/0036-valid-sudoku.py
|
{
"start": 0,
"end": 749
}
|
class ____:
def isValidSudoku(self, board: List[List[str]]) -> bool:
cols = collections.defaultdict(set)
rows = collections.defaultdict(set)
squares = collections.defaultdict(set)  # key = (r // 3, c // 3)
for r in range(9):
for c in range(9):
if board[r][c] == ".":
continue
if (
board[r][c] in rows[r]
or board[r][c] in cols[c]
or board[r][c] in squares[(r // 3, c // 3)]
):
return False
cols[c].add(board[r][c])
rows[r].add(board[r][c])
squares[(r // 3, c // 3)].add(board[r][c])
return True
|
Solution
|
python
|
getsentry__sentry
|
src/sentry/sentry_apps/api/parsers/sentry_app.py
|
{
"start": 2026,
"end": 2498
}
|
class ____(serializers.URLField):
def to_internal_value(self, url):
# The Django URLField doesn't distinguish between different types of
# invalid URLs, so do any manual checks here to give the User a better
# error message.
if url and not url.startswith("http"):
raise ValidationError("URL must start with http[s]://")
return url
@extend_schema_serializer(exclude_fields=["popularity", "features", "status"])
|
URLField
|
python
|
langchain-ai__langchain
|
libs/standard-tests/langchain_tests/unit_tests/embeddings.py
|
{
"start": 734,
"end": 4597
}
|
class ____(EmbeddingsTests):
"""Base class for embeddings unit tests.
Test subclasses must implement the `embeddings_class` property to specify the
embeddings model to be tested. You can also override the
`embedding_model_params` property to specify initialization parameters.
```python
from typing import Type
from langchain_tests.unit_tests import EmbeddingsUnitTests
from my_package.embeddings import MyEmbeddingsModel
class TestMyEmbeddingsModelUnit(EmbeddingsUnitTests):
@property
def embeddings_class(self) -> Type[MyEmbeddingsModel]:
# Return the embeddings model class to test here
return MyEmbeddingsModel
@property
def embedding_model_params(self) -> dict:
# Return initialization parameters for the model.
return {"model": "model-001"}
```
!!! note
API references for individual test methods include troubleshooting tips.
Testing initialization from environment variables
Overriding the `init_from_env_params` property will enable additional tests
for initialization from environment variables. See below for details.
??? note "`init_from_env_params`"
This property is used in unit tests to test initialization from
environment variables. It should return a tuple of three dictionaries
that specify the environment variables, additional initialization args,
and expected instance attributes to check.
Defaults to empty dicts. If not overridden, the test is skipped.
```python
@property
def init_from_env_params(self) -> Tuple[dict, dict, dict]:
return (
{
"MY_API_KEY": "api_key",
},
{
"model": "model-001",
},
{
"my_api_key": "api_key",
},
)
```
"""
def test_init(self) -> None:
"""Test model initialization.
??? note "Troubleshooting"
If this test fails, ensure that `embedding_model_params` is specified
and the model can be initialized from those params.
"""
model = self.embeddings_class(**self.embedding_model_params)
assert model is not None
@property
def init_from_env_params(self) -> tuple[dict, dict, dict]:
"""Init from env params.
This property is used in unit tests to test initialization from environment
variables. It should return a tuple of three dictionaries that specify the
environment variables, additional initialization args, and expected instance
attributes to check.
"""
return {}, {}, {}
def test_init_from_env(self) -> None:
"""Test initialization from environment variables.
Relies on the `init_from_env_params` property.
Test is skipped if that property is not set.
??? note "Troubleshooting"
If this test fails, ensure that `init_from_env_params` is specified
correctly and that model parameters are properly set from environment
variables during initialization.
"""
env_params, embeddings_params, expected_attrs = self.init_from_env_params
if env_params:
with mock.patch.dict(os.environ, env_params):
model = self.embeddings_class(**embeddings_params)
assert model is not None
for k, expected in expected_attrs.items():
actual = getattr(model, k)
if isinstance(actual, SecretStr):
actual = actual.get_secret_value()
assert actual == expected
|
EmbeddingsUnitTests
|
python
|
kamyu104__LeetCode-Solutions
|
Python/query-kth-smallest-trimmed-number.py
|
{
"start": 58,
"end": 1108
}
|
class ____(object):
def smallestTrimmedNumbers(self, nums, queries):
"""
:type nums: List[str]
:type queries: List[List[int]]
:rtype: List[int]
"""
max_t = max(t for _, t in queries)
lookup = [[] for _ in xrange(max_t+1)]
for i, (k, t) in enumerate(queries):
lookup[t].append((k, i))
result = [0]*len(queries)
idxs = range(len(nums))
for l in xrange(1, max_t+1):
cnt = [0]*10
for i in idxs:
d = int(nums[i][-l])
cnt[d] += 1
for d in xrange(9):
cnt[d+1] += cnt[d]
new_idxs = [0]*len(nums)
for i in reversed(idxs):
d = int(nums[i][-l])
cnt[d] -= 1
new_idxs[cnt[d]] = i
idxs = new_idxs
for k, i in lookup[l]:
result[i] = idxs[k-1]
return result
# Time: O(q * n * t) on average
# Space: O(n + q)
import random
# quick select
|
Solution
|
python
|
ray-project__ray
|
python/ray/serve/tests/unit/test_proxy_state.py
|
{
"start": 949,
"end": 1159
}
|
class ____:
def __init__(self, *args, **kwargs):
pass
def ready(self):
return json.dumps(["mock_worker_id", "mock_log_file_path"])
def check_health(self):
pass
|
FakeProxyActor
|
python
|
PrefectHQ__prefect
|
src/prefect/cli/transfer/_migratable_resources/deployments.py
|
{
"start": 947,
"end": 10191
}
|
class ____(MigratableResource[DeploymentResponse]):
_instances: dict[uuid.UUID, Self] = {}
def __init__(self, deployment: DeploymentResponse):
self.source_deployment = deployment
self.destination_deployment: DeploymentResponse | None = None
self._dependencies: dict[uuid.UUID, MigratableProtocol] = {}
@property
def source_id(self) -> uuid.UUID:
return self.source_deployment.id
@property
def destination_id(self) -> uuid.UUID | None:
return self.destination_deployment.id if self.destination_deployment else None
@classmethod
async def construct(cls, obj: DeploymentResponse) -> Self:
if obj.id in cls._instances:
return cls._instances[obj.id]
instance = cls(obj)
cls._instances[obj.id] = instance
return instance
@classmethod
async def get_instance(
cls, id: uuid.UUID
) -> "MigratableResource[DeploymentResponse] | None":
if id in cls._instances:
return cls._instances[id]
return None
async def get_dependencies(self) -> "list[MigratableProtocol]":
if self._dependencies:
return list(self._dependencies.values())
async with get_client() as client:
if dependency := await MigratableFlow.get_instance(
id=self.source_deployment.flow_id
):
self._dependencies[self.source_deployment.flow_id] = dependency
else:
flow = await client.read_flow(self.source_deployment.flow_id)
self._dependencies[
self.source_deployment.flow_id
] = await construct_migratable_resource(flow)
if self.source_deployment.work_queue_id is not None:
if dependency := await MigratableWorkQueue.get_instance(
id=self.source_deployment.work_queue_id
):
self._dependencies[self.source_deployment.work_queue_id] = (
dependency
)
else:
work_queue = await client.read_work_queue(
self.source_deployment.work_queue_id
)
self._dependencies[
work_queue.id
] = await construct_migratable_resource(work_queue)
if self.source_deployment.work_pool_name is not None:
if dependency := await MigratableWorkPool.get_instance_by_name(
name=self.source_deployment.work_pool_name
):
self._dependencies[dependency.source_id] = dependency
else:
work_pool = await client.read_work_pool(
self.source_deployment.work_pool_name
)
self._dependencies[
work_pool.id
] = await construct_migratable_resource(work_pool)
if self.source_deployment.storage_document_id is not None:
if dependency := await MigratableBlockDocument.get_instance(
id=self.source_deployment.storage_document_id
):
self._dependencies[self.source_deployment.storage_document_id] = (
dependency
)
else:
storage_document = await client.read_block_document(
self.source_deployment.storage_document_id
)
self._dependencies[
storage_document.id
] = await construct_migratable_resource(storage_document)
if self.source_deployment.infrastructure_document_id is not None:
if dependency := await MigratableBlockDocument.get_instance(
id=self.source_deployment.infrastructure_document_id
):
self._dependencies[
self.source_deployment.infrastructure_document_id
] = dependency
else:
infrastructure_document = await client.read_block_document(
self.source_deployment.infrastructure_document_id
)
self._dependencies[
infrastructure_document.id
] = await construct_migratable_resource(infrastructure_document)
if self.source_deployment.pull_steps:
# TODO: Figure out how to find block document references in pull steps
pass
return list(self._dependencies.values())
async def migrate(self) -> None:
async with get_client() as client:
try:
if (
destination_flow_id := getattr(
self._dependencies.get(self.source_deployment.flow_id),
"destination_id",
None,
)
) is None:
raise ValueError("Unable to find destination flow")
if (
self.source_deployment.storage_document_id
and (
destination_storage_document_id := getattr(
self._dependencies.get(
self.source_deployment.storage_document_id
),
"destination_id",
None,
)
)
is None
):
raise ValueError("Unable to find destination storage document")
else:
destination_storage_document_id = None
if (
self.source_deployment.infrastructure_document_id
and (
destination_infrastructure_document_id := getattr(
self._dependencies.get(
self.source_deployment.infrastructure_document_id
),
"destination_id",
None,
)
)
is None
):
raise ValueError(
"Unable to find destination infrastructure document"
)
else:
destination_infrastructure_document_id = None
destination_deployment_id = await client.create_deployment(
flow_id=destination_flow_id,
name=self.source_deployment.name,
version=self.source_deployment.version,
version_info=self.source_deployment.version_info,
schedules=[
DeploymentScheduleCreate(
schedule=schedule.schedule,
active=schedule.active,
max_scheduled_runs=schedule.max_scheduled_runs,
parameters=schedule.parameters,
slug=schedule.slug,
)
for schedule in self.source_deployment.schedules
],
concurrency_limit=self.source_deployment.concurrency_limit,
concurrency_options=self.source_deployment.concurrency_options,
parameters=self.source_deployment.parameters,
description=self.source_deployment.description,
work_queue_name=self.source_deployment.work_queue_name,
work_pool_name=self.source_deployment.work_pool_name,
tags=self.source_deployment.tags,
storage_document_id=destination_storage_document_id,
path=self.source_deployment.path,
entrypoint=self.source_deployment.entrypoint,
infrastructure_document_id=destination_infrastructure_document_id,
parameter_openapi_schema=self.source_deployment.parameter_openapi_schema,
paused=self.source_deployment.paused,
pull_steps=self.source_deployment.pull_steps,
enforce_parameter_schema=self.source_deployment.enforce_parameter_schema,
job_variables=self.source_deployment.job_variables,
branch=self.source_deployment.branch,
base=self.source_deployment.base,
root=self.source_deployment.root,
)
self.destination_deployment = await client.read_deployment(
destination_deployment_id
)
except ObjectLimitReached:
raise TransferSkipped("Deployment limit reached (upgrade tier)")
except ObjectAlreadyExists:
self.destination_deployment = await client.read_deployment(
self.source_deployment.id
)
raise TransferSkipped("Already exists")
|
MigratableDeployment
|
python
|
PyCQA__pylint
|
pylint/reporters/reports_handler_mix_in.py
|
{
"start": 754,
"end": 3304
}
|
class ____:
"""A mix-in class containing all the reports and stats manipulation
related methods for the main lint class.
"""
def __init__(self) -> None:
self._reports: ReportsDict = collections.defaultdict(list)
self._reports_state: dict[str, bool] = {}
def report_order(self) -> MutableSequence[BaseChecker]:
"""Return a list of reporters."""
return list(self._reports)
def register_report(
self, reportid: str, r_title: str, r_cb: ReportsCallable, checker: BaseChecker
) -> None:
"""Register a report.
:param reportid: The unique identifier for the report
:param r_title: The report's title
:param r_cb: The method to call to make the report
:param checker: The checker defining the report
"""
reportid = reportid.upper()
self._reports[checker].append((reportid, r_title, r_cb))
def deregister_reports(self, checker: BaseChecker) -> None:
"""De-register all reports for a checker."""
for r_id, r_title, r_cb in checker.reports:
self._reports[checker].remove((r_id, r_title, r_cb))
def enable_report(self, reportid: str) -> None:
"""Enable the report of the given id."""
reportid = reportid.upper()
self._reports_state[reportid] = True
def disable_report(self, reportid: str) -> None:
"""Disable the report of the given id."""
reportid = reportid.upper()
self._reports_state[reportid] = False
def report_is_enabled(self, reportid: str) -> bool:
"""Is the report associated to the given identifier enabled ?"""
return self._reports_state.get(reportid, True)
def make_reports( # type: ignore[misc] # ReportsHandlerMixIn is always mixed with PyLinter
self: PyLinter,
stats: LinterStats,
old_stats: LinterStats | None,
) -> Section:
"""Render registered reports."""
sect = Section("Report", f"{self.stats.statement} statements analysed.")
for checker in self.report_order():
for reportid, r_title, r_cb in self._reports[checker]:
if not self.report_is_enabled(reportid):
continue
report_sect = Section(r_title)
try:
r_cb(report_sect, stats, old_stats)
except EmptyReportError:
continue
report_sect.report_id = reportid
sect.append(report_sect)
return sect
|
ReportsHandlerMixIn
|
python
|
PyCQA__pylint
|
tests/functional/i/implicit/implicit_flag_alias.py
|
{
"start": 369,
"end": 509
}
|
class ____(ExplicitUnionFlags): # [invalid-enum-extension]
"""Class with flags that overlap a superclass"""
RWX = 7
|
SubclassUnionFlags
|
python
|
great-expectations__great_expectations
|
contrib/experimental/great_expectations_experimental/rule_based_profiler/data_assistant/statistics_data_assistant.py
|
{
"start": 1343,
"end": 32260
}
|
class ____(DataAssistant):
"""
StatisticsDataAssistant provides metrics for dataset exploration purposes.
Fundamentally, StatisticsDataAssistant is "OnboardingDataAssistant minus Expectations -- only Metrics", the intended
use case being obtaining a description of the data via metrics as well as comparing metrics between sub-sampled datasets
to determine the smallest dataset whose statistics adequately represent the overall data distribution.
"""
__alias__: str = "statistics"
def __init__(
self,
name: str,
validator: Validator,
) -> None:
super().__init__(
name=name,
validator=validator,
)
def get_variables(self) -> Optional[Dict[str, Any]]:
"""
Returns:
Optional "variables" configuration attribute name/value pairs (overrides), commonly-used in Builder objects.
"""
return None
def get_rules(self) -> Optional[List[Rule]]:
"""
Returns:
Optional custom list of "Rule" objects implementing particular "DataAssistant" functionality.
"""
total_count_metric_multi_batch_parameter_builder_for_evaluations: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.get_table_row_count_metric_multi_batch_parameter_builder()
column_integrity_rule: Rule = self._build_column_integrity_rule(
total_count_metric_multi_batch_parameter_builder_for_evaluations=total_count_metric_multi_batch_parameter_builder_for_evaluations
)
numeric_columns_rule: Rule = self._build_numeric_columns_rule()
datetime_columns_rule: Rule = self._build_datetime_columns_rule()
text_columns_rule: Rule = self._build_text_columns_rule()
categorical_columns_rule: Rule = self._build_categorical_columns_rule()
return [
column_integrity_rule,
numeric_columns_rule,
datetime_columns_rule,
text_columns_rule,
categorical_columns_rule,
]
def _build_data_assistant_result(
self, data_assistant_result: DataAssistantResult
) -> DataAssistantResult:
return StatisticsDataAssistantResult(
_batch_id_to_batch_identifier_display_name_map=data_assistant_result._batch_id_to_batch_identifier_display_name_map,
profiler_config=data_assistant_result.profiler_config,
profiler_execution_time=data_assistant_result.profiler_execution_time,
rule_domain_builder_execution_time=data_assistant_result.rule_domain_builder_execution_time,
rule_execution_time=data_assistant_result.rule_execution_time,
rule_exception_tracebacks=data_assistant_result.rule_exception_tracebacks,
metrics_by_domain=data_assistant_result.metrics_by_domain,
expectation_configurations=data_assistant_result.expectation_configurations,
citation=data_assistant_result.citation,
)
@staticmethod
def _build_table_rule() -> Rule:
"""
This method builds "Rule" object focused on emitting "ParameterBuilder" objects for table "Domain" type metrics.
"""
# Step-1: Instantiate "TableDomainBuilder" object.
table_domain_builder: DomainBuilder = TableDomainBuilder(
data_context=None,
)
# Step-2: Declare "ParameterBuilder" for every metric of interest.
table_row_count_metric_multi_batch_parameter_builder_for_metrics: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.get_table_row_count_metric_multi_batch_parameter_builder()
table_columns_metric_multi_batch_parameter_builder_for_metrics: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.get_table_columns_metric_multi_batch_parameter_builder()
# Step-3: Declare "ParameterBuilder" configurations for all additional statistics needed.
suite_parameter_builder_configs: Optional[List[ParameterBuilderConfig]] = [
ParameterBuilderConfig(
**table_row_count_metric_multi_batch_parameter_builder_for_metrics.to_json_dict()
),
]
table_row_count_range_parameter_builder_for_validations: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.build_numeric_metric_range_multi_batch_parameter_builder(
metric_name=None,
suffix=None,
metric_value_kwargs=None,
suite_parameter_builder_configs=suite_parameter_builder_configs,
)
mean_table_columns_set_match_multi_batch_parameter_builder_for_validations = (
MeanTableColumnsSetMatchMultiBatchParameterBuilder(
name="column_names_set_estimator",
metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
metric_value_kwargs=None,
suite_parameter_builder_configs=None,
)
)
# Step-4: Instantiate and return "Rule" object, comprised of "variables", "domain_builder", "parameter_builders", and "expectation_configuration_builders" components.
variables: dict = {
"false_positive_rate": 0.05,
"estimator": "bootstrap",
"n_resamples": 9999,
"random_seed": None,
"quantile_statistic_interpolation_method": "nearest",
"quantile_bias_correction": False,
"quantile_bias_std_error_ratio_threshold": None,
"include_estimator_samples_histogram_in_details": False,
"truncate_values": {
"lower_bound": 0,
"upper_bound": None,
},
"round_decimals": 0,
"exact_match": None,
"success_ratio": 1.0,
}
parameter_builders: List[ParameterBuilder] = [
table_row_count_metric_multi_batch_parameter_builder_for_metrics,
table_columns_metric_multi_batch_parameter_builder_for_metrics,
table_row_count_range_parameter_builder_for_validations,
mean_table_columns_set_match_multi_batch_parameter_builder_for_validations,
]
rule = Rule(
name="table_rule",
variables=variables,
domain_builder=table_domain_builder,
parameter_builders=parameter_builders,
expectation_configuration_builders=None,
)
return rule
@staticmethod
def _build_column_integrity_rule(
total_count_metric_multi_batch_parameter_builder_for_evaluations: Optional[
ParameterBuilder
] = None,
) -> Rule:
"""
This method builds "Rule" object focused on emitting "map" style column integrity metrics.
"""
# Step-1: Instantiate "ColumnDomainBuilder" for selecting all columns.
every_column_domain_builder: DomainBuilder = ColumnDomainBuilder(
include_column_names=None,
exclude_column_names=None,
include_column_name_suffixes=None,
exclude_column_name_suffixes=None,
semantic_type_filter_module_name=None,
semantic_type_filter_class_name=None,
include_semantic_types=None,
exclude_semantic_types=None,
data_context=None,
)
# Step-2: Declare "ParameterBuilder" for every metric of interest.
column_values_nonnull_unexpected_count_metric_multi_batch_parameter_builder_for_metrics: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.get_column_values_nonnull_unexpected_count_metric_multi_batch_parameter_builder()
# Step-3: Declare "ParameterBuilder" configurations for all additional statistics needed.
suite_parameter_builder_configs: Optional[List[ParameterBuilderConfig]]
if total_count_metric_multi_batch_parameter_builder_for_evaluations is None:
total_count_metric_multi_batch_parameter_builder_for_evaluations = DataAssistant.commonly_used_parameter_builders.get_table_row_count_metric_multi_batch_parameter_builder()
column_values_nonnull_unexpected_count_metric_multi_batch_parameter_builder_for_evaluations = column_values_nonnull_unexpected_count_metric_multi_batch_parameter_builder_for_metrics
suite_parameter_builder_configs: Optional[List[ParameterBuilderConfig]] = [
ParameterBuilderConfig(
**total_count_metric_multi_batch_parameter_builder_for_evaluations.to_json_dict()
),
ParameterBuilderConfig(
**column_values_nonnull_unexpected_count_metric_multi_batch_parameter_builder_for_evaluations.to_json_dict()
),
]
map_metric_name: str
map_metric_name = "column_values.unique"
column_values_unique_mean_unexpected_value_multi_batch_parameter_builder_for_validations = MeanUnexpectedMapMetricMultiBatchParameterBuilder(
name=f"{map_metric_name}.unexpected_value",
map_metric_name=map_metric_name,
total_count_parameter_builder_name=total_count_metric_multi_batch_parameter_builder_for_evaluations.name,
null_count_parameter_builder_name=column_values_nonnull_unexpected_count_metric_multi_batch_parameter_builder_for_evaluations.name,
metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
metric_value_kwargs=None,
suite_parameter_builder_configs=suite_parameter_builder_configs,
data_context=None,
)
map_metric_name = "column_values.null"
column_values_null_mean_unexpected_value_multi_batch_parameter_builder_for_validations = MeanUnexpectedMapMetricMultiBatchParameterBuilder(
name=f"{map_metric_name}.unexpected_value",
map_metric_name=map_metric_name,
total_count_parameter_builder_name=total_count_metric_multi_batch_parameter_builder_for_evaluations.name,
null_count_parameter_builder_name=column_values_nonnull_unexpected_count_metric_multi_batch_parameter_builder_for_evaluations.name,
metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
metric_value_kwargs=None,
suite_parameter_builder_configs=suite_parameter_builder_configs,
data_context=None,
)
map_metric_name = "column_values.nonnull"
column_values_nonnull_mean_unexpected_value_multi_batch_parameter_builder_for_validations = MeanUnexpectedMapMetricMultiBatchParameterBuilder(
name=f"{map_metric_name}.unexpected_value",
map_metric_name=map_metric_name,
total_count_parameter_builder_name=total_count_metric_multi_batch_parameter_builder_for_evaluations.name,
null_count_parameter_builder_name=column_values_nonnull_unexpected_count_metric_multi_batch_parameter_builder_for_evaluations.name,
metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
metric_value_kwargs=None,
suite_parameter_builder_configs=suite_parameter_builder_configs,
data_context=None,
)
# Step-4: Instantiate and return "Rule" object, comprised of "variables", "domain_builder", "parameter_builders", and "expectation_configuration_builders" components.
variables: dict = {
"success_ratio": 7.5e-1,
}
parameter_builders: List[ParameterBuilder] = [
column_values_unique_mean_unexpected_value_multi_batch_parameter_builder_for_validations,
column_values_null_mean_unexpected_value_multi_batch_parameter_builder_for_validations,
column_values_nonnull_mean_unexpected_value_multi_batch_parameter_builder_for_validations,
]
rule = Rule(
name="column_integrity_rule",
variables=variables,
domain_builder=every_column_domain_builder,
parameter_builders=parameter_builders,
expectation_configuration_builders=None,
)
return rule
@staticmethod
def _build_numeric_columns_rule() -> Rule:
"""
This method builds "Rule" object focused on emitting "ParameterBuilder" objects for numeric columns "Domain" type metrics.
"""
# Step-1: Instantiate "ColumnDomainBuilder" for selecting numeric columns (but not "ID-type" columns).
numeric_column_type_domain_builder: DomainBuilder = ColumnDomainBuilder(
include_column_names=None,
exclude_column_names=None,
include_column_name_suffixes=None,
exclude_column_name_suffixes=[
"_id",
"_ID",
],
semantic_type_filter_module_name=None,
semantic_type_filter_class_name=None,
include_semantic_types=[
SemanticDomainTypes.NUMERIC,
],
exclude_semantic_types=[
SemanticDomainTypes.IDENTIFIER,
],
data_context=None,
)
# Step-2: Declare "ParameterBuilder" for every metric of interest.
column_min_metric_multi_batch_parameter_builder_for_metrics: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.get_column_min_metric_multi_batch_parameter_builder()
column_max_metric_multi_batch_parameter_builder_for_metrics: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.get_column_max_metric_multi_batch_parameter_builder()
column_quantile_values_metric_multi_batch_parameter_builder_for_metrics: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.get_column_quantile_values_metric_multi_batch_parameter_builder()
column_median_metric_multi_batch_parameter_builder_for_metrics: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.get_column_median_metric_multi_batch_parameter_builder()
column_mean_metric_multi_batch_parameter_builder_for_metrics: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.get_column_mean_metric_multi_batch_parameter_builder()
column_standard_deviation_metric_multi_batch_parameter_builder_for_metrics: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.get_column_standard_deviation_metric_multi_batch_parameter_builder()
# Step-3: Declare "ParameterBuilder" configurations for all additional statistics needed.
suite_parameter_builder_configs: Optional[List[ParameterBuilderConfig]]
suite_parameter_builder_configs = [
ParameterBuilderConfig(
**column_min_metric_multi_batch_parameter_builder_for_metrics.to_json_dict()
),
]
column_min_values_range_parameter_builder_for_validations: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.build_numeric_metric_range_multi_batch_parameter_builder(
metric_name=None,
suffix=None,
metric_value_kwargs=None,
suite_parameter_builder_configs=suite_parameter_builder_configs,
)
suite_parameter_builder_configs = [
ParameterBuilderConfig(
**column_max_metric_multi_batch_parameter_builder_for_metrics.to_json_dict()
),
]
column_max_values_range_parameter_builder_for_validations: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.build_numeric_metric_range_multi_batch_parameter_builder(
metric_name=None,
suffix=None,
metric_value_kwargs=None,
suite_parameter_builder_configs=suite_parameter_builder_configs,
)
suite_parameter_builder_configs = [
ParameterBuilderConfig(
**column_quantile_values_metric_multi_batch_parameter_builder_for_metrics.to_json_dict()
),
]
column_quantile_values_range_parameter_builder_for_validations: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.build_numeric_metric_range_multi_batch_parameter_builder(
metric_name=None,
suffix=None,
metric_value_kwargs={
"quantiles": f"{VARIABLES_KEY}quantiles",
"allow_relative_error": f"{VARIABLES_KEY}allow_relative_error",
},
suite_parameter_builder_configs=suite_parameter_builder_configs,
)
suite_parameter_builder_configs = [
ParameterBuilderConfig(
**column_median_metric_multi_batch_parameter_builder_for_metrics.to_json_dict()
),
]
column_median_values_range_parameter_builder_for_validations: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.build_numeric_metric_range_multi_batch_parameter_builder(
metric_name=None,
suffix=None,
metric_value_kwargs=None,
suite_parameter_builder_configs=suite_parameter_builder_configs,
)
suite_parameter_builder_configs = [
ParameterBuilderConfig(
**column_mean_metric_multi_batch_parameter_builder_for_metrics.to_json_dict()
),
]
column_mean_values_range_parameter_builder_for_validations: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.build_numeric_metric_range_multi_batch_parameter_builder(
metric_name=None,
suffix=None,
metric_value_kwargs=None,
suite_parameter_builder_configs=suite_parameter_builder_configs,
)
suite_parameter_builder_configs = [
ParameterBuilderConfig(
**column_standard_deviation_metric_multi_batch_parameter_builder_for_metrics.to_json_dict()
),
]
column_standard_deviation_values_range_parameter_builder_for_validations: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.build_numeric_metric_range_multi_batch_parameter_builder(
metric_name=None,
suffix=None,
metric_value_kwargs=None,
suite_parameter_builder_configs=suite_parameter_builder_configs,
)
# Step-4: Instantiate and return "Rule" object, comprised of "variables", "domain_builder", "parameter_builders", and "expectation_configuration_builders" components.
variables: dict = {
"mostly": 1.0,
"strict_min": False,
"strict_max": False,
"quantiles": [
0.25,
0.5,
0.75,
],
"allow_relative_error": False,
"false_positive_rate": 0.05,
"estimator": "bootstrap",
"n_resamples": 9999,
"random_seed": None,
"quantile_statistic_interpolation_method": "nearest",
"quantile_bias_correction": False,
"quantile_bias_std_error_ratio_threshold": None,
"include_estimator_samples_histogram_in_details": False,
"truncate_values": {
"lower_bound": None,
"upper_bound": None,
},
"round_decimals": 15,
}
parameter_builders: List[ParameterBuilder] = [
column_min_values_range_parameter_builder_for_validations,
column_max_values_range_parameter_builder_for_validations,
column_quantile_values_range_parameter_builder_for_validations,
column_median_values_range_parameter_builder_for_validations,
column_mean_values_range_parameter_builder_for_validations,
column_standard_deviation_values_range_parameter_builder_for_validations,
]
rule = Rule(
name="numeric_columns_rule",
variables=variables,
domain_builder=numeric_column_type_domain_builder,
parameter_builders=parameter_builders,
expectation_configuration_builders=None,
)
return rule
@staticmethod
def _build_datetime_columns_rule() -> Rule:
"""
This method builds "Rule" object focused on emitting "ParameterBuilder" objects for datetime columns "Domain" type metrics.
"""
# Step-1: Instantiate "ColumnDomainBuilder" for selecting proper datetime columns (not "datetime-looking" text).
datetime_column_type_domain_builder: DomainBuilder = ColumnDomainBuilder(
include_column_names=None,
exclude_column_names=None,
include_column_name_suffixes=None,
exclude_column_name_suffixes=None,
semantic_type_filter_module_name=None,
semantic_type_filter_class_name=None,
include_semantic_types=[
SemanticDomainTypes.DATETIME,
],
exclude_semantic_types=[
SemanticDomainTypes.TEXT,
],
data_context=None,
)
# Step-2: Declare "ParameterBuilder" for every metric of interest.
column_min_metric_multi_batch_parameter_builder_for_metrics: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.get_column_min_metric_multi_batch_parameter_builder()
column_max_metric_multi_batch_parameter_builder_for_metrics: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.get_column_max_metric_multi_batch_parameter_builder()
# Step-3: Declare "ParameterBuilder" configurations for all additional statistics needed.
suite_parameter_builder_configs: Optional[List[ParameterBuilderConfig]]
suite_parameter_builder_configs = [
ParameterBuilderConfig(
**column_min_metric_multi_batch_parameter_builder_for_metrics.to_json_dict()
),
]
column_min_values_range_parameter_builder_for_validations: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.build_numeric_metric_range_multi_batch_parameter_builder(
metric_name=None,
suffix=None,
metric_value_kwargs=None,
suite_parameter_builder_configs=suite_parameter_builder_configs,
)
suite_parameter_builder_configs = [
ParameterBuilderConfig(
**column_max_metric_multi_batch_parameter_builder_for_metrics.to_json_dict()
),
]
column_max_values_range_parameter_builder_for_validations: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.build_numeric_metric_range_multi_batch_parameter_builder(
metric_name=None,
suffix=None,
metric_value_kwargs=None,
suite_parameter_builder_configs=suite_parameter_builder_configs,
)
# Step-4: Instantiate and return "Rule" object, comprised of "variables", "domain_builder", "parameter_builders", and "expectation_configuration_builders" components.
variables: dict = {
"mostly": 1.0,
"strict_min": False,
"strict_max": False,
"false_positive_rate": 0.05,
"estimator": "bootstrap",
"n_resamples": 9999,
"random_seed": None,
"quantile_statistic_interpolation_method": "nearest",
"quantile_bias_correction": False,
"quantile_bias_std_error_ratio_threshold": None,
"include_estimator_samples_histogram_in_details": False,
"truncate_values": {
"lower_bound": None,
"upper_bound": None,
},
"round_decimals": 1,
}
parameter_builders: List[ParameterBuilder] = [
column_min_values_range_parameter_builder_for_validations,
column_max_values_range_parameter_builder_for_validations,
]
rule = Rule(
name="datetime_columns_rule",
variables=variables,
domain_builder=datetime_column_type_domain_builder,
parameter_builders=parameter_builders,
expectation_configuration_builders=None,
)
return rule
@staticmethod
def _build_text_columns_rule() -> Rule:
# Step-1: Instantiate "ColumnDomainBuilder" for selecting proper text columns.
text_column_type_domain_builder: DomainBuilder = ColumnDomainBuilder(
include_column_names=None,
exclude_column_names=None,
include_column_name_suffixes=None,
exclude_column_name_suffixes=None,
semantic_type_filter_module_name=None,
semantic_type_filter_class_name=None,
include_semantic_types=[
SemanticDomainTypes.TEXT,
],
exclude_semantic_types=[
SemanticDomainTypes.NUMERIC,
SemanticDomainTypes.DATETIME,
],
data_context=None,
)
# Step-2: Declare "ParameterBuilder" for every metric of interest.
column_min_length_metric_multi_batch_parameter_builder_for_metrics: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.get_column_min_length_metric_multi_batch_parameter_builder()
column_max_length_metric_multi_batch_parameter_builder_for_metrics: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.get_column_max_length_metric_multi_batch_parameter_builder()
# Step-3: Declare "ParameterBuilder" configurations for all additional statistics needed.
suite_parameter_builder_configs: Optional[List[ParameterBuilderConfig]]
suite_parameter_builder_configs = [
ParameterBuilderConfig(
**column_min_length_metric_multi_batch_parameter_builder_for_metrics.to_json_dict()
),
]
column_min_length_range_parameter_builder_for_validations: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.build_numeric_metric_range_multi_batch_parameter_builder(
metric_name=None,
suffix=None,
metric_value_kwargs=None,
suite_parameter_builder_configs=suite_parameter_builder_configs,
)
suite_parameter_builder_configs = [
ParameterBuilderConfig(
**column_max_length_metric_multi_batch_parameter_builder_for_metrics.to_json_dict()
),
]
column_max_length_range_parameter_builder_for_validations: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.build_numeric_metric_range_multi_batch_parameter_builder(
metric_name=None,
suffix=None,
metric_value_kwargs=None,
suite_parameter_builder_configs=suite_parameter_builder_configs,
)
# Step-4: Instantiate and return "Rule" object, comprised of "variables", "domain_builder", "parameter_builders", and "expectation_configuration_builders" components.
variables: dict = {
"mostly": 1.0,
"strict_min": False,
"strict_max": False,
"false_positive_rate": 0.05,
"estimator": "bootstrap",
"n_resamples": 9999,
"random_seed": None,
"quantile_statistic_interpolation_method": "nearest",
"quantile_bias_correction": False,
"quantile_bias_std_error_ratio_threshold": None,
"include_estimator_samples_histogram_in_details": False,
"truncate_values": {
"lower_bound": 0,
"upper_bound": None,
},
"round_decimals": 0,
"success_ratio": 7.5e-1,
}
parameter_builders: List[ParameterBuilder] = [
column_min_length_range_parameter_builder_for_validations,
column_max_length_range_parameter_builder_for_validations,
]
rule = Rule(
name="text_columns_rule",
variables=variables,
domain_builder=text_column_type_domain_builder,
parameter_builders=parameter_builders,
expectation_configuration_builders=None,
)
return rule
@staticmethod
def _build_categorical_columns_rule() -> Rule:
"""
This method builds "Rule" object focused on emitting "ParameterBuilder" objects for categorical columns "Domain" type metrics.
"""
# Step-1: Instantiate "CategoricalColumnDomainBuilder" for selecting columns containing "FEW" discrete values.
categorical_column_type_domain_builder: DomainBuilder = ColumnDomainBuilder(
include_column_names=None,
exclude_column_names=None,
include_column_name_suffixes=None,
exclude_column_name_suffixes=[
"_id",
],
semantic_type_filter_module_name=None,
semantic_type_filter_class_name=None,
include_semantic_types=[
SemanticDomainTypes.LOGIC,
SemanticDomainTypes.TEXT,
],
exclude_semantic_types=[
SemanticDomainTypes.BINARY,
SemanticDomainTypes.CURRENCY,
SemanticDomainTypes.IDENTIFIER,
],
data_context=None,
)
# Step-2: Declare "ParameterBuilder" for every metric of interest.
# Step-3: Declare "ParameterBuilder" configurations for all additional statistics needed.
column_unique_proportion_range_parameter_builder_for_validations: ParameterBuilder = DataAssistant.commonly_used_parameter_builders.build_numeric_metric_range_multi_batch_parameter_builder(
metric_name="column.unique_proportion",
suffix=None,
metric_value_kwargs=None,
)
# Step-4: Instantiate and return "Rule" object, comprised of "variables", "domain_builder", "parameter_builders", and "expectation_configuration_builders" components.
variables: dict = {
"cardinality_limit_mode": CardinalityLimitMode.FEW.name,
"mostly": 1.0,
"strict_min": False,
"strict_max": False,
"false_positive_rate": 0.05,
"estimator": "bootstrap",
"n_resamples": 9999,
"random_seed": None,
"quantile_statistic_interpolation_method": "nearest",
"quantile_bias_correction": False,
"quantile_bias_std_error_ratio_threshold": None,
"include_estimator_samples_histogram_in_details": False,
"truncate_values": {
"lower_bound": 0.0,
"upper_bound": None,
},
"round_decimals": 15,
}
parameter_builders: List[ParameterBuilder] = [
column_unique_proportion_range_parameter_builder_for_validations,
]
rule = Rule(
name="categorical_columns_rule",
variables=variables,
domain_builder=categorical_column_type_domain_builder,
parameter_builders=parameter_builders,
expectation_configuration_builders=None,
)
return rule
|
StatisticsDataAssistant
|
python
|
getsentry__sentry
|
src/sentry/auth/providers/saml2/jumpcloud/provider.py
|
{
"start": 80,
"end": 177
}
|
class ____(GenericSAML2Provider):
name = "Jumpcloud"
key = "jumpcloud"
|
JumpcloudSAML2Provider
|
python
|
dask__dask
|
dask/array/core.py
|
{
"start": 3199,
"end": 44342
}
|
class ____(Warning):
"""A warning given when bad chunking may cause poor performance"""
def getter(a, b, asarray=True, lock=None):
if isinstance(b, tuple) and any(x is None for x in b):
b2 = tuple(x for x in b if x is not None)
b3 = tuple(
None if x is None else slice(None, None)
for x in b
if not isinstance(x, Integral)
)
return getter(a, b2, asarray=asarray, lock=lock)[b3]
if lock:
lock.acquire()
try:
c = a[b]
# Below we special-case `np.matrix` to force a conversion to
# `np.ndarray` and preserve original Dask behavior for `getter`,
# as for all purposes `np.matrix` is array-like and thus
# `is_arraylike` evaluates to `True` in that case.
if asarray and (not is_arraylike(c) or isinstance(c, np.matrix)):
c = np.asarray(c)
finally:
if lock:
lock.release()
return c
def getter_nofancy(a, b, asarray=True, lock=None):
"""A simple wrapper around ``getter``.
Used to indicate to the optimization passes that the backend doesn't
support fancy indexing.
"""
return getter(a, b, asarray=asarray, lock=lock)
def getter_inline(a, b, asarray=True, lock=None):
"""A getter function that optimizations feel comfortable inlining
Slicing operations with this function may be inlined into a graph, such as
in the following rewrite
**Before**
>>> a = x[:10] # doctest: +SKIP
>>> b = a + 1 # doctest: +SKIP
>>> c = a * 2 # doctest: +SKIP
**After**
>>> b = x[:10] + 1 # doctest: +SKIP
>>> c = x[:10] * 2 # doctest: +SKIP
This inlining can be relevant to operations when running off of disk.
"""
return getter(a, b, asarray=asarray, lock=lock)
from dask.array.optimization import fuse_slice, optimize
# __array_function__ dict for mapping aliases and mismatching names
_HANDLED_FUNCTIONS = {}
def implements(*numpy_functions):
"""Register an __array_function__ implementation for dask.array.Array
Register that a function implements the API of a NumPy function (or several
NumPy functions in case of aliases) which is handled with
``__array_function__``.
Parameters
----------
\\*numpy_functions : callables
One or more NumPy functions that are handled by ``__array_function__``
and will be mapped by `implements` to a `dask.array` function.
"""
def decorator(dask_func):
for numpy_function in numpy_functions:
_HANDLED_FUNCTIONS[numpy_function] = dask_func
return dask_func
return decorator
def _should_delegate(self, other) -> bool:
"""Check whether Dask should delegate to the other.
This implementation follows NEP-13:
https://numpy.org/neps/nep-0013-ufunc-overrides.html#behavior-in-combination-with-python-s-binary-operations
"""
if hasattr(other, "__array_ufunc__") and other.__array_ufunc__ is None:
return True
elif (
hasattr(other, "__array_ufunc__")
and not is_valid_array_chunk(other)
# don't delegate to our own parent classes
and not isinstance(self, type(other))
and type(self) is not type(other)
):
return True
elif (
not hasattr(other, "__array_ufunc__")
and hasattr(other, "__array_priority__")
and other.__array_priority__ > self.__array_priority__
):
return True
return False
def check_if_handled_given_other(f):
"""Check if method is handled by Dask given type of other
Ensures proper deferral to upcast types in dunder operations without
assuming unknown types are automatically downcast types.
"""
@wraps(f)
def wrapper(self, other):
if _should_delegate(self, other):
return NotImplemented
else:
return f(self, other)
return wrapper
def slices_from_chunks(chunks):
"""Translate chunks tuple to a set of slices in product order
>>> slices_from_chunks(((2, 2), (3, 3, 3))) # doctest: +NORMALIZE_WHITESPACE
[(slice(0, 2, None), slice(0, 3, None)),
(slice(0, 2, None), slice(3, 6, None)),
(slice(0, 2, None), slice(6, 9, None)),
(slice(2, 4, None), slice(0, 3, None)),
(slice(2, 4, None), slice(3, 6, None)),
(slice(2, 4, None), slice(6, 9, None))]
"""
cumdims = [cached_cumsum(bds, initial_zero=True) for bds in chunks]
slices = [
[slice(s, s + dim) for s, dim in zip(starts, shapes)]
for starts, shapes in zip(cumdims, chunks)
]
return list(product(*slices))
def graph_from_arraylike(
arr, # Any array-like which supports slicing
chunks,
shape,
name,
getitem=getter,
lock=False,
asarray=True,
dtype=None,
inline_array=False,
) -> HighLevelGraph:
"""
HighLevelGraph for slicing chunks from an array-like according to a chunk pattern.
If ``inline_array`` is True, this makes a Blockwise layer of slicing tasks where the
array-like is embedded into every task.
If ``inline_array`` is False, this inserts the array-like as a standalone value in
a MaterializedLayer, then generates a Blockwise layer of slicing tasks that refer
to it.
>>> dict(graph_from_arraylike(arr, chunks=(2, 3), shape=(4, 6), name="X", inline_array=True)) # doctest: +SKIP
{(arr, 0, 0): (getter, arr, (slice(0, 2), slice(0, 3))),
(arr, 1, 0): (getter, arr, (slice(2, 4), slice(0, 3))),
(arr, 1, 1): (getter, arr, (slice(2, 4), slice(3, 6))),
(arr, 0, 1): (getter, arr, (slice(0, 2), slice(3, 6)))}
>>> dict( # doctest: +SKIP
graph_from_arraylike(arr, chunks=((2, 2), (3, 3)), shape=(4,6), name="X", inline_array=False)
)
{"original-X": arr,
('X', 0, 0): (getter, 'original-X', (slice(0, 2), slice(0, 3))),
('X', 1, 0): (getter, 'original-X', (slice(2, 4), slice(0, 3))),
('X', 1, 1): (getter, 'original-X', (slice(2, 4), slice(3, 6))),
('X', 0, 1): (getter, 'original-X', (slice(0, 2), slice(3, 6)))}
"""
chunks = normalize_chunks(chunks, shape, dtype=dtype)
out_ind = tuple(range(len(shape)))
if (
has_keyword(getitem, "asarray")
and has_keyword(getitem, "lock")
and (not asarray or lock)
):
kwargs = {"asarray": asarray, "lock": lock}
else:
# Common case, drop extra parameters
kwargs = {}
if inline_array:
layer = core_blockwise(
getitem,
name,
out_ind,
arr,
None,
ArraySliceDep(chunks),
out_ind,
numblocks={},
_data_producer=True,
**kwargs,
)
return HighLevelGraph.from_collections(name, layer)
else:
original_name = f"original-{name}"
layers = {}
layers[original_name] = MaterializedLayer({original_name: arr})
layers[name] = core_blockwise(
getitem,
name,
out_ind,
TaskRef(original_name),
None,
ArraySliceDep(chunks),
out_ind,
numblocks={},
_data_producer=True,
**kwargs,
)
deps = {
original_name: set(),
name: {original_name},
}
return HighLevelGraph(layers, deps)
def dotmany(A, B, leftfunc=None, rightfunc=None, **kwargs):
"""Dot product of many aligned chunks
>>> x = np.array([[1, 2], [1, 2]])
>>> y = np.array([[10, 20], [10, 20]])
>>> dotmany([x, x, x], [y, y, y])
array([[ 90, 180],
[ 90, 180]])
Optionally pass in functions to apply to the left and right chunks
>>> dotmany([x, x, x], [y, y, y], rightfunc=np.transpose)
array([[150, 150],
[150, 150]])
"""
if leftfunc:
A = map(leftfunc, A)
if rightfunc:
B = map(rightfunc, B)
return sum(map(partial(np.dot, **kwargs), A, B))
def _concatenate2(arrays, axes=None):
"""Recursively concatenate nested lists of arrays along axes
Each entry in axes corresponds to each level of the nested list. The
length of axes should correspond to the level of nesting of arrays.
If axes is an empty list or tuple, return arrays, or arrays[0] if
arrays is a list.
>>> x = np.array([[1, 2], [3, 4]])
>>> _concatenate2([x, x], axes=[0])
array([[1, 2],
[3, 4],
[1, 2],
[3, 4]])
>>> _concatenate2([x, x], axes=[1])
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
>>> _concatenate2([[x, x], [x, x]], axes=[0, 1])
array([[1, 2, 1, 2],
[3, 4, 3, 4],
[1, 2, 1, 2],
[3, 4, 3, 4]])
Supports Iterators
>>> _concatenate2(iter([x, x]), axes=[1])
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
Special Case
>>> _concatenate2([x, x], axes=())
array([[1, 2],
[3, 4]])
"""
if axes is None:
axes = []
if axes == ():
if isinstance(arrays, list):
return arrays[0]
else:
return arrays
if isinstance(arrays, Iterator):
arrays = list(arrays)
if not isinstance(arrays, (list, tuple)):
return arrays
if len(axes) > 1:
arrays = [_concatenate2(a, axes=axes[1:]) for a in arrays]
concatenate = concatenate_lookup.dispatch(
type(max(arrays, key=lambda x: getattr(x, "__array_priority__", 0)))
)
if isinstance(arrays[0], dict):
# Handle concatenation of `dict`s, used as a replacement for structured
# arrays when that's not supported by the array library (e.g., CuPy).
keys = list(arrays[0].keys())
assert all(list(a.keys()) == keys for a in arrays)
ret = dict()
for k in keys:
ret[k] = concatenate(list(a[k] for a in arrays), axis=axes[0])
return ret
else:
return concatenate(arrays, axis=axes[0])
def apply_infer_dtype(func, args, kwargs, funcname, suggest_dtype="dtype", nout=None):
"""
Tries to infer output dtype of ``func`` for a small set of input arguments.
Parameters
----------
func: Callable
Function for which output dtype is to be determined
args: List of array like
Arguments to the function, which would usually be used. Only attributes
``ndim`` and ``dtype`` are used.
kwargs: dict
Additional ``kwargs`` to the ``func``
funcname: String
Name of calling function to improve potential error messages
suggest_dtype: None/False or String
If not ``None`` adds suggestion to potential error message to specify a dtype
via the specified kwarg. Defaults to ``'dtype'``.
nout: None or Int
``None`` if function returns single output, integer if many.
Defaults to ``None``.
Returns
-------
: dtype or List of dtype
One or many dtypes (depending on ``nout``)
"""
from dask.array.utils import meta_from_array
# make sure that every arg is an evaluated array
args = [
(
np.zeros_like(meta_from_array(x), shape=((1,) * x.ndim), dtype=x.dtype)
if is_arraylike(x)
else x
)
for x in args
]
try:
with np.errstate(all="ignore"):
o = func(*args, **kwargs)
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
tb = "".join(traceback.format_tb(exc_traceback))
suggest = (
(
f"Please specify the dtype explicitly using the `{suggest_dtype}` kwarg.\n\n"
)
if suggest_dtype
else ""
)
msg = (
f"`dtype` inference failed in `{funcname}`.\n\n"
f"{suggest}"
"Original error is below:\n"
"------------------------\n"
f"{e!r}\n\n"
"Traceback:\n"
"---------\n"
f"{tb}"
)
else:
msg = None
if msg is not None:
raise ValueError(msg)
return getattr(o, "dtype", type(o)) if nout is None else tuple(e.dtype for e in o)
def normalize_arg(x):
"""Normalize user provided arguments to blockwise or map_blocks
We do a few things:
1. If they are string literals that might collide with blockwise_token then we
quote them
2. If they are large (as defined by sizeof) then we put them into the
graph on their own by using dask.delayed
"""
if is_dask_collection(x):
return x
elif isinstance(x, str) and re.match(r"_\d+", x):
return delayed(x)
elif isinstance(x, list) and len(x) >= 10:
return delayed(x)
elif sizeof(x) > 1e6:
return delayed(x)
else:
return x
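# Hedged illustration, not part of the original module: the normalization rules in
# action. The inputs are arbitrary examples; no new API is introduced.
def _example_normalize_args():
    passed_through = normalize_arg(5)               # small scalar: returned unchanged
    quoted = normalize_arg("_0")                    # looks like a blockwise token: wrapped in delayed
    delayed_list = normalize_arg(list(range(100)))  # long list: put into the graph via delayed
    return passed_through, quoted, delayed_list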
def _pass_extra_kwargs(func, keys, *args, **kwargs):
"""Helper for :func:`dask.array.map_blocks` to pass `block_info` or `block_id`.
For each element of `keys`, a corresponding element of args is changed
to a keyword argument with that key, before all arguments are passed on
to `func`.
"""
kwargs.update(zip(keys, args))
return func(*args[len(keys) :], **kwargs)
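# Hedged illustration, not part of the original module: how the positional values
# prepended by blockwise are turned back into keyword arguments. ``_demo_block_func``
# is a hypothetical stand-in for a user-supplied ``map_blocks`` callable.
def _demo_block_func(x, block_id=None):
    return x, block_id


def _example_pass_extra_kwargs():
    # Equivalent to calling _demo_block_func(np.arange(3), block_id=(0,)).
    return _pass_extra_kwargs(_demo_block_func, ["block_id"], (0,), np.arange(3))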
def map_blocks(
func,
*args,
name=None,
token=None,
dtype=None,
chunks=None,
drop_axis=None,
new_axis=None,
enforce_ndim=False,
meta=None,
**kwargs,
):
"""Map a function across all blocks of a dask array.
Note that ``map_blocks`` will attempt to automatically determine the output
array type by calling ``func`` on 0-d versions of the inputs. Please refer to
the ``meta`` keyword argument below if you expect that the function will not
succeed when operating on 0-d arrays.
Parameters
----------
func : callable
Function to apply to every block in the array.
If ``func`` accepts ``block_info=`` or ``block_id=``
as keyword arguments, these will be passed dictionaries
containing information about input and output chunks/arrays
during computation. See examples for details.
args : dask arrays or other objects
dtype : np.dtype, optional
The ``dtype`` of the output array. It is recommended to provide this.
If not provided, will be inferred by applying the function to a small
set of fake data.
chunks : tuple, optional
Chunk shape of resulting blocks if the function does not preserve
shape. If not provided, the resulting array is assumed to have the same
block structure as the first input array.
drop_axis : number or iterable, optional
Dimensions lost by the function.
new_axis : number or iterable, optional
New dimensions created by the function. Note that these are applied
after ``drop_axis`` (if present). The size of each chunk along this
dimension will be set to 1. Please specify ``chunks`` if the individual
chunks have a different size.
enforce_ndim : bool, default False
Whether to enforce at runtime that the dimensionality of the array
produced by ``func`` actually matches that of the array returned by
``map_blocks``.
If True, this will raise an error when there is a mismatch.
token : string, optional
The key prefix to use for the output array. If not provided, will be
determined from the function name.
name : string, optional
The key name to use for the output array. Note that this fully
specifies the output key name, and must be unique. If not provided,
will be determined by a hash of the arguments.
meta : array-like, optional
The ``meta`` of the output array, when specified is expected to be an
array of the same type and dtype of that returned when calling ``.compute()``
on the array returned by this function. When not provided, ``meta`` will be
inferred by applying the function to a small set of fake data, usually a
0-d array. It's important to ensure that ``func`` can successfully complete
computation without raising exceptions when 0-d is passed to it, providing
``meta`` will be required otherwise. If the output type is known beforehand
(e.g., ``np.ndarray``, ``cupy.ndarray``), an empty array of such type dtype
can be passed, for example: ``meta=np.array((), dtype=np.int32)``.
**kwargs :
Other keyword arguments to pass to function. Values must be constants
(not dask.arrays)
See Also
--------
dask.array.map_overlap : Generalized operation with overlap between neighbors.
dask.array.blockwise : Generalized operation with control over block alignment.
Examples
--------
>>> import dask.array as da
>>> x = da.arange(6, chunks=3)
>>> x.map_blocks(lambda x: x * 2).compute()
array([ 0, 2, 4, 6, 8, 10])
The ``da.map_blocks`` function can also accept multiple arrays.
>>> d = da.arange(5, chunks=2)
>>> e = da.arange(5, chunks=2)
>>> f = da.map_blocks(lambda a, b: a + b**2, d, e)
>>> f.compute()
array([ 0, 2, 6, 12, 20])
If the function changes shape of the blocks then you must provide chunks
explicitly.
>>> y = x.map_blocks(lambda x: x[::2], chunks=((2, 2),))
You have a bit of freedom in specifying chunks. If all of the output chunk
sizes are the same, you can provide just that chunk size as a single tuple.
>>> a = da.arange(18, chunks=(6,))
>>> b = a.map_blocks(lambda x: x[:3], chunks=(3,))
If the function changes the dimension of the blocks you must specify the
created or destroyed dimensions.
>>> b = a.map_blocks(lambda x: x[None, :, None], chunks=(1, 6, 1),
... new_axis=[0, 2])
If ``chunks`` is specified but ``new_axis`` is not, then it is inferred to
add the necessary number of axes on the left.
Note that ``map_blocks()`` will concatenate chunks along axes specified by
the keyword parameter ``drop_axis`` prior to applying the function.
This is illustrated in the figure below:
.. image:: /images/map_blocks_drop_axis.png
Due to memory-size constraints, it is often not advisable to use ``drop_axis``
on an axis that is chunked. In that case, it is better not to use
``map_blocks`` but rather
``dask.array.reduction(..., axis=dropped_axes, concatenate=False)`` which
maintains a leaner memory footprint while it drops any axis.
Map_blocks aligns blocks by block positions without regard to shape. In the
following example we have two arrays with the same number of blocks but
with different shape and chunk sizes.
>>> x = da.arange(1000, chunks=(100,))
>>> y = da.arange(100, chunks=(10,))
The relevant attribute to match is numblocks.
>>> x.numblocks
(10,)
>>> y.numblocks
(10,)
If these match (up to broadcasting rules) then we can map arbitrary
functions across blocks
>>> def func(a, b):
... return np.array([a.max(), b.max()])
>>> da.map_blocks(func, x, y, chunks=(2,), dtype='i8')
dask.array<func, shape=(20,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>
>>> _.compute()
array([ 99, 9, 199, 19, 299, 29, 399, 39, 499, 49, 599, 59, 699,
69, 799, 79, 899, 89, 999, 99])
Your block function can get information about where it is in the array by
accepting a special ``block_info`` or ``block_id`` keyword argument.
During computation, they will contain information about each of the input
and output chunks (and dask arrays) relevant to each call of ``func``.
>>> def func(block_info=None):
... pass
This will receive the following information:
>>> block_info # doctest: +SKIP
{0: {'shape': (1000,),
'num-chunks': (10,),
'chunk-location': (4,),
'array-location': [(400, 500)]},
None: {'shape': (1000,),
'num-chunks': (10,),
'chunk-location': (4,),
'array-location': [(400, 500)],
'chunk-shape': (100,),
'dtype': dtype('float64')}}
The keys to the ``block_info`` dictionary indicate which is the input and
output Dask array:
- **Input Dask array(s):** ``block_info[0]`` refers to the first input Dask array.
The dictionary key is ``0`` because that is the argument index corresponding
to the first input Dask array.
In cases where multiple Dask arrays have been passed as input to the function,
you can access them with the number corresponding to the input argument,
eg: ``block_info[1]``, ``block_info[2]``, etc.
(Note that if you pass multiple Dask arrays as input to map_blocks,
the arrays must match each other by having matching numbers of chunks
along corresponding dimensions, up to broadcasting rules.)
- **Output Dask array:** ``block_info[None]`` refers to the output Dask array,
and contains information about the output chunks.
The output chunk shape and dtype may be different from those of the input chunks.
For each dask array, ``block_info`` describes:
- ``shape``: the shape of the full Dask array,
- ``num-chunks``: the number of chunks of the full array in each dimension,
- ``chunk-location``: the chunk location (for example the fourth chunk over
in the first dimension), and
- ``array-location``: the array location within the full Dask array
(for example the slice corresponding to ``40:50``).
In addition to these, there are two extra parameters described by
``block_info`` for the output array (in ``block_info[None]``):
- ``chunk-shape``: the output chunk shape, and
- ``dtype``: the output dtype.
These features can be combined to synthesize an array from scratch, for
example:
>>> def func(block_info=None):
... loc = block_info[None]['array-location'][0]
... return np.arange(loc[0], loc[1])
>>> da.map_blocks(func, chunks=((4, 4),), dtype=np.float64)
dask.array<func, shape=(8,), dtype=float64, chunksize=(4,), chunktype=numpy.ndarray>
>>> _.compute()
array([0, 1, 2, 3, 4, 5, 6, 7])
``block_id`` is similar to ``block_info`` but contains only the ``chunk_location``:
>>> def func(block_id=None):
... pass
This will receive the following information:
>>> block_id # doctest: +SKIP
(4, 3)
You may specify the key name prefix of the resulting task in the graph with
the optional ``token`` keyword argument.
>>> x.map_blocks(lambda x: x + 1, name='increment')
dask.array<increment, shape=(1000,), dtype=int64, chunksize=(100,), chunktype=numpy.ndarray>
For functions that may not handle 0-d arrays, it's also possible to specify
``meta`` with an empty array matching the type of the expected result. In
the example below, ``func`` will result in an ``IndexError`` when computing
``meta``:
>>> rng = da.random.default_rng()
>>> da.map_blocks(lambda x: x[2], rng.random(5), meta=np.array(()))
dask.array<lambda, shape=(5,), dtype=float64, chunksize=(5,), chunktype=numpy.ndarray>
Similarly, it's possible to specify a non-NumPy array to ``meta``, and provide
a ``dtype``:
>>> import cupy # doctest: +SKIP
>>> rng = da.random.default_rng(cupy.random.default_rng()) # doctest: +SKIP
>>> dt = np.float32
>>> da.map_blocks(lambda x: x[2], rng.random(5, dtype=dt), meta=cupy.array((), dtype=dt)) # doctest: +SKIP
dask.array<lambda, shape=(5,), dtype=float32, chunksize=(5,), chunktype=cupy.ndarray>
"""
if drop_axis is None:
drop_axis = []
if not callable(func):
msg = (
"First argument must be callable function, not %s\n"
"Usage: da.map_blocks(function, x)\n"
" or: da.map_blocks(function, x, y, z)"
)
raise TypeError(msg % type(func).__name__)
token = f"{token or funcname(func)}"
new_axes = {}
if isinstance(drop_axis, Number):
drop_axis = [drop_axis]
if isinstance(new_axis, Number):
new_axis = [new_axis] # TODO: handle new_axis
arrs = [a for a in args if isinstance(a, Array)]
argpairs = []
for a in args:
if isinstance(a, Array):
argpairs.append((a, tuple(range(a.ndim))[::-1]))
elif isinstance(a, BlockwiseDep):
argpairs.append((a, tuple(range(args[0].ndim))[::-1]))
else:
argpairs.append((a, None))
if arrs:
out_ind = tuple(range(max(a.ndim for a in arrs)))[::-1]
else:
out_ind = ()
original_kwargs = kwargs
if dtype is None and meta is None:
try:
meta = compute_meta(func, dtype, *args, **kwargs)
except Exception:
pass
dtype = apply_infer_dtype(func, args, original_kwargs, "map_blocks")
if drop_axis:
ndim_out = len(out_ind)
if any(i < -ndim_out or i >= ndim_out for i in drop_axis):
raise ValueError(
f"drop_axis out of range (drop_axis={drop_axis}, "
f"but output is {ndim_out}d)."
)
drop_axis = [i % ndim_out for i in drop_axis]
out_ind = tuple(x for i, x in enumerate(out_ind) if i not in drop_axis)
if new_axis is None and chunks is not None and len(out_ind) < len(chunks):
new_axis = range(len(chunks) - len(out_ind))
if new_axis:
# new_axis = [x + len(drop_axis) for x in new_axis]
out_ind = list(out_ind)
for ax in sorted(new_axis):
n = len(out_ind) + len(drop_axis)
out_ind.insert(ax, n)
if chunks is not None:
new_axes[n] = chunks[ax]
else:
new_axes[n] = 1
out_ind = tuple(out_ind)
if max(new_axis) > max(out_ind):
raise ValueError("New_axis values do not fill in all dimensions")
if chunks is not None:
if len(chunks) != len(out_ind):
raise ValueError(
f"Provided chunks have {len(chunks)} dims; expected {len(out_ind)} dims"
)
adjust_chunks = dict(zip(out_ind, chunks))
else:
adjust_chunks = None
if enforce_ndim:
out = blockwise(
apply_and_enforce,
out_ind,
*concat(argpairs),
expected_ndim=len(out_ind),
_func=func,
name=name,
token=token,
new_axes=new_axes,
dtype=dtype,
concatenate=True,
align_arrays=False,
adjust_chunks=adjust_chunks,
meta=meta,
**kwargs,
)
else:
out = blockwise(
func,
out_ind,
*concat(argpairs),
name=name,
token=token,
new_axes=new_axes,
dtype=dtype,
concatenate=True,
align_arrays=False,
adjust_chunks=adjust_chunks,
meta=meta,
**kwargs,
)
extra_argpairs = []
extra_names = []
# If func has block_id as an argument, construct an object to inject it.
if has_keyword(func, "block_id"):
extra_argpairs.append((ArrayBlockIdDep(out.chunks), out_ind))
extra_names.append("block_id")
if has_keyword(func, "_overlap_trim_info"):
# Internal for map overlap to reduce size of graph
num_chunks = out.numblocks
block_id_dict = {
block_id: (block_id, num_chunks)
for block_id in product(*(range(len(c)) for c in out.chunks))
}
extra_argpairs.append((ArrayValuesDep(out.chunks, block_id_dict), out_ind))
extra_names.append("_overlap_trim_info")
# If func has block_info as an argument, construct a dict of block info
# objects and prepare to inject it.
if has_keyword(func, "block_info"):
starts = {}
num_chunks = {}
shapes = {}
for i, (arg, in_ind) in enumerate(argpairs):
if in_ind is not None:
shapes[i] = arg.shape
if drop_axis:
# We concatenate along dropped axes, so we need to treat them
# as if there is only a single chunk.
starts[i] = [
(
cached_cumsum(arg.chunks[j], initial_zero=True)
if ind in out_ind
else [0, arg.shape[j]]
)
for j, ind in enumerate(in_ind)
]
num_chunks[i] = tuple(len(s) - 1 for s in starts[i])
else:
starts[i] = [
cached_cumsum(c, initial_zero=True) for c in arg.chunks
]
num_chunks[i] = arg.numblocks
out_starts = [cached_cumsum(c, initial_zero=True) for c in out.chunks]
block_info_dict = {}
for block_id in product(*(range(len(c)) for c in out.chunks)):
# Get position of chunk, indexed by axis labels
location = {out_ind[i]: loc for i, loc in enumerate(block_id)}
info = {}
for i, shape in shapes.items():
# Compute chunk key in the array, taking broadcasting into
# account. We don't directly know which dimensions are
# broadcast, but any dimension with only one chunk can be
# treated as broadcast.
arr_k = tuple(
location.get(ind, 0) if num_chunks[i][j] > 1 else 0
for j, ind in enumerate(argpairs[i][1])
)
info[i] = {
"shape": shape,
"num-chunks": num_chunks[i],
"array-location": [
(starts[i][ij][j], starts[i][ij][j + 1])
for ij, j in enumerate(arr_k)
],
"chunk-location": arr_k,
}
info[None] = {
"shape": out.shape,
"num-chunks": out.numblocks,
"array-location": [
(out_starts[ij][j], out_starts[ij][j + 1])
for ij, j in enumerate(block_id)
],
"chunk-location": block_id,
"chunk-shape": tuple(
out.chunks[ij][j] for ij, j in enumerate(block_id)
),
"dtype": dtype,
}
block_info_dict[block_id] = info
extra_argpairs.append((ArrayValuesDep(out.chunks, block_info_dict), out_ind))
extra_names.append("block_info")
if extra_argpairs:
# Rewrite the Blockwise layer. It would be nice to find a way to
# avoid doing it twice, but it's currently needed to determine
# out.chunks from the first pass. Since it constructs a Blockwise
# rather than an expanded graph, it shouldn't be too expensive.
out = blockwise(
_pass_extra_kwargs,
out_ind,
func,
None,
tuple(extra_names),
None,
*concat(extra_argpairs),
*concat(argpairs),
name=out.name,
dtype=out.dtype,
concatenate=True,
align_arrays=False,
adjust_chunks=dict(zip(out_ind, out.chunks)),
meta=meta,
**kwargs,
)
return out
def apply_and_enforce(*args, **kwargs):
"""Apply a function, and enforce the output.ndim to match expected_ndim
Ensures the output has the expected dimensionality."""
func = kwargs.pop("_func")
expected_ndim = kwargs.pop("expected_ndim")
out = func(*args, **kwargs)
if getattr(out, "ndim", 0) != expected_ndim:
out_ndim = getattr(out, "ndim", 0)
raise ValueError(
f"Dimension mismatch: expected output of {func} "
f"to have dims = {expected_ndim}. Got {out_ndim} instead."
)
return out
def broadcast_chunks(*chunkss):
"""Construct a chunks tuple that broadcasts many chunks tuples
>>> a = ((5, 5),)
>>> b = ((5, 5),)
>>> broadcast_chunks(a, b)
((5, 5),)
>>> a = ((10, 10, 10), (5, 5),)
>>> b = ((5, 5),)
>>> broadcast_chunks(a, b)
((10, 10, 10), (5, 5))
>>> a = ((10, 10, 10), (5, 5),)
>>> b = ((1,), (5, 5),)
>>> broadcast_chunks(a, b)
((10, 10, 10), (5, 5))
>>> a = ((10, 10, 10), (5, 5),)
>>> b = ((3, 3,), (5, 5),)
>>> broadcast_chunks(a, b)
Traceback (most recent call last):
...
ValueError: Chunks do not align: [(10, 10, 10), (3, 3)]
"""
if not chunkss:
return ()
elif len(chunkss) == 1:
return chunkss[0]
n = max(map(len, chunkss))
chunkss2 = [((1,),) * (n - len(c)) + c for c in chunkss]
result = []
for i in range(n):
step1 = [c[i] for c in chunkss2]
if all(c == (1,) for c in step1):
step2 = step1
else:
step2 = [c for c in step1 if c != (1,)]
if len(set(step2)) != 1:
raise ValueError(f"Chunks do not align: {step2}")
result.append(step2[0])
return tuple(result)
def store(
sources: Array | Collection[Array],
targets: ArrayLike | Delayed | Collection[ArrayLike | Delayed],
lock: bool | Lock = True,
regions: tuple[slice, ...] | Collection[tuple[slice, ...]] | None = None,
compute: bool = True,
return_stored: bool = False,
load_stored: bool | None = None,
**kwargs,
):
"""Store dask arrays in array-like objects, overwrite data in target
This stores dask arrays into object that supports numpy-style setitem
indexing. It stores values chunk by chunk so that it does not have to
fill up memory. For best performance you can align the block size of
the storage target with the block size of your array.
If your data fits in memory then you may prefer calling
``np.array(myarray)`` instead.
Parameters
----------
sources: Array or collection of Arrays
targets: array-like or Delayed or collection of array-likes and/or Delayeds
These should support setitem syntax ``target[10:20] = ...``.
If sources is a single item, targets must be a single item; if sources is a
collection of arrays, targets must be a matching collection.
lock: boolean or threading.Lock, optional
Whether or not to lock the data stores while storing.
Pass True (lock each file individually), False (don't lock) or a
particular :class:`threading.Lock` object to be shared among all writes.
regions: tuple of slices or collection of tuples of slices, optional
Each ``region`` tuple in ``regions`` should be such that
``target[region].shape = source.shape``
for the corresponding source and target in sources and targets,
respectively. If this is a tuple, the contents will be assumed to be
slices, so do not provide a tuple of tuples.
compute: boolean, optional
If true compute immediately; return :class:`dask.delayed.Delayed` otherwise.
return_stored: boolean, optional
Optionally return the stored result (default False).
load_stored: boolean, optional
Optionally return the stored result, loaded into memory (default None).
If None, ``load_stored`` is True if ``return_stored`` is True and
``compute`` is False. *This is an advanced option.*
When False, store will return the appropriate ``target`` for each chunk that is stored.
Directly computing this result is not what you want.
Instead, you can use the returned ``target`` to execute follow-up operations on the store.
kwargs:
Parameters passed to compute/persist (only used if compute=True)
Returns
-------
If return_stored=True
tuple of Arrays
If return_stored=False and compute=True
None
If return_stored=False and compute=False
Delayed
Examples
--------
>>> import h5py # doctest: +SKIP
>>> f = h5py.File('myfile.hdf5', mode='a') # doctest: +SKIP
>>> dset = f.create_dataset('/data', shape=x.shape,
... chunks=x.chunks,
... dtype='f8') # doctest: +SKIP
>>> store(x, dset) # doctest: +SKIP
Alternatively store many arrays at the same time
>>> store([x, y, z], [dset1, dset2, dset3]) # doctest: +SKIP
"""
if isinstance(sources, Array):
sources = [sources]
# There's no way to test that targets is a single array-like.
# We need to trust the user.
targets = [targets] # type: ignore[list-item]
targets = cast("Collection[ArrayLike | Delayed]", targets)
if any(not isinstance(s, Array) for s in sources):
raise ValueError("All sources must be dask array objects")
if len(sources) != len(targets):
raise ValueError(
f"Different number of sources [{len(sources)}] and targets [{len(targets)}]"
)
if isinstance(regions, tuple) or regions is None:
regions_list = [regions] * len(sources)
else:
regions_list = list(regions)
if len(sources) != len(regions_list):
raise ValueError(
f"Different number of sources [{len(sources)}] and "
f"targets [{len(targets)}] than regions [{len(regions_list)}]"
)
del regions
if load_stored is None:
load_stored = return_stored and not compute
if lock is True:
lock = get_scheduler_lock(collection=Array, scheduler=kwargs.get("scheduler"))
arrays = []
for s, t, r in zip(sources, targets, regions_list):
slices = ArraySliceDep(s.chunks)
arrays.append(
s.map_blocks(
load_store_chunk, # type: ignore[arg-type]
t,
# Note: slices / BlockwiseDep have to be passed by arg, not by kwarg
slices,
region=r,
lock=lock,
return_stored=return_stored,
load_stored=load_stored,
token="store-map",
meta=s._meta,
)
)
if compute:
if not return_stored:
import dask
dask.compute(arrays, **kwargs)
return None
else:
stored_persisted = persist(*arrays, **kwargs)
arrays = []
for s, r in zip(stored_persisted, regions_list):
slices = ArraySliceDep(s.chunks)
arrays.append(
s.map_blocks(
load_chunk, # type: ignore[arg-type]
# Note: slices / BlockwiseDep have to be passed by arg, not by kwarg
slices,
lock=lock,
region=r,
meta=s._meta,
)
)
if len(arrays) == 1:
return arrays[0]
return tuple(arrays)
def blockdims_from_blockshape(shape, chunks):
"""
>>> blockdims_from_blockshape((10, 10), (4, 3))
((4, 4, 2), (3, 3, 3, 1))
>>> blockdims_from_blockshape((10, 0), (4, 0))
((4, 4, 2), (0,))
"""
if chunks is None:
raise TypeError("Must supply chunks= keyword argument")
if shape is None:
raise TypeError("Must supply shape= keyword argument")
if np.isnan(sum(shape)) or np.isnan(sum(chunks)):
raise ValueError(
f"Array chunk sizes are unknown. shape: {shape}, chunks: {chunks}{unknown_chunk_message}"
)
if not all(map(is_integer, chunks)):
raise ValueError("chunks can only contain integers.")
if not all(map(is_integer, shape)):
raise ValueError("shape can only contain integers.")
shape = tuple(map(int, shape))
chunks = tuple(map(int, chunks))
return tuple(
((bd,) * (d // bd) + ((d % bd,) if d % bd else ()) if d else (0,))
for d, bd in zip(shape, chunks)
)
def finalize(results):
if not results:
return concatenate3(results)
results2 = results
while isinstance(results2, (tuple, list)):
if len(results2) > 1:
return concatenate3(results)
else:
results2 = results2[0]
results = unpack_singleton(results)
# Single chunk. There is a risk that the result holds a buffer stored in the
# graph or on a process-local Worker. Deep copy to make sure that nothing can
# accidentally write back to it.
try:
return results.copy() # numpy, sparse, scipy.sparse (any version)
except AttributeError:
# Not an Array API object
return results
CHUNKS_NONE_ERROR_MESSAGE = """
You must specify a chunks= keyword argument.
This specifies the chunksize of your array blocks.
See the following documentation page for details:
https://docs.dask.org/en/latest/array-creation.html#chunks
""".strip()
|
PerformanceWarning
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/hooks/dataproc.py
|
{
"start": 2441,
"end": 7148
}
|
class ____:
"""A helper class for building Dataproc job."""
def __init__(
self,
project_id: str,
task_id: str,
cluster_name: str,
job_type: str,
properties: dict[str, str] | None = None,
) -> None:
name = f"{task_id.replace('.', '_')}_{uuid.uuid4()!s:.8}"
self.job_type = job_type
self.job: dict[str, Any] = {
"job": {
"reference": {"project_id": project_id, "job_id": name},
"placement": {"cluster_name": cluster_name},
"labels": {"airflow-version": "v" + airflow_version.replace(".", "-").replace("+", "-")},
job_type: {},
}
}
if properties is not None:
self.job["job"][job_type]["properties"] = properties
def add_labels(self, labels: dict | None = None) -> None:
"""
Set labels for Dataproc job.
:param labels: Labels for the job query.
"""
if labels:
self.job["job"]["labels"].update(labels)
def add_variables(self, variables: dict | None = None) -> None:
"""
Set variables for Dataproc job.
:param variables: Variables for the job query.
"""
if variables is not None:
self.job["job"][self.job_type]["script_variables"] = variables
def add_args(self, args: list[str] | None = None) -> None:
"""
Set args for Dataproc job.
:param args: Args for the job query.
"""
if args is not None:
self.job["job"][self.job_type]["args"] = args
def add_query(self, query: str | list[str]) -> None:
"""
Add query for Dataproc job.
:param query: query for the job.
"""
queries = self.job["job"][self.job_type].setdefault("query_list", {"queries": []})["queries"]
if isinstance(query, str):
queries.append(query)
elif isinstance(query, list):
queries.extend(query)
def add_query_uri(self, query_uri: str) -> None:
"""
Set query uri for Dataproc job.
:param query_uri: URI for the job query.
"""
self.job["job"][self.job_type]["query_file_uri"] = query_uri
def add_jar_file_uris(self, jars: list[str] | None = None) -> None:
"""
Set jars uris for Dataproc job.
:param jars: List of jars URIs
"""
if jars is not None:
self.job["job"][self.job_type]["jar_file_uris"] = jars
def add_archive_uris(self, archives: list[str] | None = None) -> None:
"""
Set archives uris for Dataproc job.
:param archives: List of archives URIs
"""
if archives is not None:
self.job["job"][self.job_type]["archive_uris"] = archives
def add_file_uris(self, files: list[str] | None = None) -> None:
"""
Set file uris for Dataproc job.
:param files: List of files URIs
"""
if files is not None:
self.job["job"][self.job_type]["file_uris"] = files
def add_python_file_uris(self, pyfiles: list[str] | None = None) -> None:
"""
Set python file uris for Dataproc job.
:param pyfiles: List of python files URIs
"""
if pyfiles is not None:
self.job["job"][self.job_type]["python_file_uris"] = pyfiles
def set_main(self, main_jar: str | None = None, main_class: str | None = None) -> None:
"""
Set Dataproc main class.
:param main_jar: URI for the main file.
:param main_class: Name of the main class.
:raises: ValueError
"""
if main_class is not None and main_jar is not None:
raise ValueError("Set either main_jar or main_class")
if main_jar:
self.job["job"][self.job_type]["main_jar_file_uri"] = main_jar
else:
self.job["job"][self.job_type]["main_class"] = main_class
def set_python_main(self, main: str) -> None:
"""
Set Dataproc main python file uri.
:param main: URI for the python main file.
"""
self.job["job"][self.job_type]["main_python_file_uri"] = main
def set_job_name(self, name: str) -> None:
"""
Set Dataproc job name.
Job name is sanitized, replacing dots by underscores.
:param name: Job name.
"""
sanitized_name = f"{name.replace('.', '_')}_{uuid.uuid4()!s:.8}"
self.job["job"]["reference"]["job_id"] = sanitized_name
def build(self) -> dict:
"""
Return Dataproc job.
:return: Dataproc job
"""
return self.job
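# Hedged usage sketch, not part of the original module: assembling a Hive job dict
# with the builder class defined above (its name is masked as ``____`` in this
# document). The project, cluster, label and query values are hypothetical.
def _example_build_hive_job():
    builder = ____(
        project_id="my-project",
        task_id="my_task",
        cluster_name="my-cluster",
        job_type="hive_job",
        properties={"hive.exec.dynamic.partition": "true"},
    )
    builder.add_labels({"team": "data"})
    builder.add_query("SELECT 1")
    return builder.build()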
|
DataProcJobBuilder
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_chart_bar04.py
|
{
"start": 315,
"end": 2165
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_bar04.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet1 = workbook.add_worksheet()
worksheet2 = workbook.add_worksheet()
chart1 = workbook.add_chart({"type": "bar"})
chart2 = workbook.add_chart({"type": "bar"})
chart1.axis_ids = [64446848, 64448384]
chart2.axis_ids = [85389696, 85391232]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet1.write_column("A1", data[0])
worksheet1.write_column("B1", data[1])
worksheet1.write_column("C1", data[2])
chart1.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
}
)
chart1.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
}
)
worksheet1.insert_chart("E9", chart1)
worksheet2.write_column("A1", data[0])
worksheet2.write_column("B1", data[1])
worksheet2.write_column("C1", data[2])
chart2.add_series(
{
"categories": "=Sheet2!$A$1:$A$5",
"values": "=Sheet2!$B$1:$B$5",
}
)
chart2.add_series(
{
"categories": "=Sheet2!$A$1:$A$5",
"values": "=Sheet2!$C$1:$C$5",
}
)
worksheet2.insert_chart("E9", chart2)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
mlflow__mlflow
|
mlflow/system_metrics/metrics/base_metrics_monitor.py
|
{
"start": 94,
"end": 780
}
|
class ____(abc.ABC):
"""Base class of system metrics monitor."""
def __init__(self):
self._metrics = defaultdict(list)
@abc.abstractmethod
def collect_metrics(self):
"""Method to collect metrics.
Subclasses should implement this method to collect metrics and store them in `self._metrics`.
"""
@abc.abstractmethod
def aggregate_metrics(self):
"""Method to aggregate metrics.
Subclasses should implement this method to aggregate the metrics and return them in a dict.
"""
@property
def metrics(self):
return self._metrics
def clear_metrics(self):
self._metrics.clear()
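# Hedged illustration, not part of the original module: a minimal concrete subclass
# of the abstract monitor above (masked as ``____``). It records a constant value so
# the collect/aggregate contract is visible; a real monitor would sample system
# counters instead.
class _ConstantMetricsMonitor(____):
    def collect_metrics(self):
        self._metrics["dummy_metric"].append(1.0)

    def aggregate_metrics(self):
        return {
            name: sum(values) / len(values)
            for name, values in self._metrics.items()
            if values
        }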
|
BaseMetricsMonitor
|
python
|
apache__airflow
|
airflow-core/tests/unit/ti_deps/deps/test_pool_slots_available_dep.py
|
{
"start": 1229,
"end": 3039
}
|
class ____:
def setup_method(self):
db.clear_db_pools()
with create_session() as session:
test_pool = Pool(pool="test_pool", include_deferred=False)
test_includes_deferred_pool = Pool(pool="test_includes_deferred_pool", include_deferred=True)
session.add_all([test_pool, test_includes_deferred_pool])
session.commit()
def teardown_method(self):
db.clear_db_pools()
@patch("airflow.models.Pool.open_slots", return_value=0)
def test_pooled_task_reached_concurrency(self, mock_open_slots):
ti = Mock(pool="test_pool", pool_slots=1)
assert not PoolSlotsAvailableDep().is_met(ti=ti)
@patch("airflow.models.Pool.open_slots", return_value=1)
def test_pooled_task_pass(self, mock_open_slots):
ti = Mock(pool="test_pool", pool_slots=1)
assert PoolSlotsAvailableDep().is_met(ti=ti)
@patch("airflow.models.Pool.open_slots", return_value=0)
def test_running_pooled_task_pass(self, mock_open_slots):
for state in EXECUTION_STATES:
ti = Mock(pool="test_pool", state=state, pool_slots=1)
assert PoolSlotsAvailableDep().is_met(ti=ti)
@patch("airflow.models.Pool.open_slots", return_value=0)
def test_deferred_pooled_task_pass(self, mock_open_slots):
ti = Mock(pool="test_includes_deferred_pool", state=TaskInstanceState.DEFERRED, pool_slots=1)
assert PoolSlotsAvailableDep().is_met(ti=ti)
ti_to_fail = Mock(pool="test_pool", state=TaskInstanceState.DEFERRED, pool_slots=1)
assert not PoolSlotsAvailableDep().is_met(ti=ti_to_fail)
def test_task_with_nonexistent_pool(self):
ti = Mock(pool="nonexistent_pool", pool_slots=1)
assert not PoolSlotsAvailableDep().is_met(ti=ti)
|
TestPoolSlotsAvailableDep
|
python
|
cherrypy__cherrypy
|
cherrypy/process/plugins.py
|
{
"start": 8186,
"end": 12151
}
|
class ____(SimplePlugin):
"""Drop privileges. uid/gid arguments not available on Windows.
Special thanks to `Gavin Baker
<http://antonym.org/2005/12/dropping-privileges-in-python.html>`_.
"""
def __init__(self, bus, umask=None, uid=None, gid=None):
"""Initialize the privilege dropping plugin."""
SimplePlugin.__init__(self, bus)
self.finalized = False
self.uid = uid
self.gid = gid
self.umask = umask
@property
def uid(self):
"""The uid under which to run.
Availability: Unix.
"""
return self._uid
@uid.setter
def uid(self, val):
if val is not None:
if pwd is None:
self.bus.log(
'pwd module not available; ignoring uid.',
level=30,
)
val = None
elif isinstance(val, text_or_bytes):
val = pwd.getpwnam(val)[2]
self._uid = val
@property
def gid(self):
"""The gid under which to run.
Availability: Unix.
"""
return self._gid
@gid.setter
def gid(self, val):
if val is not None:
if grp is None:
self.bus.log(
'grp module not available; ignoring gid.',
level=30,
)
val = None
elif isinstance(val, text_or_bytes):
val = grp.getgrnam(val)[2]
self._gid = val
@property
def umask(self):
"""The default permission mode for newly created files and directories.
Usually expressed in octal format, for example, ``0644``.
Availability: Unix, Windows.
"""
return self._umask
@umask.setter
def umask(self, val):
if val is not None:
try:
os.umask
except AttributeError:
self.bus.log(
'umask function not available; ignoring umask.',
level=30,
)
val = None
self._umask = val
def start(self):
"""Drop the process privileges."""
# uid/gid
def current_ids():
"""Return the current (uid, gid) if available."""
name, group = None, None
if pwd:
name = pwd.getpwuid(os.getuid())[0]
if grp:
group = grp.getgrgid(os.getgid())[0]
return name, group
if self.finalized:
if not (self.uid is None and self.gid is None):
self.bus.log(
'Already running as uid: %r gid: %r' % current_ids(),
)
else:
if self.uid is None and self.gid is None:
if pwd or grp:
self.bus.log('uid/gid not set', level=30)
else:
self.bus.log('Started as uid: %r gid: %r' % current_ids())
if self.gid is not None:
os.setgid(self.gid)
os.setgroups([])
if self.uid is not None:
os.setuid(self.uid)
self.bus.log('Running as uid: %r gid: %r' % current_ids())
# umask
if self.finalized:
if self.umask is not None:
self.bus.log('umask already set to: %03o' % self.umask)
else:
if self.umask is None:
self.bus.log('umask not set', level=30)
else:
old_umask = os.umask(self.umask)
self.bus.log(
'umask old: %03o, new: %03o' % (old_umask, self.umask),
)
self.finalized = True
# This is slightly higher than the priority for server.start
# in order to facilitate the most common use: starting on a low
# port (which requires root) and then dropping to another user.
start.priority = 77
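# Hedged usage sketch, not part of the original module: attaching the plugin above
# (masked as ``____``) to the CherryPy bus so privileges are dropped when the engine
# starts. The uid/gid/umask values are examples only, and ``subscribe()`` is the
# standard SimplePlugin helper for registering bus listeners.
def _example_subscribe_drop_privileges():
    import cherrypy

    ____(cherrypy.engine, uid="nobody", gid="nogroup", umask=0o022).subscribe()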
|
DropPrivileges
|
python
|
doocs__leetcode
|
solution/2000-2099/2089.Find Target Indices After Sorting Array/Solution.py
|
{
"start": 0,
"end": 170
}
|
class ____:
def targetIndices(self, nums: List[int], target: int) -> List[int]:
nums.sort()
return [i for i, v in enumerate(nums) if v == target]
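# Hedged usage sketch, not part of the original solution (the class name is masked
# as ``____`` in this document, and ``List`` is assumed to come from ``typing``):
# sorting [1, 2, 5, 2, 3] gives [1, 2, 2, 3, 5], so target 2 sits at indices 1 and 2.
assert ____().targetIndices([1, 2, 5, 2, 3], 2) == [1, 2]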
|
Solution
|
python
|
huggingface__transformers
|
tests/fsdp/test_context_parallel.py
|
{
"start": 1125,
"end": 7748
}
|
class ____(TestCasePlus):
"""Test Trainer with Torch context parallelism enabled via accelerate's ParallelismConfig."""
@require_torch_multi_accelerator
@require_accelerate
@slow
@run_first
def test_cp_equivalence(self):
"""Test that CP produces the same losses as without CP."""
# Shared setup
world_size = 2
script_path = __file__
# Step 1: Run with CP enabled (cp_size=world_size)
cp_yes_output_dir = Path(self.get_auto_remove_tmp_dir()).resolve()
cp_yes_config_path = cp_yes_output_dir / "context_parallel_config.yaml"
cp_yes_losses_path = cp_yes_output_dir / "cp_yes_losses.json"
# Write config file inline (self-contained test)
with open(cp_yes_config_path, "w") as f:
f.write(
f"""distributed_type: FSDP
fsdp_config:
fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
fsdp_state_dict_type: SHARDED_STATE_DICT
fsdp_version: 2
mixed_precision: bf16
num_processes: {world_size}
parallelism_config:
parallelism_config_dp_replicate_size: 1
parallelism_config_dp_shard_size: 1
parallelism_config_tp_size: 1
parallelism_config_cp_size: {world_size}
parallelism_config_cp_comm_strategy: alltoall
"""
)
cmd_cp_yes = f"""
accelerate launch
--config_file {cp_yes_config_path}
{script_path}
--output_dir {cp_yes_output_dir}
--report_to none
--max_steps 10
--per_device_train_batch_size 1
--gradient_accumulation_steps 1
--logging_steps 1
--remove_unused_columns False
--seed 42
--loss_output_file {cp_yes_losses_path}
""".split()
execute_subprocess_async(cmd_cp_yes, env=self.get_env())
# Step 2: Run without CP (FSDP with num_processes=1, no parallelism_config)
cp_no_output_dir = Path(self.get_auto_remove_tmp_dir()).resolve()
cp_no_config_path = cp_no_output_dir / "context_parallel_config.yaml"
cp_no_losses_path = cp_no_output_dir / "cp_no_losses.json"
# Write config file inline (self-contained test)
with open(cp_no_config_path, "w") as f:
f.write(
"""distributed_type: FSDP
fsdp_config:
fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
fsdp_state_dict_type: SHARDED_STATE_DICT
fsdp_transformer_layer_cls_to_wrap: LlamaDecoderLayer
fsdp_version: 2
mixed_precision: bf16
num_processes: 1
"""
)
cmd_cp_no = f"""
accelerate launch
--config_file {cp_no_config_path}
{script_path}
--output_dir {cp_no_output_dir}
--report_to none
--max_steps 10
--per_device_train_batch_size 1
--gradient_accumulation_steps 1
--logging_steps 1
--remove_unused_columns False
--seed 42
--loss_output_file {cp_no_losses_path}
""".split()
execute_subprocess_async(cmd_cp_no, env=self.get_env())
# Compare losses - should be very close since CP just splits sequence computation
with open(cp_yes_losses_path) as f:
cp_yes_losses = json.load(f)
with open(cp_no_losses_path) as f:
cp_no_losses = json.load(f)
assert len(cp_yes_losses) == len(cp_no_losses), (
f"Different number of losses: CP has {len(cp_yes_losses)}, no-CP has {len(cp_no_losses)}"
)
# CP should produce very similar results (small numerical differences expected)
# The differences come from:
# - Different gradient reduction patterns in distributed training
# - BF16 mixed precision accumulated differences
# - Sequence splitting and gathering in CP mode
cp_yes_losses_tensor = torch.tensor(cp_yes_losses)
cp_no_losses_tensor = torch.tensor(cp_no_losses)
# Use torch.testing.assert_close with rtol=2% and atol=0.02
# Testing shows actual differences are typically <1.5%
torch.testing.assert_close(
cp_yes_losses_tensor,
cp_no_losses_tensor,
rtol=2e-2, # 2% relative tolerance
atol=2e-2, # 0.02 absolute tolerance
msg=f"CP losses {cp_yes_losses} do not match non-CP losses {cp_no_losses}",
)
if __name__ == "__main__":
# Parse custom arguments (not TrainingArguments parameters)
loss_output_file = None
if "--loss_output_file" in sys.argv:
idx = sys.argv.index("--loss_output_file")
loss_output_file = sys.argv[idx + 1]
sys.argv.pop(idx)
sys.argv.pop(idx)
parser = HfArgumentParser((TrainingArguments,))
training_args = parser.parse_args_into_dataclasses()[0]
# Use SmolLM (small Llama-based model that works with CP)
model_name = "HuggingFaceTB/SmolLM-135M"
tokenizer = AutoTokenizer.from_pretrained(model_name)
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
model = AutoModelForCausalLM.from_pretrained(
model_name,
attn_implementation="sdpa", # CP requires SDPA
)
# Create simple dataset: just tokenize some text
texts = [
"The quick brown fox jumps over the lazy dog. " * 10,
"Hello world, this is a test sentence for training. " * 10,
] * 4 # 8 samples total
def tokenize_function(examples):
return tokenizer(examples, max_length=128, truncation=True, padding="max_length")
train_dataset = [tokenize_function(text) for text in texts]
# Use standard DataCollatorForLanguageModeling for causal LM
# pad_to_multiple_of=4 ensures sequences are divisible by cp_size * 2 (for cp_size=2)
# Trainer will automatically generate position_ids and shift_labels as needed
data_collator = DataCollatorForLanguageModeling(
tokenizer=tokenizer,
mlm=False, # Causal language modeling
pad_to_multiple_of=4,
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
data_collator=data_collator,
)
# Train for a few steps
trainer.train()
# Verify training completed
assert trainer.state.global_step > 0, "Training should have completed at least one step"
# Save losses to file if requested (for equivalence testing)
if loss_output_file and training_args.process_index == 0:
losses = [log["loss"] for log in trainer.state.log_history if "loss" in log]
with open(loss_output_file, "w") as f:
json.dump(losses, f)
|
TestContextParallel
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/constructor14.py
|
{
"start": 537,
"end": 756
}
|
class ____(Generic[T_contra]):
def __init__(self, callback: Callback[T_contra]) -> None:
self._callback: Callback[T_contra] = callback
def copy(self) -> Self:
return type(self)(self._callback)
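# Hedged usage sketch, not part of the original sample: constructing the generic
# wrapper above (masked as ``____``) with a concrete callback and copying it.
# ``print_int`` is hypothetical and assumes ``Callback[int]`` is satisfied by a
# plain ``(int) -> None`` callable.
def print_int(value: int) -> None:
    print(value)


thing = ____(print_int)
thing_copy = thing.copy()  # via ``Self``, checkers infer the same concrete type here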
|
Thing
|
python
|
apache__airflow
|
airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_dags.py
|
{
"start": 2441,
"end": 8814
}
|
class ____:
"""Common class for /dags related unit tests."""
@staticmethod
def _clear_db():
clear_db_connections()
clear_db_runs()
clear_db_dags()
clear_db_assets()
clear_db_serialized_dags()
def _create_deactivated_paused_dag(self, session=None):
dag_model = DagModel(
dag_id=DAG3_ID,
bundle_name="dag_maker",
relative_fileloc="dag_del_1.py",
fileloc="/tmp/dag_del_1.py",
timetable_summary="2 2 * * *",
is_stale=True,
is_paused=True,
owners="test_owner,another_test_owner",
next_dagrun=datetime(2021, 1, 1, 12, 0, 0, tzinfo=timezone.utc),
)
dagrun_failed = DagRun(
dag_id=DAG3_ID,
run_id="run1",
logical_date=datetime(2018, 1, 1, 12, 0, 0, tzinfo=timezone.utc),
start_date=DAG3_START_DATE_1,
run_type=DagRunType.SCHEDULED,
state=DagRunState.FAILED,
triggered_by=DagRunTriggeredByType.TEST,
)
dagrun_success = DagRun(
dag_id=DAG3_ID,
run_id="run2",
logical_date=datetime(2019, 1, 1, 12, 0, 0, tzinfo=timezone.utc),
start_date=DAG3_START_DATE_2,
run_type=DagRunType.MANUAL,
state=DagRunState.SUCCESS,
triggered_by=DagRunTriggeredByType.TEST,
)
session.add(dag_model)
session.add(dagrun_failed)
session.add(dagrun_success)
def _create_dag_tags(self, session=None):
session.add(DagTag(dag_id=DAG1_ID, name="tag_2"))
session.add(DagTag(dag_id=DAG2_ID, name="tag_1"))
session.add(DagTag(dag_id=DAG3_ID, name="tag_1"))
def _create_asset_test_data(self, session=None):
"""Create test assets and asset-scheduled DAGs."""
# Create assets
asset1 = AssetModel(uri="test://asset1", name="test_asset_1", group="test-group")
asset2 = AssetModel(uri="s3://bucket/dataset", name="dataset_asset", group="test-group")
asset3 = AssetModel(uri="test://scheduled_asset", name="scheduled_asset", group="test-group")
session.add_all([asset1, asset2, asset3])
session.commit()
# Create a DAG with asset-based scheduling
asset_scheduled_dag = DagModel(
dag_id=ASSET_SCHEDULED_DAG_ID,
bundle_name="dag_maker",
relative_fileloc="asset_scheduled_dag.py",
fileloc="/tmp/asset_scheduled_dag.py",
timetable_summary="Asset",
timetable_description="Triggered by assets",
is_stale=False,
is_paused=False,
owners="airflow",
asset_expression={"any": [{"uri": "test://scheduled_asset"}]},
max_active_tasks=16,
max_active_runs=16,
max_consecutive_failed_dag_runs=0,
has_task_concurrency_limits=False,
has_import_errors=False,
)
# Create DAGs with asset dependencies
asset_dep_dag = DagModel(
dag_id=ASSET_DEP_DAG_ID,
bundle_name="dag_maker",
relative_fileloc="asset_dep_dag.py",
fileloc="/tmp/asset_dep_dag.py",
timetable_summary="Asset",
timetable_description="Triggered by assets",
is_stale=False,
is_paused=False,
owners="airflow",
asset_expression={"any": [{"uri": "test://asset1"}]},
max_active_tasks=16,
max_active_runs=16,
max_consecutive_failed_dag_runs=0,
has_task_concurrency_limits=False,
has_import_errors=False,
)
asset_dep_dag2 = DagModel(
dag_id=ASSET_DEP_DAG2_ID,
bundle_name="dag_maker",
relative_fileloc="asset_dep_dag2.py",
fileloc="/tmp/asset_dep_dag2.py",
timetable_summary="Asset",
timetable_description="Triggered by assets",
is_stale=False,
is_paused=False,
owners="airflow",
asset_expression={"any": [{"uri": "s3://bucket/dataset"}]},
max_active_tasks=16,
max_active_runs=16,
max_consecutive_failed_dag_runs=0,
has_task_concurrency_limits=False,
has_import_errors=False,
)
session.add_all([asset_scheduled_dag, asset_dep_dag, asset_dep_dag2])
session.commit()
# Create asset dependencies
asset_ref1 = DagScheduleAssetReference(dag_id=ASSET_DEP_DAG_ID, asset_id=asset1.id)
asset_ref2 = DagScheduleAssetReference(dag_id=ASSET_DEP_DAG2_ID, asset_id=asset2.id)
asset_ref3 = DagScheduleAssetReference(dag_id=ASSET_SCHEDULED_DAG_ID, asset_id=asset3.id)
session.add_all([asset_ref1, asset_ref2, asset_ref3])
session.commit()
@pytest.fixture(autouse=True)
def setup(self, dag_maker, session) -> None:
self._clear_db()
with dag_maker(
DAG1_ID,
dag_display_name=DAG1_DISPLAY_NAME,
schedule=None,
start_date=DAG1_START_DATE,
doc_md="details",
default_args={
"depends_on_past": False,
"retries": 1,
"retry_delay": timedelta(minutes=5),
},
params={"foo": 1},
tags=["example"],
):
EmptyOperator(task_id=TASK_ID)
dag_maker.create_dagrun(state=DagRunState.FAILED)
with dag_maker(
DAG2_ID,
schedule=None,
start_date=DAG2_START_DATE,
doc_md="details",
default_args={
"depends_on_past": False,
"retries": 1,
"retry_delay": timedelta(minutes=5),
},
params={"foo": 1},
max_active_tasks=16,
max_active_runs=16,
):
EmptyOperator(task_id=TASK_ID)
self._create_deactivated_paused_dag(session)
self._create_dag_tags(session)
dag_maker.sync_dagbag_to_db()
dag_maker.dag_model.last_parse_duration = 0.24
dag_maker.dag_model.has_task_concurrency_limits = True
session.merge(dag_maker.dag_model)
session.commit()
def teardown_method(self) -> None:
self._clear_db()
|
TestDagEndpoint
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_lockmode.py
|
{
"start": 1860,
"end": 4652
}
|
class ____(_fixtures.FixtureTest):
__sparse_driver_backend__ = True
# test against the major backends. We are naming specific databases
# here rather than using requirements rules since the behavior of
# "FOR UPDATE" as well as "OF" is very specific to each DB, and we need
# to run the query differently based on backend.
@classmethod
def setup_mappers(cls):
User, users = cls.classes.User, cls.tables.users
Address, addresses = cls.classes.Address, cls.tables.addresses
cls.mapper_registry.map_imperatively(
User, users, properties={"addresses": relationship(Address)}
)
cls.mapper_registry.map_imperatively(Address, addresses)
def test_inner_joinedload_w_limit(self):
User = self.classes.User
sess = fixture_session()
q = (
sess.query(User)
.options(joinedload(User.addresses, innerjoin=True))
.with_for_update()
.limit(1)
)
if testing.against("oracle"):
assert_raises_message(exc.DatabaseError, "ORA-02014", q.all)
else:
q.all()
sess.close()
def test_inner_joinedload_wo_limit(self):
User = self.classes.User
sess = fixture_session()
sess.query(User).options(
joinedload(User.addresses, innerjoin=True)
).with_for_update().all()
sess.close()
def test_outer_joinedload_w_limit(self):
User = self.classes.User
sess = fixture_session()
q = sess.query(User).options(
joinedload(User.addresses, innerjoin=False)
)
if testing.against("postgresql"):
q = q.with_for_update(of=User)
else:
q = q.with_for_update()
q = q.limit(1)
if testing.against("oracle"):
assert_raises_message(exc.DatabaseError, "ORA-02014", q.all)
else:
q.all()
sess.close()
def test_outer_joinedload_wo_limit(self):
User = self.classes.User
sess = fixture_session()
q = sess.query(User).options(
joinedload(User.addresses, innerjoin=False)
)
if testing.against("postgresql"):
q = q.with_for_update(of=User)
else:
q = q.with_for_update()
q.all()
sess.close()
def test_join_w_subquery(self):
User = self.classes.User
Address = self.classes.Address
sess = fixture_session()
q1 = sess.query(User).with_for_update().subquery()
sess.query(q1).join(Address).all()
sess.close()
def test_plain(self):
User = self.classes.User
sess = fixture_session()
sess.query(User).with_for_update().all()
sess.close()
|
BackendTest
|
python
|
pallets__click
|
src/click/testing.py
|
{
"start": 6336,
"end": 19102
}
|
class ____:
"""The CLI runner provides functionality to invoke a Click command line
script for unit testing purposes in an isolated environment. This only
works in single-threaded systems without any concurrency as it changes the
global interpreter state.
:param charset: the character set for the input and output data.
:param env: a dictionary with environment variables for overriding.
:param echo_stdin: if this is set to `True`, then reading from `<stdin>` writes
to `<stdout>`. This is useful for showing examples in
some circumstances. Note that regular prompts
will automatically echo the input.
:param catch_exceptions: Whether to catch any exceptions other than
``SystemExit`` when running :meth:`~CliRunner.invoke`.
.. versionchanged:: 8.2
Added the ``catch_exceptions`` parameter.
.. versionchanged:: 8.2
``mix_stderr`` parameter has been removed.
"""
def __init__(
self,
charset: str = "utf-8",
env: cabc.Mapping[str, str | None] | None = None,
echo_stdin: bool = False,
catch_exceptions: bool = True,
) -> None:
self.charset = charset
self.env: cabc.Mapping[str, str | None] = env or {}
self.echo_stdin = echo_stdin
self.catch_exceptions = catch_exceptions
def get_default_prog_name(self, cli: Command) -> str:
"""Given a command object it will return the default program name
for it. The default is the `name` attribute or ``"root"`` if not
set.
"""
return cli.name or "root"
def make_env(
self, overrides: cabc.Mapping[str, str | None] | None = None
) -> cabc.Mapping[str, str | None]:
"""Returns the environment overrides for invoking a script."""
rv = dict(self.env)
if overrides:
rv.update(overrides)
return rv
@contextlib.contextmanager
def isolation(
self,
input: str | bytes | t.IO[t.Any] | None = None,
env: cabc.Mapping[str, str | None] | None = None,
color: bool = False,
) -> cabc.Iterator[tuple[io.BytesIO, io.BytesIO, io.BytesIO]]:
"""A context manager that sets up the isolation for invoking of a
command line tool. This sets up `<stdin>` with the given input data
and `os.environ` with the overrides from the given dictionary.
This also rebinds some internals in Click to be mocked (like the
prompt functionality).
This is automatically done in the :meth:`invoke` method.
:param input: the input stream to put into `sys.stdin`.
:param env: the environment overrides as dictionary.
:param color: whether the output should contain color codes. The
application can still override this explicitly.
.. versionadded:: 8.2
An additional output stream is returned, which is a mix of
`<stdout>` and `<stderr>` streams.
.. versionchanged:: 8.2
Always returns the `<stderr>` stream.
.. versionchanged:: 8.0
`<stderr>` is opened with ``errors="backslashreplace"``
instead of the default ``"strict"``.
.. versionchanged:: 4.0
Added the ``color`` parameter.
"""
bytes_input = make_input_stream(input, self.charset)
echo_input = None
old_stdin = sys.stdin
old_stdout = sys.stdout
old_stderr = sys.stderr
old_forced_width = formatting.FORCED_WIDTH
formatting.FORCED_WIDTH = 80
env = self.make_env(env)
stream_mixer = StreamMixer()
if self.echo_stdin:
bytes_input = echo_input = t.cast(
t.BinaryIO, EchoingStdin(bytes_input, stream_mixer.stdout)
)
sys.stdin = text_input = _NamedTextIOWrapper(
bytes_input, encoding=self.charset, name="<stdin>", mode="r"
)
if self.echo_stdin:
# Force unbuffered reads, otherwise TextIOWrapper reads a
# large chunk which is echoed early.
text_input._CHUNK_SIZE = 1 # type: ignore
sys.stdout = _NamedTextIOWrapper(
stream_mixer.stdout, encoding=self.charset, name="<stdout>", mode="w"
)
sys.stderr = _NamedTextIOWrapper(
stream_mixer.stderr,
encoding=self.charset,
name="<stderr>",
mode="w",
errors="backslashreplace",
)
@_pause_echo(echo_input) # type: ignore
def visible_input(prompt: str | None = None) -> str:
sys.stdout.write(prompt or "")
try:
val = next(text_input).rstrip("\r\n")
except StopIteration as e:
raise EOFError() from e
sys.stdout.write(f"{val}\n")
sys.stdout.flush()
return val
@_pause_echo(echo_input) # type: ignore
def hidden_input(prompt: str | None = None) -> str:
sys.stdout.write(f"{prompt or ''}\n")
sys.stdout.flush()
try:
return next(text_input).rstrip("\r\n")
except StopIteration as e:
raise EOFError() from e
@_pause_echo(echo_input) # type: ignore
def _getchar(echo: bool) -> str:
char = sys.stdin.read(1)
if echo:
sys.stdout.write(char)
sys.stdout.flush()
return char
default_color = color
def should_strip_ansi(
stream: t.IO[t.Any] | None = None, color: bool | None = None
) -> bool:
if color is None:
return not default_color
return not color
old_visible_prompt_func = termui.visible_prompt_func
old_hidden_prompt_func = termui.hidden_prompt_func
old__getchar_func = termui._getchar
old_should_strip_ansi = utils.should_strip_ansi # type: ignore
old__compat_should_strip_ansi = _compat.should_strip_ansi
termui.visible_prompt_func = visible_input
termui.hidden_prompt_func = hidden_input
termui._getchar = _getchar
utils.should_strip_ansi = should_strip_ansi # type: ignore
_compat.should_strip_ansi = should_strip_ansi
old_env = {}
try:
for key, value in env.items():
old_env[key] = os.environ.get(key)
if value is None:
try:
del os.environ[key]
except Exception:
pass
else:
os.environ[key] = value
yield (stream_mixer.stdout, stream_mixer.stderr, stream_mixer.output)
finally:
for key, value in old_env.items():
if value is None:
try:
del os.environ[key]
except Exception:
pass
else:
os.environ[key] = value
sys.stdout = old_stdout
sys.stderr = old_stderr
sys.stdin = old_stdin
termui.visible_prompt_func = old_visible_prompt_func
termui.hidden_prompt_func = old_hidden_prompt_func
termui._getchar = old__getchar_func
utils.should_strip_ansi = old_should_strip_ansi # type: ignore
_compat.should_strip_ansi = old__compat_should_strip_ansi
formatting.FORCED_WIDTH = old_forced_width
def invoke(
self,
cli: Command,
args: str | cabc.Sequence[str] | None = None,
input: str | bytes | t.IO[t.Any] | None = None,
env: cabc.Mapping[str, str | None] | None = None,
catch_exceptions: bool | None = None,
color: bool = False,
**extra: t.Any,
) -> Result:
"""Invokes a command in an isolated environment. The arguments are
forwarded directly to the command line script; the `extra` keyword
arguments are passed to the :meth:`~clickpkg.Command.main` function of
the command.
This returns a :class:`Result` object.
:param cli: the command to invoke
:param args: the arguments to invoke. It may be given as an iterable
or a string. When given as a string it will be interpreted
as a Unix shell command. More details at
:func:`shlex.split`.
:param input: the input data for `sys.stdin`.
:param env: the environment overrides.
:param catch_exceptions: Whether to catch any other exceptions than
``SystemExit``. If :data:`None`, the value
from :class:`CliRunner` is used.
:param extra: the keyword arguments to pass to :meth:`main`.
:param color: whether the output should contain color codes. The
application can still override this explicitly.
.. versionadded:: 8.2
The result object has the ``output_bytes`` attribute with
the mix of ``stdout_bytes`` and ``stderr_bytes``, as the user would
see it in their terminal.
.. versionchanged:: 8.2
The result object always returns the ``stderr_bytes`` stream.
.. versionchanged:: 8.0
The result object has the ``return_value`` attribute with
the value returned from the invoked command.
.. versionchanged:: 4.0
Added the ``color`` parameter.
.. versionchanged:: 3.0
Added the ``catch_exceptions`` parameter.
.. versionchanged:: 3.0
The result object has the ``exc_info`` attribute with the
traceback if available.
"""
exc_info = None
if catch_exceptions is None:
catch_exceptions = self.catch_exceptions
with self.isolation(input=input, env=env, color=color) as outstreams:
return_value = None
exception: BaseException | None = None
exit_code = 0
if isinstance(args, str):
args = shlex.split(args)
try:
prog_name = extra.pop("prog_name")
except KeyError:
prog_name = self.get_default_prog_name(cli)
try:
return_value = cli.main(args=args or (), prog_name=prog_name, **extra)
except SystemExit as e:
exc_info = sys.exc_info()
e_code = t.cast("int | t.Any | None", e.code)
if e_code is None:
e_code = 0
if e_code != 0:
exception = e
if not isinstance(e_code, int):
sys.stdout.write(str(e_code))
sys.stdout.write("\n")
e_code = 1
exit_code = e_code
except Exception as e:
if not catch_exceptions:
raise
exception = e
exit_code = 1
exc_info = sys.exc_info()
finally:
sys.stdout.flush()
sys.stderr.flush()
stdout = outstreams[0].getvalue()
stderr = outstreams[1].getvalue()
output = outstreams[2].getvalue()
return Result(
runner=self,
stdout_bytes=stdout,
stderr_bytes=stderr,
output_bytes=output,
return_value=return_value,
exit_code=exit_code,
exception=exception,
exc_info=exc_info, # type: ignore
)
@contextlib.contextmanager
def isolated_filesystem(
self, temp_dir: str | os.PathLike[str] | None = None
) -> cabc.Iterator[str]:
"""A context manager that creates a temporary directory and
changes the current working directory to it. This isolates tests
that affect the contents of the CWD to prevent them from
interfering with each other.
:param temp_dir: Create the temporary directory under this
directory. If given, the created directory is not removed
when exiting.
.. versionchanged:: 8.0
Added the ``temp_dir`` parameter.
"""
cwd = os.getcwd()
dt = tempfile.mkdtemp(dir=temp_dir)
os.chdir(dt)
try:
yield dt
finally:
os.chdir(cwd)
if temp_dir is None:
import shutil
try:
shutil.rmtree(dt)
except OSError:
pass
|
CliRunner
|
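The record above is the tail of Click's CliRunner test helper: isolation() swaps sys.stdin, sys.stdout, sys.stderr and the prompt hooks, invoke() runs a command inside that sandbox and returns a Result, and isolated_filesystem() gives the command a throwaway working directory. A minimal usage sketch of that public testing API; the hello command and its --name option are invented for illustration:

import click
from click.testing import CliRunner

@click.command()
@click.option("--name", default="world")
def hello(name):
    """Hypothetical command defined only for this example."""
    click.echo(f"Hello, {name}!")

runner = CliRunner()
result = runner.invoke(hello, ["--name", "Ada"])   # runs inside isolation()
assert result.exit_code == 0
assert result.output == "Hello, Ada!\n"

with runner.isolated_filesystem():                 # temporary cwd, removed on exit
    result = runner.invoke(hello, [])
    assert "Hello, world!" in result.output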
python
|
getsentry__sentry
|
src/sentry/monitors/endpoints/project_monitor_details.py
|
{
"start": 875,
"end": 3181
}
|
class ____(ProjectMonitorEndpoint, MonitorDetailsMixin):
publish_status = {
"DELETE": ApiPublishStatus.PUBLIC,
"GET": ApiPublishStatus.PUBLIC,
"PUT": ApiPublishStatus.PUBLIC,
}
owner = ApiOwner.CRONS
@extend_schema(
operation_id="Retrieve a Monitor for a Project",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
GlobalParams.PROJECT_ID_OR_SLUG,
MonitorParams.MONITOR_ID_OR_SLUG,
],
responses={
200: MonitorSerializer,
401: RESPONSE_UNAUTHORIZED,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
)
def get(self, request: Request, project, monitor) -> Response:
"""
Retrieves details for a monitor.
"""
return self.get_monitor(request, project, monitor)
@extend_schema(
operation_id="Update a Monitor for a Project",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
GlobalParams.PROJECT_ID_OR_SLUG,
MonitorParams.MONITOR_ID_OR_SLUG,
],
request=MonitorValidator,
responses={
200: MonitorSerializer,
400: RESPONSE_BAD_REQUEST,
401: RESPONSE_UNAUTHORIZED,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
)
def put(self, request: AuthenticatedHttpRequest, project, monitor) -> Response:
"""
Update a monitor.
"""
return self.update_monitor(request, project, monitor)
@extend_schema(
operation_id="Delete a Monitor or Monitor Environments for a Project",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
GlobalParams.PROJECT_ID_OR_SLUG,
MonitorParams.MONITOR_ID_OR_SLUG,
GlobalParams.ENVIRONMENT,
],
request=MonitorValidator,
responses={
202: RESPONSE_ACCEPTED,
401: RESPONSE_UNAUTHORIZED,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
)
def delete(self, request: Request, project, monitor) -> Response:
"""
Delete a monitor or monitor environments.
"""
return self.delete_monitor(request, project, monitor)
|
ProjectMonitorDetailsEndpoint
|
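This endpoint follows the drf-spectacular convention used for Sentry's documented API: each HTTP verb gets an extend_schema decorator feeding the generated OpenAPI docs, and the method body delegates to a mixin. A stripped-down sketch of the same pattern outside Sentry; WidgetDetailView is invented, while extend_schema and the DRF classes are the libraries' real names:

from rest_framework.views import APIView
from rest_framework.response import Response
from drf_spectacular.utils import extend_schema

class WidgetDetailView(APIView):
    @extend_schema(operation_id="Retrieve a Widget")
    def get(self, request, widget_id):
        # A real view would look the widget up and serialize it; elided in this sketch.
        return Response({"id": widget_id})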
python
|
getsentry__sentry
|
tests/sentry/seer/autofix/test_autofix.py
|
{
"start": 44329,
"end": 49246
}
|
class ____(TestCase):
def test_get_github_username_for_user_with_github(self) -> None:
"""Tests getting GitHub username from ExternalActor with GitHub provider."""
from sentry.integrations.models.external_actor import ExternalActor
from sentry.integrations.types import ExternalProviders
user = self.create_user()
organization = self.create_organization()
# Create an ExternalActor with GitHub provider
ExternalActor.objects.create(
user_id=user.id,
organization=organization,
provider=ExternalProviders.GITHUB.value,
external_name="@testuser",
external_id="12345",
integration_id=1,
)
username = _get_github_username_for_user(user, organization.id)
assert username == "testuser"
def test_get_github_username_for_user_with_github_enterprise(self) -> None:
"""Tests getting GitHub username from ExternalActor with GitHub Enterprise provider."""
from sentry.integrations.models.external_actor import ExternalActor
from sentry.integrations.types import ExternalProviders
user = self.create_user()
organization = self.create_organization()
# Create an ExternalActor with GitHub Enterprise provider
ExternalActor.objects.create(
user_id=user.id,
organization=organization,
provider=ExternalProviders.GITHUB_ENTERPRISE.value,
external_name="@gheuser",
external_id="67890",
integration_id=2,
)
username = _get_github_username_for_user(user, organization.id)
assert username == "gheuser"
def test_get_github_username_for_user_without_at_prefix(self) -> None:
"""Tests getting GitHub username when external_name doesn't have @ prefix."""
from sentry.integrations.models.external_actor import ExternalActor
from sentry.integrations.types import ExternalProviders
user = self.create_user()
organization = self.create_organization()
# Create an ExternalActor without @ prefix
ExternalActor.objects.create(
user_id=user.id,
organization=organization,
provider=ExternalProviders.GITHUB.value,
external_name="noprefixuser",
external_id="11111",
integration_id=3,
)
username = _get_github_username_for_user(user, organization.id)
assert username == "noprefixuser"
def test_get_github_username_for_user_no_mapping(self) -> None:
"""Tests that None is returned when user has no GitHub mapping."""
user = self.create_user()
organization = self.create_organization()
username = _get_github_username_for_user(user, organization.id)
assert username is None
def test_get_github_username_for_user_non_github_provider(self) -> None:
"""Tests that None is returned when user only has non-GitHub external actors."""
from sentry.integrations.models.external_actor import ExternalActor
from sentry.integrations.types import ExternalProviders
user = self.create_user()
organization = self.create_organization()
# Create an ExternalActor with Slack provider (should be ignored)
ExternalActor.objects.create(
user_id=user.id,
organization=organization,
provider=ExternalProviders.SLACK.value,
external_name="@slackuser",
external_id="slack123",
integration_id=4,
)
username = _get_github_username_for_user(user, organization.id)
assert username is None
def test_get_github_username_for_user_multiple_mappings(self) -> None:
"""Tests that most recent GitHub mapping is used when multiple exist."""
from sentry.integrations.models.external_actor import ExternalActor
from sentry.integrations.types import ExternalProviders
user = self.create_user()
organization = self.create_organization()
# Create older mapping
ExternalActor.objects.create(
user_id=user.id,
organization=organization,
provider=ExternalProviders.GITHUB.value,
external_name="@olduser",
external_id="old123",
integration_id=5,
date_added=before_now(days=10),
)
# Create newer mapping
ExternalActor.objects.create(
user_id=user.id,
organization=organization,
provider=ExternalProviders.GITHUB.value,
external_name="@newuser",
external_id="new456",
integration_id=6,
date_added=before_now(days=1),
)
username = _get_github_username_for_user(user, organization.id)
assert username == "newuser"
|
TestGetGithubUsernameForUser
|
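Taken together, these tests pin down the contract of _get_github_username_for_user: consider only GitHub and GitHub Enterprise ExternalActor rows for that user and organization, prefer the most recently added mapping, strip a leading "@" from external_name, and return None when nothing matches. The helper itself is not part of this record, so the following is a hypothetical reconstruction of that contract rather than Sentry's actual implementation:

def pick_github_username(external_actors):
    """external_actors: iterable of (provider, external_name, date_added) tuples
    already scoped to one user and organization. Purely illustrative."""
    github_providers = {"github", "github_enterprise"}
    candidates = [a for a in external_actors if a[0] in github_providers]
    if not candidates:
        return None
    newest = max(candidates, key=lambda a: a[2])   # most recent mapping wins
    name = newest[1]
    return name[1:] if name.startswith("@") else name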
python
|
numpy__numpy
|
numpy/f2py/tests/test_symbolic.py
|
{
"start": 450,
"end": 18561
}
|
class ____(util.F2PyTest):
def test_eliminate_quotes(self):
def worker(s):
r, d = eliminate_quotes(s)
s1 = insert_quotes(r, d)
assert s1 == s
for kind in ["", "mykind_"]:
worker(kind + '"1234" // "ABCD"')
worker(kind + '"1234" // ' + kind + '"ABCD"')
worker(kind + "\"1234\" // 'ABCD'")
worker(kind + '"1234" // ' + kind + "'ABCD'")
worker(kind + '"1\\"2\'AB\'34"')
worker("a = " + kind + "'1\\'2\"AB\"34'")
def test_sanity(self):
x = as_symbol("x")
y = as_symbol("y")
z = as_symbol("z")
assert x.op == Op.SYMBOL
assert repr(x) == "Expr(Op.SYMBOL, 'x')"
assert x == x
assert x != y
assert hash(x) is not None
n = as_number(123)
m = as_number(456)
assert n.op == Op.INTEGER
assert repr(n) == "Expr(Op.INTEGER, (123, 4))"
assert n == n
assert n != m
assert hash(n) is not None
fn = as_number(12.3)
fm = as_number(45.6)
assert fn.op == Op.REAL
assert repr(fn) == "Expr(Op.REAL, (12.3, 4))"
assert fn == fn
assert fn != fm
assert hash(fn) is not None
c = as_complex(1, 2)
c2 = as_complex(3, 4)
assert c.op == Op.COMPLEX
assert repr(c) == ("Expr(Op.COMPLEX, (Expr(Op.INTEGER, (1, 4)),"
" Expr(Op.INTEGER, (2, 4))))")
assert c == c
assert c != c2
assert hash(c) is not None
s = as_string("'123'")
s2 = as_string('"ABC"')
assert s.op == Op.STRING
assert repr(s) == "Expr(Op.STRING, (\"'123'\", 1))", repr(s)
assert s == s
assert s != s2
a = as_array((n, m))
b = as_array((n, ))
assert a.op == Op.ARRAY
assert repr(a) == ("Expr(Op.ARRAY, (Expr(Op.INTEGER, (123, 4)),"
" Expr(Op.INTEGER, (456, 4))))")
assert a == a
assert a != b
t = as_terms(x)
u = as_terms(y)
assert t.op == Op.TERMS
assert repr(t) == "Expr(Op.TERMS, {Expr(Op.SYMBOL, 'x'): 1})"
assert t == t
assert t != u
assert hash(t) is not None
v = as_factors(x)
w = as_factors(y)
assert v.op == Op.FACTORS
assert repr(v) == "Expr(Op.FACTORS, {Expr(Op.SYMBOL, 'x'): 1})"
assert v == v
assert w != v
assert hash(v) is not None
t = as_ternary(x, y, z)
u = as_ternary(x, z, y)
assert t.op == Op.TERNARY
assert t == t
assert t != u
assert hash(t) is not None
e = as_eq(x, y)
f = as_lt(x, y)
assert e.op == Op.RELATIONAL
assert e == e
assert e != f
assert hash(e) is not None
def test_tostring_fortran(self):
x = as_symbol("x")
y = as_symbol("y")
z = as_symbol("z")
n = as_number(123)
m = as_number(456)
a = as_array((n, m))
c = as_complex(n, m)
assert str(x) == "x"
assert str(n) == "123"
assert str(a) == "[123, 456]"
assert str(c) == "(123, 456)"
assert str(Expr(Op.TERMS, {x: 1})) == "x"
assert str(Expr(Op.TERMS, {x: 2})) == "2 * x"
assert str(Expr(Op.TERMS, {x: -1})) == "-x"
assert str(Expr(Op.TERMS, {x: -2})) == "-2 * x"
assert str(Expr(Op.TERMS, {x: 1, y: 1})) == "x + y"
assert str(Expr(Op.TERMS, {x: -1, y: -1})) == "-x - y"
assert str(Expr(Op.TERMS, {x: 2, y: 3})) == "2 * x + 3 * y"
assert str(Expr(Op.TERMS, {x: -2, y: 3})) == "-2 * x + 3 * y"
assert str(Expr(Op.TERMS, {x: 2, y: -3})) == "2 * x - 3 * y"
assert str(Expr(Op.FACTORS, {x: 1})) == "x"
assert str(Expr(Op.FACTORS, {x: 2})) == "x ** 2"
assert str(Expr(Op.FACTORS, {x: -1})) == "x ** -1"
assert str(Expr(Op.FACTORS, {x: -2})) == "x ** -2"
assert str(Expr(Op.FACTORS, {x: 1, y: 1})) == "x * y"
assert str(Expr(Op.FACTORS, {x: 2, y: 3})) == "x ** 2 * y ** 3"
v = Expr(Op.FACTORS, {x: 2, Expr(Op.TERMS, {x: 1, y: 1}): 3})
assert str(v) == "x ** 2 * (x + y) ** 3", str(v)
v = Expr(Op.FACTORS, {x: 2, Expr(Op.FACTORS, {x: 1, y: 1}): 3})
assert str(v) == "x ** 2 * (x * y) ** 3", str(v)
assert str(Expr(Op.APPLY, ("f", (), {}))) == "f()"
assert str(Expr(Op.APPLY, ("f", (x, ), {}))) == "f(x)"
assert str(Expr(Op.APPLY, ("f", (x, y), {}))) == "f(x, y)"
assert str(Expr(Op.INDEXING, ("f", x))) == "f[x]"
assert str(as_ternary(x, y, z)) == "merge(y, z, x)"
assert str(as_eq(x, y)) == "x .eq. y"
assert str(as_ne(x, y)) == "x .ne. y"
assert str(as_lt(x, y)) == "x .lt. y"
assert str(as_le(x, y)) == "x .le. y"
assert str(as_gt(x, y)) == "x .gt. y"
assert str(as_ge(x, y)) == "x .ge. y"
def test_tostring_c(self):
language = Language.C
x = as_symbol("x")
y = as_symbol("y")
z = as_symbol("z")
n = as_number(123)
assert Expr(Op.FACTORS, {x: 2}).tostring(language=language) == "x * x"
assert (Expr(Op.FACTORS, {
x + y: 2
}).tostring(language=language) == "(x + y) * (x + y)")
assert Expr(Op.FACTORS, {
x: 12
}).tostring(language=language) == "pow(x, 12)"
assert as_apply(ArithOp.DIV, x,
y).tostring(language=language) == "x / y"
assert (as_apply(ArithOp.DIV, x,
x + y).tostring(language=language) == "x / (x + y)")
assert (as_apply(ArithOp.DIV, x - y, x +
y).tostring(language=language) == "(x - y) / (x + y)")
assert (x + (x - y) / (x + y) +
n).tostring(language=language) == "123 + x + (x - y) / (x + y)"
assert as_ternary(x, y, z).tostring(language=language) == "(x?y:z)"
assert as_eq(x, y).tostring(language=language) == "x == y"
assert as_ne(x, y).tostring(language=language) == "x != y"
assert as_lt(x, y).tostring(language=language) == "x < y"
assert as_le(x, y).tostring(language=language) == "x <= y"
assert as_gt(x, y).tostring(language=language) == "x > y"
assert as_ge(x, y).tostring(language=language) == "x >= y"
def test_operations(self):
x = as_symbol("x")
y = as_symbol("y")
z = as_symbol("z")
assert x + x == Expr(Op.TERMS, {x: 2})
assert x - x == Expr(Op.INTEGER, (0, 4))
assert x + y == Expr(Op.TERMS, {x: 1, y: 1})
assert x - y == Expr(Op.TERMS, {x: 1, y: -1})
assert x * x == Expr(Op.FACTORS, {x: 2})
assert x * y == Expr(Op.FACTORS, {x: 1, y: 1})
assert +x == x
assert -x == Expr(Op.TERMS, {x: -1}), repr(-x)
assert 2 * x == Expr(Op.TERMS, {x: 2})
assert 2 + x == Expr(Op.TERMS, {x: 1, as_number(1): 2})
assert 2 * x + 3 * y == Expr(Op.TERMS, {x: 2, y: 3})
assert (x + y) * 2 == Expr(Op.TERMS, {x: 2, y: 2})
assert x**2 == Expr(Op.FACTORS, {x: 2})
assert (x + y)**2 == Expr(
Op.TERMS,
{
Expr(Op.FACTORS, {x: 2}): 1,
Expr(Op.FACTORS, {y: 2}): 1,
Expr(Op.FACTORS, {
x: 1,
y: 1
}): 2,
},
)
assert (x + y) * x == x**2 + x * y
assert (x + y)**2 == x**2 + 2 * x * y + y**2
assert (x + y)**2 + (x - y)**2 == 2 * x**2 + 2 * y**2
assert (x + y) * z == x * z + y * z
assert z * (x + y) == x * z + y * z
assert (x / 2) == as_apply(ArithOp.DIV, x, as_number(2))
assert (2 * x / 2) == x
assert (3 * x / 2) == as_apply(ArithOp.DIV, 3 * x, as_number(2))
assert (4 * x / 2) == 2 * x
assert (5 * x / 2) == as_apply(ArithOp.DIV, 5 * x, as_number(2))
assert (6 * x / 2) == 3 * x
assert ((3 * 5) * x / 6) == as_apply(ArithOp.DIV, 5 * x, as_number(2))
assert (30 * x**2 * y**4 / (24 * x**3 * y**3)) == as_apply(
ArithOp.DIV, 5 * y, 4 * x)
assert ((15 * x / 6) / 5) == as_apply(ArithOp.DIV, x,
as_number(2)), (15 * x / 6) / 5
assert (x / (5 / x)) == as_apply(ArithOp.DIV, x**2, as_number(5))
assert (x / 2.0) == Expr(Op.TERMS, {x: 0.5})
s = as_string('"ABC"')
t = as_string('"123"')
assert s // t == Expr(Op.STRING, ('"ABC123"', 1))
assert s // x == Expr(Op.CONCAT, (s, x))
assert x // s == Expr(Op.CONCAT, (x, s))
c = as_complex(1.0, 2.0)
assert -c == as_complex(-1.0, -2.0)
assert c + c == as_expr((1 + 2j) * 2)
assert c * c == as_expr((1 + 2j)**2)
def test_substitute(self):
x = as_symbol("x")
y = as_symbol("y")
z = as_symbol("z")
a = as_array((x, y))
assert x.substitute({x: y}) == y
assert (x + y).substitute({x: z}) == y + z
assert (x * y).substitute({x: z}) == y * z
assert (x**4).substitute({x: z}) == z**4
assert (x / y).substitute({x: z}) == z / y
assert x.substitute({x: y + z}) == y + z
assert a.substitute({x: y + z}) == as_array((y + z, y))
assert as_ternary(x, y,
z).substitute({x: y + z}) == as_ternary(y + z, y, z)
assert as_eq(x, y).substitute({x: y + z}) == as_eq(y + z, y)
def test_fromstring(self):
x = as_symbol("x")
y = as_symbol("y")
z = as_symbol("z")
f = as_symbol("f")
s = as_string('"ABC"')
t = as_string('"123"')
a = as_array((x, y))
assert fromstring("x") == x
assert fromstring("+ x") == x
assert fromstring("- x") == -x
assert fromstring("x + y") == x + y
assert fromstring("x + 1") == x + 1
assert fromstring("x * y") == x * y
assert fromstring("x * 2") == x * 2
assert fromstring("x / y") == x / y
assert fromstring("x ** 2", language=Language.Python) == x**2
assert fromstring("x ** 2 ** 3", language=Language.Python) == x**2**3
assert fromstring("(x + y) * z") == (x + y) * z
assert fromstring("f(x)") == f(x)
assert fromstring("f(x,y)") == f(x, y)
assert fromstring("f[x]") == f[x]
assert fromstring("f[x][y]") == f[x][y]
assert fromstring('"ABC"') == s
assert (normalize(
fromstring('"ABC" // "123" ',
language=Language.Fortran)) == s // t)
assert fromstring('f("ABC")') == f(s)
assert fromstring('MYSTRKIND_"ABC"') == as_string('"ABC"', "MYSTRKIND")
assert fromstring("(/x, y/)") == a, fromstring("(/x, y/)")
assert fromstring("f((/x, y/))") == f(a)
assert fromstring("(/(x+y)*z/)") == as_array(((x + y) * z, ))
assert fromstring("123") == as_number(123)
assert fromstring("123_2") == as_number(123, 2)
assert fromstring("123_myintkind") == as_number(123, "myintkind")
assert fromstring("123.0") == as_number(123.0, 4)
assert fromstring("123.0_4") == as_number(123.0, 4)
assert fromstring("123.0_8") == as_number(123.0, 8)
assert fromstring("123.0e0") == as_number(123.0, 4)
assert fromstring("123.0d0") == as_number(123.0, 8)
assert fromstring("123d0") == as_number(123.0, 8)
assert fromstring("123e-0") == as_number(123.0, 4)
assert fromstring("123d+0") == as_number(123.0, 8)
assert fromstring("123.0_myrealkind") == as_number(123.0, "myrealkind")
assert fromstring("3E4") == as_number(30000.0, 4)
assert fromstring("(1, 2)") == as_complex(1, 2)
assert fromstring("(1e2, PI)") == as_complex(as_number(100.0),
as_symbol("PI"))
assert fromstring("[1, 2]") == as_array((as_number(1), as_number(2)))
assert fromstring("POINT(x, y=1)") == as_apply(as_symbol("POINT"),
x,
y=as_number(1))
assert fromstring(
'PERSON(name="John", age=50, shape=(/34, 23/))') == as_apply(
as_symbol("PERSON"),
name=as_string('"John"'),
age=as_number(50),
shape=as_array((as_number(34), as_number(23))),
)
assert fromstring("x?y:z") == as_ternary(x, y, z)
assert fromstring("*x") == as_deref(x)
assert fromstring("**x") == as_deref(as_deref(x))
assert fromstring("&x") == as_ref(x)
assert fromstring("(*x) * (*y)") == as_deref(x) * as_deref(y)
assert fromstring("(*x) * *y") == as_deref(x) * as_deref(y)
assert fromstring("*x * *y") == as_deref(x) * as_deref(y)
assert fromstring("*x**y") == as_deref(x) * as_deref(y)
assert fromstring("x == y") == as_eq(x, y)
assert fromstring("x != y") == as_ne(x, y)
assert fromstring("x < y") == as_lt(x, y)
assert fromstring("x > y") == as_gt(x, y)
assert fromstring("x <= y") == as_le(x, y)
assert fromstring("x >= y") == as_ge(x, y)
assert fromstring("x .eq. y", language=Language.Fortran) == as_eq(x, y)
assert fromstring("x .ne. y", language=Language.Fortran) == as_ne(x, y)
assert fromstring("x .lt. y", language=Language.Fortran) == as_lt(x, y)
assert fromstring("x .gt. y", language=Language.Fortran) == as_gt(x, y)
assert fromstring("x .le. y", language=Language.Fortran) == as_le(x, y)
assert fromstring("x .ge. y", language=Language.Fortran) == as_ge(x, y)
def test_traverse(self):
x = as_symbol("x")
y = as_symbol("y")
z = as_symbol("z")
f = as_symbol("f")
# Use traverse to substitute a symbol
def replace_visit(s, r=z):
if s == x:
return r
assert x.traverse(replace_visit) == z
assert y.traverse(replace_visit) == y
assert z.traverse(replace_visit) == z
assert (f(y)).traverse(replace_visit) == f(y)
assert (f(x)).traverse(replace_visit) == f(z)
assert (f[y]).traverse(replace_visit) == f[y]
assert (f[z]).traverse(replace_visit) == f[z]
assert (x + y + z).traverse(replace_visit) == (2 * z + y)
assert (x +
f(y, x - z)).traverse(replace_visit) == (z +
f(y, as_number(0)))
assert as_eq(x, y).traverse(replace_visit) == as_eq(z, y)
# Use traverse to collect symbols, method 1
function_symbols = set()
symbols = set()
def collect_symbols(s):
if s.op is Op.APPLY:
oper = s.data[0]
function_symbols.add(oper)
if oper in symbols:
symbols.remove(oper)
elif s.op is Op.SYMBOL and s not in function_symbols:
symbols.add(s)
(x + f(y, x - z)).traverse(collect_symbols)
assert function_symbols == {f}
assert symbols == {x, y, z}
# Use traverse to collect symbols, method 2
def collect_symbols2(expr, symbols):
if expr.op is Op.SYMBOL:
symbols.add(expr)
symbols = set()
(x + f(y, x - z)).traverse(collect_symbols2, symbols)
assert symbols == {x, y, z, f}
# Use traverse to partially collect symbols
def collect_symbols3(expr, symbols):
if expr.op is Op.APPLY:
# skip traversing function calls
return expr
if expr.op is Op.SYMBOL:
symbols.add(expr)
symbols = set()
(x + f(y, x - z)).traverse(collect_symbols3, symbols)
assert symbols == {x}
def test_linear_solve(self):
x = as_symbol("x")
y = as_symbol("y")
z = as_symbol("z")
assert x.linear_solve(x) == (as_number(1), as_number(0))
assert (x + 1).linear_solve(x) == (as_number(1), as_number(1))
assert (2 * x).linear_solve(x) == (as_number(2), as_number(0))
assert (2 * x + 3).linear_solve(x) == (as_number(2), as_number(3))
assert as_number(3).linear_solve(x) == (as_number(0), as_number(3))
assert y.linear_solve(x) == (as_number(0), y)
assert (y * z).linear_solve(x) == (as_number(0), y * z)
assert (x + y).linear_solve(x) == (as_number(1), y)
assert (z * x + y).linear_solve(x) == (z, y)
assert ((z + y) * x + y).linear_solve(x) == (z + y, y)
assert (z * y * x + y).linear_solve(x) == (z * y, y)
pytest.raises(RuntimeError, lambda: (x * x).linear_solve(x))
def test_as_numer_denom(self):
x = as_symbol("x")
y = as_symbol("y")
n = as_number(123)
assert as_numer_denom(x) == (x, as_number(1))
assert as_numer_denom(x / n) == (x, n)
assert as_numer_denom(n / x) == (n, x)
assert as_numer_denom(x / y) == (x, y)
assert as_numer_denom(x * y) == (x * y, as_number(1))
assert as_numer_denom(n + x / y) == (x + n * y, y)
assert as_numer_denom(n + x / (y - x / n)) == (y * n**2, y * n - x)
def test_polynomial_atoms(self):
x = as_symbol("x")
y = as_symbol("y")
n = as_number(123)
assert x.polynomial_atoms() == {x}
assert n.polynomial_atoms() == set()
assert (y[x]).polynomial_atoms() == {y[x]}
assert (y(x)).polynomial_atoms() == {y(x)}
assert (y(x) + x).polynomial_atoms() == {y(x), x}
assert (y(x) * x[y]).polynomial_atoms() == {y(x), x[y]}
assert (y(x)**x).polynomial_atoms() == {y(x)}
def test_unmatched_parenthesis_gh30268(self):
# gh-30268
with pytest.raises(ValueError, match=r"Mismatch of \(\) parenthesis"):
Expr.parse("DATA (A, I=1, N", language=Language.Fortran)
|
TestSymbolic
|
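These tests double as a tour of numpy.f2py's small symbolic-expression engine: fromstring parses Fortran, C or Python expression strings into Expr trees, and methods such as substitute, traverse, linear_solve and as_numer_denom manipulate them. A short usage sketch built only from calls the tests themselves make (the module path is assumed to be numpy.f2py.symbolic, which is what this test file exercises):

from numpy.f2py.symbolic import as_symbol, fromstring

x, y = as_symbol("x"), as_symbol("y")
expr = fromstring("2 * x + 3 * y")        # parse an expression string
shifted = expr.substitute({x: x + 1})     # replace x with x + 1
coeff, rest = shifted.linear_solve(y)     # split into coeff * y + rest
print(shifted, coeff, rest)               # coeff == 3, rest == 2 * x + 2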
python
|
ansible__ansible
|
test/lib/ansible_test/_internal/ci/azp.py
|
{
"start": 571,
"end": 4180
}
|
class ____(CIProvider):
"""CI provider implementation for Azure Pipelines."""
def __init__(self) -> None:
self.auth = AzurePipelinesAuthHelper()
self._changes: AzurePipelinesChanges | None = None
@staticmethod
def is_supported() -> bool:
"""Return True if this provider is supported in the current running environment."""
return os.environ.get('SYSTEM_COLLECTIONURI', '').startswith('https://dev.azure.com/')
@property
def code(self) -> str:
"""Return a unique code representing this provider."""
return CODE
@property
def name(self) -> str:
"""Return descriptive name for this provider."""
return 'Azure Pipelines'
def generate_resource_prefix(self) -> str:
"""Return a resource prefix specific to this CI provider."""
try:
prefix = 'azp-%s-%s-%s' % (
os.environ['BUILD_BUILDID'],
os.environ['SYSTEM_JOBATTEMPT'],
os.environ['SYSTEM_JOBIDENTIFIER'],
)
except KeyError as ex:
raise MissingEnvironmentVariable(name=ex.args[0]) from None
return prefix
def get_base_commit(self, args: CommonConfig) -> str:
"""Return the base commit or an empty string."""
return self._get_changes(args).base_commit or ''
def _get_changes(self, args: CommonConfig) -> AzurePipelinesChanges:
"""Return an AzurePipelinesChanges instance, which will be created on first use."""
if not self._changes:
self._changes = AzurePipelinesChanges(args)
return self._changes
def detect_changes(self, args: TestConfig) -> t.Optional[list[str]]:
"""Initialize change detection."""
result = self._get_changes(args)
if result.is_pr:
job_type = 'pull request'
else:
job_type = 'merge commit'
display.info('Processing %s for branch %s commit %s' % (job_type, result.branch, result.commit))
if not args.metadata.changes:
args.metadata.populate_changes(result.diff)
if result.paths is None:
# There are several likely causes of this:
# - First run on a new branch.
# - Too many pull requests passed since the last merge run passed.
display.warning('No successful commit found. All tests will be executed.')
return result.paths
def supports_core_ci_auth(self) -> bool:
"""Return True if Ansible Core CI is supported."""
return True
def prepare_core_ci_request(self, config: dict[str, object], context: AuthContext) -> dict[str, object]:
try:
request: dict[str, object] = dict(
type="azp:ssh",
config=config,
org_name=os.environ['SYSTEM_COLLECTIONURI'].strip('/').split('/')[-1],
project_name=os.environ['SYSTEM_TEAMPROJECT'],
build_id=int(os.environ['BUILD_BUILDID']),
task_id=str(uuid.UUID(os.environ['SYSTEM_TASKINSTANCEID'])),
)
except KeyError as ex:
raise MissingEnvironmentVariable(name=ex.args[0]) from None
self.auth.sign_request(request, context)
return request
def get_git_details(self, args: CommonConfig) -> t.Optional[dict[str, t.Any]]:
"""Return details about git in the current environment."""
changes = self._get_changes(args)
details = dict(
base_commit=changes.base_commit,
commit=changes.commit,
)
return details
|
AzurePipelines
|
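Two details in this provider generalize well: environment detection keys off a single well-known variable, and every value pulled from the environment converts KeyError into a dedicated missing-variable error so failures name the variable that was absent. A generic, self-contained sketch of that pattern; the exception and function names below are illustrative, not ansible-test's own:

import os

class MissingEnvVar(RuntimeError):
    def __init__(self, name):
        super().__init__(f"required environment variable is not set: {name}")
        self.name = name

def is_azure_pipelines() -> bool:
    # Mirrors the is_supported() check above: Azure Pipelines sets SYSTEM_COLLECTIONURI.
    return os.environ.get("SYSTEM_COLLECTIONURI", "").startswith("https://dev.azure.com/")

def resource_prefix() -> str:
    try:
        return "azp-%s-%s" % (os.environ["BUILD_BUILDID"], os.environ["SYSTEM_JOBATTEMPT"])
    except KeyError as ex:
        raise MissingEnvVar(ex.args[0]) from None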
python
|
scipy__scipy
|
benchmarks/benchmarks/cluster.py
|
{
"start": 3077,
"end": 3529
}
|
class ____(Benchmark):
params = [[2, 10, 50], ['float32', 'float64']]
param_names = ['k', 'dtype']
def __init__(self):
rnd = np.random.RandomState(0)
self.data = rnd.rand(5000, 5)
self.cbook_source = rnd.rand(50, 5)
def setup(self, k, dtype):
self.obs = self.data.astype(dtype)
self.cbook = self.cbook_source[:k].astype(dtype)
def time_vq(self, k, dtype):
vq(self.obs, self.cbook)
|
VQ
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/frames.py
|
{
"start": 130330,
"end": 133918
}
|
class ____(Request):
"""
Get specific frames for a dataset version using the frame ids. Random Access API.
:param dataset: Dataset ID
:type dataset: str
:param version: Version ID
:type version: str
:param frame_ids: Frame IDs
:type frame_ids: Sequence[str]
:param projection: Used to select which parts of the frame will be returned.
Each string represents a field or sub-field (using dot-separated notation). In
order to specify a specific array element, use array index as a field name. To
specify all array elements, use '*'.
:type projection: Sequence[str]
"""
_service = "frames"
_action = "get_by_ids"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"dataset": {"description": "Dataset ID", "type": "string"},
"frame_ids": {
"description": "Frame IDs",
"items": {"type": "string"},
"type": "array",
},
"projection": {
"description": (
"Used to select which parts of the frame will be returned. Each string represents a\n "
" field or sub-field (using dot-separated notation). In order to specify a specific array"
" element,\n use array index as a field name. To specify all array elements, use"
" '*'."
),
"items": {"type": "string"},
"type": "array",
},
"version": {"description": "Version ID", "type": "string"},
},
"required": ["dataset", "version", "frame_ids"],
"type": "object",
}
def __init__(self, dataset, version, frame_ids, projection=None, **kwargs):
super(GetByIdsRequest, self).__init__(**kwargs)
self.dataset = dataset
self.version = version
self.frame_ids = frame_ids
self.projection = projection
@schema_property("dataset")
def dataset(self):
return self._property_dataset
@dataset.setter
def dataset(self, value):
if value is None:
self._property_dataset = None
return
self.assert_isinstance(value, "dataset", six.string_types)
self._property_dataset = value
@schema_property("version")
def version(self):
return self._property_version
@version.setter
def version(self, value):
if value is None:
self._property_version = None
return
self.assert_isinstance(value, "version", six.string_types)
self._property_version = value
@schema_property("frame_ids")
def frame_ids(self):
return self._property_frame_ids
@frame_ids.setter
def frame_ids(self, value):
if value is None:
self._property_frame_ids = None
return
self.assert_isinstance(value, "frame_ids", (list, tuple))
self.assert_isinstance(value, "frame_ids", six.string_types, is_array=True)
self._property_frame_ids = value
@schema_property("projection")
def projection(self):
return self._property_projection
@projection.setter
def projection(self, value):
if value is None:
self._property_projection = None
return
self.assert_isinstance(value, "projection", (list, tuple))
self.assert_isinstance(value, "projection", six.string_types, is_array=True)
self._property_projection = value
|
GetByIdsRequest
|
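This class is typical of ClearML's auto-generated API bindings: every Request subclass embeds its JSON schema and the property setters re-validate field types. Constructing one needs only the schema's required fields; actually sending it requires an authenticated backend session, which is outside this record, so only construction is sketched here (the IDs are placeholders):

from clearml.backend_api.services.v2_23.frames import GetByIdsRequest

req = GetByIdsRequest(
    dataset="placeholder-dataset-id",
    version="placeholder-version-id",
    frame_ids=["frame-1", "frame-2"],
    projection=["id", "meta.*"],        # optional: limit which frame fields come back
)
# The setters enforce the schema, so assigning e.g. a plain string to
# req.frame_ids would be rejected rather than silently accepted.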
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/layers/rnn_cell_wrapper_v2.py
|
{
"start": 4994,
"end": 5385
}
|
class ____(rnn_cell_wrapper_impl.DeviceWrapperBase,
_RNNCellWrapperV2):
"""Operator that ensures an RNNCell runs on a particular device."""
def __init__(self, *args, **kwargs): # pylint: disable=useless-super-delegation
super(DeviceWrapper, self).__init__(*args, **kwargs)
__init__.__doc__ = rnn_cell_wrapper_impl.DeviceWrapperBase.__init__.__doc__
|
DeviceWrapper
|
python
|
ipython__ipython
|
IPython/lib/display.py
|
{
"start": 9136,
"end": 10185
}
|
class ____:
"""
Generic class to embed an iframe in an IPython notebook
"""
iframe = """
<iframe
width="{width}"
height="{height}"
src="{src}{params}"
frameborder="0"
allowfullscreen
{extras}
></iframe>
"""
def __init__(
self, src, width, height, extras: Optional[Iterable[str]] = None, **kwargs
):
if extras is None:
extras = []
self.src = src
self.width = width
self.height = height
self.extras = extras
self.params = kwargs
def _repr_html_(self):
"""return the embed iframe"""
if self.params:
from urllib.parse import urlencode
params = "?" + urlencode(self.params)
else:
params = ""
return self.iframe.format(
src=self.src,
width=self.width,
height=self.height,
params=params,
extras=" ".join(self.extras),
)
|
IFrame
|
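IFrame renders a literal <iframe> tag through _repr_html_, URL-encoding any extra keyword arguments onto src as query parameters. Displaying an instance in a notebook embeds the page; the example below (with a placeholder URL and a made-up theme parameter) prints the generated HTML directly:

from IPython.lib.display import IFrame

frame = IFrame("https://example.org/docs", width=800, height=400, theme="dark")
print(frame._repr_html_())   # the src ends up as https://example.org/docs?theme=dark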
python
|
openai__openai-python
|
src/openai/_module_client.py
|
{
"start": 2131,
"end": 2258
}
|
class ____(LazyProxy["Videos"]):
@override
def __load__(self) -> Videos:
return _load_client().videos
|
VideosProxy
|
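VideosProxy is one instance of the SDK's lazy-attribute pattern: a module-level name is backed by a LazyProxy whose __load__ builds the real client resource on first use, so importing the package stays cheap. A generic, dependency-free sketch of the same idea, not the openai implementation itself:

class LazyProxy:
    """Defer building an expensive object until an attribute is first touched."""
    def __init__(self):
        self._obj = None
    def __load__(self):
        raise NotImplementedError
    def __getattr__(self, name):
        if self._obj is None:
            self._obj = self.__load__()   # built once, on first access
        return getattr(self._obj, name)

class VideosLikeProxy(LazyProxy):
    def __load__(self):
        print("building the real resource now")
        return ["clip-a", "clip-b"]       # stands in for a real client resource

videos = VideosLikeProxy()
print(videos.count("clip-a"))             # triggers __load__ exactly once, then prints 1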