repo (stringlengths 5-51) | instance_id (stringlengths 11-56) | base_commit (stringlengths 40-40) | patch (stringlengths 400-333k) | test_patch (stringlengths 0-895k) | problem_statement (stringlengths 27-55.6k) | hints_text (stringlengths 0-72k) | created_at (int64 1,447B-1,739B) | labels (sequencelengths 0-7 ⌀) | category (stringclasses 4 values) | edit_functions (sequencelengths 1-10) | added_functions (sequencelengths 0-20) | edit_functions_length (int64 1-10) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
pydantic/pydantic | pydantic__pydantic-10789 | 0157e343bac2313064492744ef07493b5c42bd5b | diff --git a/pydantic/_internal/_generate_schema.py b/pydantic/_internal/_generate_schema.py
index bebe58c6c0..c9bceddb99 100644
--- a/pydantic/_internal/_generate_schema.py
+++ b/pydantic/_internal/_generate_schema.py
@@ -1892,34 +1892,27 @@ def _call_schema(self, function: ValidateCallSupportedTypes) -> core_schema.Call
)
def _unsubstituted_typevar_schema(self, typevar: typing.TypeVar) -> core_schema.CoreSchema:
- assert isinstance(typevar, typing.TypeVar)
-
- bound = typevar.__bound__
- constraints = typevar.__constraints__
-
try:
- typevar_has_default = typevar.has_default() # type: ignore
+ has_default = typevar.has_default()
except AttributeError:
- # could still have a default if it's an old version of typing_extensions.TypeVar
- typevar_has_default = getattr(typevar, '__default__', None) is not None
+ # Happens if using `typing.TypeVar` on Python < 3.13
+ pass
+ else:
+ if has_default:
+ return self.generate_schema(typevar.__default__)
- if (bound is not None) + (len(constraints) != 0) + typevar_has_default > 1:
- raise NotImplementedError(
- 'Pydantic does not support mixing more than one of TypeVar bounds, constraints and defaults'
- )
+ if constraints := typevar.__constraints__:
+ return self._union_schema(typing.Union[constraints])
- if typevar_has_default:
- return self.generate_schema(typevar.__default__) # type: ignore
- elif constraints:
- return self._union_schema(typing.Union[constraints]) # type: ignore
- elif bound:
+ if bound := typevar.__bound__:
schema = self.generate_schema(bound)
schema['serialization'] = core_schema.wrap_serializer_function_ser_schema(
- lambda x, h: h(x), schema=core_schema.any_schema()
+ lambda x, h: h(x),
+ schema=core_schema.any_schema(),
)
return schema
- else:
- return core_schema.any_schema()
+
+ return core_schema.any_schema()
def _computed_field_schema(
self,
| diff --git a/tests/test_generics.py b/tests/test_generics.py
index 2e11d871f8..ea92e7aba7 100644
--- a/tests/test_generics.py
+++ b/tests/test_generics.py
@@ -2836,14 +2836,93 @@ class MyErrorDetails(ErrorDetails):
}
-def test_mix_default_and_constraints() -> None:
- T = TypingExtensionsTypeVar('T', str, int, default=str)
+def test_serialize_typevars_default_and_bound_with_user_model() -> None:
+ class MyErrorDetails(BaseModel):
+ bar: str
+
+ class ExtendedMyErrorDetails(MyErrorDetails):
+ foo: str
- msg = 'Pydantic does not support mixing more than one of TypeVar bounds, constraints and defaults'
- with pytest.raises(NotImplementedError, match=msg):
+ class MoreExtendedMyErrorDetails(ExtendedMyErrorDetails):
+ suu: str
- class _(BaseModel, Generic[T]):
- x: T
+ T = TypingExtensionsTypeVar('T', bound=MyErrorDetails, default=ExtendedMyErrorDetails)
+
+ class Error(BaseModel, Generic[T]):
+ message: str
+ details: T
+
+ # bound small parent model
+ sample_error = Error[MyErrorDetails](
+ message='We just had an error',
+ details=MyErrorDetails(foo='var', bar='baz', suu='suu'),
+ )
+
+ assert sample_error.details.model_dump() == {
+ 'bar': 'baz',
+ }
+ assert sample_error.model_dump() == {
+ 'message': 'We just had an error',
+ 'details': {
+ 'bar': 'baz',
+ },
+ }
+
+ # default middle child model
+ sample_error = Error(
+ message='We just had an error',
+ details=MoreExtendedMyErrorDetails(foo='var', bar='baz', suu='suu'),
+ )
+
+ assert sample_error.details.model_dump() == {
+ 'foo': 'var',
+ 'bar': 'baz',
+ 'suu': 'suu',
+ }
+ assert sample_error.model_dump() == {
+ 'message': 'We just had an error',
+ 'details': {'foo': 'var', 'bar': 'baz'},
+ }
+
+ # bound big child model
+ sample_error = Error[MoreExtendedMyErrorDetails](
+ message='We just had an error',
+ details=MoreExtendedMyErrorDetails(foo='var', bar='baz', suu='suu'),
+ )
+
+ assert sample_error.details.model_dump() == {
+ 'foo': 'var',
+ 'bar': 'baz',
+ 'suu': 'suu',
+ }
+ assert sample_error.model_dump() == {
+ 'message': 'We just had an error',
+ 'details': {
+ 'foo': 'var',
+ 'bar': 'baz',
+ 'suu': 'suu',
+ },
+ }
+
+
+def test_typevars_default_model_validation_error() -> None:
+ class MyErrorDetails(BaseModel):
+ bar: str
+
+ class ExtendedMyErrorDetails(MyErrorDetails):
+ foo: str
+
+ T = TypingExtensionsTypeVar('T', bound=MyErrorDetails, default=ExtendedMyErrorDetails)
+
+ class Error(BaseModel, Generic[T]):
+ message: str
+ details: T
+
+ with pytest.raises(ValidationError):
+ Error(
+ message='We just had an error',
+ details=MyErrorDetails(foo='var', bar='baz'),
+ )
def test_generic_with_not_required_in_typed_dict() -> None:
| Support TypeVar with Identical Bound and Default
### Initial Checks
- [X] I have searched Google & GitHub for similar requests and couldn't find anything
- [X] I have read and followed [the docs](https://docs.pydantic.dev) and still think this feature is missing
### Description
# The Problem
Currently Pydantic [does not support](https://github.com/pydantic/pydantic/blob/07b64739a251760a71082324e358c9d8291b7ab6/pydantic/_internal/_generate_schema.py#L1667) a `TypeVar` with more than one of a bound, default, or constraints declared. This is a problem because users are forced to choose between better usability and greater correctness. As described in the [motivations](https://peps.python.org/pep-0696/#motivation) for the PEP which added `TypeVar` defaults, declaring both a bound and a default is relatively common:
```python
from typing_extensions import TypeVar
T = TypeVar("T", bound=int, default=int)
def f(x: T = 1) -> T:
return x
f("hello") # error because str not subtype of int
```
If we're forced to only declare a `default` we won't get appropriate type warnings:
```python
from typing_extensions import TypeVar
T = TypeVar("T", default=int)
def f(x: T = 1) -> T:
return x
f("hello") # no error
```
# Partial Solution
After inspecting the downstream code which [handles bounds and defaults](https://github.com/pydantic/pydantic/blob/07b64739a251760a71082324e358c9d8291b7ab6/pydantic/_internal/_generate_schema.py#L1670-L1681), it appears that it would be relatively easy to at least support the case of a `TypeVar` with an identical bound and default declared. From my anecdotal experience, this likely covers the majority of cases where someone might want to specify both a bound and a default.
Supporting this would involve rewriting:
```python
if (bound is not None) + (len(constraints) != 0) + (default is not None) > 1:
raise NotImplementedError(
'Pydantic does not support mixing more than one of TypeVar bounds, constraints and defaults'
)
if default is not None:
return self.generate_schema(default)
elif constraints:
return self._union_schema(typing.Union[constraints]) # type: ignore
elif bound:
schema = self.generate_schema(bound)
schema['serialization'] = core_schema.wrap_serializer_function_ser_schema(
lambda x, h: h(x), schema=core_schema.any_schema()
)
return schema
else:
return core_schema.any_schema()
```
To look more like:
```python
if len(constraints) != 0:
if (bound is not None or default is not None):
raise RuntimeError(...)
return self._union_schema(typing.Union[constraints])
if bound is not None and default is not None and bound != default:
raise RuntimeError(...)
else:
schema = generate_schema(bound or default)
if bound is not None:
schema['serialization'] = core_schema.wrap_serializer_function_ser_schema(
lambda x, h: h(x), schema=core_schema.any_schema()
)
return schema
```
The main question I have is how feasible it is to actually check `bound != default`. Besides that, this seems fairly sound.
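For the `bound != default` check itself, a minimal sketch of what is available at runtime, assuming a `typing_extensions` version that implements PEP 696 and exposes `__default__`:
```python
from typing_extensions import TypeVar

T = TypeVar("T", bound=int, default=int)

# Both pieces of metadata are exposed directly, so the "identical bound and
# default" case is straightforward to detect:
print(T.__bound__, T.__default__)    # <class 'int'> <class 'int'>
print(T.__bound__ is T.__default__)  # True
```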
### Affected Components
- [ ] [Compatibility between releases](https://docs.pydantic.dev/changelog/)
- [X] [Data validation/parsing](https://docs.pydantic.dev/concepts/models/#basic-model-usage)
- [ ] [Data serialization](https://docs.pydantic.dev/concepts/serialization/) - `.model_dump()` and `.model_dump_json()`
- [ ] [JSON Schema](https://docs.pydantic.dev/concepts/json_schema/)
- [ ] [Dataclasses](https://docs.pydantic.dev/concepts/dataclasses/)
- [ ] [Model Config](https://docs.pydantic.dev/concepts/config/)
- [ ] [Field Types](https://docs.pydantic.dev/api/types/) - adding or changing a particular data type
- [ ] [Function validation decorator](https://docs.pydantic.dev/concepts/validation_decorator/)
- [X] [Generic Models](https://docs.pydantic.dev/concepts/models/#generic-models)
- [ ] [Other Model behaviour](https://docs.pydantic.dev/concepts/models/) - `model_construct()`, pickling, private attributes, ORM mode
- [ ] [Plugins](https://docs.pydantic.dev/) and integration with other tools - mypy, FastAPI, python-devtools, Hypothesis, VS Code, PyCharm, etc.
| @rmorshea,
Thanks for the feature request. I've added the label `change` here as well.
At a first glance, your approach sounds good. Feel free to open a PR with the changes, and I can do a more thorough review! We'll want to test this pretty thoroughly before we introduce the change.
I have a similar case - a fix would be most helpful!
```python
from pydantic import BaseModel
from pint import Quantity
from pydantic_pint import PydanticPintQuantity
from typing_extensions import Annotated, Generic, TypeVar
DimensionlessQuantity = Annotated[Quantity, PydanticPintQuantity("dimensionless", strict=False)]
QuantityType = TypeVar("QuantityType", bound=Quantity, default=DimensionlessQuantity)
class GenericModel(BaseModel, Generic[QuantityType]):
some_field: QuantityType
```
voting up for this feature 👍
I have quite a complex structure with many generics, so pydantic supporting defaults would be most helpful :) | 1,731,001,311,000 | [
"awaiting author revision",
"relnotes-feature"
] | Feature Request | [
"pydantic/_internal/_generate_schema.py:GenerateSchema._unsubstituted_typevar_schema"
] | [] | 1 |
pydantic/pydantic | pydantic__pydantic-10601 | c772b43edb952c5fe54bb28da5124b10d5470caf | diff --git a/pydantic/networks.py b/pydantic/networks.py
index 256f2bf1e4..a8ed1e248d 100644
--- a/pydantic/networks.py
+++ b/pydantic/networks.py
@@ -722,7 +722,7 @@ def _build_pretty_email_regex() -> re.Pattern[str]:
name_chars = r'[\w!#$%&\'*+\-/=?^_`{|}~]'
unquoted_name_group = rf'((?:{name_chars}+\s+)*{name_chars}+)'
quoted_name_group = r'"((?:[^"]|\")+)"'
- email_group = r'<\s*(.+)\s*>'
+ email_group = r'<(.+)>'
return re.compile(rf'\s*(?:{unquoted_name_group}|{quoted_name_group})?\s*{email_group}\s*')
| diff --git a/tests/test_networks.py b/tests/test_networks.py
index f576705473..3ac7204e96 100644
--- a/tests/test_networks.py
+++ b/tests/test_networks.py
@@ -908,6 +908,8 @@ class Model(BaseModel):
('FOO bar <[email protected]> ', 'FOO bar', '[email protected]'),
(' Whatever <[email protected]>', 'Whatever', '[email protected]'),
('Whatever < [email protected]>', 'Whatever', '[email protected]'),
+ ('Whatever <[email protected] >', 'Whatever', '[email protected]'),
+ ('Whatever < [email protected] >', 'Whatever', '[email protected]'),
('<[email protected]> ', 'FOOBAR', '[email protected]'),
('ñoñó@example.com', 'ñoñó', 'ñoñó@example.com'),
('我買@example.com', '我買', '我買@example.com'),
| Email parsing slowdown on edgecases
### Initial Checks
- [X] I confirm that I'm using Pydantic V2
### Description
I found that the current pydantic-level regexp is not optimal and may cause unexpected slowdowns on some edge cases, e.g. email strings like `<` followed by many spaces.
The PoC below shows it. You can run it with a command like `/usr/bin/time python pydantic-poc.py 500`.
The third value in each line should be near constant, which shows that the approximate complexity of one operation is ~O(N^3).
However, I believe it is not a security issue because it runs fast enough even for the largest possible sequences.
### Example Code
```Python
import sys
from pydantic import BaseModel, EmailStr, ValidationError
import time
class TestModel(BaseModel):
email: EmailStr
def main(max_ind: int):
for ind in range(1, max_ind):
begin_at = time.time()
try:
TestModel.parse_obj({"email": "<" + " " * ind})
except ValidationError:
pass
delta = time.time() - begin_at
print(
f"{ind} takes: {delta:0.6f}, delta**1/3 / ind is {delta**(1/3) / ind:0.6f}"
)
return
if __name__ == "__main__":
main(int(sys.argv[1]))
```
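The merged patch simply drops the extra `\s*` inside the angle brackets. A rough standalone sketch of why that helps, timing only the `<...>` portion of the pattern (the name groups are omitted and the input size is illustrative):
```python
import re
import time

# '\s*' inside the brackets and '.+' compete for the same run of spaces,
# which multiplies the backtracking states when no closing '>' exists.
old_shape = re.compile(r'\s*<\s*(.+)\s*>\s*')  # shape of the pattern before the fix
new_shape = re.compile(r'\s*<(.+)>\s*')        # shape of the pattern after the fix

pathological = '<' + ' ' * 300                 # input family from the report

for name, pattern in (('old', old_shape), ('new', new_shape)):
    start = time.perf_counter()
    pattern.fullmatch(pathological)            # never matches; only the backtracking cost differs
    print(name, f'{time.perf_counter() - start:.4f}s')
```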
### Python, Pydantic & OS Version
```Text
pydantic version: 2.9.2
pydantic-core version: 2.23.4
pydantic-core build: profile=release pgo=false
install path: /home/alex/Projects/re-conference/venv/lib/python3.12/site-packages/pydantic
python version: 3.12.3 (main, Sep 11 2024, 14:17:37) [GCC 13.2.0]
platform: Linux-6.8.0-45-generic-x86_64-with-glibc2.39
related packages: typing_extensions-4.12.2
commit: unknown
```
| 1,728,593,695,000 | [
"ready for review",
"relnotes-performance",
"relnotes-fix"
] | Performance Issue | [
"pydantic/networks.py:_build_pretty_email_regex"
] | [] | 1 |
|
pydantic/pydantic | pydantic__pydantic-10374 | 204e109691c69583e656c6e16a62ad79da2f59b9 | diff --git a/pydantic/validate_call_decorator.py b/pydantic/validate_call_decorator.py
index fef84af912..509a7463c5 100644
--- a/pydantic/validate_call_decorator.py
+++ b/pydantic/validate_call_decorator.py
@@ -3,6 +3,7 @@
from __future__ import annotations as _annotations
import functools
+import inspect
from typing import TYPE_CHECKING, Any, Callable, TypeVar, overload
from ._internal import _typing_extra, _validate_call
@@ -55,12 +56,18 @@ def validate(function: AnyCallableT) -> AnyCallableT:
validate_call_wrapper = _validate_call.ValidateCallWrapper(function, config, validate_return, local_ns)
- @functools.wraps(function)
- def wrapper_function(*args, **kwargs):
- return validate_call_wrapper(*args, **kwargs)
+ if inspect.iscoroutinefunction(function):
- wrapper_function.raw_function = function # type: ignore
+ @functools.wraps(function)
+ async def wrapper_function(*args, **kwargs): # type: ignore
+ return await validate_call_wrapper(*args, **kwargs)
+ else:
+
+ @functools.wraps(function)
+ def wrapper_function(*args, **kwargs):
+ return validate_call_wrapper(*args, **kwargs)
+ wrapper_function.raw_function = function # type: ignore
return wrapper_function # type: ignore
if func:
| diff --git a/tests/test_validate_call.py b/tests/test_validate_call.py
index 12547f5a4f..e0a10504c9 100644
--- a/tests/test_validate_call.py
+++ b/tests/test_validate_call.py
@@ -292,6 +292,9 @@ async def run():
v = await foo(1, 2)
assert v == 'a=1 b=2'
+ # insert_assert(inspect.iscoroutinefunction(foo) is True)
+ assert inspect.iscoroutinefunction(foo) is True
+
asyncio.run(run())
with pytest.raises(ValidationError) as exc_info:
asyncio.run(foo('x'))
| Support async functions with `@validate_call` decorator
### Initial Checks
- [X] I confirm that I'm using Pydantic V2
### Description
I'm writing a project using `asgiref`.
When I use `async_to_sync`, I got an error: `UserWarning: async_to_sync was passed a non-async-marked callable`.
I think it's due to the `@validate_call` decorator, because it does not appear if I delete `@validate_call`.
For example, here is some simple code. The correct output is `True` but the actual output is `False`. How can we fix this bug?
### Example Code
```Python
import asyncio
import inspect
from pydantic import validate_call
@validate_call(validate_return=True)
async def delay_number(n: int) -> int:
await asyncio.sleep(1)
return n
if __name__ == "__main__":
print(inspect.iscoroutinefunction(delay_number))
```
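The root cause can be reproduced without pydantic at all: `functools.wraps` copies metadata but not the coroutine flag, so only an `async def` wrapper is detected. A minimal sketch of the principle behind the eventual fix:
```python
import asyncio
import functools
import inspect

async def delay_number(n: int) -> int:
    await asyncio.sleep(0)
    return n

def sync_wrap(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)  # returns a coroutine object, but the wrapper itself looks sync
    return wrapper

def async_wrap(func):
    @functools.wraps(func)
    async def wrapper(*args, **kwargs):
        return await func(*args, **kwargs)
    return wrapper

print(inspect.iscoroutinefunction(sync_wrap(delay_number)))   # False
print(inspect.iscoroutinefunction(async_wrap(delay_number)))  # True
```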
### Python, Pydantic & OS Version
```Text
pydantic version: 2.9.1
pydantic-core version: 2.23.3
pydantic-core build: profile=release pgo=false
install path: C:\Users\movis\miniconda3\envs\volans_env\Lib\site-packages\pydantic
python version: 3.11.9 | packaged by Anaconda, Inc. | (main, Apr 19 2024, 16:40:41) [MSC v.1916 64 bit (AMD64)]
platform: Windows-10-10.0.22631-SP0
related packages: fastapi-0.112.4 typing_extensions-4.12.2
commit: unknown
```
| Maybe `pydantic._internal._validate_call.ValidateCallWrapper` needs some changes to adapt to async functions (coroutines)?
Yeah. I don't think it's likely that we'll support async functions in the short term, but we can leave this open as a potential feature.
> Yeah. I don't think it's likely that we'll support async functions in the short term, but we can leave this open as a potential feature.
Thanks. In fact, the async function in this example works just fine in asynchronous way. This is also mentioned in the documentation: https://docs.pydantic.dev/2.9/concepts/validation_decorator/#async-functions
It looks like the CPython interpreter knows that it is a coroutine, but `inspect.iscoroutinefunction` doesn't! So I think it's just a problem of recognition. I tried to override the decorator and got the correct answer `True` from `inspect.iscoroutinefunction`.
Raw Decorator(pydantic.validate_call_decorator.py):
```python
from __future__ import annotations as _annotations
import functools
from typing import TYPE_CHECKING, Any, Callable, TypeVar, overload
from ._internal import _typing_extra, _validate_call
__all__ = ('validate_call',)
if TYPE_CHECKING:
from .config import ConfigDict
AnyCallableT = TypeVar('AnyCallableT', bound=Callable[..., Any])
def validate_call(
func: AnyCallableT | None = None,
/,
*,
config: ConfigDict | None = None,
validate_return: bool = False,
) -> AnyCallableT | Callable[[AnyCallableT], AnyCallableT]:
local_ns = _typing_extra.parent_frame_namespace()
def validate(function: AnyCallableT) -> AnyCallableT:
if isinstance(function, (classmethod, staticmethod)):
name = type(function).__name__
raise TypeError(f'The `@{name}` decorator should be applied after `@validate_call` (put `@{name}` on top)')
validate_call_wrapper = _validate_call.ValidateCallWrapper(function, config, validate_return, local_ns)
@functools.wraps(function)
def wrapper_function(*args, **kwargs):
return validate_call_wrapper(*args, **kwargs)
wrapper_function.raw_function = function # type: ignore
return wrapper_function # type: ignore
if func:
return validate(func)
else:
return validate
```
New Decorator(add judgment on async functions in decorator):
```python
from __future__ import annotations as _annotations
import functools
import inspect
from typing import TYPE_CHECKING, Any, Callable, TypeVar, overload
from pydantic._internal import _typing_extra, _validate_call
__all__ = ("validate_call",)
if TYPE_CHECKING:
from pydantic.config import ConfigDict
AnyCallableT = TypeVar("AnyCallableT", bound=Callable[..., Any])
def validate_call(
func: AnyCallableT | None = None,
/,
*,
config: ConfigDict | None = None,
validate_return: bool = False,
) -> AnyCallableT | Callable[[AnyCallableT], AnyCallableT]:
local_ns = _typing_extra.parent_frame_namespace()
def validate(function: AnyCallableT) -> AnyCallableT:
if isinstance(function, (classmethod, staticmethod)):
name = type(function).__name__
raise TypeError(
f"The `@{name}` decorator should be applied after `@validate_call` (put `@{name}` on top)"
)
validate_call_wrapper = _validate_call.ValidateCallWrapper(
function, config, validate_return, local_ns
)
@functools.wraps(function)
def wrapper_function(*args, **kwargs):
return validate_call_wrapper(*args, **kwargs)
@functools.wraps(function)
async def async_wrapper_function(*args, **kwargs):
return validate_call_wrapper(*args, **kwargs)
wrapper_function.raw_function = function # type: ignore
if inspect.iscoroutinefunction(function):
return async_wrapper_function
else:
return wrapper_function # type: ignore
if func:
return validate(func)
else:
return validate
```
So do you think it's right? (Thanks for your reply.)
By the way, it's related to https://github.com/python/cpython/issues/100317
Huh, I forgot that we offered support! That's great, thanks for the ref!
I'd say we probably want to have the functions defined within this conditional:
```
if inspect.iscoroutinefunction(function):
return async_wrapper_function
else:
return wrapper_function # type: ignore
```
Other than that, looks good as a first pass to me, feel free to open a PR and I can review! | 1,726,001,590,000 | [
"awaiting author revision",
"relnotes-fix"
] | Feature Request | [
"pydantic/validate_call_decorator.py:validate_call"
] | [] | 1 |
pydantic/pydantic | pydantic__pydantic-10210 | 74c81a8c0d8018e4b436c547c74df01f5c3155ee | diff --git a/pydantic/_internal/_known_annotated_metadata.py b/pydantic/_internal/_known_annotated_metadata.py
index 0f1274090f..efdb8924c7 100644
--- a/pydantic/_internal/_known_annotated_metadata.py
+++ b/pydantic/_internal/_known_annotated_metadata.py
@@ -299,17 +299,25 @@ def _apply_constraint_with_incompatibility_info(
partial(get_constraint_validator(constraint), {constraint: getattr(annotation, constraint)}), schema
)
continue
- elif isinstance(annotation, at.Predicate):
- predicate_name = f'{annotation.func.__qualname__} ' if hasattr(annotation.func, '__qualname__') else ''
+ elif isinstance(annotation, (at.Predicate, at.Not)):
+ predicate_name = f'{annotation.func.__qualname__}' if hasattr(annotation.func, '__qualname__') else ''
def val_func(v: Any) -> Any:
+ predicate_satisfied = annotation.func(v) # noqa: B023
+
# annotation.func may also raise an exception, let it pass through
- if not annotation.func(v): # noqa: B023
- raise PydanticCustomError(
- 'predicate_failed',
- f'Predicate {predicate_name}failed', # type: ignore # noqa: B023
- )
- return v
+ if isinstance(annotation, at.Predicate): # noqa: B023
+ if not predicate_satisfied:
+ raise PydanticCustomError(
+ 'predicate_failed',
+ f'Predicate {predicate_name} failed', # type: ignore # noqa: B023
+ )
+ else:
+ if predicate_satisfied:
+ raise PydanticCustomError(
+ 'not_operation_failed',
+ f'Not of {predicate_name} failed', # type: ignore # noqa: B023
+ )
schema = cs.no_info_after_validator_function(val_func, schema)
else:
| diff --git a/tests/test_annotated.py b/tests/test_annotated.py
index fec98002a8..e0d3da7996 100644
--- a/tests/test_annotated.py
+++ b/tests/test_annotated.py
@@ -5,7 +5,7 @@
import pytest
import pytz
-from annotated_types import BaseMetadata, GroupedMetadata, Gt, Lt, Predicate
+from annotated_types import BaseMetadata, GroupedMetadata, Gt, Lt, Not, Predicate
from pydantic_core import CoreSchema, PydanticUndefined, core_schema
from typing_extensions import Annotated
@@ -402,6 +402,23 @@ def test_predicate_error_python() -> None:
]
+def test_not_operation_error_python() -> None:
+ ta = TypeAdapter(Annotated[int, Not(lambda x: x > 5)])
+
+ with pytest.raises(ValidationError) as exc_info:
+ ta.validate_python(6)
+
+ # insert_assert(exc_info.value.errors(include_url=False))
+ assert exc_info.value.errors(include_url=False) == [
+ {
+ 'type': 'not_operation_failed',
+ 'loc': (),
+ 'msg': 'Not of test_not_operation_error_python.<locals>.<lambda> failed',
+ 'input': 6,
+ }
+ ]
+
+
def test_annotated_field_info_not_lost_from_forwardref():
from pydantic import BaseModel
diff --git a/tests/test_types.py b/tests/test_types.py
index 47a3adc78f..88361f3399 100644
--- a/tests/test_types.py
+++ b/tests/test_types.py
@@ -6275,6 +6275,7 @@ class Model(BaseModel):
min_length: Annotated[CustomType, annotated_types.MinLen(1)]
max_length: Annotated[CustomType, annotated_types.MaxLen(1)]
predicate: Annotated[CustomType, annotated_types.Predicate(lambda x: x > 0)]
+ not_multiple_of_3: Annotated[CustomType, annotated_types.Not(lambda x: x % 3 == 0)]
model_config = ConfigDict(arbitrary_types_allowed=True)
@@ -6287,6 +6288,7 @@ class Model(BaseModel):
max_length=CustomType([1]),
multiple_of=CustomType(4),
predicate=CustomType(1),
+ not_multiple_of_3=CustomType(4),
)
with pytest.raises(ValidationError) as exc_info:
@@ -6299,6 +6301,7 @@ class Model(BaseModel):
max_length=CustomType([1, 2, 3]),
multiple_of=CustomType(3),
predicate=CustomType(-1),
+ not_multiple_of_3=CustomType(6),
)
# insert_assert(exc_info.value.errors(include_url=False))
assert exc_info.value.errors(include_url=False) == [
@@ -6357,6 +6360,12 @@ class Model(BaseModel):
'msg': 'Predicate test_constraints_arbitrary_type.<locals>.Model.<lambda> failed',
'input': CustomType(-1),
},
+ {
+ 'type': 'not_operation_failed',
+ 'loc': ('not_multiple_of_3',),
+ 'msg': 'Not of test_constraints_arbitrary_type.<locals>.Model.<lambda> failed',
+ 'input': CustomType(6),
+ },
]
| Support for `annotated_types.Not`
### Initial Checks
- [X] I have searched Google & GitHub for similar requests and couldn't find anything
- [X] I have read and followed [the docs](https://docs.pydantic.dev) and still think this feature is missing
### Description
I need to use a `PositiveOddInt` type. Such a type can be generalized to `PositiveInt` and `NotMultipleOf(2)`.
json-schema for `NotMultipleOf(2)` would look like `"not": {"multipleOf": 2}`
Adding support for `annotated_types.Not` would make possible something like this:
```python
class PositiveOddIntRootModel(pydantic.RootModel):
root: Annotated[int, annotated_types.Not(annotated_types.MultipleOf(2)), annotated_types.Ge(0)]
```
I have a separate implementation of NotMultipleOf and PositiveOddInt [here](https://github.com/h2020charisma/ramanchada2/blob/main/src/ramanchada2/misc/types/positive_not_multiple.py), but it uses pydantic 1. Now that I am trying to migrate to pydantic 2, I have seen [here](https://docs.pydantic.dev/latest/concepts/types/#customizing_validation_with_get_pydantic_core_schema) that future changes are possible and that annotated-types and native types are preferred. Would it be possible to add support for `annotated_types.Not`, or could you suggest an alternative?
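With `Not` handled analogously to `Predicate` (which is what the patch above does), the requested type could be written roughly as follows; this is a sketch assuming a pydantic build that includes that change:
```python
import annotated_types
from typing_extensions import Annotated
from pydantic import TypeAdapter, ValidationError

# Positive odd int: >= 1 and *not* a multiple of 2.
PositiveOddInt = Annotated[int, annotated_types.Ge(1), annotated_types.Not(lambda x: x % 2 == 0)]

ta = TypeAdapter(PositiveOddInt)
print(ta.validate_python(7))        # 7
try:
    ta.validate_python(8)
except ValidationError as exc:
    print(exc.errors()[0]['type'])  # 'not_operation_failed', the error type added by the patch
```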
### Affected Components
- [ ] [Compatibility between releases](https://docs.pydantic.dev/changelog/)
- [X] [Data validation/parsing](https://docs.pydantic.dev/concepts/models/#basic-model-usage)
- [X] [Data serialization](https://docs.pydantic.dev/concepts/serialization/) - `.model_dump()` and `.model_dump_json()`
- [X] [JSON Schema](https://docs.pydantic.dev/concepts/json_schema/)
- [ ] [Dataclasses](https://docs.pydantic.dev/concepts/dataclasses/)
- [ ] [Model Config](https://docs.pydantic.dev/concepts/config/)
- [X] [Field Types](https://docs.pydantic.dev/api/types/) - adding or changing a particular data type
- [ ] [Function validation decorator](https://docs.pydantic.dev/concepts/validation_decorator/)
- [ ] [Generic Models](https://docs.pydantic.dev/concepts/models/#generic-models)
- [ ] [Other Model behaviour](https://docs.pydantic.dev/concepts/models/) - `model_construct()`, pickling, private attributes, ORM mode
- [ ] [Plugins](https://docs.pydantic.dev/) and integration with other tools - mypy, FastAPI, python-devtools, Hypothesis, VS Code, PyCharm, etc.
| We'd welcome support for this. PRs welcome!
Hi @sydney-runkle, Could you please assign this to me ?
Hi @georgievgeorgi @sydney-runkle
Can we use something like this ?
```
from functools import partial
from typing_extensions import Annotated
import annotated_types
from pydantic import AfterValidator, TypeAdapter, WithJsonSchema
def validate_not_multiple_of(x: int, y: int) -> int:
if x % y == 0:
raise ValueError(f"value must not be multiple of {y}")
return x
PositiveOddInt = Annotated[
int,
annotated_types.Gt(0),
AfterValidator(partial(validate_not_multiple_of, y=2)),
WithJsonSchema({"type": "positive_odd_int"}),
]
ta = TypeAdapter(PositiveOddInt)
assert ta.validate_python(3) == 3
assert ta.dump_json(3) == b"3"
assert ta.json_schema() == {"type": "positive_odd_int"}
```
If I am not mistaken, supporting `annotated_types.Not` would require changes in `pydantic-core`. The implementation would differ from other annotated types, such as `annotated_types.Le`, as `annotated_types.Not` takes a `Callable` as input. I would like to discuss the solution first before proceeding with the implementation, as it might require significant changes.
@sydney-runkle What do you think ?
@aditkumar72, good question. I think we can handle this similarly to `annotated_types.Predicate`:
See this as a reference: https://github.com/pydantic/pydantic/blob/8cd7557df17829eb0d0f7eb2fbfd0793364fd8a9/pydantic/_internal/_known_annotated_metadata.py#L302-L314 | 1,724,337,737,000 | [
"awaiting author revision",
"relnotes-feature"
] | Feature Request | [
"pydantic/_internal/_known_annotated_metadata.py:apply_known_metadata"
] | [] | 1 |
pydantic/pydantic | pydantic__pydantic-9478 | 777cff056954e083299ea1ceaf241b3c6324c19b | diff --git a/pydantic/_internal/_decorators.py b/pydantic/_internal/_decorators.py
index 78414cbe33..65ea8380de 100644
--- a/pydantic/_internal/_decorators.py
+++ b/pydantic/_internal/_decorators.py
@@ -514,12 +514,11 @@ def inspect_validator(validator: Callable[..., Any], mode: FieldValidatorModes)
"""
try:
sig = signature(validator)
- except ValueError:
- # builtins and some C extensions don't have signatures
- # assume that they don't take an info argument and only take a single argument
- # e.g. `str.strip` or `datetime.datetime`
+ except (ValueError, TypeError):
+ # `inspect.signature` might not be able to infer a signature, e.g. with C objects.
+ # In this case, we assume no info argument is present:
return False
- n_positional = count_positional_params(sig)
+ n_positional = count_positional_required_params(sig)
if mode == 'wrap':
if n_positional == 3:
return True
@@ -555,12 +554,17 @@ def inspect_field_serializer(
Returns:
Tuple of (is_field_serializer, info_arg).
"""
- sig = signature(serializer)
+ try:
+ sig = signature(serializer)
+ except (ValueError, TypeError):
+ # `inspect.signature` might not be able to infer a signature, e.g. with C objects.
+ # In this case, we assume no info argument is present and this is not a method:
+ return (False, False)
first = next(iter(sig.parameters.values()), None)
is_field_serializer = first is not None and first.name == 'self'
- n_positional = count_positional_params(sig)
+ n_positional = count_positional_required_params(sig)
if is_field_serializer:
# -1 to correct for self parameter
info_arg = _serializer_info_arg(mode, n_positional - 1)
@@ -599,7 +603,7 @@ def inspect_annotated_serializer(serializer: Callable[..., Any], mode: Literal['
# `inspect.signature` might not be able to infer a signature, e.g. with C objects.
# In this case, we assume no info argument is present:
return False
- info_arg = _serializer_info_arg(mode, count_positional_params(sig))
+ info_arg = _serializer_info_arg(mode, count_positional_required_params(sig))
if info_arg is None:
raise PydanticUserError(
f'Unrecognized field_serializer function signature for {serializer} with `mode={mode}`:{sig}',
@@ -627,7 +631,7 @@ def inspect_model_serializer(serializer: Callable[..., Any], mode: Literal['plai
)
sig = signature(serializer)
- info_arg = _serializer_info_arg(mode, count_positional_params(sig))
+ info_arg = _serializer_info_arg(mode, count_positional_required_params(sig))
if info_arg is None:
raise PydanticUserError(
f'Unrecognized model_serializer function signature for {serializer} with `mode={mode}`:{sig}',
@@ -773,8 +777,26 @@ def get_function_return_type(
return explicit_return_type
-def count_positional_params(sig: Signature) -> int:
- return sum(1 for param in sig.parameters.values() if can_be_positional(param))
+def count_positional_required_params(sig: Signature) -> int:
+ """Get the number of positional (required) arguments of a signature.
+
+ This function should only be used to inspect signatures of validation and serialization functions.
+ The first argument (the value being serialized or validated) is counted as a required argument
+ even if a default value exists.
+
+ Returns:
+ The number of positional arguments of a signature.
+ """
+ parameters = list(sig.parameters.values())
+ return sum(
+ 1
+ for param in parameters
+ if can_be_positional(param)
+ # First argument is the value being validated/serialized, and can have a default value
+ # (e.g. `float`, which has signature `(x=0, /)`). We assume other parameters (the info arg
+ # for instance) should be required, and thus without any default value.
+ and (param.default is Parameter.empty or param == parameters[0])
+ )
def can_be_positional(param: Parameter) -> bool:
| diff --git a/tests/test_decorators.py b/tests/test_decorators.py
index abc588362b..b28cf159b7 100644
--- a/tests/test_decorators.py
+++ b/tests/test_decorators.py
@@ -1,11 +1,49 @@
-import platform
-
import pytest
from pydantic import PydanticUserError
from pydantic._internal._decorators import inspect_annotated_serializer, inspect_validator
+def _two_pos_required_args(a, b):
+ pass
+
+
+def _two_pos_required_args_extra_optional(a, b, c=1, d=2, *, e=3):
+ pass
+
+
+def _three_pos_required_args(a, b, c):
+ pass
+
+
+def _one_pos_required_arg_one_optional(a, b=1):
+ pass
+
+
[email protected](
+ [
+ 'obj',
+ 'mode',
+ 'expected',
+ ],
+ [
+ (str, 'plain', False),
+ (float, 'plain', False),
+ (int, 'plain', False),
+ (lambda a: str(a), 'plain', False),
+ (lambda a='': str(a), 'plain', False),
+ (_two_pos_required_args, 'plain', True),
+ (_two_pos_required_args, 'wrap', False),
+ (_two_pos_required_args_extra_optional, 'plain', True),
+ (_two_pos_required_args_extra_optional, 'wrap', False),
+ (_three_pos_required_args, 'wrap', True),
+ (_one_pos_required_arg_one_optional, 'plain', False),
+ ],
+)
+def test_inspect_validator(obj, mode, expected):
+ assert inspect_validator(obj, mode=mode) == expected
+
+
def test_inspect_validator_error_wrap():
def validator1(arg1):
pass
@@ -43,18 +81,6 @@ def validator3(arg1, arg2, arg3):
assert e.value.code == 'validator-signature'
-def _accepts_info_arg_plain(a, b):
- pass
-
-
-def _accepts_info_arg_wrap(a, b, c):
- pass
-
-
[email protected](
- platform.python_implementation() == 'PyPy',
- reason='`inspect.signature works differently on PyPy`.',
-)
@pytest.mark.parametrize(
[
'obj',
@@ -66,9 +92,13 @@ def _accepts_info_arg_wrap(a, b, c):
(float, 'plain', False),
(int, 'plain', False),
(lambda a: str(a), 'plain', False),
- (_accepts_info_arg_plain, 'plain', True),
- (_accepts_info_arg_plain, 'wrap', False),
- (_accepts_info_arg_wrap, 'wrap', True),
+ (lambda a='': str(a), 'plain', False),
+ (_two_pos_required_args, 'plain', True),
+ (_two_pos_required_args, 'wrap', False),
+ (_two_pos_required_args_extra_optional, 'plain', True),
+ (_two_pos_required_args_extra_optional, 'wrap', False),
+ (_three_pos_required_args, 'wrap', True),
+ (_one_pos_required_arg_one_optional, 'plain', False),
],
)
def test_inspect_annotated_serializer(obj, mode, expected):
diff --git a/tests/test_serialize.py b/tests/test_serialize.py
index c28debcbb6..d1ce9818fe 100644
--- a/tests/test_serialize.py
+++ b/tests/test_serialize.py
@@ -225,6 +225,10 @@ def test_serialize_valid_signatures():
def ser_plain(v: Any, info: SerializationInfo) -> Any:
return f'{v:,}'
+ def ser_plain_no_info(v: Any, unrelated_arg: int = 1, other_unrelated_arg: int = 2) -> Any:
+ # Arguments with default values are not treated as info arg.
+ return f'{v:,}'
+
def ser_wrap(v: Any, nxt: SerializerFunctionWrapHandler, info: SerializationInfo) -> Any:
return f'{nxt(v):,}'
@@ -233,6 +237,7 @@ class MyModel(BaseModel):
f2: int
f3: int
f4: int
+ f5: int
@field_serializer('f1')
def ser_f1(self, v: Any, info: FieldSerializationInfo) -> Any:
@@ -249,7 +254,8 @@ def ser_f2(self, v: Any, nxt: SerializerFunctionWrapHandler, info: FieldSerializ
return f'{nxt(v):,}'
ser_f3 = field_serializer('f3')(ser_plain)
- ser_f4 = field_serializer('f4', mode='wrap')(ser_wrap)
+ ser_f4 = field_serializer('f4')(ser_plain_no_info)
+ ser_f5 = field_serializer('f5', mode='wrap')(ser_wrap)
m = MyModel(**{f'f{x}': x * 1_000 for x in range(1, 9)})
@@ -258,8 +264,9 @@ def ser_f2(self, v: Any, nxt: SerializerFunctionWrapHandler, info: FieldSerializ
'f2': '2,000',
'f3': '3,000',
'f4': '4,000',
+ 'f5': '5,000',
}
- assert m.model_dump_json() == '{"f1":"1,000","f2":"2,000","f3":"3,000","f4":"4,000"}'
+ assert m.model_dump_json() == '{"f1":"1,000","f2":"2,000","f3":"3,000","f4":"4,000","f5":"5,000"}'
def test_invalid_signature_no_params() -> None:
@@ -906,7 +913,7 @@ class Model(TypedDict):
y: float
@model_serializer(mode='wrap')
- def _serialize(self, handler, info: Optional[SerializationInfo] = None):
+ def _serialize(self, handler, info: SerializationInfo):
data = handler(self)
if info.context and info.context.get('mode') == 'x-only':
data.pop('y')
diff --git a/tests/test_validators.py b/tests/test_validators.py
index 52a8bda88f..d2518cb77b 100644
--- a/tests/test_validators.py
+++ b/tests/test_validators.py
@@ -1,6 +1,5 @@
import contextlib
import re
-import sys
from collections import deque
from dataclasses import dataclass
from datetime import date, datetime
@@ -64,7 +63,6 @@ class Model(BaseModel):
assert Model(x='1.0').x == 1.0
[email protected](sys.version_info >= (3, 9) and sys.implementation.name == 'pypy', reason='PyPy 3.9+ bug')
def test_annotated_validator_builtin() -> None:
"""https://github.com/pydantic/pydantic/issues/6752"""
TruncatedFloat = Annotated[float, BeforeValidator(int)]
@@ -2886,3 +2884,17 @@ class UnsupportedClass:
model = type_adapter.validate_python('abcdefg')
assert isinstance(model, UnsupportedClass)
assert isinstance(type_adapter.dump_python(model), UnsupportedClass)
+
+
+def test_validator_with_default_values() -> None:
+ def validate_x(v: int, unrelated_arg: int = 1, other_unrelated_arg: int = 2) -> int:
+ assert v != -1
+ return v
+
+ class Model(BaseModel):
+ x: int
+
+ val_x = field_validator('x')(validate_x)
+
+ with pytest.raises(ValidationError):
+ Model(x=-1)
| Support more callables in `PlainSerializer`
### Initial Checks
- [X] I have searched Google & GitHub for similar requests and couldn't find anything
- [X] I have read and followed [the docs](https://docs.pydantic.dev) and still think this feature is missing
### Description
As [noticed](https://github.com/pydantic/pydantic/pull/9450#issuecomment-2120916182) in https://github.com/pydantic/pydantic/pull/9450, the callable passed into `PlainSerializer`/`WrapSerializer` can optionally take a `serializer` and an `info` argument. However, callables with arguments that have default values will not be accepted, even though they could work fine:
```python
from typing import Annotated
from pydantic import BaseModel, PlainSerializer
def my_callable(value, unrelated_arg = None, other_unrelated_arg = 1, even_more = "a"): ...
class Model(BaseModel):
foo: Annotated[int, PlainSerializer(my_callable)]
```
This currently fails as `my_callable` has `4` positional arguments, and Pydantic expects at most 3 (for the wrap serializer). One real use case is with `str`, having the signature `<Signature (object=<no value>, encoding=<no value>, errors=<no value>)>`.
I think it is reasonable to assume that any argument with default values should not be counted by `count_positional_params`. After all, if you need the `info` argument there's no reason to have a default value. I'll try to come up with a solution which will hopefully not break existing use cases.
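The counting rule the fix adopts can be sketched in isolation (mirroring `count_positional_required_params` from the patch above; the example functions are the ones used in the tests):
```python
import inspect

def count_positional_required(func) -> int:
    # Ignore default values, except on the first parameter
    # (the value being validated/serialized).
    params = list(inspect.signature(func).parameters.values())
    return sum(
        1
        for i, p in enumerate(params)
        if p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD)
        and (p.default is p.empty or i == 0)
    )

def _two_pos_required_args(a, b): ...
def _one_pos_required_arg_one_optional(a, b=1): ...
def my_callable(value, unrelated_arg=None, other_unrelated_arg=1, even_more="a"): ...

print(count_positional_required(_two_pos_required_args))             # 2 -> info argument expected
print(count_positional_required(_one_pos_required_arg_one_optional)) # 1 -> no info argument
print(count_positional_required(my_callable))                        # 1 -> accepted as a plain serializer
```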
### Affected Components
- [ ] [Compatibility between releases](https://docs.pydantic.dev/changelog/)
- [ ] [Data validation/parsing](https://docs.pydantic.dev/concepts/models/#basic-model-usage)
- [ ] [Data serialization](https://docs.pydantic.dev/concepts/serialization/) - `.model_dump()` and `.model_dump_json()`
- [ ] [JSON Schema](https://docs.pydantic.dev/concepts/json_schema/)
- [ ] [Dataclasses](https://docs.pydantic.dev/concepts/dataclasses/)
- [ ] [Model Config](https://docs.pydantic.dev/concepts/config/)
- [ ] [Field Types](https://docs.pydantic.dev/api/types/) - adding or changing a particular data type
- [ ] [Function validation decorator](https://docs.pydantic.dev/concepts/validation_decorator/)
- [ ] [Generic Models](https://docs.pydantic.dev/concepts/models/#generic-models)
- [X] [Other Model behaviour](https://docs.pydantic.dev/concepts/models/) - `model_construct()`, pickling, private attributes, ORM mode
- [ ] [Plugins](https://docs.pydantic.dev/) and integration with other tools - mypy, FastAPI, python-devtools, Hypothesis, VS Code, PyCharm, etc.
| 1,716,322,604,000 | [
"ready for review",
"relnotes-fix"
] | Feature Request | [
"pydantic/_internal/_decorators.py:inspect_validator",
"pydantic/_internal/_decorators.py:inspect_field_serializer",
"pydantic/_internal/_decorators.py:inspect_annotated_serializer",
"pydantic/_internal/_decorators.py:inspect_model_serializer",
"pydantic/_internal/_decorators.py:count_positional_params"
] | [
"pydantic/_internal/_decorators.py:count_positional_required_params"
] | 5 |
|
pydantic/pydantic | pydantic__pydantic-8706 | 8898b8fee91c5a9dd99f4eb0c05f6950c2f684e0 | diff --git a/pydantic/types.py b/pydantic/types.py
index c2534c8815..19ec5f7edb 100644
--- a/pydantic/types.py
+++ b/pydantic/types.py
@@ -1740,6 +1740,8 @@ class MyModel(BaseModel):
#> 44.4PiB
print(m.size.human_readable(decimal=True))
#> 50.0PB
+ print(m.size.human_readable(separator=' '))
+ #> 44.4 PiB
print(m.size.to('TiB'))
#> 45474.73508864641
@@ -1818,12 +1820,13 @@ def _validate(cls, __input_value: Any, _: core_schema.ValidationInfo) -> ByteSiz
return cls(int(float(scalar) * unit_mult))
- def human_readable(self, decimal: bool = False) -> str:
+ def human_readable(self, decimal: bool = False, separator: str = '') -> str:
"""Converts a byte size to a human readable string.
Args:
decimal: If True, use decimal units (e.g. 1000 bytes per KB). If False, use binary units
(e.g. 1024 bytes per KiB).
+ separator: A string used to split the value and unit. Defaults to an empty string ('').
Returns:
A human readable string representation of the byte size.
@@ -1841,12 +1844,12 @@ def human_readable(self, decimal: bool = False) -> str:
for unit in units:
if abs(num) < divisor:
if unit == 'B':
- return f'{num:0.0f}{unit}'
+ return f'{num:0.0f}{separator}{unit}'
else:
- return f'{num:0.1f}{unit}'
+ return f'{num:0.1f}{separator}{unit}'
num /= divisor
- return f'{num:0.1f}{final_unit}'
+ return f'{num:0.1f}{separator}{final_unit}'
def to(self, unit: str) -> float:
"""Converts a byte size to another unit, including both byte and bit units.
| diff --git a/tests/test_types.py b/tests/test_types.py
index 63f0d549f4..849a99a9b9 100644
--- a/tests/test_types.py
+++ b/tests/test_types.py
@@ -4436,23 +4436,23 @@ class FrozenSetModel(BaseModel):
@pytest.mark.parametrize(
- 'input_value,output,human_bin,human_dec',
+ 'input_value,output,human_bin,human_dec,human_sep',
(
- (1, 1, '1B', '1B'),
- ('1', 1, '1B', '1B'),
- ('1.0', 1, '1B', '1B'),
- ('1b', 1, '1B', '1B'),
- ('1.5 KB', int(1.5e3), '1.5KiB', '1.5KB'),
- ('1.5 K', int(1.5e3), '1.5KiB', '1.5KB'),
- ('1.5 MB', int(1.5e6), '1.4MiB', '1.5MB'),
- ('1.5 M', int(1.5e6), '1.4MiB', '1.5MB'),
- ('5.1kib', 5222, '5.1KiB', '5.2KB'),
- ('6.2EiB', 7148113328562451456, '6.2EiB', '7.1EB'),
- ('8bit', 1, '1B', '1B'),
- ('1kbit', 125, '125B', '125B'),
+ (1, 1, '1B', '1B', '1 B'),
+ ('1', 1, '1B', '1B', '1 B'),
+ ('1.0', 1, '1B', '1B', '1 B'),
+ ('1b', 1, '1B', '1B', '1 B'),
+ ('1.5 KB', int(1.5e3), '1.5KiB', '1.5KB', '1.5 KiB'),
+ ('1.5 K', int(1.5e3), '1.5KiB', '1.5KB', '1.5 KiB'),
+ ('1.5 MB', int(1.5e6), '1.4MiB', '1.5MB', '1.4 MiB'),
+ ('1.5 M', int(1.5e6), '1.4MiB', '1.5MB', '1.4 MiB'),
+ ('5.1kib', 5222, '5.1KiB', '5.2KB', '5.1 KiB'),
+ ('6.2EiB', 7148113328562451456, '6.2EiB', '7.1EB', '6.2 EiB'),
+ ('8bit', 1, '1B', '1B', '1 B'),
+ ('1kbit', 125, '125B', '125B', '125 B'),
),
)
-def test_bytesize_conversions(input_value, output, human_bin, human_dec):
+def test_bytesize_conversions(input_value, output, human_bin, human_dec, human_sep):
class Model(BaseModel):
size: ByteSize
@@ -4462,6 +4462,7 @@ class Model(BaseModel):
assert m.size.human_readable() == human_bin
assert m.size.human_readable(decimal=True) == human_dec
+ assert m.size.human_readable(separator=' ') == human_sep
def test_bytesize_to():
| `ByteSize.human_readable` with an optional separator (e.g. whitespace) between value and unit
### Initial Checks
- [X] I have searched Google & GitHub for similar requests and couldn't find anything
- [X] I have read and followed [the docs](https://docs.pydantic.dev) and still think this feature is missing
### Description
Currently, the return value of `ByteSize.human_readable` does not have any separator between the value and unit.
```python
>>> size = ByteSize(12345678)
>>> size.human_readable()
'11.8MiB'
```
It looks a little bit weird to me, and I think it could be better if there's a space separating the value and unit. Though there are many ways to handle this need, it may be a better idea to provide an optional argument with this method? For example:
```python
ByteSize.human_readable(decimal: bool = False, separator: bool = False) -> str
```
When I switch the `separator` on:
```python
>>> size = ByteSize(12345678)
>>> size.human_readable(separator=True)
'11.8 MiB'
```
It's probably easy to be implemented, so I can create a PR to implement this feature if it will be acceptable.
What do you think about it?
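For reference, the merged change (see the patch above) takes `separator` as a string rather than a boolean, so any separator string can be used; a usage sketch assuming a release that contains that patch:
```python
from pydantic import ByteSize

size = ByteSize(12345678)
print(size.human_readable())                             # '11.8MiB'
print(size.human_readable(separator=' '))                # '11.8 MiB'
print(size.human_readable(decimal=True, separator=' '))  # '12.3 MB'
```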
### Affected Components
- [ ] [Compatibility between releases](https://docs.pydantic.dev/changelog/)
- [ ] [Data validation/parsing](https://docs.pydantic.dev/concepts/models/#basic-model-usage)
- [ ] [Data serialization](https://docs.pydantic.dev/concepts/serialization/) - `.model_dump()` and `.model_dump_json()`
- [ ] [JSON Schema](https://docs.pydantic.dev/concepts/json_schema/)
- [ ] [Dataclasses](https://docs.pydantic.dev/concepts/dataclasses/)
- [ ] [Model Config](https://docs.pydantic.dev/concepts/config/)
- [X] [Field Types](https://docs.pydantic.dev/api/types/) - adding or changing a particular data type
- [ ] [Function validation decorator](https://docs.pydantic.dev/concepts/validation_decorator/)
- [ ] [Generic Models](https://docs.pydantic.dev/concepts/models/#generic-models)
- [ ] [Other Model behaviour](https://docs.pydantic.dev/concepts/models/) - `model_construct()`, pickling, private attributes, ORM mode
- [ ] [Plugins](https://docs.pydantic.dev/) and integration with other tools - mypy, FastAPI, python-devtools, Hypothesis, VS Code, PyCharm, etc.
| @jks15satoshi,
Go for it! Just make sure that by default, the separator is `''` so that this isn't a breaking change. Thanks for the suggestion. Ping me when you need a review :). | 1,706,861,212,000 | [
"awaiting author revision",
"relnotes-feature"
] | Feature Request | [
"pydantic/types.py:ByteSize.human_readable"
] | [] | 1 |
facebookresearch/xformers | facebookresearch__xformers-1166 | 6e10bd21ac6fc878657b24684723ccd05e41d385 | diff --git a/xformers/ops/swiglu_op.py b/xformers/ops/swiglu_op.py
index 630335ac6c..bb27e43308 100644
--- a/xformers/ops/swiglu_op.py
+++ b/xformers/ops/swiglu_op.py
@@ -212,11 +212,11 @@ def info(self):
def _eager_functional_swiglu(
x: torch.Tensor,
w1: torch.Tensor,
- b1: torch.Tensor,
+ b1: Optional[torch.Tensor],
w2: torch.Tensor,
- b2: torch.Tensor,
+ b2: Optional[torch.Tensor],
w3: torch.Tensor,
- b3: torch.Tensor,
+ b3: Optional[torch.Tensor],
) -> torch.Tensor:
x1 = F.linear(x, w1, b1)
x2 = F.linear(x, w2, b2)
| diff --git a/tests/test_swiglu.py b/tests/test_swiglu.py
index 6600488135..46b380f68a 100644
--- a/tests/test_swiglu.py
+++ b/tests/test_swiglu.py
@@ -7,7 +7,7 @@
import functools
import random
from contextlib import nullcontext
-from typing import ContextManager, Optional, Sequence, cast
+from typing import ContextManager, Optional, Sequence, Union, cast
import pytest
import torch
@@ -95,8 +95,10 @@ def generate_test_shapes():
(4728, 1536, 2736),
# GPT-3 (small)
(2048, 2048, 5632),
+ # TODO: Not enough memory for this shape in github CI.
+ # restore it after rearrange the code (fw, fw, bw, bw) -> (fw, bw, fw, bw)
# Chinchilla
- (2048, 8192, 22016),
+ # (2048, 8192, 22016),
]
# Add some random shapes
r = random.Random(0)
@@ -113,7 +115,11 @@ def generate_test_shapes():
_dtypes = [torch.float16]
if _is_sm80:
_dtypes += [torch.bfloat16]
-_ops: Sequence[xsw.SwiGLUOp] = [xsw.SwiGLUFusedOp, xsw.SwiGLUPackedFusedOp]
+_ops: Sequence[Union[xsw.SwiGLUOp, None]] = [
+ xsw.SwiGLUFusedOp,
+ xsw.SwiGLUPackedFusedOp,
+ None,
+]
FORWARD_ATOL = {torch.float: 2e-6, torch.half: 1e-2, torch.bfloat16: 1e-2}
FORWARD_RTOL = {torch.float: 1e-5, torch.half: 4e-3, torch.bfloat16: 4e-3}
@@ -125,7 +131,7 @@ def generate_test_shapes():
}
BACKWARD_RTOL = {
torch.float: 2e-3,
- torch.half: 1e-2,
+ torch.half: 3e-2,
torch.bfloat16: 4e-2,
}
@@ -138,7 +144,9 @@ def create_module_cached(**kwargs) -> xsw.SwiGLU:
@disable_tf32
@disable_on_rocm
@pytest.mark.parametrize("autocast", [False, True], ids=["regular", "autocast"])
[email protected]("op", _ops, ids=[x.NAME for x in _ops])
[email protected](
+ "op", _ops, ids=[x.NAME if x is not None else "auto_selected_op" for x in _ops]
+)
@pytest.mark.parametrize("dtype", _dtypes, ids=[str(x) for x in _dtypes])
@pytest.mark.parametrize("device", _devices)
@pytest.mark.parametrize("bias", [False, True], ids=["nobias", "bias"])
@@ -159,7 +167,7 @@ def test_forward_backward(
):
torch.manual_seed(shape[0] * shape[1] * shape[2])
- if not op.supports(
+ if op is not None and not op.supports(
xsw.SwiGLUOpDispatch(
device=device,
dtype=dtype,
@@ -170,6 +178,10 @@ def test_forward_backward(
):
pytest.skip("Not supported by operator")
+ if op is not None:
+ if pack_weights and not op.PACKED_WEIGHTS:
+ pytest.skip("Not supported combination when module.op is set manually")
+
inp_model_dtype = torch.float if autocast else dtype
x = torch.randn(shape[:2], device=device, dtype=inp_model_dtype)
@@ -200,8 +212,10 @@ def test_forward_backward(
torch.autocast("cuda", dtype=dtype) if autocast else nullcontext(),
)
with cm:
- ref = module(x)
- out = xsw.swiglu(x, *module._ordered_params(), op=op)
+ ref = xsw._eager_functional_swiglu(x, *module._ordered_params())
+
+ module.op = op
+ out = module(x)
if ref_f32 is None:
ref_f32 = ref
| Test of SwiGLU is passing when it should fail
# 🐛 Bug
The test passes when it should fail if you corrupt the output of `swiglu_packedw`.
## To Reproduce
Steps to reproduce the behavior:
In the file `xformers/csrc/swiglu/swiglu_packedw.cpp`
```c++
class SwiGLUPackedWeights
: public torch::autograd::Function<SwiGLUPackedWeights> {
public:
static at::Tensor forward(
torch::autograd::AutogradContext* ctx,
const at::Tensor& x,
const at::Tensor& w1w2,
const std::optional<at::Tensor>& b1b2,
const at::Tensor w3,
const std::optional<at::Tensor>& b3) {
at::AutoDispatchBelowADInplaceOrView g;
auto w1 = w1w2[0];
auto w2 = w1w2[1];
std::optional<at::Tensor> b1, b2;
if (b1b2.has_value()) {
b1 = b1b2.value()[0];
b2 = b1b2.value()[1];
}
at::Tensor x1, x2, x4;
std::tie(x1, x2, x4) = dual_gemm_silu_identity_mul(x, w1, b1, w2, b2);
auto x5 = torch::nn::functional::linear(
x4, w3, b3.has_value() ? b3.value() : at::Tensor());
ctx->save_for_backward({x, w1w2, w3, x1, x2});
ctx->saved_data["has_b1b2"] = b1b2.has_value();
ctx->saved_data["has_b3"] = b3.has_value();
return x5;
}
```
Try, for example, to replace `return x5;` with `return x5 * x5;` and run the tests. The tests will wrongly pass.
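The accompanying test patch addresses this by comparing against `_eager_functional_swiglu` instead of routing the reference through the same operator. The trap in toy form (this is not xformers code, just the testing principle):
```python
import torch
import torch.nn.functional as F

def eager_reference(x, w1, w2, w3):
    # Plain eager SwiGLU-style computation: silu(x @ w1) * (x @ w2), projected by w3.
    return (F.silu(x @ w1) * (x @ w2)) @ w3

def fused_under_test(x, w1, w2, w3):
    out = (F.silu(x @ w1) * (x @ w2)) @ w3
    return out * out  # injected bug, analogous to changing "return x5;" to "return x5 * x5;"

x, w1, w2, w3 = torch.randn(4, 8), torch.randn(8, 16), torch.randn(8, 16), torch.randn(16, 8)

# Comparing the implementation against itself hides the bug:
print(torch.allclose(fused_under_test(x, w1, w2, w3), fused_under_test(x, w1, w2, w3)))  # True
# Comparing against an independent eager reference catches it:
print(torch.allclose(fused_under_test(x, w1, w2, w3), eager_reference(x, w1, w2, w3)))   # False
```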
## Environment
```
Collecting environment information...
PyTorch version: 2.5.1+cu124
Is debug build: False
CUDA used to build PyTorch: 12.4
ROCM used to build PyTorch: N/A
OS: Ubuntu 20.04.6 LTS (x86_64)
GCC version: (Ubuntu 9.4.0-1ubuntu1~20.04.2) 9.4.0
Clang version: Could not collect
CMake version: version 3.31.0
Libc version: glibc-2.31
Python version: 3.9.5 (default, Nov 23 2021, 15:27:38) [GCC 9.3.0] (64-bit runtime)
Python platform: Linux-5.13.0-35-generic-x86_64-with-glibc2.31
Is CUDA available: True
CUDA runtime version: 12.6.77
CUDA_MODULE_LOADING set to: LAZY
GPU models and configuration:
GPU 0: NVIDIA A100-SXM4-80GB
GPU 1: NVIDIA A100-SXM4-80GB
GPU 2: NVIDIA A100-SXM4-80GB
GPU 3: NVIDIA A100-SXM4-80GB
Nvidia driver version: 560.28.03
cuDNN version: Probably one of the following:
/usr/lib/x86_64-linux-gnu/libcudnn.so.9.5.0
/usr/lib/x86_64-linux-gnu/libcudnn_adv.so.9.5.0
/usr/lib/x86_64-linux-gnu/libcudnn_cnn.so.9.5.0
/usr/lib/x86_64-linux-gnu/libcudnn_engines_precompiled.so.9.5.0
/usr/lib/x86_64-linux-gnu/libcudnn_engines_runtime_compiled.so.9.5.0
/usr/lib/x86_64-linux-gnu/libcudnn_graph.so.9.5.0
/usr/lib/x86_64-linux-gnu/libcudnn_heuristic.so.9.5.0
/usr/lib/x86_64-linux-gnu/libcudnn_ops.so.9.5.0
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Byte Order: Little Endian
Address sizes: 43 bits physical, 48 bits virtual
CPU(s): 112
On-line CPU(s) list: 0-111
Thread(s) per core: 2
Core(s) per socket: 56
Socket(s): 1
NUMA node(s): 1
Vendor ID: AuthenticAMD
CPU family: 23
Model: 1
Model name: AMD EPYC Processor
Stepping: 2
CPU MHz: 1999.999
BogoMIPS: 3999.99
Hypervisor vendor: KVM
Virtualization type: full
L1d cache: 1.8 MiB
L1i cache: 3.5 MiB
L2 cache: 28 MiB
L3 cache: 112 MiB
NUMA node0 CPU(s): 0-111
Vulnerability Itlb multihit: Not affected
Vulnerability L1tf: Not affected
Vulnerability Mds: Not affected
Vulnerability Meltdown: Not affected
Vulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp
Vulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization
Vulnerability Spectre v2: Mitigation; LFENCE, IBPB conditional, STIBP conditional, RSB filling
Vulnerability Srbds: Not affected
Vulnerability Tsx async abort: Not affected
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm rep_good nopl cpuid extd_apicid tsc_known_freq pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext ssbd ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 arat umip
Versions of relevant libraries:
[pip3] flake8==6.1.0
[pip3] flake8-copyright==0.2.4
[pip3] mypy==1.10.0
[pip3] mypy-extensions==1.0.0
[pip3] numpy==2.0.2
[pip3] pytorch_sphinx_theme==0.0.24
[pip3] torch==2.5.1
[pip3] torchaudio==2.5.1
[pip3] torchvision==0.20.1
[pip3] triton==3.1.0
[conda] Could not collect
```
- PyTorch Version: 2.5.1+cu124
- OS: Linux
- Python version: 3.9.5
| 1,732,720,286,000 | [
"CLA Signed"
] | Bug Report | [
"xformers/ops/swiglu_op.py:_eager_functional_swiglu"
] | [] | 1 |
|
yt-dlp/yt-dlp | yt-dlp__yt-dlp-11751 | fca3eb5f8be08d5fab2e18b45b7281a12e566725 | diff --git a/yt_dlp/extractor/youtube.py b/yt_dlp/extractor/youtube.py
index 83dde7d9c41f..c9b83161871d 100644
--- a/yt_dlp/extractor/youtube.py
+++ b/yt_dlp/extractor/youtube.py
@@ -3119,19 +3119,26 @@ def _genslice(start, end, step):
self.to_screen('Extracted signature function:\n' + code)
def _parse_sig_js(self, jscode):
+ # Examples where `sig` is funcname:
+ # sig=function(a){a=a.split(""); ... ;return a.join("")};
+ # ;c&&(c=sig(decodeURIComponent(c)),a.set(b,encodeURIComponent(c)));return a};
+ # {var l=f,m=h.sp,n=sig(decodeURIComponent(h.s));l.set(m,encodeURIComponent(n))}
+ # sig=function(J){J=J.split(""); ... ;return J.join("")};
+ # ;N&&(N=sig(decodeURIComponent(N)),J.set(R,encodeURIComponent(N)));return J};
+ # {var H=u,k=f.sp,v=sig(decodeURIComponent(f.s));H.set(k,encodeURIComponent(v))}
funcname = self._search_regex(
- (r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
+ (r'\b(?P<var>[a-zA-Z0-9$]+)&&\((?P=var)=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\((?P=var)\)\)',
+ r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*(?P<arg>[a-zA-Z0-9$]+)\s*\)\s*{\s*(?P=arg)\s*=\s*(?P=arg)\.split\(\s*""\s*\)\s*;\s*[^}]+;\s*return\s+(?P=arg)\.join\(\s*""\s*\)',
+ r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)(?:;[a-zA-Z0-9$]{2}\.[a-zA-Z0-9$]{2}\(a,\d+\))?',
+ # Old patterns
+ r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bm=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(h\.s\)\)',
- r'\bc&&\(c=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(c\)\)',
- r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)(?:;[a-zA-Z0-9$]{2}\.[a-zA-Z0-9$]{2}\(a,\d+\))?',
- r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
# Obsolete patterns
r'("|\')signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(',
r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
- r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('),
jscode, 'Initial JS player signature function name', group='sig')
| diff --git a/test/test_youtube_signature.py b/test/test_youtube_signature.py
index 0f7ae34f44f5..56db096caa77 100644
--- a/test/test_youtube_signature.py
+++ b/test/test_youtube_signature.py
@@ -68,6 +68,11 @@
'2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
'AOq0QJ8wRAIgXmPlOPSBkkUs1bYFYlJCfe29xx8j7v1pDL2QwbdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_EMu-m37KtXJoOySqa0',
),
+ (
+ 'https://www.youtube.com/s/player/3bb1f723/player_ias.vflset/en_US/base.js',
+ '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
+ 'MyOSJXtKI3m-uME_jv7-pT12gOFC02RFkGoqWpzE0Cs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
+ ),
]
_NSIG_TESTS = [
| [youtube] player `3bb1f723`: Signature extraction failed: Some formats may be missing
### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE
- [X] I understand that I will be **blocked** if I *intentionally* remove or skip any mandatory\* field
### Checklist
- [X] I'm reporting that yt-dlp is broken on a **supported** site
- [X] I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details
- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#video-url-contains-an-ampersand--and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
- [X] I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates
- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
### Region
USA
### Provide a description that is worded well enough to be understood
I'm using the nightly build with a system config file; that is it.
The config file contents are:
```
$ cat /etc/yt-dlp.conf
--cookies /home/ubuntu/PycharmProjects/cookiesRefresher/cookies.txt
```
The cookie file is refreshed every 24 hours and uses a dummy Google account.
This setup used to work less than a week ago but is now broken; I haven't changed anything.
Today Google asked me to verify this account. I verified with a phone number and was able to play videos on the same machine in a Firefox browser.
yt-dlp version and debug info:
```
$ yt-dlp -vU
[debug] Command-line config: ['-vU']
[debug] System config "/etc/yt-dlp.conf": ['--cookies', '/home/ubuntu/PycharmProjects/cookiesRefresher/cookies.txt']
[debug] Encodings: locale UTF-8, fs utf-8, pref UTF-8, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version [email protected] from yt-dlp/yt-dlp-nightly-builds [fca3eb5f8]
[debug] Python 3.12.3 (CPython aarch64 64bit) - Linux-6.8.0-1016-oracle-aarch64-with-glibc2.39 (OpenSSL 3.0.13 30 Jan 2024, glibc 2.39)
[debug] exe versions: ffmpeg 6.1.1 (setts), ffprobe 6.1.1
[debug] Optional libraries: Cryptodome-3.21.0, brotli-1.1.0, certifi-2024.08.30, mutagen-1.47.0, requests-2.32.3, sqlite3-3.45.1, urllib3-2.2.3, websockets-14.1
[debug] Proxy map: {}
[debug] Request Handlers: urllib, requests, websockets
[debug] Loaded 1837 extractors
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
Latest version: [email protected] from yt-dlp/yt-dlp-nightly-builds
yt-dlp is up to date ([email protected] from yt-dlp/yt-dlp-nightly-builds)
```
When attempting to download a video:
```
yt-dlp https://www.youtube.com/watch?v=yaie5Uia4k8 -vU
[debug] Command-line config: ['https://www.youtube.com/watch?v=yaie5Uia4k8', '-vU']
[debug] System config "/etc/yt-dlp.conf": ['--cookies', '/home/ubuntu/PycharmProjects/cookiesRefresher/cookies.txt']
[debug] Encodings: locale UTF-8, fs utf-8, pref UTF-8, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version [email protected] from yt-dlp/yt-dlp-nightly-builds [fca3eb5f8]
[debug] Python 3.12.3 (CPython aarch64 64bit) - Linux-6.8.0-1016-oracle-aarch64-with-glibc2.39 (OpenSSL 3.0.13 30 Jan 2024, glibc 2.39)
[debug] exe versions: ffmpeg 6.1.1 (setts), ffprobe 6.1.1
[debug] Optional libraries: Cryptodome-3.21.0, brotli-1.1.0, certifi-2024.08.30, mutagen-1.47.0, requests-2.32.3, sqlite3-3.45.1, urllib3-2.2.3, websockets-14.1
[debug] Proxy map: {}
[debug] Request Handlers: urllib, requests, websockets
[debug] Loaded 1837 extractors
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
Latest version: [email protected] from yt-dlp/yt-dlp-nightly-builds
yt-dlp is up to date ([email protected] from yt-dlp/yt-dlp-nightly-builds)
[youtube] Extracting URL: https://www.youtube.com/watch?v=yaie5Uia4k8
[youtube] yaie5Uia4k8: Downloading webpage
[debug] [youtube] Extracted SAPISID cookie
[youtube] yaie5Uia4k8: Downloading web creator player API JSON
[youtube] yaie5Uia4k8: Downloading mweb player API JSON
[debug] [youtube] Extracting signature function js_3bb1f723_106
[youtube] yaie5Uia4k8: Downloading player 3bb1f723
WARNING: [youtube] yaie5Uia4k8: Signature extraction failed: Some formats may be missing
[debug] [youtube] Could not find JS function "decodeURIComponent"; please report this issue on https://github.com/yt-dlp/yt-dlp/issues?q= , filling out the appropriate issue template. Confirm you are on the latest version using yt-dlp -U
[debug] [youtube] Extracting signature function js_3bb1f723_110
WARNING: Only images are available for download. use --list-formats to see them
[debug] Sort order given by extractor: quality, res, fps, hdr:12, source, vcodec, channels, acodec, lang, proto
[debug] Formats sorted by: hasvid, ie_pref, quality, res, fps, hdr:12(7), source, vcodec, channels, acodec, lang, proto, size, br, asr, vext, aext, hasaud, id
[debug] Default format spec: bestvideo*+bestaudio/best
ERROR: [youtube] yaie5Uia4k8: Requested format is not available. Use --list-formats for a list of available formats
Traceback (most recent call last):
File "/home/ubuntu/.local/pipx/venvs/yt-dlp/lib/python3.12/site-packages/yt_dlp/YoutubeDL.py", line 1624, in wrapper
return func(self, *args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ubuntu/.local/pipx/venvs/yt-dlp/lib/python3.12/site-packages/yt_dlp/YoutubeDL.py", line 1780, in __extract_info
return self.process_ie_result(ie_result, download, extra_info)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ubuntu/.local/pipx/venvs/yt-dlp/lib/python3.12/site-packages/yt_dlp/YoutubeDL.py", line 1839, in process_ie_result
ie_result = self.process_video_result(ie_result, download=download)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ubuntu/.local/pipx/venvs/yt-dlp/lib/python3.12/site-packages/yt_dlp/YoutubeDL.py", line 2973, in process_video_result
raise ExtractorError(
yt_dlp.utils.ExtractorError: [youtube] yaie5Uia4k8: Requested format is not available. Use --list-formats for a list of available formats
```
Attempting to download the same video while ignoring the config file:
```
$ yt-dlp https://www.youtube.com/watch?v=e8i32DH5rhQ --ignore-config -vU
[debug] Command-line config: ['https://www.youtube.com/watch?v=e8i32DH5rhQ', '--ignore-config', '-vU']
[debug] Encodings: locale UTF-8, fs utf-8, pref UTF-8, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version [email protected] from yt-dlp/yt-dlp-nightly-builds [fca3eb5f8]
[debug] Python 3.12.3 (CPython aarch64 64bit) - Linux-6.8.0-1016-oracle-aarch64-with-glibc2.39 (OpenSSL 3.0.13 30 Jan 2024, glibc 2.39)
[debug] exe versions: ffmpeg 6.1.1 (setts), ffprobe 6.1.1
[debug] Optional libraries: Cryptodome-3.21.0, brotli-1.1.0, certifi-2024.08.30, mutagen-1.47.0, requests-2.32.3, sqlite3-3.45.1, urllib3-2.2.3, websockets-14.1
[debug] Proxy map: {}
[debug] Request Handlers: urllib, requests, websockets
[debug] Loaded 1837 extractors
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
Latest version: [email protected] from yt-dlp/yt-dlp-nightly-builds
yt-dlp is up to date ([email protected] from yt-dlp/yt-dlp-nightly-builds)
[youtube] Extracting URL: https://www.youtube.com/watch?v=e8i32DH5rhQ
[youtube] e8i32DH5rhQ: Downloading webpage
[youtube] e8i32DH5rhQ: Downloading ios player API JSON
[youtube] e8i32DH5rhQ: Downloading mweb player API JSON
ERROR: [youtube] e8i32DH5rhQ: Sign in to confirm you’re not a bot. Use --cookies-from-browser or --cookies for the authentication. See https://github.com/yt-dlp/yt-dlp/wiki/FAQ#how-do-i-pass-cookies-to-yt-dlp for how to manually pass cookies. Also see https://github.com/yt-dlp/yt-dlp/wiki/Extractors#exporting-youtube-cookies for tips on effectively exporting YouTube cookies
File "/home/ubuntu/.local/pipx/venvs/yt-dlp/lib/python3.12/site-packages/yt_dlp/extractor/common.py", line 742, in extract
ie_result = self._real_extract(url)
^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ubuntu/.local/pipx/venvs/yt-dlp/lib/python3.12/site-packages/yt_dlp/extractor/youtube.py", line 4478, in _real_extract
self.raise_no_formats(reason, expected=True)
File "/home/ubuntu/.local/pipx/venvs/yt-dlp/lib/python3.12/site-packages/yt_dlp/extractor/common.py", line 1276, in raise_no_formats
raise ExtractorError(msg, expected=expected, video_id=video_id)
```
### Provide verbose output that clearly demonstrates the problem
- [X] Run **your** yt-dlp command with **-vU** flag added (`yt-dlp -vU <your command line>`)
- [ ] If using API, add `'verbose': True` to `YoutubeDL` params instead
- [X] Copy the WHOLE output (starting with `[debug] Command-line config`) and insert it below
### Complete Verbose Output
```shell
yt-dlp https://www.youtube.com/watch?v=yaie5Uia4k8 -vU
[debug] Command-line config: ['https://www.youtube.com/watch?v=yaie5Uia4k8', '-vU']
[debug] System config "/etc/yt-dlp.conf": ['--cookies', '/home/ubuntu/PycharmProjects/cookiesRefresher/cookies.txt']
[debug] Encodings: locale UTF-8, fs utf-8, pref UTF-8, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version [email protected] from yt-dlp/yt-dlp-nightly-builds [fca3eb5f8]
[debug] Python 3.12.3 (CPython aarch64 64bit) - Linux-6.8.0-1016-oracle-aarch64-with-glibc2.39 (OpenSSL 3.0.13 30 Jan 2024, glibc 2.39)
[debug] exe versions: ffmpeg 6.1.1 (setts), ffprobe 6.1.1
[debug] Optional libraries: Cryptodome-3.21.0, brotli-1.1.0, certifi-2024.08.30, mutagen-1.47.0, requests-2.32.3, sqlite3-3.45.1, urllib3-2.2.3, websockets-14.1
[debug] Proxy map: {}
[debug] Request Handlers: urllib, requests, websockets
[debug] Loaded 1837 extractors
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
Latest version: [email protected] from yt-dlp/yt-dlp-nightly-builds
yt-dlp is up to date ([email protected] from yt-dlp/yt-dlp-nightly-builds)
[youtube] Extracting URL: https://www.youtube.com/watch?v=yaie5Uia4k8
[youtube] yaie5Uia4k8: Downloading webpage
[debug] [youtube] Extracted SAPISID cookie
[youtube] yaie5Uia4k8: Downloading web creator player API JSON
[youtube] yaie5Uia4k8: Downloading mweb player API JSON
[debug] [youtube] Extracting signature function js_3bb1f723_110
[youtube] yaie5Uia4k8: Downloading player 3bb1f723
WARNING: [youtube] yaie5Uia4k8: Signature extraction failed: Some formats may be missing
[debug] [youtube] Could not find JS function "decodeURIComponent"; please report this issue on https://github.com/yt-dlp/yt-dlp/issues?q= , filling out the appropriate issue template. Confirm you are on the latest version using yt-dlp -U
[debug] [youtube] Extracting signature function js_3bb1f723_106
WARNING: Only images are available for download. use --list-formats to see them
[debug] Sort order given by extractor: quality, res, fps, hdr:12, source, vcodec, channels, acodec, lang, proto
[debug] Formats sorted by: hasvid, ie_pref, quality, res, fps, hdr:12(7), source, vcodec, channels, acodec, lang, proto, size, br, asr, vext, aext, hasaud, id
[debug] Default format spec: bestvideo*+bestaudio/best
ERROR: [youtube] yaie5Uia4k8: Requested format is not available. Use --list-formats for a list of available formats
Traceback (most recent call last):
File "/home/ubuntu/.local/pipx/venvs/yt-dlp/lib/python3.12/site-packages/yt_dlp/YoutubeDL.py", line 1624, in wrapper
return func(self, *args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ubuntu/.local/pipx/venvs/yt-dlp/lib/python3.12/site-packages/yt_dlp/YoutubeDL.py", line 1780, in __extract_info
return self.process_ie_result(ie_result, download, extra_info)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ubuntu/.local/pipx/venvs/yt-dlp/lib/python3.12/site-packages/yt_dlp/YoutubeDL.py", line 1839, in process_ie_result
ie_result = self.process_video_result(ie_result, download=download)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ubuntu/.local/pipx/venvs/yt-dlp/lib/python3.12/site-packages/yt_dlp/YoutubeDL.py", line 2973, in process_video_result
raise ExtractorError(
yt_dlp.utils.ExtractorError: [youtube] yaie5Uia4k8: Requested format is not available. Use --list-formats for a list of available formats
```
| Same issue here..
same :(
Signature function extraction is broken for the new player along with nsig.
Keeping this open as a separate issue from #11744 since this is what end users will see in their output | 1,733,495,046,000 | [
"high-priority",
"site-bug",
"site:youtube"
] | Bug Report | [
"yt_dlp/extractor/youtube.py:YoutubeIE._parse_sig_js"
] | [] | 1 |
yt-dlp/yt-dlp | yt-dlp__yt-dlp-11750 | fca3eb5f8be08d5fab2e18b45b7281a12e566725 | diff --git a/yt_dlp/extractor/youtube.py b/yt_dlp/extractor/youtube.py
index 83dde7d9c41f..65cffabb8d83 100644
--- a/yt_dlp/extractor/youtube.py
+++ b/yt_dlp/extractor/youtube.py
@@ -3205,6 +3205,7 @@ def _extract_n_function_name(self, jscode, player_url=None):
# * a.D&&(b="nn"[+a.D],c=a.get(b))&&(c=narray[idx](c),a.set(b,c),narray.length||nfunc("")
# * a.D&&(PL(a),b=a.j.n||null)&&(b=narray[0](b),a.set("n",b),narray.length||nfunc("")
# * a.D&&(b="nn"[+a.D],vL(a),c=a.j[b]||null)&&(c=narray[idx](c),a.set(b,c),narray.length||nfunc("")
+ # * J.J="";J.url="";J.Z&&(R="nn"[+J.Z],mW(J),N=J.K[R]||null)&&(N=narray[idx](N),J.set(R,N))}};
funcname, idx = self._search_regex(
r'''(?x)
(?:
@@ -3221,7 +3222,7 @@ def _extract_n_function_name(self, jscode, player_url=None):
)\)&&\(c=|
\b(?P<var>[a-zA-Z0-9_$]+)=
)(?P<nfunc>[a-zA-Z0-9_$]+)(?:\[(?P<idx>\d+)\])?\([a-zA-Z]\)
- (?(var),[a-zA-Z0-9_$]+\.set\("n"\,(?P=var)\),(?P=nfunc)\.length)''',
+ (?(var),[a-zA-Z0-9_$]+\.set\((?:"n+"|[a-zA-Z0-9_$]+)\,(?P=var)\))''',
jscode, 'n function name', group=('nfunc', 'idx'), default=(None, None))
if not funcname:
self.report_warning(join_nonempty(
@@ -3230,7 +3231,7 @@ def _extract_n_function_name(self, jscode, player_url=None):
return self._search_regex(
r'''(?xs)
;\s*(?P<name>[a-zA-Z0-9_$]+)\s*=\s*function\([a-zA-Z0-9_$]+\)
- \s*\{(?:(?!};).)+?["']enhanced_except_''',
+ \s*\{(?:(?!};).)+?return\s*(?P<q>["'])[\w-]+_w8_(?P=q)\s*\+\s*[a-zA-Z0-9_$]+''',
jscode, 'Initial JS player n function name', group='name')
elif not idx:
return funcname
@@ -3239,6 +3240,11 @@ def _extract_n_function_name(self, jscode, player_url=None):
rf'var {re.escape(funcname)}\s*=\s*(\[.+?\])\s*[,;]', jscode,
f'Initial JS player n function list ({funcname}.{idx})')))[int(idx)]
+ def _fixup_n_function_code(self, argnames, code):
+ return argnames, re.sub(
+ rf';\s*if\s*\(\s*typeof\s+[a-zA-Z0-9_$]+\s*===?\s*(["\'])undefined\1\s*\)\s*return\s+{argnames[0]};',
+ ';', code)
+
def _extract_n_function_code(self, video_id, player_url):
player_id = self._extract_player_info(player_url)
func_code = self.cache.load('youtube-nsig', player_id, min_ver='2024.07.09')
@@ -3250,7 +3256,8 @@ def _extract_n_function_code(self, video_id, player_url):
func_name = self._extract_n_function_name(jscode, player_url=player_url)
- func_code = jsi.extract_function_code(func_name)
+ # XXX: Workaround for the `typeof` gotcha
+ func_code = self._fixup_n_function_code(*jsi.extract_function_code(func_name))
self.cache.store('youtube-nsig', player_id, func_code)
return jsi, player_id, func_code
@@ -3266,7 +3273,7 @@ def extract_nsig(s):
except Exception as e:
raise JSInterpreter.Exception(traceback.format_exc(), cause=e)
- if ret.startswith('enhanced_except_'):
+ if ret.startswith('enhanced_except_') or ret.endswith(f'_w8_{s}'):
raise JSInterpreter.Exception('Signature function returned an exception')
return ret
| diff --git a/test/test_youtube_signature.py b/test/test_youtube_signature.py
index 0f7ae34f44f5..1823af378f94 100644
--- a/test/test_youtube_signature.py
+++ b/test/test_youtube_signature.py
@@ -183,6 +183,10 @@
'https://www.youtube.com/s/player/b12cc44b/player_ias.vflset/en_US/base.js',
'keLa5R2U00sR9SQK', 'N1OGyujjEwMnLw',
),
+ (
+ 'https://www.youtube.com/s/player/3bb1f723/player_ias.vflset/en_US/base.js',
+ 'gK15nzVyaXE9RsMP3z', 'ZFFWFLPWx9DEgQ',
+ ),
]
@@ -254,8 +258,11 @@ def signature(jscode, sig_input):
def n_sig(jscode, sig_input):
- funcname = YoutubeIE(FakeYDL())._extract_n_function_name(jscode)
- return JSInterpreter(jscode).call_function(funcname, sig_input)
+ ie = YoutubeIE(FakeYDL())
+ funcname = ie._extract_n_function_name(jscode)
+ jsi = JSInterpreter(jscode)
+ func = jsi.extract_function_from_code(*ie._fixup_n_function_code(*jsi.extract_function_code(funcname)))
+ return func([sig_input])
make_sig_test = t_factory(
| [YouTube] player `3bb1f723`: nsig extraction failed: Some formats may be missing
### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE
- [X] I understand that I will be **blocked** if I *intentionally* remove or skip any mandatory\* field
### Checklist
- [X] I'm reporting a bug unrelated to a specific site
- [X] I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details
- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#video-url-contains-an-ampersand--and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
- [X] I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates
- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
### Provide a description that is worded well enough to be understood
The function `_extract_n_function_name` in `extractor/youtube.py` does not work with the player ID `3bb1f723`.
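
Judging from the workaround in the diff above (`_fixup_n_function_code`), the new player's n-function begins with a `typeof` guard that makes the sandboxed interpreter return its input unchanged, so the guard is stripped before the function is run. A small standalone sketch of that strip step (the sample JS string below is invented for illustration):

```python
import re

def fixup_n_function_code(argnames, code):
    # Remove the early-return guard `;if(typeof X==="undefined")return <arg>;`
    # so the interpreter executes the real transformation instead of handing
    # the "n" value back unchanged.
    guard = (r';\s*if\s*\(\s*typeof\s+[a-zA-Z0-9_$]+\s*===?\s*'
             r'(["\'])undefined\1\s*\)\s*return\s+' + re.escape(argnames[0]) + r';')
    return argnames, re.sub(guard, ';', code)

# Invented example of what an extracted function body might look like:
args, body = fixup_n_function_code(
    ['J'], 'var N=J.split("");if(typeof R==="undefined")return J;N.reverse();')
print(body)  # var N=J.split("");N.reverse();
```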
### Provide verbose output that clearly demonstrates the problem
- [X] Run **your** yt-dlp command with **-vU** flag added (`yt-dlp -vU <your command line>`)
- [ ] If using API, add `'verbose': True` to `YoutubeDL` params instead
- [X] Copy the WHOLE output (starting with `[debug] Command-line config`) and insert it below
### Complete Verbose Output
```shell
[debug] Command-line config: ['https://www.youtube.com/watch?v=2yJgwwDcgV8', '--extractor-args', 'youtube:player_client=mweb', '--no-config', '-v', '-F', '--cookies', 'cookies28.txt']
[debug] Encodings: locale cp1252, fs utf-8, pref cp1252, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version [email protected] from yt-dlp/yt-dlp-master-builds [fca3eb5f8] (win_exe)
[debug] Python 3.10.11 (CPython AMD64 64bit) - Windows-10-10.0.22631-SP0 (OpenSSL 1.1.1t 7 Feb 2023)
[debug] exe versions: ffmpeg 7.1-full_build-www.gyan.dev (setts), ffprobe 7.1-full_build-www.gyan.dev, phantomjs 2.5.0, rtmpdump 2.4
[debug] Optional libraries: Cryptodome-3.21.0, brotli-1.1.0, certifi-2024.08.30, curl_cffi-0.5.10, mutagen-1.47.0, requests-2.32.3, sqlite3-3.40.1, urllib3-2.2.3, websockets-14.1
[debug] Proxy map: {}
[debug] Request Handlers: urllib, requests, websockets, curl_cffi
[debug] Loaded 1837 extractors
[youtube] Extracting URL: https://www.youtube.com/watch?v=2yJgwwDcgV8
[youtube] 2yJgwwDcgV8: Downloading webpage
[youtube] 2yJgwwDcgV8: Downloading mweb player API JSON
[youtube] 2yJgwwDcgV8: Downloading player 3bb1f723
WARNING: [youtube] Falling back to generic n function search
player = https://www.youtube.com/s/player/3bb1f723/player_ias.vflset/en_US/base.js
WARNING: [youtube] 2yJgwwDcgV8: nsig extraction failed: Some formats may be missing
n = biFW29aV5aoB3znbkq ; player = https://www.youtube.com/s/player/3bb1f723/player_ias.vflset/en_US/base.js
[debug] [youtube] Unable to extract nsig function code (caused by RegexNotFoundError('Unable to extract \x1b[0;94mInitial JS player n function name\x1b[0m; please report this issue on https://github.com/yt-dlp/yt-dlp/issues?q= , filling out the appropriate issue template. Confirm you are on the latest version using yt-dlp -U')); please report this issue on https://github.com/yt-dlp/yt-dlp/issues?q= , filling out the appropriate issue template. Confirm you are on the latest version using yt-dlp -U
WARNING: [youtube] Falling back to generic n function search
player = https://www.youtube.com/s/player/3bb1f723/player_ias.vflset/en_US/base.js
WARNING: [youtube] 2yJgwwDcgV8: nsig extraction failed: Some formats may be missing
n = g_F_AdRs3sNRjsOIzh ; player = https://www.youtube.com/s/player/3bb1f723/player_ias.vflset/en_US/base.js
WARNING: Only images are available for download. use --list-formats to see them
[debug] Sort order given by extractor: quality, res, fps, hdr:12, source, vcodec, channels, acodec, lang, proto
[debug] Formats sorted by: hasvid, ie_pref, quality, res, fps, hdr:12(7), source, vcodec, channels, acodec, lang, proto, size, br, asr, vext, aext, hasaud, id
[info] Available formats for 2yJgwwDcgV8:
ID EXT RESOLUTION FPS │ PROTO │ VCODEC MORE INFO
────────────────────────────────────────────────────
sb2 mhtml 48x27 0 │ mhtml │ images storyboard
sb1 mhtml 67x45 1 │ mhtml │ images storyboard
sb0 mhtml 135x90 1 │ mhtml │ images storyboard
```
| 1,733,489,646,000 | [
"high-priority",
"site-bug",
"site:youtube"
] | Bug Report | [
"yt_dlp/extractor/youtube.py:YoutubeIE._extract_n_function_name",
"yt_dlp/extractor/youtube.py:YoutubeIE._extract_n_function_code",
"yt_dlp/extractor/youtube.py:YoutubeIE._extract_n_function_from_code"
] | [
"yt_dlp/extractor/youtube.py:YoutubeIE._fixup_n_function_code"
] | 3 |
|
yt-dlp/yt-dlp | yt-dlp__yt-dlp-11438 | a6783a3b9905e547f6c1d4df9d7c7999feda8afa | diff --git a/yt_dlp/aes.py b/yt_dlp/aes.py
index abf54a998e0e..be67b40fe215 100644
--- a/yt_dlp/aes.py
+++ b/yt_dlp/aes.py
@@ -230,11 +230,11 @@ def aes_gcm_decrypt_and_verify(data, key, tag, nonce):
iv_ctr = inc(j0)
decrypted_data = aes_ctr_decrypt(data, key, iv_ctr + [0] * (BLOCK_SIZE_BYTES - len(iv_ctr)))
- pad_len = len(data) // 16 * 16
+ pad_len = (BLOCK_SIZE_BYTES - (len(data) % BLOCK_SIZE_BYTES)) % BLOCK_SIZE_BYTES
s_tag = ghash(
hash_subkey,
data
- + [0] * (BLOCK_SIZE_BYTES - len(data) + pad_len) # pad
+ + [0] * pad_len # pad
+ bytes_to_intlist((0 * 8).to_bytes(8, 'big') # length of associated data
+ ((len(data) * 8).to_bytes(8, 'big'))), # length of data
)
| diff --git a/test/test_aes.py b/test/test_aes.py
index 5f975efecfa4..6fe6059a17ad 100644
--- a/test/test_aes.py
+++ b/test/test_aes.py
@@ -83,6 +83,18 @@ def test_gcm_decrypt(self):
data, intlist_to_bytes(self.key), authentication_tag, intlist_to_bytes(self.iv[:12]))
self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg)
+ def test_gcm_aligned_decrypt(self):
+ data = b'\x159Y\xcf5eud\x90\x9c\x85&]\x14\x1d\x0f'
+ authentication_tag = b'\x08\xb1\x9d!&\x98\xd0\xeaRq\x90\xe6;\xb5]\xd8'
+
+ decrypted = intlist_to_bytes(aes_gcm_decrypt_and_verify(
+ list(data), self.key, list(authentication_tag), self.iv[:12]))
+ self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg[:16])
+ if Cryptodome.AES:
+ decrypted = aes_gcm_decrypt_and_verify_bytes(
+ data, bytes(self.key), authentication_tag, bytes(self.iv[:12]))
+ self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg[:16])
+
def test_decrypt_text(self):
password = intlist_to_bytes(self.key).decode()
encrypted = base64.b64encode(
| Bug in Native AES code causes chrome cookie extraction to fail
### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE
- [X] I understand that I will be **blocked** if I *intentionally* remove or skip any mandatory\* field
### Checklist
- [X] I'm reporting a bug unrelated to a specific site
- [X] I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details
- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#video-url-contains-an-ampersand--and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
- [X] I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates
- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
### Provide a description that is worded well enough to be understood
The Native AES code has a bug causing Chromium cookie decryption to fail. This affects yt-dlp installs without pycryptodomex, like the py2exe builds.
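
The one-line change in the diff above is purely about how many zero bytes get appended before the GHASH authentication step. Here is a small standalone sketch of that arithmetic (the helper names are mine, not yt-dlp's); the two formulas only disagree when the ciphertext length is already a multiple of the 16-byte block size, which is why only some cookies fail the MAC check:

```python
BLOCK_SIZE_BYTES = 16

def ghash_pad_old(data_len):
    # Previous computation: appends a full spurious block of zeros
    # whenever the data is already block-aligned.
    pad_len = data_len // 16 * 16
    return BLOCK_SIZE_BYTES - data_len + pad_len

def ghash_pad_new(data_len):
    # Fixed computation: pad up to the next block boundary, and not at all
    # when the data length is already a multiple of the block size.
    return (BLOCK_SIZE_BYTES - (data_len % BLOCK_SIZE_BYTES)) % BLOCK_SIZE_BYTES

for n in (15, 16, 20, 32):
    print(n, ghash_pad_old(n), ghash_pad_new(n))
# 15 -> 1, 1   (agree)
# 16 -> 16, 0  (old formula feeds 16 bogus zero bytes into GHASH)
# 20 -> 12, 12 (agree)
# 32 -> 16, 0
```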
### Provide verbose output that clearly demonstrates the problem
- [X] Run **your** yt-dlp command with **-vU** flag added (`yt-dlp -vU <your command line>`)
- [ ] If using API, add `'verbose': True` to `YoutubeDL` params instead
- [X] Copy the WHOLE output (starting with `[debug] Command-line config`) and insert it below
### Complete Verbose Output
```shell
[debug] Command-line config: ['-v', '--cookies-from-browser', 'chromium']
[debug] Encodings: locale cp1252, fs utf-8, pref cp1252, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version [email protected] from yt-dlp/yt-dlp-nightly-builds [add96eb9f] (pip)
[debug] Python 3.12.3 (CPython AMD64 64bit) - Windows-11-10.0.22631-SP0 (OpenSSL 3.0.13 30 Jan 2024)
[debug] exe versions: ffmpeg 7.0.1-full_build-www.gyan.dev (setts), ffprobe 7.0.1-full_build-www.gyan.dev, phantomjs 2.5.0, rtmpdump 2.4
[debug] Optional libraries: sqlite3-3.45.1
[debug] Proxy map: {}
Extracting cookies from chromium
[debug] Extracting cookies from: "C:\Users\User\AppData\Local\Chromium\User Data\Default\Network\Cookies"
[debug] Found local state file at "C:\Users\User\AppData\Local\Chromium\User Data\Local State"
[Cookies] Loading cookie 0/ 3702WARNING: failed to decrypt cookie (AES-GCM) because the MAC check failed. Possibly the key is wrong?
Extracted 3077 cookies from chromium (380 could not be decrypted)
[debug] cookie version breakdown: {'v10': 3592, 'other': 0, 'unencrypted': 110}
[debug] Request Handlers: urllib
[debug] Loaded 1820 extractors
Usage: __main__.py [OPTIONS] URL [URL...]
__main__.py: error: You must provide at least one URL.
Type yt-dlp --help to see a list of all options.
```
| 1,730,580,100,000 | [
"bug"
] | Bug Report | [
"yt_dlp/aes.py:aes_gcm_decrypt_and_verify"
] | [] | 1 |
|
yt-dlp/yt-dlp | yt-dlp__yt-dlp-11198 | 983c58fb7a809d827b5821d493819da954f2c00b | diff --git a/yt_dlp/utils/_utils.py b/yt_dlp/utils/_utils.py
index e1b3c48d6339..967f01fdf941 100644
--- a/yt_dlp/utils/_utils.py
+++ b/yt_dlp/utils/_utils.py
@@ -664,31 +664,51 @@ def replace_insane(char):
return result
+def _sanitize_path_parts(parts):
+ sanitized_parts = []
+ for part in parts:
+ if not part or part == '.':
+ continue
+ elif part == '..':
+ if sanitized_parts and sanitized_parts[-1] != '..':
+ sanitized_parts.pop()
+ sanitized_parts.append('..')
+ continue
+ # Replace invalid segments with `#`
+ # - trailing dots and spaces (`asdf...` => `asdf..#`)
+ # - invalid chars (`<>` => `##`)
+ sanitized_part = re.sub(r'[/<>:"\|\\?\*]|[\s.]$', '#', part)
+ sanitized_parts.append(sanitized_part)
+
+ return sanitized_parts
+
+
def sanitize_path(s, force=False):
"""Sanitizes and normalizes path on Windows"""
- # XXX: this handles drive relative paths (c:sth) incorrectly
- if sys.platform == 'win32':
- force = False
- drive_or_unc, _ = os.path.splitdrive(s)
- elif force:
- drive_or_unc = ''
+ if sys.platform != 'win32':
+ if not force:
+ return s
+ root = '/' if s.startswith('/') else ''
+ return root + '/'.join(_sanitize_path_parts(s.split('/')))
+
+ normed = s.replace('/', '\\')
+
+ if normed.startswith('\\\\'):
+ # UNC path (`\\SERVER\SHARE`) or device path (`\\.`, `\\?`)
+ parts = normed.split('\\')
+ root = '\\'.join(parts[:4]) + '\\'
+ parts = parts[4:]
+ elif normed[1:2] == ':':
+ # absolute path or drive relative path
+ offset = 3 if normed[2:3] == '\\' else 2
+ root = normed[:offset]
+ parts = normed[offset:].split('\\')
else:
- return s
+ # relative/drive root relative path
+ root = '\\' if normed[:1] == '\\' else ''
+ parts = normed.split('\\')
- norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
- if drive_or_unc:
- norm_path.pop(0)
- sanitized_path = [
- path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
- for path_part in norm_path]
- if drive_or_unc:
- sanitized_path.insert(0, drive_or_unc + os.path.sep)
- elif force and s and s[0] == os.path.sep:
- sanitized_path.insert(0, os.path.sep)
- # TODO: Fix behavioral differences <3.12
- # The workaround using `normpath` only superficially passes tests
- # Ref: https://github.com/python/cpython/pull/100351
- return os.path.normpath(os.path.join(*sanitized_path))
+ return root + '\\'.join(_sanitize_path_parts(parts))
def sanitize_url(url, *, scheme='http'):
| diff --git a/test/test_utils.py b/test/test_utils.py
index 4f5fa1e100af..d4b846f56fba 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -221,9 +221,10 @@ def test_sanitize_ids(self):
self.assertEqual(sanitize_filename('N0Y__7-UOdI', is_id=True), 'N0Y__7-UOdI')
def test_sanitize_path(self):
- if sys.platform != 'win32':
- return
+ with unittest.mock.patch('sys.platform', 'win32'):
+ self._test_sanitize_path()
+ def _test_sanitize_path(self):
self.assertEqual(sanitize_path('abc'), 'abc')
self.assertEqual(sanitize_path('abc/def'), 'abc\\def')
self.assertEqual(sanitize_path('abc\\def'), 'abc\\def')
@@ -256,6 +257,11 @@ def test_sanitize_path(self):
self.assertEqual(sanitize_path('./abc'), 'abc')
self.assertEqual(sanitize_path('./../abc'), '..\\abc')
+ self.assertEqual(sanitize_path('\\abc'), '\\abc')
+ self.assertEqual(sanitize_path('C:abc'), 'C:abc')
+ self.assertEqual(sanitize_path('C:abc\\..\\'), 'C:..')
+ self.assertEqual(sanitize_path('C:\\abc:%(title)s.%(ext)s'), 'C:\\abc#%(title)s.%(ext)s')
+
def test_sanitize_url(self):
self.assertEqual(sanitize_url('//foo.bar'), 'http://foo.bar')
self.assertEqual(sanitize_url('httpss://foo.bar'), 'https://foo.bar')
| `utils.sanitize_path` behavioral differences <3.12
### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE
- [X] I understand that I will be **blocked** if I *intentionally* remove or skip any mandatory\* field
### Checklist
- [X] I'm reporting a bug unrelated to a specific site
- [X] I've verified that I'm running yt-dlp version **2023.07.06** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details
- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#video-url-contains-an-ampersand--and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
- [X] I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates
- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
### Provide a description that is worded well enough to be understood
https://github.com/python/cpython/pull/100351 implements proper splitting for UNC paths.
We should implement compat for Python <3.12 path parsing and sanitization.
See this comparison:
```xonsh
@ py -3.11 -c r'import os.path; print(os.path.splitdrive("\\\\?\\UNC\\ComputerName\\Share\\path"))'
('\\\\?\\UNC', '\\ComputerName\\Share\\path')
@ py -3.12 -c r'import os.path; print(os.path.splitdrive("\\\\?\\UNC\\ComputerName\\Share\\path"))'
('\\\\?\\UNC\\ComputerName\\Share', '\\path')
```
The underlying `nt` function works consistently:
```xonsh
@ py -3.11 -c r'import nt; print(nt._path_splitroot("\\\\?\\UNC\\ComputerName\\Share\\path"))'
('\\\\?\\UNC\\ComputerName\\Share\\', 'path')
@ py -3.12 -c r'import nt; print(nt._path_splitroot("\\\\?\\UNC\\ComputerName\\Share\\path"))'
('\\\\?\\UNC\\ComputerName\\Share\\', 'path')
```
As is also documented [here](https://learn.microsoft.com/en-us/dotnet/standard/io/file-path-formats#evaluate-relative-components).
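
A rough sketch of the direction the fix above takes: derive the Windows root by hand instead of relying on `os.path.splitdrive`, so the result is the same on every Python version (simplified from the patch; the per-segment replacement of invalid characters with `#` is left out):

```python
def split_windows_root(path):
    # Split a Windows path into (root, parts) without os.path.splitdrive,
    # so UNC shares and drive-relative paths are handled consistently.
    normed = path.replace('/', '\\')
    if normed.startswith('\\\\'):
        # UNC (`\\SERVER\SHARE\...`) or device (`\\?\...`) path:
        # keep the first four components together as the root.
        parts = normed.split('\\')
        return '\\'.join(parts[:4]) + '\\', parts[4:]
    if normed[1:2] == ':':
        # Drive-absolute (`C:\x`) or drive-relative (`C:x`) path.
        offset = 3 if normed[2:3] == '\\' else 2
        return normed[:offset], normed[offset:].split('\\')
    # Rooted (`\x`) or plain relative path.
    root = '\\' if normed[:1] == '\\' else ''
    return root, normed.split('\\')

print(split_windows_root(r'\\ComputerName\Share\sub\file.txt'))
# root: \\ComputerName\Share\   parts: ['sub', 'file.txt']
print(split_windows_root('C:abc'))
# root: C:   parts: ['abc']
```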
### Provide verbose output that clearly demonstrates the problem
- [ ] Run **your** yt-dlp command with **-vU** flag added (`yt-dlp -vU <your command line>`)
- [ ] If using API, add `'verbose': True` to `YoutubeDL` params instead
- [ ] Copy the WHOLE output (starting with `[debug] Command-line config`) and insert it below
### Complete Verbose Output
```shell
```
| 1,728,431,747,000 | [
"bug",
"high-priority"
] | Bug Report | [
"yt_dlp/utils/_utils.py:sanitize_path"
] | [
"yt_dlp/utils/_utils.py:_sanitize_path_parts"
] | 1 |
|
stanfordnlp/dspy | stanfordnlp__dspy-1741 | 9391c2a9b386cbbbe27d18206790a40c27503348 | diff --git a/dspy/predict/predict.py b/dspy/predict/predict.py
index 7188044ed..0f93fd278 100644
--- a/dspy/predict/predict.py
+++ b/dspy/predict/predict.py
@@ -10,15 +10,12 @@
from dspy.primitives.program import Module
from dspy.signatures.signature import ensure_signature, signature_to_template
from dspy.utils.callback import with_callbacks
-
-
from dspy.adapters.image_utils import Image
@lru_cache(maxsize=None)
def warn_once(msg: str):
logging.warning(msg)
-
class Predict(Module, Parameter):
def __init__(self, signature, _parse_values=True, callbacks=None, **config):
self.stage = random.randbytes(8).hex()
@@ -71,10 +68,13 @@ def load_state(self, state, use_legacy_loading=False):
state (dict): The saved state of a `Predict` object.
use_legacy_loading (bool): Whether to use the legacy loading method. Only use it when you are loading a
saved state from a version of DSPy prior to v2.5.3.
+ Returns:
+ self: Returns self to allow method chaining
"""
if use_legacy_loading:
self._load_state_legacy(state)
- return
+ return self
+
if "signature" not in state:
# Check if the state is from a version of DSPy prior to v2.5.3.
raise ValueError(
@@ -102,10 +102,14 @@ def load_state(self, state, use_legacy_loading=False):
if "extended_signature" in state:
self.extended_signature = self.extended_signature.load_state(state["extended_signature"])
+ return self
+
def _load_state_legacy(self, state):
"""Legacy state loading for backwards compatibility.
This method is used to load the saved state of a `Predict` object from a version of DSPy prior to v2.5.3.
+ Returns:
+ self: Returns self to allow method chaining
"""
for name, value in state.items():
setattr(self, name, value)
@@ -130,6 +134,21 @@ def _load_state_legacy(self, state):
*_, last_key = self.extended_signature.fields.keys()
self.extended_signature = self.extended_signature.with_updated_fields(last_key, prefix=prefix)
+ return self
+
+ def load(self, path, return_self=False):
+ """Load a saved state from a file.
+
+ Args:
+ path (str): Path to the saved state file
+ return_self (bool): If True, returns self to allow method chaining. Default is False for backwards compatibility.
+
+ Returns:
+ Union[None, Predict]: Returns None if return_self is False (default), returns self if return_self is True
+ """
+ super().load(path)
+ return self if return_self else None
+
@with_callbacks
def __call__(self, **kwargs):
return self.forward(**kwargs)
@@ -213,8 +232,6 @@ def old_generate(demos, signature, kwargs, config, lm, stage):
with dsp.settings.context(lm=lm, query_only=True):
x, C = dsp.generate(template, **config)(x, stage=stage)
- # assert stage in x, "The generated (input, output) example was not stored"
-
completions = []
for c in C:
@@ -279,7 +296,6 @@ def v2_5_generate(lm, lm_kwargs, signature, demos, inputs, _parse_values=True):
lm, lm_kwargs=lm_kwargs, signature=signature, demos=demos, inputs=inputs, _parse_values=_parse_values
)
-
# TODO: get some defaults during init from the context window?
# # TODO: FIXME: Hmm, I guess expected behavior is that contexts can
# affect execution. Well, we need to determine whether context dominates, __init__ demoninates, or forward dominates.
| diff --git a/tests/predict/test_predict.py b/tests/predict/test_predict.py
index 8d0121eed..3fb4fce4e 100644
--- a/tests/predict/test_predict.py
+++ b/tests/predict/test_predict.py
@@ -218,3 +218,48 @@ class OutputOnlySignature(dspy.Signature):
lm = DummyLM([{"output": "short answer"}])
dspy.settings.configure(lm=lm)
assert predictor().output == "short answer"
+
+
+
+def test_chainable_load(tmp_path):
+ """Test both traditional and chainable load methods."""
+
+ file_path = tmp_path / "test_chainable.json"
+
+
+ original = Predict("question -> answer")
+ original.demos = [{"question": "test", "answer": "response"}]
+ original.save(file_path)
+
+
+ traditional = Predict("question -> answer")
+ traditional.load(file_path)
+ assert traditional.demos == original.demos
+
+
+ chainable = Predict("question -> answer").load(file_path, return_self=True)
+ assert chainable is not None
+ assert chainable.demos == original.demos
+
+
+ assert chainable.signature.dump_state() == original.signature.dump_state()
+
+
+ result = Predict("question -> answer").load(file_path)
+ assert result is None
+
+def test_load_state_chaining():
+ """Test that load_state returns self for chaining."""
+ original = Predict("question -> answer")
+ original.demos = [{"question": "test", "answer": "response"}]
+ state = original.dump_state()
+
+
+ new_instance = Predict("question -> answer").load_state(state)
+ assert new_instance is not None
+ assert new_instance.demos == original.demos
+
+
+ legacy_instance = Predict("question -> answer").load_state(state, use_legacy_loading=True)
+ assert legacy_instance is not None
+ assert legacy_instance.demos == original.demos
\ No newline at end of file
| Add ability to return a predictor instead of modifying in place.
Currently, to load a predictor after optimization, you need to do it in two lines:
```python
predictor = dspy.Predict("question -> answer")
predictor.load(saved_path)
```
Desired behavior
```python
predictor = dspy.Predict("question -> answer").load(saved_path, return_self=True)
```
This fix is as simple as adding a bool to the `load` method of `BaseModule` to return self.
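
A minimal, self-contained sketch of the requested behaviour (toy classes, not dspy's real `BaseModule`/`Predict`; the `return_self` flag defaults to False so the existing two-line flow keeps working):

```python
import json

class BaseModule:
    def load(self, path):
        # Existing behaviour: mutate the instance in place.
        with open(path) as f:
            self.state = json.load(f)

class Predictor(BaseModule):
    def load(self, path, return_self=False):
        super().load(path)
        # Opt-in chaining: hand the instance back so load() can follow the constructor.
        return self if return_self else None

with open("saved.json", "w") as f:
    json.dump({"demos": []}, f)

two_step = Predictor()
two_step.load("saved.json")                                    # current style

one_liner = Predictor().load("saved.json", return_self=True)   # desired style
print(one_liner.state)                                         # {'demos': []}
```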
| 1,730,533,921,000 | [] | Feature Request | [
"dspy/predict/predict.py:Predict.load_state",
"dspy/predict/predict.py:Predict._load_state_legacy"
] | [
"dspy/predict/predict.py:Predict.load"
] | 2 |
|
Qiskit/qiskit | Qiskit__qiskit-13436 | 2bf578eb00808744f80f30324148e946a6214a1d | diff --git a/qiskit/quantum_info/states/statevector.py b/qiskit/quantum_info/states/statevector.py
index 901ce95af424..1265d677abe4 100644
--- a/qiskit/quantum_info/states/statevector.py
+++ b/qiskit/quantum_info/states/statevector.py
@@ -476,7 +476,7 @@ def _expectation_value_pauli(self, pauli, qargs=None):
pauli_phase = (-1j) ** pauli.phase if pauli.phase else 1
if x_mask + z_mask == 0:
- return pauli_phase * np.linalg.norm(self.data)
+ return pauli_phase * np.linalg.norm(self.data) ** 2
if x_mask == 0:
return pauli_phase * expval_pauli_no_x(self.data, self.num_qubits, z_mask)
| diff --git a/releasenotes/notes/fix_identity_operator_9e2ec9770ac046a6.yaml b/releasenotes/notes/fix_identity_operator_9e2ec9770ac046a6.yaml
new file mode 100644
index 000000000000..69779a793cc3
--- /dev/null
+++ b/releasenotes/notes/fix_identity_operator_9e2ec9770ac046a6.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixed a bug that caused :meth:`.Statevector.expectation_value` to yield incorrect results
+ for the identity operator when the statevector was not normalized.
diff --git a/test/python/quantum_info/states/test_statevector.py b/test/python/quantum_info/states/test_statevector.py
index 29d5d42f3783..d16b3b3453ec 100644
--- a/test/python/quantum_info/states/test_statevector.py
+++ b/test/python/quantum_info/states/test_statevector.py
@@ -1152,6 +1152,31 @@ def test_expval_pauli_qargs(self, qubits):
expval = state.expectation_value(op, qubits)
self.assertAlmostEqual(expval, target)
+ def test_expval_identity(self):
+ """Test whether the calculation for identity operator has been fixed"""
+
+ # 1 qubit case test
+ state_1 = Statevector.from_label("0")
+ state_1_n1 = 2 * state_1 # test the same state with different norms
+ state_1_n2 = (1 + 2j) * state_1
+ identity_op_1 = SparsePauliOp.from_list([("I", 1)])
+ expval_state_1 = state_1.expectation_value(identity_op_1)
+ expval_state_1_n1 = state_1_n1.expectation_value(identity_op_1)
+ expval_state_1_n2 = state_1_n2.expectation_value(identity_op_1)
+ self.assertAlmostEqual(expval_state_1, 1.0 + 0j)
+ self.assertAlmostEqual(expval_state_1_n1, 4 + 0j)
+ self.assertAlmostEqual(expval_state_1_n2, 5 + 0j)
+
+ # Let's try a multi-qubit case
+ n_qubits = 3
+ state_coeff = 3 - 4j
+ op_coeff = 2 - 2j
+ state_test = state_coeff * Statevector.from_label("0" * n_qubits)
+ op_test = SparsePauliOp.from_list([("I" * n_qubits, op_coeff)])
+ expval = state_test.expectation_value(op_test)
+ target = op_coeff * np.abs(state_coeff) ** 2
+ self.assertAlmostEqual(expval, target)
+
@data(*(qargs for i in range(4) for qargs in permutations(range(4), r=i + 1)))
def test_probabilities_qargs(self, qargs):
"""Test probabilities method with qargs"""
| Statevector._expectation_value_pauli returns incorrect result in certain cases
The calculation for the expectation value of the __identity operator__ seems incorrect when the statevector is __not normalized__ to 1.
I think the reason is that one should use the norm squared rather than the norm in the following lines.
https://github.com/Qiskit/qiskit/blob/1be2c5aa103f1991f2fdc75a77a09f82635b8431/qiskit/quantum_info/states/statevector.py#L478-L479
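
For a quick check, write $|\psi\rangle = \sum_i c_i\,|i\rangle$; then

$$\langle\psi|\,I\,|\psi\rangle \;=\; \sum_i |c_i|^2 \;=\; \lVert\psi\rVert^2,$$

whereas the code above returns $\lVert\psi\rVert = \sqrt{\sum_i |c_i|^2}$, so a statevector scaled by 2 gives 2 instead of the expected 4.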
---
The whole function is attached below to provide context.
https://github.com/Qiskit/qiskit/blob/1be2c5aa103f1991f2fdc75a77a09f82635b8431/qiskit/quantum_info/states/statevector.py#L458-L491
| Please can you show a reproducing code block following the bug report template, and the output you expected?
Sure. Here it is
```python
from qiskit.quantum_info import Statevector, SparsePauliOp, Pauli
state_normed = Statevector.from_label("0")
state_not_normed = 2 * state_normed
identity_op = SparsePauliOp.from_list([("I", 1)])
# using the public interface (expectation_value)
print(state_not_normed.expectation_value(identity_op))
# using _expectation_value_pauli (the backend)
print(state_not_normed._expectation_value_pauli(Pauli('I')))
```
## Environment
Python 3.11.6
qiskit 0.45.3
Judging from the source code on GitHub, the issue should be platform-independent and exist across different versions.
## What is the expected result
The two print statements should give `4+0j` and `4.0` respectively.
## What is the actual result
The two print statements gave `2+0j` and `2.0` respectively.
Nice catch, that seems to be unique only to the Pauli-I operator. Would you like to open a PR to fix the issue? 🙂
Sure, I have created one PR. | 1,731,543,381,000 | [
"Changelog: Bugfix",
"Community PR"
] | Bug Report | [
"qiskit/quantum_info/states/statevector.py:Statevector._expectation_value_pauli"
] | [] | 1 |
Qiskit/qiskit | Qiskit__qiskit-12214 | d10f9a0fe1f73cde5b66e49af880a6f7816f131f | diff --git a/qiskit/transpiler/preset_passmanagers/__init__.py b/qiskit/transpiler/preset_passmanagers/__init__.py
index d8a7fc9b5158..8d653ed3a1a9 100644
--- a/qiskit/transpiler/preset_passmanagers/__init__.py
+++ b/qiskit/transpiler/preset_passmanagers/__init__.py
@@ -136,7 +136,7 @@ def generate_preset_pass_manager(
instruction_durations (InstructionDurations): Dictionary of duration
(in dt) for each instruction.
timing_constraints (TimingConstraints): Hardware time alignment restrictions.
- initial_layout (Layout): Initial position of virtual qubits on
+ initial_layout (Layout | List[int]): Initial position of virtual qubits on
physical qubits.
layout_method (str): The :class:`~.Pass` to use for choosing initial qubit
placement. Valid choices are ``'trivial'``, ``'dense'``,
| diff --git a/test/python/transpiler/test_preset_passmanagers.py b/test/python/transpiler/test_preset_passmanagers.py
index 0382c83ab754..ae1837bf111b 100644
--- a/test/python/transpiler/test_preset_passmanagers.py
+++ b/test/python/transpiler/test_preset_passmanagers.py
@@ -1436,6 +1436,38 @@ def test_generate_preset_pass_manager_with_list_coupling_map(self):
# Ensure the DAGs from both methods are identical
self.assertEqual(transpiled_circuit_list, transpiled_circuit_object)
+ @data(0, 1, 2, 3)
+ def test_generate_preset_pass_manager_with_list_initial_layout(self, optimization_level):
+ """Test that generate_preset_pass_manager can handle list based initial layouts."""
+ coupling_map_list = [[0, 1]]
+
+ # Circuit that doesn't fit in the coupling map
+ qc = QuantumCircuit(2)
+ qc.h(0)
+ qc.cx(0, 1)
+ qc.cx(1, 0)
+ qc.measure_all()
+
+ pm_list = generate_preset_pass_manager(
+ optimization_level=optimization_level,
+ coupling_map=coupling_map_list,
+ basis_gates=["u", "cx"],
+ seed_transpiler=42,
+ initial_layout=[1, 0],
+ )
+ pm_object = generate_preset_pass_manager(
+ optimization_level=optimization_level,
+ coupling_map=coupling_map_list,
+ basis_gates=["u", "cx"],
+ seed_transpiler=42,
+ initial_layout=Layout.from_intlist([1, 0], *qc.qregs),
+ )
+ tqc_list = pm_list.run(qc)
+ tqc_obj = pm_list.run(qc)
+ self.assertIsInstance(pm_list, PassManager)
+ self.assertIsInstance(pm_object, PassManager)
+ self.assertEqual(tqc_list, tqc_obj)
+
@ddt
class TestIntegrationControlFlow(QiskitTestCase):
| generate_preset_pass_manager should take initial_layout as a list
### What should we add?
Now that https://github.com/Qiskit/qiskit/pull/10344 is merged, we should make the `generate_preset_pass_manager` function take an integer list for layout. This is a user entry point and should be easy to use. The Layout object is a pain to use all around (I would love to completely eliminate it, since it doesn't contain any more information than a list of ints; the circuit qubits have an order).
| We have a discussion about refactoring the `Layout` class in #11604 , @ajavadia could you give your opinion there? :-)
I think there are some other issues with the `generate_preset_pass_manager`. Even when an actual Layout object is supplied, it fails with `TranspilerError: 'Sabre swap runs on physical circuits only.'`. Needs more exploration.
@ajavadia do you have a re-create? I wrote a quick test to check this, and it seems to at least work for my test:
```python
coupling_map_list = [[0, 1]]
# Circuit that doesn't fit in the coupling map
qc = QuantumCircuit(2)
qc.h(0)
qc.cx(0, 1)
qc.cx(1, 0)
qc.measure_all()
pm_list = generate_preset_pass_manager(
optimization_level=optimization_level,
coupling_map=coupling_map_list,
basis_gates=["u", "cx"],
seed_transpiler=42,
initial_layout=[1, 0],
)
pm_object = generate_preset_pass_manager(
optimization_level=optimization_level,
coupling_map=coupling_map_list,
basis_gates=["u", "cx"],
seed_transpiler=42,
initial_layout=Layout.from_intlist([1, 0], *qc.qregs),
)
tqc_list = pm_list.run(qc)
tqc_obj = pm_list.run(qc)
self.assertIsInstance(pm_list, PassManager)
self.assertIsInstance(pm_object, PassManager)
self.assertEqual(tqc_list, tqc_obj)
```
(I checked the output circuits and they worked fine). Is the issue just the type hint for the argument? | 1,713,474,509,000 | [
"documentation",
"Changelog: None",
"mod: transpiler"
] | Feature Request | [
"qiskit/transpiler/preset_passmanagers/__init__.py:generate_preset_pass_manager"
] | [] | 1 |
conan-io/conan | conan-io__conan-17369 | 7f88c7ef4e063489f087623fca6644eb420b4963 | diff --git a/conan/tools/scm/git.py b/conan/tools/scm/git.py
index c1aeb577a74..f3578e2c9ff 100644
--- a/conan/tools/scm/git.py
+++ b/conan/tools/scm/git.py
@@ -231,6 +231,7 @@ def fetch_commit(self, url, commit, hide_url=True):
"""
if os.path.exists(url):
url = url.replace("\\", "/") # Windows local directory
+ mkdir(self.folder)
self._conanfile.output.info("Shallow fetch of git repo")
self.run('init')
self.run(f'remote add origin "{url}"', hidden_output=url if hide_url else None)
| diff --git a/test/functional/tools/scm/test_git.py b/test/functional/tools/scm/test_git.py
index 47214e461bc..ceef1e68610 100644
--- a/test/functional/tools/scm/test_git.py
+++ b/test/functional/tools/scm/test_git.py
@@ -504,6 +504,46 @@ def source(self):
c.run("source .")
assert f'conanfile.py (pkg/0.1): RUN: git remote add origin "{url}"' in c.out
+ @pytest.mark.skipif(platform.system() == "Linux", reason="Git version in Linux not support it")
+ def test_clone_to_subfolder(self):
+ conanfile = textwrap.dedent("""
+ import os
+ from conan import ConanFile
+ from conan.tools.scm import Git
+ from conan.tools.files import load
+
+ class Pkg(ConanFile):
+ name = "pkg"
+ version = "0.1"
+
+ def layout(self):
+ self.folders.source = "source"
+
+ def source(self):
+ git = Git(self, folder="folder")
+ git.fetch_commit(url="{url}", commit="{commit}")
+ self.output.info("MYCMAKE: {{}}".format(load(self, "folder/CMakeLists.txt")))
+ self.output.info("MYFILE: {{}}".format(load(self, "folder/src/myfile.h")))
+ """)
+ folder = os.path.join(temp_folder(), "myrepo")
+ url, commit = create_local_git_repo(files={"src/myfile.h": "myheader!",
+ "CMakeLists.txt": "mycmake"}, folder=folder)
+ # This second commit will NOT be used, as I will use the above commit in the conanfile
+ save_files(path=folder, files={"src/myfile.h": "my2header2!"})
+ git_add_changes_commit(folder=folder)
+
+ c = TestClient()
+ c.save({"conanfile.py": conanfile.format(url=url, commit=commit)})
+ c.run("create . -v")
+ assert "pkg/0.1: MYCMAKE: mycmake" in c.out
+ assert "pkg/0.1: MYFILE: myheader!" in c.out
+
+ # It also works in local flow
+ c.run("source .")
+ assert "conanfile.py (pkg/0.1): MYCMAKE: mycmake" in c.out
+ assert "conanfile.py (pkg/0.1): MYFILE: myheader!" in c.out
+ assert c.load("source/folder/CMakeLists.txt") == "mycmake"
+ assert c.load("source/folder/src/myfile.h") == "myheader!"
class TestGitCloneWithArgs:
""" Git cloning passing additional arguments
| [bug] git.fetch_commit() throws an error when downloading sources to subfolder
### Describe the bug
The `conan.tools.scm.Git.fetch_commit()` method throws an error when the `folder` argument is passed to the `Git` class constructor:
```python
def source(self):
git = Git(self, folder="myFolder")
git.fetch_commit(url, commit=self.conan_data["sources"]["commit"])
```
Error message:
```bash
ERROR: mypackage/1.2.3: Error in source() method, line 65
git.fetch_commit(url, commit=self.conan_data["sources"]["commit"])
FileNotFoundError: [Errno 2] No such file or directory: 'myFolder'
```
It seems that `mkdir(self.folder)` is missing from the body of the `fetch_commit()` method, similar to what is done in the `clone()` method.
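
Until that is fixed, an untested workaround sketch is to create the folder in the recipe before the fetch (the `conan_data` keys below are assumed to match the recipe's `conandata.yml`):

```python
import os
from conan import ConanFile
from conan.tools.scm import Git

class Pkg(ConanFile):
    name = "mypackage"
    version = "1.2.3"

    def source(self):
        # fetch_commit() does not create the target folder itself (unlike clone()),
        # so make it up front to avoid the FileNotFoundError.
        os.makedirs("myFolder", exist_ok=True)
        git = Git(self, folder="myFolder")
        git.fetch_commit(self.conan_data["sources"]["url"],
                         commit=self.conan_data["sources"]["commit"])
```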
### How to reproduce it
_No response_
| Hi @bkarasm
Thanks for your report.
It seems it could be a small UX improvement (what we call a ``fix``, not a ``bugfix``), and I think it should be very straightforward. Would you like to do it yourself and contribute a PR?
Sure, I can give it a try. | 1,732,485,597,000 | [] | Bug Report | [
"conan/tools/scm/git.py:Git.fetch_commit"
] | [] | 1 |
python/mypy | python__mypy-18293 | c4f5056d6c43db556b5215cb3c330fcde25a77cd | diff --git a/mypy/plugins/default.py b/mypy/plugins/default.py
index 73c5742614ee..03cb379a8173 100644
--- a/mypy/plugins/default.py
+++ b/mypy/plugins/default.py
@@ -304,11 +304,12 @@ def typed_dict_pop_callback(ctx: MethodContext) -> Type:
and len(ctx.arg_types) >= 1
and len(ctx.arg_types[0]) == 1
):
- keys = try_getting_str_literals(ctx.args[0][0], ctx.arg_types[0][0])
+ key_expr = ctx.args[0][0]
+ keys = try_getting_str_literals(key_expr, ctx.arg_types[0][0])
if keys is None:
ctx.api.fail(
message_registry.TYPEDDICT_KEY_MUST_BE_STRING_LITERAL,
- ctx.context,
+ key_expr,
code=codes.LITERAL_REQ,
)
return AnyType(TypeOfAny.from_error)
@@ -316,13 +317,13 @@ def typed_dict_pop_callback(ctx: MethodContext) -> Type:
value_types = []
for key in keys:
if key in ctx.type.required_keys:
- ctx.api.msg.typeddict_key_cannot_be_deleted(ctx.type, key, ctx.context)
+ ctx.api.msg.typeddict_key_cannot_be_deleted(ctx.type, key, key_expr)
value_type = ctx.type.items.get(key)
if value_type:
value_types.append(value_type)
else:
- ctx.api.msg.typeddict_key_not_found(ctx.type, key, ctx.context)
+ ctx.api.msg.typeddict_key_not_found(ctx.type, key, key_expr)
return AnyType(TypeOfAny.from_error)
if len(ctx.args[1]) == 0:
@@ -363,27 +364,29 @@ def typed_dict_setdefault_callback(ctx: MethodContext) -> Type:
and len(ctx.arg_types[0]) == 1
and len(ctx.arg_types[1]) == 1
):
- keys = try_getting_str_literals(ctx.args[0][0], ctx.arg_types[0][0])
+ key_expr = ctx.args[0][0]
+ keys = try_getting_str_literals(key_expr, ctx.arg_types[0][0])
if keys is None:
ctx.api.fail(
message_registry.TYPEDDICT_KEY_MUST_BE_STRING_LITERAL,
- ctx.context,
+ key_expr,
code=codes.LITERAL_REQ,
)
return AnyType(TypeOfAny.from_error)
assigned_readonly_keys = ctx.type.readonly_keys & set(keys)
if assigned_readonly_keys:
- ctx.api.msg.readonly_keys_mutated(assigned_readonly_keys, context=ctx.context)
+ ctx.api.msg.readonly_keys_mutated(assigned_readonly_keys, context=key_expr)
default_type = ctx.arg_types[1][0]
+ default_expr = ctx.args[1][0]
value_types = []
for key in keys:
value_type = ctx.type.items.get(key)
if value_type is None:
- ctx.api.msg.typeddict_key_not_found(ctx.type, key, ctx.context)
+ ctx.api.msg.typeddict_key_not_found(ctx.type, key, key_expr)
return AnyType(TypeOfAny.from_error)
# The signature_callback above can't always infer the right signature
@@ -392,7 +395,7 @@ def typed_dict_setdefault_callback(ctx: MethodContext) -> Type:
# default can be assigned to all key-value pairs we're updating.
if not is_subtype(default_type, value_type):
ctx.api.msg.typeddict_setdefault_arguments_inconsistent(
- default_type, value_type, ctx.context
+ default_type, value_type, default_expr
)
return AnyType(TypeOfAny.from_error)
@@ -409,20 +412,21 @@ def typed_dict_delitem_callback(ctx: MethodContext) -> Type:
and len(ctx.arg_types) == 1
and len(ctx.arg_types[0]) == 1
):
- keys = try_getting_str_literals(ctx.args[0][0], ctx.arg_types[0][0])
+ key_expr = ctx.args[0][0]
+ keys = try_getting_str_literals(key_expr, ctx.arg_types[0][0])
if keys is None:
ctx.api.fail(
message_registry.TYPEDDICT_KEY_MUST_BE_STRING_LITERAL,
- ctx.context,
+ key_expr,
code=codes.LITERAL_REQ,
)
return AnyType(TypeOfAny.from_error)
for key in keys:
if key in ctx.type.required_keys or key in ctx.type.readonly_keys:
- ctx.api.msg.typeddict_key_cannot_be_deleted(ctx.type, key, ctx.context)
+ ctx.api.msg.typeddict_key_cannot_be_deleted(ctx.type, key, key_expr)
elif key not in ctx.type.items:
- ctx.api.msg.typeddict_key_not_found(ctx.type, key, ctx.context)
+ ctx.api.msg.typeddict_key_not_found(ctx.type, key, key_expr)
return ctx.default_return_type
| diff --git a/test-data/unit/check-columns.test b/test-data/unit/check-columns.test
index 44524b9df943..fd1778af59ab 100644
--- a/test-data/unit/check-columns.test
+++ b/test-data/unit/check-columns.test
@@ -227,9 +227,19 @@ class D(TypedDict):
x: int
t: D = {'x':
'y'} # E:5: Incompatible types (expression has type "str", TypedDict item "x" has type "int")
+s: str
if int():
- del t['y'] # E:5: TypedDict "D" has no key "y"
+ del t[s] # E:11: Expected TypedDict key to be string literal
+ del t["x"] # E:11: Key "x" of TypedDict "D" cannot be deleted
+ del t["y"] # E:11: TypedDict "D" has no key "y"
+
+t.pop(s) # E:7: Expected TypedDict key to be string literal
+t.pop("y") # E:7: TypedDict "D" has no key "y"
+
+t.setdefault(s, 123) # E:14: Expected TypedDict key to be string literal
+t.setdefault("x", "a") # E:19: Argument 2 to "setdefault" of "TypedDict" has incompatible type "str"; expected "int"
+t.setdefault("y", 123) # E:14: TypedDict "D" has no key "y"
[builtins fixtures/dict.pyi]
[typing fixtures/typing-typeddict.pyi]
diff --git a/test-data/unit/check-literal.test b/test-data/unit/check-literal.test
index b2d3024d3b44..cff6e07670a7 100644
--- a/test-data/unit/check-literal.test
+++ b/test-data/unit/check-literal.test
@@ -1909,8 +1909,9 @@ reveal_type(d.get(a_key, u)) # N: Revealed type is "Union[builtins.int, __main_
reveal_type(d.get(b_key, u)) # N: Revealed type is "Union[builtins.str, __main__.Unrelated]"
reveal_type(d.get(c_key, u)) # N: Revealed type is "builtins.object"
-reveal_type(d.pop(a_key)) # E: Key "a" of TypedDict "Outer" cannot be deleted \
- # N: Revealed type is "builtins.int"
+reveal_type(d.pop(a_key)) # N: Revealed type is "builtins.int" \
+ # E: Key "a" of TypedDict "Outer" cannot be deleted
+
reveal_type(d.pop(b_key)) # N: Revealed type is "builtins.str"
d.pop(c_key) # E: TypedDict "Outer" has no key "c"
diff --git a/test-data/unit/check-typeddict.test b/test-data/unit/check-typeddict.test
index 6a86dd63a3cd..5234ced8ea86 100644
--- a/test-data/unit/check-typeddict.test
+++ b/test-data/unit/check-typeddict.test
@@ -1747,8 +1747,9 @@ td: Union[TDA, TDB]
reveal_type(td.pop('a')) # N: Revealed type is "builtins.int"
reveal_type(td.pop('b')) # N: Revealed type is "Union[builtins.str, builtins.int]"
-reveal_type(td.pop('c')) # E: TypedDict "TDA" has no key "c" \
- # N: Revealed type is "Union[Any, builtins.int]"
+reveal_type(td.pop('c')) # N: Revealed type is "Union[Any, builtins.int]" \
+ # E: TypedDict "TDA" has no key "c"
+
[builtins fixtures/dict.pyi]
[typing fixtures/typing-typeddict.pyi]
@@ -2614,8 +2615,9 @@ def func(foo: Union[Foo1, Foo2]):
del foo["missing"] # E: TypedDict "Foo1" has no key "missing" \
# E: TypedDict "Foo2" has no key "missing"
- del foo[1] # E: Expected TypedDict key to be string literal \
- # E: Argument 1 to "__delitem__" has incompatible type "int"; expected "str"
+ del foo[1] # E: Argument 1 to "__delitem__" has incompatible type "int"; expected "str" \
+ # E: Expected TypedDict key to be string literal
+
[builtins fixtures/dict.pyi]
[typing fixtures/typing-typeddict.pyi]
@@ -3726,8 +3728,9 @@ class TP(TypedDict):
mutable: bool
x: TP
-reveal_type(x.pop("key")) # E: Key "key" of TypedDict "TP" cannot be deleted \
- # N: Revealed type is "builtins.str"
+reveal_type(x.pop("key")) # N: Revealed type is "builtins.str" \
+ # E: Key "key" of TypedDict "TP" cannot be deleted
+
x.update({"key": "abc", "other": 1, "mutable": True}) # E: ReadOnly TypedDict keys ("key", "other") TypedDict are mutated
x.setdefault("key", "abc") # E: ReadOnly TypedDict key "key" TypedDict is mutated
| (🐞) Wrong column number for `TypedDict.setdefault`
```py
from typing import TypedDict
class A(TypedDict):
x: int
a: A
a.setdefault('x', '') # test.py:6:1: error: Argument 2 to "setdefault" of "TypedDict" has incompatible type "str"; expected "int"
```
Why column 1?
Internally an error with the correct column number is generated, but is filtered out as a duplicate.
| 1,734,129,733,000 | [] | Bug Report | [
"mypy/plugins/default.py:typed_dict_pop_callback",
"mypy/plugins/default.py:typed_dict_setdefault_callback",
"mypy/plugins/default.py:typed_dict_delitem_callback"
] | [] | 3 |
|
python/mypy | python__mypy-18290 | 46c7ec7ed25de55452783ee7d45718c01018c764 | diff --git a/mypy/semanal_typeargs.py b/mypy/semanal_typeargs.py
index 646bb28a3b6e..435abb78ca43 100644
--- a/mypy/semanal_typeargs.py
+++ b/mypy/semanal_typeargs.py
@@ -148,17 +148,18 @@ def validate_args(
is_error = False
is_invalid = False
for (i, arg), tvar in zip(enumerate(args), type_vars):
+ context = ctx if arg.line < 0 else arg
if isinstance(tvar, TypeVarType):
if isinstance(arg, ParamSpecType):
is_invalid = True
self.fail(
INVALID_PARAM_SPEC_LOCATION.format(format_type(arg, self.options)),
- ctx,
+ context,
code=codes.VALID_TYPE,
)
self.note(
INVALID_PARAM_SPEC_LOCATION_NOTE.format(arg.name),
- ctx,
+ context,
code=codes.VALID_TYPE,
)
continue
@@ -167,7 +168,7 @@ def validate_args(
self.fail(
f"Cannot use {format_type(arg, self.options)} for regular type variable,"
" only for ParamSpec",
- ctx,
+ context,
code=codes.VALID_TYPE,
)
continue
@@ -182,13 +183,15 @@ def validate_args(
is_error = True
self.fail(
message_registry.INVALID_TYPEVAR_AS_TYPEARG.format(arg.name, name),
- ctx,
+ context,
code=codes.TYPE_VAR,
)
continue
else:
arg_values = [arg]
- if self.check_type_var_values(name, arg_values, tvar.name, tvar.values, ctx):
+ if self.check_type_var_values(
+ name, arg_values, tvar.name, tvar.values, context
+ ):
is_error = True
# Check against upper bound. Since it's object the vast majority of the time,
# add fast path to avoid a potentially slow subtype check.
@@ -209,7 +212,7 @@ def validate_args(
name,
format_type(upper_bound, self.options),
),
- ctx,
+ context,
code=codes.TYPE_VAR,
)
elif isinstance(tvar, ParamSpecType):
@@ -220,7 +223,7 @@ def validate_args(
self.fail(
"Can only replace ParamSpec with a parameter types list or"
f" another ParamSpec, got {format_type(arg, self.options)}",
- ctx,
+ context,
code=codes.VALID_TYPE,
)
if is_invalid:
| diff --git a/test-data/unit/check-classes.test b/test-data/unit/check-classes.test
index 5ce80faaee18..a3d35da15107 100644
--- a/test-data/unit/check-classes.test
+++ b/test-data/unit/check-classes.test
@@ -6112,8 +6112,8 @@ A = G
x: A[B[int]] # E
B = G
[out]
-main:8:4: error: Type argument "G[int]" of "G" must be a subtype of "str"
-main:8:6: error: Type argument "int" of "G" must be a subtype of "str"
+main:8:6: error: Type argument "G[int]" of "G" must be a subtype of "str"
+main:8:8: error: Type argument "int" of "G" must be a subtype of "str"
[case testExtremeForwardReferencing]
from typing import TypeVar, Generic
diff --git a/test-data/unit/check-columns.test b/test-data/unit/check-columns.test
index 44524b9df943..79a2f31b574b 100644
--- a/test-data/unit/check-columns.test
+++ b/test-data/unit/check-columns.test
@@ -310,9 +310,18 @@ T = TypeVar('T', int, str)
class C(Generic[T]):
pass
-def f(c: C[object]) -> None: pass # E:10: Value of type variable "T" of "C" cannot be "object"
+def f(c: C[object]) -> None: pass # E:12: Value of type variable "T" of "C" cannot be "object"
(C[object]()) # E:2: Value of type variable "T" of "C" cannot be "object"
+[case testColumnInvalidLocationForParamSpec]
+from typing import List
+from typing_extensions import ParamSpec
+
+P = ParamSpec('P')
+def foo(x: List[P]): pass # E:17: Invalid location for ParamSpec "P" \
+ # N:17: You can use ParamSpec as the first argument to Callable, e.g., "Callable[P, int]"
+[builtins fixtures/list.pyi]
+
[case testColumnSyntaxErrorInTypeAnnotation]
if int():
def f(x # type: int,
diff --git a/test-data/unit/check-generics.test b/test-data/unit/check-generics.test
index b8cc0422b749..5791b9c471d5 100644
--- a/test-data/unit/check-generics.test
+++ b/test-data/unit/check-generics.test
@@ -671,7 +671,7 @@ reveal_type(a) # N: Revealed type is "other.array[Any, other.dtype[builtins.floa
[out]
main:3: error: Type argument "float" of "Array" must be a subtype of "generic" [type-var]
a: other.Array[float]
- ^
+ ^
[file other.py]
from typing import Any, Generic, TypeVar
diff --git a/test-data/unit/check-newsemanal.test b/test-data/unit/check-newsemanal.test
index 784b9db9f66e..81b0066dbf81 100644
--- a/test-data/unit/check-newsemanal.test
+++ b/test-data/unit/check-newsemanal.test
@@ -1666,8 +1666,8 @@ T = TypeVar('T', bound=int)
class C(Generic[T]): pass
class C2(Generic[T]): pass
-A = C[str] # E: Type argument "str" of "C" must be a subtype of "int" \
- # E: Value of type variable "T" of "C" cannot be "str"
+A = C[str] # E: Value of type variable "T" of "C" cannot be "str" \
+ # E: Type argument "str" of "C" must be a subtype of "int"
B = Union[C[str], int] # E: Type argument "str" of "C" must be a subtype of "int"
S = TypeVar('S', bound=C[str]) # E: Type argument "str" of "C" must be a subtype of "int"
U = TypeVar('U', C[str], str) # E: Type argument "str" of "C" must be a subtype of "int"
| (🐞) Wrong column number for "Invalid location for ParamSpec"
```py
from typing import List, ParamSpec
P = ParamSpec('P')
def foo(x: List[P]) -> None: ...
```
```
test.py:5:13: error: Invalid location for ParamSpec "P" [misc]
test.py:5:18: note: You can use ParamSpec as the first argument to Callable, e.g., 'Callable[P, int]'
Found 2 errors in 1 file (checked 1 source file)
```
Column 13 is the `L` in `List`; the error and the note specify different column numbers and don't match up.
An internal error with the correct column number is generated, but is removed as a duplicate.
| This has changed in master I think:
```
test.py:5:12: error: Invalid location for ParamSpec "P" [misc]
test.py:5:12: note: You can use ParamSpec as the first argument to Callable, e.g., 'Callable[P, int]'
Found 1 error in 1 file (checked 1 source file)
```
They have the same column now (still not on the `ParamSpec` though) | 1,734,107,837,000 | [] | Bug Report | [
"mypy/semanal_typeargs.py:TypeArgumentAnalyzer.validate_args"
] | [] | 1 |
python/mypy | python__mypy-18197 | 499adaed8adbded1a180e30d071438fef81779ec | diff --git a/mypy/typeanal.py b/mypy/typeanal.py
index daf7ab1951ea..b7e7da17e209 100644
--- a/mypy/typeanal.py
+++ b/mypy/typeanal.py
@@ -2277,7 +2277,8 @@ def set_any_tvars(
env[tv.id] = arg
t = TypeAliasType(node, args, newline, newcolumn)
if not has_type_var_tuple_type:
- fixed = expand_type(t, env)
+ with state.strict_optional_set(options.strict_optional):
+ fixed = expand_type(t, env)
assert isinstance(fixed, TypeAliasType)
t.args = fixed.args
| diff --git a/test-data/unit/check-typevar-defaults.test b/test-data/unit/check-typevar-defaults.test
index 3cd94f4a46d2..93d20eb26f6e 100644
--- a/test-data/unit/check-typevar-defaults.test
+++ b/test-data/unit/check-typevar-defaults.test
@@ -728,3 +728,24 @@ class C(Generic[_I]): pass
t: type[C] | int = C
[builtins fixtures/tuple.pyi]
+
+
+
+[case testGenericTypeAliasWithDefaultTypeVarPreservesNoneInDefault]
+from typing_extensions import TypeVar
+from typing import Generic, Union
+
+T1 = TypeVar("T1", default=Union[int, None])
+T2 = TypeVar("T2", default=Union[int, None])
+
+
+class A(Generic[T1, T2]):
+ def __init__(self, a: T1, b: T2) -> None:
+ self.a = a
+ self.b = b
+
+
+MyA = A[T1, int]
+a: MyA = A(None, 10)
+reveal_type(a.a) # N: Revealed type is "Union[builtins.int, None]"
+[builtins fixtures/tuple.pyi]
| Generic type aliases with TypeVar defaults infer to an incorrect type
**Bug Report**
When using a type alias of a generic type whose type variables have defaults, unbound type variables may be inferred with an incorrect default type.
**To Reproduce**
```python
from typing import Generic, TypeVar, TypeAlias
T1 = TypeVar('T1', default=int | None)
T2 = TypeVar('T2', default=int | None)
class A(Generic[T1, T2]):
def __init__(self, a: T1, b: T2) -> None:
self.a = a
self.b = b
MyA: TypeAlias = A[T1, int]
a: MyA = A(None, 10)
```
https://mypy-play.net/?mypy=latest&python=3.12&gist=abece06adfb80e28c6d23bd99997964e
**Expected Behavior**
`MyA` should preserve `T1`'s default type, which is `int | None`.
**Actual Behavior**
`T1`'s default is instead inferred as `int` in `MyA`, so the assignment to `a` fails with:
`Argument 1 to "A" has incompatible type "None"; expected "int" [arg-type]`
**Your Environment**
- Mypy version used: 1.13
- Python version used: 3.12
| 1,732,662,294,000 | [
"topic-strict-optional",
"topic-pep-696"
] | Bug Report | [
"mypy/typeanal.py:set_any_tvars"
] | [] | 1 |
|
python/mypy | python__mypy-18164 | 8ef21976cfd190d0b1974f438a7d30e8eaea5272 | diff --git a/mypy/subtypes.py b/mypy/subtypes.py
index 11f3421331a5..a26aaf798b58 100644
--- a/mypy/subtypes.py
+++ b/mypy/subtypes.py
@@ -910,14 +910,12 @@ def visit_typeddict_type(self, left: TypedDictType) -> bool:
return False
# Non-required key is not compatible with a required key since
# indexing may fail unexpectedly if a required key is missing.
- # Required key is not compatible with a non-required key since
- # the prior doesn't support 'del' but the latter should support
- # it.
- #
- # NOTE: 'del' support is currently not implemented (#3550). We
- # don't want to have to change subtyping after 'del' support
- # lands so here we are anticipating that change.
- if (name in left.required_keys) != (name in right.required_keys):
+ # Required key is not compatible with a non-read-only non-required
+ # key since the prior doesn't support 'del' but the latter should
+ # support it.
+ # Required key is compatible with a read-only non-required key.
+ required_differ = (name in left.required_keys) != (name in right.required_keys)
+ if not right_readonly and required_differ:
return False
# Readonly fields check:
#
| diff --git a/test-data/unit/check-typeddict.test b/test-data/unit/check-typeddict.test
index affa472bb640..521a5b80573d 100644
--- a/test-data/unit/check-typeddict.test
+++ b/test-data/unit/check-typeddict.test
@@ -3894,6 +3894,20 @@ accepts_B(b)
[builtins fixtures/dict.pyi]
[typing fixtures/typing-typeddict.pyi]
+[case testTypedDictRequiredConsistentWithNotRequiredReadOnly]
+from typing import NotRequired, ReadOnly, Required, TypedDict
+
+class A(TypedDict):
+ x: NotRequired[ReadOnly[str]]
+
+class B(TypedDict):
+ x: Required[str]
+
+def f(b: B):
+ a: A = b # ok
+[builtins fixtures/dict.pyi]
+[typing fixtures/typing-typeddict.pyi]
+
[case testTypedDictReadOnlyCall]
from typing import ReadOnly, TypedDict
| `arg-type` error duck typing a `TypedDict` with `NotRequired` field
**Bug Report**
[mypy play url](https://mypy-play.net/?mypy=latest&python=3.12&gist=e6a8b78acddafd1ad10e81364e8e9ab5)
It seems that mypy doesn't "match" a `NotRequired` field with a normal one when duck typing two different TypedDicts.
Let's say that I have a function `foo` that expects a `Small` TypedDict as a parameter:
```python
class Small(TypedDict):
a: NotRequired[str]
def foo(small: Small) -> None: ...
```
In this `Small` TypedDict the field `a` is marked as `NotRequired`.
Elsewhere I have another `Big` TypedDict with two fields, `a` and `b`, both required:
```python
class Big(TypedDict):
a: str
b: str
```
but if I try to pass a `Big` dict instance to my `foo` function, mypy marks it as an error:
`main.py:21: error: Argument 1 to "foo" has incompatible type "Big"; expected "Small" [arg-type]`
If I change the type of `Big.a` to `NotRequired[str]`, mypy is happy; however, mypy should allow the original definition, as a "normal" (required) field should be treated as a stricter, "smaller" version of a not-required one.
| One wrinkle is that `Small` allows deleting keys whereas `Big` does not, which makes passing a `Big` where a `Small` is expected potentially unsound.
Consider:
```python
from typing import NotRequired, TypedDict
class Big(TypedDict):
a: str
b: str
class Small(TypedDict):
a: NotRequired[str]
def foo(small: Small) -> None:
del small["a"] # ok, deleting NotRequired keys is permitted
x = Big(a="a", b="b")
foo(x) # danger! If this was legal, x would no longer be a valid Big
```
However, if you prevent key deletion using [`ReadOnly`](https://peps.python.org/pep-0705/), I think this would be safe. Pyright seems to support this, provided that the `NotRequired` fields are `ReadOnly`: [[pyright playground]](https://pyright-play.net/?pythonVersion=3.12&code=GYJw9gtgBALgngBwJYDsDmUkQWEMoByYMASgKYCOArkiGQCYA0UAKogwCJIDGMAsAChQkWIlRoA%2BmQAeMMigDOSMIszZc%2BcgEN6AeRQAbOIMHcDWhQqgAhJGgAUbBJx4wAlAC5BUH1C0eoBRgQb18AIwCgkIFfKFNzSygAZQgtAwNHdnouXk9Qn38obT1DOABtIlJKGjp6MqiAXQb8uIF6MmAoYDAwewVU9ICUtIM3KABaAD5CFTIvGN8ECwUWgGIodoNAgYMygCItPeaFn0FpKABeGzt7LQuDveYw%2B7C9t0Fu3uk3IA) | 1,731,978,110,000 | [] | Bug Report | [
"mypy/subtypes.py:SubtypeVisitor.visit_typeddict_type"
] | [] | 1 |
python/mypy | python__mypy-18163 | 8ef21976cfd190d0b1974f438a7d30e8eaea5272 | diff --git a/mypy/checker.py b/mypy/checker.py
index 1bee348bc252..ef3f7502d7ce 100644
--- a/mypy/checker.py
+++ b/mypy/checker.py
@@ -6274,10 +6274,6 @@ def has_no_custom_eq_checks(t: Type) -> bool:
coerce_only_in_literal_context,
)
- # Strictly speaking, we should also skip this check if the objects in the expr
- # chain have custom __eq__ or __ne__ methods. But we (maybe optimistically)
- # assume nobody would actually create a custom objects that considers itself
- # equal to None.
if if_map == {} and else_map == {}:
if_map, else_map = self.refine_away_none_in_comparison(
operands, operand_types, expr_indices, narrowable_operand_index_to_hash.keys()
@@ -6602,25 +6598,36 @@ def refine_away_none_in_comparison(
For more details about what the different arguments mean, see the
docstring of 'refine_identity_comparison_expression' up above.
"""
+
non_optional_types = []
for i in chain_indices:
typ = operand_types[i]
if not is_overlapping_none(typ):
non_optional_types.append(typ)
- # Make sure we have a mixture of optional and non-optional types.
- if len(non_optional_types) == 0 or len(non_optional_types) == len(chain_indices):
- return {}, {}
+ if_map, else_map = {}, {}
- if_map = {}
- for i in narrowable_operand_indices:
- expr_type = operand_types[i]
- if not is_overlapping_none(expr_type):
- continue
- if any(is_overlapping_erased_types(expr_type, t) for t in non_optional_types):
- if_map[operands[i]] = remove_optional(expr_type)
+ if not non_optional_types or (len(non_optional_types) != len(chain_indices)):
- return if_map, {}
+ # Narrow e.g. `Optional[A] == "x"` or `Optional[A] is "x"` to `A` (which may be
+ # convenient but is strictly not type-safe):
+ for i in narrowable_operand_indices:
+ expr_type = operand_types[i]
+ if not is_overlapping_none(expr_type):
+ continue
+ if any(is_overlapping_erased_types(expr_type, t) for t in non_optional_types):
+ if_map[operands[i]] = remove_optional(expr_type)
+
+ # Narrow e.g. `Optional[A] != None` to `A` (which is stricter than the above step and
+ # so type-safe but less convenient, because e.g. `Optional[A] == None` still results
+ # in `Optional[A]`):
+ if any(isinstance(get_proper_type(ot), NoneType) for ot in operand_types):
+ for i in narrowable_operand_indices:
+ expr_type = operand_types[i]
+ if is_overlapping_none(expr_type):
+ else_map[operands[i]] = remove_optional(expr_type)
+
+ return if_map, else_map
def is_len_of_tuple(self, expr: Expression) -> bool:
"""Is this expression a `len(x)` call where x is a tuple or union of tuples?"""
| diff --git a/test-data/unit/check-narrowing.test b/test-data/unit/check-narrowing.test
index d740708991d0..bc763095477e 100644
--- a/test-data/unit/check-narrowing.test
+++ b/test-data/unit/check-narrowing.test
@@ -1385,9 +1385,9 @@ val: Optional[A]
if val == None:
reveal_type(val) # N: Revealed type is "Union[__main__.A, None]"
else:
- reveal_type(val) # N: Revealed type is "Union[__main__.A, None]"
+ reveal_type(val) # N: Revealed type is "__main__.A"
if val != None:
- reveal_type(val) # N: Revealed type is "Union[__main__.A, None]"
+ reveal_type(val) # N: Revealed type is "__main__.A"
else:
reveal_type(val) # N: Revealed type is "Union[__main__.A, None]"
| `==`-based narrowing of `Optional`
1. Mypy's current behaviour:
```python
x: int | None
if x == None:
x # "Union[builtins.int, None]"
else:
x # "Union[builtins.int, None]"
```
2. Pyright's current behaviour:
```python
x: int | None
if x == None:
x # x is None
else:
x # x is int
```
3. I think the ideal solution would be:
```python
x: int | None
if x == None:
x # "Union[builtins.int, None]"
else:
x # "builtins.int"
```
This [Mypy code comment](https://github.com/python/mypy/blob/3b00002acdf098e7241df8f2e1843f8b8260b168/mypy/checker.py#L6278-L6280) seems to favour solution 2.
I am asking because if Mypy would follow solution 2 or 3, `in`-based narrowing of optional types could be implemented more consistently.
Related pull requests: #15760 #17154 #17044.
| I think both 2 and 3 are fine. If we go with 3, we should look at presence of `__eq__` (`int` has `__eq__` defined, not sure why, dates back to 2015)
Did you mean we should check for `__eq__` when going for solution **2**?
If so, this could be an additional safety net for most cases. Then, the modified solution 2 would only result in wrong narrowing for subtypes that override `__eq__` strangely:
```python
class A: ...
class B(A):
def __eq__(self, o: object) -> bool:
return True
def f(x: A | None) -> None:
if x == None:
assert x is None
f(B()) # AssertionError
```
I think, for complete safety, we would have to combine looking for `__eq__` with checking for `final`:
```python
from typing import final
@final
class A: ...
def f(x: A | None) -> None:
if x == None:
reveal_type(x) # must be None
``` | 1,731,965,411,000 | [
"topic-type-narrowing"
] | Feature Request | [
"mypy/checker.py:TypeChecker.refine_away_none_in_comparison"
] | [] | 1 |
python/mypy | python__mypy-18160 | 8ef21976cfd190d0b1974f438a7d30e8eaea5272 | diff --git a/mypy/checkexpr.py b/mypy/checkexpr.py
index 577576a4e5f8..11a9cffe18c3 100644
--- a/mypy/checkexpr.py
+++ b/mypy/checkexpr.py
@@ -5612,11 +5612,15 @@ def visit_slice_expr(self, e: SliceExpr) -> Type:
except KeyError:
supports_index = self.chk.named_type("builtins.int") # thanks, fixture life
expected = make_optional_type(supports_index)
+ type_args = []
for index in [e.begin_index, e.end_index, e.stride]:
if index:
t = self.accept(index)
self.chk.check_subtype(t, expected, index, message_registry.INVALID_SLICE_INDEX)
- return self.named_type("builtins.slice")
+ type_args.append(t)
+ else:
+ type_args.append(NoneType())
+ return self.chk.named_generic_type("builtins.slice", type_args)
def visit_list_comprehension(self, e: ListComprehension) -> Type:
return self.check_generator_or_comprehension(
| diff --git a/test-data/unit/check-expressions.test b/test-data/unit/check-expressions.test
index d5ddc910bcd6..cd26c9bb408a 100644
--- a/test-data/unit/check-expressions.test
+++ b/test-data/unit/check-expressions.test
@@ -1178,8 +1178,8 @@ class B: pass
[case testSlicingWithInvalidBase]
a: A
-a[1:2] # E: Invalid index type "slice" for "A"; expected type "int"
-a[:] # E: Invalid index type "slice" for "A"; expected type "int"
+a[1:2] # E: Invalid index type "slice[int, int, None]" for "A"; expected type "int"
+a[:] # E: Invalid index type "slice[None, None, None]" for "A"; expected type "int"
class A:
def __getitem__(self, n: int) -> 'A': pass
[builtins fixtures/slice.pyi]
| Change in inference of overloads involving `Hashable` and `slice` now that `slice` is generic
**Bug Report**
Consider the following Python snippet, saved as `foo.pyi`:
```py
from typing import assert_type, overload, Hashable
class Foo: ...
class DataFrame:
@overload
def __getitem__(self, key: slice) -> DataFrame: ...
@overload
def __getitem__(self, key: Hashable) -> Foo: ...
df = DataFrame()
assert_type(df[1:], DataFrame)
assert_type(df[:2], DataFrame)
```
Prior to [the recent change to make `slice` generic in typeshed](https://github.com/python/typeshed/pull/11637), mypy used to emit no errors on this snippet. However, mypy on the `master` branch emits the following errors:
```
foo.pyi:7: error: Overloaded function signatures 1 and 2 overlap with incompatible return types [overload-overlap]
foo.pyi:13: error: Expression is of type "Any", not "DataFrame" [assert-type]
foo.pyi:14: error: Expression is of type "Any", not "DataFrame" [assert-type]
```
The first error here seems reasonable to me, but it's unclear to me why mypy now infers `Any` as the result of the `df` subscriptions. This snippet is minimized from the sole new mypy_primer error reported in https://github.com/python/typeshed/pull/11637#issuecomment-2435268577.
**To Reproduce**
https://mypy-play.net/?mypy=master&python=3.12&gist=339ba7f72c9048770ce15d6dc75207a1
**Your Environment**
- Mypy version used: mypy 1.14.0+dev.2ebc690279c7e20ed8bec006787030c5ba57c40e (compiled: no)
| I think this is because since python 3.12, slice is hashable, hence it satisfies both overloads and mypy performs a join of the return types. When you run with 3.11 the error disappears:
https://mypy-play.net/?mypy=master&python=3.11&gist=339ba7f72c9048770ce15d6dc75207a1
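(An illustrative aside, not part of the original thread: the hashability change mentioned above is easy to check directly, since `slice` only gained `__hash__` in CPython 3.12.)
```python
# Minimal check of the claim above; behaviour differs between 3.11 and 3.12+.
from collections.abc import Hashable

s = slice(1, None)
print(isinstance(s, Hashable))   # True on 3.12+, False on 3.11 and earlier
try:
    hash(s)
    print("slice is hashable")    # reached on 3.12+
except TypeError:
    print("slice is unhashable")  # reached on 3.11 and earlier
```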
@AlexWaygood Uhh, I just discovered that it actually seems to be related to the bug #2410 we just encountered in typeshed as well, because when you use non-literal slices it also disappears!
```python
from typing import assert_type, overload, Hashable
class Foo: ...
class DataFrame:
@overload
def __getitem__(self, key: slice, /) -> "DataFrame": ...
@overload
def __getitem__(self, key: Hashable, /) -> Foo: ...
df = DataFrame()
assert_type(df[1:], DataFrame) # ❌
assert_type(df[:2], DataFrame) # ❌
assert_type(df[slice(1, None)], DataFrame) # ✅
assert_type(df[slice(None, 2)], DataFrame) # ✅
```
https://mypy-play.net/?mypy=master&python=3.12&gist=935d2927539dbe01d6727037ace98469
----
I was wondering about this because according to https://mypy.readthedocs.io/en/stable/more_types.html#type-checking-calls-to-overloads, no join operation should happen. I guess there is some legacy code responsible for dealing with literal slices in `__getitem__` that's going haywire?
> I guess there is some legacy code responsible for dealing with literal slices in `__getitem__` that's going haywire?
~Indeed, this is actually hardcoded into the expression checker:~ Nevermind, I had my wires crossed about what this issue was about, sorry for the noise!
[https://github.com/python/mypy/blob/2ebc690279c7e20ed8bec006787030c5ba57c40e/mypy/checkexpr.py#L5609-L5619.](https://github.com/python/mypy/blob/2ebc690279c7e20ed8bec006787030c5ba57c40e/mypy/checkexpr.py#L5609-L5619)
Actually, my previous comment wasn't as far from the mark as I thought :laughing:
The difference between slice expressions and non-literal slices does come down to the hardcoded analysis in the expression checker, specifically the return:
https://github.com/python/mypy/blob/2ebc690279c7e20ed8bec006787030c5ba57c40e/mypy/checkexpr.py#L5619
This erases any type information and makes all slice expressions be treated as `slice[Any, Any, Any]`. Manually passing a slice of type `slice[Any, Any, Any]` indeed has the same behavior:
```python
df = DataFrame()
x: slice[Any, Any, Any]
assert_type(df[x], DataFrame) # E: Expression is of type "Any", not "DataFrame"
```
I'm not fully sure why passing `slice[Any, Any, Any]` causes the overload return type to resolve to `Any`, but I think it may have to do with the way [`infer_overload_return_type`](https://github.com/python/mypy/blob/2ebc690279c7e20ed8bec006787030c5ba57c40e/mypy/checkexpr.py#L2838) resolves ambiguous overloads, specifically now that this check will resolve to True:
https://github.com/python/mypy/blob/2ebc690279c7e20ed8bec006787030c5ba57c40e/mypy/checkexpr.py#L2862
so this branch can no longer be taken:
https://github.com/python/mypy/blob/2ebc690279c7e20ed8bec006787030c5ba57c40e/mypy/checkexpr.py#L2882-L2884
I think the fix would be to just properly infer the generic type arguments for slice expressions. Naively that could look something like the following, though we'd probably want to make it match the behavior of `slice.__new__` in typeshed.
```diff
--- a/mypy/checkexpr.py
+++ b/mypy/checkexpr.py
@@ -5612,11 +5612,15 @@ class ExpressionChecker(ExpressionVisitor[Type]):
except KeyError:
supports_index = self.chk.named_type("builtins.int") # thanks, fixture life
expected = make_optional_type(supports_index)
+ type_args = []
for index in [e.begin_index, e.end_index, e.stride]:
if index:
t = self.accept(index)
self.chk.check_subtype(t, expected, index, message_registry.INVALID_SLICE_INDEX)
- return self.named_type("builtins.slice")
+ type_args.append(t)
+ else:
+ type_args.append(NoneType())
+ return self.chk.named_generic_type("builtins.slice", type_args)
``` | 1,731,889,089,000 | [] | Bug Report | [
"mypy/checkexpr.py:ExpressionChecker.visit_slice_expr"
] | [] | 1 |
python/mypy | python__mypy-18141 | 3b00002acdf098e7241df8f2e1843f8b8260b168 | diff --git a/mypy/checkpattern.py b/mypy/checkpattern.py
index 6b4fa35f9c49..a7121712a6db 100644
--- a/mypy/checkpattern.py
+++ b/mypy/checkpattern.py
@@ -693,7 +693,9 @@ def visit_class_pattern(self, o: ClassPattern) -> PatternType:
def should_self_match(self, typ: Type) -> bool:
typ = get_proper_type(typ)
- if isinstance(typ, Instance) and typ.type.is_named_tuple:
+ if isinstance(typ, Instance) and typ.type.get("__match_args__") is not None:
+ # Named tuples and other subtypes of builtins that define __match_args__
+ # should not self match.
return False
for other in self.self_match_types:
if is_subtype(typ, other):
| diff --git a/test-data/unit/check-python310.test b/test-data/unit/check-python310.test
index 58b70d7b74d8..8187f27353a9 100644
--- a/test-data/unit/check-python310.test
+++ b/test-data/unit/check-python310.test
@@ -648,6 +648,25 @@ match m:
reveal_type(m) # N: Revealed type is "builtins.tuple[Any, ...]"
[builtins fixtures/primitives.pyi]
+[case testMatchClassPatternCaptureSelfSubtype]
+class A(str):
+ pass
+
+class B(str):
+ __match_args__ = ("b",)
+ b: int
+
+def f1(x: A):
+ match x:
+ case A(a):
+ reveal_type(a) # N: Revealed type is "__main__.A"
+
+def f2(x: B):
+ match x:
+ case B(b):
+ reveal_type(b) # N: Revealed type is "builtins.int"
+[builtins fixtures/tuple.pyi]
+
[case testMatchInvalidClassPattern]
m: object
| Positional subpattern of a built-in type subclass with __match_args__ causes type confusion
Given
```python
class PrintableKey(bytes):
__match_args__ = ('ch', )
@property
def ch(self) -> str:
return self.decode('UTF-8')
```
now
```python
match event:
case PrintableKey(ch):
reveal_type(ch) # mypy thinks it's PrintableKey
```
versus
```python
match event:
case PrintableKey(ch=ch):
reveal_type(ch) # mypy correctly deduces it's str
```
_Originally posted by @mgedmin in https://github.com/python/mypy/issues/13804#issuecomment-2466177933_
| 1,731,277,108,000 | [] | Bug Report | [
"mypy/checkpattern.py:PatternChecker.should_self_match"
] | [] | 1 |
|
python/mypy | python__mypy-18132 | 3b00002acdf098e7241df8f2e1843f8b8260b168 | diff --git a/mypy/semanal.py b/mypy/semanal.py
index 59e4594353f0..1f5f31d59e32 100644
--- a/mypy/semanal.py
+++ b/mypy/semanal.py
@@ -484,6 +484,12 @@ def __init__(
# Used to pass information about current overload index to visit_func_def().
self.current_overload_item: int | None = None
+ # Used to track whether currently inside an except* block. This helps
+ # to invoke errors when continue/break/return is used inside except* block.
+ self.inside_except_star_block: bool = False
+ # Used to track edge case when return is still inside except* if it enters a loop
+ self.return_stmt_inside_except_star_block: bool = False
+
# mypyc doesn't properly handle implementing an abstractproperty
# with a regular attribute so we make them properties
@property
@@ -511,6 +517,25 @@ def allow_unbound_tvars_set(self) -> Iterator[None]:
finally:
self.allow_unbound_tvars = old
+ @contextmanager
+ def inside_except_star_block_set(
+ self, value: bool, entering_loop: bool = False
+ ) -> Iterator[None]:
+ old = self.inside_except_star_block
+ self.inside_except_star_block = value
+
+ # Return statement would still be in except* scope if entering loops
+ if not entering_loop:
+ old_return_stmt_flag = self.return_stmt_inside_except_star_block
+ self.return_stmt_inside_except_star_block = value
+
+ try:
+ yield
+ finally:
+ self.inside_except_star_block = old
+ if not entering_loop:
+ self.return_stmt_inside_except_star_block = old_return_stmt_flag
+
#
# Preparing module (performed before semantic analysis)
#
@@ -877,7 +902,8 @@ def visit_func_def(self, defn: FuncDef) -> None:
return
with self.scope.function_scope(defn):
- self.analyze_func_def(defn)
+ with self.inside_except_star_block_set(value=False):
+ self.analyze_func_def(defn)
def function_fullname(self, fullname: str) -> str:
if self.current_overload_item is None:
@@ -5263,6 +5289,8 @@ def visit_return_stmt(self, s: ReturnStmt) -> None:
self.statement = s
if not self.is_func_scope():
self.fail('"return" outside function', s)
+ if self.return_stmt_inside_except_star_block:
+ self.fail('"return" not allowed in except* block', s, serious=True)
if s.expr:
s.expr.accept(self)
@@ -5296,7 +5324,8 @@ def visit_while_stmt(self, s: WhileStmt) -> None:
self.statement = s
s.expr.accept(self)
self.loop_depth[-1] += 1
- s.body.accept(self)
+ with self.inside_except_star_block_set(value=False, entering_loop=True):
+ s.body.accept(self)
self.loop_depth[-1] -= 1
self.visit_block_maybe(s.else_body)
@@ -5320,20 +5349,24 @@ def visit_for_stmt(self, s: ForStmt) -> None:
s.index_type = analyzed
self.loop_depth[-1] += 1
- self.visit_block(s.body)
+ with self.inside_except_star_block_set(value=False, entering_loop=True):
+ self.visit_block(s.body)
self.loop_depth[-1] -= 1
-
self.visit_block_maybe(s.else_body)
def visit_break_stmt(self, s: BreakStmt) -> None:
self.statement = s
if self.loop_depth[-1] == 0:
self.fail('"break" outside loop', s, serious=True, blocker=True)
+ if self.inside_except_star_block:
+ self.fail('"break" not allowed in except* block', s, serious=True)
def visit_continue_stmt(self, s: ContinueStmt) -> None:
self.statement = s
if self.loop_depth[-1] == 0:
self.fail('"continue" outside loop', s, serious=True, blocker=True)
+ if self.inside_except_star_block:
+ self.fail('"continue" not allowed in except* block', s, serious=True)
def visit_if_stmt(self, s: IfStmt) -> None:
self.statement = s
@@ -5354,7 +5387,8 @@ def analyze_try_stmt(self, s: TryStmt, visitor: NodeVisitor[None]) -> None:
type.accept(visitor)
if var:
self.analyze_lvalue(var)
- handler.accept(visitor)
+ with self.inside_except_star_block_set(self.inside_except_star_block or s.is_star):
+ handler.accept(visitor)
if s.else_body:
s.else_body.accept(visitor)
if s.finally_body:
| diff --git a/test-data/unit/check-python311.test b/test-data/unit/check-python311.test
index 28951824999f..6f4c540572b0 100644
--- a/test-data/unit/check-python311.test
+++ b/test-data/unit/check-python311.test
@@ -173,3 +173,89 @@ Alias4 = Callable[[*IntList], int] # E: "List[int]" cannot be unpacked (must be
x4: Alias4[int] # E: Bad number of arguments for type alias, expected 0, given 1
reveal_type(x4) # N: Revealed type is "def (*Any) -> builtins.int"
[builtins fixtures/tuple.pyi]
+
+[case testReturnInExceptStarBlock1]
+# flags: --python-version 3.11
+def foo() -> None:
+ try:
+ pass
+ except* Exception:
+ return # E: "return" not allowed in except* block
+ finally:
+ return
+[builtins fixtures/exception.pyi]
+
+[case testReturnInExceptStarBlock2]
+# flags: --python-version 3.11
+def foo():
+ while True:
+ try:
+ pass
+ except* Exception:
+ while True:
+ return # E: "return" not allowed in except* block
+[builtins fixtures/exception.pyi]
+
+[case testContinueInExceptBlockNestedInExceptStarBlock]
+# flags: --python-version 3.11
+while True:
+ try:
+ ...
+ except* Exception:
+ try:
+ ...
+ except Exception:
+ continue # E: "continue" not allowed in except* block
+ continue # E: "continue" not allowed in except* block
+[builtins fixtures/exception.pyi]
+
+[case testReturnInExceptBlockNestedInExceptStarBlock]
+# flags: --python-version 3.11
+def foo():
+ try:
+ ...
+ except* Exception:
+ try:
+ ...
+ except Exception:
+ return # E: "return" not allowed in except* block
+ return # E: "return" not allowed in except* block
+[builtins fixtures/exception.pyi]
+
+[case testBreakContinueReturnInExceptStarBlock1]
+# flags: --python-version 3.11
+from typing import Iterable
+def foo(x: Iterable[int]) -> None:
+ for _ in x:
+ try:
+ pass
+ except* Exception:
+ continue # E: "continue" not allowed in except* block
+ except* Exception:
+ for _ in x:
+ continue
+ break # E: "break" not allowed in except* block
+ except* Exception:
+ return # E: "return" not allowed in except* block
+[builtins fixtures/exception.pyi]
+
+[case testBreakContinueReturnInExceptStarBlock2]
+# flags: --python-version 3.11
+def foo():
+ while True:
+ try:
+ pass
+ except* Exception:
+ def inner():
+ while True:
+ if 1 < 1:
+ continue
+ else:
+ break
+ return
+ if 1 < 2:
+ break # E: "break" not allowed in except* block
+ if 1 < 2:
+ continue # E: "continue" not allowed in except* block
+ return # E: "return" not allowed in except* block
+[builtins fixtures/exception.pyi]
| mypy doesn't flag continue/break/return in except*
**Feature**
continue/break/return in except* is a syntax error, see https://peps.python.org/pep-0654/#forbidden-combinations.
```python
def foo():
for _ in range(5):
try:
...
except* Exception:
continue
except* ValueError:
break
except* TypeError:
return
```
```sh
$ mypy --python-version=3.11 foo.py
Success: no issues found in 1 source file
$ mypy --version
mypy 1.13.0 (compiled: yes)
```
**Pitch**
mypy does give an error if you try to combine `except` and `except*`, and as far as I know it flags all other syntax errors as well, so it should probably handle this one too. Leaving it unflagged might also cause users to adjust code to avoid `unreachable code` warnings needlessly, when in fact they have bigger problems to fix first.
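For reference, here is an illustrative snippet (not from the original report) showing that CPython itself rejects this at compile time, which is the runtime behaviour mypy would be matching:
```python
# Compiling a function that uses `return` inside an except* block raises SyntaxError
# on CPython 3.11+ (on older versions the `except*` syntax itself is rejected).
src = """
def f():
    try:
        ...
    except* Exception:
        return
"""
try:
    compile(src, "<demo>", "exec")
except SyntaxError as e:
    print(e.msg)  # exact wording depends on the CPython version
```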
| I'd like to work on this!
@coldwolverine great! You can check out [CONTRIBUTING.md](https://github.com/python/mypy/blob/master/CONTRIBUTING.md) and the [Developer Guides](https://github.com/python/mypy/wiki/Developer-Guides) for contributing guidelines and tips on where to start.
I'm guessing this feature will involve adding some new state to the semantic analyzer to keep track of whether it's currently inside an `except*` block. While working on this feature, be mindful of edge cases like nested try statements and function definitions or loops inside the `except*` block. | 1,731,130,375,000 | [] | Feature Request | [
"mypy/semanal.py:SemanticAnalyzer.__init__",
"mypy/semanal.py:SemanticAnalyzer.visit_func_def",
"mypy/semanal.py:SemanticAnalyzer.visit_return_stmt",
"mypy/semanal.py:SemanticAnalyzer.visit_while_stmt",
"mypy/semanal.py:SemanticAnalyzer.visit_for_stmt",
"mypy/semanal.py:SemanticAnalyzer.visit_break_stmt",
"mypy/semanal.py:SemanticAnalyzer.visit_continue_stmt",
"mypy/semanal.py:SemanticAnalyzer.analyze_try_stmt"
] | [
"mypy/semanal.py:SemanticAnalyzer.inside_except_star_block_set"
] | 8 |
python/mypy | python__mypy-18119 | 3b00002acdf098e7241df8f2e1843f8b8260b168 | diff --git a/mypy/checkpattern.py b/mypy/checkpattern.py
index 6b4fa35f9c49..4323daa68025 100644
--- a/mypy/checkpattern.py
+++ b/mypy/checkpattern.py
@@ -158,7 +158,8 @@ def visit_or_pattern(self, o: OrPattern) -> PatternType:
for pattern in o.patterns:
pattern_type = self.accept(pattern, current_type)
pattern_types.append(pattern_type)
- current_type = pattern_type.rest_type
+ if not is_uninhabited(pattern_type.type):
+ current_type = pattern_type.rest_type
#
# Collect the final type
| diff --git a/test-data/unit/check-python310.test b/test-data/unit/check-python310.test
index 58b70d7b74d8..3ef964d3c0b9 100644
--- a/test-data/unit/check-python310.test
+++ b/test-data/unit/check-python310.test
@@ -1702,6 +1702,22 @@ def f(x: int | str) -> int:
case str() as s:
return 1
+[case testMatchOrPatternExhaustiveness]
+from typing import NoReturn, Literal
+def assert_never(x: NoReturn) -> None: ...
+
+Color = Literal["blue", "green", "red"]
+c: Color
+
+match c:
+ case "blue":
+ reveal_type(c) # N: Revealed type is "Literal['blue']"
+ case "green" | "notColor":
+ reveal_type(c) # N: Revealed type is "Literal['green']"
+ case _:
+ assert_never(c) # E: Argument 1 to "assert_never" has incompatible type "Literal['red']"; expected "Never"
+[typing fixtures/typing-typeddict.pyi]
+
[case testMatchAsPatternIntersection-skip]
class A: pass
class B: pass
| Structural Pattern Matching Exhaustiveness check when using OR
**Bug Report**
Mypy fails to identify missing value comparisons in my `match` when I use `|`
**To Reproduce**
[Gist URL](https://gist.github.com/mypy-play/cba9c66b56fcbfbd383ca559518ebac5)
[Playground URL](https://mypy-play.net/?mypy=latest&python=3.12&flags=strict&gist=cba9c66b56fcbfbd383ca559518ebac5)
```python
from typing import Literal, assert_never
Color = Literal["blue", "green", "red"]
def print_color(color: Color) -> None:
match color:
case "blue":
print("blue")
case "green" | "kangaroo":
print("green")
case _:
assert_never(color)
```
**Expected Behavior**
I expected mypy to error because I missed checking against `red`
```console
colors.py:13: error: Argument 1 to "assert_never" has incompatible type "Literal['red']"; expected "Never" [arg-type]
Found 1 error in 1 file (checked 1 source file)
```
**Actual Behavior**
Mypy doesn't raise any errors
```console
Success: no issues found in 1 source file
```
**Extra Context**
This only happens when I use `|`.
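For comparison, an illustrative variant (not part of the original report, reusing `Color` and `assert_never` from the snippet above): if the cases are written without an or-pattern, mypy flags the missing `"red"` as expected.
```python
def print_color_split(color: Color) -> None:
    match color:
        case "blue":
            print("blue")
        case "green":
            print("green")
        case _:
            # error: Argument 1 to "assert_never" has incompatible type
            # "Literal['red']"; expected "Never"  [arg-type]
            assert_never(color)
```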
I don't use pyright, but I ran it to see if I was the one missing something, and I believe it does flag this correctly:
```console
colors.py:13:24 - error: Argument of type "Literal['red']" cannot be assigned to parameter "arg" of type "Never" in function "assert_never"
Type "Literal['red']" is not assignable to type "Never" (reportArgumentType)
```
**Your Environment**
- Mypy version used: `mypy 1.13.0 (compiled: yes)`
- Mypy command-line flags: `--strict`
- Python version used: `Python 3.11.6`
| 1,730,969,028,000 | [] | Bug Report | [
"mypy/checkpattern.py:PatternChecker.visit_or_pattern"
] | [] | 1 |
|
python/mypy | python__mypy-18102 | 1f200dde451493a070b9464011998dd837df4bc2 | diff --git a/mypy/subtypes.py b/mypy/subtypes.py
index a63db93fd9cb..11f3421331a5 100644
--- a/mypy/subtypes.py
+++ b/mypy/subtypes.py
@@ -625,8 +625,8 @@ def visit_instance(self, left: Instance) -> bool:
return is_named_instance(item, "builtins.object")
if isinstance(right, LiteralType) and left.last_known_value is not None:
return self._is_subtype(left.last_known_value, right)
- if isinstance(right, CallableType):
- # Special case: Instance can be a subtype of Callable.
+ if isinstance(right, FunctionLike):
+ # Special case: Instance can be a subtype of Callable / Overloaded.
call = find_member("__call__", left, left, is_operator=True)
if call:
return self._is_subtype(call, right)
| diff --git a/test-data/unit/check-protocols.test b/test-data/unit/check-protocols.test
index 5ed2351e33e6..0367be3dde65 100644
--- a/test-data/unit/check-protocols.test
+++ b/test-data/unit/check-protocols.test
@@ -4215,3 +4215,24 @@ def g4(a: Input[bytes], b: Output[str]) -> None:
f(a, b) # E: Cannot infer type argument 1 of "f"
[builtins fixtures/tuple.pyi]
+
+[case testOverloadProtocolSubtyping]
+from typing import Protocol, Self, overload
+
+class NumpyFloat:
+ __add__: "FloatOP"
+
+class FloatOP(Protocol):
+ @overload
+ def __call__(self, other: float) -> NumpyFloat: ...
+ @overload
+ def __call__(self, other: NumpyFloat) -> NumpyFloat: ...
+
+class SupportsAdd(Protocol):
+ @overload
+ def __add__(self, other: float) -> Self: ...
+ @overload
+ def __add__(self, other: NumpyFloat) -> Self: ...
+
+x: SupportsAdd = NumpyFloat()
+[builtins fixtures/tuple.pyi]
| Problems with "expected overloaded function" when method hinted via Callback Protocol (⇝`numpy`)
**Bug Report**
`mypy` complains about expecting an overloaded function when a method is hinted via a callback protocol. This technique is used in the `numpy` stubs, and it causes `mypy` to complain when trying to describe numpy scalars with simple protocols.
**To Reproduce**
Below is a MWE [[mypy-playground]](https://mypy-play.net/?mypy=latest&python=3.12&gist=8041e0145f43507bed8c86104a5fb5b2), based on how the [numpy stubs declare the methods on scalars](https://github.com/numpy/numpy/blob/bd1f6067a13f8bbdf99871277426dfaad9f7602d/numpy/__init__.pyi#L3582). Note that [[pyright-playground]](https://pyright-play.net/?code=GYJw9gtgBALgngBwJYDsDmUkQWEMoAK4MYAxmADYA0UAygKYXA1gBu9IFYAhgCYBQ-UhW4BnUVAByAV2xwAYl24wAXPygaoAfS19eOlVABEinjADyBI4OFiJp5ZYAURMCXIUAlGs1QAAmwcSgK%2BvPTA2lqk3BQUOk6ijMxQbgAWHIbASjCeUAC0AHxSsggK2YYAdFXqmgHsnDwhmmEROtGx8YlMLDDpIIYycg45%2BUWDpcOV1UIi4nTSCDh4ogCCvLwuxGSU3jUadUGNe1AtkXqdST19mdm5hXRJUxXHBw18x6c651oJlym9GWKQ1uoweTCeggAHoZaAsljBVusoABeIETbJOTz8IA) does not report any issues.
```python
from typing import Protocol, Self, overload
class NumpyFloat:
__add__: "FloatOP"
class FloatOP(Protocol):
@overload
def __call__(self, other: float) -> NumpyFloat: ...
@overload
def __call__(self, other: NumpyFloat) -> NumpyFloat: ...
class SupportsAdd(Protocol):
@overload
def __add__(self, other: float) -> Self: ...
@overload
def __add__(self, other: NumpyFloat) -> Self: ...
x: SupportsAdd = NumpyFloat() # expected overloaded function, got "FloatOP"
```
Concrete example with `numpy==2.1.3` (again, `pyright` reports no errors):
```python
from typing import Protocol, Self, overload
import numpy as np
class TimeDelta(Protocol):
def __sub__(self, other: Self, /) -> Self: ...
class TimeStamp[TD: TimeDelta](Protocol):
@overload
def __sub__(self, other: Self, /) -> TD: ...
@overload
def __sub__(self, other: TD, /) -> Self: ...
x: TimeStamp[np.timedelta64] = np.datetime64("2021-01-01") # ✅
y: TimeStamp[np.float64] = np.float64(10.0) # expected overloaded function, got "_FloatOp[_64Bit]"
```
| 1,730,686,173,000 | [] | Bug Report | [
"mypy/subtypes.py:SubtypeVisitor.visit_instance"
] | [] | 1 |
|
python/mypy | python__mypy-18091 | 1f200dde451493a070b9464011998dd837df4bc2 | diff --git a/mypy/checkpattern.py b/mypy/checkpattern.py
index 6b4fa35f9c49..f54b3c53f907 100644
--- a/mypy/checkpattern.py
+++ b/mypy/checkpattern.py
@@ -46,6 +46,7 @@
TypedDictType,
TypeOfAny,
TypeVarTupleType,
+ TypeVarType,
UninhabitedType,
UnionType,
UnpackType,
@@ -342,13 +343,11 @@ def visit_sequence_pattern(self, o: SequencePattern) -> PatternType:
new_inner_type = UninhabitedType()
for typ in new_inner_types:
new_inner_type = join_types(new_inner_type, typ)
- new_type = self.construct_sequence_child(current_type, new_inner_type)
- if is_subtype(new_type, current_type):
- new_type, _ = self.chk.conditional_types_with_intersection(
- current_type, [get_type_range(new_type)], o, default=current_type
- )
+ if isinstance(current_type, TypeVarType):
+ new_bound = self.narrow_sequence_child(current_type.upper_bound, new_inner_type, o)
+ new_type = current_type.copy_modified(upper_bound=new_bound)
else:
- new_type = current_type
+ new_type = self.narrow_sequence_child(current_type, new_inner_type, o)
return PatternType(new_type, rest_type, captures)
def get_sequence_type(self, t: Type, context: Context) -> Type | None:
@@ -447,6 +446,16 @@ def expand_starred_pattern_types(
return new_types
+ def narrow_sequence_child(self, outer_type: Type, inner_type: Type, ctx: Context) -> Type:
+ new_type = self.construct_sequence_child(outer_type, inner_type)
+ if is_subtype(new_type, outer_type):
+ new_type, _ = self.chk.conditional_types_with_intersection(
+ outer_type, [get_type_range(new_type)], ctx, default=outer_type
+ )
+ else:
+ new_type = outer_type
+ return new_type
+
def visit_starred_pattern(self, o: StarredPattern) -> PatternType:
captures: dict[Expression, Type] = {}
if o.capture is not None:
| diff --git a/test-data/unit/check-python310.test b/test-data/unit/check-python310.test
index 58b70d7b74d8..f6f4c9cf167f 100644
--- a/test-data/unit/check-python310.test
+++ b/test-data/unit/check-python310.test
@@ -2343,3 +2343,30 @@ def test(xs: Tuple[Unpack[Ts]]) -> None:
reveal_type(b3) # N: Revealed type is "builtins.list[builtins.object]"
reveal_type(c3) # N: Revealed type is "builtins.int"
[builtins fixtures/tuple.pyi]
+
+[case testMatchSequencePatternTypeVarBoundNoCrash]
+# This was crashing: https://github.com/python/mypy/issues/18089
+from typing import TypeVar, Sequence, Any
+
+T = TypeVar("T", bound=Sequence[Any])
+
+def f(x: T) -> None:
+ match x:
+ case [_]:
+ pass
+[builtins fixtures/tuple.pyi]
+
+[case testMatchSequencePatternTypeVarBoundNarrows]
+from typing import TypeVar, Sequence
+
+T = TypeVar("T", bound=Sequence[int | str])
+
+def accept_seq_int(x: Sequence[int]): ...
+
+def f(x: T) -> None:
+ match x:
+ case [1, 2]:
+ accept_seq_int(x)
+ case _:
+ accept_seq_int(x) # E: Argument 1 to "accept_seq_int" has incompatible type "T"; expected "Sequence[int]"
+[builtins fixtures/tuple.pyi]
| Crash on sequence match statement in generic class with restricted type variable bound to sequence
**Crash Report**
mypy crashes when the following conditions are met:
1. A generic class is present.
2. The generic class refers to a typevar `T` that is bound to a sequence.
3. The generic class has a method that attempts to pattern match a method parameter of type `T`
4. One of the pattern matching cases is a sequence pattern such as `_, _`.
This crash is similar to but distinct from this issue: https://github.com/python/mypy/issues/12448
**Traceback**
```
$ mypy mypy_mre.py --show-traceback
mypy_mre.py:7: error: INTERNAL ERROR -- Please try using mypy master on GitHub:
https://mypy.readthedocs.io/en/stable/common_issues.html#using-a-development-mypy-build
Please report a bug at https://github.com/python/mypy/issues
version: 1.14.0+dev.05a9e79068a5830e57264390c9f6bca859e92053
Traceback (most recent call last):
File "~/venv-3.11.6/bin/mypy", line 8, in <module>
sys.exit(console_entry())
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/__main__.py", line 15, in console_entry
main()
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/main.py", line 109, in main
res, messages, blockers = run_build(sources, options, fscache, t0, stdout, stderr)
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/main.py", line 193, in run_build
res = build.build(sources, options, None, flush_errors, fscache, stdout, stderr)
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/build.py", line 194, in build
result = _build(
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/build.py", line 269, in _build
graph = dispatch(sources, manager, stdout)
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/build.py", line 2937, in dispatch
process_graph(graph, manager)
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/build.py", line 3335, in process_graph
process_stale_scc(graph, scc, manager)
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/build.py", line 3436, in process_stale_scc
graph[id].type_check_first_pass()
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/build.py", line 2306, in type_check_first_pass
self.type_checker().check_first_pass()
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/checker.py", line 483, in check_first_pass
self.accept(d)
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/checker.py", line 592, in accept
stmt.accept(self)
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/nodes.py", line 1196, in accept
return visitor.visit_class_def(self)
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/checker.py", line 2461, in visit_class_def
self.accept(defn.defs)
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/checker.py", line 592, in accept
stmt.accept(self)
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/nodes.py", line 1277, in accept
return visitor.visit_block(self)
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/checker.py", line 2951, in visit_block
self.accept(s)
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/checker.py", line 592, in accept
stmt.accept(self)
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/nodes.py", line 827, in accept
return visitor.visit_func_def(self)
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/checker.py", line 1046, in visit_func_def
self._visit_func_def(defn)
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/checker.py", line 1050, in _visit_func_def
self.check_func_item(defn, name=defn.name)
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/checker.py", line 1084, in check_func_item
self.check_func_def(defn, typ, name, allow_empty)
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/checker.py", line 1360, in check_func_def
self.accept(item.body)
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/checker.py", line 592, in accept
stmt.accept(self)
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/nodes.py", line 1277, in accept
return visitor.visit_block(self)
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/checker.py", line 2951, in visit_block
self.accept(s)
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/checker.py", line 592, in accept
stmt.accept(self)
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/nodes.py", line 1661, in accept
return visitor.visit_match_stmt(self)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/checker.py", line 5298, in visit_match_stmt
pattern_types = [self.pattern_checker.accept(p, subject_type) for p in s.patterns]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/checker.py", line 5298, in <listcomp>
pattern_types = [self.pattern_checker.accept(p, subject_type) for p in s.patterns]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/checkpattern.py", line 129, in accept
result = o.accept(self)
^^^^^^^^^^^^^^
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/patterns.py", line 93, in accept
return visitor.visit_sequence_pattern(self)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/checkpattern.py", line 345, in visit_sequence_pattern
new_type = self.construct_sequence_child(current_type, new_inner_type)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "~/venv-3.11.6/lib/python3.11/site-packages/mypy/checkpattern.py", line 766, in construct_sequence_child
assert isinstance(proper_type, Instance)
AssertionError:
mypy_mre.py:7: : note: use --pdb to drop into pdb
```
**To Reproduce**
Here is an MRE:
```python
from typing import Generic, TypeVar, Sequence, Any
T = TypeVar("T", bound=Sequence[Any])
class C(Generic[T]):
def f(self, x: T) -> None:
match x:
case _, _:
pass
case _:
pass
```
**Your Environment**
- Mypy version used: 1.14.0+dev.05a9e79068a5830e57264390c9f6bca859e92053 (current master)
- Mypy command-line flags: None
- Mypy configuration options from `mypy.ini` (and other config files):
```toml
check_untyped_defs = true
disallow_any_generics = true
disallow_incomplete_defs = true
disallow_subclassing_any = true
disallow_untyped_calls = true
disallow_untyped_decorators = true
disallow_untyped_defs = true
extra_checks = true
no_implicit_reexport = true
python_version = "3.11"
strict_equality = true
warn_redundant_casts = true
warn_return_any = true
warn_unused_configs = true
warn_unused_ignores = true
```
- Python version used: 3.11.6
- Operating system and version: AlmaLinux 9.4 under WSL 2 on Windows 10
| 1,730,565,453,000 | [] | Bug Report | [
"mypy/checkpattern.py:PatternChecker.visit_sequence_pattern"
] | [
"mypy/checkpattern.py:PatternChecker.narrow_sequence_child"
] | 1 |
|
python/mypy | python__mypy-18020 | e106dd7a0653c24d67597adf3ae6939d7ff9a376 | diff --git a/mypy/checker.py b/mypy/checker.py
index 4b3d6c3298b4..f52bebdaa052 100644
--- a/mypy/checker.py
+++ b/mypy/checker.py
@@ -1072,46 +1072,7 @@ def _visit_func_def(self, defn: FuncDef) -> None:
if defn.original_def:
# Override previous definition.
new_type = self.function_type(defn)
- if isinstance(defn.original_def, FuncDef):
- # Function definition overrides function definition.
- old_type = self.function_type(defn.original_def)
- if not is_same_type(new_type, old_type):
- self.msg.incompatible_conditional_function_def(defn, old_type, new_type)
- else:
- # Function definition overrides a variable initialized via assignment or a
- # decorated function.
- orig_type = defn.original_def.type
- if orig_type is None:
- # If other branch is unreachable, we don't type check it and so we might
- # not have a type for the original definition
- return
- if isinstance(orig_type, PartialType):
- if orig_type.type is None:
- # Ah this is a partial type. Give it the type of the function.
- orig_def = defn.original_def
- if isinstance(orig_def, Decorator):
- var = orig_def.var
- else:
- var = orig_def
- partial_types = self.find_partial_types(var)
- if partial_types is not None:
- var.type = new_type
- del partial_types[var]
- else:
- # Trying to redefine something like partial empty list as function.
- self.fail(message_registry.INCOMPATIBLE_REDEFINITION, defn)
- else:
- name_expr = NameExpr(defn.name)
- name_expr.node = defn.original_def
- self.binder.assign_type(name_expr, new_type, orig_type)
- self.check_subtype(
- new_type,
- orig_type,
- defn,
- message_registry.INCOMPATIBLE_REDEFINITION,
- "redefinition with type",
- "original type",
- )
+ self.check_func_def_override(defn, new_type)
def check_func_item(
self,
@@ -1147,6 +1108,49 @@ def check_func_item(
if dataclasses_plugin.is_processed_dataclass(defn.info):
dataclasses_plugin.check_post_init(self, defn, defn.info)
+ def check_func_def_override(self, defn: FuncDef, new_type: FunctionLike) -> None:
+ assert defn.original_def is not None
+ if isinstance(defn.original_def, FuncDef):
+ # Function definition overrides function definition.
+ old_type = self.function_type(defn.original_def)
+ if not is_same_type(new_type, old_type):
+ self.msg.incompatible_conditional_function_def(defn, old_type, new_type)
+ else:
+ # Function definition overrides a variable initialized via assignment or a
+ # decorated function.
+ orig_type = defn.original_def.type
+ if orig_type is None:
+ # If other branch is unreachable, we don't type check it and so we might
+ # not have a type for the original definition
+ return
+ if isinstance(orig_type, PartialType):
+ if orig_type.type is None:
+ # Ah this is a partial type. Give it the type of the function.
+ orig_def = defn.original_def
+ if isinstance(orig_def, Decorator):
+ var = orig_def.var
+ else:
+ var = orig_def
+ partial_types = self.find_partial_types(var)
+ if partial_types is not None:
+ var.type = new_type
+ del partial_types[var]
+ else:
+ # Trying to redefine something like partial empty list as function.
+ self.fail(message_registry.INCOMPATIBLE_REDEFINITION, defn)
+ else:
+ name_expr = NameExpr(defn.name)
+ name_expr.node = defn.original_def
+ self.binder.assign_type(name_expr, new_type, orig_type)
+ self.check_subtype(
+ new_type,
+ orig_type,
+ defn,
+ message_registry.INCOMPATIBLE_REDEFINITION,
+ "redefinition with type",
+ "original type",
+ )
+
@contextmanager
def enter_attribute_inference_context(self) -> Iterator[None]:
old_types = self.inferred_attribute_types
@@ -5120,6 +5124,10 @@ def visit_decorator_inner(self, e: Decorator, allow_empty: bool = False) -> None
if e.type and not isinstance(get_proper_type(e.type), (FunctionLike, AnyType)):
self.fail(message_registry.BAD_CONSTRUCTOR_TYPE, e)
+ if e.func.original_def and isinstance(sig, FunctionLike):
+ # Function definition overrides function definition.
+ self.check_func_def_override(e.func, sig)
+
def check_for_untyped_decorator(
self, func: FuncDef, dec_type: Type, dec_expr: Expression
) -> None:
| diff --git a/test-data/unit/check-functions.test b/test-data/unit/check-functions.test
index 96f9815019e6..b8a02a1ec7d4 100644
--- a/test-data/unit/check-functions.test
+++ b/test-data/unit/check-functions.test
@@ -1474,7 +1474,7 @@ def dec(f) -> Callable[[int], None]: pass
x = int()
if x:
- def f(x: int) -> None: pass
+ def f(x: int, /) -> None: pass
else:
@dec
def f(): pass
@@ -1489,9 +1489,12 @@ x = int()
if x:
def f(x: str) -> None: pass
else:
- # TODO: Complain about incompatible redefinition
@dec
- def f(): pass
+ def f(): pass # E: All conditional function variants must have identical signatures \
+ # N: Original: \
+ # N: def f(x: str) -> None \
+ # N: Redefinition: \
+ # N: def f(int, /) -> None
[case testConditionalFunctionDefinitionUnreachable]
def bar() -> None:
@@ -1599,7 +1602,7 @@ else:
def f():
yield
[file m.py]
-def f(): pass
+def f() -> None: pass
[case testDefineConditionallyAsImportedAndDecoratedWithInference]
if int():
diff --git a/test-data/unit/check-newsemanal.test b/test-data/unit/check-newsemanal.test
index 511c7b003015..fe02ac3ccd5e 100644
--- a/test-data/unit/check-newsemanal.test
+++ b/test-data/unit/check-newsemanal.test
@@ -1908,9 +1908,9 @@ else:
@dec
def f(x: int) -> None:
1() # E: "int" not callable
-reveal_type(f) # N: Revealed type is "def (x: builtins.str)"
+reveal_type(f) # N: Revealed type is "def (builtins.str)"
[file m.py]
-def f(x: str) -> None: pass
+def f(x: str, /) -> None: pass
[case testNewAnalyzerConditionallyDefineFuncOverVar]
from typing import Callable
diff --git a/test-data/unit/check-overloading.test b/test-data/unit/check-overloading.test
index e414c1c9b0b6..9d01ce6bd480 100644
--- a/test-data/unit/check-overloading.test
+++ b/test-data/unit/check-overloading.test
@@ -6463,7 +6463,7 @@ class D: ...
def f1(g: A) -> A: ...
if True:
@overload # E: Single overload definition, multiple required
- def f1(g: B) -> B: ...
+ def f1(g: B) -> B: ... # E: Incompatible redefinition (redefinition with type "Callable[[B], B]", original type "Callable[[A], A]")
if maybe_true: # E: Condition can't be inferred, unable to merge overloads \
# E: Name "maybe_true" is not defined
@overload
@@ -6480,14 +6480,14 @@ if True:
def f2(g: B) -> B: ...
elif maybe_true: # E: Name "maybe_true" is not defined
@overload # E: Single overload definition, multiple required
- def f2(g: C) -> C: ...
+ def f2(g: C) -> C: ... # E: Incompatible redefinition (redefinition with type "Callable[[C], C]", original type "Callable[[A], A]")
def f2(g): ... # E: Name "f2" already defined on line 21
@overload # E: Single overload definition, multiple required
def f3(g: A) -> A: ...
if True:
@overload # E: Single overload definition, multiple required
- def f3(g: B) -> B: ...
+ def f3(g: B) -> B: ... # E: Incompatible redefinition (redefinition with type "Callable[[B], B]", original type "Callable[[A], A]")
if True:
pass # Some other node
@overload # E: Name "f3" already defined on line 32 \
| Conditional decorated functions are not type checked
**Bug Report**
Going through Mypy's tests, I realized that some tests are passing that shouldn't. This involves for example:
- `testConditionalFunctionDefinitionUsingDecorator3` (there's an error in the test that the test author probably didn't notice: positional-only args of a callable are never assignable to a normal function with positional-or-keyword params, as shown in the sketch after this list; typically an error in Mypy would look like this: `Incompatible redefinition (redefinition with type "Callable[[int], None]", original type "Callable[[Arg(int, 'x')], None]")`)
- `testConditionalFunctionDefinitionUsingDecorator4` (there's even a TODO in there that complains about this not being done)
- `testNewAnalyzerConditionalDecoratedFunc`
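For illustration, a minimal sketch (hypothetical names, not taken from the test suite) of why a positional-only callable type and a function with a positional-or-keyword parameter are not interchangeable:
```python
from typing import Callable

def dec(f: Callable[..., None]) -> Callable[[int], None]:
    return lambda value: f(value)   # result only accepts a positional int

def named(x: int) -> None:
    pass                            # `x` is positional-or-keyword

g = dec(named)
named(x=1)   # fine: the parameter can be passed by keyword
g(1)         # fine
# g(x=1)     # rejected by mypy ("Unexpected keyword argument"), which is why
#            # the two conditional variants do not have identical signatures
```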
**To Reproduce**
```python
from typing import Callable
def dec(f) -> Callable[[int], None]: raise NotImplementedError
if int():
def f(x: str): pass
else:
@dec
def f() -> None: pass
```
https://mypy-play.net/?mypy=latest&python=3.12&gist=18b131da809d740eced20dd7652ea213
**Expected Behavior**
It would be nice if Mypy complained here (like in AFAIK all other conditional definitions).
I'm happy to assist if somebody wants to fix this.
| 1,729,649,372,000 | [] | Bug Report | [
"mypy/checker.py:TypeChecker._visit_func_def",
"mypy/checker.py:TypeChecker.visit_decorator_inner"
] | [
"mypy/checker.py:TypeChecker.check_func_def_override"
] | 2 |
|
python/mypy | python__mypy-17991 | df7cd88eded0702fc1b8bb225b286a514a442e37 | diff --git a/mypy/checkexpr.py b/mypy/checkexpr.py
index b06aaa8f89f5..08d7345452fb 100644
--- a/mypy/checkexpr.py
+++ b/mypy/checkexpr.py
@@ -399,8 +399,8 @@ def analyze_ref_expr(self, e: RefExpr, lvalue: bool = False) -> Type:
# TODO: always do this in type_object_type by passing the original context
result.ret_type.line = e.line
result.ret_type.column = e.column
- if isinstance(get_proper_type(self.type_context[-1]), TypeType):
- # This is the type in a Type[] expression, so substitute type
+ if is_type_type_context(self.type_context[-1]):
+ # This is the type in a type[] expression, so substitute type
# variables with Any.
result = erasetype.erase_typevars(result)
elif isinstance(node, MypyFile):
@@ -6617,3 +6617,12 @@ def get_partial_instance_type(t: Type | None) -> PartialType | None:
if t is None or not isinstance(t, PartialType) or t.type is None:
return None
return t
+
+
+def is_type_type_context(context: Type | None) -> bool:
+ context = get_proper_type(context)
+ if isinstance(context, TypeType):
+ return True
+ if isinstance(context, UnionType):
+ return any(is_type_type_context(item) for item in context.items)
+ return False
| diff --git a/test-data/unit/check-typevar-defaults.test b/test-data/unit/check-typevar-defaults.test
index 9ca67376da26..22e2594eb38b 100644
--- a/test-data/unit/check-typevar-defaults.test
+++ b/test-data/unit/check-typevar-defaults.test
@@ -717,3 +717,15 @@ def func_d3(
reveal_type(c) # N: Revealed type is "__main__.B[__main__.A[builtins.dict[builtins.int, builtins.float]]]"
reveal_type(d) # N: Revealed type is "__main__.B[builtins.int]"
[builtins fixtures/dict.pyi]
+
+[case testTypeVarDefaultsAndTypeObjectTypeInUnion]
+from __future__ import annotations
+from typing import Generic
+from typing_extensions import TypeVar
+
+_I = TypeVar("_I", default=int)
+
+class C(Generic[_I]): pass
+
+t: type[C] | int = C
+[builtins fixtures/tuple.pyi]
| [1.12 regression] New error on type[memoryView] | type[str]
**Bug Report**
New to 1.12.0: an incorrect error is reported on a union of two types, one of them being memoryview.
Complete file checked:
```python
a_type: type[memoryview] | type[str] = memoryview
```
mypy 1.11.2: no error
mypy 1.12.0:
Incompatible default for argument "a_type" (default has type "type[memoryview[Any]]", argument has type "type[memoryview[int]] | type[str]")
**Your Environment**
GitHub Actions, Ubuntu (various), Python (various)
- Mypy command-line flags: no flags
- Mypy configuration options from `mypy.ini` (and other config files): None
- Python version used (via GitHub Actions): 3.8, 3.9, 3.10, 3.11, 3.12 and 3.13

while
```python
a_type: type[memoryview] = memoryview
```
works OK!
| 1,729,245,825,000 | [] | Bug Report | [
"mypy/checkexpr.py:ExpressionChecker.analyze_ref_expr"
] | [
"mypy/checkexpr.py:is_type_type_context"
] | 1 |
|
python/mypy | python__mypy-17903 | 9e24b56e86fe10b2d89631854770bbd139bcf55d | diff --git a/mypy/plugins/functools.py b/mypy/plugins/functools.py
index 6650af637519..f09ea88f7162 100644
--- a/mypy/plugins/functools.py
+++ b/mypy/plugins/functools.py
@@ -18,6 +18,7 @@
Type,
TypeOfAny,
UnboundType,
+ UnionType,
get_proper_type,
)
@@ -130,7 +131,19 @@ def partial_new_callback(ctx: mypy.plugin.FunctionContext) -> Type:
if isinstance(get_proper_type(ctx.arg_types[0][0]), Overloaded):
# TODO: handle overloads, just fall back to whatever the non-plugin code does
return ctx.default_return_type
- fn_type = ctx.api.extract_callable_type(ctx.arg_types[0][0], ctx=ctx.default_return_type)
+ return handle_partial_with_callee(ctx, callee=ctx.arg_types[0][0])
+
+
+def handle_partial_with_callee(ctx: mypy.plugin.FunctionContext, callee: Type) -> Type:
+ if not isinstance(ctx.api, mypy.checker.TypeChecker): # use internals
+ return ctx.default_return_type
+
+ if isinstance(callee_proper := get_proper_type(callee), UnionType):
+ return UnionType.make_union(
+ [handle_partial_with_callee(ctx, item) for item in callee_proper.items]
+ )
+
+ fn_type = ctx.api.extract_callable_type(callee, ctx=ctx.default_return_type)
if fn_type is None:
return ctx.default_return_type
| diff --git a/test-data/unit/check-functools.test b/test-data/unit/check-functools.test
index 50de3789ebd2..bee30931a92b 100644
--- a/test-data/unit/check-functools.test
+++ b/test-data/unit/check-functools.test
@@ -346,15 +346,32 @@ fn1: Union[Callable[[int], int], Callable[[int], int]]
reveal_type(functools.partial(fn1, 2)()) # N: Revealed type is "builtins.int"
fn2: Union[Callable[[int], int], Callable[[int], str]]
-reveal_type(functools.partial(fn2, 2)()) # N: Revealed type is "builtins.object"
+reveal_type(functools.partial(fn2, 2)()) # N: Revealed type is "Union[builtins.int, builtins.str]"
fn3: Union[Callable[[int], int], str]
reveal_type(functools.partial(fn3, 2)()) # E: "str" not callable \
- # E: "Union[Callable[[int], int], str]" not callable \
# N: Revealed type is "builtins.int" \
# E: Argument 1 to "partial" has incompatible type "Union[Callable[[int], int], str]"; expected "Callable[..., int]"
[builtins fixtures/tuple.pyi]
+[case testFunctoolsPartialUnionOfTypeAndCallable]
+import functools
+from typing import Callable, Union, Type
+from typing_extensions import TypeAlias
+
+class FooBar:
+ def __init__(self, arg1: str) -> None:
+ pass
+
+def f1(t: Union[Type[FooBar], Callable[..., 'FooBar']]) -> None:
+ val = functools.partial(t)
+
+FooBarFunc: TypeAlias = Callable[..., 'FooBar']
+
+def f2(t: Union[Type[FooBar], FooBarFunc]) -> None:
+ val = functools.partial(t)
+[builtins fixtures/tuple.pyi]
+
[case testFunctoolsPartialExplicitType]
from functools import partial
from typing import Type, TypeVar, Callable
| Union between a callable and a type not respected by functools.partial
Seems similar to #17646 but not fixed in https://mypy-play.net/?mypy=master&python=3.12
The linked #17659 is not applicable because I am not using Self.
Sample code:
```python
import functools
import typing as T
from typing_extensions import TypeAlias
FooBarFunc: TypeAlias = T.Callable[..., 'FooBar']
class FooBar:
def __init__(self, arg1: str) -> None:
pass
def transform(t: T.Union[T.Type[FooBar], FooBarFunc]) -> None:
val = functools.partial(t)
```
Error:
```
main.py:12: error: "type[FooBar] | Callable[..., FooBar]" not callable [misc]
```
The codebase where this was discovered: https://github.com/mesonbuild/meson/blob/83f8de5357f31d6448ae033e1e8ed2b22c8c306a/mesonbuild/dependencies/factory.py#L75-L95
```
mesonbuild/dependencies/factory.py:95:38: error: "type[CMakeDependency] | Callable[..., CMakeDependency]" not callable [misc]
```
Both sides of the union if cast and used independently, work.
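A rough sketch of the cast-based narrowing that makes each side work on its own (mirroring the reproducer's names; illustration only, not a suggested fix):
```python
import functools
import typing as T

class FooBar:
    def __init__(self, arg1: str) -> None:
        pass

FooBarFunc: T.TypeAlias = T.Callable[..., FooBar]

def transform(t: T.Union[T.Type[FooBar], FooBarFunc]) -> None:
    # each member of the union is accepted by functools.partial on its own
    as_type = functools.partial(T.cast(T.Type[FooBar], t))
    as_func = functools.partial(T.cast(FooBarFunc, t))
```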
| This is sort of underlying behaviour in mypy that is exposed now that mypy checks functools.partial
E.g. on mypy 1.11 both of these will complain
```
def join(t1: T.Type[FooBar], t2: FooBarFunc):
val1 = [t1, t2]
val1[0]("asdf")
val2 = t1 if random.random() < 0.5 else t2
val2("asdf")
```
This could be fixed by improving the join logic (improving callable joins would fix many existing mypy issues) or potentially by changing the functools.partial logic to avoid using joins on unions (this could get messy) | 1,728,476,591,000 | [] | Bug Report | [
"mypy/plugins/functools.py:partial_new_callback"
] | [
"mypy/plugins/functools.py:handle_partial_with_callee"
] | 1 |
python/mypy | python__mypy-17894 | 46c108e35c215aef53b638b1f9e34e4665759ec4 | diff --git a/mypy/errors.py b/mypy/errors.py
index 13452b14a237..1b3f485d19c0 100644
--- a/mypy/errors.py
+++ b/mypy/errors.py
@@ -922,9 +922,25 @@ def file_messages(self, path: str, formatter: ErrorFormatter | None = None) -> l
self.flushed_files.add(path)
source_lines = None
if self.options.pretty and self.read_source:
- source_lines = self.read_source(path)
+ # Find shadow file mapping and read source lines if a shadow file exists for the given path.
+ # If shadow file mapping is not found, read source lines
+ mapped_path = self.find_shadow_file_mapping(path)
+ if mapped_path:
+ source_lines = self.read_source(mapped_path)
+ else:
+ source_lines = self.read_source(path)
return self.format_messages(error_tuples, source_lines)
+ def find_shadow_file_mapping(self, path: str) -> str | None:
+ """Return the shadow file path for a given source file path or None."""
+ if self.options.shadow_file is None:
+ return None
+
+ for i in self.options.shadow_file:
+ if i[0] == path:
+ return i[1]
+ return None
+
def new_messages(self) -> list[str]:
"""Return a string list of new error messages.
| diff --git a/test-data/unit/cmdline.test b/test-data/unit/cmdline.test
index 2262b7e7280c..38ea83cdbcf4 100644
--- a/test-data/unit/cmdline.test
+++ b/test-data/unit/cmdline.test
@@ -914,6 +914,23 @@ s4.py:2: error: Incompatible return value type (got "int", expected "str")
s3.py:2: error: Incompatible return value type (got "List[int]", expected "int")
s1.py:2: error: Incompatible return value type (got "int", expected "str")
+[case testShadowFileWithPretty]
+# cmd: mypy a.py --pretty --shadow-file a.py b.py
+[file a.py]
+b: bytes
+[file b.py]
+a: int = ""
+b: bytes = 1
+[out]
+a.py:1: error: Incompatible types in assignment (expression has type "str",
+variable has type "int")
+ a: int = ""
+ ^~
+a.py:2: error: Incompatible types in assignment (expression has type "int",
+variable has type "bytes")
+ b: bytes = 1
+ ^
+
[case testConfigWarnUnusedSection1]
# cmd: mypy foo.py quux.py spam/eggs.py
[file mypy.ini]
| Crash in `--pretty` mode with `--shadow-file` which shadows a shorter file
**Crash Report**
`--pretty` inserts source file fragments from the file that's being reported. If `--shadow-file` uses a `SHADOW_FILE` which is longer than `SOURCE_FILE`, mypy crashes as it tries to access a line which doesn't exist.
**To Reproduce & Traceback**
`a.py`
```python
b: bytes # Ensure there is at most 1 newline character in this file
```
`b.py`
```python
a: int = ""
b: bytes = 1
```
```shell
$ mypy a.py --pretty --shadow-file a.py b.py
Traceback (most recent call last):
...
File "<virtual environment>\lib\site-packages\mypy\__main__.py", line 15, in console_entry
main()
File "mypy\main.py", line 103, in main
File "mypy\main.py", line 187, in run_build
File "mypy\build.py", line 193, in build
File "mypy\build.py", line 268, in _build
File "mypy\build.py", line 2950, in dispatch
File "mypy\build.py", line 3348, in process_graph
File "mypy\build.py", line 3471, in process_stale_scc
File "mypy\errors.py", line 923, in file_messages
File "mypy\errors.py", line 883, in format_messages
IndexError: list index out of range
```
https://github.com/python/mypy/blob/1a2c8e2a4df21532e4952191cad74ae50083f4ad/mypy/errors.py#L883
I think the solution is to insert source file fragments from `SHADOW_FILE` rather than `SOURCE_FILE`. Even if this doesn't crash, the resultant messages don't make sense. For example, inserting extra newlines into `a.py` prevents the crash but you get
```shell
a.py:1:10:1:11: error: Incompatible types in assignment (expression has type "str", variable has type "int") [assignment]
b: bytes
^
a.py:2:12:2:12: error: Incompatible types in assignment (expression has type "int", variable has type "bytes") [assignment]
^
```
**Your Environment**
- Mypy version used: 1.11.2
- Mypy command-line flags: `--pretty --shadow-file`
- Mypy configuration options from `mypy.ini` (and other config files): None
- Python version used: 3.9
- Operating system and version: Windows 11 Home
| PR welcome! | 1,728,342,718,000 | [] | Bug Report | [
"mypy/errors.py:Errors.file_messages"
] | [
"mypy/errors.py:Errors.find_shadow_file_mapping"
] | 1 |
python/mypy | python__mypy-17873 | ac98ab59f7811a4b7272161610abc21958a528b2 | diff --git a/mypy/typeops.py b/mypy/typeops.py
index 7f530d13d4e2..0699cda53cfa 100644
--- a/mypy/typeops.py
+++ b/mypy/typeops.py
@@ -14,6 +14,7 @@
from mypy.expandtype import expand_type, expand_type_by_instance
from mypy.maptype import map_instance_to_supertype
from mypy.nodes import (
+ ARG_OPT,
ARG_POS,
ARG_STAR,
ARG_STAR2,
@@ -305,9 +306,27 @@ class B(A): pass
"""
if isinstance(method, Overloaded):
- items = [
- bind_self(c, original_type, is_classmethod, ignore_instances) for c in method.items
- ]
+ items = []
+ original_type = get_proper_type(original_type)
+ for c in method.items:
+ if isinstance(original_type, Instance):
+ # Filter based on whether declared self type can match actual object type.
+ # For example, if self has type C[int] and method is accessed on a C[str] value,
+ # omit this item. This is best effort since bind_self can be called in many
+ # contexts, and doing complete validation might trigger infinite recursion.
+ #
+ # Note that overload item filtering normally happens elsewhere. This is needed
+ # at least during constraint inference.
+ keep = is_valid_self_type_best_effort(c, original_type)
+ else:
+ keep = True
+ if keep:
+ items.append(bind_self(c, original_type, is_classmethod, ignore_instances))
+ if len(items) == 0:
+ # If no item matches, returning all items helps avoid some spurious errors
+ items = [
+ bind_self(c, original_type, is_classmethod, ignore_instances) for c in method.items
+ ]
return cast(F, Overloaded(items))
assert isinstance(method, CallableType)
func = method
@@ -379,6 +398,43 @@ class B(A): pass
return cast(F, res)
+def is_valid_self_type_best_effort(c: CallableType, self_type: Instance) -> bool:
+ """Quickly check if self_type might match the self in a callable.
+
+ Avoid performing any complex type operations. This is performance-critical.
+
+ Default to returning True if we don't know (or it would be too expensive).
+ """
+ if (
+ self_type.args
+ and c.arg_types
+ and isinstance((arg_type := get_proper_type(c.arg_types[0])), Instance)
+ and c.arg_kinds[0] in (ARG_POS, ARG_OPT)
+ and arg_type.args
+ and self_type.type.fullname != "functools._SingleDispatchCallable"
+ ):
+ if self_type.type is not arg_type.type:
+ # We can't map to supertype, since it could trigger expensive checks for
+ # protocol types, so we consevatively assume this is fine.
+ return True
+
+ # Fast path: no explicit annotation on self
+ if all(
+ (
+ type(arg) is TypeVarType
+ and type(arg.upper_bound) is Instance
+ and arg.upper_bound.type.fullname == "builtins.object"
+ )
+ for arg in arg_type.args
+ ):
+ return True
+
+ from mypy.meet import is_overlapping_types
+
+ return is_overlapping_types(self_type, c.arg_types[0])
+ return True
+
+
def erase_to_bound(t: Type) -> Type:
# TODO: use value restrictions to produce a union?
t = get_proper_type(t)
| diff --git a/test-data/unit/check-overloading.test b/test-data/unit/check-overloading.test
index 48d5996b226f..e414c1c9b0b6 100644
--- a/test-data/unit/check-overloading.test
+++ b/test-data/unit/check-overloading.test
@@ -6750,3 +6750,21 @@ def foo(x: object) -> str: ...
def bar(x: int) -> int: ...
@overload
def bar(x: Any) -> str: ...
+
+[case testOverloadOnInvalidTypeArgument]
+from typing import TypeVar, Self, Generic, overload
+
+class C: pass
+
+T = TypeVar("T", bound=C)
+
+class D(Generic[T]):
+ @overload
+ def f(self, x: int) -> int: ...
+ @overload
+ def f(self, x: str) -> str: ...
+ def f(Self, x): ...
+
+a: D[str] # E: Type argument "str" of "D" must be a subtype of "C"
+reveal_type(a.f(1)) # N: Revealed type is "builtins.int"
+reveal_type(a.f("x")) # N: Revealed type is "builtins.str"
diff --git a/test-data/unit/check-protocols.test b/test-data/unit/check-protocols.test
index ee7556461fd3..5ed2351e33e6 100644
--- a/test-data/unit/check-protocols.test
+++ b/test-data/unit/check-protocols.test
@@ -4127,3 +4127,91 @@ class P(Protocol):
class C(P): ...
C(0) # OK
+
+[case testTypeVarValueConstraintAgainstGenericProtocol]
+from typing import TypeVar, Generic, Protocol, overload
+
+T_contra = TypeVar("T_contra", contravariant=True)
+AnyStr = TypeVar("AnyStr", str, bytes)
+
+class SupportsWrite(Protocol[T_contra]):
+ def write(self, s: T_contra, /) -> None: ...
+
+class Buffer: ...
+
+class IO(Generic[AnyStr]):
+ @overload
+ def write(self: IO[bytes], s: Buffer, /) -> None: ...
+ @overload
+ def write(self, s: AnyStr, /) -> None: ...
+ def write(self, s): ...
+
+def foo(fdst: SupportsWrite[AnyStr]) -> None: ...
+
+x: IO[str]
+foo(x)
+
+[case testTypeVarValueConstraintAgainstGenericProtocol2]
+from typing import Generic, Protocol, TypeVar, overload
+
+AnyStr = TypeVar("AnyStr", str, bytes)
+T_co = TypeVar("T_co", covariant=True)
+T_contra = TypeVar("T_contra", contravariant=True)
+
+class SupportsRead(Generic[T_co]):
+ def read(self) -> T_co: ...
+
+class SupportsWrite(Protocol[T_contra]):
+ def write(self, s: T_contra) -> object: ...
+
+def copyfileobj(fsrc: SupportsRead[AnyStr], fdst: SupportsWrite[AnyStr]) -> None: ...
+
+class WriteToMe(Generic[AnyStr]):
+ @overload
+ def write(self: WriteToMe[str], s: str) -> int: ...
+ @overload
+ def write(self: WriteToMe[bytes], s: bytes) -> int: ...
+ def write(self, s): ...
+
+class WriteToMeOrReadFromMe(WriteToMe[AnyStr], SupportsRead[AnyStr]): ...
+
+copyfileobj(WriteToMeOrReadFromMe[bytes](), WriteToMe[bytes]())
+
+[case testOverloadedMethodWithExplictSelfTypes]
+from typing import Generic, overload, Protocol, TypeVar, Union
+
+AnyStr = TypeVar("AnyStr", str, bytes)
+T_co = TypeVar("T_co", covariant=True)
+T_contra = TypeVar("T_contra", contravariant=True)
+
+class SupportsRead(Protocol[T_co]):
+ def read(self) -> T_co: ...
+
+class SupportsWrite(Protocol[T_contra]):
+ def write(self, s: T_contra) -> int: ...
+
+class Input(Generic[AnyStr]):
+ def read(self) -> AnyStr: ...
+
+class Output(Generic[AnyStr]):
+ @overload
+ def write(self: Output[str], s: str) -> int: ...
+ @overload
+ def write(self: Output[bytes], s: bytes) -> int: ...
+ def write(self, s: Union[str, bytes]) -> int: ...
+
+def f(src: SupportsRead[AnyStr], dst: SupportsWrite[AnyStr]) -> None: ...
+
+def g1(a: Input[bytes], b: Output[bytes]) -> None:
+ f(a, b)
+
+def g2(a: Input[bytes], b: Output[bytes]) -> None:
+ f(a, b)
+
+def g3(a: Input[str], b: Output[bytes]) -> None:
+ f(a, b) # E: Cannot infer type argument 1 of "f"
+
+def g4(a: Input[bytes], b: Output[str]) -> None:
+ f(a, b) # E: Cannot infer type argument 1 of "f"
+
+[builtins fixtures/tuple.pyi]
| Regression: IO[str] not compatible with SupportsWrite[AnyStr]
This code generates a false positive, but type checked cleanly on mypy 1.11:
```py
from typing import IO
from shutil import copyfileobj
f: IO[str]
copyfileobj(f, f) # Cannot infer type argument 1 of "copyfileobj"
```
I believe the reason is that the definition of `IO.write` changed in typeshed, and this triggered a mypy bug. Here's how `IO.write` is currently defined:
```py
class IO(Iterator[AnyStr]):
...
@abstractmethod
@overload
def write(self: IO[bytes], s: ReadableBuffer, /) -> int: ...
@abstractmethod
@overload
def write(self, s: AnyStr, /) -> int: ...
```
| 1,727,962,007,000 | [] | Bug Report | [
"mypy/typeops.py:bind_self"
] | [
"mypy/typeops.py:is_valid_self_type_best_effort"
] | 1 |
|
scipy/scipy | scipy__scipy-21918 | edea1192fe142306ee6ecaf94c02873dadb00255 | diff --git a/scipy/optimize/_minpack_py.py b/scipy/optimize/_minpack_py.py
index 8c0564c06bbe..8cfce8aae21b 100644
--- a/scipy/optimize/_minpack_py.py
+++ b/scipy/optimize/_minpack_py.py
@@ -964,12 +964,21 @@ def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False,
xdata = xdata[..., ~has_nan]
ydata = ydata[~has_nan]
+ # Also omit the corresponding entries from sigma
+ if sigma is not None:
+ sigma = np.asarray(sigma)
+ if sigma.ndim == 1:
+ sigma = sigma[~has_nan]
+ elif sigma.ndim == 2:
+ sigma = sigma[~has_nan, :]
+ sigma = sigma[:, ~has_nan]
+
# Determine type of sigma
if sigma is not None:
sigma = np.asarray(sigma)
# if 1-D or a scalar, sigma are errors, define transform = 1/sigma
- if sigma.size == 1 or sigma.shape == (ydata.size, ):
+ if sigma.size == 1 or sigma.shape == (ydata.size,):
transform = 1.0 / sigma
# if 2-D, sigma is the covariance matrix,
# define transform = L such that L L^T = C
| diff --git a/scipy/optimize/tests/test_minpack.py b/scipy/optimize/tests/test_minpack.py
index dce5776d27cb..ef107c5692c2 100644
--- a/scipy/optimize/tests/test_minpack.py
+++ b/scipy/optimize/tests/test_minpack.py
@@ -811,6 +811,61 @@ def test_maxfev_and_bounds(self):
assert_allclose(popt1, 2, atol=1e-14)
assert_allclose(popt2, 2, atol=1e-14)
+ @pytest.mark.parametrize("sigma_dim", [0, 1, 2])
+ def test_curvefit_omitnan(self, sigma_dim):
+ def exponential(x, a, b):
+ return b * np.exp(a * x)
+
+ rng = np.random.default_rng(578285731148908)
+ N = 100
+ x = np.linspace(1, 10, N)
+ y = exponential(x, 0.2, 0.5)
+
+ if (sigma_dim == 0):
+ sigma = 0.05
+ y += rng.normal(0, sigma, N)
+
+ elif (sigma_dim == 1):
+ sigma = x * 0.05
+ y += rng.normal(0, sigma, N)
+
+ elif (sigma_dim == 2):
+ # The covariance matrix must be symmetric positive-semidefinite
+ a = rng.normal(0, 2, (N, N))
+ sigma = a @ a.T
+ y += rng.multivariate_normal(np.zeros_like(x), sigma)
+ else:
+ assert False, "The sigma must be a scalar, 1D array or 2D array."
+
+ p0 = [0.1, 1.0]
+
+ # Choose indices to place NaNs.
+ i_x = rng.integers(N, size=5)
+ i_y = rng.integers(N, size=5)
+
+ # Add NaNs and compute result using `curve_fit`
+ x[i_x] = np.nan
+ y[i_y] = np.nan
+ res_opt, res_cov = curve_fit(exponential, x, y, p0=p0, sigma=sigma,
+ nan_policy="omit")
+
+ # Manually remove elements that should be eliminated, and
+ # calculate reference using `curve_fit`
+ i_delete = np.unique(np.concatenate((i_x, i_y)))
+ x = np.delete(x, i_delete, axis=0)
+ y = np.delete(y, i_delete, axis=0)
+
+ sigma = np.asarray(sigma)
+ if sigma.ndim == 1:
+ sigma = np.delete(sigma, i_delete)
+ elif sigma.ndim == 2:
+ sigma = np.delete(sigma, i_delete, axis=0)
+ sigma = np.delete(sigma, i_delete, axis=1)
+ ref_opt, ref_cov = curve_fit(exponential, x, y, p0=p0, sigma=sigma)
+
+ assert_allclose(res_opt, ref_opt, atol=1e-14)
+ assert_allclose(res_cov, ref_cov, atol=1e-14)
+
def test_curvefit_simplecovariance(self):
def func(x, a, b):
| BUG: `optimize.curve_fit` with `nan_policy="omit"` fails to handle sigma properly
### Describe your issue.
Hello,
I have encountered an issue with curve_fit. It is bypassable, but I'd say it is unexpected behaviour. Generally speaking, it occurs when both of the following is true:
-- I have xdata or ydata with NaNs and use nan_policy = "omit" to handle them;
-- I have specified sigma.
In this case, I get ValueError "Sigma has incorrect shape" in optimize._minpack_py.py despite sigma being of the same size as xdata and ydata.
I checked what is going in _minpack_py.py and figured out the following.
It occurs because "nan_policy = omit" leads to the removal of NaN indices from xdata and ydata (lines 960-967), but not from sigma. Then, after line 969, the shape of sigma is checked by comparing it with ydata. And even if sigma and ydata were originally of the same size, they end up to be different after ydata truncation, throwing an error.
So, to bypass this problem, one has either to remove indices corresponding NaNs in xdata and ydata from sigma, or not using nan_policy = "omit" at all and remove NaN indices manually. I suggest improving nan_policy = "omit" in a way that it would remove the corresponding indices from sigma, if needed.
Sorry if my bug report is incorrect, I am first time doing this.
### Reproducing Code Example
```python
import numpy as np
from scipy.optimize import curve_fit
def exponential(x, a, b):
return b * np.exp(a * x)
np.random.seed(0)
N = 90
x = np.linspace(1, 10, N, endpoint = False)
sigma = x * 0.05
y = exponential(x, 0.2, 0.5) + np.random.normal(0, sigma, N)
p0 = [0.1, 1.0]
# without nans, works perfect
popt, pcov = curve_fit(exponential, x, y, p0 = p0, sigma = sigma)
perr = np.diag(np.sqrt(pcov))
print("without nans:")
print(f"popt: {popt}")
print(f"perr: {perr}")
# with nans, have to set nan_policy = "omit"...
x[11] = np.nan
y[22] = np.nan
popt, pcov = curve_fit(exponential, x, y, p0 = p0, nan_policy = "omit")
perr = np.diag(np.sqrt(pcov))
print("with nans:")
print(f"popt: {popt}")
print(f"perr: {perr}")
#...but when using sigma, it says "sigma has incorrect shape"
x[11] = np.nan
y[22] = np.nan
popt, pcov = curve_fit(exponential, x, y, p0 = p0, sigma = sigma, nan_policy = "omit")
```
### Error message
```shell
Traceback (most recent call last):
File "/root/Science/Spiral_shape_analysis/fit_results/../spiralcutter_advanced/scipy_test.py", line 34, in <module>
popt, pcov = curve_fit(exponential, x, y, p0 = p0, sigma = sigma, nan_policy = "omit")
File "/usr/local/lib/python3.10/dist-packages/scipy/optimize/_minpack_py.py", line 985, in curve_fit
raise ValueError("`sigma` has incorrect shape.")
ValueError: `sigma` has incorrect shape.
```
### SciPy/NumPy/Python version and system information
```shell
>>> print(scipy.__version__, numpy.__version__, sys.version_info)
1.14.1 2.1.2 sys.version_info(major=3, minor=10, micro=12, releaselevel='final', serial=0)
>>> scipy.show_config()
Build Dependencies:
blas:
detection method: pkgconfig
found: true
include directory: /opt/_internal/cpython-3.10.14/lib/python3.10/site-packages/scipy_openblas32/include
lib directory: /opt/_internal/cpython-3.10.14/lib/python3.10/site-packages/scipy_openblas32/lib
name: scipy-openblas
openblas configuration: OpenBLAS 0.3.27.dev DYNAMIC_ARCH NO_AFFINITY Zen MAX_THREADS=64
pc file directory: /project
version: 0.3.27.dev
lapack:
detection method: pkgconfig
found: true
include directory: /opt/_internal/cpython-3.10.14/lib/python3.10/site-packages/scipy_openblas32/include
lib directory: /opt/_internal/cpython-3.10.14/lib/python3.10/site-packages/scipy_openblas32/lib
name: scipy-openblas
openblas configuration: OpenBLAS 0.3.27.dev DYNAMIC_ARCH NO_AFFINITY Zen MAX_THREADS=64
pc file directory: /project
version: 0.3.27.dev
pybind11:
detection method: config-tool
include directory: unknown
name: pybind11
version: 2.12.0
Compilers:
c:
commands: cc
linker: ld.bfd
name: gcc
version: 10.2.1
c++:
commands: c++
linker: ld.bfd
name: gcc
version: 10.2.1
cython:
commands: cython
linker: cython
name: cython
version: 3.0.11
fortran:
commands: gfortran
linker: ld.bfd
name: gcc
version: 10.2.1
pythran:
include directory: ../../tmp/pip-build-env-h_xz8lfs/overlay/lib/python3.10/site-packages/pythran
version: 0.16.1
Machine Information:
build:
cpu: x86_64
endian: little
family: x86_64
system: linux
cross-compiled: false
host:
cpu: x86_64
endian: little
family: x86_64
system: linux
Python Information:
path: /opt/python/cp310-cp310/bin/python
version: '3.10'
```
| Oops, thanks for reporting!
@AtsushiSakai Looks like we missed this. Do you have a moment to submit a PR?
@Vallastro Just to confirm, this would also be a problem when `sigma` is 2D. In that case, we would remove both the rows and columns where there are NaNs in either `x` or `y`?
> Just to confirm, this would also be a problem when `sigma` is 2D. In that case, we would remove both the rows and columns where there are NaNs in either `x` or `y`?
I guess, yes. In this case, if `ydata[i]` is invalidated, then for any `j` the covariance between `xdata[i]` and `xdata[j]`, represented in `sigma[i, j]` and `sigma[j, i]`, makes no sense. And if `N` elements of `ydata` out of the original `M` remain after removing NaNs, then removing the corresponding columns and rows from `sigma` will make it `N` x `N` instead of `M` x `M`, which is expected.
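A small sketch of that trimming for a 2-D `sigma` (covariance matrix), keeping only the rows and columns of the retained samples:
```python
import numpy as np

rng = np.random.default_rng(0)
M = 6
a = rng.normal(size=(M, M))
sigma = a @ a.T                        # M x M covariance matrix
y = rng.normal(size=M)
y[[1, 4]] = np.nan                     # two invalidated samples

keep = ~np.isnan(y)                    # N entries remain True
sigma_trimmed = sigma[keep][:, keep]   # drop rows, then columns -> N x N
print(sigma_trimmed.shape)             # (4, 4)
```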
@mdhaber Is this issue accepting PRs? If so, could you please point me to the files that need to be changed? I would love to contribute. | 1,732,105,907,000 | [
"defect",
"scipy.optimize"
] | Bug Report | [
"scipy/optimize/_minpack_py.py:curve_fit"
] | [] | 1 |
scipy/scipy | scipy__scipy-21912 | ec30b43e143ac0cb0e30129d4da0dbaa29e74c34 | diff --git a/scipy/integrate/_quadrature.py b/scipy/integrate/_quadrature.py
index 4f423da63895..65faaaa79b85 100644
--- a/scipy/integrate/_quadrature.py
+++ b/scipy/integrate/_quadrature.py
@@ -136,11 +136,16 @@ def trapezoid(y, x=None, dx=1.0, axis=-1):
d = dx
else:
x = _asarray(x, xp=xp, subok=True)
- # reshape to correct shape
- shape = [1] * y.ndim
- shape[axis] = y.shape[axis]
- x = xp.reshape(x, shape)
- d = x[tuple(slice1)] - x[tuple(slice2)]
+ if x.ndim == 1:
+ d = x[1:] - x[:-1]
+ # make d broadcastable to y
+ slice3 = [None] * nd
+ slice3[axis] = slice(None)
+ d = d[tuple(slice3)]
+ else:
+ # if x is n-D it should be broadcastable to y
+ x = xp.broadcast_to(x, y.shape)
+ d = x[tuple(slice1)] - x[tuple(slice2)]
try:
ret = xp.sum(
d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0,
| diff --git a/scipy/integrate/tests/test_quadrature.py b/scipy/integrate/tests/test_quadrature.py
index e49ded996a2e..ec554d3b0bc8 100644
--- a/scipy/integrate/tests/test_quadrature.py
+++ b/scipy/integrate/tests/test_quadrature.py
@@ -309,14 +309,6 @@ def test_ndim(self, xp):
r = trapezoid(q, x=z[None, None,:], axis=2)
xp_assert_close(r, qz)
- # n-d `x` but not the same as `y`
- r = trapezoid(q, x=xp.reshape(x[:, None, None], (3, 1)), axis=0)
- xp_assert_close(r, qx)
- r = trapezoid(q, x=xp.reshape(y[None,:, None], (8, 1)), axis=1)
- xp_assert_close(r, qy)
- r = trapezoid(q, x=xp.reshape(z[None, None,:], (13, 1)), axis=2)
- xp_assert_close(r, qz)
-
# 1-d `x`
r = trapezoid(q, x=x, axis=0)
xp_assert_close(r, qx)
@@ -325,6 +317,33 @@ def test_ndim(self, xp):
r = trapezoid(q, x=z, axis=2)
xp_assert_close(r, qz)
+ @skip_xp_backends('jax.numpy',
+ reasons=["JAX arrays do not support item assignment"])
+ @pytest.mark.usefixtures("skip_xp_backends")
+ def test_gh21908(self, xp):
+ # extended testing for n-dim arrays
+ x = xp.reshape(xp.linspace(0, 29, 30), (3, 10))
+ y = xp.reshape(xp.linspace(0, 29, 30), (3, 10))
+
+ out0 = xp.linspace(200, 380, 10)
+ xp_assert_close(trapezoid(y, x=x, axis=0), out0)
+ xp_assert_close(trapezoid(y, x=xp.asarray([0, 10., 20.]), axis=0), out0)
+ # x needs to be broadcastable against y
+ xp_assert_close(
+ trapezoid(y, x=xp.asarray([0, 10., 20.])[:, None], axis=0),
+ out0
+ )
+ with pytest.raises(Exception):
+ # x is not broadcastable against y
+ trapezoid(y, x=xp.asarray([0, 10., 20.])[None, :], axis=0)
+
+ out1 = xp.asarray([ 40.5, 130.5, 220.5])
+ xp_assert_close(trapezoid(y, x=x, axis=1), out1)
+ xp_assert_close(
+ trapezoid(y, x=xp.linspace(0, 9, 10), axis=1),
+ out1
+ )
+
@skip_xp_invalid_arg
def test_masked(self, xp):
# Testing that masked arrays behave as if the function is 0 where
| BUG: integrate.trapezoid: broadcasting failure after #21524
### Describe your issue.
`scipy.integrate.trapezoid` fails when 2D arrays of identical shape are passed as `x` and `y` arguments.
This failure started in recent nightlies, probably from https://github.com/scipy/scipy/pull/21524.
The same example works with scipy 1.14.1 and earlier.
@Armavica @lucascolley
### Reproducing Code Example
```python
In [1]: import numpy as np, scipy
In [2]: x = np.random.randn(3, 10)
In [3]: scipy.integrate.trapezoid(x, x)
```
### Error message
```shell
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
Cell In[3], line 1
----> 1 scipy.integrate.trapezoid(x, x)
File ~/.pyenv/versions/3.12.3/envs/py3.12.3/lib/python3.12/site-packages/scipy/integrate/_quadrature.py:142, in trapezoid(y, x, dx, axis)
140 shape = [1] * y.ndim
141 shape[axis] = y.shape[axis]
--> 142 x = xp.reshape(x, shape)
143 d = x[tuple(slice1)] - x[tuple(slice2)]
144 try:
File ~/.pyenv/versions/3.12.3/envs/py3.12.3/lib/python3.12/site-packages/scipy/_lib/array_api_compat/_internal.py:28, in get_xp.<locals>.inner.<locals>.wrapped_f(*args, **kwargs)
26 @wraps(f)
27 def wrapped_f(*args, **kwargs):
---> 28 return f(*args, xp=xp, **kwargs)
File ~/.pyenv/versions/3.12.3/envs/py3.12.3/lib/python3.12/site-packages/scipy/_lib/array_api_compat/common/_aliases.py:382, in reshape(x, shape, xp, copy, **kwargs)
380 y.shape = shape
381 return y
--> 382 return xp.reshape(x, shape, **kwargs)
File ~/.pyenv/versions/3.12.3/envs/py3.12.3/lib/python3.12/site-packages/numpy/core/fromnumeric.py:285, in reshape(a, newshape, order)
200 @array_function_dispatch(_reshape_dispatcher)
201 def reshape(a, newshape, order='C'):
202 """
203 Gives a new shape to an array without changing its data.
204
(...)
283 [5, 6]])
284 """
--> 285 return _wrapfunc(a, 'reshape', newshape, order=order)
File ~/.pyenv/versions/3.12.3/envs/py3.12.3/lib/python3.12/site-packages/numpy/core/fromnumeric.py:59, in _wrapfunc(obj, method, *args, **kwds)
56 return _wrapit(obj, method, *args, **kwds)
58 try:
---> 59 return bound(*args, **kwds)
60 except TypeError:
61 # A TypeError occurs if the object does have such a method in its
62 # class, but its signature is not identical to that of NumPy's. This
(...)
66 # Call _wrapit from within the except clause to ensure a potential
67 # exception has a traceback chain.
68 return _wrapit(obj, method, *args, **kwds)
ValueError: cannot reshape array of size 30 into shape (1,10)
```
### SciPy/NumPy/Python version and system information
```shell
1.15.0.dev0+git20241118.0ea90cf 1.26.4 sys.version_info(major=3, minor=12, micro=3, releaselevel='final', serial=0)
Build Dependencies:
blas:
detection method: extraframeworks
found: true
include directory: unknown
lib directory: unknown
name: Accelerate
openblas configuration: unknown
pc file directory: unknown
version: unknown
lapack:
detection method: extraframeworks
found: true
include directory: unknown
lib directory: unknown
name: Accelerate
openblas configuration: unknown
pc file directory: unknown
version: unknown
pybind11:
detection method: config-tool
include directory: unknown
name: pybind11
version: 2.13.6
Compilers:
c:
commands: cc
linker: ld64
name: clang
version: 15.0.0
c++:
commands: c++
linker: ld64
name: clang
version: 15.0.0
cython:
commands: cython
linker: cython
name: cython
version: 3.0.11
fortran:
commands: gfortran
linker: ld64
name: gcc
version: 13.3.0
pythran:
include directory: ../../../../../../private/var/folders/g6/rgtlsw6n123b0gt5483s5_cm0000gn/T/pip-build-env-zkcj2f79/overlay/lib/python3.12/site-packages/pythran
version: 0.17.0
Machine Information:
build:
cpu: aarch64
endian: little
family: aarch64
system: darwin
cross-compiled: false
host:
cpu: aarch64
endian: little
family: aarch64
system: darwin
Python Information:
path: /private/var/folders/g6/rgtlsw6n123b0gt5483s5_cm0000gn/T/cibw-run-7dkhp5o6/cp312-macosx_arm64/build/venv/bin/python
version: '3.12'
```
| Can reproduce.
```
import numpy as np
from scipy.integrate import trapezoid
import scipy
print(scipy.version.version)
x = np.linspace(0, 29, 30).reshape(3, 10)
y = np.linspace(0, 29, 30).reshape(3, 10)
print(trapezoid(y, x=x, axis=0))
print(trapezoid(y, x=x, axis=1))
```
@lucascolley @Armavica, it's not clear what [these lines do](https://github.com/scipy/scipy/blob/main/scipy/integrate/_quadrature.py#L139):
```
# reshape to correct shape
shape = [1] * y.ndim
shape[axis] = y.shape[axis]
x = xp.reshape(x, shape)
```
why is a reshape necessary?
the pre-existing code included a similar reshape, but of `d` and only when `x.ndim == 1`. I think the new reshape of `x` is serving the same purpose, as `d` is then constructed from the reshaped `x` rather than via `np.diff`. At a guess, @Armavica has accurately replicated `np.diff` for `x.ndim == 1`, but in higher dimensions, where the pre-existing code did not reshape, the new reshape is causing trouble.
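For reference, a small numpy sketch (not SciPy's actual code) of the diff-based computation that the pre-existing implementation effectively performed, and which handles any `x` broadcastable to `y`:
```python
import numpy as np

def manual_trapezoid(y, x, axis=-1):
    # spacing along the integration axis; x only needs to broadcast against y
    d = np.diff(np.broadcast_to(x, y.shape), axis=axis)
    slice1 = [slice(None)] * y.ndim
    slice2 = [slice(None)] * y.ndim
    slice1[axis] = slice(1, None)
    slice2[axis] = slice(None, -1)
    return np.sum(d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0, axis=axis)

x = np.linspace(0, 29, 30).reshape(3, 10)
y = np.linspace(0, 29, 30).reshape(3, 10)
print(manual_trapezoid(y, x, axis=0))  # [200. 220. ... 380.], as on 1.14.x
print(manual_trapezoid(y, x, axis=1))  # [ 40.5 130.5 220.5]
```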
`xp.diff` is coming in the next version of the standard, so at the worst we can just revert and wait for that to become available in array-api-compat. Maybe the fix here is easy, though. | 1,731,994,475,000 | [
"defect",
"scipy.integrate"
] | Bug Report | [
"scipy/integrate/_quadrature.py:trapezoid"
] | [] | 1 |
scipy/scipy | scipy__scipy-21808 | 1d5ca99ae1afd2a0c298c8eafde04dbe915588c8 | diff --git a/scipy/signal/_signaltools.py b/scipy/signal/_signaltools.py
index f95b52c0c69f..3006bf014be5 100644
--- a/scipy/signal/_signaltools.py
+++ b/scipy/signal/_signaltools.py
@@ -405,6 +405,8 @@ def correlation_lags(in1_len, in2_len, mode='full'):
lags = np.arange(lag_bound + 1)
else:
lags = np.arange(lag_bound, 1)
+ else:
+ raise ValueError(f"Mode {mode} is invalid")
return lags
| diff --git a/scipy/signal/tests/test_signaltools.py b/scipy/signal/tests/test_signaltools.py
index c5a5028e4624..4f8a573696b1 100644
--- a/scipy/signal/tests/test_signaltools.py
+++ b/scipy/signal/tests/test_signaltools.py
@@ -2135,6 +2135,11 @@ def test_correlation_lags(mode, behind, input_size):
assert_equal(lags.shape, correlation.shape)
+def test_correlation_lags_invalid_mode():
+ with pytest.raises(ValueError, match="Mode asdfgh is invalid"):
+ correlation_lags(100, 100, mode="asdfgh")
+
+
@pytest.mark.parametrize('dt', [np.csingle, np.cdouble,
pytest.param(np.clongdouble, marks=_pmf)])
class TestCorrelateComplex:
| BUG: signal: Confusing error when giving an invalid mode to `correlation_lags`
### Describe your issue.
When correlation_lags is given a mode that is not "valid", "same", or "full", the `lags` array is never defined, and the function fails with `UnboundLocalError: local variable 'lags' referenced before assignment`.
The fix is very simple, I will submit a PR soon.
### Reproducing Code Example
```python
signal.correlation_lags(10, 10, "some_invalid_format")
```
### Error message
```shell
Traceback (most recent call last)
<ipython-input-25-202bff163744> in <cell line: 1>()
----> 1 signal.correlation_lags(10, 10, "some_invalid_format")
/usr/local/lib/python3.10/dist-packages/scipy/signal/_signaltools.py in correlation_lags(in1_len, in2_len, mode)
384 else:
385 lags = np.arange(lag_bound, 1)
--> 386 return lags
387
388
UnboundLocalError: local variable 'lags' referenced before assignment
```
### SciPy/NumPy/Python version and system information
```shell
1.13.1 1.26.4 sys.version_info(major=3, minor=10, micro=12, releaselevel='final', serial=0)
Build Dependencies:
blas:
detection method: pkgconfig
found: true
include directory: /usr/local/include
lib directory: /usr/local/lib
name: openblas
openblas configuration: USE_64BITINT=0 DYNAMIC_ARCH=1 DYNAMIC_OLDER= NO_CBLAS=
NO_LAPACK= NO_LAPACKE= NO_AFFINITY=1 USE_OPENMP= ZEN MAX_THREADS=64
pc file directory: /usr/local/lib/pkgconfig
version: 0.3.27
lapack:
detection method: pkgconfig
found: true
include directory: /usr/local/include
lib directory: /usr/local/lib
name: openblas
openblas configuration: USE_64BITINT=0 DYNAMIC_ARCH=1 DYNAMIC_OLDER= NO_CBLAS=
NO_LAPACK= NO_LAPACKE= NO_AFFINITY=1 USE_OPENMP= ZEN MAX_THREADS=64
pc file directory: /usr/local/lib/pkgconfig
version: 0.3.27
pybind11:
detection method: config-tool
include directory: unknown
name: pybind11
version: 2.12.0
Compilers:
c:
commands: cc
linker: ld.bfd
name: gcc
version: 10.2.1
c++:
commands: c++
linker: ld.bfd
name: gcc
version: 10.2.1
cython:
commands: cython
linker: cython
name: cython
version: 3.0.10
fortran:
commands: gfortran
linker: ld.bfd
name: gcc
version: 10.2.1
pythran:
include directory: ../../tmp/pip-build-env-mnl4e8vy/overlay/lib/python3.10/site-packages/pythran
version: 0.15.0
Machine Information:
build:
cpu: x86_64
endian: little
family: x86_64
system: linux
cross-compiled: false
host:
cpu: x86_64
endian: little
family: x86_64
system: linux
Python Information:
path: /opt/python/cp310-cp310/bin/python
version: '3.10'
```
```
| 1,730,801,461,000 | [
"defect",
"scipy.signal"
] | Bug Report | [
"scipy/signal/_signaltools.py:correlation_lags"
] | [] | 1 |
|
scipy/scipy | scipy__scipy-21801 | f33f5f644b665532cecc098a4b6215c1fe11ee7b | diff --git a/scipy/special/_basic.py b/scipy/special/_basic.py
index 103f2c867632..5fe7f17e2784 100644
--- a/scipy/special/_basic.py
+++ b/scipy/special/_basic.py
@@ -11,7 +11,7 @@
sin, place, issubdtype, extract, inexact, nan, zeros, sinc)
from . import _ufuncs
-from ._ufuncs import (mathieu_a, mathieu_b, iv, jv, gamma,
+from ._ufuncs import (mathieu_a, mathieu_b, iv, jv, gamma, rgamma,
psi, hankel1, hankel2, yv, kv, poch, binom,
_stirling2_inexact)
@@ -2911,34 +2911,68 @@ def _factorialx_array_exact(n, k=1):
return out
-def _factorialx_array_approx(n, k):
+def _factorialx_array_approx(n, k, extend):
"""
Calculate approximation to multifactorial for array n and integer k.
- Ensure we only call _factorialx_approx_core where necessary/required.
+ Ensure that values aren't calculated unnecessarily.
"""
+ if extend == "complex":
+ return _factorialx_approx_core(n, k=k, extend=extend)
+
+ # at this point we are guaranteed that extend='zero' and that k>0 is an integer
result = zeros(n.shape)
# keep nans as nans
place(result, np.isnan(n), np.nan)
# only compute where n >= 0 (excludes nans), everything else is 0
cond = (n >= 0)
n_to_compute = extract(cond, n)
- place(result, cond, _factorialx_approx_core(n_to_compute, k=k))
+ place(result, cond, _factorialx_approx_core(n_to_compute, k=k, extend=extend))
return result
-def _factorialx_approx_core(n, k):
+def _gamma1p(vals):
+ """
+ returns gamma(n+1), though with NaN at -1 instead of inf, c.f. #21827
+ """
+ res = gamma(vals + 1)
+ # replace infinities at -1 (from gamma function at 0) with nan
+ # gamma only returns inf for real inputs; can ignore complex case
+ if isinstance(res, np.ndarray):
+ if not _is_subdtype(vals.dtype, "c"):
+ res[vals == -1] = np.nan
+ elif np.isinf(res) and vals == -1:
+ res = np.float64("nan")
+ return res
+
+
+def _factorialx_approx_core(n, k, extend):
"""
Core approximation to multifactorial for array n and integer k.
"""
if k == 1:
- # shortcut for k=1
- result = gamma(n + 1)
+ # shortcut for k=1; same for both extensions, because we assume the
+ # handling of extend == 'zero' happens in _factorialx_array_approx
+ result = _gamma1p(n)
if isinstance(n, np.ndarray):
- # gamma does not maintain 0-dim arrays
+ # gamma does not maintain 0-dim arrays; fix it
result = np.array(result)
return result
+ if extend == "complex":
+ # see https://numpy.org/doc/stable/reference/generated/numpy.power.html
+ p_dtype = complex if (_is_subdtype(type(k), "c") or k < 0) else None
+ with warnings.catch_warnings():
+ # do not warn about 0 * inf, nan / nan etc.; the results are correct
+ warnings.simplefilter("ignore", RuntimeWarning)
+ result = np.power(k, (n - 1) / k, dtype=p_dtype) * _gamma1p(n / k)
+ result *= rgamma(1 / k + 1)
+ if isinstance(n, np.ndarray):
+ # ensure we keep array-ness for 0-dim inputs; already n/k above loses it
+ result = np.array(result)
+ return result
+
+ # at this point we are guaranteed that extend='zero' and that k>0 is an integer
n_mod_k = n % k
# scalar case separately, unified handling would be inefficient for arrays;
# don't use isscalar due to numpy/numpy#23574; 0-dim arrays treated below
@@ -2988,7 +3022,7 @@ def _is_subdtype(dtype, dtypes):
return any(np.issubdtype(dtype, dt) for dt in dtypes)
-def factorial(n, exact=False):
+def factorial(n, exact=False, extend="zero"):
"""
The factorial of a number or array of numbers.
@@ -3006,6 +3040,10 @@ def factorial(n, exact=False):
If False, result is approximated in floating point rapidly using the
`gamma` function.
Default is False.
+ extend : string, optional
+ One of ``'zero'`` or ``'complex'``; this determines how values ``n<0``
+ are handled - by default they are 0, but it is possible to opt into the
+ complex extension of the factorial (the Gamma function).
Returns
-------
@@ -3036,47 +3074,64 @@ def factorial(n, exact=False):
120
"""
+ if extend not in ("zero", "complex"):
+ raise ValueError(
+ f"argument `extend` must be either 'zero' or 'complex', received: {extend}"
+ )
+ if exact and extend == "complex":
+ raise ValueError("Incompatible options: `exact=True` and `extend='complex'`")
+
+ msg_needs_complex = (
+ "In order to use non-integer arguments, you must opt into this by passing "
+ "`extend='complex'`. Note that this changes the result for all negative "
+ "arguments (which by default return 0)."
+ )
msg_wrong_dtype = (
"Unsupported data type for factorial: {dtype}\n"
- "Permitted data types are integers and floating point numbers."
+ "Permitted data types are integers and floating point numbers, "
+ "as well as complex numbers if `extend='complex'` is passed."
+ )
+ msg_exact_not_possible = (
+ "`exact=True` only supports integers, cannot use data type {dtype}"
)
# don't use isscalar due to numpy/numpy#23574; 0-dim arrays treated below
if np.ndim(n) == 0 and not isinstance(n, np.ndarray):
# scalar cases
- if not _is_subdtype(type(n), ["i", "f", type(None)]):
+ if not _is_subdtype(type(n), ["i", "f", "c", type(None)]):
raise ValueError(msg_wrong_dtype.format(dtype=type(n)))
+ elif _is_subdtype(type(n), "c") and extend != "complex":
+ raise ValueError(msg_needs_complex)
elif n is None or np.isnan(n):
- return np.float64("nan")
- elif n < 0:
+ complexify = (extend == "complex") and _is_subdtype(type(n), "c")
+ return np.complex128("nan+nanj") if complexify else np.float64("nan")
+ elif extend == "zero" and n < 0:
return 0 if exact else np.float64(0)
elif exact and _is_subdtype(type(n), "i"):
return math.factorial(n)
elif exact:
- msg = ("Non-integer values of `n` together with `exact=True` are "
- "not supported. Either ensure integer `n` or use `exact=False`.")
- raise ValueError(msg)
- return _factorialx_approx_core(n, k=1)
+ raise ValueError(msg_exact_not_possible.format(dtype=type(n)))
+ return _factorialx_approx_core(n, k=1, extend=extend)
# arrays & array-likes
n = asarray(n)
- if not _is_subdtype(n.dtype, ["i", "f"]):
+ if not _is_subdtype(n.dtype, ["i", "f", "c"]):
raise ValueError(msg_wrong_dtype.format(dtype=n.dtype))
- if exact and not _is_subdtype(n.dtype, "i"):
- msg = ("factorial with `exact=True` does not "
- "support non-integral arrays")
- raise ValueError(msg)
+ elif _is_subdtype(n.dtype, "c") and extend != "complex":
+ raise ValueError(msg_needs_complex)
+ elif exact and _is_subdtype(n.dtype, ["f", "c"]):
+ raise ValueError(msg_exact_not_possible.format(dtype=n.dtype))
if n.size == 0:
# return empty arrays unchanged
return n
elif exact:
return _factorialx_array_exact(n, k=1)
- return _factorialx_array_approx(n, k=1)
+ return _factorialx_array_approx(n, k=1, extend=extend)
-def factorial2(n, exact=False):
+def factorial2(n, exact=False, extend="zero"):
"""Double factorial.
This is the factorial with every second value skipped. E.g., ``7!! = 7 * 5
@@ -3086,6 +3141,8 @@ def factorial2(n, exact=False):
= 2 ** (n / 2) * gamma(n / 2 + 1) n even
= 2 ** (n / 2) * (n / 2)! n even
+ The formula for `n odd` is the basis for the complex extension.
+
Parameters
----------
n : int or array_like
@@ -3094,6 +3151,17 @@ def factorial2(n, exact=False):
The result can be approximated rapidly using the gamma-formula
above (default). If `exact` is set to True, calculate the
answer exactly using integer arithmetic.
+ extend : string, optional
+ One of ``'zero'`` or ``'complex'``; this determines how values ``n<0``
+ are handled - by default they are 0, but it is possible to opt into the
+ complex extension of the double factorial. This also enables passing
+ complex values to ``n``.
+
+ .. warning::
+
+ Using the ``'complex'`` extension also changes the values of the
+ double factorial for even integers, reducing them by a factor of
+ ``sqrt(2/pi) ~= 0.79``, see [1].
Returns
-------
@@ -3109,41 +3177,68 @@ def factorial2(n, exact=False):
>>> factorial2(7, exact=True)
105
+ References
+ ----------
+ .. [1] Complex extension to double factorial
+ https://en.wikipedia.org/wiki/Double_factorial#Complex_arguments
"""
+ if extend not in ("zero", "complex"):
+ raise ValueError(
+ f"argument `extend` must be either 'zero' or 'complex', received: {extend}"
+ )
+ if exact and extend == "complex":
+ raise ValueError("Incompatible options: `exact=True` and `extend='complex'`")
+
+ msg_needs_complex = (
+ "In order to use non-integer arguments, you must opt into this by passing "
+ "`extend='complex'`. Note that this changes the result for all negative "
+ "arguments (which by default return 0). Additionally, it will rescale the "
+ "values of the double factorial at even integers by a factor of sqrt(2/pi)."
+ )
msg_wrong_dtype = (
"Unsupported data type for factorial2: {dtype}\n"
- "Only integers are permitted."
+ "Only integers are permitted by default, though floating point "
+ "and complex numbers can be used if `extend='complex'` is passed."
)
# don't use isscalar due to numpy/numpy#23574; 0-dim arrays treated below
if np.ndim(n) == 0 and not isinstance(n, np.ndarray):
# scalar cases
- if not _is_subdtype(type(n), "i"):
+ if not _is_subdtype(type(n), ["i", "f", "c", type(None)]):
raise ValueError(msg_wrong_dtype.format(dtype=type(n)))
- elif n < 0:
- return 0
+ elif _is_subdtype(type(n), ["f", "c"]) and extend != "complex":
+ raise ValueError(msg_needs_complex)
+ elif n is None or np.isnan(n):
+ complexify = (extend == "complex") and _is_subdtype(type(n), "c")
+ return np.complex128("nan+nanj") if complexify else np.float64("nan")
+ elif extend == "zero" and n < 0:
+ return 0 if exact else np.float64(0)
elif n in {0, 1}:
- return 1
- # general integer case
+ return 1 if exact else np.float64(1)
+
if exact:
+ # general integer case
return _range_prod(1, n, k=2)
- return _factorialx_approx_core(n, k=2)
+ # approximation
+ return _factorialx_approx_core(n, k=2, extend=extend)
# arrays & array-likes
n = asarray(n)
- if not _is_subdtype(n.dtype, "i"):
+ if not _is_subdtype(n.dtype, ["i", "f", "c"]):
raise ValueError(msg_wrong_dtype.format(dtype=n.dtype))
+ elif _is_subdtype(n.dtype, ["f", "c"]) and extend != "complex":
+ raise ValueError(msg_needs_complex)
if n.size == 0:
# return empty arrays unchanged
return n
elif exact:
return _factorialx_array_exact(n, k=2)
- return _factorialx_array_approx(n, k=2)
+ return _factorialx_array_approx(n, k=2, extend=extend)
-def factorialk(n, k, exact=None):
+def factorialk(n, k, exact=None, extend="zero"):
"""Multifactorial of n of order k, n(!!...!).
This is the multifactorial of n skipping k values. For example,
@@ -3170,6 +3265,17 @@ def factorialk(n, k, exact=None):
.. warning::
The default value for ``exact`` will be changed to
``False`` in SciPy 1.15.0.
+ extend : string, optional
+ One of ``'zero'`` or ``'complex'``; this determines how values `n<0` are
+ handled - by default they are 0, but it is possible to opt into the complex
+ extension of the multifactorial. This enables passing complex values,
+ not only to ``n`` but also to ``k``.
+
+ .. warning::
+
+ Using the ``'complex'`` extension also changes the values of the
+ multifactorial at integers ``n != 1 (mod k)`` by a factor depending
+ on both ``k`` and ``n % k``, see below or [1].
Returns
-------
@@ -3197,15 +3303,23 @@ def factorialk(n, k, exact=None):
n!(k) = k ** ((n - r)/k) * gamma(n/k + 1) / gamma(r/k + 1) * max(r, 1)
- This is the basis of the approximation when ``exact=False``. Compare also [1].
+ This is the basis of the approximation when ``exact=False``.
+
+ In principle, any fixed choice of ``r`` (ignoring its relation ``r = n%k``
+ to ``n``) would provide a suitable analytic continuation from integer ``n``
+ to complex ``z`` (not only satisfying the functional equation but also
+ being logarithmically convex, c.f. Bohr-Mollerup theorem) -- in fact, the
+ choice of ``r`` above only changes the function by a constant factor. The
+ final constraint that determines the canonical continuation is ``f(1) = 1``,
+ which forces ``r = 1`` (see also [1]).::
+
+ z!(k) = k ** ((z - 1)/k) * gamma(z/k + 1) / gamma(1/k + 1)
References
----------
.. [1] Complex extension to multifactorial
https://en.wikipedia.org/wiki/Double_factorial#Alternative_extension_of_the_multifactorial
"""
- if not _is_subdtype(type(k), "i") or k < 1:
- raise ValueError(f"k must be a positive integer, received: {k}")
if exact is None:
msg = (
"factorialk will default to `exact=False` starting from SciPy "
@@ -3216,42 +3330,72 @@ def factorialk(n, k, exact=None):
warnings.warn(msg, DeprecationWarning, stacklevel=2)
exact = True
+ if extend not in ("zero", "complex"):
+ raise ValueError(
+ f"argument `extend` must be either 'zero' or 'complex', received: {extend}"
+ )
+ if exact and extend == "complex":
+ raise ValueError("Incompatible options: `exact=True` and `extend='complex'`")
+
+ msg_needs_complex = (
+ "In order to use non-integer arguments, you must opt into this by passing "
+ "`extend='complex'`. Note that this changes the result for all negative "
+ "arguments (which by default return 0). Additionally, it will perturb "
+ "the values of the multifactorial at most positive integers `n`."
+ )
msg_wrong_dtype = (
- "Unsupported data type for factorialk: {dtype}\n"
- "Only integers are permitted."
+ "Unsupported data type for factorialk in {varname}: {dtype}\n"
+ "Only integers are permitted by default, though floating point "
+ "and complex numbers can be used if `extend='complex'` is passed."
)
- helpmsg = ""
- if k in {1, 2}:
- func = "factorial" if k == 1 else "factorial2"
- helpmsg = f"\nYou can try to use {func} instead"
+ # check type of k
+ if not _is_subdtype(type(k), ["i", "f", "c"]):
+ raise ValueError(msg_wrong_dtype.format(varname="k", dtype=type(k)))
+ elif _is_subdtype(type(k), ["f", "c"]) and extend != "complex":
+ raise ValueError(msg_needs_complex)
+ # check value of k
+ if extend == "zero" and k < 1:
+ msg = f"For `extend='zero'`, k must be a positive integer, received: {k}"
+ raise ValueError(msg)
+ elif k == 0:
+ raise ValueError("Parameter k cannot be zero!")
# don't use isscalar due to numpy/numpy#23574; 0-dim arrays treated below
if np.ndim(n) == 0 and not isinstance(n, np.ndarray):
# scalar cases
- if not _is_subdtype(type(n), "i"):
- raise ValueError(msg_wrong_dtype.format(dtype=type(n)) + helpmsg)
- elif n < 0:
- return 0
+ if not _is_subdtype(type(n), ["i", "f", "c", type(None)]):
+ raise ValueError(msg_wrong_dtype.format(varname="n", dtype=type(n)))
+ elif _is_subdtype(type(n), ["f", "c"]) and extend != "complex":
+ raise ValueError(msg_needs_complex)
+ elif n is None or np.isnan(n):
+ complexify = (extend == "complex") and _is_subdtype(type(n), "c")
+ return np.complex128("nan+nanj") if complexify else np.float64("nan")
+ elif extend == "zero" and n < 0:
+ return 0 if exact else np.float64(0)
elif n in {0, 1}:
- return 1
- # general integer case
+ return 1 if exact else np.float64(1)
+
if exact:
+ # general integer case
return _range_prod(1, n, k=k)
- return _factorialx_approx_core(n, k=k)
+ # approximation
+ return _factorialx_approx_core(n, k=k, extend=extend)
# arrays & array-likes
n = asarray(n)
- if not _is_subdtype(n.dtype, "i"):
- raise ValueError(msg_wrong_dtype.format(dtype=n.dtype) + helpmsg)
+ if not _is_subdtype(n.dtype, ["i", "f", "c"]):
+ raise ValueError(msg_wrong_dtype.format(varname="n", dtype=n.dtype))
+ elif _is_subdtype(n.dtype, ["f", "c"]) and extend != "complex":
+ raise ValueError(msg_needs_complex)
if n.size == 0:
# return empty arrays unchanged
return n
elif exact:
return _factorialx_array_exact(n, k=k)
- return _factorialx_array_approx(n, k=k)
+ return _factorialx_array_approx(n, k=k, extend=extend)
def stirling2(N, K, *, exact=False):
| diff --git a/scipy/_lib/tests/test_warnings.py b/scipy/_lib/tests/test_warnings.py
index f18bc20cb033..f200b1a6e975 100644
--- a/scipy/_lib/tests/test_warnings.py
+++ b/scipy/_lib/tests/test_warnings.py
@@ -115,6 +115,7 @@ def test_warning_calls_filters(warning_calls):
os.path.join('optimize', '_nnls.py'),
os.path.join('signal', '_ltisys.py'),
os.path.join('sparse', '__init__.py'), # np.matrix pending-deprecation
+ os.path.join('special', '_basic.py'), # gh-21801
os.path.join('stats', '_discrete_distns.py'), # gh-14901
os.path.join('stats', '_continuous_distns.py'),
os.path.join('stats', '_binned_statistic.py'), # gh-19345
diff --git a/scipy/special/tests/test_basic.py b/scipy/special/tests/test_basic.py
index 259a6fd35055..54d2bb0570f5 100644
--- a/scipy/special/tests/test_basic.py
+++ b/scipy/special/tests/test_basic.py
@@ -2133,28 +2133,39 @@ def assert_complex_nan(x):
assert_really_equal(elem_x, elem_y, rtol=rtol)
elif np.isnan(x) and np.isnan(y) and _is_subdtype(type(x), "c"):
assert_complex_nan(x) and assert_complex_nan(y)
+ # no need to consider complex infinities due to numpy/numpy#25493
else:
assert_func(x, y)
class TestFactorialFunctions:
- @pytest.mark.parametrize("exact", [True, False])
- def test_factorialx_scalar_return_type(self, exact):
- kw = {"exact": exact}
+ def factorialk_ref(self, n, k, exact, extend):
+ if exact:
+ return special.factorialk(n, k=k, exact=True)
+ # for details / explanation see factorialk-docstring
+ r = np.mod(n, k) if extend == "zero" else 1
+ vals = np.power(k, (n - r)/k) * special.gamma(n/k + 1) * special.rgamma(r/k + 1)
+ # np.maximum is element-wise, which is what we want
+ return vals * np.maximum(r, 1)
+
+ @pytest.mark.parametrize("exact,extend",
+ [(True, "zero"), (False, "zero"), (False, "complex")])
+ def test_factorialx_scalar_return_type(self, exact, extend):
+ kw = {"exact": exact, "extend": extend}
assert np.isscalar(special.factorial(1, **kw))
assert np.isscalar(special.factorial2(1, **kw))
assert np.isscalar(special.factorialk(1, k=3, **kw))
@pytest.mark.parametrize("n", [-1, -2, -3])
@pytest.mark.parametrize("exact", [True, False])
- def test_factorialx_negative(self, exact, n):
+ def test_factorialx_negative_extend_zero(self, exact, n):
kw = {"exact": exact}
assert_equal(special.factorial(n, **kw), 0)
assert_equal(special.factorial2(n, **kw), 0)
assert_equal(special.factorialk(n, k=3, **kw), 0)
@pytest.mark.parametrize("exact", [True, False])
- def test_factorialx_negative_array(self, exact):
+ def test_factorialx_negative_extend_zero_array(self, exact):
kw = {"exact": exact}
rtol = 1e-15
n = [-5, -4, 0, 1]
@@ -2164,42 +2175,115 @@ def test_factorialx_negative_array(self, exact):
assert_really_equal(special.factorial2(n, **kw), expected, rtol=rtol)
assert_really_equal(special.factorialk(n, k=3, **kw), expected, rtol=rtol)
+ @pytest.mark.parametrize("n", [-1.1, -2.2, -3.3])
+ def test_factorialx_negative_extend_complex(self, n):
+ # factorialk defaults to exact=True, need to explicitly set to False
+ kw = {"exact": False, "extend": "complex"}
+ exp_1 = {-1.1: -10.686287021193184771,
+ -2.2: 4.8509571405220931958,
+ -3.3: -1.4471073942559181166}
+ exp_2 = {-1.1: 1.0725776858167496309,
+ -2.2: -3.9777171783768419874,
+ -3.3: -0.99588841846200555977}
+ exp_k = {-1.1: 0.73565345382163025659,
+ -2.2: 1.1749163167190809498,
+ -3.3: -2.4780584257450583713}
+ rtol = 3e-15
+ assert_allclose(special.factorial(n, **kw), exp_1[n], rtol=rtol)
+ assert_allclose(special.factorial2(n, **kw), exp_2[n], rtol=rtol)
+ assert_allclose(special.factorialk(n, k=3, **kw), exp_k[n], rtol=rtol)
+ assert_allclose(special.factorial([n], **kw)[0], exp_1[n], rtol=rtol)
+ assert_allclose(special.factorial2([n], **kw)[0], exp_2[n], rtol=rtol)
+ assert_allclose(special.factorialk([n], k=3, **kw)[0], exp_k[n], rtol=rtol)
+
+ @pytest.mark.parametrize("imag", [0, 0j])
+ @pytest.mark.parametrize("n_outer", [-1, -2, -3])
+ def test_factorialx_negative_extend_complex_poles(self, n_outer, imag):
+ def _check(n):
+ complexify = _is_subdtype(type(n), "c")
+ # like for gamma, we expect complex nans for complex inputs
+ complex_nan = np.complex128("nan+nanj")
+ exp = np.complex128("nan+nanj") if complexify else np.float64("nan")
+ # factorialk defaults to incompatible exact=True, explicitly set to False
+ kw = {"exact": False, "extend": "complex"}
+ # poles are at negative integers that are multiples of k
+ assert_really_equal(special.factorial(n, **kw), exp)
+ assert_really_equal(special.factorial2(n * 2, **kw), exp)
+ assert_really_equal(special.factorialk(n * 3, k=3, **kw), exp)
+ # also test complex k for factorialk
+ c = 1.5 - 2j
+ assert_really_equal(special.factorialk(n * c, k=c, **kw), complex_nan)
+ # same for array case
+ assert_really_equal(special.factorial([n], **kw)[0], exp)
+ assert_really_equal(special.factorial2([n * 2], **kw)[0], exp)
+ assert_really_equal(special.factorialk([n * 3], k=3, **kw)[0], exp)
+ assert_really_equal(special.factorialk([n * c], k=c, **kw)[0], complex_nan)
+ # more specific tests in test_factorial{,2,k}_complex_reference
+
+ # imag ensures we test both real and complex representations of the poles
+ _check(n_outer + imag)
+ # check for large multiple of period
+ _check(100_000 * n_outer + imag)
+
@pytest.mark.parametrize("boxed", [True, False])
+ @pytest.mark.parametrize("extend", ["zero", "complex"])
@pytest.mark.parametrize(
"n",
[
np.nan, np.float64("nan"), np.nan + np.nan*1j, np.complex128("nan+nanj"),
- None, np.datetime64("nat")
+ np.inf, np.inf + 0j, -np.inf, -np.inf + 0j, None, np.datetime64("nat")
],
ids=[
"NaN", "np.float64('nan')", "NaN+i*NaN", "np.complex128('nan+nanj')",
- "None", "NaT"
+ "inf", "inf+0i", "-inf", "-inf+0i", "None", "NaT"
]
)
@pytest.mark.parametrize(
"factorialx",
[special.factorial, special.factorial2, special.factorialk]
)
- def test_factorialx_nan(self, factorialx, n, boxed):
+ def test_factorialx_inf_nan(self, factorialx, n, extend, boxed):
# NaNs not allowed (by dtype) for exact=True
- kw = {"exact": False}
+ kw = {"exact": False, "extend": extend}
if factorialx == special.factorialk:
kw["k"] = 3
- permissible_types = ["i"]
- # factorial also allows floats
- if factorialx == special.factorial:
- # None is allowed for scalars, but would cause object type in array case
- permissible_types = ["i", "f"] if boxed else ["i", "f", type(None)]
+ # None is allowed for scalars, but would cause object type in array case
+ permissible_types = ["i", "f", "c"] if boxed else ["i", "f", "c", type(None)]
+ # factorial allows floats also for extend="zero"
+ types_need_complex_ext = "c" if factorialx == special.factorial else ["f", "c"]
if not _is_subdtype(type(n), permissible_types):
with pytest.raises(ValueError, match="Unsupported data type.*"):
factorialx([n] if boxed else n, **kw)
+ elif _is_subdtype(type(n), types_need_complex_ext) and extend != "complex":
+ with pytest.raises(ValueError, match="In order to use non-integer.*"):
+ factorialx([n] if boxed else n, **kw)
else:
+ # account for type and whether extend="complex"
+ complexify = (extend == "complex") and _is_subdtype(type(n), "c")
+ # note that the type of the naïve `np.nan + np.nan * 1j` is `complex`
+ # instead of `numpy.complex128`, which trips up assert_really_equal
+ expected = np.complex128("nan+nanj") if complexify else np.float64("nan")
+ # the only exception are real infinities
+ if _is_subdtype(type(n), "f") and np.isinf(n):
+ # unchanged for positive infinity; negative one depends on extension
+ neg_inf_result = np.float64(0 if (extend == "zero") else "nan")
+ expected = np.float64("inf") if (n > 0) else neg_inf_result
+
result = factorialx([n], **kw)[0] if boxed else factorialx(n, **kw)
- assert_really_equal(result, np.float64("nan"))
+ assert_really_equal(result, expected)
# also tested in test_factorial{,2,k}_{array,scalar}_corner_cases
+ @pytest.mark.parametrize("extend", [0, 1.1, np.nan, "string"])
+ def test_factorialx_raises_extend(self, extend):
+ with pytest.raises(ValueError, match="argument `extend` must be.*"):
+ special.factorial(1, extend=extend)
+ with pytest.raises(ValueError, match="argument `extend` must be.*"):
+ special.factorial2(1, extend=extend)
+ with pytest.raises(ValueError, match="argument `extend` must be.*"):
+ special.factorialk(1, k=3, exact=True, extend=extend)
+
@pytest.mark.parametrize("levels", range(1, 5))
@pytest.mark.parametrize("exact", [True, False])
def test_factorialx_array_shape(self, levels, exact):
@@ -2294,15 +2378,20 @@ def test_factorial_int_reference(self, n):
assert_allclose(correct, special.factorial(n, exact=False), rtol=rtol)
assert_allclose(correct, special.factorial([n], exact=False)[0], rtol=rtol)
+ # extend="complex" only works for exact=False
+ kw = {"exact": False, "extend": "complex"}
+ assert_allclose(correct, special.factorial(n, **kw), rtol=rtol)
+ assert_allclose(correct, special.factorial([n], **kw)[0], rtol=rtol)
+
def test_factorial_float_reference(self):
def _check(n, expected):
rtol = 8e-14 if sys.platform == 'win32' else 1e-15
assert_allclose(special.factorial(n), expected, rtol=rtol)
assert_allclose(special.factorial([n])[0], expected, rtol=rtol)
# using floats with `exact=True` raises an error for scalars and arrays
- with pytest.raises(ValueError, match="Non-integer values.*"):
- assert_allclose(special.factorial(n, exact=True), expected, rtol=rtol)
- with pytest.raises(ValueError, match="factorial with `exact=Tr.*"):
+ with pytest.raises(ValueError, match="`exact=True` only supports.*"):
+ special.factorial(n, exact=True)
+ with pytest.raises(ValueError, match="`exact=True` only supports.*"):
special.factorial([n], exact=True)
# Reference values from mpmath for gamma(n+1)
@@ -2317,8 +2406,25 @@ def _check(n, expected):
# close to maximum for float64
_check(170.6243, 1.79698185749571048960082e+308)
+ def test_factorial_complex_reference(self):
+ def _check(n, expected):
+ rtol = 3e-15 if sys.platform == 'win32' else 2e-15
+ kw = {"exact": False, "extend": "complex"}
+ assert_allclose(special.factorial(n, **kw), expected, rtol=rtol)
+ assert_allclose(special.factorial([n], **kw)[0], expected, rtol=rtol)
+
+ # Reference values from mpmath.gamma(n+1)
+ # negative & complex values
+ _check(-0.5, expected=1.7724538509055160276)
+ _check(-0.5 + 0j, expected=1.7724538509055160276 + 0j)
+ _check(2 + 2j, expected=-0.42263728631120216694 + 0.87181425569650686062j)
+ # close to poles
+ _check(-0.9999, expected=9999.422883232725532)
+ _check(-1 + 0.0001j, expected=-0.57721565582674219 - 9999.9999010944009697j)
+
@pytest.mark.parametrize("dtype", [np.int64, np.float64,
np.complex128, object])
+ @pytest.mark.parametrize("extend", ["zero", "complex"])
@pytest.mark.parametrize("exact", [True, False])
@pytest.mark.parametrize("dim", range(0, 5))
# test empty & non-empty arrays, with nans and mixed
@@ -2327,7 +2433,7 @@ def _check(n, expected):
[[], [1], [1.1], [np.nan], [np.nan + np.nan * 1j], [np.nan, 1]],
ids=["[]", "[1]", "[1.1]", "[NaN]", "[NaN+i*NaN]", "[NaN, 1]"],
)
- def test_factorial_array_corner_cases(self, content, dim, exact, dtype):
+ def test_factorial_array_corner_cases(self, content, dim, exact, extend, dtype):
if dtype is object and SCIPY_ARRAY_API:
pytest.skip("object arrays unsupported in array API mode")
# get dtype without calling array constructor (that might fail or mutate)
@@ -2336,17 +2442,23 @@ def test_factorial_array_corner_cases(self, content, dim, exact, dtype):
if dtype == np.float64 and any(_is_subdtype(type(x), "c") for x in content):
pytest.skip("impossible combination")
- kw = {"exact": exact}
+ kw = {"exact": exact, "extend": extend}
# np.array(x, ndim=0) will not be 0-dim. unless x is too
content = content if (dim > 0 or len(content) != 1) else content[0]
n = np.array(content, ndmin=dim, dtype=dtype)
result = None
- if not _is_subdtype(n.dtype, ["i", "f"]):
+ if extend == "complex" and exact:
+ with pytest.raises(ValueError, match="Incompatible options:.*"):
+ special.factorial(n, **kw)
+ elif not _is_subdtype(n.dtype, ["i", "f", "c"]):
with pytest.raises(ValueError, match="Unsupported data type.*"):
special.factorial(n, **kw)
+ elif _is_subdtype(n.dtype, "c") and extend != "complex":
+ with pytest.raises(ValueError, match="In order to use non-integer.*"):
+ special.factorial(n, **kw)
elif exact and not _is_subdtype(n.dtype, "i"):
- with pytest.raises(ValueError, match="factorial with `exact=.*"):
+ with pytest.raises(ValueError, match="`exact=True` only supports.*"):
special.factorial(n, **kw)
else:
result = special.factorial(n, **kw)
@@ -2359,22 +2471,33 @@ def test_factorial_array_corner_cases(self, content, dim, exact, dtype):
# result is empty if and only if n is empty, and has the same dimension
# as n; dtype stays the same, except when not empty and not exact:
if n.size:
- dtype = native_int if exact else np.float64
+ cx = (extend == "complex") and _is_subdtype(n.dtype, "c")
+ dtype = np.complex128 if cx else (native_int if exact else np.float64)
expected = np.array(ref, ndmin=dim, dtype=dtype)
- assert_really_equal(result, expected)
+ assert_really_equal(result, expected, rtol=1e-15)
+ @pytest.mark.parametrize("extend", ["zero", "complex"])
@pytest.mark.parametrize("exact", [True, False])
@pytest.mark.parametrize("n", [1, 1.1, 2 + 2j, np.nan, np.nan + np.nan*1j, None],
ids=["1", "1.1", "2+2j", "NaN", "NaN+i*NaN", "None"])
- def test_factorial_scalar_corner_cases(self, n, exact):
- kw = {"exact": exact}
- if not _is_subdtype(type(n), ["i", "f", type(None)]):
+ def test_factorial_scalar_corner_cases(self, n, exact, extend):
+ kw = {"exact": exact, "extend": extend}
+ if extend == "complex" and exact:
+ with pytest.raises(ValueError, match="Incompatible options:.*"):
+ special.factorial(n, **kw)
+ elif not _is_subdtype(type(n), ["i", "f", "c", type(None)]):
with pytest.raises(ValueError, match="Unsupported data type.*"):
special.factorial(n, **kw)
+ elif _is_subdtype(type(n), "c") and extend != "complex":
+ with pytest.raises(ValueError, match="In order to use non-integer.*"):
+ special.factorial(n, **kw)
elif n is None or np.isnan(n):
- assert_really_equal(special.factorial(n, **kw), np.float64("nan"))
+ # account for dtype and whether extend="complex"
+ complexify = (extend == "complex") and _is_subdtype(type(n), "c")
+ expected = np.complex128("nan+nanj") if complexify else np.float64("nan")
+ assert_really_equal(special.factorial(n, **kw), expected)
elif exact and _is_subdtype(type(n), "f"):
- with pytest.raises(ValueError, match="Non-integer values.*"):
+ with pytest.raises(ValueError, match="`exact=True` only supports.*"):
special.factorial(n, **kw)
else:
assert_equal(special.factorial(n, **kw), special.gamma(n + 1))
@@ -2409,8 +2532,37 @@ def test_factorial2_int_reference(self, n):
assert_allclose(correct, special.factorial2(n, exact=False), rtol=rtol)
assert_allclose(correct, special.factorial2([n], exact=False)[0], rtol=rtol)
+ # extend="complex" only works for exact=False
+ kw = {"exact": False, "extend": "complex"}
+ # approximation only matches exactly for `n == 1 (mod k)`, see docstring
+ if n % 2 == 1:
+ assert_allclose(correct, special.factorial2(n, **kw), rtol=rtol)
+ assert_allclose(correct, special.factorial2([n], **kw)[0], rtol=rtol)
+
+ def test_factorial2_complex_reference(self):
+ # this tests for both floats and complex
+ def _check(n, expected):
+ rtol = 5e-15
+ kw = {"exact": False, "extend": "complex"}
+ assert_allclose(special.factorial2(n, **kw), expected, rtol=rtol)
+ assert_allclose(special.factorial2([n], **kw)[0], expected, rtol=rtol)
+
+ # Reference values from mpmath for:
+ # mpmath.power(2, n/2) * mpmath.gamma(n/2 + 1) * mpmath.sqrt(2 / mpmath.pi)
+ _check(3, expected=3)
+ _check(4, expected=special.factorial2(4) * math.sqrt(2 / math.pi))
+ _check(20, expected=special.factorial2(20) * math.sqrt(2 / math.pi))
+ # negative & complex values
+ _check(-0.5, expected=0.82217895866245855122)
+ _check(-0.5 + 0j, expected=0.82217895866245855122 + 0j)
+ _check(3 + 3j, expected=-1.0742236630142471526 + 1.4421398439387262897j)
+ # close to poles
+ _check(-1.9999, expected=7978.8918745523440682)
+ _check(-2 + 0.0001j, expected=0.0462499835314308444 - 7978.84559148876374493j)
+
@pytest.mark.parametrize("dtype", [np.int64, np.float64,
np.complex128, object])
+ @pytest.mark.parametrize("extend", ["zero", "complex"])
@pytest.mark.parametrize("exact", [True, False])
@pytest.mark.parametrize("dim", range(0, 5))
# test empty & non-empty arrays, with nans and mixed
@@ -2419,22 +2571,28 @@ def test_factorial2_int_reference(self, n):
[[], [1], [1.1], [np.nan], [np.nan + np.nan * 1j], [np.nan, 1]],
ids=["[]", "[1]", "[1.1]", "[NaN]", "[NaN+i*NaN]", "[NaN, 1]"],
)
- def test_factorial2_array_corner_cases(self, content, dim, exact, dtype):
+ def test_factorial2_array_corner_cases(self, content, dim, exact, extend, dtype):
# get dtype without calling array constructor (that might fail or mutate)
if dtype == np.int64 and any(np.isnan(x) or (x != int(x)) for x in content):
pytest.skip("impossible combination")
if dtype == np.float64 and any(_is_subdtype(type(x), "c") for x in content):
pytest.skip("impossible combination")
- kw = {"exact": exact}
+ kw = {"exact": exact, "extend": extend}
# np.array(x, ndim=0) will not be 0-dim. unless x is too
content = content if (dim > 0 or len(content) != 1) else content[0]
n = np.array(content, ndmin=dim, dtype=dtype)
result = None
- if not _is_subdtype(n.dtype, "i"):
+ if extend == "complex" and exact:
+ with pytest.raises(ValueError, match="Incompatible options:.*"):
+ special.factorial2(n, **kw)
+ elif not _is_subdtype(n.dtype, ["i", "f", "c"]):
with pytest.raises(ValueError, match="Unsupported data type.*"):
special.factorial2(n, **kw)
+ elif _is_subdtype(n.dtype, ["f", "c"]) and extend != "complex":
+ with pytest.raises(ValueError, match="In order to use non-integer.*"):
+ special.factorial2(n, **kw)
else:
result = special.factorial2(n, **kw)
@@ -2446,20 +2604,34 @@ def test_factorial2_array_corner_cases(self, content, dim, exact, dtype):
# result is empty if and only if n is empty, and has the same dimension
# as n; dtype stays the same, except when not empty and not exact:
if n.size:
- dtype = native_int if exact else np.float64
+ cx = (extend == "complex") and _is_subdtype(n.dtype, "c")
+ dtype = np.complex128 if cx else (native_int if exact else np.float64)
expected = np.array(ref, ndmin=dim, dtype=dtype)
- assert_really_equal(result, expected, rtol=1e-15)
+ assert_really_equal(result, expected, rtol=2e-15)
+ @pytest.mark.parametrize("extend", ["zero", "complex"])
@pytest.mark.parametrize("exact", [True, False])
@pytest.mark.parametrize("n", [1, 1.1, 2 + 2j, np.nan, np.nan + np.nan*1j, None],
ids=["1", "1.1", "2+2j", "NaN", "NaN+i*NaN", "None"])
- def test_factorial2_scalar_corner_cases(self, n, exact):
- kw = {"exact": exact}
- if not _is_subdtype(type(n), "i"):
+ def test_factorial2_scalar_corner_cases(self, n, exact, extend):
+ kw = {"exact": exact, "extend": extend}
+ if extend == "complex" and exact:
+ with pytest.raises(ValueError, match="Incompatible options:.*"):
+ special.factorial2(n, **kw)
+ elif not _is_subdtype(type(n), ["i", "f", "c", type(None)]):
with pytest.raises(ValueError, match="Unsupported data type.*"):
special.factorial2(n, **kw)
+ elif _is_subdtype(type(n), ["f", "c"]) and extend != "complex":
+ with pytest.raises(ValueError, match="In order to use non-integer.*"):
+ special.factorial2(n, **kw)
+ elif n is None or np.isnan(n):
+ # account for dtype and whether extend="complex"
+ complexify = (extend == "complex") and _is_subdtype(type(n), "c")
+ expected = np.complex128("nan+nanj") if complexify else np.float64("nan")
+ assert_really_equal(special.factorial2(n, **kw), expected)
else:
- assert_equal(special.factorial2(n, **kw), 1)
+ expected = self.factorialk_ref(n, k=2, **kw)
+ assert_really_equal(special.factorial2(n, **kw), expected, rtol=1e-15)
@pytest.mark.parametrize("k", range(1, 5))
# note that n=170 is the last integer such that factorial(n) fits float64;
@@ -2495,8 +2667,47 @@ def test_factorialk_int_reference(self, n, k):
assert_allclose(correct, special.factorialk(n, k, exact=False), rtol=rtol)
assert_allclose(correct, special.factorialk([n], k, exact=False)[0], rtol=rtol)
+ # extend="complex" only works for exact=False
+ kw = {"k": k, "exact": False, "extend": "complex"}
+ # approximation only matches exactly for `n == 1 (mod k)`, see docstring
+ if n % k == 1:
+ rtol = 2e-14
+ assert_allclose(correct, special.factorialk(n, **kw), rtol=rtol)
+ assert_allclose(correct, special.factorialk([n], **kw)[0], rtol=rtol)
+
+ def test_factorialk_complex_reference(self):
+ # this tests for both floats and complex
+ def _check(n, k, exp):
+ rtol = 1e-14
+ kw = {"k": k, "exact": False, "extend": "complex"}
+ assert_allclose(special.factorialk(n, **kw), exp, rtol=rtol)
+ assert_allclose(special.factorialk([n], **kw)[0], exp, rtol=rtol)
+
+ # Reference values from mpmath for:
+ # mpmath.power(k, (n-1)/k) * mpmath.gamma(n/k + 1) / mpmath.gamma(1/k + 1)
+ _check(n=4, k=3, exp=special.factorialk(4, k=3, exact=True))
+ _check(n=5, k=3, exp=7.29011132947227083)
+ _check(n=6.5, k=3, exp=19.6805080113566010)
+ # non-integer k
+ _check(n=3, k=2.5, exp=2.58465740293218541)
+ _check(n=11, k=2.5, exp=1963.5) # ==11*8.5*6*3.5; c.f. n == 1 (mod k)
+ _check(n=-3 + 3j + 1, k=-3 + 3j, exp=-2 + 3j)
+ # complex values
+ _check(n=4 + 4j, k=4, exp=-0.67855904082768043854 + 2.1993925819930311497j)
+ _check(n=4, k=4 - 4j, exp=1.9775338957222718742 + 0.92607172675423901371j)
+ _check(n=4 + 4j, k=4 - 4j, exp=0.1868492880824934475 + 0.87660580316894290247j)
+ # negative values
+ _check(n=-0.5, k=3, exp=0.72981013240713739354)
+ _check(n=-0.5 + 0j, k=3, exp=0.72981013240713739354 + 0j)
+ _check(n=2.9, k=-0.7, exp=0.45396591474966867296 + 0.56925525174685228866j)
+ _check(n=-0.6, k=-0.7, exp=-0.07190820089634757334 - 0.090170031876701730081j)
+ # close to poles
+ _check(n=-2.9999, k=3, exp=7764.7170695908828364)
+ _check(n=-3 + 0.0001j, k=3, exp=0.1349475632879599864 - 7764.5821055158365027j)
+
@pytest.mark.parametrize("dtype", [np.int64, np.float64,
np.complex128, object])
+ @pytest.mark.parametrize("extend", ["zero", "complex"])
@pytest.mark.parametrize("exact", [True, False])
@pytest.mark.parametrize("dim", range(0, 5))
# test empty & non-empty arrays, with nans and mixed
@@ -2505,22 +2716,28 @@ def test_factorialk_int_reference(self, n, k):
[[], [1], [1.1], [np.nan], [np.nan + np.nan * 1j], [np.nan, 1]],
ids=["[]", "[1]", "[1.1]", "[NaN]", "[NaN+i*NaN]", "[NaN, 1]"],
)
- def test_factorialk_array_corner_cases(self, content, dim, exact, dtype):
+ def test_factorialk_array_corner_cases(self, content, dim, exact, extend, dtype):
# get dtype without calling array constructor (that might fail or mutate)
if dtype == np.int64 and any(np.isnan(x) or (x != int(x)) for x in content):
pytest.skip("impossible combination")
if dtype == np.float64 and any(_is_subdtype(type(x), "c") for x in content):
pytest.skip("impossible combination")
- kw = {"k": 3, "exact": exact}
+ kw = {"k": 3, "exact": exact, "extend": extend}
# np.array(x, ndim=0) will not be 0-dim. unless x is too
content = content if (dim > 0 or len(content) != 1) else content[0]
n = np.array(content, ndmin=dim, dtype=dtype)
result = None
- if not _is_subdtype(n.dtype, "i"):
+ if extend == "complex" and exact:
+ with pytest.raises(ValueError, match="Incompatible options:.*"):
+ special.factorialk(n, **kw)
+ elif not _is_subdtype(n.dtype, ["i", "f", "c"]):
with pytest.raises(ValueError, match="Unsupported data type.*"):
special.factorialk(n, **kw)
+ elif _is_subdtype(n.dtype, ["f", "c"]) and extend != "complex":
+ with pytest.raises(ValueError, match="In order to use non-integer.*"):
+ special.factorialk(n, **kw)
else:
result = special.factorialk(n, **kw)
@@ -2532,22 +2749,35 @@ def test_factorialk_array_corner_cases(self, content, dim, exact, dtype):
# result is empty if and only if n is empty, and has the same dimension
# as n; dtype stays the same, except when not empty and not exact:
if n.size:
- dtype = native_int if exact else np.float64
+ cx = (extend == "complex") and _is_subdtype(n.dtype, "c")
+ dtype = np.complex128 if cx else (native_int if exact else np.float64)
expected = np.array(ref, ndmin=dim, dtype=dtype)
- assert_really_equal(result, expected, rtol=1e-15)
+ assert_really_equal(result, expected, rtol=2e-15)
+ @pytest.mark.parametrize("extend", ["zero", "complex"])
@pytest.mark.parametrize("exact", [True, False])
@pytest.mark.parametrize("k", range(1, 5))
@pytest.mark.parametrize("n", [1, 1.1, 2 + 2j, np.nan, np.nan + np.nan*1j, None],
ids=["1", "1.1", "2+2j", "NaN", "NaN+i*NaN", "None"])
- def test_factorialk_scalar_corner_cases(self, n, k, exact):
- kw = {"k": k, "exact": exact}
- if not _is_subdtype(type(n), "i"):
+ def test_factorialk_scalar_corner_cases(self, n, k, exact, extend):
+ kw = {"k": k, "exact": exact, "extend": extend}
+ if extend == "complex" and exact:
+ with pytest.raises(ValueError, match="Incompatible options:.*"):
+ special.factorialk(n, **kw)
+ elif not _is_subdtype(type(n), ["i", "f", "c", type(None)]):
with pytest.raises(ValueError, match="Unsupported data type.*"):
special.factorialk(n, **kw)
+ elif _is_subdtype(type(n), ["f", "c"]) and extend != "complex":
+ with pytest.raises(ValueError, match="In order to use non-integer.*"):
+ special.factorialk(n, **kw)
+ elif n is None or np.isnan(n):
+ # account for dtype and whether extend="complex"
+ complexify = (extend == "complex") and _is_subdtype(type(n), "c")
+ expected = np.complex128("nan+nanj") if complexify else np.float64("nan")
+ assert_really_equal(special.factorialk(n, **kw), expected)
else:
- # factorialk(1, k) == 1 for all k
- assert_equal(special.factorialk(n, **kw), 1)
+ expected = self.factorialk_ref(n, **kw)
+ assert_really_equal(special.factorialk(n, **kw), expected, rtol=1e-15)
@pytest.mark.parametrize("k", range(1, 5))
def test_factorialk_deprecation_exact(self, k):
@@ -2557,29 +2787,42 @@ def test_factorialk_deprecation_exact(self, k):
special.factorialk(1, k=k)
@pytest.mark.parametrize("boxed", [True, False])
- @pytest.mark.parametrize("exact", [True, False])
- @pytest.mark.parametrize("k", [0, 1.1, np.nan])
- def test_factorialk_raises_k_complex(self, k, exact, boxed):
+ @pytest.mark.parametrize("exact,extend",
+ [(True, "zero"), (False, "zero"), (False, "complex")])
+ @pytest.mark.parametrize("k", [-1, -1.0, 0, 0.0, 0 + 1j, 1.1, np.nan])
+ def test_factorialk_raises_k_complex(self, k, exact, extend, boxed):
n = [1] if boxed else 1
- kw = {"k": k, "exact": exact}
- with pytest.raises(ValueError, match="k must be a positive integer*"):
+ kw = {"k": k, "exact": exact, "extend": extend}
+ if extend == "zero":
+ msg = "In order to use non-integer.*"
+ if _is_subdtype(type(k), "i") and (k < 1):
+ msg = "For `extend='zero'`.*"
+ with pytest.raises(ValueError, match=msg):
+ special.factorialk(n, **kw)
+ elif k == 0:
+ with pytest.raises(ValueError, match="Parameter k cannot be zero!"):
+ special.factorialk(n, **kw)
+ else:
+ # no error
special.factorialk(n, **kw)
@pytest.mark.parametrize("boxed", [True, False])
- @pytest.mark.parametrize("exact", [True, False])
+ @pytest.mark.parametrize("exact,extend",
+ [(True, "zero"), (False, "zero"), (False, "complex")])
# neither integer, float nor complex
@pytest.mark.parametrize("k", ["string", np.datetime64("nat")],
ids=["string", "NaT"])
- def test_factorialk_raises_k_other(self, k, exact, boxed):
+ def test_factorialk_raises_k_other(self, k, exact, extend, boxed):
n = [1] if boxed else 1
- kw = {"k": k, "exact": exact}
- with pytest.raises(ValueError, match="k must be a positive integer*"):
+ kw = {"k": k, "exact": exact, "extend": extend}
+ with pytest.raises(ValueError, match="Unsupported data type.*"):
special.factorialk(n, **kw)
- @pytest.mark.parametrize("exact", [True, False])
+ @pytest.mark.parametrize("exact,extend",
+ [(True, "zero"), (False, "zero"), (False, "complex")])
@pytest.mark.parametrize("k", range(1, 12))
- def test_factorialk_dtype(self, k, exact):
- kw = {"k": k, "exact": exact}
+ def test_factorialk_dtype(self, k, exact, extend):
+ kw = {"k": k, "exact": exact, "extend": extend}
if exact and k in _FACTORIALK_LIMITS_64BITS.keys():
n = np.array([_FACTORIALK_LIMITS_32BITS[k]])
assert_equal(special.factorialk(n, **kw).dtype, np_long)
@@ -2594,7 +2837,7 @@ def test_factorialk_dtype(self, k, exact):
else:
n = np.array([_FACTORIALK_LIMITS_64BITS.get(k, 1)])
# for exact=True and k >= 10, we always return object;
- # for exact=False it's always float
+ # for exact=False it's always float (unless input is complex)
dtype = object if exact else np.float64
assert_equal(special.factorialk(n, **kw).dtype, dtype)
@@ -2602,7 +2845,7 @@ def test_factorial_mixed_nan_inputs(self):
x = np.array([np.nan, 1, 2, 3, np.nan])
expected = np.array([np.nan, 1, 2, 6, np.nan])
assert_equal(special.factorial(x, exact=False), expected)
- with pytest.raises(ValueError, match="factorial with `exact=True.*"):
+ with pytest.raises(ValueError, match="`exact=True` only supports.*"):
special.factorial(x, exact=True)
| ENH: extensions of `factorial{,2,k}` for complex domains, recurrences, ...
Following the clean-up of #15600 in #15841, we can now discuss the extensions separately.
## Design Space
There are several dimensions that come into play:
- extensions to negative integers
- extensions to floats
- extensions to complex numbers
- extensions to poles through recurrence (possible e.g. for `factorial2`)
## Extension Issues
Some people would like to have the float/complex extensions of these functions (e.g. https://github.com/scipy/scipy/pull/15349). I tend to agree, but that raises another problem:
#### Complex extensions are incompatible with current inputs
First off, everything that's 0 currently on the negative axis gets a different value. More importantly, all even numbers get smaller by a factor ~0.79=sqrt(2/pi) in the [extension](https://en.wikipedia.org/wiki/Double_factorial#Complex_arguments).
For non-complex floats, there doesn't seem to be another relevant extension, especially not one that matches on all the integers and isn't just a gratuitous hack (this is what #15841 ended up stalling on for a long while). Compared to #15600 I now propose that floats continue to be treated as errors for `extend='zero'` in `factorial{2,k}`.
#### Extensions based on recurrence are incompatible with current status as well as complex extension
This came up for example in #15299. It's [possible](https://en.wikipedia.org/wiki/Double_factorial#Negative_arguments) (for example) to invert the recurrence relation n!! = n * (n-2)!!, and so extend the double-factorial to negative integers. The resulting value would be neither 0 (currently) nor the pole in the complex extension.
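For concreteness, here is a minimal sketch (not a proposed implementation, and restricted to odd integers) of the values such a recurrence-based extension would produce:

```python
def factorial2_odd(n):
    """n!! for odd integers only, extended to negative odd n by inverting
    the recurrence n!! = n * (n-2)!!, i.e. (n-2)!! = n!! / n."""
    assert n % 2 == 1, "sketch only covers odd integers"
    if n >= -1:                      # anchor: 1!! = (-1)!! = 1
        result = 1.0
        while n > 1:
            result *= n
            n -= 2
        return result
    return factorial2_odd(n + 2) / (n + 2)   # walk the inverted recurrence down

assert factorial2_odd(7) == 105.0
assert factorial2_odd(-1) == 1.0
assert factorial2_odd(-3) == -1.0
assert abs(factorial2_odd(-5) - 1/3) < 1e-15
```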
## Design Considerations
- Complex extensions (together with non-positive floats) should be behind a keyword, roughly because:
- 0-for-negative-values is a valid approach, and has been in scipy ~forever.
- Since the values being produced are incompatible (for negative & even integers), the complex extension should be explicitly opt-in.
- Using recurrence relations for extending into poles is an open question; not sure if worth it, but if so, probably with another enum value for a potential `extend` keyword.
- Use the most general approximation, except when alternatives with better accuracy / performance exist.
## Possible Future Behaviour (note `extend=`, not `exact=`)
| Inputs | `factorial(`<br/>`extend=`<br/>`'zero')` | `factorial(`<br/>`extend=`<br/>`'complex')` | `factorial2(`<br/>`extend=`<br/>`'zero')` | `factorial2(`<br/>`extend=`<br/>`'complex')` | `factorialk(`<br/>`extend=`<br/>`'zero')` | `factorialk(`<br/>`extend=`<br/>`'complex')` |
|-|-|-|-|-|-|-|
| Scalar positive integer w/ `exact=True` | n! | n! | n!! | n!! | n!(k) | n!(k) |
| Scalar positive integer | Γ(n+1) | Γ(n+1) | [1a] for odd,<br/>[1b] for even<br/>(matches n!!) | [1a] (_doesn't_<br/>match n!!<br/>for even n) | n!(k)<br/>(no approx.) | [2] |
| Scalar negative integer / poles | 0.0 | NaN | 0.0 | NaN | 0.0 | NaN |
| Scalar positive float | Γ(n+1) | Γ(n+1) | `ValueError` | [1a] | `ValueError` | [2] |
| Scalar negative float | 0.0 | Γ(n+1) | `ValueError` | [1a] | `ValueError` | [2] |
| Scalar complex | `ValueError` | Γ(n+1) | `ValueError` | [1a] | `ValueError` | [2] |
| Scalar NaN | NaN | NaN | NaN | NaN | NaN | NaN |
| Positive integer in array | Γ(n+1) | Γ(n+1) |[1a] for odd,<br/>[1b] for even<br/>(matches n!!) | [1a] (_doesn't_<br/>match n!!<br/>for even n) | n!(k)<br/>(no approx.) | [2] |
| Negative integer in array | 0.0 | NaN | 0.0 | NaN | 0.0 | NaN |
| Positive float in array | Γ(n+1) | Γ(n+1) | `ValueError` | [1a] | `ValueError` | [2] |
| Negative float in array | 0.0 | Γ(n+1) | `ValueError` | [1a] | `ValueError` | [2] |
| Complex in array | `ValueError` | Γ(n+1) | `ValueError` | [1a] | `ValueError` | [2] |
| NaN in array | NaN | NaN | NaN | NaN | NaN | NaN |
| Empty array | `array([])` | `array([])` | `array([])` | `array([])` | `array([])` | `array([])` |
The formulas in question are:
* [1a]: `z!! = 2^((z-1)/2) Γ(z/2 + 1) / Γ(1/2 + 1) == 2^(z/2) Γ(z/2 + 1) sqrt(2 / π)` (holds exactly for odd integers and is the basis for the extension)
* [1b]: `z!! = 2^(z/2) Γ(z/2 + 1)` (holds only for even integers)
* [2]: `z!(α) = α^((z-1)/α) Γ(z/α + 1) / Γ(1/α + 1)` (extension based on α'th integers)
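As a quick numerical sanity check of [1a] and [2] (assuming `mpmath` is available; the printed values match the reference values used in the tests above):

```python
import mpmath as mp

def dfact_1a(z):      # [1a]
    return mp.power(2, z/2) * mp.gamma(z/2 + 1) * mp.sqrt(2/mp.pi)

def kfact_2(z, k):    # [2]
    return mp.power(k, (z - 1)/k) * mp.gamma(z/k + 1) / mp.gamma(1/k + 1)

print(dfact_1a(3))       # 3.0, matches 3!! exactly
print(dfact_1a(4))       # ~6.383, i.e. 8*sqrt(2/pi) rather than 4!! = 8
print(dfact_1a(-0.5))    # ~0.8221789586624586
print(kfact_2(5, 3))     # ~7.290111329472271
```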
I'd very much welcome feedback on this - there are some trade-offs to be made (e.g. whether negative floats should also raise an error without `extend='complex'`, etc.)
| Not a general comment, but just a remark on one specific case.
In SciPy-1.10.*, the result of `factorial2(-1)` was `1` and this changed to `0` in version 1.11, which was an unexpected change that broke a library I'm involved in. We relied on the old behavior for the implementation of integrals involving products of Gaussian functions. In the recurrence relations for these integrals, `factorial2(2*n-1)` often appears in denominators, where `n` can be zero or a positive integer.
In case of the double factorial, it is common to define a meaningful result for negative odd integer arguments, see e.g. https://en.wikipedia.org/wiki/Double_factorial#Negative_arguments. Is there any chance of supporting odd negative arguments in `factorial2`? (Note that this is not related to complex arguments, but rather comes from a generalization of the standard recurrence relation.)
> Is there any chance of supporting odd negative arguments in `factorial2`?
Yes it's possible; I mentioned this in the OP already:🙃
> This came up for example in #15299. It's [possible](https://en.wikipedia.org/wiki/Double_factorial#Negative_arguments) (for example) to invert the recurrence relation n!! = n * (n-2)!!, and so extend the double-factorial to negative integers. The resulting value would be neither 0 (currently) nor the pole in the complex extension.
Personally I was waiting for 1.12 to branch before touching the factorial functions again, but feel free to raise a PR!
Sorry for having completely missed your point. I should have gone for that coffee first. The reason I didn't get it is that the rows "Scalar negative integer / poles" and "Negative integer in array" contain 0.0 or NaN. Shouldn't these follow the same rules? Letting the outcome depend on the dtype of the argument may cause a lot of confusion.
Do you suggest a PR for a 1.11.x version in which support for negative odd arguments is added to `factorial2` without changing the API? Or do you prefer to go for the API change right away?
P.S. I'm not convinced by the backward compatibility argument to treat integers differently. With 1.11, backward compatibility is already broken.
What I'm suggesting - as described in the OP (so you picked the right issue!) - is to add a keyword argument `extend=` to `factorial2` that allows _opting into_ a different behaviour for `n<0`. In your case, using the values that arise from the recurrence relation. Independently of that, we'll also want to eventually allow using the complex extension, which the design allows without further overhauls.
> P.S. I'm not convinced by the backward compatibility argument to treat integers differently. With 1.11, backward compatibility is already broken.
Yes, it was breakage we accepted (perhaps wrongly without a deprecation period), see https://github.com/scipy/scipy/issues/18813
Thanks for clarifying, and sorry for having missed some points. Just to avoid confusion, I'd rather ask a few more questions:
- The proposal is to have `extend="zero"` as the default value, because this is the current behavior?
- For our use case, the option `extend="recurrence"` would need to be implemented in `factorial2`? (It's not in the table, but this seems to make sense.)
- The `extend="recurrence"` is only meaningful for (some) integer arguments of `factorial2`. Would you return `NaN` when it is combined with a `float` or `complex` argument, or when a negative even integer argument is given?
> The proposal is to have `extend="zero"` as the default value, because this is the current behavior?
Yes
> For our use case, the option `extend="recurrence"` would need to be implemented in `factorial2`?
Yes
> The `extend="recurrence"` is only meaningful for (some) integer arguments of `factorial2`. Would you return `NaN` when it is combined with a `float` or `complex` argument, or when a negative even integer argument is given?
Raising an error for known-to-be-incompatible values is the best approach IMO. | 1,730,723,884,000 | [
"enhancement",
"scipy.special"
] | Feature Request | [
"scipy/special/_basic.py:_factorialx_array_approx",
"scipy/special/_basic.py:_factorialx_approx_core",
"scipy/special/_basic.py:factorial",
"scipy/special/_basic.py:factorial2",
"scipy/special/_basic.py:factorialk"
] | [
"scipy/special/_basic.py:_gamma1p"
] | 5 |
pandas-dev/pandas | pandas-dev__pandas-60407 | ee0902a832b7fa3e5821ada176566301791e09ec | diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index f47ef095a8409..bbbf1d9ca60bd 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -1055,7 +1055,9 @@ def shift(self, periods: int = 1, fill_value: object = None) -> IntervalArray:
from pandas import Index
fill_value = Index(self._left, copy=False)._na_value
- empty = IntervalArray.from_breaks([fill_value] * (empty_len + 1))
+ empty = IntervalArray.from_breaks(
+ [fill_value] * (empty_len + 1), closed=self.closed
+ )
else:
empty = self._from_sequence([fill_value] * empty_len, dtype=self.dtype)
| diff --git a/pandas/tests/frame/methods/test_shift.py b/pandas/tests/frame/methods/test_shift.py
index a0f96ff111444..b52240c208493 100644
--- a/pandas/tests/frame/methods/test_shift.py
+++ b/pandas/tests/frame/methods/test_shift.py
@@ -757,3 +757,12 @@ def test_shift_with_offsets_freq_empty(self):
df_shifted = DataFrame(index=shifted_dates)
result = df.shift(freq=offset)
tm.assert_frame_equal(result, df_shifted)
+
+ def test_series_shift_interval_preserves_closed(self):
+ # GH#60389
+ ser = Series(
+ [pd.Interval(1, 2, closed="right"), pd.Interval(2, 3, closed="right")]
+ )
+ result = ser.shift(1)
+ expected = Series([np.nan, pd.Interval(1, 2, closed="right")])
+ tm.assert_series_equal(result, expected)
| BUG: Cannot `shift` Intervals that are not `closed='right'` (the default)
### Pandas version checks
- [X] I have checked that this issue has not already been reported.
- [X] I have confirmed this bug exists on the [latest version](https://pandas.pydata.org/docs/whatsnew/index.html) of pandas.
- [X] I have confirmed this bug exists on the [main branch](https://pandas.pydata.org/docs/dev/getting_started/install.html#installing-the-development-version-of-pandas) of pandas.
### Reproducible Example
```python
import pandas as pd
pd.Series([pd.Interval(1, 2, closed='left'), pd.Interval(2, 3, closed='left')]).shift(1)
```
### Issue Description
[This line](https://github.com/pandas-dev/pandas/blob/1c986d6213904fd7d9acc5622dc91d029d3f1218/pandas/core/arrays/interval.py#L1058) in `shift` creates an empty `IntervalArray` without specifying which side the intervals are closed on. When that array and the one being shifted get concatenated, the following exception is raised:
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/wmorrison/.local/share/pyenv/versions/3.11.5/envs/3.11.5-enspired@aws_lambda/lib/python3.11/site-packages/pandas/core/generic.py", line 11228, in shift
new_data = self._mgr.shift(periods=periods, fill_value=fill_value)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/wmorrison/.local/share/pyenv/versions/3.11.5/envs/3.11.5-enspired@aws_lambda/lib/python3.11/site-packages/pandas/core/internals/base.py", line 312, in shift
return self.apply_with_block("shift", periods=periods, fill_value=fill_value)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/wmorrison/.local/share/pyenv/versions/3.11.5/envs/3.11.5-enspired@aws_lambda/lib/python3.11/site-packages/pandas/core/internals/managers.py", line 363, in apply
applied = getattr(b, f)(**kwargs)
^^^^^^^^^^^^^^^^^^^^^^^
File "/home/wmorrison/.local/share/pyenv/versions/3.11.5/envs/3.11.5-enspired@aws_lambda/lib/python3.11/site-packages/pandas/core/internals/blocks.py", line 2020, in shift
new_values = self.values.T.shift(periods=periods, fill_value=fill_value).T
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/wmorrison/.local/share/pyenv/versions/3.11.5/envs/3.11.5-enspired@aws_lambda/lib/python3.11/site-packages/pandas/core/arrays/interval.py", line 1097, in shift
return self._concat_same_type([a, b])
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/wmorrison/.local/share/pyenv/versions/3.11.5/envs/3.11.5-enspired@aws_lambda/lib/python3.11/site-packages/pandas/core/arrays/interval.py", line 1045, in _concat_same_type
raise ValueError("Intervals must all be closed on the same side.")
ValueError: Intervals must all be closed on the same side.
```
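A minimal sketch of the underlying mismatch, independent of `shift` itself (`from_breaks` defaults to `closed='right'`):

```python
import numpy as np
from pandas.arrays import IntervalArray

arr = IntervalArray.from_tuples([(1, 2), (2, 3)], closed="left")
# the empty fill array built inside shift() does not pass `closed` through:
empty = IntervalArray.from_breaks([np.nan] * 2)
print(arr.closed, empty.closed)   # left right
# concatenating the two inside shift() then fails with
# "Intervals must all be closed on the same side."
```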
### Expected Behavior
The following `pd.Series[Interval]` should be returned, closed on the same side as the original Series
```
0 NaN
1 [1.0, 2.0)
dtype: interval
```
### Installed Versions
<details>
INSTALLED VERSIONS
------------------
commit : 0691c5cf90477d3503834d983f69350f250a6ff7
python : 3.11.5
python-bits : 64
OS : Linux
OS-release : 6.8.0-48-generic
Version : #48~22.04.1-Ubuntu SMP PREEMPT_DYNAMIC Mon Oct 7 11:24:13 UTC 2
machine : x86_64
processor : x86_64
byteorder : little
LC_ALL : None
LANG : en_US.UTF-8
LOCALE : en_US.UTF-8
pandas : 2.2.3
numpy : 2.1.3
pytz : 2024.2
dateutil : 2.9.0.post0
pip : 24.3.1
Cython : None
sphinx : None
IPython : None
adbc-driver-postgresql: None
adbc-driver-sqlite : None
bs4 : 4.12.3
blosc : None
bottleneck : None
dataframe-api-compat : None
fastparquet : None
fsspec : None
html5lib : None
hypothesis : None
gcsfs : None
jinja2 : 3.1.4
lxml.etree : None
matplotlib : 3.9.2
numba : None
numexpr : None
odfpy : None
openpyxl : 3.1.5
pandas_gbq : None
psycopg2 : None
pymysql : None
pyarrow : None
pyreadstat : None
pytest : 8.3.3
python-calamine : None
pyxlsb : None
s3fs : None
scipy : 1.14.1
sqlalchemy : None
tables : None
tabulate : None
xarray : None
xlrd : None
xlsxwriter : None
zstandard : None
tzdata : 2024.2
qtpy : None
pyqt5 : None
</details>
| Thanks for the report! Further investigations and PRs to fix are welcome.
take | 1,732,366,612,000 | [
"Interval"
] | Bug Report | [
"pandas/core/arrays/interval.py:IntervalArray.shift"
] | [] | 1 |
pandas-dev/pandas | pandas-dev__pandas-59900 | 8d2ca0bf84bcf44a800ac19bdb4ed7ec88c555e2 | diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py
index 3cb0e75cfb815..05e1a36877e06 100644
--- a/pandas/core/strings/accessor.py
+++ b/pandas/core/strings/accessor.py
@@ -255,7 +255,9 @@ def _validate(data):
inferred_dtype = lib.infer_dtype(values, skipna=True)
if inferred_dtype not in allowed_types:
- raise AttributeError("Can only use .str accessor with string values!")
+ raise AttributeError(
+ f"Can only use .str accessor with string values, not {inferred_dtype}"
+ )
return inferred_dtype
def __getitem__(self, key):
| diff --git a/pandas/tests/series/accessors/test_str_accessor.py b/pandas/tests/series/accessors/test_str_accessor.py
index 09d965ef1f322..ff530459b78fb 100644
--- a/pandas/tests/series/accessors/test_str_accessor.py
+++ b/pandas/tests/series/accessors/test_str_accessor.py
@@ -15,7 +15,8 @@ def test_str_attribute(self):
# str accessor only valid with string values
ser = Series(range(5))
- with pytest.raises(AttributeError, match="only use .str accessor"):
+ msg = "Can only use .str accessor with string values, not integer"
+ with pytest.raises(AttributeError, match=msg):
ser.str.repeat(2)
def test_str_accessor_updates_on_inplace(self):
| ENH: increase verbosity on error
### Feature Type
- [ ] Adding new functionality to pandas
- [X] Changing existing functionality in pandas
- [ ] Removing existing functionality in pandas
### Problem Description
On a specific type error, the type that caused it is not logged in the error message
### Feature Description
By replacing:
`raise AttributeError("Can only use .str accessor with string values!")`
with
`raise AttributeError(f"Can only use .str accessor with string values! Inferred dType: {inferred_dtype}")`
the problem would be solved
Details can be found in [here](https://github.com/pandas-dev/pandas/pull/59649/files)
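With the wording that was eventually merged (see the patch above), the message now names the inferred dtype, e.g.:

```python
import pandas as pd

pd.Series(range(5)).str.repeat(2)
# AttributeError: Can only use .str accessor with string values, not integer
```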
### Alternative Solutions
Use a debugger and set a break point
### Additional Context
_No response_
| 1,727,349,958,000 | [
"Enhancement",
"Error Reporting",
"Strings"
] | Feature Request | [
"pandas/core/strings/accessor.py:StringMethods._validate"
] | [] | 1 |
|
pylint-dev/pylint | pylint-dev__pylint-10075 | 0687c851e14cf187bb213128c4f23feecd0105ac | diff --git a/pylint/checkers/variables.py b/pylint/checkers/variables.py
index e55968cf38..7a63798d91 100644
--- a/pylint/checkers/variables.py
+++ b/pylint/checkers/variables.py
@@ -1968,7 +1968,7 @@ def _check_consumer(
def _report_unfound_name_definition(
self,
- node: nodes.NodeNG,
+ node: nodes.Name,
current_consumer: NamesConsumer,
) -> bool:
"""Reports used-before-assignment error when all name definition nodes
@@ -1985,7 +1985,9 @@ def _report_unfound_name_definition(
return False
if self._is_variable_annotation_in_function(node):
return False
- if self._has_nonlocal_binding(node):
+ if self._has_nonlocal_in_enclosing_frame(
+ node, current_consumer.consumed_uncertain.get(node.name, [])
+ ):
return False
if (
node.name in self._reported_type_checking_usage_scopes
@@ -2375,11 +2377,21 @@ def _maybe_used_and_assigned_at_once(defstmt: _base_nodes.Statement) -> bool:
def _is_builtin(self, name: str) -> bool:
return name in self.linter.config.additional_builtins or utils.is_builtin(name)
- def _has_nonlocal_binding(self, node: nodes.Name) -> bool:
- """Checks if name node has a nonlocal binding in any enclosing frame."""
+ def _has_nonlocal_in_enclosing_frame(
+ self, node: nodes.Name, uncertain_definitions: list[nodes.NodeNG]
+ ) -> bool:
+ """Check if there is a nonlocal declaration in the nearest frame that encloses
+ both usage and definitions.
+ """
+ defining_frames = {definition.frame() for definition in uncertain_definitions}
frame = node.frame()
- while frame:
- if _is_nonlocal_name(node, frame):
+ is_enclosing_frame = False
+ while frame and not is_enclosing_frame:
+ is_enclosing_frame = all(
+ (frame is defining_frame) or frame.parent_of(defining_frame)
+ for defining_frame in defining_frames
+ )
+ if is_enclosing_frame and _is_nonlocal_name(node, frame):
return True
frame = frame.parent.frame() if frame.parent else None
return False
| diff --git a/tests/functional/u/used/used_before_assignment_nonlocal.py b/tests/functional/u/used/used_before_assignment_nonlocal.py
index 4dc8bbf943..a3d8ca6517 100644
--- a/tests/functional/u/used/used_before_assignment_nonlocal.py
+++ b/tests/functional/u/used/used_before_assignment_nonlocal.py
@@ -121,7 +121,9 @@ def inner():
def nonlocal_in_outer_frame_ok(callback, condition_a, condition_b):
- """Nonlocal declared in outer frame, usage and definition in different frames."""
+ """Nonlocal declared in outer frame, usage and definition in different frames,
+ both enclosed in outer frame.
+ """
def outer():
nonlocal callback
if condition_a:
@@ -133,3 +135,31 @@ def inner():
def callback():
pass
outer()
+
+
+def nonlocal_in_distant_outer_frame_fail(callback, condition_a, condition_b):
+ """Nonlocal declared in outer frame, both usage and definition immediately enclosed
+ in intermediate frame.
+ """
+ def outer():
+ nonlocal callback
+ def intermediate():
+ if condition_a:
+ def inner():
+ callback() # [possibly-used-before-assignment]
+ inner()
+ else:
+ if condition_b:
+ def callback():
+ pass
+ intermediate()
+ outer()
+
+
+def nonlocal_after_bad_usage_fail():
+ """Nonlocal declared after used-before-assignment."""
+ num = 1
+ def inner():
+ num = num + 1 # [used-before-assignment]
+ nonlocal num
+ inner()
diff --git a/tests/functional/u/used/used_before_assignment_nonlocal.txt b/tests/functional/u/used/used_before_assignment_nonlocal.txt
index 887985fda2..d48443f375 100644
--- a/tests/functional/u/used/used_before_assignment_nonlocal.txt
+++ b/tests/functional/u/used/used_before_assignment_nonlocal.txt
@@ -7,3 +7,5 @@ used-before-assignment:39:18:39:28:test_fail5:Using variable 'undefined1' before
used-before-assignment:90:10:90:18:type_annotation_never_gets_value_despite_nonlocal:Using variable 'some_num' before assignment:HIGH
used-before-assignment:96:14:96:18:inner_function_lacks_access_to_outer_args.inner:Using variable 'args' before assignment:HIGH
used-before-assignment:117:18:117:21:nonlocal_in_outer_frame_fail.outer.inner:Using variable 'num' before assignment:HIGH
+possibly-used-before-assignment:149:20:149:28:nonlocal_in_distant_outer_frame_fail.outer.intermediate.inner:Possibly using variable 'callback' before assignment:CONTROL_FLOW
+used-before-assignment:163:14:163:17:nonlocal_after_bad_usage_fail.inner:Using variable 'num' before assignment:HIGH
| [uba] Fix new false negative from #10034
```
def nonlocal_in_outer_frame_ok(callback, condition_a, condition_b):
def outer():
nonlocal callback
def inner():
if condition_a:
def inner2():
callback() # possibly-used-before-assignment?
inner2()
else:
if condition_b:
def callback():
pass
inner()
outer()
```
we should probably raise a message here?
_Originally posted by @zenlyj in https://github.com/pylint-dev/pylint/pull/10034#issuecomment-2453421927_
| 1,731,139,112,000 | [
"Unreleased",
"Skip news :mute:"
] | Bug Report | [
"pylint/checkers/variables.py:VariablesChecker._report_unfound_name_definition",
"pylint/checkers/variables.py:VariablesChecker._has_nonlocal_binding"
] | [
"pylint/checkers/variables.py:VariablesChecker._has_nonlocal_in_enclosing_frame"
] | 2 |
|
secdev/scapy | secdev__scapy-4586 | 8e08cbf759de6709a5b4af6bea3655d293129bb4 | diff --git a/scapy/contrib/automotive/doip.py b/scapy/contrib/automotive/doip.py
index b9d279bcf6b..3fcbdc5dad4 100644
--- a/scapy/contrib/automotive/doip.py
+++ b/scapy/contrib/automotive/doip.py
@@ -239,10 +239,7 @@ def answers(self, other):
def hashret(self):
# type: () -> bytes
- if self.payload_type in [0x8001, 0x8002, 0x8003]:
- return bytes(self)[:2] + struct.pack(
- "H", self.target_address ^ self.source_address)
- return bytes(self)[:2]
+ return bytes(self)[:3]
def post_build(self, pkt, pay):
# type: (bytes, bytes) -> bytes
| diff --git a/test/contrib/automotive/doip.uts b/test/contrib/automotive/doip.uts
index 9a7d61e3b7d..b5963a0d817 100644
--- a/test/contrib/automotive/doip.uts
+++ b/test/contrib/automotive/doip.uts
@@ -10,6 +10,8 @@
= Load Contrib Layer
+from test.testsocket import TestSocket, cleanup_testsockets, UnstableSocket
+
load_contrib("automotive.doip", globals_dict=globals())
load_contrib("automotive.uds", globals_dict=globals())
@@ -406,6 +408,30 @@ assert pkts[0][DoIP].payload_length == 2
assert pkts[0][DoIP:2].payload_length == 7
assert pkts[1][DoIP].payload_length == 103
+= Doip logical addressing
+
+filename = scapy_path("/test/pcaps/doip_functional_request.pcap.gz")
+tx_sock = TestSocket(DoIP)
+rx_sock = TestSocket(DoIP)
+tx_sock.pair(rx_sock)
+
+for pkt in PcapReader(filename):
+ if pkt.haslayer(DoIP):
+ tx_sock.send(pkt[DoIP])
+
+ans, unans = rx_sock.sr(DoIP(bytes(DoIP(payload_type=0x8001, source_address=0xe80, target_address=0xe400) / UDS() / UDS_TP())), multi=True, timeout=0.1, verbose=False)
+
+cleanup_testsockets()
+
+ans.summary()
+if unans:
+ unans.summary()
+
+assert len(ans) == 8
+ans.summary()
+assert len(unans) == 0
+
+
+ DoIP Communication tests
= Load libraries
diff --git a/test/pcaps/doip_functional_request.pcap.gz b/test/pcaps/doip_functional_request.pcap.gz
new file mode 100644
index 00000000000..c2b9e9cf35f
Binary files /dev/null and b/test/pcaps/doip_functional_request.pcap.gz differ
| DoIP functional addressing responses not associated with preceding request
### Brief description
When sending a request over DoIP using functional addressing, the DoIP source addresses of the ECU responses will be different from the target address of the preceding request, as the ECUs respond from their actual address instead of the functional "broadcast address" used in the request. `DoIP.hashret` effectively filters out these "mismatched" packets, so when using `DoIPSocket.sr(...)` with a functional target address, no responses will ever be returned.
### Scapy version
2.6.0
### Python version
3.11
### Operating system
Linux 6.1.21-v8+
### Additional environment information
_No response_
### How to reproduce
```python3
from scapy.contrib.automotive.doip import *
from scapy.contrib.automotive.uds import *
ip="169.254.207.236"
sock = DoIPSocket(ip)
ans, unans = sock.sr(DoIP(payload_type=0x8001, source_address=0xe80, target_address=0xe400) / UDS() / UDS_TP(), multi=True, timeout=2)
```
### Actual result
```console
INFO: Routing activation successful! Target address set to: 0x3107
Begin emission:
Finished sending 1 packets.
.........
Received 9 packets, got 0 answers, remaining 1 packets
```
### Expected result
```console
INFO: Routing activation successful! Target address set to: 0x3107
Begin emission:
Finished sending 1 packets.
.********
Received 9 packets, got 8 answers, remaining 0 packets
```
This result was obtained by monkey-patching DoIP.hashret to ignore the addresses, based on a config setting, analogous to how scapy handles IP broadcast traffic with the `conf.checkIPaddr` setting (see below).
### Related resources
Just for reference, this is the monkey-patch I'm using at the moment to enable me to run functional addressing requests (by disabling the setting whenever I need it):
```python3
import struct

from scapy.config import conf
from scapy.contrib.automotive.doip import DoIP

def hashret_patched(self):
    if self.payload_type in [0x8001, 0x8002, 0x8003]:
        if conf.checkDoIPaddr:
            return bytes(self)[:2] + struct.pack(
                "H", self.target_address ^ self.source_address)
        return bytes(self)[:2]
    return bytes(self)[:2]

conf.checkDoIPaddr = True
DoIP.hashret = hashret_patched
```
But there is maybe/probably a better way.
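For context, here is a rough sketch of how a send/receive loop pairs stimuli with responses. This is a simplification, not scapy's actual sndrcv() code, and it assumes packet objects exposing hashret() and answers(). It illustrates why a hashret() that mixes in target_address ^ source_address leaves functionally addressed requests unanswered: the responses hash into a different bucket and are never even tested against the request.
```python
def match_responses(sent_packets, received_packets):
    # Bucket sent packets by their hashret() value.
    buckets = {}
    for s in sent_packets:
        buckets.setdefault(s.hashret(), []).append(s)
    answered = []
    matched_sent = set()
    for r in received_packets:
        # Only packets in the same bucket are considered as candidates, so a
        # mismatching hashret() means answers() is never consulted at all.
        for s in buckets.get(r.hashret(), []):
            if r.answers(s):
                answered.append((s, r))
                matched_sent.add(id(s))
                break
    unanswered = [s for s in sent_packets if id(s) not in matched_sent]
    return answered, unanswered
```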
| Thanks for this issue.
Could you please reference a section in the DoIP Standard which describes this behaviour?
I couldn't find anything related to this.
Besides this, you can also pin the target_address in the DoIPSocket.__init__ function:
https://github.com/secdev/scapy/blob/c38a5de175be8e59742be473f4fb2dd6edef5503/scapy/contrib/automotive/doip.py#L440
Additionally, you can do the routing activation request yourself and eliminate the automatic setting of the target_address.
Could you please provide a pcap file for analysis?
Hey, thanks for taking a look.
I don't think there's anything in the spec that explicitly describes this behavior; it seems more like emergent behavior of the general principles that functionally addressed diagnostic requests are broadcast and that responses always carry the source address of the sending ECU. Otherwise (e.g. if the ECU responded with the source address set to the functional address the preceding request was addressed to), the recipient wouldn't be able to associate the individual diagnostic message responses with the ECUs sending them when it receives multiple responses for a single broadcast request, right?
I'm by no means an expert on this, I've just observed this behavior with the ECUs I'm working with, and it makes sense to me that this is how it works. Maybe I'm missing something? I have attached a pcap file that demonstrates the traffic I see when running the snippets I posted above: [doip_functional_request.pcap.zip](https://github.com/user-attachments/files/17330147/doip_functional_request.pcap.zip)
Hi, I’ve checked the DoIP standard regarding the described behaviour. I found this:

So you are doing a routing activation with SA 0x3107, but then your UDS packet is sent to address 0xe400.
By strictly following the standard it should not even be possible to send a UDS packet to this TA.
To me, this looks like out-of-specification behaviour. Neither the AUTOSAR standard nor the ISO 13400 standard specifies a broadcast mechanism like the one shown in your pcap file.
It would be great if you could provide a written reference for this feature.
I guess it must not have been clear from my previous comments, but the setup I have here is that my test device is connected to a gateway (which has address 0x3107). The gateway is connected to a number of ECUs on the vehicle network side.
ISO 13400-2:2019(E) Section 7.8 reads:
> [...] Functional logical addresses are used to address messages to groups of, or all of, the diagnostic application layer entities within a vehicle. [...] **For a DoIP Gateway the reception of a functionally addressed diagnostics message implies a multi- or broadcast on the connected in-vehicle sub-networks.** [...]
Thus, when sending functionally-addressed requests to the gateway, it will, in turn, broadcast them on the vehicle network, and route back any responses from the connected ECUs. | 1,731,098,604,000 | [] | Bug Report | [
"scapy/contrib/automotive/doip.py:DoIP.hashret"
] | [] | 1 |
django/django | django__django-18986 | 987854ba44b497b195536199f8f6d1dc440a43ca | diff --git a/django/core/management/commands/inspectdb.py b/django/core/management/commands/inspectdb.py
index 77605b178f69..58594fb66f80 100644
--- a/django/core/management/commands/inspectdb.py
+++ b/django/core/management/commands/inspectdb.py
@@ -106,9 +106,12 @@ def handle_inspection(self, options):
connection.introspection.get_primary_key_columns(
cursor, table_name
)
+ or []
)
primary_key_column = (
- primary_key_columns[0] if primary_key_columns else None
+ primary_key_columns[0]
+ if len(primary_key_columns) == 1
+ else None
)
unique_columns = [
c["columns"][0]
@@ -128,6 +131,11 @@ def handle_inspection(self, options):
yield ""
yield "class %s(models.Model):" % model_name
known_models.append(model_name)
+
+ if len(primary_key_columns) > 1:
+ fields = ", ".join([f"'{col}'" for col in primary_key_columns])
+ yield f" pk = models.CompositePrimaryKey({fields})"
+
used_column_names = [] # Holds column names used in the table so far
column_to_field_name = {} # Maps column names to names of model fields
used_relations = set() # Holds foreign relations used in the table.
@@ -151,12 +159,6 @@ def handle_inspection(self, options):
# Add primary_key and unique, if necessary.
if column_name == primary_key_column:
extra_params["primary_key"] = True
- if len(primary_key_columns) > 1:
- comment_notes.append(
- "The composite primary key (%s) found, that is not "
- "supported. The first column is selected."
- % ", ".join(primary_key_columns)
- )
elif column_name in unique_columns:
extra_params["unique"] = True
| diff --git a/tests/inspectdb/tests.py b/tests/inspectdb/tests.py
index 1be4efc43051..131bd45ce89e 100644
--- a/tests/inspectdb/tests.py
+++ b/tests/inspectdb/tests.py
@@ -655,11 +655,10 @@ def test_composite_primary_key(self):
call_command("inspectdb", table_name, stdout=out)
output = out.getvalue()
self.assertIn(
- f"column_1 = models.{field_type}(primary_key=True) # The composite "
- f"primary key (column_1, column_2) found, that is not supported. The "
- f"first column is selected.",
+ "pk = models.CompositePrimaryKey('column_1', 'column_2')",
output,
)
+ self.assertIn(f"column_1 = models.{field_type}()", output)
self.assertIn(
"column_2 = models.%s()"
% connection.features.introspected_field_types["IntegerField"],
| Add support for CompositePrimaryKey in inspectdb
Description
We can now replace the half-measure created when introspecting a composite primary key with the actual field.
PR
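As a hedged illustration of what the updated command emits (based on the patch and tests above), a table whose primary key spans two columns would now be introspected roughly like this; the table and column names here are hypothetical:
```python
from django.db import models


class ExampleTable(models.Model):
    # Emitted when the introspected primary key covers more than one column.
    pk = models.CompositePrimaryKey('column_1', 'column_2')
    column_1 = models.IntegerField()
    column_2 = models.IntegerField()

    class Meta:
        managed = False
        db_table = 'example_table'
```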
| 1,735,772,897,000 | [] | Feature Request | [
"django/core/management/commands/inspectdb.py:Command.handle_inspection"
] | [] | 1 |
|
django/django | django__django-18941 | 94436dee57ce677e6ffcbb0438e0441d5c261d62 | diff --git a/django/utils/html.py b/django/utils/html.py
index bc336d88a66c..0d107a0da9fe 100644
--- a/django/utils/html.py
+++ b/django/utils/html.py
@@ -357,6 +357,8 @@ def handle_word(
domain = punycode(domain)
except UnicodeError:
return word
+ local = quote(local, safe="")
+ domain = quote(domain, safe="")
url = self.mailto_template.format(local=local, domain=domain)
nofollow_attr = ""
# Make link.
| diff --git a/tests/utils_tests/test_html.py b/tests/utils_tests/test_html.py
index dc3768e6fae0..0beaf98bff2b 100644
--- a/tests/utils_tests/test_html.py
+++ b/tests/utils_tests/test_html.py
@@ -376,6 +376,19 @@ def test_urlize(self):
+ "한.글." * 15
+ "aaa</a>",
),
+ (
+ # RFC 6068 requires a mailto URI to percent-encode a number of
+ # characters that can appear in <addr-spec>.
+ "yes;this=is&a%[email protected]",
+ '<a href="mailto:yes%3Bthis%3Dis%26a%25valid%[email protected]"'
+ ">yes;this=is&a%[email protected]</a>",
+ ),
+ (
+ # Urlizer shouldn't urlize the "?org" part of this. But since
+ # it does, RFC 6068 requires percent encoding the "?".
+ "[email protected]?org",
+ '<a href="mailto:[email protected]%3Forg">[email protected]?org</a>',
+ ),
)
for value, output in tests:
with self.subTest(value=value):
| Urlize incorrectly handles punctuation in email addresses
Description
Several punctuation characters (%, &, +, !, etc.) can—and sometimes do—appear in the local part of an email address (before the @). The urlize template filter doesn't correctly encode them, which can result in broken mailto links.
Example (Django 5.1.4):
from django.template.defaultfilters import urlize
urlize("it%[email protected]")
# '<a href="mailto:it%[email protected]">it%[email protected]</a>'
# Expected:
# '<a href="mailto:it%[email protected]">it%[email protected]</a>'
Clicking the resulting mailto link might work as expected, or do nothing, or could launch a mail composer with a missing or incorrect email address, depending on the specific address, browser and email app. Sequences that could also be percent-encoded characters (like "%de" in the example) are especially prone to unpredictable results.
The mailto URI spec RFC 6068 requires percent encoding most punctuation in this situation (section 2, item 1; also see section 5, Encoding).
Proposed fix: apply urllib.parse.quote() to local where the mailto link is constructed in django.utils.html.Urlizer. (Although not strictly necessary, it wouldn't hurt to also quote domain there.)
| 1,734,303,083,000 | [] | Bug Report | [
"django/utils/html.py:Urlizer.handle_word"
] | [] | 1 |
|
django/django | django__django-18925 | 78a55a04c9e6591167e1993c35d3737a705c6ec9 | diff --git a/django/core/validators.py b/django/core/validators.py
index 8732ddf7adbf..c4e734c1d82a 100644
--- a/django/core/validators.py
+++ b/django/core/validators.py
@@ -2,7 +2,7 @@
import math
import re
from pathlib import Path
-from urllib.parse import urlsplit, urlunsplit
+from urllib.parse import urlsplit
from django.core.exceptions import ValidationError
from django.utils.deconstruct import deconstructible
@@ -128,8 +128,6 @@ def __call__(self, value):
@deconstructible
class URLValidator(RegexValidator):
- ul = "\u00a1-\uffff" # Unicode letters range (must not be a raw string).
-
# IP patterns
ipv4_re = (
r"(?:0|25[0-5]|2[0-4][0-9]|1[0-9]?[0-9]?|[1-9][0-9]?)"
@@ -177,31 +175,17 @@ def __call__(self, value):
splitted_url = urlsplit(value)
except ValueError:
raise ValidationError(self.message, code=self.code, params={"value": value})
- try:
- super().__call__(value)
- except ValidationError as e:
- # Trivial case failed. Try for possible IDN domain
- if value:
- scheme, netloc, path, query, fragment = splitted_url
- try:
- netloc = punycode(netloc) # IDN -> ACE
- except UnicodeError: # invalid domain part
- raise e
- url = urlunsplit((scheme, netloc, path, query, fragment))
- super().__call__(url)
- else:
- raise
- else:
- # Now verify IPv6 in the netloc part
- host_match = re.search(r"^\[(.+)\](?::[0-9]{1,5})?$", splitted_url.netloc)
- if host_match:
- potential_ip = host_match[1]
- try:
- validate_ipv6_address(potential_ip)
- except ValidationError:
- raise ValidationError(
- self.message, code=self.code, params={"value": value}
- )
+ super().__call__(value)
+ # Now verify IPv6 in the netloc part
+ host_match = re.search(r"^\[(.+)\](?::[0-9]{1,5})?$", splitted_url.netloc)
+ if host_match:
+ potential_ip = host_match[1]
+ try:
+ validate_ipv6_address(potential_ip)
+ except ValidationError:
+ raise ValidationError(
+ self.message, code=self.code, params={"value": value}
+ )
# The maximum length of a full host name is 253 characters per RFC 1034
# section 3.1. It's defined to be 255 bytes or less, but this includes
| diff --git a/tests/validators/tests.py b/tests/validators/tests.py
index 4ae0f6413e5e..7455c93d407e 100644
--- a/tests/validators/tests.py
+++ b/tests/validators/tests.py
@@ -126,6 +126,7 @@
"http://مثال.إختبار",
"http://例子.测试",
"http://उदाहरण.परीक्षा",
+ "https://މިހާރު.com", # (valid in IDNA 2008 but not IDNA 2003)
"http://-.~_!$&'()*+,;=%40:80%[email protected]",
"http://xn--7sbb4ac0ad0be6cf.xn--p1ai",
"http://1337.net",
| Dead code in URLValidator
Description
There's some dead code in django.core.validators.URLValidator which could be removed:
The entire "Trivial case failed. Try for possible IDN domain" section is no longer useful. This code attempts to re-validate a failed URL after encoding an international domain name using IDNA 2003 ("punycode"). But the URLValidator regular expressions have allowed all IDNs since Commit 2e65d56 (for #20003, in 2015), so the super call will never fail with a validation error that switching to IDNA 2003 would let pass.
The `ul` unicode letters property is no longer used. The regular expressions that had used it were moved into DomainNameValidator in Commit 4971a9a (for #18119, in Django 5.1).
For the first case, one way to verify the code is no longer in use is to run URLValidator on https://މިހާރު.com, which is a domain allowed by IDNA 2008 but prohibited by IDNA 2003. If the punycode() branch were coming into play, that URL would be rejected:
from django.core.validators import URLValidator
URLValidator()("https://މިހާރު.com")
# (No error)
from django.utils.encoding import punycode
punycode("މިހާރު.com")
# UnicodeError: Violation of BIDI requirement 3
# encoding with 'idna' codec failed
| 1,734,044,205,000 | [] | Bug Report | [
"django/core/validators.py:URLValidator.__call__"
] | [] | 1 |
|
django/django | django__django-18911 | 5e998d717f7b4220a1728bd49b66ca0162e2a6cb | diff --git a/django/db/backends/postgresql/schema.py b/django/db/backends/postgresql/schema.py
index 75bf33147233..964009988cbb 100644
--- a/django/db/backends/postgresql/schema.py
+++ b/django/db/backends/postgresql/schema.py
@@ -120,6 +120,8 @@ def _create_like_index_sql(self, model, field):
return None
def _using_sql(self, new_field, old_field):
+ if new_field.generated:
+ return ""
using_sql = " USING %(column)s::%(type)s"
new_internal_type = new_field.get_internal_type()
old_internal_type = old_field.get_internal_type()
| diff --git a/tests/migrations/test_operations.py b/tests/migrations/test_operations.py
index 6312a7d4a2e1..5557426e274f 100644
--- a/tests/migrations/test_operations.py
+++ b/tests/migrations/test_operations.py
@@ -6210,6 +6210,37 @@ def _test_add_generated_field(self, db_persist):
operation.database_backwards(app_label, editor, new_state, project_state)
self.assertColumnNotExists(f"{app_label}_pony", "modified_pink")
+ @skipUnlessDBFeature("supports_stored_generated_columns")
+ def test_generated_field_changes_output_field(self):
+ app_label = "test_gfcof"
+ operation = migrations.AddField(
+ "Pony",
+ "modified_pink",
+ models.GeneratedField(
+ expression=F("pink") + F("pink"),
+ output_field=models.IntegerField(),
+ db_persist=True,
+ ),
+ )
+ from_state, to_state = self.make_test_state(app_label, operation)
+ # Add generated column.
+ with connection.schema_editor() as editor:
+ operation.database_forwards(app_label, editor, from_state, to_state)
+ # Update output_field used in the generated field.
+ operation = migrations.AlterField(
+ "Pony",
+ "modified_pink",
+ models.GeneratedField(
+ expression=F("pink") + F("pink"),
+ output_field=models.DecimalField(decimal_places=2, max_digits=16),
+ db_persist=True,
+ ),
+ )
+ from_state = to_state.clone()
+ to_state = self.apply_operations(app_label, from_state, [operation])
+ with connection.schema_editor() as editor:
+ operation.database_forwards(app_label, editor, from_state, to_state)
+
@skipUnlessDBFeature("supports_stored_generated_columns")
def test_add_generated_field_stored(self):
self._test_add_generated_field(db_persist=True)
| Changing output_field for GeneratedField leads to ProgrammingError with Postgres 16.5+
Description
Consider the following model and assume the initial migration has been applied:
class Order(models.Model):
    order_no = models.IntegerField()
    item_no = models.CharField(max_length=25)
    qty = models.IntegerField()
    cost = models.DecimalField(max_digits=10, decimal_places=2)
    total_cost = models.GeneratedField(
        expression=F("cost") * F("qty"),
        output_field=models.BigIntegerField(),
        db_persist=True,
    )
During a code review we determined the output field should be a Decimal field and the field was modified as follows:
total_cost = models.GeneratedField(
    expression=F("cost") * F("qty"),
    output_field=models.DecimalField(decimal_places=2, max_digits=16),
    db_persist=True,
)
And a new migration was generated:
migrations.AlterField(
    model_name='order',
    name='total_cost',
    field=models.GeneratedField(db_persist=True, expression=django.db.models.expressions.CombinedExpression(models.F('cost'), '*', models.F('qty')), output_field=models.DecimalField(decimal_places=2, max_digits=16)),
),
In Postgres 16.4 and earlier, this migration is applied without error. (I'm aware that the value of the total_cost field is not recomputed for existing records when this migration is applied.)
Starting with Postgres 16.5 and up, this migration fails with the following error:
psycopg2.errors.InvalidColumnDefinition: cannot specify USING when altering type of generated column
DETAIL: Column "total_cost" is a generated column.
...
django.db.utils.ProgrammingError: cannot specify USING when altering type of generated column
This appears to be a result of the following change in Postgres 16.5 (release notes):
Disallow a USING clause when altering the type of a generated column (Peter Eisentraut) [§](https://postgr.es/c/5867ee005)
A generated column already has an expression specifying the column contents, so including USING doesn't make sense.
The Django documentation for GeneratedField makes it clear that
There are many database-specific restrictions on generated fields that Django doesn’t validate and the database may raise an error
For this reason, I almost didn't open a ticket. However, there is logic in db/backends/base/schema.py that checks if the expression of a GeneratedField changed. Consider this migration (which changes the expression from multiplication to addition):
migrations.AlterField(
    model_name='order',
    name='total_cost',
    field=models.GeneratedField(db_persist=True, expression=django.db.models.expressions.CombinedExpression(models.F('cost'), '+', models.F('qty')), output_field=models.BigIntegerField()),
),
Attempting to apply this migration will raise the following error (even in Postgres 16.4):
ValueError: Modifying GeneratedFields is not supported - the field sales.Order.total_cost must be removed and re-added with the new definition.
This error is more helpful. It explains the problem better and even suggests a workaround.
Should we throw a similar error if the output_field of a GeneratedField is changed? Or add a tip to the documentation?
The above was tested with:
Django version 5.1.3
psycopg2 version 2.9.10
Postgres versions: 16.4, 16.5, 16.6, 17.0, and 17.2
| ['Thank you for the detailed report Replicated the error tests/migrations/test_operations.py diff --git a/tests/migrations/test_operations.py b/tests/migrations/test_operations.py index 6312a7d4a2..1b75c609b3 100644 a b class OperationTests(OperationTestBase): 61856185 with self.assertRaisesMessage(ValueError, msg): 61866186 self.apply_operations(app_label, project_state, operations) 61876187 6188 @skipUnlessDBFeature("supports_stored_generated_columns") 6189 def test_generated_field_changes_output_field(self): 6190 app_label = "test_gfcof" 6191 operation = migrations.AddField( 6192 "Pony", 6193 "modified_pink", 6194 models.GeneratedField( 6195 expression=F("pink") + F("pink"), 6196 output_field=models.IntegerField(), 6197 db_persist=True, 6198 ), 6199 ) 6200 project_state, new_state = self.make_test_state(app_label, operation) 6201 # Add generated column. 6202 with connection.schema_editor() as editor: 6203 operation.database_forwards(app_label, editor, project_state, new_state) 6204 # Update output_field used in the generated field. 6205 operations = [ 6206 migrations.AlterField( 6207 "Pony", 6208 "modified_pink", 6209 models.GeneratedField( 6210 expression=F("pink") + F("pink"), 6211 output_field=models.DecimalField(decimal_places=2, max_digits=16), 6212 db_persist=True, 6213 ), 6214 ), 6215 ] 6216 new_state = self.apply_operations(app_label, new_state, operations) 6217 with connection.schema_editor() as editor: 6218 operation.database_forwards(app_label, editor, project_state, new_state) 6219 61886220 def _test_add_generated_field(self, db_persist): 61896221 app_label = "test_agf" ValueError: Modifying GeneratedFields is not supported - the field sales.Order.total_cost must be removed and re-added with the new definition. This error is more helpful. It explains the problem better and even suggests a workaround. Having this error instead makes sense to me. CC-ed a couple of other folks who might have thoughts', 1733281732.0]
['Just to make sure before we commit to a solution. Does Posgres still allows for generated field alterations but disallow that USING is not used? If that\'s the case it seems like a better solution would be to have generated field alteration continue to be supported simply not specify USING (which is effectively redundant with the column type) instead of disabling the feature? django/db/backends/postgresql/schema.py diff --git a/django/db/backends/postgresql/schema.py b/django/db/backends/postgresql/schema.py index 75bf331472..964009988c 100644 a b def _create_like_index_sql(self, model, field): 120120 return None 121121 122122 def _using_sql(self, new_field, old_field): 123 if new_field.generated: 124 return "" 123125 using_sql = " USING %(column)s::%(type)s" 124126 new_internal_type = new_field.get_internal_type() 125127 old_internal_type = old_field.get_internal_type(', 1733308733.0]
["Replying to Simon Charette: Just to make sure before we commit to a solution. Does Posgres still allows for generated field alterations but disallow that USING is not used? If that's the case it seems like a better solution would be to have generated field alteration continue to be supported simply not specify USING (which is effectively redundant with the column type) instead of disabling the feature? I found that I can rename the GeneratedField without any issues. Renaming a field within the expression throws the ValueError: ValueError: Modifying GeneratedFields is not supported - the field sales.Order.total_cost must be removed and re-added with the new definition. This error appears to be raised by Django, so it's not even getting to Postgres.", 1733309553.0]
['It seems to be latter as the above patch works with adjusted tests provided by Sarah tests/migrations/test_operations.py diff --git a/tests/migrations/test_operations.py b/tests/migrations/test_operations.py index 3ac813b899..7aa3bfbc5d 100644 a b def _test_add_generated_field(self, db_persist): 61356135 operation.database_backwards(app_label, editor, new_state, project_state) 61366136 self.assertColumnNotExists(f"{app_label}_pony", "modified_pink") 61376137 6138 @skipUnlessDBFeature("supports_stored_generated_columns") 6139 def test_generated_field_changes_output_field(self): 6140 app_label = "test_gfcof" 6141 operation = migrations.AddField( 6142 "Pony", 6143 "modified_pink", 6144 models.GeneratedField( 6145 expression=F("pink") + F("pink"), 6146 output_field=models.IntegerField(), 6147 db_persist=True, 6148 ), 6149 ) 6150 from_state, to_state = self.make_test_state(app_label, operation) 6151 # Add generated column. 6152 with connection.schema_editor() as editor: 6153 operation.database_forwards(app_label, editor, from_state, to_state) 6154 # Update output_field used in the generated field. 6155 operation = migrations.AlterField( 6156 "Pony", 6157 "modified_pink", 6158 models.GeneratedField( 6159 expression=F("pink") + F("pink"), 6160 output_field=models.DecimalField(decimal_places=2, max_digits=16), 6161 db_persist=True, 6162 ), 6163 ) 6164 from_state = to_state.clone() 6165 to_state = self.apply_operations(app_label, from_state, [operation]) 6166 with connection.schema_editor() as editor: 6167 operation.database_forwards(app_label, editor, from_state, to_state) 6168 61386169 @skipUnlessDBFeature("supports_stored_generated_columns") 61396170 def test_add_generated_field_stored(self): 61406171 self._test_add_generated_field(db_persist=True)', 1733309845.0]
['In summary what Postgres 16.5 changed is that you longer can specify USING on ALTER COLUMN for generated columns ALTER TABLE "test_gfcof_pony" ALTER COLUMN "modified_pink" TYPE numeric(16, 2) USING "modified_pink"::numeric(16, 2); Which makes sense as USING is redundant with TYPE and could result in further ambiguity more if you change the generated expression at the same time. Well the solution appears to simply not specifying USING when dealing with GeneratedField alterations as suggested in comment:2 ALTER TABLE "test_gfcof_pony" ALTER COLUMN "modified_pink" TYPE numeric(16, 2);', 1733310855.0] | 1,733,831,217,000 | [] | Bug Report | [
"django/db/backends/postgresql/schema.py:DatabaseSchemaEditor._using_sql"
] | [] | 1 |
django/django | django__django-18906 | 1860a1afc9ac20750f932e8e0a94b32d096f2848 | diff --git a/django/forms/forms.py b/django/forms/forms.py
index 549a3adf6fa6..614f99039585 100644
--- a/django/forms/forms.py
+++ b/django/forms/forms.py
@@ -316,7 +316,7 @@ def full_clean(self):
"""
Clean all of self.data and populate self._errors and self.cleaned_data.
"""
- self._errors = ErrorDict()
+ self._errors = ErrorDict(renderer=self.renderer)
if not self.is_bound: # Stop further processing.
return
self.cleaned_data = {}
| diff --git a/tests/forms_tests/tests/test_forms.py b/tests/forms_tests/tests/test_forms.py
index cd909628cb03..d88ac33f24f0 100644
--- a/tests/forms_tests/tests/test_forms.py
+++ b/tests/forms_tests/tests/test_forms.py
@@ -5313,6 +5313,22 @@ class CommentForm(Form):
"required></p>",
)
+ def test_custom_renderer_error_dict(self):
+ class CustomRenderer(DjangoTemplates):
+ def render(self, template_name, context, request=None):
+ if template_name == "django/forms/errors/dict/default.html":
+ return "<strong>So many errors!</strong>"
+ return super().render(template_name, context, request)
+
+ form = Form({}, renderer=CustomRenderer())
+ form.full_clean()
+ form.add_error(None, "Test error")
+
+ self.assertHTMLEqual(
+ form.errors.render(),
+ "<strong>So many errors!</strong>",
+ )
+
def test_cyclic_context_boundfield_render(self):
class FirstNameForm(Form):
first_name = CharField()
| ErrorDict always uses default renderer
Description
When BaseForm.full_clean() instantiates ErrorDict, it doesn't pass it the renderer:
https://github.com/django/django/blob/1860a1afc9ac20750f932e8e0a94b32d096f2848/django/forms/forms.py#L319
Despite ErrorDict being ready to receive it and use it for rendering:
https://github.com/django/django/blob/1860a1afc9ac20750f932e8e0a94b32d096f2848/django/forms/utils.py#L124
Practically, this means customizations to the renderer are ignored when rendering the form errors using {{ errors }} in the template.
Building on top of the example and fix from #35987, I have a custom renderer that swaps out some templates:
from django import forms
from django.forms.renderers import TemplatesSetting
from django.forms.utils import ErrorList
from django.template.exceptions import TemplateDoesNotExist
class CustomRenderer(TemplatesSetting):
    def get_template(self, template_name):
        if template_name.startswith("django/forms/"):
            # Load our custom version from "custom/forms/" if it exists
            our_template = f"custom/{template_name.removeprefix('django/')}"
            try:
                return super().get_template(our_template)
            except TemplateDoesNotExist:
                pass
        return super().get_template(template_name)

class CustomErrorList(ErrorList):
    def copy(self):
        # Copying the fix from Django Ticket #35987
        copy = super().copy()
        copy.renderer = self.renderer
        return copy

class MyForm(forms.Form):
    default_renderer = CustomRenderer()

    def __init__(self, *args, error_class=CustomErrorList, **kwargs):
        super().__init__(*args, error_class=error_class, **kwargs)
The custom error list template uses some CSS utility classes from Tailwind, like text-red-600:
{% if errors %}<ul class="text-red-600">{% for field, error in errors %}<li>{{ field }}{{ error }}</li>{% endfor %}</ul>{% endif %}
But creating a form with a non-field error and rendering the error dict uses the default renderer and its template:
In [1]: from example.forms import MyForm
...:
...: form = MyForm({})
...: form.full_clean()
...: form.add_error(None, "Test error")
In [2]: form.errors.render()
Out[2]: '<ul class="errorlist"><li>__all__<ul class="text-red-600"><li>Test error</li></ul></li></ul>'
I need to override full_clean() to set the renderer:
class MyForm(forms.Form):
    ...

    def full_clean(self):
        super().full_clean()
        # Fix a bug in Django where self._errors = ErrorDict is not passed the
        # renderer argument when initialized.
        self._errors.renderer = self.renderer
Then form errors use my custom template:
In [1]: from example.forms import MyForm
...:
...: form = MyForm({})
...: form.full_clean()
...: form.add_error(None, "Test error")
In [2]: form.errors.render()
Out[2]: '<ul class="text-red-600"><li>__all__<ul class="text-red-600"><li>Test error</li></ul></li></ul>'
I think this has probably been an issue ever since a custom renderer became possible in #31026. The argument was added to ErrorDict but missed in BaseForm.full_clean(), the only place where the class is instantiated.
| 1,733,743,081,000 | [] | Bug Report | [
"django/forms/forms.py:BaseForm.full_clean"
] | [] | 1 |
|
django/django | django__django-18905 | 1860a1afc9ac20750f932e8e0a94b32d096f2848 | diff --git a/django/forms/utils.py b/django/forms/utils.py
index d24711d1a0b5..27eabe57dc58 100644
--- a/django/forms/utils.py
+++ b/django/forms/utils.py
@@ -163,6 +163,7 @@ def as_data(self):
def copy(self):
copy = super().copy()
copy.error_class = self.error_class
+ copy.renderer = self.renderer
return copy
def get_json_data(self, escape_html=False):
| diff --git a/tests/forms_tests/tests/test_forms.py b/tests/forms_tests/tests/test_forms.py
index cd909628cb03..f93d9b41157f 100644
--- a/tests/forms_tests/tests/test_forms.py
+++ b/tests/forms_tests/tests/test_forms.py
@@ -4849,6 +4849,12 @@ class CustomForm(Form):
form = CustomForm(renderer=custom)
self.assertEqual(form.renderer, custom)
+ def test_get_context_errors(self):
+ custom = CustomRenderer()
+ form = Form(renderer=custom)
+ context = form.get_context()
+ self.assertEqual(context["errors"].renderer, custom)
+
class TemplateTests(SimpleTestCase):
def test_iterate_radios(self):
diff --git a/tests/forms_tests/tests/test_utils.py b/tests/forms_tests/tests/test_utils.py
index f9a5d4c82a6a..a50f86c934b3 100644
--- a/tests/forms_tests/tests/test_utils.py
+++ b/tests/forms_tests/tests/test_utils.py
@@ -2,6 +2,7 @@
import json
from django.core.exceptions import ValidationError
+from django.forms.renderers import DjangoTemplates
from django.forms.utils import (
ErrorDict,
ErrorList,
@@ -161,6 +162,35 @@ def __str__(self):
'<a href="http://www.example.com/">example</a></li></ul>',
)
+ def test_error_list_copy(self):
+ e = ErrorList(
+ [
+ ValidationError(
+ message="message %(i)s",
+ params={"i": 1},
+ ),
+ ValidationError(
+ message="message %(i)s",
+ params={"i": 2},
+ ),
+ ]
+ )
+
+ e_copy = copy.copy(e)
+ self.assertEqual(e, e_copy)
+ self.assertEqual(e.as_data(), e_copy.as_data())
+
+ def test_error_list_copy_attributes(self):
+ class CustomRenderer(DjangoTemplates):
+ pass
+
+ renderer = CustomRenderer()
+ e = ErrorList(error_class="woopsies", renderer=renderer)
+
+ e_copy = e.copy()
+ self.assertEqual(e.error_class, e_copy.error_class)
+ self.assertEqual(e.renderer, e_copy.renderer)
+
def test_error_dict_copy(self):
e = ErrorDict()
e["__all__"] = ErrorList(
@@ -183,6 +213,16 @@ def test_error_dict_copy(self):
e_deepcopy = copy.deepcopy(e)
self.assertEqual(e, e_deepcopy)
+ def test_error_dict_copy_attributes(self):
+ class CustomRenderer(DjangoTemplates):
+ pass
+
+ renderer = CustomRenderer()
+ e = ErrorDict(renderer=renderer)
+
+ e_copy = copy.copy(e)
+ self.assertEqual(e.renderer, e_copy.renderer)
+
def test_error_dict_html_safe(self):
e = ErrorDict()
e["username"] = "Invalid username."
| ErrorList.copy() reverts to default renderer
Description
When an ErrorList is copied, it loses track of any custom renderer, reverting to the default one. Practically, this means custom styles are not applied to non-field errors when rendering a whole form.
For example, I wrote a custom renderer that swaps some templates like so:
from django import forms
from django.forms.renderers import TemplatesSetting
from django.template.exceptions import TemplateDoesNotExist
class CustomRenderer(TemplatesSetting):
    def get_template(self, template_name):
        if template_name.startswith("django/forms/"):
            # Load our custom version from "custom/forms/" if it exists
            our_template = f"custom/{template_name.removeprefix('django/')}"
            try:
                return super().get_template(our_template)
            except TemplateDoesNotExist:
                pass
        return super().get_template(template_name)

class MyForm(forms.Form):
    default_renderer = CustomRenderer()
The custom error list template uses some CSS utility classes from Tailwind, like text-red-600:
{% if errors %}<ul class="text-red-600">{% for error in errors %}<li>{{ error }}</li>{% endfor %}</ul>{% endif %}
Creating a form with a non-field error and rendering those errors uses the custom template:
In [1]: from example.forms import MyForm
...:
...: form = MyForm({})
...: form.full_clean()
...: form.add_error(None, "Test error")
In [2]: form.non_field_errors().render()
Out[2]: '<ul class="text-red-600"><li>Test error</li></ul>'
But rendering the whole form reverts to the default template, from the default renderer:
In [3]: form.render()
Out[3]: '<ul class="errorlist nonfield"><li>Test error</li></ul>\n\n <div></div>'
This occurs because the ErrorList is copied in Form.get_context():
https://github.com/django/django/blob/1860a1afc9ac20750f932e8e0a94b32d096f2848/django/forms/forms.py#L225
The fix would be to also copy over renderer in ErrorList.copy():
https://github.com/django/django/blob/1860a1afc9ac20750f932e8e0a94b32d096f2848/django/forms/utils.py#L165
I think this has probably been an issue ever since a custom renderer became possible in #31026.
| ['Thank you!', 1733723463.0] | 1,733,741,720,000 | [] | Bug Report | [
"django/forms/utils.py:ErrorList.copy"
] | [] | 1 |
django/django | django__django-18888 | edd74c3417fa3a0b29295012ff31dbe44843303c | diff --git a/django/core/management/commands/makemessages.py b/django/core/management/commands/makemessages.py
index 076667d41a8c..23ad424c5c31 100644
--- a/django/core/management/commands/makemessages.py
+++ b/django/core/management/commands/makemessages.py
@@ -40,7 +40,7 @@ def check_programs(*programs):
def is_valid_locale(locale):
- return re.match(r"^[a-z]+$", locale) or re.match(r"^[a-z]+_[A-Z].*$", locale)
+ return re.match(r"^[a-z]+$", locale) or re.match(r"^[a-z]+_[A-Z0-9].*$", locale)
@total_ordering
| diff --git a/tests/i18n/test_extraction.py b/tests/i18n/test_extraction.py
index 7aa600c4c1e2..e4a6260c336a 100644
--- a/tests/i18n/test_extraction.py
+++ b/tests/i18n/test_extraction.py
@@ -179,6 +179,15 @@ def test_valid_locale_with_country(self):
self.assertIn("processing locale en_GB", out.getvalue())
self.assertIs(Path("locale/en_GB/LC_MESSAGES/django.po").exists(), True)
+ def test_valid_locale_with_numeric_region_code(self):
+ out = StringIO()
+ management.call_command(
+ "makemessages", locale=["ar_002"], stdout=out, verbosity=1
+ )
+ self.assertNotIn("invalid locale ar_002", out.getvalue())
+ self.assertIn("processing locale ar_002", out.getvalue())
+ self.assertIs(Path("locale/ar_002/LC_MESSAGES/django.po").exists(), True)
+
def test_valid_locale_tachelhit_latin_morocco(self):
out = StringIO()
management.call_command(
| makemessages: is_valid_locale regex fails to validate locales with numeric region codes
Description
(last modified by Juan Pablo Mallarino)
The is_valid_locale function in the makemessages management command uses a regular expression that doesn't account for locales with numeric region codes, such as es_419. The current regex only allows uppercase letters after the underscore. This excludes valid locale codes that use numbers in the region subtag.
This can cause issues when trying to generate message files for these locales. For example, running makemessages with es_419 will not generate the expected message files, potentially leading to missing translations.
Proposed Solution
Modify the regular expression in is_valid_locale to include numeric characters in the region subtag validation. The suggested change is:
From:
r"^[a-z]+_[A-Z].*$"
To:
r"^[a-z]+_[A-Z0-9].*$"
This change would allow the validation of locales while still maintaining the expected format for other locale codes. This simple modification would ensure broader compatibility and avoid unexpected behavior when working with valid locales containing numeric region codes.
| ['This is the PR that introduced the validating function: \u200bhttps://github.com/django/django/pull/15521 and the issue https://code.djangoproject.com/ticket/33565', 1733325193.0]
["Support for regions was added in #33078 (see also test case like de-1996) The validation added in #33565 doesn't match this", 1733370439.0] | 1,733,406,361,000 | [] | Bug Report | [
"django/core/management/commands/makemessages.py:is_valid_locale"
] | [] | 1 |
django/django | django__django-18854 | edd74c3417fa3a0b29295012ff31dbe44843303c | diff --git a/django/contrib/postgres/fields/array.py b/django/contrib/postgres/fields/array.py
index 4171af82f9d2..a7e40703a3f8 100644
--- a/django/contrib/postgres/fields/array.py
+++ b/django/contrib/postgres/fields/array.py
@@ -169,7 +169,7 @@ def value_to_string(self, obj):
else:
obj = AttributeSetter(base_field.attname, val)
values.append(base_field.value_to_string(obj))
- return json.dumps(values)
+ return json.dumps(values, ensure_ascii=False)
def get_transform(self, name):
transform = super().get_transform(name)
diff --git a/django/contrib/postgres/fields/hstore.py b/django/contrib/postgres/fields/hstore.py
index cfc156ab596b..300458c0b118 100644
--- a/django/contrib/postgres/fields/hstore.py
+++ b/django/contrib/postgres/fields/hstore.py
@@ -43,7 +43,7 @@ def to_python(self, value):
return value
def value_to_string(self, obj):
- return json.dumps(self.value_from_object(obj))
+ return json.dumps(self.value_from_object(obj), ensure_ascii=False)
def formfield(self, **kwargs):
return super().formfield(
| diff --git a/tests/postgres_tests/test_array.py b/tests/postgres_tests/test_array.py
index ea7807687ea2..ba7151d4a2f5 100644
--- a/tests/postgres_tests/test_array.py
+++ b/tests/postgres_tests/test_array.py
@@ -1008,6 +1008,32 @@ def test_loading(self):
self.assertEqual(instance.field, [1, 2, None])
+class TestStringSerialization(PostgreSQLSimpleTestCase):
+ field_values = [["Django", "Python", None], ["Джанго", "פייתון", None, "król"]]
+
+ @staticmethod
+ def create_json_data(array_field_value):
+ fields = {"field": json.dumps(array_field_value, ensure_ascii=False)}
+ return json.dumps(
+ [{"model": "postgres_tests.chararraymodel", "pk": None, "fields": fields}]
+ )
+
+ def test_encode(self):
+ for field_value in self.field_values:
+ with self.subTest(field_value=field_value):
+ instance = CharArrayModel(field=field_value)
+ data = serializers.serialize("json", [instance])
+ json_data = self.create_json_data(field_value)
+ self.assertEqual(json.loads(data), json.loads(json_data))
+
+ def test_decode(self):
+ for field_value in self.field_values:
+ with self.subTest(field_value=field_value):
+ json_data = self.create_json_data(field_value)
+ instance = list(serializers.deserialize("json", json_data))[0].object
+ self.assertEqual(instance.field, field_value)
+
+
class TestValidation(PostgreSQLSimpleTestCase):
def test_unbounded(self):
field = ArrayField(models.IntegerField())
diff --git a/tests/postgres_tests/test_hstore.py b/tests/postgres_tests/test_hstore.py
index cac3eb742af0..2d19364736be 100644
--- a/tests/postgres_tests/test_hstore.py
+++ b/tests/postgres_tests/test_hstore.py
@@ -297,39 +297,53 @@ class MyModel(PostgreSQLModel):
class TestSerialization(PostgreSQLSimpleTestCase):
- test_data = json.dumps(
- [
- {
- "model": "postgres_tests.hstoremodel",
- "pk": None,
- "fields": {
- "field": json.dumps({"a": "b"}),
- "array_field": json.dumps(
- [
- json.dumps({"a": "b"}),
- json.dumps({"b": "a"}),
- ]
- ),
- },
- }
- ]
- )
+ field_values = [
+ ({"a": "b"}, [{"a": "b"}, {"b": "a"}]),
+ (
+ {"все": "Трурль и Клапауций"},
+ [{"Трурль": "Клапауций"}, {"Клапауций": "Трурль"}],
+ ),
+ ]
+
+ @staticmethod
+ def create_json_data(field_value, array_field_value):
+ fields = {
+ "field": json.dumps(field_value, ensure_ascii=False),
+ "array_field": json.dumps(
+ [json.dumps(item, ensure_ascii=False) for item in array_field_value],
+ ensure_ascii=False,
+ ),
+ }
+ return json.dumps(
+ [{"model": "postgres_tests.hstoremodel", "pk": None, "fields": fields}]
+ )
def test_dumping(self):
- instance = HStoreModel(field={"a": "b"}, array_field=[{"a": "b"}, {"b": "a"}])
- data = serializers.serialize("json", [instance])
- self.assertEqual(json.loads(data), json.loads(self.test_data))
+ for field_value, array_field_value in self.field_values:
+ with self.subTest(field_value=field_value, array_value=array_field_value):
+ instance = HStoreModel(field=field_value, array_field=array_field_value)
+ data = serializers.serialize("json", [instance])
+ json_data = self.create_json_data(field_value, array_field_value)
+ self.assertEqual(json.loads(data), json.loads(json_data))
def test_loading(self):
- instance = list(serializers.deserialize("json", self.test_data))[0].object
- self.assertEqual(instance.field, {"a": "b"})
- self.assertEqual(instance.array_field, [{"a": "b"}, {"b": "a"}])
+ for field_value, array_field_value in self.field_values:
+ with self.subTest(field_value=field_value, array_value=array_field_value):
+ json_data = self.create_json_data(field_value, array_field_value)
+ instance = list(serializers.deserialize("json", json_data))[0].object
+ self.assertEqual(instance.field, field_value)
+ self.assertEqual(instance.array_field, array_field_value)
def test_roundtrip_with_null(self):
- instance = HStoreModel(field={"a": "b", "c": None})
- data = serializers.serialize("json", [instance])
- new_instance = list(serializers.deserialize("json", data))[0].object
- self.assertEqual(instance.field, new_instance.field)
+ for field_value in [
+ {"a": "b", "c": None},
+ {"Енеїда": "Ти знаєш, він який суціга", "Зефір": None},
+ ]:
+ with self.subTest(field_value=field_value):
+ instance = HStoreModel(field=field_value)
+ data = serializers.serialize("json", [instance])
+ new_instance = list(serializers.deserialize("json", data))[0].object
+ self.assertEqual(instance.field, new_instance.field)
class TestValidation(PostgreSQLSimpleTestCase):
| Postgresql: ArrayField with Unicode characters gets serialized as string of "\u XXXX" characters
Description
In ArrayField.value_to_string(self, obj) the value is encoded using json.dumps(values), which produces escaped Unicode (\uXXXX) sequences by default.
For example, an ArrayField with 3 elements ["один", "два", "три"] (1,2,3 in Russian) will produce
["\u043e\u0434\u0438\u043d", "\u0434\u0432\u0430", "\u0442\u0440\u0438"]
While this is not a bug per se, it becomes a nuisance when viewing the result of the "dumpdata" management command:
The ArrayField fields will be encoded differently from other text fields.
Perhaps there should be an option to turn on/off the ensure_ascii parameter, as in json.dumps(values, ensure_ascii=option)?
The option can be enabled by default, as we do for 'hstore' field, or perhaps enabled conditionally:
in the field settings ArrayField(name='numbers', ascii_only=False)
in settings.py ( ARRAY_FIELD_ENSURE_ASCII )
I will be glad to submit a patch.
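The underlying json.dumps() behaviour described above, shown in isolation:
```python
import json

values = ["один", "два", "три"]

print(json.dumps(values))
# ["\u043e\u0434\u0438\u043d", "\u0434\u0432\u0430", "\u0442\u0440\u0438"]

print(json.dumps(values, ensure_ascii=False))
# ["один", "два", "три"]
```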
| ["Given we made the decision to have JSON serialization default to ensure_ascii=False when dealing with Unicode in #29249 (68fc21b3784aa34c7ba5515ab02ef0c7b6ee856d) I think we should use the same approach here and use ensure_ascii=False for any usage of json.dumps in Field.value_to_string for fields that might include text which includes ArrayField, and HStoreField. I don't think an additional field option to control this behavior and certainly not a setting is warranted here as it should be possible to subclass either field class to override value_to_string and ensure_ascii=False does constitute a more coherent default. Feel free to assign the issue to you and submit a PR with tests for ArrayField and HStoreField.", 1732638340.0]
['\u200bhttps://github.com/django/django/pull/18854', 1732680151.0]
['In ded48546: Fixed #35944 -- Handled serialization of Unicode values in ArrayField and HStoreField.', 1733387518.0] | 1,732,701,709,000 | [] | Feature Request | [
"django/contrib/postgres/fields/array.py:ArrayField.value_to_string",
"django/contrib/postgres/fields/hstore.py:HStoreField.value_to_string"
] | [] | 2 |
django/django | django__django-18850 | ded485464214a3f69b64402b7d82221279f80008 | diff --git a/django/template/loader_tags.py b/django/template/loader_tags.py
index 1874d8c52881..36703b47823a 100644
--- a/django/template/loader_tags.py
+++ b/django/template/loader_tags.py
@@ -242,7 +242,11 @@ def do_block(parser, token):
return BlockNode(block_name, nodelist)
-def construct_relative_path(current_template_name, relative_name):
+def construct_relative_path(
+ current_template_name,
+ relative_name,
+ allow_recursion=False,
+):
"""
Convert a relative path (starting with './' or '../') to the full template
name based on the current_template_name.
@@ -264,7 +268,7 @@ def construct_relative_path(current_template_name, relative_name):
"The relative path '%s' points outside the file hierarchy that "
"template '%s' is in." % (relative_name, current_template_name)
)
- if current_template_name.lstrip("/") == new_name:
+ if not allow_recursion and current_template_name.lstrip("/") == new_name:
raise TemplateSyntaxError(
"The relative path '%s' was translated to template name '%s', the "
"same template in which the tag appears."
@@ -346,7 +350,11 @@ def do_include(parser, token):
options[option] = value
isolated_context = options.get("only", False)
namemap = options.get("with", {})
- bits[1] = construct_relative_path(parser.origin.template_name, bits[1])
+ bits[1] = construct_relative_path(
+ parser.origin.template_name,
+ bits[1],
+ allow_recursion=True,
+ )
return IncludeNode(
parser.compile_filter(bits[1]),
extra_context=namemap,
| diff --git a/tests/template_tests/syntax_tests/test_include.py b/tests/template_tests/syntax_tests/test_include.py
index 3ee99b37981e..be0deee9260d 100644
--- a/tests/template_tests/syntax_tests/test_include.py
+++ b/tests/template_tests/syntax_tests/test_include.py
@@ -330,15 +330,43 @@ def test_include_recursive(self):
],
}
]
- engine = Engine(app_dirs=True)
- t = engine.get_template("recursive_include.html")
- self.assertEqual(
- "Recursion! A1 Recursion! B1 B2 B3 Recursion! C1",
- t.render(Context({"comments": comments}))
- .replace(" ", "")
- .replace("\n", " ")
- .strip(),
- )
+ with self.subTest(template="recursive_include.html"):
+ engine = Engine(app_dirs=True)
+ t = engine.get_template("recursive_include.html")
+ self.assertEqual(
+ "Recursion! A1 Recursion! B1 B2 B3 Recursion! C1",
+ t.render(Context({"comments": comments}))
+ .replace(" ", "")
+ .replace("\n", " ")
+ .strip(),
+ )
+ with self.subTest(template="recursive_relative_include.html"):
+ engine = Engine(app_dirs=True)
+ t = engine.get_template("recursive_relative_include.html")
+ self.assertEqual(
+ "Recursion! A1 Recursion! B1 B2 B3 Recursion! C1",
+ t.render(Context({"comments": comments}))
+ .replace(" ", "")
+ .replace("\n", " ")
+ .strip(),
+ )
+ with self.subTest(template="tmpl"):
+ engine = Engine()
+ template = """
+ Recursion!
+ {% for c in comments %}
+ {{ c.comment }}
+ {% if c.children %}{% include tmpl with comments=c.children %}{% endif %}
+ {% endfor %}
+ """
+ outer_tmpl = engine.from_string("{% include tmpl %}")
+ output = outer_tmpl.render(
+ Context({"tmpl": engine.from_string(template), "comments": comments})
+ )
+ self.assertEqual(
+ "Recursion! A1 Recursion! B1 B2 B3 Recursion! C1",
+ output.replace(" ", "").replace("\n", " ").strip(),
+ )
def test_include_cache(self):
"""
diff --git a/tests/template_tests/templates/recursive_relative_include.html b/tests/template_tests/templates/recursive_relative_include.html
new file mode 100644
index 000000000000..ae49cc0a4367
--- /dev/null
+++ b/tests/template_tests/templates/recursive_relative_include.html
@@ -0,0 +1,7 @@
+Recursion!
+{% for comment in comments %}
+ {{ comment.comment }}
+ {% if comment.children %}
+ {% include "./recursive_relative_include.html" with comments=comment.children %}
+ {% endif %}
+{% endfor %}
| Allow `./` and `../` in paths when recursively including templates
Description
(last modified by Gabriel Nick Pivovarov)
Hi. Currently, when trying to recursively include a Django template within itself using the include tag with a path that contains ./ or ../, Django raises a TemplateSyntaxError. However, using a path that does not contain ./ or ../ does not raise the error. When the error is raised, the debug toolbar describes it like this:
TemplateSyntaxError at /
The relative path ‘“./ul.html”’ was translated to template name ‘app/ul.html’, the same template in which the tag appears.
Here is an example of a template in a Django app called app with the path app/templates/app/ul.html that would produce the error given above:
<ul>
{% for section in sections %}
    <li>
        <p>{{ section.name }}</p>
        {% if section.sections|length != 0 %}
            {% include "./ul.html" with sections=section.sections %}
        {% endif %}
    </li>
{% endfor %}
</ul>
However, replacing the relative path ./ul.html with the equivalent app/ul.html makes the error go away (assuming that the project's settings.py specifies APP_DIRS = True and the views and URLs are configured correctly). The actual paths are translated identically in both cases, and the behavior of the include tag should not depend simply on whether or not the path string uses ./ or ../ (or, if it should, this is not documented in the Django documentation). Therefore, it seems that this is a bug. The expected behavior is that an error is only raised when a template recursively uses the extends tag, not when it recursively uses the include tag.
Conversely, it appears that recursively extending a template using the extends tag with a path that does not contain ./ or ../ raises a TemplateDoesNotExist exception.
One possible fix is to modify the django/template/loader_tags.py file (https://github.com/django/django/blob/main/django/template/loader_tags.py) such that the error is raised when a template attempts to extend itself (not when a template attempts to include itself, which would otherwise be valid). The error handling logic in question starts on line 267 of that file within the construct_relative_path function; perhaps it should only be used when called from the do_extends function.
Here is a relevant discussion in the Django forums: https://forum.djangoproject.com/t/template-recursion-why-does-django-not-allow-and/31689
| ['Relative path support was added in #26402 and recursive include support was added in #3544. I think this was missed when working on #26402, replicated. Here is my test: tests/template_tests/syntax_tests/test_include.py diff --git a/tests/template_tests/syntax_tests/test_include.py b/tests/template_tests/syntax_tests/test_include.py index 3ee99b3798..6dafba5040 100644 a b class IncludeTests(SimpleTestCase): 339339 .replace("\\n", " ") 340340 .strip(), 341341 ) 342 t = engine.get_template("recursive_relative_include.html") 343 self.assertEqual( 344 "Recursion! A1 Recursion! B1 B2 B3 Recursion! C1", 345 t.render(Context({"comments": comments})) 346 .replace(" ", "") 347 .replace("\\n", " ") 348 .strip(), 349 ) 342350 343351 def test_include_cache(self): 344352 """ tests/template_tests/templates/recursive_relative_include.html diff --git a/tests/template_tests/templates/recursive_relative_include.html b/tests/template_tests/templates/recursive_relative_include.html index e69de29bb2..ae49cc0a43 100644 a b 1Recursion! 2{% for comment in comments %} 3 {{ comment.comment }} 4 {% if comment.children %} 5 {% include "./recursive_relative_include.html" with comments=comment.children %} 6 {% endif %} 7{% endfor %}', 1718185535.0]
['\u200bhttps://github.com/django/django/pull/18850', 1732629666.0] | 1,732,568,792,000 | [] | Bug Report | [
"django/template/loader_tags.py:construct_relative_path",
"django/template/loader_tags.py:do_include"
] | [] | 2 |
django/django | django__django-18846 | 857b1048d53ebf5fc5581c110e85c212b81ca83a | diff --git a/django/core/management/commands/sqlmigrate.py b/django/core/management/commands/sqlmigrate.py
index 3e3151f0cf3c..076499b3e2cb 100644
--- a/django/core/management/commands/sqlmigrate.py
+++ b/django/core/management/commands/sqlmigrate.py
@@ -32,10 +32,9 @@ def add_arguments(self, parser):
)
def execute(self, *args, **options):
- # sqlmigrate doesn't support coloring its output but we need to force
- # no_color=True so that the BEGIN/COMMIT statements added by
- # output_transaction don't get colored either.
- options["no_color"] = True
+ # sqlmigrate doesn't support coloring its output, so make the
+ # BEGIN/COMMIT statements added by output_transaction colorless also.
+ self.style.SQL_KEYWORD = lambda noop: noop
return super().execute(*args, **options)
def handle(self, *args, **options):
| diff --git a/tests/migrations/test_commands.py b/tests/migrations/test_commands.py
index 724c88a28fa8..acd7ef4c4e5e 100644
--- a/tests/migrations/test_commands.py
+++ b/tests/migrations/test_commands.py
@@ -9,6 +9,7 @@
from django.apps import apps
from django.core.management import CommandError, call_command
+from django.core.management.base import SystemCheckError
from django.core.management.commands.makemigrations import (
Command as MakeMigrationsCommand,
)
@@ -859,7 +860,7 @@ def test_sqlmigrate_forwards(self):
sqlmigrate outputs forward looking SQL.
"""
out = io.StringIO()
- call_command("sqlmigrate", "migrations", "0001", stdout=out)
+ call_command("sqlmigrate", "migrations", "0001", stdout=out, no_color=True)
lines = out.getvalue().splitlines()
@@ -921,7 +922,14 @@ def test_sqlmigrate_backwards(self):
call_command("migrate", "migrations", verbosity=0)
out = io.StringIO()
- call_command("sqlmigrate", "migrations", "0001", stdout=out, backwards=True)
+ call_command(
+ "sqlmigrate",
+ "migrations",
+ "0001",
+ stdout=out,
+ backwards=True,
+ no_color=True,
+ )
lines = out.getvalue().splitlines()
try:
@@ -1098,6 +1106,30 @@ def test_sqlmigrate_unrepresentable(self):
],
)
+ @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
+ def test_sqlmigrate_transaction_keywords_not_colorized(self):
+ out = io.StringIO()
+ with mock.patch(
+ "django.core.management.color.supports_color", lambda *args: True
+ ):
+ call_command("sqlmigrate", "migrations", "0001", stdout=out, no_color=False)
+ self.assertNotIn("\x1b", out.getvalue())
+
+ @override_settings(
+ MIGRATION_MODULES={"migrations": "migrations.test_migrations_no_operations"},
+ INSTALLED_APPS=["django.contrib.auth"],
+ )
+ def test_sqlmigrate_system_checks_colorized(self):
+ with (
+ mock.patch(
+ "django.core.management.color.supports_color", lambda *args: True
+ ),
+ self.assertRaisesMessage(SystemCheckError, "\x1b"),
+ ):
+ call_command(
+ "sqlmigrate", "migrations", "0001", skip_checks=False, no_color=False
+ )
+
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.migrated_app",
| sqlmigrate prevents normal colorization of system checks
Description
sqlmigrate forces no_color=True for the sake of not colorizing the sql keywords BEGIN; and COMMIT;, but this has the side effect of preventing system check output from being colorized.
(To reproduce, begin with a project that will emit a system check, and run sqlmigrate.)
Suggesting a small PR to preserve the non-colorization of BEGIN; and COMMIT; while still colorizing system checks.
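A minimal sketch of the same technique in a custom command (this is not sqlmigrate itself, just an illustration of the approach the fix above takes): neutralise only the SQL_KEYWORD style used by output_transaction instead of forcing no_color=True, so system-check output keeps its colors.
from django.core.management.base import BaseCommand


class Command(BaseCommand):
    output_transaction = True  # wraps output in BEGIN;/COMMIT; via style.SQL_KEYWORD

    def execute(self, *args, **options):
        # Make only the transaction keywords colorless; everything else,
        # including system check output, still uses the normal style.
        self.style.SQL_KEYWORD = lambda text: text
        return super().execute(*args, **options)

    def handle(self, *args, **options):
        return "SELECT 1;"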
| 1,732,485,967,000 | [] | Bug Report | [
"django/core/management/commands/sqlmigrate.py:Command.execute"
] | [] | 1 |
|
django/django | django__django-18820 | 4c452cc377f6f43acd90c6e54826ebd2e6219b0d | diff --git a/django/forms/formsets.py b/django/forms/formsets.py
index c8e5893f19b6..c2663154d4a5 100644
--- a/django/forms/formsets.py
+++ b/django/forms/formsets.py
@@ -570,7 +570,12 @@ def formset_factory(
"validate_max": validate_max,
"renderer": renderer,
}
- return type(form.__name__ + "FormSet", (formset,), attrs)
+ form_name = form.__name__
+ if form_name.endswith("Form"):
+ formset_name = form_name + "Set"
+ else:
+ formset_name = form_name + "FormSet"
+ return type(formset_name, (formset,), attrs)
def all_valid(formsets):
| diff --git a/tests/forms_tests/tests/test_formsets.py b/tests/forms_tests/tests/test_formsets.py
index f80c1dc09e35..9f7012a11fd8 100644
--- a/tests/forms_tests/tests/test_formsets.py
+++ b/tests/forms_tests/tests/test_formsets.py
@@ -149,6 +149,12 @@ def test_basic_formset(self):
self.assertFalse(formset.is_valid())
self.assertFalse(formset.has_changed())
+ def test_formset_name(self):
+ ArticleFormSet = formset_factory(ArticleForm)
+ ChoiceFormSet = formset_factory(Choice)
+ self.assertEqual(ArticleFormSet.__name__, "ArticleFormSet")
+ self.assertEqual(ChoiceFormSet.__name__, "ChoiceFormSet")
+
def test_form_kwargs_formset(self):
"""
Custom kwargs set on the formset instance are passed to the
| About the FormSet class name generated through the formset_factory.
Description
When you define a Django Form, what naming format do you usually use?
In my case, I usually name it "xxxForm".
The forms used as examples in the official Django Form documentation follow the same naming format.
# Working with forms docs.
class ContactForm(forms.Form):
    subject = forms.CharField(max_length=100)
    message = forms.CharField(widget=forms.Textarea)
    sender = forms.EmailField()
    cc_myself = forms.BooleanField(required=False)


# Working with forms docs.
class ArticleForm(forms.Form):
    title = forms.CharField()
    pub_date = forms.DateField()


# FormField docs.
class CommentForm(forms.Form):
    name = forms.CharField()
    url = forms.URLField()
    comment = forms.CharField()
The way Django users name their forms may come down to individual tendencies or tastes, but I think most of them follow the xxxForm format.
The FormSet class I created by passing a form in the xxxForm name format to formset_factory is named as follows.
>>> ArticleFormSet = formset_factory(ArticleForm)
>>> ArticleFormSet.__name__
'ArticleFormFormSet'
The name of the FormSet class that formset_factory creates from a form following the xxxForm naming format is a little strange.
This is because when formset_factory creates a class through type, it adds a static "FormSet" to the form name.
def formset_factory(
    form,
    formset=BaseFormSet,
    extra=1,
    can_order=False,
    can_delete=False,
    max_num=None,
    validate_max=False,
    min_num=None,
    validate_min=False,
    absolute_max=None,
    can_delete_extra=True,
    renderer=None,
):
    ...
    return type(form.__name__ + "FormSet", (formset,), attrs)
I wonder if the format of these names is intended :)
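A small standalone demonstration of the naming behaviour (the inline settings.configure() is only there so the snippet runs outside a project):
import django
from django.conf import settings

settings.configure()
django.setup()

from django import forms
from django.forms import formset_factory


class ArticleForm(forms.Form):
    title = forms.CharField()


# Before this change the generated class is named "ArticleFormFormSet";
# appending only "Set" to names already ending in "Form" gives "ArticleFormSet".
print(formset_factory(ArticleForm).__name__)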
| ['I agree this is a bit odd. \u200bThe test for the Formset.__repr__() uses a form called Choice so the result is ChoiceFormSet as expected. As you pointed out, perhaps to match the documented convention of ending all form names with "Form" we could consider appending "Set" rather than "FormSet" if form.__name__ ends with Form. I can\'t think of any way this would break backward compatibility, and it might make debugging a little less confusing.', 1731782903.0]
['Thank you for your good opinion @Tim Graham ! It was not found because we tested it through Choice in the test. I agree to change "FormSet" to "Set".', 1731786303.0]
['PR', 1731789286.0] | 1,731,810,719,000 | [] | Feature Request | [
"django/forms/formsets.py:formset_factory"
] | [] | 1 |
django/django | django__django-18796 | 63dbe30d3363715deaf280214d75b03f6d65a571 | diff --git a/django/contrib/admin/sites.py b/django/contrib/admin/sites.py
index dc67262afc50..3399bd87b85a 100644
--- a/django/contrib/admin/sites.py
+++ b/django/contrib/admin/sites.py
@@ -282,7 +282,7 @@ def wrapper(*args, **kwargs):
path("autocomplete/", wrap(self.autocomplete_view), name="autocomplete"),
path("jsi18n/", wrap(self.i18n_javascript, cacheable=True), name="jsi18n"),
path(
- "r/<int:content_type_id>/<path:object_id>/",
+ "r/<path:content_type_id>/<path:object_id>/",
wrap(contenttype_views.shortcut),
name="view_on_site",
),
| diff --git a/tests/admin_views/tests.py b/tests/admin_views/tests.py
index c5d8b8f4f668..3f106f681418 100644
--- a/tests/admin_views/tests.py
+++ b/tests/admin_views/tests.py
@@ -8664,6 +8664,19 @@ def test_custom_admin_site(self):
),
)
+ def test_view_on_site_url_non_integer_ids(self):
+ """The view_on_site URL accepts non-integer ids."""
+ self.assertEqual(
+ reverse(
+ "admin:view_on_site",
+ kwargs={
+ "content_type_id": "37156b6a-8a82",
+ "object_id": "37156b6a-8a83",
+ },
+ ),
+ "/test_admin/admin/r/37156b6a-8a82/37156b6a-8a83/",
+ )
+
@override_settings(ROOT_URLCONF="admin_views.urls")
class InlineAdminViewOnSiteTest(TestCase):
| "view on site" URL doesn't accept non-integer ContentType pks
Description
Most admin URL paths use the path converter to allow any type of ID, however, the "view on site" uses the int converter for content_type_id.
This doesn't work on MongoDB which uses ObjectIdAutoField (bson.ObjectId) for all models since AutoField isn't supported.
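A rough standalone illustration (this module doubles as the URLconf; the view and ids are made up): with <int:content_type_id> the reverse below raises NoReverseMatch, while <path:...> accepts non-integer primary keys.
import django
from django.conf import settings

settings.configure(ROOT_URLCONF=__name__)
django.setup()

from django.http import HttpResponse
from django.urls import path, reverse


def shortcut(request, content_type_id, object_id):
    return HttpResponse()


urlpatterns = [
    # The admin's "view_on_site" pattern, with <path:...> instead of <int:...>.
    path("r/<path:content_type_id>/<path:object_id>/", shortcut, name="view_on_site"),
]

print(reverse("view_on_site", kwargs={
    "content_type_id": "37156b6a-8a82", "object_id": "37156b6a-8a83",
}))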
| 1,731,335,380,000 | [] | Bug Report | [
"django/contrib/admin/sites.py:AdminSite.get_urls"
] | [] | 1 |
|
django/django | django__django-18795 | 2bc43ccbdb28b9d87da172ef119ff3b48e6ff71a | diff --git a/django/db/models/fields/__init__.py b/django/db/models/fields/__init__.py
index d1f31f021135..f9cafdb4bb40 100644
--- a/django/db/models/fields/__init__.py
+++ b/django/db/models/fields/__init__.py
@@ -392,7 +392,10 @@ def _check_db_default(self, databases=None, **kwargs):
if (
self.db_default is NOT_PROVIDED
- or isinstance(self.db_default, Value)
+ or (
+ isinstance(self.db_default, Value)
+ or not hasattr(self.db_default, "resolve_expression")
+ )
or databases is None
):
return []
| diff --git a/tests/invalid_models_tests/test_ordinary_fields.py b/tests/invalid_models_tests/test_ordinary_fields.py
index e30d41113809..1fcf3f708d47 100644
--- a/tests/invalid_models_tests/test_ordinary_fields.py
+++ b/tests/invalid_models_tests/test_ordinary_fields.py
@@ -1207,6 +1207,23 @@ class Model(models.Model):
expected_error = Error(msg=msg, obj=field, id="fields.E012")
self.assertEqual(errors, [expected_error])
+ def test_literals_not_treated_as_expressions(self):
+ """
+ DatabaseFeatures.supports_expression_defaults = False shouldn't
+ prevent non-expression literals (integer, float, boolean, etc.) from
+ being used as database defaults.
+ """
+
+ class Model(models.Model):
+ field = models.FloatField(db_default=1.0)
+
+ field = Model._meta.get_field("field")
+ with unittest.mock.patch.object(
+ connection.features, "supports_expression_defaults", False
+ ):
+ errors = field.check(databases=self.databases)
+ self.assertEqual(errors, [])
+
@isolate_apps("invalid_models_tests")
class GeneratedFieldTests(TestCase):
| System check for default database values with expressions prohibits non-expressions
Description
Since its introduction in Django 5.0, the fields.E011 system check for database backends that have DatabaseFeatures.supports_expression_defaults = False requires literal defaults to be wrapped in Value.
There are a number of test models that have int, float and string db_defaults that will raise a system check error if DatabaseFeatures.supports_expression_defaults = False
since these models don't have required_db_features = {"supports_expression_defaults"}.
I'm working on MongoDB which doesn't support any database defaults, literal or expressions.
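An illustrative way to separate the two cases (a standalone sketch, not the check itself): literals have no resolve_expression attribute, so only true expressions need the supports_expression_defaults gate.
import django
from django.conf import settings

settings.configure()
django.setup()

from django.db.models import Value
from django.db.models.functions import Pi


def needs_expression_support(db_default):
    # Plain literals (1.0, "abc", True) and Value(...) can always be used;
    # only objects that resolve as expressions require backend support.
    return hasattr(db_default, "resolve_expression") and not isinstance(db_default, Value)


for candidate in (1.0, "abc", True, Value("abc"), Pi()):
    print(repr(candidate), needs_expression_support(candidate))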
| ["I'm not sure if the appropriate solution is to add Value wrapping to all literal defaults in Django's test models or to modify the \u200bisinstance(db_default, Value) check to also accept int/float, str, etc.", 1728031068.0]
["I think either solution could work, but maybe there's a subtlety I don't remember. I think wrapping all literals in Value is most likely to just work.", 1728272037.0]
['The issue I see with requiring Value wrapping is that third-party apps may not know to do it and therefore run into compatibility issues with backends with supports_expression_defaults = False.\u200b For the other solution I suggested, I\'m not sure we can make the isinstance()check exhaustive for all possible non-expression types. Perhaps it\'s more appropriate to replace isinstance(self.db_default, Value) with not hasattr(self.db_default, "resolve_expression") to reject expressions.', 1728376052.0] | 1,731,331,860,000 | [] | Bug Report | [
"django/db/models/fields/__init__.py:Field._check_db_default"
] | [] | 1 |
django/django | django__django-18785 | c12bc980e5b2bb25e447cd8dee550cad767f1ad2 | diff --git a/django/template/base.py b/django/template/base.py
index b974495c9c92..eaca428b10c4 100644
--- a/django/template/base.py
+++ b/django/template/base.py
@@ -57,7 +57,7 @@
from django.template.context import BaseContext
from django.utils.formats import localize
-from django.utils.html import conditional_escape, escape
+from django.utils.html import conditional_escape
from django.utils.regex_helper import _lazy_re_compile
from django.utils.safestring import SafeData, SafeString, mark_safe
from django.utils.text import get_text_list, smart_split, unescape_string_literal
@@ -247,10 +247,10 @@ def get_exception_info(self, exception, token):
for num, next in enumerate(linebreak_iter(self.source)):
if start >= upto and end <= next:
line = num
- before = escape(self.source[upto:start])
- during = escape(self.source[start:end])
- after = escape(self.source[end:next])
- source_lines.append((num, escape(self.source[upto:next])))
+ before = self.source[upto:start]
+ during = self.source[start:end]
+ after = self.source[end:next]
+ source_lines.append((num, self.source[upto:next]))
upto = next
total = len(source_lines)
| diff --git a/tests/template_tests/templates/test_extends_block_error.html b/tests/template_tests/templates/test_extends_block_error.html
index c4733747a237..8133c93ccd31 100644
--- a/tests/template_tests/templates/test_extends_block_error.html
+++ b/tests/template_tests/templates/test_extends_block_error.html
@@ -1,2 +1,2 @@
{% extends "test_extends_block_error_parent.html" %}
-{% block content %}{% include "missing.html" %}{% endblock %}
+{% block content %}{% include "index.html" %}{% include "missing.html" %}{% include "index.html" %}{% endblock %}
diff --git a/tests/template_tests/tests.py b/tests/template_tests/tests.py
index 14df81669b12..7364c7ca6462 100644
--- a/tests/template_tests/tests.py
+++ b/tests/template_tests/tests.py
@@ -5,7 +5,6 @@
from django.test import SimpleTestCase, override_settings
from django.urls import NoReverseMatch
from django.utils import translation
-from django.utils.html import escape
class TemplateTestMixin:
@@ -157,10 +156,32 @@ def test_render_tag_error_in_extended_block(self):
with self.assertRaises(TemplateDoesNotExist) as cm:
template.render(context)
if self.debug_engine:
+ self.assertEqual(
+ cm.exception.template_debug["before"],
+ '{% block content %}{% include "index.html" %}',
+ )
self.assertEqual(
cm.exception.template_debug["during"],
- escape('{% include "missing.html" %}'),
+ '{% include "missing.html" %}',
+ )
+ self.assertEqual(
+ cm.exception.template_debug["after"],
+ '{% include "index.html" %}{% endblock %}\n',
+ )
+ self.assertEqual(
+ cm.exception.template_debug["source_lines"][0],
+ (1, '{% extends "test_extends_block_error_parent.html" %}\n'),
+ )
+ self.assertEqual(
+ cm.exception.template_debug["source_lines"][1],
+ (
+ 2,
+ '{% block content %}{% include "index.html" %}'
+ '{% include "missing.html" %}'
+ '{% include "index.html" %}{% endblock %}\n',
+ ),
)
+ self.assertEqual(cm.exception.template_debug["source_lines"][2], (3, ""))
def test_super_errors(self):
"""
| Template system: escape() calls in get_exception_info() should be removed
Description
In Template.get_exception_info() (django/template/base.py) there are some calls to escape().
They shouldn't be there: escaping happens in templates for non-safe strings anyway, so there's no need.
And there _is_ a drawback: as an example, the Python Sentry SDK copies this info, but because it gets sent over the wire (as a JSON string) the information that this has already been escaped is lost, and on the receiving end it is escaped again.
This means that on the server side the error-tracking tool, in my case Bugsink, will show doubly escaped HTML code snippets. This looks something like this:
<p class="relative text-slate-600 text-base md:text-xl mb-4 md:mb-5">
Removing the calls to escape() simply solves this. Which makes sense: calling escape() is simply not the responsibility of this piece of code; the text should just stay marked as unsafe and be escaped at the edges (on rendering).
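The double-escaping is easy to reproduce directly with django.utils.html.escape(), which re-escapes even already-escaped text:
from django.utils.html import escape

snippet = '<p class="relative text-slate-600">'
once = escape(snippet)
twice = escape(once)  # what the remote error tracker ends up rendering
print(once)   # &lt;p class=&quot;relative text-slate-600&quot;&gt;
print(twice)  # &amp;lt;p class=&amp;quot;relative text-slate-600&amp;quot;&amp;gt;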
| ['Given #33461 added force_escape to message, we might need to move that escaping into the template', 1731045354.0] | 1,731,056,506,000 | [] | Bug Report | [
"django/template/base.py:Template.get_exception_info"
] | [] | 1 |
django/django | django__django-18752 | 2debd018dbc7aba0b98b4c082bbb1fa1d195a47e | diff --git a/django/db/migrations/questioner.py b/django/db/migrations/questioner.py
index e1081ab70ac2..2e6119558188 100644
--- a/django/db/migrations/questioner.py
+++ b/django/db/migrations/questioner.py
@@ -160,8 +160,8 @@ def _ask_default(self, default=""):
else:
try:
return eval(code, {}, {"datetime": datetime, "timezone": timezone})
- except (SyntaxError, NameError) as e:
- self.prompt_output.write("Invalid input: %s" % e)
+ except Exception as e:
+ self.prompt_output.write(f"{e.__class__.__name__}: {e}")
def ask_not_null_addition(self, field_name, model_name):
"""Adding a NOT NULL field to a model."""
| diff --git a/tests/migrations/test_questioner.py b/tests/migrations/test_questioner.py
index c1aebcb22491..ec1013923b06 100644
--- a/tests/migrations/test_questioner.py
+++ b/tests/migrations/test_questioner.py
@@ -61,10 +61,32 @@ def test_questioner_no_default_no_user_entry(self, mock_input):
)
@mock.patch("builtins.input", side_effect=["bad code", "exit"])
- def test_questioner_no_default_bad_user_entry_code(self, mock_input):
+ def test_questioner_no_default_syntax_error(self, mock_input):
with self.assertRaises(SystemExit):
self.questioner._ask_default()
- self.assertIn("Invalid input: ", self.prompt.getvalue())
+ self.assertIn("SyntaxError: invalid syntax", self.prompt.getvalue())
+
+ @mock.patch("builtins.input", side_effect=["datetim", "exit"])
+ def test_questioner_no_default_name_error(self, mock_input):
+ with self.assertRaises(SystemExit):
+ self.questioner._ask_default()
+ self.assertIn(
+ "NameError: name 'datetim' is not defined", self.prompt.getvalue()
+ )
+
+ @mock.patch("builtins.input", side_effect=["datetime.dat", "exit"])
+ def test_questioner_no_default_attribute_error(self, mock_input):
+ with self.assertRaises(SystemExit):
+ self.questioner._ask_default()
+ self.assertIn(
+ "AttributeError: module 'datetime' has no attribute 'dat'",
+ self.prompt.getvalue(),
+ )
+
+ @mock.patch("builtins.input", side_effect=[KeyboardInterrupt()])
+ def test_questioner_no_default_keyboard_interrupt(self, mock_input):
+ with self.assertRaises(KeyboardInterrupt):
+ self.questioner._ask_default()
@mock.patch("builtins.input", side_effect=["", "n"])
def test_questioner_no_default_no_user_entry_boolean(self, mock_input):
| Loop on all errors in `InteractiveMigrationQuestioner._ask_default()`
Description
When changing some fields, the migration questioner can ask for new field default values entered through a Python prompt.
Yesterday, I made a typo which made the makemigrations process exit. This was a little frustrating as there were several fields to fix, and my work answering for the first fields was lost.
Currently, the questioner loops on SyntaxError and NameError (source). My mistake led to an AttributeError, hence the crash and lost work. I propose we extend this clause to Exception - there's little harm in displaying the error and looping.
Here's a reproducing example. Say you drop null=True from two model fields:
example/models.py
diff --git example/models.py example/models.py
index 816fe37..9743877 100644
--- example/models.py
+++ example/models.py
@@ -4,3 +4,3 @@
 class Book(models.Model):
-    title = models.CharField(max_length=100, null=True)
-    published_date = models.DateField(null=True)
+    title = models.CharField(max_length=100)
+    published_date = models.DateField()
makemigrations asks you about the first one:
$ ./manage.py makemigrations example
It is impossible to change a nullable field 'title' on book to non-nullable without providing a default. This is because the database needs something to populate existing rows.
Please select a fix:
1) Provide a one-off default now (will be set on all existing rows with a null value for this column)
2) Ignore for now. Existing rows that contain NULL values will have to be handled manually, for example with a RunPython or RunSQL operation.
3) Quit and manually define a default value in models.py.
Select an option: 1
Please enter the default value as valid Python.
The datetime and django.utils.timezone modules are available, so it is possible to provide e.g. timezone.now as a value.
Type 'exit' to exit this prompt
>>> "Unknown book"
Immediately after this, it asks about the second field. Making a typo like .dat() instead of .date() crashes the process:
It is impossible to change a nullable field 'published_date' on book to non-nullable without providing a default. This is because the database needs something to populate existing rows.
Please select a fix:
1) Provide a one-off default now (will be set on all existing rows with a null value for this column)
2) Ignore for now. Existing rows that contain NULL values will have to be handled manually, for example with a RunPython or RunSQL operation.
3) Quit and manually define a default value in models.py.
Select an option: 1
Please enter the default value as valid Python.
The datetime and django.utils.timezone modules are available, so it is possible to provide e.g. timezone.now as a value.
Type 'exit' to exit this prompt
>>> datetime.dat(1970, 1, 1)
Traceback (most recent call last):
File "/..././manage.py", line 21, in <module>
main()
...
File "/.../.venv/.../django/db/migrations/questioner.py", line 162, in _ask_default
return eval(code, {}, {"datetime": datetime, "timezone": timezone})
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "<string>", line 1, in <module>
AttributeError: module 'datetime' has no attribute 'dat'. Did you mean: 'date'?
But if you instead made a typo in the datetime name, it would loop:
>>> datetme.date(1970,1,1)
Invalid input: name 'datetme' is not defined
>>>
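A minimal sketch of the proposed looping behaviour (not the questioner's actual code): catching Exception means any evaluation error re-prompts, while KeyboardInterrupt, being a BaseException, still aborts makemigrations.
import datetime

from django.utils import timezone


def ask_default():
    while True:
        code = input(">>> ")
        if code == "exit":
            raise SystemExit(3)
        try:
            return eval(code, {}, {"datetime": datetime, "timezone": timezone})
        except Exception as e:
            # AttributeError, SyntaxError, NameError, ... all just re-prompt.
            print(f"{e.__class__.__name__}: {e}")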
| ['As long as KeyboardInterrupt(BaseException) is allowed to bubble up (which catching for Exception allows) I agree that not trying to catch a predefined set of exceptions is a better default.', 1730539235.0]
['In 3434fab7: Refs #35882 -- Added test for migration questioner KeyboardInterrupt.', 1731917749.0]
['In e035db1: Fixed #35882 -- Made migration questioner loop on all errors.', 1731917750.0] | 1,730,534,216,000 | [] | Feature Request | [
"django/db/migrations/questioner.py:InteractiveMigrationQuestioner._ask_default"
] | [] | 1 |
django/django | django__django-18653 | e970bb7ca71c00594b42a024a15a8ac007cc2c7a | diff --git a/django/template/base.py b/django/template/base.py
index ee2e145c041a..b974495c9c92 100644
--- a/django/template/base.py
+++ b/django/template/base.py
@@ -533,9 +533,13 @@ def skip_past(self, endtag):
def extend_nodelist(self, nodelist, node, token):
# Check that non-text nodes don't appear before an extends tag.
if node.must_be_first and nodelist.contains_nontext:
+ if self.origin.template_name:
+ origin = repr(self.origin.template_name)
+ else:
+ origin = "the template"
raise self.error(
token,
- "%r must be the first tag in the template." % node,
+ "{%% %s %%} must be the first tag in %s." % (token.contents, origin),
)
if not isinstance(node, TextNode):
nodelist.contains_nontext = True
| diff --git a/tests/template_tests/test_extends.py b/tests/template_tests/test_extends.py
index ce1838654bac..0d2a93468ce3 100644
--- a/tests/template_tests/test_extends.py
+++ b/tests/template_tests/test_extends.py
@@ -1,9 +1,9 @@
import os
-from django.template import Context, Engine, TemplateDoesNotExist
+from django.template import Context, Engine, TemplateDoesNotExist, TemplateSyntaxError
from django.test import SimpleTestCase
-from .utils import ROOT
+from .utils import ROOT, setup
RECURSIVE = os.path.join(ROOT, "recursive_templates")
@@ -181,3 +181,17 @@ def test_block_override_in_extended_included_template(self):
)
template = engine.get_template("base.html")
self.assertEqual(template.render(Context({})), "12AB")
+
+ @setup(
+ {"index.html": "{% block content %}B{% endblock %}{% extends 'base.html' %}"}
+ )
+ def test_extends_not_first_tag_in_extended_template(self):
+ msg = "{% extends 'base.html' %} must be the first tag in 'index.html'."
+ with self.assertRaisesMessage(TemplateSyntaxError, msg):
+ self.engine.get_template("index.html")
+
+ def test_extends_not_first_tag_in_extended_template_from_string(self):
+ template_string = "{% block content %}B{% endblock %}{% extends 'base.html' %}"
+ msg = "{% extends 'base.html' %} must be the first tag in the template."
+ with self.assertRaisesMessage(TemplateSyntaxError, msg):
+ Engine().from_string(template_string)
| Improve non-first {% extends %} error message
Description
Currently if you put {% extends %} after another tag, the error looks like:
TemplateSyntaxError: <ExtendsNode: extends 'base.html'> must be the first tag in the template.
ExtendsNode is a leaked internal detail. Showing its repr() is a bit confusing, especially for a beginner programmer.
The message would be clearer if it showed the actual text of the tag:
TemplateSyntaxError: {% extends 'base.html' %} must be the first tag in the template.
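The improved message can be seen with a throwaway engine (a standalone sketch; the inline settings.configure() is only there so it runs outside a project):
import django
from django.conf import settings

settings.configure()
django.setup()

from django.template import Engine, TemplateSyntaxError

source = "{% block content %}B{% endblock %}{% extends 'base.html' %}"
try:
    Engine().from_string(source)
except TemplateSyntaxError as e:
    # With the fix: {% extends 'base.html' %} must be the first tag in the template.
    print(e)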
| ['So After looking at the code I am a bit stuck on what should be done, the error message is constructed using the __repr__ method of ExtendNode class # The way the error is constructed if node.must_be_first and nodelist.contains_nontext: raise self.error( token, "%r must be the first tag in the template." % node, ) And this is the __repr__ function # __repr__ function for ExtendsNode class def __repr__(self): return "<%s: extends %s>" % (self.__class__.__name__, self.parent_name.token) I don\'t think changing the repr method or hard coding the error message is any better. So what should be done ?', 1727505575.0]
['The repr should not be changed, it’s still used in other contexts like debugging. Change the error message to show/build the extends tag source.', 1727511576.0]
["Just sent a PR to github to add a clear error message. \u200bPR Edit: I didn't notice the first PR, sorry about that.", 1728200226.0] | 1,728,218,012,000 | [] | Feature Request | [
"django/template/base.py:Parser.extend_nodelist"
] | [] | 1 |
django/django | django__django-18508 | 519087819ed6e8bfbe6be208df71a7df19f23a58 | diff --git a/django/db/migrations/operations/models.py b/django/db/migrations/operations/models.py
index 9aad9c809ee7..c8f7a2627a91 100644
--- a/django/db/migrations/operations/models.py
+++ b/django/db/migrations/operations/models.py
@@ -184,6 +184,38 @@ def reduce(self, operation, app_label):
managers=operation.managers,
),
]
+ elif (
+ isinstance(operation, AlterModelTable)
+ and self.name_lower == operation.name_lower
+ ):
+ return [
+ CreateModel(
+ self.name,
+ fields=self.fields,
+ options={
+ **self.options,
+ "db_table": operation.table,
+ },
+ bases=self.bases,
+ managers=self.managers,
+ ),
+ ]
+ elif (
+ isinstance(operation, AlterModelTableComment)
+ and self.name_lower == operation.name_lower
+ ):
+ return [
+ CreateModel(
+ self.name,
+ fields=self.fields,
+ options={
+ **self.options,
+ "db_table_comment": operation.table_comment,
+ },
+ bases=self.bases,
+ managers=self.managers,
+ ),
+ ]
elif (
isinstance(operation, AlterTogetherOptionOperation)
and self.name_lower == operation.name_lower
| diff --git a/tests/migrations/test_optimizer.py b/tests/migrations/test_optimizer.py
index 2acbc7f09f74..3ed30102bf15 100644
--- a/tests/migrations/test_optimizer.py
+++ b/tests/migrations/test_optimizer.py
@@ -154,6 +154,46 @@ def test_create_alter_model_managers(self):
],
)
+ def test_create_alter_model_table(self):
+ self.assertOptimizesTo(
+ [
+ migrations.CreateModel("Foo", fields=[]),
+ migrations.AlterModelTable(
+ name="foo",
+ table="foo",
+ ),
+ ],
+ [
+ migrations.CreateModel(
+ "Foo",
+ fields=[],
+ options={
+ "db_table": "foo",
+ },
+ ),
+ ],
+ )
+
+ def test_create_alter_model_table_comment(self):
+ self.assertOptimizesTo(
+ [
+ migrations.CreateModel("Foo", fields=[]),
+ migrations.AlterModelTableComment(
+ name="foo",
+ table_comment="A lovely table.",
+ ),
+ ],
+ [
+ migrations.CreateModel(
+ "Foo",
+ fields=[],
+ options={
+ "db_table_comment": "A lovely table.",
+ },
+ ),
+ ],
+ )
+
def test_create_model_and_remove_model_options(self):
self.assertOptimizesTo(
[
| Reduce CreateModel + AlterModelTable to CreateModel
Description
Like #33572, migration optimization can reduce CreateModel and AlterModelTable for the same model down to just CreateModel with db_table in options.
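A minimal standalone sketch of the reduction, mirroring the test added in this change (settings.configure() is only there so it runs outside a project):
import django
from django.conf import settings

settings.configure()
django.setup()

from django.db import migrations
from django.db.migrations.optimizer import MigrationOptimizer

operations = [
    migrations.CreateModel("Foo", fields=[]),
    migrations.AlterModelTable(name="foo", table="foo"),
]

# With CreateModel.reduce() handling AlterModelTable, this collapses to a single
# CreateModel whose options contain {"db_table": "foo"}.
print(MigrationOptimizer().optimize(operations, app_label="example"))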
| 1,724,324,683,000 | [] | Performance Issue | [
"django/db/migrations/operations/models.py:CreateModel.reduce"
] | [] | 1 |
|
django/django | django__django-18470 | 7380ac57340653854bc2cfe0ed80298cdac6061d | diff --git a/django/contrib/staticfiles/storage.py b/django/contrib/staticfiles/storage.py
index 04a5edbd3086..dfc3137f76fd 100644
--- a/django/contrib/staticfiles/storage.py
+++ b/django/contrib/staticfiles/storage.py
@@ -308,22 +308,23 @@ def post_process(self, paths, dry_run=False, **options):
processed_adjustable_paths[name] = (name, hashed_name, processed)
paths = {path: paths[path] for path in adjustable_paths}
- substitutions = False
-
+ unresolved_paths = []
for i in range(self.max_post_process_passes):
- substitutions = False
+ unresolved_paths = []
for name, hashed_name, processed, subst in self._post_process(
paths, adjustable_paths, hashed_files
):
# Overwrite since hashed_name may be newer.
processed_adjustable_paths[name] = (name, hashed_name, processed)
- substitutions = substitutions or subst
+ if subst:
+ unresolved_paths.append(name)
- if not substitutions:
+ if not unresolved_paths:
break
- if substitutions:
- yield "All", None, RuntimeError("Max post-process passes exceeded.")
+ if unresolved_paths:
+ problem_paths = ", ".join(sorted(unresolved_paths))
+ yield problem_paths, None, RuntimeError("Max post-process passes exceeded.")
# Store the processed paths
self.hashed_files.update(hashed_files)
| diff --git a/tests/staticfiles_tests/project/loop/baz.css b/tests/staticfiles_tests/project/loop/baz.css
new file mode 100644
index 000000000000..4021a1b1e6ca
--- /dev/null
+++ b/tests/staticfiles_tests/project/loop/baz.css
@@ -0,0 +1,3 @@
+body {
+ background-color: #fafafa;
+}
diff --git a/tests/staticfiles_tests/test_storage.py b/tests/staticfiles_tests/test_storage.py
index d6ea03b7446a..9ca4d6255329 100644
--- a/tests/staticfiles_tests/test_storage.py
+++ b/tests/staticfiles_tests/test_storage.py
@@ -186,7 +186,9 @@ def test_import_loop(self):
err = StringIO()
with self.assertRaisesMessage(RuntimeError, "Max post-process passes exceeded"):
call_command("collectstatic", interactive=False, verbosity=0, stderr=err)
- self.assertEqual("Post-processing 'All' failed!\n\n", err.getvalue())
+ self.assertEqual(
+ "Post-processing 'bar.css, foo.css' failed!\n\n", err.getvalue()
+ )
self.assertPostCondition()
def test_post_processing(self):
| Improve `RuntimeError: Max post-process passes exceeded.` error
Description
(last modified by Michael)
Having just spent 3 hours trying to debug a collectstatic issue, to help future travellers, I recommend printing the files that caused the max depth to be exceeded. Often a file referencing itself causes the recursion, but the only clue currently printed is 'All', which does not narrow down the problem. We can surely do better than that! The proposed change prints only the problem files that keep changing and can't be resolved:
So instead of getting:
Post-processing 'All' failed!
We get the new and improved:
Post-processing 'jsapp/jsapp/notify.min.js' failed!
Or if more than one file:
Post-processing 'jsapp/jsapp/notify.min.js, jsapp/jsapp/somethingelse.min.js' failed!
I recommend changing from:
# contrib/staticfiles/storage.py line 313: in def post_process(self, paths, dry_run=False, **options):
substitutions = False

for i in range(self.max_post_process_passes):
    substitutions = False
    for name, hashed_name, processed, subst in self._post_process(
        paths, adjustable_paths, hashed_files
    ):
        # Overwrite since hashed_name may be newer.
        processed_adjustable_paths[name] = (name, hashed_name, processed)
        substitutions = substitutions or subst

    if not substitutions:
        break

if substitutions:
    yield "All", None, RuntimeError("Max post-process passes exceeded.")
I recommend changing to (see the comments on the right of the proposed changed lines 1/5 to 5/5):
# contrib/staticfiles/storage.py line 313: in def post_process(self, paths, dry_run=False, **options):
unresolved_paths = []  # < -- add this line 1/5
for i in range(self.max_post_process_passes):
    substitutions = False
    for name, hashed_name, processed, subst in self._post_process(
        paths, adjustable_paths, hashed_files
    ):
        # Overwrite since hashed_name may be newer.
        processed_adjustable_paths[name] = (name, hashed_name, processed)
        if subst and i == self.max_post_process_passes - 1:  # < -- add this line 2/5
            unresolved_paths.append(name)  # < -- add this line 3/5
        substitutions = substitutions or subst

    if not substitutions:
        break

if substitutions:
    problem_paths_str = ", ".join(unresolved_paths) if unresolved_paths else "All"  # < -- add this line 4/5
    yield problem_paths_str, None, RuntimeError("Max post-process passes exceeded.")  # < -- change this line 5/5
| ['Adding the suggestion as a diff django/contrib/staticfiles/storage.py diff --git a/django/contrib/staticfiles/storage.py b/django/contrib/staticfiles/storage.py index 04a5edbd30..5c41fd6828 100644 a b class HashedFilesMixin: 309309 310310 paths = {path: paths[path] for path in adjustable_paths} 311311 substitutions = False 312 312 unresolved_paths = [] 313313 for i in range(self.max_post_process_passes): 314314 substitutions = False 315315 for name, hashed_name, processed, subst in self._post_process( … … class HashedFilesMixin: 318318 # Overwrite since hashed_name may be newer. 319319 processed_adjustable_paths[name] = (name, hashed_name, processed) 320320 substitutions = substitutions or subst 321 if subst and i == self.max_post_process_passes - 1: 322 unresolved_paths.append(name) 321323 322324 if not substitutions: 323325 break 324326 325327 if substitutions: 326 yield "All", None, RuntimeError("Max post-process passes exceeded.") 328 problem_paths_str = ", ".join(unresolved_paths) if unresolved_paths else "All" 329 yield problem_paths_str, None, RuntimeError("Max post-process passes exceeded.") 327330 328331 # Store the processed paths 329332 self.hashed_files.update(hashed_files) Appreciate clear error messages so sounds good to me', 1723434306.0] | 1,723,467,922,000 | [] | Feature Request | [
"django/contrib/staticfiles/storage.py:HashedFilesMixin.post_process"
] | [] | 1 |
django/django | django__django-18105 | dd46cab6e076ec766ef0727a16f4219e3e6cb552 | diff --git a/django/contrib/auth/management/__init__.py b/django/contrib/auth/management/__init__.py
index b29a980cb2d5..c40f2aa69dd2 100644
--- a/django/contrib/auth/management/__init__.py
+++ b/django/contrib/auth/management/__init__.py
@@ -46,6 +46,13 @@ def create_permissions(
if not app_config.models_module:
return
+ try:
+ Permission = apps.get_model("auth", "Permission")
+ except LookupError:
+ return
+ if not router.allow_migrate_model(using, Permission):
+ return
+
# Ensure that contenttypes are created for this app. Needed if
# 'django.contrib.auth' is in INSTALLED_APPS before
# 'django.contrib.contenttypes'.
@@ -62,28 +69,15 @@ def create_permissions(
try:
app_config = apps.get_app_config(app_label)
ContentType = apps.get_model("contenttypes", "ContentType")
- Permission = apps.get_model("auth", "Permission")
except LookupError:
return
- if not router.allow_migrate_model(using, Permission):
- return
-
- # This will hold the permissions we're looking for as
- # (content_type, (codename, name))
- searched_perms = []
- # The codenames and ctypes that should exist.
- ctypes = set()
- for klass in app_config.get_models():
- # Force looking up the content types in the current database
- # before creating foreign keys to them.
- ctype = ContentType.objects.db_manager(using).get_for_model(
- klass, for_concrete_model=False
- )
+ models = list(app_config.get_models())
- ctypes.add(ctype)
- for perm in _get_all_permissions(klass._meta):
- searched_perms.append((ctype, perm))
+ # Grab all the ContentTypes.
+ ctypes = ContentType.objects.db_manager(using).get_for_models(
+ *models, for_concrete_models=False
+ )
# Find all the Permissions that have a content_type for a model we're
# looking for. We don't need to check for codenames since we already have
@@ -91,20 +85,22 @@ def create_permissions(
all_perms = set(
Permission.objects.using(using)
.filter(
- content_type__in=ctypes,
+ content_type__in=set(ctypes.values()),
)
.values_list("content_type", "codename")
)
perms = []
- for ct, (codename, name) in searched_perms:
- if (ct.pk, codename) not in all_perms:
- permission = Permission()
- permission._state.db = using
- permission.codename = codename
- permission.name = name
- permission.content_type = ct
- perms.append(permission)
+ for model in models:
+ ctype = ctypes[model]
+ for codename, name in _get_all_permissions(model._meta):
+ if (ctype.pk, codename) not in all_perms:
+ permission = Permission()
+ permission._state.db = using
+ permission.codename = codename
+ permission.name = name
+ permission.content_type = ctype
+ perms.append(permission)
Permission.objects.using(using).bulk_create(perms)
if verbosity >= 2:
| diff --git a/tests/auth_tests/test_management.py b/tests/auth_tests/test_management.py
index 0cc56b6760d7..5765c500346a 100644
--- a/tests/auth_tests/test_management.py
+++ b/tests/auth_tests/test_management.py
@@ -1528,7 +1528,7 @@ class CreatePermissionsMultipleDatabasesTests(TestCase):
def test_set_permissions_fk_to_using_parameter(self):
Permission.objects.using("other").delete()
- with self.assertNumQueries(6, using="other") as captured_queries:
+ with self.assertNumQueries(4, using="other") as captured_queries:
create_permissions(apps.get_app_config("auth"), verbosity=0, using="other")
self.assertIn("INSERT INTO", captured_queries[-1]["sql"].upper())
self.assertGreater(Permission.objects.using("other").count(), 0)
| Optimize post-migrate permission creation
Description
(last modified by Adam Johnson)
I have often seen django.contrib.auth.management.create_permissions() take a significant amount of time in test run profiles. It can be optimized by batching more of its operations, including making ContentTypeManager.get_for_models() use batch creation.
For a comparison, I profiled 1518 of Django’s tests in modules called “models”:
$ python -m cProfile -o profile runtests.py --parallel 1 *model*
$ python -m pstats profile <<< 'sort cumtime
stats 10000' | less
Before optimization stats:
Total 11,938,857 function calls taking 5.349 seconds.
88 calls to create_permissions() take 456ms, ~8.5% of the total time.
After optimization stats:
Total 11,359,071 function calls taking 5.035 seconds.
88 calls to create_permissions() now take 239ms, ~4.7% of the total time.
217ms and 579,786 function calls saved.
Optimization is limited because the post_migrate signal runs once per migrated app config, so there’s no chance to bulk create *all* content types and permissions at once. If we introduced a new “all migrated apps” signal, that could reduce runtime further by batching all creation.
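The core of the batching, as a sketch to run from a project shell (it assumes a configured project with django.contrib.auth and django.contrib.contenttypes migrated): one get_for_models() call resolves the content types for every model in an app config, instead of one get_for_model() query per model.
from django.apps import apps
from django.contrib.contenttypes.models import ContentType

app_config = apps.get_app_config("auth")
models = list(app_config.get_models())

# One batched lookup instead of a query per model.
ctypes = ContentType.objects.get_for_models(*models, for_concrete_models=False)
for model, ctype in ctypes.items():
    print(model._meta.label, ctype.pk)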
| ['Accepting following an initial review of the patch which looks sensible. Setting as patch needs improvement due to the comments raised by David and Mariusz.', 1714134972.0]
['I repeated the profiling with the latest version of the patch, on top of the latest main commit. The numbers are similar. Before optimization stats: Total 12,387,798 function calls taking 5.589 seconds. 88 calls to create_permissions() take 483ms, ~8.6% of the total time. After optimization stats: Total 11,797,519 function calls taking 5.207 seconds. 88 calls to create_permissions() take 241ms, ~4.6% of the total time. 590,279 function calls and 242ms saved.', 1714750043.0] | 1,714,126,168,000 | [] | Performance Issue | [
"django/contrib/auth/management/__init__.py:create_permissions"
] | [] | 1 |
django/django | django__django-18104 | ceea86baa36b91d0002911770340a2d7bd4f64b7 | diff --git a/django/db/models/options.py b/django/db/models/options.py
index ed7be7dd7a84..68a7228cbea6 100644
--- a/django/db/models/options.py
+++ b/django/db/models/options.py
@@ -5,6 +5,7 @@
from django.apps import apps
from django.conf import settings
from django.core.exceptions import FieldDoesNotExist, ImproperlyConfigured
+from django.core.signals import setting_changed
from django.db import connections
from django.db.models import AutoField, Manager, OrderWrt, UniqueConstraint
from django.db.models.query_utils import PathInfo
@@ -230,6 +231,9 @@ def contribute_to_class(self, cls, name):
self.db_table, connection.ops.max_name_length()
)
+ if self.swappable:
+ setting_changed.connect(self.setting_changed)
+
def _format_names(self, objs):
"""App label/class name interpolation for object names."""
names = {"app_label": self.app_label.lower(), "class": self.model_name}
@@ -399,7 +403,7 @@ def verbose_name_raw(self):
with override(None):
return str(self.verbose_name)
- @property
+ @cached_property
def swapped(self):
"""
Has this model been swapped out for another? If so, return the model
@@ -427,6 +431,10 @@ def swapped(self):
return swapped_for
return None
+ def setting_changed(self, *, setting, **kwargs):
+ if setting == self.swappable and "swapped" in self.__dict__:
+ del self.swapped
+
@cached_property
def managers(self):
managers = []
| diff --git a/tests/model_meta/models.py b/tests/model_meta/models.py
index bc69d61a59cd..20a75baf4f2e 100644
--- a/tests/model_meta/models.py
+++ b/tests/model_meta/models.py
@@ -166,6 +166,11 @@ class Relating(models.Model):
people_hidden = models.ManyToManyField(Person, related_name="+")
+class Swappable(models.Model):
+ class Meta:
+ swappable = "MODEL_META_TESTS_SWAPPED"
+
+
# ParentListTests models
class CommonAncestor(models.Model):
pass
diff --git a/tests/model_meta/tests.py b/tests/model_meta/tests.py
index 0aa04d760d19..93883b5cf1c3 100644
--- a/tests/model_meta/tests.py
+++ b/tests/model_meta/tests.py
@@ -3,7 +3,7 @@
from django.core.exceptions import FieldDoesNotExist
from django.db.models import CharField, Field, ForeignObjectRel, ManyToManyField
from django.db.models.options import EMPTY_RELATION_TREE, IMMUTABLE_WARNING
-from django.test import SimpleTestCase
+from django.test import SimpleTestCase, override_settings
from .models import (
AbstractPerson,
@@ -16,6 +16,7 @@
Relating,
Relation,
SecondParent,
+ Swappable,
)
from .results import TEST_RESULTS
@@ -233,6 +234,31 @@ def test_gettext(self):
self.assertEqual(Person._meta.verbose_name_raw, "Person")
+class SwappedTests(SimpleTestCase):
+ def test_plain_model_none(self):
+ self.assertIsNone(Relation._meta.swapped)
+
+ def test_unset(self):
+ self.assertIsNone(Swappable._meta.swapped)
+
+ def test_set_and_unset(self):
+ with override_settings(MODEL_META_TESTS_SWAPPED="model_meta.Relation"):
+ self.assertEqual(Swappable._meta.swapped, "model_meta.Relation")
+ self.assertIsNone(Swappable._meta.swapped)
+
+ def test_setting_none(self):
+ with override_settings(MODEL_META_TESTS_SWAPPED=None):
+ self.assertIsNone(Swappable._meta.swapped)
+
+ def test_setting_non_label(self):
+ with override_settings(MODEL_META_TESTS_SWAPPED="not-a-label"):
+ self.assertEqual(Swappable._meta.swapped, "not-a-label")
+
+ def test_setting_self(self):
+ with override_settings(MODEL_META_TESTS_SWAPPED="model_meta.swappable"):
+ self.assertIsNone(Swappable._meta.swapped)
+
+
class RelationTreeTests(SimpleTestCase):
all_models = (Relation, AbstractPerson, BasePerson, Person, ProxyPerson, Relating)
| Cache Model._meta.swapped
Description
Another candidate for caching, like #35232 before.
The Model._meta.swapped property returns the model that this one has been swapped for. Since most models are not swappable (only auth.User is officially swappable), it returns None in nearly all cases.
I found this property was the most called function in Django when profiling a subset of Django’s tests, with:
$ python -m cProfile -o profile runtests.py --parallel 1 *model*
$ python -m pstats profile <<< 'sort ncalls
stats 10000' | less
This showed 439,484 calls to swapped, taking 29ms of the 5.597s test run, or 0.5% of the total runtime.
After adding @cached_property, this is reduced to 3,653 calls, rounding down to 0ms.
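A generic sketch of the caching pattern involved (a hypothetical Example class, not Options itself): cache with cached_property and invalidate via the setting_changed signal so override_settings in tests still sees fresh values.
from django.conf import settings
from django.core.signals import setting_changed
from django.utils.functional import cached_property


class Example:
    swappable = "EXAMPLE_SWAPPABLE_SETTING"  # hypothetical setting name

    def __init__(self):
        setting_changed.connect(self.setting_changed)

    @cached_property
    def swapped(self):
        return getattr(settings, self.swappable, None)

    def setting_changed(self, *, setting, **kwargs):
        # Drop the cached value only if it was computed and the relevant
        # setting is the one that changed.
        if setting == self.swappable and "swapped" in self.__dict__:
            del self.swapped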
| ["Thank you Adam! I a little on the fence on this one but let's give it a chance. Could you please a test in the PR to cover for the new (non trivial) logic?", 1714115192.0] | 1,714,123,267,000 | [] | Performance Issue | [
"django/db/models/options.py:Options.contribute_to_class"
] | [
"django/db/models/options.py:Options.setting_changed"
] | 1 |
django/django | django__django-18059 | c223d14025dd9ef0d354332c537ed8622a1ec29c | diff --git a/django/utils/log.py b/django/utils/log.py
index fd0cc1bdc1ff..a25b97a7d5a4 100644
--- a/django/utils/log.py
+++ b/django/utils/log.py
@@ -92,6 +92,13 @@ def __init__(self, include_html=False, email_backend=None, reporter_class=None):
)
def emit(self, record):
+ # Early return when no email will be sent.
+ if (
+ not settings.ADMINS
+ # Method not overridden.
+ and self.send_mail.__func__ is AdminEmailHandler.send_mail
+ ):
+ return
try:
request = record.request
subject = "%s (%s IP): %s" % (
| diff --git a/tests/logging_tests/tests.py b/tests/logging_tests/tests.py
index 20d2852fde00..610bdc112434 100644
--- a/tests/logging_tests/tests.py
+++ b/tests/logging_tests/tests.py
@@ -1,6 +1,7 @@
import logging
from contextlib import contextmanager
from io import StringIO
+from unittest import mock
from admin_scripts.tests import AdminScriptTestCase
@@ -470,6 +471,26 @@ def test_emit_no_form_tag(self):
self.assertIn('<div id="traceback">', body_html)
self.assertNotIn("<form", body_html)
+ @override_settings(ADMINS=[])
+ def test_emit_no_admins(self):
+ handler = AdminEmailHandler()
+ record = self.logger.makeRecord(
+ "name",
+ logging.ERROR,
+ "function",
+ "lno",
+ "message",
+ None,
+ None,
+ )
+ with mock.patch.object(
+ handler,
+ "format_subject",
+ side_effect=AssertionError("Should not be called"),
+ ):
+ handler.emit(record)
+ self.assertEqual(len(mail.outbox), 0)
+
class SettingsConfigTest(AdminScriptTestCase):
"""
diff --git a/tests/view_tests/tests/test_defaults.py b/tests/view_tests/tests/test_defaults.py
index 415a9a8c6746..66bc1da16889 100644
--- a/tests/view_tests/tests/test_defaults.py
+++ b/tests/view_tests/tests/test_defaults.py
@@ -123,7 +123,7 @@ def test_bad_request(self):
)
def test_custom_bad_request_template(self):
response = self.client.get("/raises400/")
- self.assertIs(response.wsgi_request, response.context[-1].request)
+ self.assertIs(response.wsgi_request, response.context.request)
@override_settings(
TEMPLATES=[
| AdminEmailHandler wastes work when ADMINS isn’t set
Description
AdminEmailHandler.emit() does a lot of work to assemble the message it passes to mail_admins. If settings.ADMINS is empty, mail_admins() returns instantly, wasting all the message-creation work. It’s quite common to not configure ADMINS, whether in lieu of more advanced tools like Sentry, or during tests.
In a quick benchmark on my M1 Mac Pro on Python 3.11, the overhead is ~2.5ms:
In [1]: import logging
In [2]: logger = logging.getLogger('django')
In [3]: %timeit logger.error("Yada")
...
2.78 ms ± 75.4 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
In [4]: logger = logging.getLogger('example')
In [5]: %timeit logger.error("Yada")
...
8.37 µs ± 38.9 ns per loop (mean ± std. dev. of 7 runs, 100,000 loops each)
This can be avoided by adding an initial check to AdminEmailHandler.emit().
| ['Makes sense, thank you!', 1712655969.0] | 1,712,674,027,000 | [] | Performance Issue | [
"django/utils/log.py:AdminEmailHandler.emit"
] | [] | 1 |
django/django | django__django-17984 | 921670c6943e9c532137b7d164885f2d3ab436b8 | diff --git a/django/db/models/fields/related_descriptors.py b/django/db/models/fields/related_descriptors.py
index a2f00eb172c4..c7848ee63a3e 100644
--- a/django/db/models/fields/related_descriptors.py
+++ b/django/db/models/fields/related_descriptors.py
@@ -195,6 +195,9 @@ def get_prefetch_querysets(self, instances, querysets=None):
else:
query = {"%s__in" % self.field.related_query_name(): instances}
queryset = queryset.filter(**query)
+ # There can be only one object prefetched for each instance so clear
+ # ordering if the query allows it without side effects.
+ queryset.query.clear_ordering()
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
@@ -469,6 +472,9 @@ def get_prefetch_querysets(self, instances, querysets=None):
instances_dict = {instance_attr(inst): inst for inst in instances}
query = {"%s__in" % self.related.field.name: instances}
queryset = queryset.filter(**query)
+ # There can be only one object prefetched for each instance so clear
+ # ordering if the query allows it without side effects.
+ queryset.query.clear_ordering()
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
| diff --git a/tests/prefetch_related/test_prefetch_related_objects.py b/tests/prefetch_related/test_prefetch_related_objects.py
index ca1f904c5205..eea9a7fff78c 100644
--- a/tests/prefetch_related/test_prefetch_related_objects.py
+++ b/tests/prefetch_related/test_prefetch_related_objects.py
@@ -1,7 +1,7 @@
from django.db.models import Prefetch, prefetch_related_objects
from django.test import TestCase
-from .models import Author, Book, Reader
+from .models import Author, Book, House, Reader, Room
class PrefetchRelatedObjectsTests(TestCase):
@@ -33,6 +33,17 @@ def setUpTestData(cls):
cls.reader1.books_read.add(cls.book1, cls.book4)
cls.reader2.books_read.add(cls.book2, cls.book4)
+ cls.house1 = House.objects.create(name="b1", address="1")
+ cls.house2 = House.objects.create(name="b2", address="2")
+
+ cls.room1 = Room.objects.create(name="a1", house=cls.house1)
+ cls.room2 = Room.objects.create(name="a2", house=cls.house2)
+
+ cls.house1.main_room = cls.room1
+ cls.house1.save()
+ cls.house2.main_room = cls.room2
+ cls.house2.save()
+
def test_unknown(self):
book1 = Book.objects.get(id=self.book1.id)
with self.assertRaises(AttributeError):
@@ -58,20 +69,75 @@ def test_m2m_reverse(self):
def test_foreignkey_forward(self):
authors = list(Author.objects.all())
- with self.assertNumQueries(1):
+ with self.assertNumQueries(1) as ctx:
prefetch_related_objects(authors, "first_book")
+ self.assertNotIn("ORDER BY", ctx.captured_queries[0]["sql"])
with self.assertNumQueries(0):
[author.first_book for author in authors]
+ authors = list(Author.objects.all())
+ with self.assertNumQueries(1) as ctx:
+ prefetch_related_objects(
+ authors,
+ Prefetch("first_book", queryset=Book.objects.order_by("-title")),
+ )
+ self.assertNotIn("ORDER BY", ctx.captured_queries[0]["sql"])
+
def test_foreignkey_reverse(self):
books = list(Book.objects.all())
- with self.assertNumQueries(1):
+ with self.assertNumQueries(1) as ctx:
prefetch_related_objects(books, "first_time_authors")
+ self.assertIn("ORDER BY", ctx.captured_queries[0]["sql"])
with self.assertNumQueries(0):
[list(book.first_time_authors.all()) for book in books]
+ books = list(Book.objects.all())
+ with self.assertNumQueries(1) as ctx:
+ prefetch_related_objects(
+ books,
+ Prefetch(
+ "first_time_authors",
+ queryset=Author.objects.order_by("-name"),
+ ),
+ )
+ self.assertIn("ORDER BY", ctx.captured_queries[0]["sql"])
+
+ def test_one_to_one_forward(self):
+ houses = list(House.objects.all())
+ with self.assertNumQueries(1) as ctx:
+ prefetch_related_objects(houses, "main_room")
+ self.assertNotIn("ORDER BY", ctx.captured_queries[0]["sql"])
+
+ with self.assertNumQueries(0):
+ [house.main_room for house in houses]
+
+ houses = list(House.objects.all())
+ with self.assertNumQueries(1) as ctx:
+ prefetch_related_objects(
+ houses,
+ Prefetch("main_room", queryset=Room.objects.order_by("-name")),
+ )
+ self.assertNotIn("ORDER BY", ctx.captured_queries[0]["sql"])
+
+ def test_one_to_one_reverse(self):
+ rooms = list(Room.objects.all())
+ with self.assertNumQueries(1) as ctx:
+ prefetch_related_objects(rooms, "main_room_of")
+ self.assertNotIn("ORDER BY", ctx.captured_queries[0]["sql"])
+
+ with self.assertNumQueries(0):
+ [room.main_room_of for room in rooms]
+
+ rooms = list(Room.objects.all())
+ with self.assertNumQueries(1) as ctx:
+ prefetch_related_objects(
+ rooms,
+ Prefetch("main_room_of", queryset=House.objects.order_by("-name")),
+ )
+ self.assertNotIn("ORDER BY", ctx.captured_queries[0]["sql"])
+
def test_m2m_then_m2m(self):
"""A m2m can be followed through another m2m."""
authors = list(Author.objects.all())
| Elide ordering of prefetch querysets for single valued relationships
Description
(last modified by Laurent Lyaudet)
While the ordering of multi-valued relationships must be preserved when prefetching relationships, it is unnecessary when using prefetch_related against single valued relationships.
For example, given the following models
class Author(models.Model):
    name = models.CharField(max_length=200)

    class Meta:
        ordering = ["name"]


class Book(models.Model):
    title = models.CharField(max_length=200)
    author = models.ForeignKey(Author, related_name="books", on_delete=models.CASCADE)

    class Meta:
        ordering = ["title"]
The ordering of an author's books in Author.objects.prefetch_related("books") has significance as multiple books might be associated with each author.
It's not the case for a book's author in Book.objects.prefetch_related("author") though, as the relationship can only contain a single author and there is a single way to order the members of a singleton.
In other words sorted([element], key=sort_func) will result in [element] for any sort_func.
This property holds true for all the single valued relationships that the ORM supports (backward and forward 1:1 and forward 1:M) which allows the prefetching to elide any predefined ordering safely to avoid an unnecessary and possibly expensive ordering defined for the related model queryset.
Currently the prefetch of authors will still emit the ORDER BY and add useless load on the DB server.
It would be useful to remove this order by.
#ClimateChangeBrake
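Until the ordering is elided automatically, the per-query workaround looks like this (a sketch using the Author/Book models defined above): it keeps the single-valued prefetch but drops the implicit Meta.ordering, so the IN query is emitted without an ORDER BY clause.
from django.db.models import Prefetch

# Explicit empty order_by() removes Author.Meta.ordering from the prefetch query;
# with only one author per book, the ordering carries no information anyway.
books = Book.objects.prefetch_related(
    Prefetch("author", queryset=Author.objects.order_by())
)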
| ['Meta.ordering is working as expected, please \u200brefer to its documentation and associated warning Ordering is not a free operation. Each field you add to the ordering incurs a cost to your database. Each foreign key you add will implicitly include all of its default orderings as well. If you don\'t want this implicit behaviour then don\'t use Meta.ordering. If you want to keep using it but not for particular prefetches than use Prefetch objects with a queryset that explicitly calls order_by() to disable ordering. B.objects.prefetch_related(Prefetch("a", A.objects.order_by()))', 1710530134.0]
["Again a fast and without thought answer. I already know for this solution with Prefetch. Continue bashing good ideas because you don't like people giving them. I'll applaude at the end. There is no way it is useful to keep an order by when you do a query SELECT * FROM a WHERE a.id IN (.....100 or more ids here) ORDER BY name; then add the result in the cache of B objects. What you reject without thought yields a speed-up of 10 to 15 % on very big prefetches...", 1710531280.0]
["Please refrain from assuming bad faith from triagers regarding the resolution of this ticket. The provided resolution was a reflected based on your report details and in no way based on your persona. What do you suggest should happen for the thousands of projects out there that rely on prefetch_related to return results in a way that respects Meta.ordering? We can't simply make the behaviour of prefetch_related inconsistent with the normal behaviour or related manager access because it performs poorly when defined against an non-indexed field. I think the documentation warning I referred to is unfortunately all we can do to warn about this behaviour. Either use Meta.ordering and be prepared to deal with its implicit footguns or don't use it and use order_by where appropriate. Whether Meta.ordering should exist in the first place is debatable as it's at the origin of many unexpected behaviour with other features of the ORM (aggregation comes to mind) but making prefetch_related special case it would not only be backward incompatible but inconsistent with how the rest of the framework treats it.", 1710538813.0]
['I spent my night on it but I was able to make a patch, and I don\'t think there will be any regression. Consider the following models in some project TestNoOrderByForForeignKeyPrefetches and some app test_no_order_by models.py file: from django.db import models class A(models.Model): name = models.CharField(max_length=200) class Meta: ordering = ["name"] class B(models.Model): name = models.CharField(max_length=200) a = models.ForeignKey(A, related_name="bs", on_delete=models.CASCADE) class Meta: ordering = ["name"] Then consider the following command TestNoOrderByForForeignKeyPrefetches/test_no_order_by/management/commands/test_no_order_by_command.py : from django.core.management.base import BaseCommand from django.db import connection from django.db.models import Prefetch, QuerySet, RawQuerySet from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ReverseOneToOneDescriptor, ) from TestNoOrderByForForeignKeyPrefetches.test_no_order_by.models import A, B old_prefetch_init = Prefetch.__init__ def new_prefetch_init(self, *args, **kwargs): result = old_prefetch_init(self, *args, **kwargs) if self.queryset is not None: self.queryset._do_not_modify_order_by = True return result Prefetch.__init__ = new_prefetch_init old_get_prefetch_querysets_forward_many_to_one = ForwardManyToOneDescriptor.get_prefetch_querysets old_get_prefetch_querysets_reverse_one_to_one = ReverseOneToOneDescriptor.get_prefetch_querysets def get_prefetch_querysets_forward_many_to_one(self, *args, **kwargs): result = old_get_prefetch_querysets_forward_many_to_one(self, *args, **kwargs) if not hasattr(result[0], \'_do_not_modify_order_by\'): result = (result[0].order_by(), *result[1:]) return result def get_prefetch_querysets_reverse_one_to_one(self, *args, **kwargs): result = old_get_prefetch_querysets_reverse_one_to_one(self, *args, **kwargs) if not hasattr(result[0], \'_do_not_modify_order_by\'): result = (result[0].order_by(), *result[1:]) return result ForwardManyToOneDescriptor.get_prefetch_querysets = get_prefetch_querysets_forward_many_to_one ReverseOneToOneDescriptor.get_prefetch_querysets = get_prefetch_querysets_reverse_one_to_one old_clone_queryset = QuerySet._clone def new_clone_queryset(self): result = old_clone_queryset(self) if hasattr(self, \'_do_not_modify_order_by\'): result._do_not_modify_order_by = True return result QuerySet._clone = new_clone_queryset old_clone_raw_queryset = RawQuerySet._clone def new_clone_raw_queryset(self): result = old_clone_raw_queryset(self) if hasattr(self, \'_do_not_modify_order_by\'): result._do_not_modify_order_by = True return result RawQuerySet._clone = new_clone_raw_queryset class Command(BaseCommand): help = "Test" def handle(self, *args, **options): B.objects.all().delete() A.objects.all().delete() a1 = A.objects.create(name="a1") a2 = A.objects.create(name="a2") a3 = A.objects.create(name="a3") a4 = A.objects.create(name="a4") a5 = A.objects.create(name="a5") a6 = A.objects.create(name="a6") a7 = A.objects.create(name="a7") b1 = B.objects.create(a=a1, name="b1") b2 = B.objects.create(a=a2, name="b2") b3 = B.objects.create(a=a3, name="b3") b4 = B.objects.create(a=a4, name="b4") b5 = B.objects.create(a=a5, name="b5") b6 = B.objects.create(a=a6, name="b6") b7 = B.objects.create(a=a7, name="b7") bs = list(B.objects.all().prefetch_related("a")) a_s = list(A.objects.all().prefetch_related("bs")) bs = list(B.objects.all().prefetch_related( Prefetch( "a", queryset=A.objects.order_by("-name") ), )) a_s = list(A.objects.all().prefetch_related( 
Prefetch( "bs", queryset=B.objects.order_by("-name") ), )) print(connection.queries) If you launch the command with python3 manage.py test_no_order_by_command, you will see that there are 8 SELECT after the 14 INSERT and that there is only 7 ORDER BY on them as requested. I will prepare a PR.', 1710549223.0]
['\u200bPR', 1710551387.0]
['Here is the PR, I will improve it when requested : \u200bhttps://github.com/django/django/pull/17984 :) I still have doubts about keeping the order by even with manual Prefetch. I need to verify if it is possible to bypass the filter by id.', 1710555004.0]
["Laurent, thanks for this patch, however I agree with Simon. I appreciate you'd like to reopen the ticket, but please \u200bfollow the triaging guidelines with regards to wontfix tickets and take this to DevelopersMailingList.", 1710567356.0] | 1,710,572,814,000 | [] | Performance Issue | [
"django/db/models/fields/related_descriptors.py:ForwardManyToOneDescriptor.get_prefetch_querysets",
"django/db/models/fields/related_descriptors.py:ReverseOneToOneDescriptor.get_prefetch_querysets"
] | [] | 2 |
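As an aside to the hints above, the advice there ("use order_by where appropriate") comes down to passing an explicit queryset to `Prefetch`. A rough sketch, assuming the `A`/`B` models from the hint's example app (not runnable outside a configured Django project):

```python
from django.db.models import Prefetch

# A and B are the hint's example models: B has a ForeignKey to A with
# related_name="bs", and both declare Meta.ordering = ["name"].

# Default behaviour: the prefetch query inherits Meta.ordering, which can be
# slow when the ordering column is not indexed.
a_rows = A.objects.prefetch_related("bs")

# An explicit Prefetch queryset takes control of the ordering instead.
a_rows = A.objects.prefetch_related(
    Prefetch("bs", queryset=B.objects.order_by()),          # clear the implicit ORDER BY
)
a_rows = A.objects.prefetch_related(
    Prefetch("bs", queryset=B.objects.order_by("-name")),   # or pick one explicitly
)
```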
django/django | django__django-17904 | fad334e1a9b54ea1acb8cce02a25934c5acfe99f | diff --git a/django/urls/resolvers.py b/django/urls/resolvers.py
index 5f9941dd65e8..1b26aed8c112 100644
--- a/django/urls/resolvers.py
+++ b/django/urls/resolvers.py
@@ -128,9 +128,6 @@ def get_ns_resolver(ns_pattern, resolver, converters):
class LocaleRegexDescriptor:
- def __init__(self, attr):
- self.attr = attr
-
def __get__(self, instance, cls=None):
"""
Return a compiled regular expression based on the active language.
@@ -140,15 +137,23 @@ def __get__(self, instance, cls=None):
# As a performance optimization, if the given regex string is a regular
# string (not a lazily-translated string proxy), compile it once and
# avoid per-language compilation.
- pattern = getattr(instance, self.attr)
+ pattern = instance._regex
if isinstance(pattern, str):
- instance.__dict__["regex"] = instance._compile(pattern)
+ instance.__dict__["regex"] = self._compile(pattern)
return instance.__dict__["regex"]
language_code = get_language()
if language_code not in instance._regex_dict:
- instance._regex_dict[language_code] = instance._compile(str(pattern))
+ instance._regex_dict[language_code] = self._compile(str(pattern))
return instance._regex_dict[language_code]
+ def _compile(self, regex):
+ try:
+ return re.compile(regex)
+ except re.error as e:
+ raise ImproperlyConfigured(
+ f'"{regex}" is not a valid regular expression: {e}'
+ ) from e
+
class CheckURLMixin:
def describe(self):
@@ -164,12 +169,11 @@ def _check_pattern_startswith_slash(self):
"""
Check that the pattern does not begin with a forward slash.
"""
- regex_pattern = self.regex.pattern
if not settings.APPEND_SLASH:
# Skip check as it can be useful to start a URL pattern with a slash
# when APPEND_SLASH=False.
return []
- if regex_pattern.startswith(("/", "^/", "^\\/")) and not regex_pattern.endswith(
+ if self._regex.startswith(("/", "^/", "^\\/")) and not self._regex.endswith(
"/"
):
warning = Warning(
@@ -186,7 +190,7 @@ def _check_pattern_startswith_slash(self):
class RegexPattern(CheckURLMixin):
- regex = LocaleRegexDescriptor("_regex")
+ regex = LocaleRegexDescriptor()
def __init__(self, regex, name=None, is_endpoint=False):
self._regex = regex
@@ -219,8 +223,7 @@ def check(self):
return warnings
def _check_include_trailing_dollar(self):
- regex_pattern = self.regex.pattern
- if regex_pattern.endswith("$") and not regex_pattern.endswith(r"\$"):
+ if self._regex.endswith("$") and not self._regex.endswith(r"\$"):
return [
Warning(
"Your URL pattern {} uses include with a route ending with a '$'. "
@@ -232,15 +235,6 @@ def _check_include_trailing_dollar(self):
else:
return []
- def _compile(self, regex):
- """Compile and return the given regular expression."""
- try:
- return re.compile(regex)
- except re.error as e:
- raise ImproperlyConfigured(
- '"%s" is not a valid regular expression: %s' % (regex, e)
- ) from e
-
def __str__(self):
return str(self._regex)
@@ -250,7 +244,7 @@ def __str__(self):
)
-def _route_to_regex(route, is_endpoint=False):
+def _route_to_regex(route, is_endpoint):
"""
Convert a path pattern into a regular expression. Return the regular
expression and a dictionary mapping the capture names to the converters.
@@ -296,15 +290,36 @@ def _route_to_regex(route, is_endpoint=False):
return "".join(parts), converters
+class LocaleRegexRouteDescriptor:
+ def __get__(self, instance, cls=None):
+ """
+ Return a compiled regular expression based on the active language.
+ """
+ if instance is None:
+ return self
+ # As a performance optimization, if the given route is a regular string
+ # (not a lazily-translated string proxy), compile it once and avoid
+ # per-language compilation.
+ if isinstance(instance._route, str):
+ instance.__dict__["regex"] = re.compile(instance._regex)
+ return instance.__dict__["regex"]
+ language_code = get_language()
+ if language_code not in instance._regex_dict:
+ instance._regex_dict[language_code] = re.compile(
+ _route_to_regex(str(instance._route), instance._is_endpoint)[0]
+ )
+ return instance._regex_dict[language_code]
+
+
class RoutePattern(CheckURLMixin):
- regex = LocaleRegexDescriptor("_route")
+ regex = LocaleRegexRouteDescriptor()
def __init__(self, route, name=None, is_endpoint=False):
self._route = route
+ self._regex, self.converters = _route_to_regex(str(route), is_endpoint)
self._regex_dict = {}
self._is_endpoint = is_endpoint
self.name = name
- self.converters = _route_to_regex(str(route), is_endpoint)[1]
def match(self, path):
match = self.regex.search(path)
@@ -356,9 +371,6 @@ def _check_pattern_unmatched_angle_brackets(self):
warnings.append(Warning(msg % (self.describe(), "<"), id="urls.W010"))
return warnings
- def _compile(self, route):
- return re.compile(_route_to_regex(route, self._is_endpoint)[0])
-
def __str__(self):
return str(self._route)
| diff --git a/tests/i18n/patterns/locale/en/LC_MESSAGES/django.mo b/tests/i18n/patterns/locale/en/LC_MESSAGES/django.mo
index ec7644b504c3..b1f63b103106 100644
Binary files a/tests/i18n/patterns/locale/en/LC_MESSAGES/django.mo and b/tests/i18n/patterns/locale/en/LC_MESSAGES/django.mo differ
diff --git a/tests/i18n/patterns/locale/en/LC_MESSAGES/django.po b/tests/i18n/patterns/locale/en/LC_MESSAGES/django.po
index 9a14a80ceb8f..ac98eb5f08aa 100644
--- a/tests/i18n/patterns/locale/en/LC_MESSAGES/django.po
+++ b/tests/i18n/patterns/locale/en/LC_MESSAGES/django.po
@@ -7,31 +7,53 @@ msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2011-06-15 11:33+0200\n"
+"POT-Creation-Date: 2024-03-01 21:18+0000\n"
"PO-Revision-Date: 2011-06-14 16:16+0100\n"
"Last-Translator: Jannis Leidel <[email protected]>\n"
"Language-Team: LANGUAGE <[email protected]>\n"
+"Language: \n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Language: \n"
#: urls/default.py:11
-msgid "^translated/$"
+#, fuzzy
+#| msgid "^translated/$"
+msgid "translated/"
msgstr "^translated/$"
#: urls/default.py:12
+#, fuzzy
+#| msgid "^translated/$"
+msgid "^translated-regex/$"
+msgstr "^translated/$"
+
+#: urls/default.py:14
msgid "^translated/(?P<slug>[\\w-]+)/$"
msgstr "^translated/(?P<slug>[\\w-]+)/$"
-#: urls/default.py:17
+#: urls/default.py:25
+msgid "^with-arguments/(?P<argument>[\\w-]+)/(?:(?P<optional>[\\w-]+).html)?$"
+msgstr ""
+
+#: urls/default.py:29
msgid "^users/$"
msgstr "^users/$"
-#: urls/default.py:18 urls/wrong.py:7
+#: urls/default.py:31 urls/wrong.py:7
msgid "^account/"
msgstr "^account/"
#: urls/namespace.py:9 urls/wrong_namespace.py:10
msgid "^register/$"
msgstr "^register/$"
+
+#: urls/namespace.py:10
+msgid "^register-without-slash$"
+msgstr ""
+
+#: urls/namespace.py:11
+#, fuzzy
+#| msgid "^register/$"
+msgid "register-as-path/"
+msgstr "^register/$"
diff --git a/tests/i18n/patterns/locale/nl/LC_MESSAGES/django.mo b/tests/i18n/patterns/locale/nl/LC_MESSAGES/django.mo
index 5eac50466cb7..544bfdbfc664 100644
Binary files a/tests/i18n/patterns/locale/nl/LC_MESSAGES/django.mo and b/tests/i18n/patterns/locale/nl/LC_MESSAGES/django.mo differ
diff --git a/tests/i18n/patterns/locale/nl/LC_MESSAGES/django.po b/tests/i18n/patterns/locale/nl/LC_MESSAGES/django.po
index a938e3371dea..aa58c506d562 100644
--- a/tests/i18n/patterns/locale/nl/LC_MESSAGES/django.po
+++ b/tests/i18n/patterns/locale/nl/LC_MESSAGES/django.po
@@ -7,29 +7,37 @@ msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2011-06-15 11:33+0200\n"
+"POT-Creation-Date: 2024-03-01 21:18+0000\n"
"PO-Revision-Date: 2011-06-14 16:16+0100\n"
"Last-Translator: Jannis Leidel <[email protected]>\n"
"Language-Team: LANGUAGE <[email protected]>\n"
+"Language: \n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Language: \n"
"Plural-Forms: nplurals=2; plural=(n != 1)\n"
#: urls/default.py:11
-msgid "^translated/$"
-msgstr "^vertaald/$"
+msgid "translated/"
+msgstr "vertaald/"
#: urls/default.py:12
+msgid "^translated-regex/$"
+msgstr "^vertaald-regex/$"
+
+#: urls/default.py:14
msgid "^translated/(?P<slug>[\\w-]+)/$"
msgstr "^vertaald/(?P<slug>[\\w-]+)/$"
-#: urls/default.py:17
+#: urls/default.py:25
+msgid "^with-arguments/(?P<argument>[\\w-]+)/(?:(?P<optional>[\\w-]+).html)?$"
+msgstr ""
+
+#: urls/default.py:29
msgid "^users/$"
msgstr "^gebruikers/$"
-#: urls/default.py:18 urls/wrong.py:7
+#: urls/default.py:31 urls/wrong.py:7
msgid "^account/"
msgstr "^profiel/"
@@ -37,6 +45,10 @@ msgstr "^profiel/"
msgid "^register/$"
msgstr "^registreren/$"
-#: urls/namespace.py:12
+#: urls/namespace.py:10
+msgid "^register-without-slash$"
+msgstr ""
+
+#: urls/namespace.py:11
msgid "register-as-path/"
msgstr "registreren-als-pad/"
diff --git a/tests/i18n/patterns/locale/pt_BR/LC_MESSAGES/django.mo b/tests/i18n/patterns/locale/pt_BR/LC_MESSAGES/django.mo
index 1d7b346c278c..8e36cb206409 100644
Binary files a/tests/i18n/patterns/locale/pt_BR/LC_MESSAGES/django.mo and b/tests/i18n/patterns/locale/pt_BR/LC_MESSAGES/django.mo differ
diff --git a/tests/i18n/patterns/locale/pt_BR/LC_MESSAGES/django.po b/tests/i18n/patterns/locale/pt_BR/LC_MESSAGES/django.po
index fd3388e4b013..464d14bc1f75 100644
--- a/tests/i18n/patterns/locale/pt_BR/LC_MESSAGES/django.po
+++ b/tests/i18n/patterns/locale/pt_BR/LC_MESSAGES/django.po
@@ -7,32 +7,50 @@ msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2011-06-15 11:34+0200\n"
+"POT-Creation-Date: 2024-03-01 21:18+0000\n"
"PO-Revision-Date: 2011-06-14 16:17+0100\n"
"Last-Translator: Jannis Leidel <[email protected]>\n"
"Language-Team: LANGUAGE <[email protected]>\n"
+"Language: \n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Language: \n"
"Plural-Forms: nplurals=2; plural=(n > 1)\n"
#: urls/default.py:11
-msgid "^translated/$"
-msgstr "^traduzidos/$"
+msgid "translated/"
+msgstr "traduzidos/"
#: urls/default.py:12
+msgid "^translated-regex/$"
+msgstr "^traduzidos-regex/$"
+
+#: urls/default.py:14
msgid "^translated/(?P<slug>[\\w-]+)/$"
msgstr "^traduzidos/(?P<slug>[\\w-]+)/$"
-#: urls/default.py:17
+#: urls/default.py:25
+msgid "^with-arguments/(?P<argument>[\\w-]+)/(?:(?P<optional>[\\w-]+).html)?$"
+msgstr ""
+
+#: urls/default.py:29
msgid "^users/$"
msgstr "^usuarios/$"
-#: urls/default.py:18 urls/wrong.py:7
+#: urls/default.py:31 urls/wrong.py:7
msgid "^account/"
msgstr "^conta/"
#: urls/namespace.py:9 urls/wrong_namespace.py:10
msgid "^register/$"
msgstr "^registre-se/$"
+
+#: urls/namespace.py:10
+msgid "^register-without-slash$"
+msgstr ""
+
+#: urls/namespace.py:11
+#, fuzzy
+#| msgid "^register/$"
+msgid "register-as-path/"
+msgstr "^registre-se/$"
diff --git a/tests/i18n/patterns/tests.py b/tests/i18n/patterns/tests.py
index e2fee904b149..bd329e69f8e7 100644
--- a/tests/i18n/patterns/tests.py
+++ b/tests/i18n/patterns/tests.py
@@ -134,6 +134,9 @@ class URLTranslationTests(URLTestCaseBase):
def test_no_prefix_translated(self):
with translation.override("en"):
self.assertEqual(reverse("no-prefix-translated"), "/translated/")
+ self.assertEqual(
+ reverse("no-prefix-translated-regex"), "/translated-regex/"
+ )
self.assertEqual(
reverse("no-prefix-translated-slug", kwargs={"slug": "yeah"}),
"/translated/yeah/",
@@ -141,6 +144,7 @@ def test_no_prefix_translated(self):
with translation.override("nl"):
self.assertEqual(reverse("no-prefix-translated"), "/vertaald/")
+ self.assertEqual(reverse("no-prefix-translated-regex"), "/vertaald-regex/")
self.assertEqual(
reverse("no-prefix-translated-slug", kwargs={"slug": "yeah"}),
"/vertaald/yeah/",
@@ -148,6 +152,9 @@ def test_no_prefix_translated(self):
with translation.override("pt-br"):
self.assertEqual(reverse("no-prefix-translated"), "/traduzidos/")
+ self.assertEqual(
+ reverse("no-prefix-translated-regex"), "/traduzidos-regex/"
+ )
self.assertEqual(
reverse("no-prefix-translated-slug", kwargs={"slug": "yeah"}),
"/traduzidos/yeah/",
@@ -180,7 +187,7 @@ def test_translate_url_utility(self):
"/nl/profiel/registreren-als-pad/",
)
self.assertEqual(translation.get_language(), "en")
- # URL with parameters.
+ # re_path() URL with parameters.
self.assertEqual(
translate_url("/en/with-arguments/regular-argument/", "nl"),
"/nl/with-arguments/regular-argument/",
@@ -191,6 +198,11 @@ def test_translate_url_utility(self):
),
"/nl/with-arguments/regular-argument/optional.html",
)
+ # path() URL with parameter.
+ self.assertEqual(
+ translate_url("/en/path-with-arguments/regular-argument/", "nl"),
+ "/nl/path-with-arguments/regular-argument/",
+ )
with translation.override("nl"):
self.assertEqual(translate_url("/nl/gebruikers/", "en"), "/en/users/")
diff --git a/tests/i18n/patterns/urls/default.py b/tests/i18n/patterns/urls/default.py
index 418e9f568568..090b92eeca66 100644
--- a/tests/i18n/patterns/urls/default.py
+++ b/tests/i18n/patterns/urls/default.py
@@ -8,7 +8,8 @@
urlpatterns = [
path("not-prefixed/", view, name="not-prefixed"),
path("not-prefixed-include/", include("i18n.patterns.urls.included")),
- re_path(_(r"^translated/$"), view, name="no-prefix-translated"),
+ path(_("translated/"), view, name="no-prefix-translated"),
+ re_path(_(r"^translated-regex/$"), view, name="no-prefix-translated-regex"),
re_path(
_(r"^translated/(?P<slug>[\w-]+)/$"),
view,
@@ -25,6 +26,11 @@
view,
name="with-arguments",
),
+ path(
+ _("path-with-arguments/<str:argument>/"),
+ view,
+ name="path-with-arguments",
+ ),
re_path(_(r"^users/$"), view, name="users"),
re_path(
_(r"^account/"), include("i18n.patterns.urls.namespace", namespace="account")
| Stop URL system checks from compiling regular expressions
Description
(last modified by Adam Johnson)
Continuing my project to optimize the system checks, I found some good optimizations under django.core.checks.urls.check_url_config(), which showed up as quite expensive in profiling.
Looking down the call tree, it seems the most expensive part of this process is compiling each URL pattern's regular expression. This is unnecessary work, though, as the checks only need *uncompiled* regular expression patterns. Using the compiled versions "undoes" the lazy-compile optimization that LocaleRegexDescriptor was created for in #27453 / 6e222dae5636f875c19ec66f730a4241abe33faa, at least for any process that runs checks.
The checks were fetching the uncompiled pattern with self.regex.pattern, which makes LocaleRegexDescriptor compile the pattern only to then read the uncompiled pattern from its pattern attribute.
Additionally, RoutePattern was calling _route_to_regex() twice to fetch its two result variables in different places: once in __init__() and again in _compile() (in the non-translated case). This function has non-trivial cost so avoiding double execution is worth it.
Before optimization stats:
check_url_config took 67ms, or ~10% of the time for checks.
LocaleRegexDescriptor.__get__() showed 965 calls taking ~60ms, ~9% of the total runtime of checks.
re.compile() showed 741 calls for 94ms.
_route_to_regex() had 1900 calls taking 18ms (~2.6% of the total runtime).
After optimization:
check_url_config() took 5ms, ~0.9% of the new total runtime.
The calls to LocaleRegexDescriptor.__get__ are gone.
re.compile() drops to 212 calls from other sites, for a total of 51ms.
_route_to_regex() drops to the expected 950 calls, taking half the time at 9ms.
(I also tried benchmarking with django-asv but got inconclusive results where change was within the error margins.)
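To make the lazy-compile idea concrete, here is a minimal standalone sketch (plain Python, not Django's actual classes): the descriptor compiles and caches on first `regex` access, so a check that reads the raw `_regex` string never triggers compilation.

```python
import re

class LazyRegexDescriptor:
    """Compile the pattern on first access and cache it on the instance."""

    def __get__(self, instance, cls=None):
        if instance is None:
            return self
        compiled = re.compile(instance._regex)
        # A non-data descriptor: the cached instance attribute now shadows it.
        instance.__dict__["regex"] = compiled
        return compiled

class Pattern:
    regex = LazyRegexDescriptor()

    def __init__(self, regex):
        self._regex = regex

    def check_pattern_startswith_slash(self):
        # Check-style inspection reads the raw string; no compilation needed.
        return self._regex.startswith(("/", "^/", "^\\/"))

p = Pattern(r"^articles/(?P<year>[0-9]{4})/$")
print(p.check_pattern_startswith_slash())     # False, and re.compile() never ran
print(bool(p.regex.match("articles/2024/")))  # True, compiled lazily here
```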
| ['Tentatively accepted.', 1709000987.0] | 1,708,802,262,000 | [] | Performance Issue | [
"django/urls/resolvers.py:LocaleRegexDescriptor.__init__",
"django/urls/resolvers.py:LocaleRegexDescriptor.__get__",
"django/urls/resolvers.py:CheckURLMixin._check_pattern_startswith_slash",
"django/urls/resolvers.py:RegexPattern._check_include_trailing_dollar",
"django/urls/resolvers.py:RegexPattern._compile",
"django/urls/resolvers.py:_route_to_regex",
"django/urls/resolvers.py:RoutePattern.__init__",
"django/urls/resolvers.py:RoutePattern._compile"
] | [
"django/urls/resolvers.py:LocaleRegexDescriptor._compile",
"django/urls/resolvers.py:LocaleRegexRouteDescriptor.__get__"
] | 8 |
django/django | django__django-17885 | 03c0a3de722c4a7de9f3edfeb26417ebc8b90fe9 | diff --git a/django/contrib/admin/options.py b/django/contrib/admin/options.py
index 6d5c0708a322..78063a134d2a 100644
--- a/django/contrib/admin/options.py
+++ b/django/contrib/admin/options.py
@@ -41,6 +41,7 @@
from django.core.paginator import Paginator
from django.db import models, router, transaction
from django.db.models.constants import LOOKUP_SEP
+from django.db.models.functions import Cast
from django.forms.formsets import DELETION_FIELD_NAME, all_valid
from django.forms.models import (
BaseInlineFormSet,
@@ -1207,9 +1208,33 @@ def construct_search(field_name):
may_have_duplicates = False
search_fields = self.get_search_fields(request)
if search_fields and search_term:
- orm_lookups = [
- construct_search(str(search_field)) for search_field in search_fields
- ]
+ str_annotations = {}
+ orm_lookups = []
+ for field in search_fields:
+ if field.endswith("__exact"):
+ field_name = field.rsplit("__exact", 1)[0]
+ try:
+ field_obj = queryset.model._meta.get_field(field_name)
+ except FieldDoesNotExist:
+ lookup = construct_search(field)
+ orm_lookups.append(lookup)
+ continue
+ # Add string cast annotations for non-string exact lookups.
+ if not isinstance(field_obj, (models.CharField, models.TextField)):
+ str_annotations[f"{field_name}_str"] = Cast(
+ field_name, output_field=models.CharField()
+ )
+ orm_lookups.append(f"{field_name}_str__exact")
+ else:
+ lookup = construct_search(field)
+ orm_lookups.append(lookup)
+ else:
+ lookup = construct_search(str(field))
+ orm_lookups.append(lookup)
+
+ if str_annotations:
+ queryset = queryset.annotate(**str_annotations)
+
term_queries = []
for bit in smart_split(search_term):
if bit.startswith(('"', "'")) and bit[0] == bit[-1]:
| diff --git a/tests/admin_changelist/admin.py b/tests/admin_changelist/admin.py
index 349ef7d465b6..d9dc498e8427 100644
--- a/tests/admin_changelist/admin.py
+++ b/tests/admin_changelist/admin.py
@@ -48,6 +48,7 @@ class ChildAdmin(admin.ModelAdmin):
list_display = ["name", "parent"]
list_per_page = 10
list_filter = ["parent", "age"]
+ search_fields = ["age__exact", "name__exact"]
def get_queryset(self, request):
return super().get_queryset(request).select_related("parent")
diff --git a/tests/admin_changelist/tests.py b/tests/admin_changelist/tests.py
index d8055a809be2..a823a72f7d7f 100644
--- a/tests/admin_changelist/tests.py
+++ b/tests/admin_changelist/tests.py
@@ -860,6 +860,25 @@ def test_custom_lookup_with_pk_shortcut(self):
cl = m.get_changelist_instance(request)
self.assertCountEqual(cl.queryset, [abcd])
+ def test_search_with_exact_lookup_for_non_string_field(self):
+ child = Child.objects.create(name="Asher", age=11)
+ model_admin = ChildAdmin(Child, custom_site)
+
+ for search_term, expected_result in [
+ ("11", [child]),
+ ("Asher", [child]),
+ ("1", []),
+ ("A", []),
+ ("random", []),
+ ]:
+ request = self.factory.get("/", data={SEARCH_VAR: search_term})
+ request.user = self.superuser
+ with self.subTest(search_term=search_term):
+ # 1 query for filtered result, 1 for filtered count, 1 for total count.
+ with self.assertNumQueries(3):
+ cl = model_admin.get_changelist_instance(request)
+ self.assertCountEqual(cl.queryset, expected_result)
+
def test_no_distinct_for_m2m_in_list_filter_without_params(self):
"""
If a ManyToManyField is in list_filter but isn't in any lookup params,
| Make ModelAdmin.search_fields raise data errors on __exact lookups for non-string fields.
Description
Currently, all queries are done as string lookups which gives something like this on PostgreSQL, for example: ("admin_views_pluggablesearchperson"."age"::text) = UPPER(20)). It would be more efficient if the admin cast the search value to an integer and used that for the query.
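A rough sketch of the approach eventually taken (hypothetical `Person` model; needs a configured Django app to run): the non-string field is annotated with a text cast once and then compared exactly, so a non-numeric term matches nothing instead of raising.

```python
from django.db import models
from django.db.models.functions import Cast

class Person(models.Model):          # hypothetical model for illustration
    name = models.CharField(max_length=100)
    age = models.IntegerField()

def search_age_exact(queryset, search_term):
    # Cast the integer column to text and compare exactly, mirroring what the
    # admin now does for search_fields entries like "age__exact".
    queryset = queryset.annotate(
        age_str=Cast("age", output_field=models.CharField()),
    )
    return queryset.filter(age_str__exact=search_term)

# search_age_exact(Person.objects.all(), "20")  -> people whose age renders as "20"
# search_age_exact(Person.objects.all(), "abc") -> empty queryset, no ValueError
```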
| ['PR: \u200bhttps://github.com/django/django/pull/6219', 1456649223.0]
['I think we should favor the approach suggested in #26184 instead.', 1456652736.0]
["I agree, let's revisit this if it doesn't.", 1457113813.0]
["As far as I tested, you can now use something like search_fields = ['votes__exact'] to do the appropriate query, however, if a non-integer search term is entered, the page crashes with ValueError invalid literal for int() with base 10: 'abc' from IntegerField.get_prep_value(). Probably that exception should be caught in ModelAdmin.get_search_results() and no results returned.", 1532099527.0]
['PR: \u200bhttps://github.com/django/django/pull/10339', 1535321698.0]
["Replying to Tim Graham: As far as I tested, you can now use something like search_fields = ['votes__exact'] to do the appropriate query, however, if a non-integer search term is entered, the page crashes with ValueError invalid literal for int() with base 10: 'abc' from IntegerField.get_prep_value(). Probably that exception should be caught in ModelAdmin.get_search_results() and no results returned. We have to consider that there might be multiple search fields of mixed types though, i.e. search_fields = ['votes__exact', 'title', 'text__contains'], just so the matches for other fields does not get discarded just because one of the fields threw an error. Instead of trying to catch these errors from .filter() as in the previous patch, we could run to_python() for each search field in order to test if that particular field would raise an exception later on when the query is run.", 1593498353.0]
['#34191 was a duplicate for DecimalField.', 1669781193.0]
['Replying to Mariusz Felisiak: #34191 was a duplicate for DecimalField. You can use iexact instead of exact for non-string fields, I think, it even makes sense, because the word exact means complete equality even with type. It works for me.', 1671817064.0]
['can i work on this?', 1706431804.0]
['PR: \u200bhttps://github.com/django/django/pull/17885', 1708402891.0] | 1,708,379,069,000 | [
"selenium"
] | Performance Issue | [
"django/contrib/admin/options.py:ModelAdmin.get_search_results"
] | [] | 1 |
django/django | django__django-17874 | 28a3fbe0048883fdd5cefd6ffecb88e351121891 | diff --git a/django/db/models/options.py b/django/db/models/options.py
index 9b3106f67ef6..e63a81c2d8e7 100644
--- a/django/db/models/options.py
+++ b/django/db/models/options.py
@@ -395,9 +395,11 @@ def can_migrate(self, connection):
)
return True
- @property
+ @cached_property
def verbose_name_raw(self):
"""Return the untranslated verbose name."""
+ if isinstance(self.verbose_name, str):
+ return self.verbose_name
with override(None):
return str(self.verbose_name)
| diff --git a/tests/model_meta/models.py b/tests/model_meta/models.py
index 6da62be2ac26..bc69d61a59cd 100644
--- a/tests/model_meta/models.py
+++ b/tests/model_meta/models.py
@@ -1,6 +1,7 @@
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.db import models
+from django.utils.translation import gettext_lazy as _
class Relation(models.Model):
@@ -124,6 +125,9 @@ class Person(BasePerson):
# GR fields
generic_relation_concrete = GenericRelation(Relation)
+ class Meta:
+ verbose_name = _("Person")
+
class ProxyPerson(Person):
class Meta:
diff --git a/tests/model_meta/tests.py b/tests/model_meta/tests.py
index fe2f6e63da8d..7204a5e93a14 100644
--- a/tests/model_meta/tests.py
+++ b/tests/model_meta/tests.py
@@ -222,6 +222,17 @@ def test_get_fields_only_searches_forward_on_apps_not_ready(self):
opts.apps.models_ready = True
+class VerboseNameRawTests(SimpleTestCase):
+ def test_string(self):
+ # Clear cached property.
+ Relation._meta.__dict__.pop("verbose_name_raw", None)
+ self.assertEqual(Relation._meta.verbose_name_raw, "relation")
+
+ def test_gettext(self):
+ Person._meta.__dict__.pop("verbose_name_raw", None)
+ self.assertEqual(Person._meta.verbose_name_raw, "Person")
+
+
class RelationTreeTests(SimpleTestCase):
all_models = (Relation, AbstractPerson, BasePerson, Person, ProxyPerson, Relating)
| Cache Options.verbose_name_raw
Description
(last modified by Adam Johnson)
Another candidate for caching, like #35230, following the same system check profiling.
The Model._meta.verbose_name_raw property returns the stringified version of the verbose_name attribute whilst temporarily disabling translations. It is only used in django.contrib.auth for creating permission names.
I found this property was taking ~15% of the total runtime for system checks on a project with 118 models. Turning it into a cached_property and adding a no-translation fast path saves nearly all this cost, with the results below.
Before: 520 calls taking 10ms
After: 105 calls taking ~0ms
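A minimal standalone sketch of the two parts of the optimization (caching plus a plain-string fast path); `Options` here is a stand-in, not Django's real class:

```python
from functools import cached_property

class Options:                         # stand-in for django.db.models.options.Options
    def __init__(self, verbose_name):
        self.verbose_name = verbose_name   # plain str or a lazy translation proxy

    @cached_property
    def verbose_name_raw(self):
        if isinstance(self.verbose_name, str):
            return self.verbose_name       # fast path: skip the translation override
        from django.utils.translation import override
        with override(None):               # lazy proxy: force the untranslated text
            return str(self.verbose_name)

opts = Options("person")
assert opts.verbose_name_raw == "person"
assert "verbose_name_raw" in opts.__dict__   # cached; later accesses are plain attribute reads
```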
| 1,708,292,988,000 | [] | Performance Issue | [
"django/db/models/options.py:Options.verbose_name_raw"
] | [] | 1 |
|
roboflow/supervision | roboflow__supervision-1739 | 6f55d9de9e0f5469f11f768fb993de133f7d5af3 | diff --git a/supervision/detection/utils.py b/supervision/detection/utils.py
index a2cbd87bd..0d5ec475e 100644
--- a/supervision/detection/utils.py
+++ b/supervision/detection/utils.py
@@ -720,25 +720,71 @@ def move_masks(
masks (npt.NDArray[np.bool_]): A 3D array of binary masks corresponding to the
predictions. Shape: `(N, H, W)`, where N is the number of predictions, and
H, W are the dimensions of each mask.
- offset (npt.NDArray[np.int32]): An array of shape `(2,)` containing non-negative
- int values `[dx, dy]`.
+ offset (npt.NDArray[np.int32]): An array of shape `(2,)` containing int values
+ `[dx, dy]`. Supports both positive and negative values for bidirectional
+ movement.
resolution_wh (Tuple[int, int]): The width and height of the desired mask
resolution.
Returns:
(npt.NDArray[np.bool_]) repositioned masks, optionally padded to the specified
shape.
- """
- if offset[0] < 0 or offset[1] < 0:
- raise ValueError(f"Offset values must be non-negative integers. Got: {offset}")
+ Examples:
+ ```python
+ import numpy as np
+ import supervision as sv
+ mask = np.array([[[False, False, False, False],
+ [False, True, True, False],
+ [False, True, True, False],
+ [False, False, False, False]]], dtype=bool)
+
+ offset = np.array([1, 1])
+ sv.move_masks(mask, offset, resolution_wh=(4, 4))
+ # array([[[False, False, False, False],
+ # [False, False, False, False],
+ # [False, False, True, True],
+ # [False, False, True, True]]], dtype=bool)
+
+ offset = np.array([-2, 2])
+ sv.move_masks(mask, offset, resolution_wh=(4, 4))
+ # array([[[False, False, False, False],
+ # [False, False, False, False],
+ # [False, False, False, False],
+ # [True, False, False, False]]], dtype=bool)
+ ```
+ """
mask_array = np.full((masks.shape[0], resolution_wh[1], resolution_wh[0]), False)
- mask_array[
- :,
- offset[1] : masks.shape[1] + offset[1],
- offset[0] : masks.shape[2] + offset[0],
- ] = masks
+
+ if offset[0] < 0:
+ source_x_start = -offset[0]
+ source_x_end = min(masks.shape[2], resolution_wh[0] - offset[0])
+ destination_x_start = 0
+ destination_x_end = min(resolution_wh[0], masks.shape[2] + offset[0])
+ else:
+ source_x_start = 0
+ source_x_end = min(masks.shape[2], resolution_wh[0] - offset[0])
+ destination_x_start = offset[0]
+ destination_x_end = offset[0] + source_x_end - source_x_start
+
+ if offset[1] < 0:
+ source_y_start = -offset[1]
+ source_y_end = min(masks.shape[1], resolution_wh[1] - offset[1])
+ destination_y_start = 0
+ destination_y_end = min(resolution_wh[1], masks.shape[1] + offset[1])
+ else:
+ source_y_start = 0
+ source_y_end = min(masks.shape[1], resolution_wh[1] - offset[1])
+ destination_y_start = offset[1]
+ destination_y_end = offset[1] + source_y_end - source_y_start
+
+ if source_x_end > source_x_start and source_y_end > source_y_start:
+ mask_array[
+ :,
+ destination_y_start:destination_y_end,
+ destination_x_start:destination_x_end,
+ ] = masks[:, source_y_start:source_y_end, source_x_start:source_x_end]
return mask_array
| diff --git a/test/detection/test_utils.py b/test/detection/test_utils.py
index 87e50f6a4..d93c72c83 100644
--- a/test/detection/test_utils.py
+++ b/test/detection/test_utils.py
@@ -16,6 +16,7 @@
merge_data,
merge_metadata,
move_boxes,
+ move_masks,
process_roboflow_result,
scale_boxes,
xcycwh_to_xyxy,
@@ -442,6 +443,268 @@ def test_move_boxes(
assert np.array_equal(result, expected_result)
[email protected](
+ "masks, offset, resolution_wh, expected_result, exception",
+ [
+ (
+ np.array(
+ [
+ [
+ [False, False, False, False],
+ [False, True, True, False],
+ [False, True, True, False],
+ [False, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ np.array([0, 0]),
+ (4, 4),
+ np.array(
+ [
+ [
+ [False, False, False, False],
+ [False, True, True, False],
+ [False, True, True, False],
+ [False, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ DoesNotRaise(),
+ ),
+ (
+ np.array(
+ [
+ [
+ [False, False, False, False],
+ [False, True, True, False],
+ [False, True, True, False],
+ [False, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ np.array([-1, -1]),
+ (4, 4),
+ np.array(
+ [
+ [
+ [True, True, False, False],
+ [True, True, False, False],
+ [False, False, False, False],
+ [False, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ DoesNotRaise(),
+ ),
+ (
+ np.array(
+ [
+ [
+ [False, False, False, False],
+ [False, True, True, False],
+ [False, True, True, False],
+ [False, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ np.array([-2, -2]),
+ (4, 4),
+ np.array(
+ [
+ [
+ [True, False, False, False],
+ [False, False, False, False],
+ [False, False, False, False],
+ [False, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ DoesNotRaise(),
+ ),
+ (
+ np.array(
+ [
+ [
+ [False, False, False, False],
+ [False, True, True, False],
+ [False, True, True, False],
+ [False, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ np.array([-3, -3]),
+ (4, 4),
+ np.array(
+ [
+ [
+ [False, False, False, False],
+ [False, False, False, False],
+ [False, False, False, False],
+ [False, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ DoesNotRaise(),
+ ),
+ (
+ np.array(
+ [
+ [
+ [False, False, False, False],
+ [False, True, True, False],
+ [False, True, True, False],
+ [False, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ np.array([-2, -1]),
+ (4, 4),
+ np.array(
+ [
+ [
+ [True, False, False, False],
+ [True, False, False, False],
+ [False, False, False, False],
+ [False, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ DoesNotRaise(),
+ ),
+ (
+ np.array(
+ [
+ [
+ [False, False, False, False],
+ [False, True, True, False],
+ [False, True, True, False],
+ [False, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ np.array([-1, -2]),
+ (4, 4),
+ np.array(
+ [
+ [
+ [True, True, False, False],
+ [False, False, False, False],
+ [False, False, False, False],
+ [False, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ DoesNotRaise(),
+ ),
+ (
+ np.array(
+ [
+ [
+ [False, False, False, False],
+ [False, True, True, False],
+ [False, True, True, False],
+ [False, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ np.array([-2, 2]),
+ (4, 4),
+ np.array(
+ [
+ [
+ [False, False, False, False],
+ [False, False, False, False],
+ [False, False, False, False],
+ [True, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ DoesNotRaise(),
+ ),
+ (
+ np.array(
+ [
+ [
+ [False, False, False, False],
+ [False, True, True, False],
+ [False, True, True, False],
+ [False, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ np.array([3, 3]),
+ (4, 4),
+ np.array(
+ [
+ [
+ [False, False, False, False],
+ [False, False, False, False],
+ [False, False, False, False],
+ [False, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ DoesNotRaise(),
+ ),
+ (
+ np.array(
+ [
+ [
+ [False, False, False, False],
+ [False, True, True, False],
+ [False, True, True, False],
+ [False, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ np.array([3, 3]),
+ (6, 6),
+ np.array(
+ [
+ [
+ [False, False, False, False, False, False],
+ [False, False, False, False, False, False],
+ [False, False, False, False, False, False],
+ [False, False, False, False, False, False],
+ [False, False, False, False, True, True],
+ [False, False, False, False, True, True],
+ ]
+ ],
+ dtype=bool,
+ ),
+ DoesNotRaise(),
+ ),
+ ],
+)
+def test_move_masks(
+ masks: np.ndarray,
+ offset: np.ndarray,
+ resolution_wh: Tuple[int, int],
+ expected_result: np.ndarray,
+ exception: Exception,
+) -> None:
+ with exception:
+ result = move_masks(masks=masks, offset=offset, resolution_wh=resolution_wh)
+ np.testing.assert_array_equal(result, expected_result)
+
+
@pytest.mark.parametrize(
"xyxy, factor, expected_result, exception",
[
| `move_masks` only supports movement in positive direction
If you compare the code of `move_masks`, `move_detections` and `move_oriented_boxes`, you'll find that only the mask code restricts the offset direction:
```python
if offset[0] < 0 or offset[1] < 0:
raise ValueError(f"Offset values must be non-negative integers. Got: {offset}")
```
It should be possible to move masks in either direction, even if it results in cropping.
To complete this:
- [ ] Change the code so masks can be moved with negative offset
- [ ] Create a unit test suite for `move_masks`
---
It would help us immensely and speed up the review process if you could create a [Colab](https://colab.research.google.com/) showcasing the changes, but for this task it is optional. You may use the [Starter Template](https://colab.research.google.com/drive/1rin7WrS-UvVIe-_Gfxmu-yVslGphOq89?usp=sharing).
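For reference, a rough standalone NumPy sketch of the clipping arithmetic a bidirectional move needs (independent of the supervision implementation): negative offsets crop the source window, positive offsets shift the destination window.

```python
import numpy as np

def move_masks_sketch(masks, offset, resolution_wh):
    """Shift (N, H, W) boolean masks by [dx, dy], cropping at the borders."""
    dx, dy = int(offset[0]), int(offset[1])
    out_w, out_h = resolution_wh
    out = np.zeros((masks.shape[0], out_h, out_w), dtype=bool)

    src_x0, dst_x0 = (-dx, 0) if dx < 0 else (0, dx)
    src_y0, dst_y0 = (-dy, 0) if dy < 0 else (0, dy)
    width = min(masks.shape[2] - src_x0, out_w - dst_x0)
    height = min(masks.shape[1] - src_y0, out_h - dst_y0)

    if width > 0 and height > 0:
        out[:, dst_y0:dst_y0 + height, dst_x0:dst_x0 + width] = (
            masks[:, src_y0:src_y0 + height, src_x0:src_x0 + width]
        )
    return out

mask = np.zeros((1, 4, 4), dtype=bool)
mask[0, 1:3, 1:3] = True
print(move_masks_sketch(mask, (-1, -1), (4, 4))[0].astype(int))  # shifted up-left, cropped
```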
| Hi @LinasKo, I would like to contribute to this. Can you assign it to me? | 1,734,206,715,000 | [] | Feature Request | [
"supervision/detection/utils.py:move_masks"
] | [] | 1 |
roboflow/supervision | roboflow__supervision-1698 | 6889e33da587ed6e161334d815274d520f2844c4 | diff --git a/supervision/detection/core.py b/supervision/detection/core.py
index 74c663a89..14f4c0ef8 100644
--- a/supervision/detection/core.py
+++ b/supervision/detection/core.py
@@ -1201,6 +1201,8 @@ def __getitem__(
"""
if isinstance(index, str):
return self.data.get(index)
+ if self.is_empty():
+ return Detections.empty()
if isinstance(index, int):
index = [index]
return Detections(
| diff --git a/test/detection/test_core.py b/test/detection/test_core.py
index 61796bef2..dfa784fcb 100644
--- a/test/detection/test_core.py
+++ b/test/detection/test_core.py
@@ -223,6 +223,12 @@
None,
pytest.raises(IndexError),
),
+ (
+ Detections.empty(),
+ np.isin(Detections.empty()["class_name"], ["cat", "dog"]),
+ Detections.empty(),
+ DoesNotRaise(),
+ ), # Filter an empty detections by specific class names
],
)
def test_getitem(
| Crash when filtering empty detections: xyxy shape (0, 0, 4).
Reproduction code:
```python
import supervision as sv
import numpy as np
CLASSES = [0, 1, 2]
prediction = sv.Detections.empty()
prediction = prediction[np.isin(prediction["class_name"], CLASSES)]
```
Error:
```
Traceback (most recent call last):
File "/Users/linasko/.settler_workspace/pr/supervision-fresh/run_detections.py", line 7, in <module>
prediction = prediction[np.isin(prediction["class_name"], CLASSES)]
File "/Users/linasko/.settler_workspace/pr/supervision-fresh/supervision/detection/core.py", line 1206, in __getitem__
return Detections(
File "<string>", line 10, in __init__
File "/Users/linasko/.settler_workspace/pr/supervision-fresh/supervision/detection/core.py", line 144, in __post_init__
validate_detections_fields(
File "/Users/linasko/.settler_workspace/pr/supervision-fresh/supervision/validators/__init__.py", line 120, in validate_detections_fields
validate_xyxy(xyxy)
File "/Users/linasko/.settler_workspace/pr/supervision-fresh/supervision/validators/__init__.py", line 11, in validate_xyxy
raise ValueError(
ValueError: xyxy must be a 2D np.ndarray with shape (_, 4), but got shape (0, 0, 4)
```
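For releases that predate the fix, a minimal workaround sketch using the same `is_empty()` check the patch adds to `__getitem__`:

```python
import numpy as np
import supervision as sv

CLASSES = ["cat", "dog"]
prediction = sv.Detections.empty()

# Only apply the boolean-mask filter when there is something to filter;
# otherwise keep the empty Detections as-is.
if not prediction.is_empty():
    prediction = prediction[np.isin(prediction["class_name"], CLASSES)]

print(len(prediction))  # 0
```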
| 1,732,955,352,000 | [] | Bug Report | [
"supervision/detection/core.py:Detections.__getitem__"
] | [] | 1 |
|
UKPLab/sentence-transformers | UKPLab__sentence-transformers-3104 | 9093aa8e1c9ae4325b424ef97d8c1464050afa5c | diff --git a/sentence_transformers/cross_encoder/CrossEncoder.py b/sentence_transformers/cross_encoder/CrossEncoder.py
index c8d8d1d84..c4345017a 100644
--- a/sentence_transformers/cross_encoder/CrossEncoder.py
+++ b/sentence_transformers/cross_encoder/CrossEncoder.py
@@ -123,8 +123,7 @@ def __init__(
if device is None:
device = get_device_name()
logger.info(f"Use pytorch device: {device}")
-
- self._target_device = torch.device(device)
+ self.model.to(device)
if default_activation_function is not None:
self.default_activation_function = default_activation_function
@@ -154,11 +153,11 @@ def smart_batching_collate(self, batch: list[InputExample]) -> tuple[BatchEncodi
*texts, padding=True, truncation="longest_first", return_tensors="pt", max_length=self.max_length
)
labels = torch.tensor(labels, dtype=torch.float if self.config.num_labels == 1 else torch.long).to(
- self._target_device
+ self.model.device
)
for name in tokenized:
- tokenized[name] = tokenized[name].to(self._target_device)
+ tokenized[name] = tokenized[name].to(self.model.device)
return tokenized, labels
@@ -174,7 +173,7 @@ def smart_batching_collate_text_only(self, batch: list[InputExample]) -> BatchEn
)
for name in tokenized:
- tokenized[name] = tokenized[name].to(self._target_device)
+ tokenized[name] = tokenized[name].to(self.model.device)
return tokenized
@@ -232,7 +231,6 @@ def fit(
scaler = torch.npu.amp.GradScaler()
else:
scaler = torch.cuda.amp.GradScaler()
- self.model.to(self._target_device)
if output_path is not None:
os.makedirs(output_path, exist_ok=True)
@@ -272,7 +270,7 @@ def fit(
train_dataloader, desc="Iteration", smoothing=0.05, disable=not show_progress_bar
):
if use_amp:
- with torch.autocast(device_type=self._target_device.type):
+ with torch.autocast(device_type=self.model.device.type):
model_predictions = self.model(**features, return_dict=True)
logits = activation_fct(model_predictions.logits)
if self.config.num_labels == 1:
@@ -438,7 +436,6 @@ def predict(
pred_scores = []
self.model.eval()
- self.model.to(self._target_device)
with torch.no_grad():
for features in iterator:
model_predictions = self.model(**features, return_dict=True)
@@ -604,3 +601,21 @@ def push_to_hub(
tags=tags,
**kwargs,
)
+
+ def to(self, device: int | str | torch.device | None = None) -> None:
+ return self.model.to(device)
+
+ @property
+ def _target_device(self) -> torch.device:
+ logger.warning(
+ "`CrossEncoder._target_device` has been removed, please use `CrossEncoder.device` instead.",
+ )
+ return self.device
+
+ @_target_device.setter
+ def _target_device(self, device: int | str | torch.device | None = None) -> None:
+ self.to(device)
+
+ @property
+ def device(self) -> torch.device:
+ return self.model.device
| diff --git a/tests/test_cross_encoder.py b/tests/test_cross_encoder.py
index 9c17aa093..027751d1a 100644
--- a/tests/test_cross_encoder.py
+++ b/tests/test_cross_encoder.py
@@ -192,3 +192,35 @@ def test_bfloat16() -> None:
ranking = model.rank("Hello there!", ["Hello, World!", "Heya!"])
assert isinstance(ranking, list)
+
+
[email protected](not torch.cuda.is_available(), reason="CUDA must be available to test moving devices effectively.")
[email protected]("device", ["cpu", "cuda"])
+def test_device_assignment(device):
+ model = CrossEncoder("cross-encoder/stsb-distilroberta-base", device=device)
+ assert model.device.type == device
+
+
[email protected](not torch.cuda.is_available(), reason="CUDA must be available to test moving devices effectively.")
+def test_device_switching():
+ # test assignment using .to
+ model = CrossEncoder("cross-encoder/stsb-distilroberta-base", device="cpu")
+ assert model.device.type == "cpu"
+ assert model.model.device.type == "cpu"
+
+ model.to("cuda")
+ assert model.device.type == "cuda"
+ assert model.model.device.type == "cuda"
+
+ del model
+ torch.cuda.empty_cache()
+
+
[email protected](not torch.cuda.is_available(), reason="CUDA must be available to test moving devices effectively.")
+def test_target_device_backwards_compat():
+ model = CrossEncoder("cross-encoder/stsb-distilroberta-base", device="cpu")
+ assert model.device.type == "cpu"
+
+ assert model._target_device.type == "cpu"
+ model._target_device = "cuda"
+ assert model.device.type == "cuda"
| `CrossEncoder` is not pushed to cuda until predict is called, even if cuda is specified as device.
Hi, this is more like a question rather than a bug or issue.
When I specify the target device during initialization of any CrossEncoder, the model is not pushed to that device until the `predict` or `fit` method is called; until then, the model is kept on the CPU.
```python
from sentence_transformers import CrossEncoder
model2 = CrossEncoder("mixedbread-ai/mxbai-rerank-large-v1", device="cuda:0")
print(model2.model.device)
# cpu
```
I expect the model to be pushed to the specified device during initialization; until I call predict, it is taking up my system RAM.
Is there any high-level reason why this is the case?
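For reference, a short sketch of the behaviour the patch above introduces (using the small model from the test suite; the `cuda:0` part assumes a CUDA device is available):

```python
from sentence_transformers import CrossEncoder

# The underlying transformer is now moved at construction time...
model = CrossEncoder("cross-encoder/stsb-distilroberta-base", device="cuda:0")
print(model.device)   # cuda:0 -- no predict()/fit() call required

# ...and can be moved again later, mirroring the usual PyTorch API.
model.to("cpu")
print(model.device)   # cpu
```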
| I’m not sure of the intention behind this implementation, but I think it’s because the following code within the fit function is where the data is first transferred to the GPU.
https://github.com/UKPLab/sentence-transformers/blob/df6a8e8278b49e7ca01401f46799610106a7b640/sentence_transformers/cross_encoder/CrossEncoder.py#L235
yes, but until I call fit or predict my model is kept in the cpu, which is inconvenient IMO and also takes up the ram.
Hello!
Apologies for the delay.
This was a design decision made by my predecessor, it was also the case for Sentence Transformer models, but it has been updated there (See #2351) as I believe it's better to immediately move the model to the desired device.
I'll fix this when I start updating cross-encoders soon, although I'm also open to a PR much like #2351 in the meantime.
- Tom Aarsen
Hello @tomaarsen!
thanks for the response!
I would like to create the PR to fix this, could you please assign this to me?
Gladly! | 1,732,870,446,000 | [] | Bug Report | [
"sentence_transformers/cross_encoder/CrossEncoder.py:CrossEncoder.__init__",
"sentence_transformers/cross_encoder/CrossEncoder.py:CrossEncoder.smart_batching_collate",
"sentence_transformers/cross_encoder/CrossEncoder.py:CrossEncoder.smart_batching_collate_text_only",
"sentence_transformers/cross_encoder/CrossEncoder.py:CrossEncoder.fit",
"sentence_transformers/cross_encoder/CrossEncoder.py:CrossEncoder.predict"
] | [
"sentence_transformers/cross_encoder/CrossEncoder.py:CrossEncoder.to",
"sentence_transformers/cross_encoder/CrossEncoder.py:CrossEncoder._target_device",
"sentence_transformers/cross_encoder/CrossEncoder.py:CrossEncoder.device"
] | 5 |
UKPLab/sentence-transformers | UKPLab__sentence-transformers-3035 | b9316f90cdd8dc9e0c5c6d99353f89c4621c8828 | diff --git a/sentence_transformers/trainer.py b/sentence_transformers/trainer.py
index 3fd20eb1f..2a7907e3a 100644
--- a/sentence_transformers/trainer.py
+++ b/sentence_transformers/trainer.py
@@ -209,7 +209,7 @@ def __init__(
"args": args,
"data_collator": data_collator,
"train_dataset": train_dataset,
- "eval_dataset": eval_dataset,
+ "eval_dataset": eval_dataset if eval_dataset is not None or evaluator is None else "dummy",
"model_init": model_init,
"compute_metrics": compute_metrics,
"callbacks": callbacks,
@@ -222,6 +222,12 @@ def __init__(
else:
super_kwargs["tokenizer"] = tokenizer
super().__init__(**super_kwargs)
+ # Transformers v4.46.0 introduced a ValueError if `eval_dataset` is None while eval_strategy is not "no",
+ # but in Sentence Transformers you can also evaluate without an eval_dataset via an evaluator, so we set
+ # it to "dummy" in that case to avoid the ValueError
+ if self.eval_dataset == "dummy":
+ self.eval_dataset = None
+
# Every Sentence Transformer model can always return a loss, so we set this to True
# to avoid having to specify it in the data collator or model's forward
self.can_return_loss = True
| diff --git a/tests/test_trainer.py b/tests/test_trainer.py
index 64fbf827e..e7e9827d5 100644
--- a/tests/test_trainer.py
+++ b/tests/test_trainer.py
@@ -8,8 +8,10 @@
import pytest
import torch
+from datasets.dataset_dict import DatasetDict
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
+from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
from sentence_transformers.util import is_datasets_available, is_training_available
from tests.utils import SafeTemporaryDirectory
@@ -230,3 +232,47 @@ def test_trainer(
original_embeddings = original_model.encode("The cat is on the mat.", convert_to_tensor=True)
new_embeddings = model.encode("The cat is on the the mat.", convert_to_tensor=True)
assert not torch.equal(original_embeddings, new_embeddings)
+
+
[email protected]("use_eval_dataset", [True, False])
[email protected]("use_evaluator", [True, False])
+def test_trainer_no_eval_dataset_with_eval_strategy(
+ stsb_bert_tiny_model: SentenceTransformer,
+ stsb_dataset_dict: DatasetDict,
+ use_eval_dataset: bool,
+ use_evaluator: bool,
+ tmp_path: Path,
+) -> None:
+ # Expect a crash when `args.eval_strategy` is not "no" but neither `eval_dataset` or `evaluator` is provided
+ # Otherwise, the trainer should be created without any issues
+ model = stsb_bert_tiny_model
+ train_dataset = stsb_dataset_dict["train"].select(range(10))
+ eval_dataset = stsb_dataset_dict["validation"].select(range(10))
+ evaluator = EmbeddingSimilarityEvaluator(
+ sentences1=eval_dataset["sentence1"],
+ sentences2=eval_dataset["sentence2"],
+ scores=[score / 5 for score in eval_dataset["score"]],
+ name="stsb-validation",
+ )
+ loss = losses.CosineSimilarityLoss(model=model)
+ args = SentenceTransformerTrainingArguments(output_dir=tmp_path, eval_strategy="steps")
+
+ kwargs = {}
+ if use_eval_dataset:
+ kwargs["eval_dataset"] = eval_dataset
+ if use_evaluator:
+ kwargs["evaluator"] = evaluator
+
+ if not use_eval_dataset and not use_evaluator:
+ context = pytest.raises(ValueError, match=".*`args.eval_strategy`.*")
+ else:
+ context = nullcontext()
+
+ with context:
+ SentenceTransformerTrainer(
+ model=model,
+ args=args,
+ train_dataset=train_dataset,
+ loss=loss,
+ **kwargs,
+ )
| `eval_dataset` or `evaluator`?
The docs say
```
You can use both an eval_dataset and an evaluator, one or the other, or neither. They evaluate based on the eval_strategy and eval_steps [Training Arguments](https://www.sbert.net/docs/sentence_transformer/training_overview.html#training-arguments).
```
But when I omit `eval_dataset` and only pass an `evaluator` into my args I get this from `SentenceTransformerTrainer`:
```
File ~/code/investor_company_str_pairs/.venv/lib/python3.12/site-packages/transformers/trainer.py:419, in Trainer.__init__(self, model, args, data_collator, train_dataset, eval_dataset, processing_class, model_init, compute_loss_func, compute_metrics, callbacks, optimizers, preprocess_logits_for_metrics)
    413     raise ValueError(
    414         "When using `batch_eval_metrics`, your `compute_metrics` function must take a `compute_result`"
    415         " boolean argument which will be triggered after the last batch of the eval set to signal that the"
    416         " summary statistics should be returned by the function."
    417     )
    418 if args.eval_strategy is not None and args.eval_strategy != "no" and eval_dataset is None:
--> 419     raise ValueError(
    420         f"You have set `args.eval_strategy` to {args.eval_strategy} but you didn't pass an `eval_dataset` to `Trainer`. Either set `args.eval_strategy` to `no` or pass an `eval_dataset`. "
    421     )
    422 self.args = args
    423 self.compute_loss_func = compute_loss_func
ValueError: You have set `args.eval_strategy` to IntervalStrategy.STEPS but you didn't pass an `eval_dataset` to `Trainer`. Either set `args.eval_strategy` to `no` or pass an `eval_dataset`.
```
My versions are
```
"sentence-transformers>=3.2.1",
"transformers[torch]>=4.46.1",
```
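For context, a minimal sketch of the evaluator-only setup the docs describe (the model name and one-row datasets are placeholders); with the fix in this record it constructs cleanly even though no `eval_dataset` is passed:

```python
from datasets import Dataset
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.training_args import SentenceTransformerTrainingArguments

model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
loss = losses.CosineSimilarityLoss(model)

train_dataset = Dataset.from_dict({
    "sentence1": ["A plane is taking off."],
    "sentence2": ["An air plane is taking off."],
    "score": [1.0],
})
evaluator = EmbeddingSimilarityEvaluator(
    sentences1=["A man is playing a flute."],
    sentences2=["A man is playing a bamboo flute."],
    scores=[0.8],
    name="tiny-dev",
)

args = SentenceTransformerTrainingArguments(output_dir="out", eval_strategy="steps")
trainer = SentenceTransformerTrainer(
    model=model,
    args=args,
    train_dataset=train_dataset,
    evaluator=evaluator,   # evaluation driven by the evaluator alone, no eval_dataset
    loss=loss,
)
```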
| Downgrading to `transformers==4.44.0` seems to have fixed
Hello!
Thanks for reporting - this is indeed likely an issue with the newest `transformers` version(s). I'll try and chase it down and include a fix in the next release. I'll keep you in the loop.
- Tom Aarsen
The original PR that introduced this problem: https://github.com/huggingface/transformers/pull/33743 | 1,730,845,921,000 | [] | Bug Report | [
"sentence_transformers/trainer.py:SentenceTransformerTrainer.__init__"
] | [] | 1 |
py-pdf/pypdf | py-pdf__pypdf-2964 | 72a883886ab83ff130098dd99e6f1110b2b7b264 | diff --git a/pypdf/_writer.py b/pypdf/_writer.py
index 08f06f33e..a296caee4 100644
--- a/pypdf/_writer.py
+++ b/pypdf/_writer.py
@@ -3000,6 +3000,8 @@ def _insert_filtered_annotations(
outlist.append(self._add_object(anc))
else:
d = cast("DictionaryObject", ano["/A"])["/D"]
+ if isinstance(d, NullObject):
+ continue
if isinstance(d, str):
# it is a named dest
if str(d) in self.get_named_dest_root():
| diff --git a/tests/test_writer.py b/tests/test_writer.py
index b6df2da05..c10782262 100644
--- a/tests/test_writer.py
+++ b/tests/test_writer.py
@@ -2484,6 +2484,16 @@ def test_append_pdf_with_dest_without_page(caplog):
assert len(writer.named_destinations) == 3
[email protected]_socket
+def test_destination_is_nullobject():
+ """Tests for #2958"""
+ url = "https://github.com/user-attachments/files/17822279/C0.00.-.COVER.SHEET.pdf"
+ name = "iss2958.pdf"
+ source_data = BytesIO(get_data_from_url(url, name=name))
+ writer = PdfWriter()
+ writer.append(source_data)
+
+
@pytest.mark.enable_socket
def test_destination_page_is_none():
"""Tests for #2963"""
| PdfWriter().append throwing 'NullObject' object is not subscriptable for a specific PDF file
I am using pypdf to merge PDF files. I haven't had an issue until I hit a specific PDF file; every other PDF file (even ones from the same "bundle" [plan set]) merges fine. I have no idea why this particular file is causing an issue for the writer, or whether there is any workaround/front-end adjustment I can make to the PDF file.
## Environment
```bash
$ python -m platform
Windows-11-10.0.22631-SP0
$ python -c "import pypdf;print(pypdf._debug_versions)"
pypdf==5.1.0, crypt_provider=('local_crypt_fallback', '0.0.0'), PIL=none
```
## Code + PDF
This is a minimal, complete example that shows the issue:
```python
import glob
import pypdf
fromp = '\\\\UNC\\Path\\*.pdf' #actual path exists, other PDF files merge fine
merger = pypdf.PdfWriter()
paths = glob.glob(fromp)
paths.sort()
for pdf in paths:
merger.append(pdf) #Get error here on the problem PDF file
```
File link below:
[C0.00 - COVER SHEET.pdf](https://github.com/user-attachments/files/17822279/C0.00.-.COVER.SHEET.pdf)
## Traceback
This is the complete traceback I see:
```
Traceback (most recent call last):
File "<stdin>", line 2, in <module>
File "C:\Users\ethanwade\AppData\Roaming\Python\Python312\site-packages\pypdf\_writer.py", line 2638, in append
self.merge(
File "C:\Users\ethanwade\AppData\Roaming\Python\Python312\site-packages\pypdf\_writer.py", line 2796, in merge
lst = self._insert_filtered_annotations(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\ethanwade\AppData\Roaming\Python\Python312\site-packages\pypdf\_writer.py", line 2994, in _insert_filtered_annotations
p = self._get_cloned_page(d[0], pages, reader)
~^^^
TypeError: 'NullObject' object is not subscriptable
```
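Until a release containing the fix is available, a defensive merge loop can serve as a front-end workaround (the glob path is a placeholder): files whose annotations trip errors like this are skipped and reported instead of aborting the whole merge.

```python
import glob
from pypdf import PdfWriter

merger = PdfWriter()
skipped = []
for path in sorted(glob.glob(r"\\UNC\Path\*.pdf")):   # placeholder path
    try:
        merger.append(path)
    except Exception as exc:       # e.g. the TypeError raised by the null /GoTo destination
        skipped.append((path, exc))

with open("merged.pdf", "wb") as fh:
    merger.write(fh)
print("Skipped:", skipped)
```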
| Thanks for the report. This specific PDF declares a destination of `null`, which does not look right:
```
58 0 obj
<<
/Border [0 0 0]
/A <<
/Type /Action
/S /GoTo
/D null
>>
/NM (AJNXWZZHGGPOHNQA)
/Rect [1995 2679 2007 2706]
/Subtype /Link
>>
endobj
```
Inserting
```python
if isinstance(d, NullObject):
continue
```
after https://github.com/py-pdf/pypdf/blob/bd5b962e81c7cd9ad29aa7ff7dedb4197326ebbb/pypdf/_writer.py#L3002 seems to fix this. | 1,732,355,741,000 | [] | Bug Report | [
"pypdf/_writer.py:PdfWriter._insert_filtered_annotations"
] | [] | 1 |
py-pdf/pypdf | py-pdf__pypdf-2934 | 98aa9742e757ec428e1953ba7f47c6d7c44b331a | diff --git a/pypdf/_cmap.py b/pypdf/_cmap.py
index e8e23f9ab..54c54436e 100644
--- a/pypdf/_cmap.py
+++ b/pypdf/_cmap.py
@@ -527,6 +527,8 @@ def _type1_alternative(
v = chr(int(words[2][4:], 16))
except ValueError: # pragma: no cover
continue
+ else:
+ continue
map_dict[chr(i)] = v
int_entry.append(i)
return map_dict, int_entry
| diff --git a/tests/test_cmap.py b/tests/test_cmap.py
index 2a83bc3b5..07a778520 100644
--- a/tests/test_cmap.py
+++ b/tests/test_cmap.py
@@ -259,3 +259,13 @@ def test_too_many_differences():
name = "iss2836.pdf"
reader = PdfReader(BytesIO(get_data_from_url(url, name=name)))
assert reader.pages[0].extract_text() == ""
+
+
[email protected]_socket
+def test_iss2925():
+ url = (
+ "https://github.com/user-attachments/files/17621508/2305.09315.pdf"
+ )
+ name = "iss2925.pdf"
+ reader = PdfReader(BytesIO(get_data_from_url(url, name=name)))
+ assert "slicing on the PDG to extract the relevant contextual" in reader.pages[3].extract_text()
| Undefined variable in text extraction with version 5.1.0
Our CI-pipelines run latest version of `pypdf` in some of our example notebooks.
These fail with the following exception with version `5.1.0`
## Environment
Github runner - `ubuntu-latest`
## Code + PDF
This is a minimal, complete example that shows the issue:
```python
import requests
from io import BytesIO  # needed by download_pdf below
from pdf2image import convert_from_path
from pypdf import PdfReader
def download_pdf(url):
response = requests.get(url)
if response.status_code == 200:
return BytesIO(response.content)
else:
raise Exception(f"Failed to download PDF: Status code {response.status_code}")
def get_pdf_images(pdf_url):
# Download the PDF
pdf_file = download_pdf(pdf_url)
# Save the PDF temporarily to disk (pdf2image requires a file path)
temp_file = "temp.pdf"
with open(temp_file, "wb") as f:
f.write(pdf_file.read())
reader = PdfReader(temp_file)
page_texts = []
for page_number in range(len(reader.pages)):
page = reader.pages[page_number]
text = page.extract_text()
page_texts.append(text)
images = convert_from_path(temp_file)
assert len(images) == len(page_texts)
return (images, page_texts)
sample_pdfs = [
{
"title": "ConocoPhillips Sustainability Highlights - Nature (24-0976)",
"url": "https://static.conocophillips.com/files/resources/24-0976-sustainability-highlights_nature.pdf",
},
{
"title": "ConocoPhillips Managing Climate Related Risks",
"url": "https://static.conocophillips.com/files/resources/conocophillips-2023-managing-climate-related-risks.pdf",
},
{
"title": "ConocoPhillips 2023 Sustainability Report",
"url": "https://static.conocophillips.com/files/resources/conocophillips-2023-sustainability-report.pdf",
},
]
for pdf in sample_pdfs:
page_images, page_texts = get_pdf_images(pdf["url"])
pdf["images"] = page_images
pdf["texts"] = page_texts
```
PDFs are in the links above, but this also applies to several others.
## Traceback
This is the complete traceback I see:
```python
UnboundLocalError Traceback (most recent call last)
Cell In[8], line 2
1 for pdf in sample_pdfs:
----> 2 page_images, page_texts = get_pdf_images(pdf["url"])
3 pdf["images"] = page_images
4 pdf["texts"] = page_texts
Cell In[6], line 24, in get_pdf_images(pdf_url)
22 for page_number in range(len(reader.pages)):
23 page = reader.pages[page_number]
---> 24 text = page.extract_text()
25 page_texts.append(text)
26 images = convert_from_path("temp.pdf")
File ~/work/pyvespa/pyvespa/.venv/lib/python3.10/site-packages/pypdf/_page.py:2393, in PageObject.extract_text(self, orientations, space_width, visitor_operand_before, visitor_operand_after, visitor_text, extraction_mode, *args, **kwargs)
2390 if isinstance(orientations, int):
2391 orientations = (orientations,)
-> 2393 return self._extract_text(
2394 self,
2395 self.pdf,
2396 orientations,
2397 space_width,
2398 PG.CONTENTS,
2399 visitor_operand_before,
2400 visitor_operand_after,
2401 visitor_text,
2402 )
File ~/work/pyvespa/pyvespa/.venv/lib/python3.10/site-packages/pypdf/_page.py:1868, in PageObject._extract_text(self, obj, pdf, orientations, space_width, content_key, visitor_operand_before, visitor_operand_after, visitor_text)
1866 if "/Font" in resources_dict:
1867 for f in cast(DictionaryObject, resources_dict["/Font"]):
-> 1868 cmaps[f] = build_char_map(f, space_width, obj)
1869 cmap: Tuple[
1870 Union[str, Dict[int, str]], Dict[str, str], str, Optional[DictionaryObject]
1871 ] = (
(...)
1875 None,
1876 ) # (encoding,CMAP,font resource name,dictionary-object of font)
1877 try:
File ~/work/pyvespa/pyvespa/.venv/lib/python3.10/site-packages/pypdf/_cmap.py:33, in build_char_map(font_name, space_width, obj)
19 """
20 Determine information about a font.
21
(...)
30
31 """
32 ft: DictionaryObject = obj["/Resources"]["/Font"][font_name] # type: ignore
---> 33 font_subtype, font_halfspace, font_encoding, font_map = build_char_map_from_dict(
34 space_width, ft
35 )
36 return font_subtype, font_halfspace, font_encoding, font_map, ft
File ~/work/pyvespa/pyvespa/.venv/lib/python3.10/site-packages/pypdf/_cmap.py:56, in build_char_map_from_dict(space_width, ft)
42 """
43 Determine information about a font.
44
(...)
53
54 """
55 font_type = cast(str, ft["/Subtype"].get_object())
---> 56 encoding, map_dict = get_encoding(ft)
58 space_key_char = get_actual_str_key(" ", encoding, map_dict)
59 font_width_map = build_font_width_map(ft, space_width * 2.0)
File ~/work/pyvespa/pyvespa/.venv/lib/python3.10/site-packages/pypdf/_cmap.py:129, in get_encoding(ft)
125 def get_encoding(
126 ft: DictionaryObject
127 ) -> Tuple[Union[str, Dict[int, str]], Dict[Any, Any]]:
128 encoding = _parse_encoding(ft)
--> 129 map_dict, int_entry = _parse_to_unicode(ft)
131 # Apply rule from PDF ref 1.7 §5.9.1, 1st bullet:
132 # if cmap not empty encoding should be discarded
133 # (here transformed into identity for those characters)
134 # If encoding is a string it is expected to be an identity translation.
135 if isinstance(encoding, dict):
File ~/work/pyvespa/pyvespa/.venv/lib/python3.10/site-packages/pypdf/_cmap.py:212, in _parse_to_unicode(ft)
210 if "/ToUnicode" not in ft:
211 if ft.get("/Subtype", "") == "/Type1":
--> 212 return _type1_alternative(ft, map_dict, int_entry)
213 else:
214 return {}, []
File ~/work/pyvespa/pyvespa/.venv/lib/python3.10/site-packages/pypdf/_cmap.py:530, in _type1_alternative(ft, map_dict, int_entry)
528 except ValueError: # pragma: no cover
529 continue
--> 530 map_dict[chr(i)] = v
531 int_entry.append(i)
532 return map_dict, int_entry
UnboundLocalError: local variable 'v' referenced before assignment
```
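For context on where the traceback comes from: inside `_type1_alternative`, `v` is only assigned in the branch that successfully parses a glyph name, so any entry that takes none of those branches reaches `map_dict[chr(i)] = v` with `v` never set. Below is a stripped-down sketch of the failure mode and of the guard the fix restores; this is not pypdf's actual code, and the `uni` check and names are simplified assumptions.
```python
# Illustrative sketch only, not pypdf's real _type1_alternative.
def build_map(entries):
    map_dict = {}
    for i, token in enumerate(entries):
        if token.startswith("uni"):          # assumed shape of a mappable entry (sketch only)
            try:
                v = chr(int(token[3:], 16))  # the only branch that assigns v
            except ValueError:
                continue
        else:
            # This is the guard the fix restores: skip entries that never assign `v`.
            # Without it, execution falls through to `map_dict[chr(i)] = v` and raises
            # UnboundLocalError on the first such entry, which is exactly the traceback above.
            continue
        map_dict[chr(i)] = v
    return map_dict

build_map(["space", "uni0041"])  # with the guard: {'\x01': 'A'}; without it: UnboundLocalError
```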
| Thanks for the report. Apparently, https://github.com/py-pdf/pypdf/commit/96b46add0d61940f099f40a9676bb8fff300eaa6#diff-92b0af0cd341537aaf4208e79fac6fca9faca0e6021135693779ab5c82f5593dL535-L536 went missing when applying the latest text extraction changes and this case has never been covered by any tests.
Feel free to submit a corresponding PR including a test which would previously fail. | 1,730,742,162,000 | [] | Bug Report | [
"pypdf/_cmap.py:_type1_alternative"
] | [] | 1 |
py-pdf/pypdf | py-pdf__pypdf-2656 | c227b0c725af6d0afc88bcf89348ece9b65adcb5 | diff --git a/pypdf/_doc_common.py b/pypdf/_doc_common.py
index 84e99208a..0baab4fc7 100644
--- a/pypdf/_doc_common.py
+++ b/pypdf/_doc_common.py
@@ -492,17 +492,19 @@ def get_fields(
tree: Optional[TreeObject] = None,
retval: Optional[Dict[Any, Any]] = None,
fileobj: Optional[Any] = None,
+ stack: Optional[List[PdfObject]] = None,
) -> Optional[Dict[str, Any]]:
"""
Extract field data if this PDF contains interactive form fields.
- The *tree* and *retval* parameters are for recursive use.
+ The *tree*, *retval*, *stack* parameters are for recursive use.
Args:
- tree:
- retval:
+ tree: Current object to parse.
+ retval: In-progress list of fields.
fileobj: A file object (usually a text file) to write
a report to on all interactive form fields found.
+ stack: List of already parsed objects.
Returns:
A dictionary where each key is a field name, and each
@@ -515,6 +517,7 @@ def get_fields(
if retval is None:
retval = {}
catalog = self.root_object
+ stack = []
# get the AcroForm tree
if CD.ACRO_FORM in catalog:
tree = cast(Optional[TreeObject], catalog[CD.ACRO_FORM])
@@ -522,19 +525,15 @@ def get_fields(
return None
if tree is None:
return retval
- self._check_kids(tree, retval, fileobj)
- for attr in field_attributes:
- if attr in tree:
- # Tree is a field
- self._build_field(tree, retval, fileobj, field_attributes)
- break
-
+ assert stack is not None
if "/Fields" in tree:
fields = cast(ArrayObject, tree["/Fields"])
for f in fields:
field = f.get_object()
- self._build_field(field, retval, fileobj, field_attributes)
-
+ self._build_field(field, retval, fileobj, field_attributes, stack)
+ elif any(attr in tree for attr in field_attributes):
+ # Tree is a field
+ self._build_field(tree, retval, fileobj, field_attributes, stack)
return retval
def _get_qualified_field_name(self, parent: DictionaryObject) -> str:
@@ -557,25 +556,11 @@ def _build_field(
retval: Dict[Any, Any],
fileobj: Any,
field_attributes: Any,
+ stack: List[PdfObject],
) -> None:
- self._check_kids(field, retval, fileobj)
- try:
- key = cast(str, field["/TM"])
- except KeyError:
- try:
- if "/Parent" in field:
- key = (
- self._get_qualified_field_name(
- cast(DictionaryObject, field["/Parent"])
- )
- + "."
- )
- else:
- key = ""
- key += cast(str, field["/T"])
- except KeyError:
- # Ignore no-name field for now
- return
+ if all(attr not in field for attr in ("/T", "/TM")):
+ return
+ key = self._get_qualified_field_name(field)
if fileobj:
self._write_field(fileobj, field, field_attributes)
fileobj.write("\n")
@@ -604,14 +589,27 @@ def _build_field(
and "/Off" in retval[key]["/_States_"]
):
del retval[key]["/_States_"][retval[key]["/_States_"].index("/Off")]
+ # at last for order
+ self._check_kids(field, retval, fileobj, stack)
def _check_kids(
- self, tree: Union[TreeObject, DictionaryObject], retval: Any, fileobj: Any
+ self,
+ tree: Union[TreeObject, DictionaryObject],
+ retval: Any,
+ fileobj: Any,
+ stack: List[PdfObject],
) -> None:
+ if tree in stack:
+ logger_warning(
+ f"{self._get_qualified_field_name(tree)} already parsed", __name__
+ )
+ return
+ stack.append(tree)
if PA.KIDS in tree:
# recurse down the tree
for kid in tree[PA.KIDS]: # type: ignore
- self.get_fields(kid.get_object(), retval, fileobj)
+ kid = kid.get_object()
+ self.get_fields(kid, retval, fileobj, stack)
def _write_field(self, fileobj: Any, field: Any, field_attributes: Any) -> None:
field_attributes_tuple = FA.attributes()
| diff --git a/tests/test_reader.py b/tests/test_reader.py
index 83b61bc59..4557270bb 100644
--- a/tests/test_reader.py
+++ b/tests/test_reader.py
@@ -1530,3 +1530,30 @@ def test_damaged_pdf():
assert (
exc.value.args[0] == "Expected object ID (21 0) does not match actual (-1 -1)."
)
+
+
[email protected]_socket()
[email protected](10)
+def test_looping_form(caplog):
+ """Cf iss 2643"""
+ url = "https://github.com/py-pdf/pypdf/files/15306053/inheritance.pdf"
+ name = "iss2643.pdf"
+ reader = PdfReader(BytesIO(get_data_from_url(url, name=name)), strict=False)
+ flds = reader.get_fields()
+ assert all(
+ x in flds
+ for x in (
+ "Text10",
+ "Text10.0.0.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1",
+ "amt1.0",
+ "amt1.1",
+ "DSS#3pg3#0hgu7",
+ )
+ )
+ writer = PdfWriter(reader)
+ writer.root_object["/AcroForm"]["/Fields"][5]["/Kids"].append(
+ writer.root_object["/AcroForm"]["/Fields"][5]["/Kids"][0]
+ )
+ flds2 = writer.get_fields()
+ assert "Text68.0 already parsed" in caplog.text
+ assert list(flds.keys()) == list(flds2.keys())
| ROB: reading PDF with multiple forms is really slow
When reading a specific PDF file with a lot of form fields, `get_fields()` seems to loop for a very long time (one hour and a half on my setup).
## Environment
Which environment were you using when you encountered the problem?
```bash
$ python -m platform
Linux-5.4.0-173-generic-x86_64-with-glibc2.29
$ python -c "import pypdf;print(pypdf._debug_versions)"
pypdf==4.2.0, crypt_provider=('pycryptodome', '3.20.0'), PIL=10.3.0
```
## Code + PDF
This is a minimal, complete example that shows the issue:
```python
from pypdf import PdfReader
with open('inheritance.pdf', 'rb') as f:
doc = PdfReader(f)
doc.get_fields()
```
The PDF file (coming from https://www.nj.gov/treasury/taxation/pdf/other_forms/inheritance/itrbk.pdf):
[inheritance.pdf](https://github.com/py-pdf/pypdf/files/15306053/inheritance.pdf)
## Traceback
The `get_fields()` function seems to make a lot of recursive `_check_kids()` calls
```
Recursing down 5 kids: [IndirectObject(942, 0, 133206044573808), IndirectObject(922, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(1120, 0, 133206044573808), IndirectObject(1867, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(942, 0, 133206044573808), IndirectObject(922, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(942, 0, 133206044573808), IndirectObject(922, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(1115, 0, 133206044573808), IndirectObject(1865, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(1107, 0, 133206044573808), IndirectObject(1866, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(1120, 0, 133206044573808), IndirectObject(1867, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(942, 0, 133206044573808), IndirectObject(922, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(942, 0, 133206044573808), IndirectObject(922, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(1120, 0, 133206044573808), IndirectObject(1867, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(942, 0, 133206044573808), IndirectObject(922, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(942, 0, 133206044573808), IndirectObject(922, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(1107, 0, 133206044573808), IndirectObject(1866, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(1120, 0, 133206044573808), IndirectObject(1867, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(942, 0, 133206044573808), IndirectObject(922, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(942, 0, 133206044573808), IndirectObject(922, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(1120, 0, 133206044573808), IndirectObject(1867, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(942, 0, 133206044573808), IndirectObject(922, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(942, 0, 133206044573808), IndirectObject(922, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(1071, 0, 133206044573808), IndirectObject(1864, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(1115, 0, 133206044573808), IndirectObject(1865, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(1107, 0, 133206044573808), IndirectObject(1866, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(1120, 0, 133206044573808), IndirectObject(1867, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(942, 0, 133206044573808), IndirectObject(922, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(942, 0, 133206044573808), IndirectObject(922, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(1120, 0, 133206044573808), IndirectObject(1867, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(942, 0, 133206044573808), IndirectObject(922, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(942, 0, 133206044573808), IndirectObject(922, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(1107, 0, 133206044573808), IndirectObject(1866, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(1120, 0, 133206044573808), IndirectObject(1867, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(942, 0, 133206044573808), IndirectObject(922, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(942, 0, 133206044573808), IndirectObject(922, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(1120, 0, 133206044573808), IndirectObject(1867, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(942, 0, 133206044573808), IndirectObject(922, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(942, 0, 133206044573808), IndirectObject(922, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(1115, 0, 133206044573808), IndirectObject(1865, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(1107, 0, 133206044573808), IndirectObject(1866, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(1120, 0, 133206044573808), IndirectObject(1867, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(942, 0, 133206044573808), IndirectObject(922, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(942, 0, 133206044573808), IndirectObject(922, 0, 133206044573808)]
Recursing down 5 kids: [IndirectObject(1120, 0, 133206044573808), IndirectObject(1867, 0, 133206044573808)]
```
| Thanks for the report. That surely does not look right.
If ever useful: another (smaller) file where the same problem happens: https://www.courts.ca.gov/documents/fl410.pdf
@farjasju
You should have a look at the document with PdfBox's debug function to inspect the PDF structure. There are a lot of fields in the document (fields can be identified by the /T entry). You may see a lot of fields named 0, 1, ...: they are different from each other because their qualified names differ.
There might be some room for optimisation. Also, can you check that the document does not include some (infinite) loops, where some protection may be required?
I confirm that the file is "corrupted", with some objects being a grandchild of itself:

| 1,716,124,612,000 | [] | Performance Issue | [
"pypdf/_doc_common.py:PdfDocCommon.get_fields",
"pypdf/_doc_common.py:PdfDocCommon._build_field",
"pypdf/_doc_common.py:PdfDocCommon._check_kids"
] | [] | 3 |
py-pdf/pypdf | py-pdf__pypdf-2644 | 6226d6663eac5410416484da9348ab2c5fd71973 | diff --git a/pypdf/filters.py b/pypdf/filters.py
index d62cf7842..d573ae30e 100644
--- a/pypdf/filters.py
+++ b/pypdf/filters.py
@@ -80,14 +80,19 @@ def decompress(data: bytes) -> bytes:
try:
return zlib.decompress(data)
except zlib.error:
- d = zlib.decompressobj(zlib.MAX_WBITS | 32)
- result_str = b""
- for b in [data[i : i + 1] for i in range(len(data))]:
- try:
- result_str += d.decompress(b)
- except zlib.error:
- pass
- return result_str
+ try:
+ # For larger files, use Decompress object to enable buffered reading
+ return zlib.decompressobj().decompress(data)
+ except zlib.error:
+ # If still failed, then try with increased window size
+ d = zlib.decompressobj(zlib.MAX_WBITS | 32)
+ result_str = b""
+ for b in [data[i : i + 1] for i in range(len(data))]:
+ try:
+ result_str += d.decompress(b)
+ except zlib.error:
+ pass
+ return result_str
class FlateDecode:
| diff --git a/tests/bench.py b/tests/bench.py
index dcfc30a9b..ea5597f88 100644
--- a/tests/bench.py
+++ b/tests/bench.py
@@ -227,3 +227,15 @@ def test_image_new_property_performance(benchmark):
data = BytesIO(get_data_from_url(url, name=name))
benchmark(image_new_property, data)
+
+
+def image_extraction(data):
+ reader = PdfReader(data)
+ list(reader.pages[0].images)
+
+
[email protected]_socket()
+def test_large_compressed_image_performance(benchmark):
+ url = "https://github.com/py-pdf/pypdf/files/15306199/file_with_large_compressed_image.pdf"
+ data = BytesIO(get_data_from_url(url, name="file_with_large_compressed_image.pdf"))
+ benchmark(image_extraction, data)
diff --git a/tests/test_images.py b/tests/test_images.py
index ad694d669..e77090171 100644
--- a/tests/test_images.py
+++ b/tests/test_images.py
@@ -346,3 +346,11 @@ def test_corrupted_jpeg_iss2266(pdf, pdf_name, images, images_name, filtr):
print(fn) # noqa: T201
img = Image.open(BytesIO(zf.read(fn)))
assert image_similarity(reader.pages[p].images[i].image, img) >= 0.99
+
+
[email protected]_socket()
[email protected](30)
+def test_large_compressed_image():
+ url = "https://github.com/py-pdf/pypdf/files/15306199/file_with_large_compressed_image.pdf"
+ reader = PdfReader(BytesIO(get_data_from_url(url, name="file_with_large_compressed_image.pdf")))
+ list(reader.pages[0].images)
| Performance Bug: Reading large compressed images takes huge time to process
Reading a compressed image takes a very long time (>10 minutes) if the image is large-ish (>250 kB).
## Environment
Which environment were you using when you encountered the problem?
```bash
$ python -m platform
Linux-5.15.146.1-microsoft-standard-WSL2-x86_64-with-glibc2.35
$ python -c "import pypdf;print(pypdf._debug_versions)"
pypdf==4.2.0, crypt_provider=('pycryptodome', '3.19.0'), PIL=9.5.0
```
## Code + PDF
This is a minimal, complete example that shows the issue:
```python
import sys
from datetime import datetime
from pypdf import PdfReader
def log(msg):
print(f"[{datetime.now()}] {msg}\n")
file = sys.argv[1]
log("Reading File PyPDF..")
images = []
pypdf_reader = PdfReader(file)
for pidx, page in enumerate(pypdf_reader.pages, start=1):
log(f"Reading page {pidx}")
for iidx, image in enumerate(page.images, start=1):
log(f"Processing Image {iidx}")
images.append(image.image)
log(f"Competed page {pidx}")
log("Completed Reading File PyPDF!")
```
Attached is a sample PDF I created that can reproduce this error.
[file_with_image.pdf](https://github.com/py-pdf/pypdf/files/15305860/file_with_image.pdf)
The PDF can be added to the tests as well.
## Output
This is the complete output I see:
Look at the time difference between Image 6 and Image 7. It is close to 12 minutes
```
[2024-05-14 14:06:52.927018] Reading File PyPDF..
[2024-05-14 14:06:52.929346] Reading page 1
[2024-05-14 14:06:52.971993] Processing Image 1
[2024-05-14 14:06:52.993297] Processing Image 2
[2024-05-14 14:06:53.007304] Processing Image 3
[2024-05-14 14:06:53.021166] Processing Image 4
[2024-05-14 14:06:53.049529] Processing Image 5
[2024-05-14 14:06:53.051842] Processing Image 6
[2024-05-14 14:18:46.906472] Processing Image 7
[2024-05-14 14:18:47.088749] Processing Image 8
[2024-05-14 14:18:47.092159] Processing Image 9
[2024-05-14 14:18:47.099422] Processing Image 10
[2024-05-14 14:18:47.099513] Competed page 1
```
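For anyone hitting this on older releases: the ~12-minute gap is spent in the zlib fallback of `pypdf/filters.py`, which fed the stream to a `Decompress` object one byte at a time. The merged fix (diff above) first retries with a single `zlib.decompressobj().decompress(data)` pass (per the patch comment, to enable buffered reading for larger files) and only then falls back to the byte-by-byte loop. A condensed sketch of the tiered strategy:
```python
import zlib

# Condensed from the patched pypdf/filters.py decompress(): try the cheap paths first,
# keep the slow byte-by-byte loop only as a last resort for truly broken streams.
def decompress(data: bytes) -> bytes:
    try:
        return zlib.decompress(data)                  # fast path
    except zlib.error:
        pass
    try:
        return zlib.decompressobj().decompress(data)  # buffered single pass (the new step)
    except zlib.error:
        pass
    d = zlib.decompressobj(zlib.MAX_WBITS | 32)       # last resort: O(n) zlib calls
    out = b""
    for i in range(len(data)):
        try:
            out += d.decompress(data[i:i + 1])
        except zlib.error:
            pass
    return out
```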
| 1,715,679,264,000 | [] | Performance Issue | [
"pypdf/filters.py:decompress"
] | [] | 1 |
|
huggingface/trl | huggingface__trl-2450 | 9c5388b69e0842f76edc46a2ff9d0b51e1db4337 | diff --git a/trl/trainer/utils.py b/trl/trainer/utils.py
index d1cc3a0e9d..1122086ca9 100644
--- a/trl/trainer/utils.py
+++ b/trl/trainer/utils.py
@@ -274,7 +274,7 @@ def __call__(self, examples: list[dict[str, Any]]) -> dict[str, torch.Tensor]:
if "input_ids" not in example:
message = example[self.messages_key]
formatted_message = self.tokenizer.apply_chat_template(
- message, tokenize=False, add_generation_prompt=True
+ message, tokenize=False, add_generation_prompt=False
)
tokenized_message = self.tokenizer(
formatted_message,
| diff --git a/tests/test_utils.py b/tests/test_utils.py
index 4d26819058..87404070a8 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -205,54 +205,74 @@ def setUp(self):
ignore_index=self.ignore_index,
)
- # See https://github.com/huggingface/trl/pull/2287#discussion_r1856594421
- @unittest.skip("This test must be updated.")
def test_data_collator_for_chatml(self):
# Process the data
data = self.collator(self.examples)
+ # Verify basic shapes and types
+ self.assertIn("input_ids", data)
+ self.assertIn("attention_mask", data)
+ self.assertIn("labels", data)
+ self.assertIn("prompts", data)
+ self.assertIn("prompt_attention_mask", data)
+
# Decode input_ids and labels for verification
input_ids = data["input_ids"][0].tolist()
labels = data["labels"][0].tolist()
prompt_only = data["prompts"][0].tolist()
- # Verify that input_ids start with optional padding tokens and a single BOS token and there are no extra ones
- first_non_pad = next(token for token in input_ids if token != self.tokenizer.pad_token_id)
- self.assertEqual(
- first_non_pad, self.bos_token_id, "The first non-padding token of input_ids should be BOS token."
- )
- self.assertEqual(input_ids.count(self.bos_token_id), 1, "There should be exactly one BOS token in input_ids.")
-
- # Verify that the assistant's response token is present in input_ids and not in the prompt_only
- last_assistant_response = self.examples[0][self.messages_key][-1]["content"]
- last_assistant_response_tokens = self.tokenizer.encode(last_assistant_response, add_special_tokens=False)
- response_in_input_ids = all(token in input_ids for token in last_assistant_response_tokens)
- self.assertTrue(response_in_input_ids, "The assistant's response should be present in input_ids.")
+ # Get the last assistant's response for comparison
+ last_message = self.examples[0][self.messages_key][-1]
+ self.assertEqual(last_message["role"], "assistant", "Last message should be from assistant")
+ last_assistant_response = last_message["content"]
- # Check if the last assistant's response tokens are not in prompt_only
- response_in_prompt = all(token in prompt_only for token in last_assistant_response_tokens)
- self.assertFalse(response_in_prompt, "The assistant's response should not be present in prompt_only.")
+ # Verify that input_ids contain both prompt and response
+ decoded_input = self.tokenizer.decode(input_ids)
+ self.assertIn(last_assistant_response, decoded_input, "Input should contain assistant's response")
- # Verify that EOS token is at the end of input_ids
- self.assertEqual(input_ids[-1], self.eos_token_id, "The last token of input_ids should be EOS token.")
+ # Verify that prompts only contain the conversation up to the last response
+ decoded_prompt = self.tokenizer.decode(prompt_only)
+ self.assertNotIn(last_assistant_response, decoded_prompt, "Prompt should not contain assistant's response")
- # Verify that the labels preserved the target string (last_assistant_response)
- last_assistant_response = self.examples[0][self.messages_key][-1]["content"]
- last_assistant_response_tokens = self.tokenizer.encode(last_assistant_response, add_special_tokens=False)
+ # Verify labels are -100 for non-assistant parts
+ prompt_length = len(prompt_only)
+ self.assertTrue(
+ all(label == self.ignore_index for label in labels[:prompt_length]),
+ "Labels should be ignore_index for prompt tokens",
+ )
- # Find the start and end of the last assistant's response in the labels
- response_start = next(i for i, label in enumerate(labels) if label != self.ignore_index)
- response_end = next(i for i in range(len(labels) - 1, -1, -1) if labels[i] != self.ignore_index)
+ # Verify labels match assistant response after prompt
+ # Add a filter to remove any trailing tokens after the first <|im_end|>
+ last_assistant_response_with_end = last_assistant_response + self.tokenizer.eos_token
+ last_assistant_response_tokens = self.tokenizer.encode(
+ last_assistant_response_with_end, add_special_tokens=False
+ )
- actual_response = labels[response_start : response_end - 1]
+ response_labels = []
+ for label in labels[prompt_length:]:
+ if label == self.ignore_index:
+ continue
+ response_labels.append(label)
+ if label == self.tokenizer.convert_tokens_to_ids("<|im_end|>"):
+ break
self.assertEqual(
- actual_response,
+ response_labels,
last_assistant_response_tokens,
- "The labels should preserve the last assistant's response tokens.",
+ "Labels should match assistant response tokens",
)
- # Verify that EOS token is at the end of labels
- self.assertEqual(labels[-1], self.eos_token_id, "The last token of labels should be EOS token.")
+ # Verify there isn't a generation prompt at the end
+ generation_prompt = "<|im_start|>assistant"
+ self.assertFalse(
+ decoded_input.strip().endswith(generation_prompt),
+ f"Input should not end with generation prompt '{generation_prompt}'",
+ )
+
+ self.assertEqual(
+ response_labels,
+ last_assistant_response_tokens,
+ "Labels should match assistant response tokens",
+ )
class TestBatchGeneration(unittest.TestCase):
| DataCollatorForChatML unexpected generation prompt
### System Info
- Platform: macOS-15.1.1-arm64-arm-64bit
- Python version: 3.10.15
- PyTorch version: 2.4.1
- CUDA device(s): not available
- Transformers version: 4.45.2
- Accelerate version: 1.0.1
- Accelerate config: not found
- Datasets version: 3.0.1
- HF Hub version: 0.25.2
- TRL version: 0.12.2
- bitsandbytes version: not installed
- DeepSpeed version: 0.15.2
- Diffusers version: 0.30.3
- Liger-Kernel version: not installed
- LLM-Blender version: not installed
- OpenAI version: 1.51.2
- PEFT version: 0.13.2
### Information
- [ ] The official example scripts
- [X] My own modified scripts
### Tasks
- [ ] An officially supported task in the `examples` folder
- [X] My own task or dataset (give details below)
### Reproduction
```python
from transformers import AutoTokenizer
from trl.trainer.utils import DataCollatorForChatML
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B-Instruct")
tokenizer.pad_token_id = tokenizer.eos_token_id
tokenizer.pad_token = tokenizer.eos_token
data_collator = DataCollatorForChatML(tokenizer)
examples = [
{
"messages": [
{"role": "system", "content": "You are a professional translator."},
{"role": "user", "content": "Hello!"},
{"role": "assistant", "content": "Hi there! How can I help you today?"},
],
},
]
batch = data_collator(examples)
print(tokenizer.decode(batch["input_ids"][0]))
label = batch["labels"][0]
label[label == -100] = tokenizer.eos_token_id
print(tokenizer.decode(label))
```
outputs:
```
<|begin_of_text|><|start_header_id|>system<|end_header_id|>
Cutting Knowledge Date: December 2023
Today Date: 07 Dec 2024
You are a professional translator.<|eot_id|><|start_header_id|>user<|end_header_id|>
Hello!<|eot_id|><|start_header_id|>assistant<|end_header_id|>
Hi there! How can I help you today?<|eot_id|><|start_header_id|>assistant<|end_header_id|>
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!Hi there! How can I help you today?<|eot_id|><|start_header_id|>assistant<|end_header_id|>
```
### Expected behavior
When doing instruction tuning, the model is not expected to generate `<|start_header_id|>assistant<|end_header_id|>` after `<|eot_id|>`. The correct model response should be `Hi there! How can I help you today?<|eot_id|>`.
We should change `trl/trainer/utils.py L276:277`
```python
formatted_message = self.tokenizer.apply_chat_template(
# message, tokenize=False, add_generation_prompt=True,
message, tokenize=False, add_generation_prompt=False
)
```
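A quick, illustrative sanity check (not part of TRL's test suite) for the proposed change, reusing `tokenizer`, `data_collator` and `examples` from the reproduction above: with `add_generation_prompt=False`, the non-masked labels should decode to just the assistant reply followed by `<|eot_id|>`.
```python
# Expected behaviour after the change: labels cover only the assistant turn,
# with no dangling generation header at the end.
batch = data_collator(examples)
labels = batch["labels"][0]
target = tokenizer.decode(labels[labels != -100])
assert target.strip().endswith("<|eot_id|>")
assert "<|start_header_id|>assistant<|end_header_id|>" not in target
```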
### Checklist
- [X] I have checked that my issue isn't already filed (see [open issues](https://github.com/huggingface/trl/issues?q=is%3Aissue))
- [X] I have included my system information
- [X] Any code provided is minimal, complete, and reproducible ([more on MREs](https://docs.github.com/en/get-started/writing-on-github/working-with-advanced-formatting/creating-and-highlighting-code-blocks))
- [X] Any code provided is properly formatted in code blocks, (no screenshot, [more on code blocks](https://docs.github.com/en/get-started/writing-on-github/working-with-advanced-formatting/creating-and-highlighting-code-blocks))
- [X] Any traceback provided is complete
| 1,733,578,241,000 | [
"🐛 bug"
] | Bug Report | [
"trl/trainer/utils.py:DataCollatorForChatML.__call__"
] | [] | 1 |
|
huggingface/trl | huggingface__trl-2398 | 9368dccef68dcbaffe847cba3fc73705755dd0b4 | diff --git a/trl/models/modeling_value_head.py b/trl/models/modeling_value_head.py
index 0797794013..592879ae3e 100644
--- a/trl/models/modeling_value_head.py
+++ b/trl/models/modeling_value_head.py
@@ -69,9 +69,6 @@ class AutoModelForCausalLMWithValueHead(PreTrainedModelWrapper):
Class attributes:
- **transformers_parent_class** (`transformers.PreTrainedModel`) -- The parent class of the wrapped model. This
should be set to `transformers.AutoModelForCausalLM` for this class.
- - **lm_head_namings** (`tuple`) -- A tuple of strings that are used to identify the language model head of the
- wrapped model. This is set to `("lm_head", "embed_out", "output_layer")` for this class but can be changed
- for other models in the future
- **supported_args** (`tuple`) -- A tuple of strings that are used to identify the arguments that are supported
by the `ValueHead` class. Currently, the supported args are:
- **summary_dropout_prob** (`float`, `optional`, defaults to `None`) -- The dropout probability for the
@@ -86,7 +83,6 @@ class AutoModelForCausalLMWithValueHead(PreTrainedModelWrapper):
"""
transformers_parent_class = AutoModelForCausalLM
- lm_head_namings = ["lm_head", "embed_out", "output_layer"]
supported_args = (
"summary_dropout_prob",
"v_head_initializer_range",
@@ -106,12 +102,7 @@ def __init__(self, pretrained_model, **kwargs):
"""
super().__init__(pretrained_model, **kwargs)
v_head_kwargs, _, _ = self._split_kwargs(kwargs)
-
- if not any(hasattr(self.pretrained_model, attribute) for attribute in self.lm_head_namings):
- raise ValueError("The model does not have a language model head, please use a model that has one.")
-
self.v_head = ValueHead(self.pretrained_model.config, **v_head_kwargs)
-
self._init_weights(**v_head_kwargs)
def _init_weights(self, **kwargs):
| diff --git a/tests/test_modeling_value_head.py b/tests/test_modeling_value_head.py
index ddc4eb850c..be4932e62f 100644
--- a/tests/test_modeling_value_head.py
+++ b/tests/test_modeling_value_head.py
@@ -265,14 +265,6 @@ def test_generate(self, model_name):
# Just check if the generation works
_ = model.generate(input_ids, generation_config=generation_config)
- def test_raise_error_not_causallm(self):
- # Test with a model without a LM head
- model_id = "trl-internal-testing/tiny-GPT2LMHeadModel"
- # This should raise a ValueError
- with self.assertRaises(ValueError):
- pretrained_model = AutoModelForCausalLM.from_pretrained(model_id)
- _ = AutoModelForCausalLMWithValueHead.from_pretrained(pretrained_model.transformer)
-
def test_transformers_bf16_kwargs(self):
r"""
Test if the transformers kwargs are correctly passed
@@ -283,10 +275,11 @@ def test_transformers_bf16_kwargs(self):
for model_name in self.all_model_names:
trl_model = self.trl_model_class.from_pretrained(model_name, torch_dtype=torch.bfloat16)
- lm_head_namings = self.trl_model_class.lm_head_namings
+ lm_head_namings = ["lm_head", "embed_out", "output_layer"]
self.assertTrue(
- any(hasattr(trl_model.pretrained_model, lm_head_naming) for lm_head_naming in lm_head_namings)
+ any(hasattr(trl_model.pretrained_model, lm_head_naming) for lm_head_naming in lm_head_namings),
+ "Can't test the model because it doesn't have any of the expected lm_head namings",
)
for lm_head_naming in lm_head_namings:
| Still not supporting for ChatGLM3 maybe
### System Info
trl 0.12.1
transformers 4.46.2
### Information
- [ ] The official example scripts
- [X] My own modified scripts
### Tasks
- [ ] An officially supported task in the `examples` folder
- [X] My own task or dataset (give details below)
### Reproduction
```python
from transformers import AutoTokenizer, AutoModel, BertTokenizer, BertForSequenceClassification, Trainer, TrainingArguments, AutoModelForCausalLM
import torch
import transformers
from peft import PeftModel
from trl import PPOTrainer, PPOConfig, AutoModelForCausalLMWithValueHead
model = AutoModelForCausalLMWithValueHead.from_pretrained("THUDM/chatglm3-6b-128k", trust_remote_code=True)
```
outputs:
```
Traceback (most recent call last):
File "/home/fjy/folders/ERNIE2.0/finetune_chatglm6b.py", line 16, in <module>
model = AutoModelForCausalLMWithValueHead.from_pretrained("THUDM/chatglm3-6b-128k", trust_remote_code=True)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/fjy/anaconda3/envs/env/lib/python3.12/site-packages/trl/models/modeling_base.py", line 233, in from_pretrained
model = cls(pretrained_model, **trl_model_args)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/fjy/anaconda3/envs/env/lib/python3.12/site-packages/trl/models/modeling_value_head.py", line 107, in __init__
raise ValueError("The model does not have a language model head, please use a model that has one.")
ValueError: The model does not have a language model head, please use a model that has one.
```
### Expected behavior
The error claims that ChatGLM3 does not have a language model head. Apparently it does have one.
### Checklist
- [X] I have checked that my issue isn't already filed (see [open issues](https://github.com/huggingface/trl/issues?q=is%3Aissue))
- [X] I have included my system information
- [X] Any code provided is minimal, complete, and reproducible ([more on MREs](https://docs.github.com/en/get-started/writing-on-github/working-with-advanced-formatting/creating-and-highlighting-code-blocks))
- [X] Any code provided is properly formatted in code blocks, (no screenshot, [more on code blocks](https://docs.github.com/en/get-started/writing-on-github/working-with-advanced-formatting/creating-and-highlighting-code-blocks))
- [X] Any traceback provided is complete
| 1,732,633,882,000 | [] | Bug Report | [
"trl/models/modeling_value_head.py:AutoModelForCausalLMWithValueHead.__init__"
] | [] | 1 |
|
huggingface/trl | huggingface__trl-2344 | 21d5baf338be52e21af95d8d0c6cbc4968238181 | diff --git a/examples/scripts/ppo/ppo.py b/examples/scripts/ppo/ppo.py
index 19036ca7c1..e0dd07bb5a 100644
--- a/examples/scripts/ppo/ppo.py
+++ b/examples/scripts/ppo/ppo.py
@@ -14,6 +14,7 @@
import shutil
+import torch
from accelerate import PartialState
from datasets import load_dataset
from transformers import (
@@ -23,7 +24,15 @@
HfArgumentParser,
)
-from trl import ModelConfig, PPOConfig, PPOTrainer, ScriptArguments
+from trl import (
+ ModelConfig,
+ PPOConfig,
+ PPOTrainer,
+ ScriptArguments,
+ get_kbit_device_map,
+ get_peft_config,
+ get_quantization_config,
+)
from trl.trainer.utils import SIMPLE_CHAT_TEMPLATE
@@ -67,6 +76,20 @@
################
# Model & Tokenizer
################
+ torch_dtype = (
+ model_config.torch_dtype
+ if model_config.torch_dtype in ["auto", None]
+ else getattr(torch, model_config.torch_dtype)
+ )
+ quantization_config = get_quantization_config(model_config)
+ model_kwargs = dict(
+ revision=model_config.model_revision,
+ attn_implementation=model_config.attn_implementation,
+ torch_dtype=torch_dtype,
+ device_map=get_kbit_device_map() if quantization_config is not None else None,
+ quantization_config=quantization_config,
+ )
+
tokenizer = AutoTokenizer.from_pretrained(
model_config.model_name_or_path,
padding_side="left",
@@ -81,12 +104,18 @@
reward_model = AutoModelForSequenceClassification.from_pretrained(
training_args.reward_model_path, trust_remote_code=model_config.trust_remote_code, num_labels=1
)
- ref_policy = AutoModelForCausalLM.from_pretrained(
- training_args.sft_model_path, trust_remote_code=model_config.trust_remote_code
- )
policy = AutoModelForCausalLM.from_pretrained(
training_args.sft_model_path, trust_remote_code=model_config.trust_remote_code
)
+
+ peft_config = get_peft_config(model_config)
+ if peft_config is None:
+ ref_policy = AutoModelForCausalLM.from_pretrained(
+ training_args.sft_model_path, trust_remote_code=model_config.trust_remote_code
+ )
+ else:
+ ref_policy = None
+
################
# Dataset
################
@@ -131,6 +160,7 @@ def tokenize(element):
value_model=value_model,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
+ peft_config=peft_config,
)
trainer.train()
diff --git a/examples/scripts/ppo/ppo_tldr.py b/examples/scripts/ppo/ppo_tldr.py
index fd20d6693d..73ea5cd852 100644
--- a/examples/scripts/ppo/ppo_tldr.py
+++ b/examples/scripts/ppo/ppo_tldr.py
@@ -14,6 +14,7 @@
import shutil
+import torch
from accelerate import PartialState
from datasets import load_dataset
from transformers import (
@@ -23,16 +24,24 @@
HfArgumentParser,
)
-from trl import ModelConfig, PPOConfig, PPOTrainer, ScriptArguments
+from trl import (
+ ModelConfig,
+ PPOConfig,
+ PPOTrainer,
+ ScriptArguments,
+ get_kbit_device_map,
+ get_peft_config,
+ get_quantization_config,
+)
from trl.trainer.utils import SIMPLE_CHAT_TEMPLATE
"""
python examples/scripts/ppo/ppo_tldr.py \
- --dataset_name trl-internal-testing/tldr-preference-sft-trl-style
+ --dataset_name trl-internal-testing/tldr-preference-sft-trl-style \
--dataset_test_split validation \
--learning_rate 3e-6 \
- --output_dir models/minimal/ppo \
+ --output_dir models/minimal/ppo_tldr \
--per_device_train_batch_size 1 \
--gradient_accumulation_steps 64 \
--total_episodes 30000 \
@@ -41,11 +50,13 @@
--reward_model_path cleanrl/EleutherAI_pythia-1b-deduped__reward__tldr \
--missing_eos_penalty 1.0 \
--stop_token eos \
- --response_length 53
+ --response_length 53 \
+ --eval_strategy steps \
+ --eval_steps 100
accelerate launch --config_file examples/accelerate_configs/deepspeed_zero2.yaml \
examples/scripts/ppo/ppo_tldr.py \
- --dataset_name trl-internal-testing/tldr-preference-sft-trl-style
+ --dataset_name trl-internal-testing/tldr-preference-sft-trl-style \
--dataset_test_split validation \
--output_dir models/minimal/ppo_tldr \
--learning_rate 3e-6 \
@@ -57,7 +68,9 @@
--reward_model_path cleanrl/EleutherAI_pythia-1b-deduped__reward__tldr \
--local_rollout_forward_batch_size 16 \
--missing_eos_penalty 1.0 \
- --stop_token eos
+ --stop_token eos \
+ --eval_strategy steps \
+ --eval_steps 100
"""
@@ -70,6 +83,20 @@
################
# Model & Tokenizer
################
+ torch_dtype = (
+ model_config.torch_dtype
+ if model_config.torch_dtype in ["auto", None]
+ else getattr(torch, model_config.torch_dtype)
+ )
+ quantization_config = get_quantization_config(model_config)
+ model_kwargs = dict(
+ revision=model_config.model_revision,
+ attn_implementation=model_config.attn_implementation,
+ torch_dtype=torch_dtype,
+ device_map=get_kbit_device_map() if quantization_config is not None else None,
+ quantization_config=quantization_config,
+ )
+
tokenizer = AutoTokenizer.from_pretrained(
model_config.model_name_or_path,
padding_side="left",
@@ -84,12 +111,18 @@
reward_model = AutoModelForSequenceClassification.from_pretrained(
training_args.reward_model_path, trust_remote_code=model_config.trust_remote_code, num_labels=1
)
- ref_policy = AutoModelForCausalLM.from_pretrained(
- training_args.sft_model_path, trust_remote_code=model_config.trust_remote_code
- )
policy = AutoModelForCausalLM.from_pretrained(
training_args.sft_model_path, trust_remote_code=model_config.trust_remote_code
)
+
+ peft_config = get_peft_config(model_config)
+ if peft_config is None:
+ ref_policy = AutoModelForCausalLM.from_pretrained(
+ training_args.sft_model_path, trust_remote_code=model_config.trust_remote_code
+ )
+ else:
+ ref_policy = None
+
################
# Dataset
################
@@ -138,6 +171,7 @@ def tokenize(element):
value_model=value_model,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
+ peft_config=peft_config,
)
trainer.train()
diff --git a/trl/trainer/ppo_config.py b/trl/trainer/ppo_config.py
index 25c2916025..465048184b 100644
--- a/trl/trainer/ppo_config.py
+++ b/trl/trainer/ppo_config.py
@@ -14,6 +14,7 @@
import os
from dataclasses import dataclass
+from typing import Optional
from ..trainer.utils import OnPolicyConfig
@@ -32,6 +33,10 @@ class PPOConfig(OnPolicyConfig):
Name of this experiment.
reward_model_path (`str`, *optional*, defaults to `"EleutherAI/pythia-160m"`):
Path to the reward model.
+ model_adapter_name (`Optional[str]`, *optional*, defaults to `None`):
+ Name of the train target PEFT adapter, when using LoRA with multiple adapters.
+ ref_adapter_name (`Optional[str]`, *optional*, defaults to `None`):
+ Name of the reference PEFT adapter, when using LoRA with multiple adapters.
num_ppo_epochs (`int`, *optional*, defaults to `4`):
Number of epochs to train.
whiten_rewards (`bool`, *optional*, defaults to `False`):
@@ -52,6 +57,8 @@ class PPOConfig(OnPolicyConfig):
exp_name: str = os.path.basename(__file__)[: -len(".py")]
reward_model_path: str = "EleutherAI/pythia-160m"
+ model_adapter_name: Optional[str] = None
+ ref_adapter_name: Optional[str] = None
num_ppo_epochs: int = 4
whiten_rewards: bool = False
kl_coef: float = 0.05
diff --git a/trl/trainer/ppo_trainer.py b/trl/trainer/ppo_trainer.py
index 8e2c514a54..2da4eaf981 100644
--- a/trl/trainer/ppo_trainer.py
+++ b/trl/trainer/ppo_trainer.py
@@ -18,6 +18,7 @@
import textwrap
import time
from collections import defaultdict
+from contextlib import contextmanager, nullcontext
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
@@ -44,9 +45,11 @@
from transformers.integrations import get_reporting_integration_callbacks
from transformers.trainer import DEFAULT_CALLBACKS, DEFAULT_PROGRESS_CALLBACK
from transformers.trainer_callback import CallbackHandler, ExportableState, PrinterCallback
+from transformers.utils import is_peft_available
from transformers.utils.deprecation import deprecate_kwarg
from ..core import masked_mean, masked_whiten
+from ..models import create_reference_model
from ..models.utils import unwrap_model_for_generation
from ..trainer.utils import (
OnlineTrainerState,
@@ -61,9 +64,12 @@
truncate_response,
)
from .ppo_config import PPOConfig
-from .utils import generate_model_card
+from .utils import generate_model_card, peft_module_casting_to_bf16
+if is_peft_available():
+ from peft import PeftConfig, PeftModel, get_peft_model
+
if is_wandb_available():
import wandb
@@ -99,7 +105,7 @@ def __init__(
Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin]
],
policy: nn.Module,
- ref_policy: nn.Module,
+ ref_policy: Optional[nn.Module],
reward_model: nn.Module,
train_dataset: Dataset,
value_model: Optional[nn.Module] = None,
@@ -108,11 +114,12 @@ def __init__(
# less commonly used
optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
callbacks: Optional[List[TrainerCallback]] = None,
+ peft_config: Optional["PeftConfig"] = None,
) -> None:
if ref_policy is policy:
raise ValueError(
"`policy` and `ref_policy` cannot be the same object. If you want `ref_policy` to be the "
- "same as `policy`, you must mass a copy of it, or `None` if you use peft."
+ "same as `policy`, you must make a copy of it, or `None` if you use peft."
)
self.args = config
@@ -125,7 +132,32 @@ def __init__(
)
self.policy.generation_config.pad_token_id = None # generate tokens without truncation / padding
- self.ref_policy = ref_policy
+ # peft support
+ if not is_peft_available() and peft_config is not None:
+ raise ImportError(
+ "PEFT is not installed and you passed a `peft_config` in the trainer's kwargs, please install it to use the PEFT models"
+ )
+ elif is_peft_available() and peft_config is not None:
+ # if model is a peft model and we have a peft_confg, we merge and unload it first
+ if isinstance(self.policy, PeftModel):
+ self.policy = self.policy.merge_and_unload()
+
+ # get peft model with the given config
+ self.policy = get_peft_model(self.policy, peft_config)
+ if args.bf16 and getattr(self.policy, "is_loaded_in_4bit", False):
+ peft_module_casting_to_bf16(self.policy)
+
+ self.is_peft_model = is_peft_available() and isinstance(self.policy, PeftModel)
+ self.model_adapter_name = args.model_adapter_name
+ self.ref_adapter_name = args.ref_adapter_name
+
+ if ref_policy:
+ self.ref_policy = ref_policy
+ elif self.is_peft_model:
+ self.ref_policy = None
+ else:
+ self.ref_policy = create_reference_model(self.policy)
+
self.reward_model = reward_model
self.train_dataset = train_dataset
self.train_dataset_len = len(train_dataset)
@@ -174,12 +206,13 @@ def __init__(
#########
# setup model, optimizer, and others
#########
- for module in [policy, ref_policy, value_model, reward_model]:
- disable_dropout_in_model(module)
+ for module in [self.policy, self.ref_policy, self.value_model, self.reward_model]:
+ if module is not None:
+ disable_dropout_in_model(module)
if args.stop_token and args.stop_token == "eos":
args.stop_token_id = processing_class.eos_token_id
- self.model = PolicyAndValueWrapper(policy, value_model)
- self.model.config = policy.config # needed for pushing to hub
+ self.model = PolicyAndValueWrapper(self.policy, self.value_model)
+ self.model.config = self.policy.config # needed for pushing to hub
self.create_optimizer_and_scheduler(
num_training_steps=args.num_total_batches
) # note that we are calling `self.lr_scheduler.step()` manually only at the batch level
@@ -244,11 +277,20 @@ def __init__(
self.reward_model = prepare_deepspeed(
self.reward_model, args.per_device_train_batch_size, args.fp16, args.bf16
)
- self.ref_policy = prepare_deepspeed(
- self.ref_policy, args.per_device_train_batch_size, args.fp16, args.bf16
- )
+
+ if self.ref_policy is None:
+ if not self.is_peft_model:
+ raise ValueError("No reference model and model is not a Peft model.")
+ else:
+ self.ref_policy = prepare_deepspeed(
+ self.ref_policy, args.per_device_train_batch_size, args.fp16, args.bf16
+ )
else:
- self.ref_policy = self.ref_policy.to(self.accelerator.device)
+ if self.ref_policy is None:
+ if not self.is_peft_model:
+ raise ValueError("No reference model and model is not a Peft model.")
+ else:
+ self.ref_policy = self.ref_policy.to(self.accelerator.device)
self.reward_model = self.reward_model.to(self.accelerator.device)
def get_train_dataloader(self) -> DataLoader:
@@ -257,6 +299,18 @@ def get_train_dataloader(self) -> DataLoader:
def get_eval_dataloader(self) -> DataLoader:
return self.eval_dataloader
+ @contextmanager
+ def null_ref_context(self):
+ """Context manager for handling null reference model (that is, peft adapter manipulation)."""
+ with self.accelerator.unwrap_model(
+ self.model.policy
+ ).disable_adapter() if self.is_peft_model and not self.ref_adapter_name else nullcontext():
+ if self.ref_adapter_name:
+ self.model.policy.set_adapter(self.ref_adapter_name)
+ yield
+ if self.ref_adapter_name:
+ self.model.policy.set_adapter(self.model_adapter_name or "default")
+
def save_model(self, output_dir: Optional[str] = None, _internal_call: bool = False):
backup_model = self.model
self.model = self.model.policy # save only the policy
@@ -368,7 +422,11 @@ def repeat_generator():
del logits, all_logprob
torch.cuda.empty_cache()
- ref_output = forward(ref_policy, query_response, processing_class.pad_token_id)
+ if ref_policy is None:
+ with self.null_ref_context():
+ ref_output = forward(model.policy, query_response, processing_class.pad_token_id)
+ else:
+ ref_output = forward(ref_policy, query_response, processing_class.pad_token_id)
ref_logits = ref_output.logits[:, context_length - 1 : -1]
ref_logits /= args.temperature + 1e-7
ref_all_logprob = F.log_softmax(ref_logits, dim=-1)
| diff --git a/tests/test_ppo_trainer.py b/tests/test_ppo_trainer.py
index c530f05b86..aacf0f2a1c 100644
--- a/tests/test_ppo_trainer.py
+++ b/tests/test_ppo_trainer.py
@@ -14,6 +14,8 @@
import platform
import subprocess
+from transformers.testing_utils import require_peft
+
def test():
command = """\
@@ -65,3 +67,34 @@ def test_num_train_epochs():
shell=True,
check=True,
)
+
+
+@require_peft
+def test_peft_support():
+ command = """\
+python examples/scripts/ppo/ppo.py \
+ --dataset_name trl-internal-testing/descriptiveness-sentiment-trl-style \
+ --dataset_train_split descriptiveness \
+ --learning_rate 3e-6 \
+ --output_dir models/minimal/ppo \
+ --per_device_train_batch_size 4 \
+ --gradient_accumulation_steps 1 \
+ --total_episodes 10 \
+ --model_name_or_path EleutherAI/pythia-14m \
+ --missing_eos_penalty 1.0 \
+ --save_strategy no \
+ --stop_token eos \
+ --use_peft \
+ --lora_r 32 \
+ --lora_alpha 16 \
+ --lora_target_modules query_key_value dense
+"""
+ if platform.system() == "Windows":
+ # windows CI does not work with subprocesses for some reason
+ # e.g., https://github.com/huggingface/trl/actions/runs/9600036224/job/26475286210?pr=1743
+ return
+ subprocess.run(
+ command,
+ shell=True,
+ check=True,
+ )
| How to Add PEFT to PPO Trainer or PPO Config
I am trying to implement RLHF through PPO.
May I ask how I can use PEFT in RLHF/PPO? I can see this parameter in DPOTrainer; however, I cannot see it in PPOTrainer.
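For reference, based on the diff above, the script-level wiring after PEFT support lands looks roughly like this (`training_args`, `tokenizer`, `policy`, `reward_model`, `value_model` and `train_dataset` are assumed to be prepared as in the existing `ppo.py` example; the LoRA settings mirror the test added in this PR and are placeholders, not recommendations):
```python
# Rough usage sketch: with a peft_config, ref_policy can stay None and the trainer
# computes reference logprobs by temporarily disabling the adapters.
from peft import LoraConfig
from trl import PPOTrainer

peft_config = LoraConfig(task_type="CAUSAL_LM", r=32, lora_alpha=16,
                         target_modules=["query_key_value", "dense"])

trainer = PPOTrainer(
    config=training_args,
    processing_class=tokenizer,
    policy=policy,            # plain AutoModelForCausalLM; wrapped with get_peft_model internally
    ref_policy=None,          # None + peft_config -> no second full model copy is needed
    reward_model=reward_model,
    value_model=value_model,
    train_dataset=train_dataset,
    peft_config=peft_config,
)
trainer.train()
```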
| PPO does not yet support PEFT. But it may be a good enhancement.
Hi @qgallouedec, I'd love to contribute to this enhancement! I noticed that this issue has been open for three months, and I'd like to help bring it forward.
Is there anything specific I should be mindful of as I start on this? Any guidance or notes on the current status would be greatly appreciated.
Thank you! | 1,731,310,921,000 | [] | Feature Request | [
"trl/trainer/ppo_trainer.py:PPOTrainer.__init__",
"trl/trainer/ppo_trainer.py:PPOTrainer.train"
] | [
"trl/trainer/ppo_trainer.py:PPOTrainer.null_ref_context"
] | 2 |
rq/rq | rq__rq-2138 | bb7f34053730da924486f97baa0d34fee9b1918b | diff --git a/rq/job.py b/rq/job.py
index 918f7fdc1..7a964aef0 100644
--- a/rq/job.py
+++ b/rq/job.py
@@ -729,6 +729,10 @@ def set_id(self, value: str) -> None:
"""
if not isinstance(value, str):
raise TypeError('id must be a string, not {0}'.format(type(value)))
+
+ if ":" in value:
+ raise ValueError('id must not contain ":"')
+
self._id = value
def heartbeat(self, timestamp: datetime, ttl: int, pipeline: Optional['Pipeline'] = None, xx: bool = False):
| diff --git a/tests/test_job.py b/tests/test_job.py
index 36c356e50..9f93cfbf8 100644
--- a/tests/test_job.py
+++ b/tests/test_job.py
@@ -900,6 +900,13 @@ def test_create_job_with_id(self):
self.assertRaises(TypeError, queue.enqueue, fixtures.say_hello, job_id=1234)
+ def test_create_job_with_invalid_id(self):
+ """test creating jobs with a custom invalid ID (with character :)"""
+ queue = Queue(connection=self.connection)
+
+ with self.assertRaises(ValueError):
+ queue.enqueue(fixtures.say_hello, job_id="1234:4321")
+
def test_create_job_with_async(self):
"""test creating jobs with async function"""
queue = Queue(connection=self.connection)
| Dangling job occurs when job_id contains the character ":"
Hi,
After updating to v2.0.0, job_id cannot contain the character ":"; otherwise, the job is never picked up or started by a worker.
I'll investigate this over the next few days, but I suspect that [PR #1964](https://github.com/rq/rq/pull/1964) might restrict the job_id format. We may need to add validation when creating jobs to address this.
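For the record, the fix that was merged adds exactly that validation to `Job.set_id`, so the failure becomes explicit at enqueue time. A small illustrative sketch of the new behaviour and an easy workaround; the composite id and the `print` task are made up for the example:
```python
# After the fix: a job_id containing ":" raises ValueError instead of producing a
# dangling job. If an existing id scheme uses ":", substitute the separator first.
from redis import Redis
from rq import Queue

q = Queue(connection=Redis())
job_id = "import:2024-11-05"   # hypothetical composite id

try:
    q.enqueue(print, "hello", job_id=job_id)
except ValueError:
    q.enqueue(print, "hello", job_id=job_id.replace(":", "-"))
```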
| Thanks, mind opening a PR when you're free? | 1,730,770,766,000 | [] | Bug Report | [
"rq/job.py:Job.set_id"
] | [] | 1 |
cython/cython | cython__cython-6508 | 033ae2eb614f9fcb526c6049a751706df561db88 | diff --git a/Cython/Compiler/FusedNode.py b/Cython/Compiler/FusedNode.py
index 1b0ceb72ca0..9cb5f4e9059 100644
--- a/Cython/Compiler/FusedNode.py
+++ b/Cython/Compiler/FusedNode.py
@@ -643,11 +643,25 @@ def _fused_signature_index(self, pyx_code):
Generate Cython code for constructing a persistent nested dictionary index of
fused type specialization signatures.
"""
+ # Note on thread-safety:
+ # Filling in "fused_sigindex" should only happen once. However, in a multi-threaded
+ # environment it's possible that multiple threads can all start to fill it in
+ # independently (especially on freehtreading builds).
+ # Therefore:
+ # * "_fused_sigindex_ref" is a list of length 1 where the first element is either None,
+ # or a dictionary of signatures to lookup.
+ # * We rely on being able to get/set list elements atomically (which is true on
+ # freethreading and regular Python).
+ # * It doesn't really matter if multiple threads start generating their own version
+ # of this - the contents will end up the same. The main point is that no thread
+ # sees a half filled-in sigindex
pyx_code.put_chunk(
"""
- if not _fused_sigindex:
+ fused_sigindex = <dict> _fused_sigindex_ref[0]
+ if fused_sigindex is None:
+ fused_sigindex = {}
for sig in <dict> signatures:
- sigindex_node = <dict> _fused_sigindex
+ sigindex_node = fused_sigindex
*sig_series, last_type = sig.strip('()').split('|')
for sig_type in sig_series:
if sig_type not in sigindex_node:
@@ -655,6 +669,7 @@ def _fused_signature_index(self, pyx_code):
else:
sigindex_node = <dict> sigindex_node[sig_type]
sigindex_node[last_type] = sig
+ _fused_sigindex_ref[0] = fused_sigindex
"""
)
@@ -694,7 +709,7 @@ def make_fused_cpdef(self, orig_py_func, env, is_def):
pyx_code.put_chunk(
"""
- def __pyx_fused_cpdef(signatures, args, kwargs, defaults, _fused_sigindex={}):
+ def __pyx_fused_cpdef(signatures, args, kwargs, defaults, _fused_sigindex_ref=[None]):
# FIXME: use a typed signature - currently fails badly because
# default arguments inherit the types we specify here!
@@ -773,7 +788,7 @@ def __pyx_fused_cpdef(signatures, args, kwargs, defaults, _fused_sigindex={}):
pyx_code.put_chunk(
"""
sigindex_matches = []
- sigindex_candidates = [_fused_sigindex]
+ sigindex_candidates = [fused_sigindex]
for dst_type in dest_sig:
found_matches = []
| diff --git a/tests/run/fused_def.pyx b/tests/run/fused_def.pyx
index 96fcb93ea2d..2da9ddb89cb 100644
--- a/tests/run/fused_def.pyx
+++ b/tests/run/fused_def.pyx
@@ -461,3 +461,53 @@ cdef class HasBound:
func = bind_me[float]
func_fused = bind_me
+
+
+
+ctypedef fused IntOrFloat1:
+ int
+ float
+
+ctypedef fused IntOrFloat2:
+ int
+ float
+
+ctypedef fused IntOrFloat3:
+ int
+ float
+
+def really_simple_fused_function(IntOrFloat1 a, IntOrFloat2 b, IntOrFloat3 c):
+ # Don't use this function for anything except the thread safety stress test.
+ # The first call should be from that.
+ return (a + 1) * 2 + (b*c)
+
+def run_really_simple_fused_function(start_barrier, n_iters, failed_list):
+ # Maximize the chance of failure by waiting until all threads are ready to start
+ args = [ n if n % 2 else float(n) for n in range(n_iters) ]
+ try:
+ start_barrier.wait()
+ for a in args:
+ really_simple_fused_function(a, a, a)
+ except:
+ failed_list.append(True)
+
+
+def stress_test_thread_safety(n_threads):
+ """
+ >>> stress_test_thread_safety(20)
+ """
+ from threading import Barrier, Thread
+ start_barrier = Barrier(n_threads)
+
+ failed_list = []
+
+ threads = [
+ Thread(
+ target=run_really_simple_fused_function,
+ args=[start_barrier, 30, failed_list]
+ ) for _ in range(n_threads) ]
+ for t in threads:
+ t.start()
+ for t in threads:
+ t.join()
+ assert not failed_list, len(failed_list)
| [BUG] No matching signature found on free-threaded Python code generation
### Describe the bug
Whilst testing SciPy for issues against free-threaded CPython 3.13 https://github.com/scipy/scipy/pull/21496, we have observed several instances of errors on functions that fuse dtypes, for some reason, these errors are being flagged when running tests under parallel conditions, and not under single threading.
```
__________________________________________________________________ test_label03[numpy] __________________________________________________________________
scipy/ndimage/tests/test_measurements.py:169: in test_label03
out, n = ndimage.label(data)
data = array([1.])
xp = <module 'numpy' from '/home/rgommers/code/pixi-dev-scipystack/scipy/.pixi/envs/free-threading/lib/python3.13t/site-packages/numpy/__init__.py'>
scipy/ndimage/_support_alternative_backends.py:37: in wrapper
result = func(*args, **kwds)
args = (array([1.]),)
delegator = <function binary_fill_holes_signature at 0x385421dfa00>
func = <function label at 0x385421de9c0>
kwds = {}
module_name = 'ndimage'
xp = <module 'scipy._lib.array_api_compat.numpy' from '/home/rgommers/code/pixi-dev-scipystack/scipy/scipy/build-nogil-install/lib/python3.13t/site-packages/scipy/_lib/array_api_compat/numpy/__init__.py'>
scipy/ndimage/_measurements.py:218: in label
max_label = _ni_label._label(input, structure, output)
caller_provided_output = False
ii = 3
input = array([1.])
need_64bits = False
output = array([-889028608], dtype=int32)
structure = array([ True, True, True])
_ni_label.pyx:200: in _ni_label._label
???
_ni_label.pyx:237: in _ni_label._label
???
_ni_label.pyx:93: in _ni_label.__pyx_fused_cpdef
???
E TypeError: No matching signature found
```
The generated C code for the Cython instance is described in the following comment: https://github.com/scipy/scipy/pull/21496#discussion_r1850947457
I will provide a minimal reproducible example soon.
cc @rgommers
### Code to reproduce the behaviour:
_No response_
### Expected behaviour
_No response_
### OS
Linux
### Python version
3.13t
### Cython version
_No response_
### Additional context
_No response_
| There's definitely some caching inside the fused dispatch function (so it's quicker to match on subsequent calls). On a quick look, I'm not convinced that caching is currently thread-safe. I'll look into it.
(There's also some code that imports numpy and gets the dtype. That's less likely to be an issue, but probably still needs a close look)
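A minimal pure-Python sketch of the direction a fix can take (an illustration only, not the generated Cython code): use a one-element list as an atomic reference cell, so a reader either sees `None` or a fully built index, never a half-filled dict.
```python
_sigindex_ref = [None]  # one-element list used as an atomic reference cell

def lookup(signatures, key):
    sigindex = _sigindex_ref[0]          # atomic read of the list slot
    if sigindex is None:
        # Build a complete index locally; concurrent builders produce identical contents,
        # so it does not matter which thread's dict "wins".
        sigindex = {sig: sig for sig in signatures}
        _sigindex_ref[0] = sigindex      # publish with a single atomic list-item store
    return sigindex.get(key)
```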
> The generated C code for the Cython instance is described in the following comment: [...]
That's probably actually a lower level than it's worth looking at. In this case it goes "fused function"->"generated Cython code"->"generated C code" and the issue is in the intermediate Cython code.
I think this should apply to any cpdef or def fused function so there isn't too much value trying to prepare a specific reproducible example. | 1,732,362,796,000 | [
"defect",
"Code Generation",
"freethreading CPython"
] | Bug Report | [
"Cython/Compiler/FusedNode.py:FusedCFuncDefNode._fused_signature_index",
"Cython/Compiler/FusedNode.py:FusedCFuncDefNode.make_fused_cpdef"
] | [] | 2 |
tobymao/sqlglot | tobymao__sqlglot-4524 | 992f6e9fc867aa5ad60a255be593b8982a0fbcba | diff --git a/sqlglot/optimizer/qualify_columns.py b/sqlglot/optimizer/qualify_columns.py
index ceb9ceb44f..871bb365ba 100644
--- a/sqlglot/optimizer/qualify_columns.py
+++ b/sqlglot/optimizer/qualify_columns.py
@@ -898,10 +898,22 @@ def get_source_columns(self, name: str, only_visible: bool = False) -> t.Sequenc
for (name, alias) in itertools.zip_longest(columns, column_aliases)
]
+ pseudocolumns = self._get_source_pseudocolumns(name)
+ if pseudocolumns:
+ columns = list(columns)
+ columns.extend(c for c in pseudocolumns if c not in columns)
+
self._get_source_columns_cache[cache_key] = columns
return self._get_source_columns_cache[cache_key]
+ def _get_source_pseudocolumns(self, name: str) -> t.Sequence[str]:
+ if self.schema.dialect == "snowflake" and self.scope.expression.args.get("connect"):
+ # When there is a CONNECT BY clause, there is only one table being scanned
+ # See: https://docs.snowflake.com/en/sql-reference/constructs/connect-by
+ return ["LEVEL"]
+ return []
+
def _get_all_source_columns(self) -> t.Dict[str, t.Sequence[str]]:
if self._source_columns is None:
self._source_columns = {
| diff --git a/tests/fixtures/optimizer/qualify_columns.sql b/tests/fixtures/optimizer/qualify_columns.sql
index eeaf8b3555..ecb6eee5ce 100644
--- a/tests/fixtures/optimizer/qualify_columns.sql
+++ b/tests/fixtures/optimizer/qualify_columns.sql
@@ -205,6 +205,30 @@ SELECT x.a + 1 AS i, missing_column AS missing_column FROM x AS x;
SELECT s, arr1, arr2 FROM arrays_test LEFT ARRAY JOIN arr1, arrays_test.arr2;
SELECT arrays_test.s AS s, arrays_test.arr1 AS arr1, arrays_test.arr2 AS arr2 FROM arrays_test AS arrays_test LEFT ARRAY JOIN arrays_test.arr1, arrays_test.arr2;
+# execute: false
+# dialect: snowflake
+WITH employees AS (
+ SELECT *
+ FROM (VALUES ('President', 1, NULL),
+ ('Vice President Engineering', 10, 1),
+ ('Programmer', 100, 10),
+ ('QA Engineer', 101, 10),
+ ('Vice President HR', 20, 1),
+ ('Health Insurance Analyst', 200, 20)
+ ) AS t(title, employee_ID, manager_ID)
+)
+SELECT
+ employee_ID,
+ manager_ID,
+ title,
+ level
+FROM employees
+START WITH title = 'President'
+CONNECT BY manager_ID = PRIOR employee_id
+ORDER BY
+ employee_ID NULLS LAST;
+WITH EMPLOYEES AS (SELECT T.TITLE AS TITLE, T.EMPLOYEE_ID AS EMPLOYEE_ID, T.MANAGER_ID AS MANAGER_ID FROM (VALUES ('President', 1, NULL), ('Vice President Engineering', 10, 1), ('Programmer', 100, 10), ('QA Engineer', 101, 10), ('Vice President HR', 20, 1), ('Health Insurance Analyst', 200, 20)) AS T(TITLE, EMPLOYEE_ID, MANAGER_ID)) SELECT EMPLOYEES.EMPLOYEE_ID AS EMPLOYEE_ID, EMPLOYEES.MANAGER_ID AS MANAGER_ID, EMPLOYEES.TITLE AS TITLE, EMPLOYEES.LEVEL AS LEVEL FROM EMPLOYEES AS EMPLOYEES START WITH EMPLOYEES.TITLE = 'President' CONNECT BY EMPLOYEES.MANAGER_ID = PRIOR EMPLOYEES.EMPLOYEE_ID ORDER BY EMPLOYEE_ID;
+
--------------------------------------
-- Derived tables
--------------------------------------
| `sqlglot.optimizer.qualify.qualify` fails when using `CONNECT BY` + `level` from a CTE
## Fully reproducible code snippet
In Snowflake, the `SELECT FROM <data_source> START WITH ... CONNECT BY` construct allows a `<level_expression>`: in practice, a `level` pseudocolumn can be selected in addition to the existing columns:
```sql
SELECT
employee_ID,
manager_ID,
title,
level --<-- This isn't a column from "employees", but a column allowed by Snowflake in `CONNECT BY` constructs
FROM employees
START WITH title = 'President'
CONNECT BY manager_ID = PRIOR employee_id
ORDER BY
employee_ID NULLS LAST
```
`sqlglot.optimizer.qualify.qualify` handles this fine ✅
However, if the `SELECT` is `FROM` a CTE, it fails!
Taking the same example as before **but** putting all the data in a CTE (instead of a table) breaks `qualify`:
```py
from sqlglot import parse_one
from sqlglot.optimizer.qualify import qualify
SQL_DIALECT = "snowflake"
print(qualify(parse_one("""
WITH employees AS (
SELECT *
FROM (VALUES ('President', 1, NULL),
('Vice President Engineering', 10, 1),
('Programmer', 100, 10),
('QA Engineer', 101, 10),
('Vice President HR', 20, 1),
('Health Insurance Analyst', 200, 20)
) AS t(title, employee_ID, manager_ID)
)
SELECT
employee_ID,
manager_ID,
title,
level
FROM employees
START WITH title = 'President'
CONNECT BY manager_ID = PRIOR employee_id
ORDER BY
employee_ID NULLS LAST
""", dialect="snowflake"), schema=None, dialect="snowflake").sql(pretty=True, dialect=SQL_DIALECT))
```
Output:
```
Traceback (most recent call last):
File "test_sqlglot.py", line 7, in <module>
print(qualify(parse_one("""
^^^^^^^^^^^^^^^^^^^^^
File "/sqlglot/optimizer/qualify.py", line 102, in qualify
validate_qualify_columns_func(expression)
File "sqlglot/optimizer/qualify_columns.py", line 109, in validate_qualify_columns
raise OptimizeError(f"Column '{column}' could not be resolved{for_table}")
sqlglot.errors.OptimizeError: Column '"LEVEL"' could not be resolved
```
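For context, a small sketch (using the current sqlglot API) of how the `CONNECT BY` clause shows up on the parsed query; this is the signal a fix can key off to treat `LEVEL` as a pseudocolumn of the single scanned source:
```python
from sqlglot import parse_one

expr = parse_one(
    "SELECT level FROM employees START WITH title = 'President' "
    "CONNECT BY manager_ID = PRIOR employee_id",
    dialect="snowflake",
)
print(expr.args.get("connect") is not None)  # True -> LEVEL is in scope for this SELECT
```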
## Official Documentation
(see [documentation](https://docs.snowflake.com/en/sql-reference/constructs/connect-by#examples)).
| 1,734,360,638,000 | [] | Bug Report | [
"sqlglot/optimizer/qualify_columns.py:Resolver.get_source_columns"
] | [
"sqlglot/optimizer/qualify_columns.py:Resolver._get_source_pseudocolumns"
] | 1 |
|
tobymao/sqlglot | tobymao__sqlglot-4523 | 98906d4520a0c582a0534384ee3d0c1449846ee6 | diff --git a/sqlglot/dialects/tsql.py b/sqlglot/dialects/tsql.py
index 856142edc4..1acc5ca8a7 100644
--- a/sqlglot/dialects/tsql.py
+++ b/sqlglot/dialects/tsql.py
@@ -232,7 +232,7 @@ def _builder(args: t.List) -> E:
if start_date and start_date.is_number:
# Numeric types are valid DATETIME values
if start_date.is_int:
- adds = DEFAULT_START_DATE + datetime.timedelta(days=int(start_date.this))
+ adds = DEFAULT_START_DATE + datetime.timedelta(days=start_date.to_py())
start_date = exp.Literal.string(adds.strftime("%F"))
else:
# We currently don't handle float values, i.e. they're not converted to equivalent DATETIMEs.
| diff --git a/tests/dialects/test_tsql.py b/tests/dialects/test_tsql.py
index 61365994bd..e8cd69648b 100644
--- a/tests/dialects/test_tsql.py
+++ b/tests/dialects/test_tsql.py
@@ -1579,6 +1579,11 @@ def test_date_diff(self):
},
)
+ self.validate_identity(
+ "SELECT DATEADD(DAY, DATEDIFF(DAY, -3, GETDATE()), '08:00:00')",
+ "SELECT DATEADD(DAY, DATEDIFF(DAY, CAST('1899-12-29' AS DATETIME2), CAST(GETDATE() AS DATETIME2)), '08:00:00')",
+ )
+
def test_lateral_subquery(self):
self.validate_all(
"SELECT x.a, x.b, t.v, t.y FROM x CROSS APPLY (SELECT v, y FROM t) t(v, y)",
| TSQL fails on parsing DATEDIFF function with literals
Hello,
I just discovered that this code generates an error while parsing the second argument of DATEDIFF:
```
import sqlglot
statement = """
SELECT DATEADD(DAY, DATEDIFF(DAY, -3, CREATION_TIME_NEW), '08:00:00')
FROM y
"""
ast = sqlglot.parse_one(statement, 'tsql')
print(ast.sql('tsql'))
```
Error:
```
Python312\Lib\site-packages\sqlglot\dialects\tsql.py:235, in _build_date_delta.<locals>._builder(args)
232 if start_date and start_date.is_number:
233 # Numeric types are valid DATETIME values
234 if start_date.is_int:
--> 235 adds = DEFAULT_START_DATE + datetime.timedelta(days=int(start_date.this))
236 start_date = exp.Literal.string(adds.strftime("%F"))
237 else:
238 # We currently don't handle float values, i.e. they're not converted to equivalent DATETIMEs.
239 # This is not a problem when generating T-SQL code, it is when transpiling to other dialects.
TypeError: int() argument must be a string, a bytes-like object or a real number, not 'Literal'
```
start_date at this point is:
```
Neg(
this=Literal(this=3, is_string=False))
```
So `int(start_date.this)` fails.
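A hedged sketch of why switching to `Expression.to_py()` sidesteps this (assuming a sqlglot version where `Neg.to_py()` folds the sign, which the fix relies on):
```python
import sqlglot
from sqlglot import exp

node = sqlglot.parse_one("SELECT -3", read="tsql").find(exp.Neg)
print(type(node.this).__name__)  # Literal -- so int(node.this) raises the TypeError above
print(node.to_py())              # -3, a plain Python int usable in timedelta(days=...)
```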
| 1,734,354,762,000 | [] | Bug Report | [
"sqlglot/dialects/tsql.py:_build_date_delta"
] | [] | 1 |
|
tobymao/sqlglot | tobymao__sqlglot-4519 | e15cd0be1c66e0e72d9815575fa9b210e66cf7c9 | diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index fbbc8f14d4..48b59ecfaa 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -4620,14 +4620,14 @@ def _parse_interval(self, match_interval: bool = True) -> t.Optional[exp.Add | e
this = exp.Literal.string(this.to_py())
elif this and this.is_string:
parts = exp.INTERVAL_STRING_RE.findall(this.name)
- if len(parts) == 1:
- if unit:
- # Unconsume the eagerly-parsed unit, since the real unit was part of the string
- self._retreat(self._index - 1)
+ if parts and unit:
+ # Unconsume the eagerly-parsed unit, since the real unit was part of the string
+ unit = None
+ self._retreat(self._index - 1)
+ if len(parts) == 1:
this = exp.Literal.string(parts[0][0])
unit = self.expression(exp.Var, this=parts[0][1].upper())
-
if self.INTERVAL_SPANS and self._match_text_seq("TO"):
unit = self.expression(
exp.IntervalSpan, this=unit, expression=self._parse_var(any_token=True, upper=True)
| diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py
index 204151ed38..acdb2d4a83 100644
--- a/tests/dialects/test_postgres.py
+++ b/tests/dialects/test_postgres.py
@@ -71,6 +71,9 @@ def test_postgres(self):
self.validate_identity("EXEC AS myfunc @id = 123", check_command_warning=True)
self.validate_identity("SELECT CURRENT_USER")
self.validate_identity("SELECT * FROM ONLY t1")
+ self.validate_identity(
+ "SELECT * FROM t WHERE some_column >= CURRENT_DATE + INTERVAL '1 day 1 hour' AND some_another_column IS TRUE"
+ )
self.validate_identity(
"""UPDATE "x" SET "y" = CAST('0 days 60.000000 seconds' AS INTERVAL) WHERE "x"."id" IN (2, 3)"""
)
| interval parsing for postgresql
Hi! I have some problems with parsing PostgreSQL statements that have more than one date unit in intervals...
lib version 25.34.0
```python3
import sqlglot
sql = """
select *
from table
where
some_column >= (current_date + interval '1 day 1 hour')
and some_another_column is True;
"""
parsed = sqlglot.parse_one(sql, dialect="postgres")
```
This works fine and we have
```
expression=Paren(
this=Add(
this=CurrentDate(),
expression=Interval(this=Literal(this=1 day 1 hour, is_string=True))
)
)
```
But if we drop the brackets in the WHERE statement
```sql
select *
from table
where
some_column >= current_date + interval '1 day 1 hour'
and some_another_column is True;
```
the parser throws the exception `sqlglot.errors.ParseError: Invalid expression / Unexpected token. Line 6, Col: 24.`
```
expression=Add(
this=CurrentDate(),
expression=Interval(
this=Literal(this=1 day 1 hour, is_string=True),
unit=Var(this=AND)
)
)
```
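With the eagerly-consumed unit put back (as the fix does), the parse should keep the whole string on the interval and leave the unit unset; a hedged check:
```python
import sqlglot
from sqlglot import exp

e = sqlglot.parse_one(
    "select * from t where c >= current_date + interval '1 day 1 hour' and b is true",
    dialect="postgres",
)
interval = e.find(exp.Interval)
print(interval.this.name)         # 1 day 1 hour
print(interval.args.get("unit"))  # None -- AND is no longer swallowed as the unit
```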
**Official Documentation**
https://www.postgresql.org/docs/current/functions-datetime.html
| Shall I check this?
One more example that has the expected result, but strange (for postgres) syntax
```sql
select *
from table
where
some_column >= current_date + (interval '1 day 1 hour')
and some_another_column is True;
```
@ankur334 Feel free to take a stab at it if that's what you mean, thank you!
Sure @georgesittas and @VaggelisD I am looking into this. I hope to solve it. I will inform the team if I get stuck somewhere
Really appreciate it @ankur334, don't hesitate to ping us on slack for quicker iterations. | 1,734,102,016,000 | [] | Bug Report | [
"sqlglot/parser.py:Parser._parse_interval"
] | [] | 1 |
tobymao/sqlglot | tobymao__sqlglot-4515 | 5d3ee4cac1c5c9e45cbf6263c32c87fda78f9854 | diff --git a/sqlglot/optimizer/qualify_columns.py b/sqlglot/optimizer/qualify_columns.py
index 4880a615c6..ca5684407c 100644
--- a/sqlglot/optimizer/qualify_columns.py
+++ b/sqlglot/optimizer/qualify_columns.py
@@ -75,7 +75,7 @@ def qualify_columns(
if not schema.empty and expand_alias_refs:
_expand_alias_refs(scope, resolver)
- if not isinstance(scope.expression, exp.UDTF):
+ if isinstance(scope.expression, exp.Select):
if expand_stars:
_expand_stars(
scope,
| diff --git a/tests/fixtures/optimizer/qualify_columns.sql b/tests/fixtures/optimizer/qualify_columns.sql
index 2640145bed..735c71a5e7 100644
--- a/tests/fixtures/optimizer/qualify_columns.sql
+++ b/tests/fixtures/optimizer/qualify_columns.sql
@@ -190,6 +190,10 @@ SELECT x._col_0 AS _col_0, x._col_1 AS _col_1 FROM (VALUES (1, 2)) AS x(_col_0,
SELECT SOME_UDF(data).* FROM t;
SELECT SOME_UDF(t.data).* FROM t AS t;
+# execute: false
+SELECT p.* FROM p UNION ALL SELECT p2.* FROM p2;
+SELECT p.* FROM p AS p UNION ALL SELECT p2.* FROM p2 AS p2;
+
# execute: false
# allow_partial_qualification: true
# validate_qualify_columns: false
| `sqlglot.optimizer.qualify_columns.qualify_columns` fails on `table.*` inside `UNION ALL`
### Explanation
When using `qualify_columns` on a SQL query that contains a `UNION ALL` where one of the statements uses the `SELECT table.*` notation, `qualify_columns` raises an `sqlglot.errors.OptimizeError: Unknown table: p` error.
### Fully reproducible code snippet
```py
from sqlglot import parse_one
from sqlglot.schema import MappingSchema
from sqlglot.optimizer.qualify_columns import qualify_columns
SQL_DIALECT = "snowflake"
print(qualify_columns(parse_one("""
SELECT p.*
FROM p
UNION ALL
SELECT p2.*
FROM p2
""", dialect="snowflake"), schema=MappingSchema(dialect=SQL_DIALECT)).sql(pretty=True, dialect=SQL_DIALECT))
```
Running this snippet raises an error:
```
Traceback (most recent call last):
File "test_sqlglot.py", line 7, in <module>
print(qualify_columns(parse_one("""
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "sqlglot/optimizer/qualify_columns.py", line 81, in qualify_columns
_expand_stars(
File "sqlglot/optimizer/qualify_columns.py", line 623, in _expand_stars
raise OptimizeError(f"Unknown table: {table}")
sqlglot.errors.OptimizeError: Unknown table: p
```
Looks like, inside `_expand_stars`, `scope.sources` is empty. In that exact case, `scope` represents the entire query (with both SELECTs and the UNION ALL in between), and `scope_type` is ROOT (not UNION).
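A small sketch of that observation using sqlglot's scope helpers (table names as in the snippet above):
```python
from sqlglot import parse_one
from sqlglot.optimizer.scope import traverse_scope

for scope in traverse_scope(parse_one("SELECT p.* FROM p UNION ALL SELECT p2.* FROM p2")):
    print(type(scope.expression).__name__, list(scope.sources))
# The two Select scopes each carry their own source (p / p2), while the Union scope has
# no sources of its own -- and that is the scope `_expand_stars` was being handed here.
```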
Again, happy to make a PR.
| 1,734,017,130,000 | [] | Bug Report | [
"sqlglot/optimizer/qualify_columns.py:qualify_columns"
] | [] | 1 |
|
tobymao/sqlglot | tobymao__sqlglot-4513 | 5d3ee4cac1c5c9e45cbf6263c32c87fda78f9854 | diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 39975d7d57..150cbe1af4 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -6665,6 +6665,11 @@ class Week(Func):
arg_types = {"this": True, "mode": False}
+class XMLElement(Func):
+ _sql_names = ["XMLELEMENT"]
+ arg_types = {"this": True, "expressions": False}
+
+
class XMLTable(Func):
arg_types = {"this": True, "passing": False, "columns": False, "by_ref": False}
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index 635a5f5fa7..5ba3f57ce0 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -4612,3 +4612,7 @@ def includeproperty_sql(self, expression: exp.IncludeProperty) -> str:
include = f"{include} AS {alias}"
return include
+
+ def xmlelement_sql(self, expression: exp.XMLElement) -> str:
+ name = f"NAME {self.sql(expression, 'this')}"
+ return self.func("XMLELEMENT", name, *expression.expressions)
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 4cd3d30560..fbbc8f14d4 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -1143,6 +1143,11 @@ class Parser(metaclass=_Parser):
"TRIM": lambda self: self._parse_trim(),
"TRY_CAST": lambda self: self._parse_cast(False, safe=True),
"TRY_CONVERT": lambda self: self._parse_convert(False, safe=True),
+ "XMLELEMENT": lambda self: self.expression(
+ exp.XMLElement,
+ this=self._match_text_seq("NAME") and self._parse_id_var(),
+ expressions=self._match(TokenType.COMMA) and self._parse_csv(self._parse_expression),
+ ),
}
QUERY_MODIFIER_PARSERS = {
@@ -5351,18 +5356,21 @@ def _parse_function_call(
functions = self.FUNCTIONS
function = functions.get(upper)
+ known_function = function and not anonymous
- alias = upper in self.FUNCTIONS_WITH_ALIASED_ARGS
+ alias = not known_function or upper in self.FUNCTIONS_WITH_ALIASED_ARGS
args = self._parse_csv(lambda: self._parse_lambda(alias=alias))
- if alias:
+ if alias and known_function:
args = self._kv_to_prop_eq(args)
- if function and not anonymous:
- if "dialect" in function.__code__.co_varnames:
- func = function(args, dialect=self.dialect)
+ if known_function:
+ func_builder = t.cast(t.Callable, function)
+
+ if "dialect" in func_builder.__code__.co_varnames:
+ func = func_builder(args, dialect=self.dialect)
else:
- func = function(args)
+ func = func_builder(args)
func = self.validate_expression(func, args)
if self.dialect.PRESERVE_ORIGINAL_NAMES:
@@ -6730,7 +6738,9 @@ def _parse_expressions(self) -> t.List[exp.Expression]:
def _parse_select_or_expression(self, alias: bool = False) -> t.Optional[exp.Expression]:
return self._parse_select() or self._parse_set_operations(
- self._parse_expression() if alias else self._parse_assignment()
+ self._parse_alias(self._parse_assignment(), explicit=True)
+ if alias
+ else self._parse_assignment()
)
def _parse_ddl_select(self) -> t.Optional[exp.Expression]:
| diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py
index 66ded239a3..204151ed38 100644
--- a/tests/dialects/test_postgres.py
+++ b/tests/dialects/test_postgres.py
@@ -1289,3 +1289,17 @@ def test_array_length(self):
"clickhouse": UnsupportedError,
},
)
+
+ def test_xmlelement(self):
+ self.validate_identity("SELECT XMLELEMENT(NAME foo)")
+ self.validate_identity("SELECT XMLELEMENT(NAME foo, XMLATTRIBUTES('xyz' AS bar))")
+ self.validate_identity("SELECT XMLELEMENT(NAME test, XMLATTRIBUTES(a, b)) FROM test")
+ self.validate_identity(
+ "SELECT XMLELEMENT(NAME foo, XMLATTRIBUTES(CURRENT_DATE AS bar), 'cont', 'ent')"
+ )
+ self.validate_identity(
+ """SELECT XMLELEMENT(NAME "foo$bar", XMLATTRIBUTES('xyz' AS "a&b"))"""
+ )
+ self.validate_identity(
+ "SELECT XMLELEMENT(NAME foo, XMLATTRIBUTES('xyz' AS bar), XMLELEMENT(NAME abc), XMLCOMMENT('test'), XMLELEMENT(NAME xyz))"
+ )
| [Postgres] Parse error for XMLElement
Postgres's XMLELEMENT returns a parse error:
```
SELECT xmlelement(name foo);
# > <foo/>
```
**Fully reproducible code snippet**
```
from sqlglot import parse_one
from sqlglot.optimizer.qualify import qualify
qualify(parse_one(sql = """
SELECT xmlelement(name "foo")
""", dialect="postgres"), schema=None)
# > sqlglot.errors.ParseError: Expecting ). Line 2, Col: 29.
```
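For reference, a minimal sketch of the behaviour once `XMLELEMENT` is parsed as a proper function (mirroring the identities asserted in the accompanying tests):
```python
import sqlglot

print(sqlglot.transpile("SELECT XMLELEMENT(NAME foo)", read="postgres", write="postgres")[0])
# SELECT XMLELEMENT(NAME foo) -- round-trips instead of raising ParseError
```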
**Official Documentation**
https://www.postgresql.org/docs/current/functions-xml.html#FUNCTIONS-PRODUCING-XML-XMLELEMENT
| 1,734,016,226,000 | [] | Bug Report | [
"sqlglot/parser.py:Parser._parse_function_call",
"sqlglot/parser.py:Parser._parse_select_or_expression"
] | [
"sqlglot/generator.py:Generator.xmlelement_sql"
] | 2 |
|
tobymao/sqlglot | tobymao__sqlglot-4480 | 41c6d24c99e130b3c8e35e348a25a59e9e3d5553 | diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index 842df5a753..9aee03c1ca 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -2399,18 +2399,21 @@ def ordered_sql(self, expression: exp.Ordered) -> str:
f"'{nulls_sort_change.strip()}' translation not supported in window functions"
)
nulls_sort_change = ""
- elif (
- self.NULL_ORDERING_SUPPORTED is False
- and (isinstance(expression.find_ancestor(exp.AggFunc, exp.Select), exp.AggFunc))
- and (
- (asc and nulls_sort_change == " NULLS LAST")
- or (desc and nulls_sort_change == " NULLS FIRST")
- )
+ elif self.NULL_ORDERING_SUPPORTED is False and (
+ (asc and nulls_sort_change == " NULLS LAST")
+ or (desc and nulls_sort_change == " NULLS FIRST")
):
- self.unsupported(
- f"'{nulls_sort_change.strip()}' translation not supported for aggregate functions with {sort_order} sort order"
- )
- nulls_sort_change = ""
+ # BigQuery does not allow these ordering/nulls combinations when used under
+ # an aggregation func or under a window containing one
+ ancestor = expression.find_ancestor(exp.AggFunc, exp.Window, exp.Select)
+
+ if isinstance(ancestor, exp.Window):
+ ancestor = ancestor.this
+ if isinstance(ancestor, exp.AggFunc):
+ self.unsupported(
+ f"'{nulls_sort_change.strip()}' translation not supported for aggregate functions with {sort_order} sort order"
+ )
+ nulls_sort_change = ""
elif self.NULL_ORDERING_SUPPORTED is None:
if expression.this.is_int:
self.unsupported(
| diff --git a/tests/dialects/test_bigquery.py b/tests/dialects/test_bigquery.py
index 80a5dcc387..ec16dba243 100644
--- a/tests/dialects/test_bigquery.py
+++ b/tests/dialects/test_bigquery.py
@@ -2131,6 +2131,16 @@ def test_null_ordering(self):
},
)
+ self.validate_all(
+ f"SELECT SUM(f1) OVER (ORDER BY f2 {sort_order}) FROM t",
+ read={
+ "": f"SELECT SUM(f1) OVER (ORDER BY f2 {sort_order} {null_order}) FROM t",
+ },
+ write={
+ "bigquery": f"SELECT SUM(f1) OVER (ORDER BY f2 {sort_order}) FROM t",
+ },
+ )
+
def test_json_extract(self):
self.validate_all(
"""SELECT JSON_QUERY('{"class": {"students": []}}', '$.class')""",
diff --git a/tests/dialects/test_duckdb.py b/tests/dialects/test_duckdb.py
index 13caf1b71f..a9d6330228 100644
--- a/tests/dialects/test_duckdb.py
+++ b/tests/dialects/test_duckdb.py
@@ -79,7 +79,7 @@ def test_duckdb(self):
self.validate_all(
"SELECT SUM(X) OVER (ORDER BY x)",
write={
- "bigquery": "SELECT SUM(X) OVER (ORDER BY x NULLS LAST)",
+ "bigquery": "SELECT SUM(X) OVER (ORDER BY x)",
"duckdb": "SELECT SUM(X) OVER (ORDER BY x)",
"mysql": "SELECT SUM(X) OVER (ORDER BY CASE WHEN x IS NULL THEN 1 ELSE 0 END, x)",
},
| Wrong result for transpiling `OVER (ORDER BY .. )` from `trino` to `bigquery`
**Fully reproducible code snippet**
It can be reproduced by the following code:
```python
import sqlglot
sql = """
WITH t1 as (
select 1 f1, 2 f2 union all select 2 f1, 4 f2 union all select 3 f1, 6 f2
)
select sum(f1) over (order by f2) from t1;
"""
result = sqlglot.transpile(sql, read="trino", write="bigquery")[0]
print(result)
# the output is
# WITH t1 AS (SELECT 1 AS f1, 2 AS f2 UNION ALL SELECT 2 AS f1, 4 AS f2 UNION ALL SELECT 3 AS f1, 6 AS f2)
# SELECT SUM(f1) OVER (ORDER BY f2 NULLS LAST) FROM t1
```
This SQL isn't valid for BigQuery; running it produces the following error message:
```
NULLS LAST not supported with ascending sort order in RANGE clauses of analytic functions.
```
It seems that `NULLS LAST` shouldn't be added in the `OVER` clause.
If I don't assign the `read` dialect, it works well.
```python
import sqlglot
sql = """
WITH t1 as (
select 1 f1, 2 f2 union all select 2 f1, 4 f2 union all select 3 f1, 6 f2
)
select sum(f1) over (order by f2) from t1;
"""
result = sqlglot.transpile(sql, read=None, write="bigquery")[0]
print(result)
# WITH t1 AS (SELECT 1 AS f1, 2 AS f2 UNION ALL SELECT 2 AS f1, 4 AS f2 UNION ALL SELECT 3 AS f1, 6 AS f2)
# SELECT SUM(f1) OVER (ORDER BY f2) FROM t1
```
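Because the window's ORDER BY sits over an aggregate, BigQuery cannot express the explicit null ordering at all, so with the accompanying fix the trino-to-bigquery output should simply drop it (emitting an "unsupported" warning instead); a hedged check:
```python
import sqlglot

print(sqlglot.transpile("select sum(f1) over (order by f2) from t1", read="trino", write="bigquery")[0])
# SELECT SUM(f1) OVER (ORDER BY f2) FROM t1
```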
**Official Documentation**
[Window function syntax](https://cloud.google.com/bigquery/docs/reference/standard-sql/window-function-calls#syntax)
| 1,733,395,877,000 | [] | Bug Report | [
"sqlglot/generator.py:Generator.ordered_sql"
] | [] | 1 |
|
tobymao/sqlglot | tobymao__sqlglot-4448 | 74dc39ba8649fd8292c97c82088b39b08f531702 | diff --git a/sqlglot/dialects/dialect.py b/sqlglot/dialects/dialect.py
index 5b12f56c0c..eca2bf592a 100644
--- a/sqlglot/dialects/dialect.py
+++ b/sqlglot/dialects/dialect.py
@@ -411,6 +411,9 @@ class Dialect(metaclass=_Dialect):
is cast to x's type to match it instead.
"""
+ SUPPORTS_VALUES_DEFAULT = True
+ """Whether the DEFAULT keyword is supported in the VALUES clause."""
+
REGEXP_EXTRACT_DEFAULT_GROUP = 0
"""The default value for the capturing group."""
diff --git a/sqlglot/dialects/presto.py b/sqlglot/dialects/presto.py
index ec702a097c..056d974c27 100644
--- a/sqlglot/dialects/presto.py
+++ b/sqlglot/dialects/presto.py
@@ -198,6 +198,7 @@ class Presto(Dialect):
TYPED_DIVISION = True
TABLESAMPLE_SIZE_IS_PERCENT = True
LOG_BASE_FIRST: t.Optional[bool] = None
+ SUPPORTS_VALUES_DEFAULT = False
TIME_MAPPING = MySQL.TIME_MAPPING
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 2a477d9b90..12419e8c21 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -2946,8 +2946,13 @@ def _parse_partition(self) -> t.Optional[exp.Partition]:
)
def _parse_value(self) -> t.Optional[exp.Tuple]:
+ def _parse_value_expression() -> t.Optional[exp.Expression]:
+ if self.dialect.SUPPORTS_VALUES_DEFAULT and self._match(TokenType.DEFAULT):
+ return exp.var(self._prev.text.upper())
+ return self._parse_expression()
+
if self._match(TokenType.L_PAREN):
- expressions = self._parse_csv(self._parse_expression)
+ expressions = self._parse_csv(_parse_value_expression)
self._match_r_paren()
return self.expression(exp.Tuple, expressions=expressions)
| diff --git a/tests/dialects/test_mysql.py b/tests/dialects/test_mysql.py
index fd6b36f1be..9e5b74e0bb 100644
--- a/tests/dialects/test_mysql.py
+++ b/tests/dialects/test_mysql.py
@@ -118,6 +118,13 @@ def test_ddl(self):
"CREATE TABLE `foo` (a VARCHAR(10), INDEX idx_a (a DESC))",
)
+ self.validate_all(
+ "insert into t(i) values (default)",
+ write={
+ "duckdb": "INSERT INTO t (i) VALUES (DEFAULT)",
+ "mysql": "INSERT INTO t (i) VALUES (DEFAULT)",
+ },
+ )
self.validate_all(
"CREATE TABLE t (id INT UNSIGNED)",
write={
| “Default” is a keyword, but it was converted into a string value after translation.
```
Python 3.13.0 | packaged by Anaconda, Inc. | (main, Oct 7 2024, 16:25:56) [Clang 14.0.6 ] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> import sqlglot
>>> sqlglot.transpile("insert into t(i) values (default)", read="mysql", write="duckdb")[0]
'INSERT INTO t (i) VALUES ("default")'
```
“Default” is a keyword, but it was converted into a string value after translation.
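For reference, the round-trip the accompanying fix targets (mirroring the added MySQL test):
```python
import sqlglot

print(sqlglot.transpile("insert into t(i) values (default)", read="mysql", write="duckdb")[0])
# INSERT INTO t (i) VALUES (DEFAULT) -- the keyword survives instead of becoming "default"
```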
| 1,732,633,123,000 | [] | Bug Report | [
"sqlglot/parser.py:Parser._parse_value"
] | [] | 1 |
|
tobymao/sqlglot | tobymao__sqlglot-4447 | 954d8fd12740071e0951d1df3a405a4b9634868d | diff --git a/sqlglot/dialects/bigquery.py b/sqlglot/dialects/bigquery.py
index 5eb02a1492..2f3ac53efb 100644
--- a/sqlglot/dialects/bigquery.py
+++ b/sqlglot/dialects/bigquery.py
@@ -40,6 +40,11 @@
logger = logging.getLogger("sqlglot")
+JSON_EXTRACT_TYPE = t.Union[exp.JSONExtract, exp.JSONExtractScalar, exp.JSONExtractArray]
+
+DQUOTES_ESCAPING_JSON_FUNCTIONS = ("JSON_QUERY", "JSON_VALUE", "JSON_QUERY_ARRAY")
+
+
def _derived_table_values_to_unnest(self: BigQuery.Generator, expression: exp.Values) -> str:
if not expression.find_ancestor(exp.From, exp.Join):
return self.values_sql(expression)
@@ -324,6 +329,23 @@ def _build_contains_substring(args: t.List) -> exp.Contains | exp.Anonymous:
return exp.Contains(this=this, expression=expr)
+def _json_extract_sql(self: BigQuery.Generator, expression: JSON_EXTRACT_TYPE) -> str:
+ name = (expression._meta and expression.meta.get("name")) or expression.sql_name()
+ upper = name.upper()
+
+ dquote_escaping = upper in DQUOTES_ESCAPING_JSON_FUNCTIONS
+
+ if dquote_escaping:
+ self._quote_json_path_key_using_brackets = False
+
+ sql = rename_func(upper)(self, expression)
+
+ if dquote_escaping:
+ self._quote_json_path_key_using_brackets = True
+
+ return sql
+
+
class BigQuery(Dialect):
WEEK_OFFSET = -1
UNNEST_COLUMN_ONLY = True
@@ -869,6 +891,9 @@ class Generator(generator.Generator):
exp.ILike: no_ilike_sql,
exp.IntDiv: rename_func("DIV"),
exp.Int64: rename_func("INT64"),
+ exp.JSONExtract: _json_extract_sql,
+ exp.JSONExtractArray: _json_extract_sql,
+ exp.JSONExtractScalar: _json_extract_sql,
exp.JSONFormat: rename_func("TO_JSON_STRING"),
exp.Levenshtein: _levenshtein_sql,
exp.Max: max_or_greatest,
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index 5e8e1dccba..842df5a753 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -658,6 +658,7 @@ class Generator(metaclass=_Generator):
"_next_name",
"_identifier_start",
"_identifier_end",
+ "_quote_json_path_key_using_brackets",
)
def __init__(
@@ -706,6 +707,8 @@ def __init__(
self._identifier_start = self.dialect.IDENTIFIER_START
self._identifier_end = self.dialect.IDENTIFIER_END
+ self._quote_json_path_key_using_brackets = True
+
def generate(self, expression: exp.Expression, copy: bool = True) -> str:
"""
Generates the SQL string corresponding to the given syntax tree.
@@ -2871,7 +2874,7 @@ def json_path_part(self, expression: int | str | exp.JSONPathPart) -> str:
if isinstance(expression, int):
return str(expression)
- if self.JSON_PATH_SINGLE_QUOTE_ESCAPE:
+ if self._quote_json_path_key_using_brackets and self.JSON_PATH_SINGLE_QUOTE_ESCAPE:
escaped = expression.replace("'", "\\'")
escaped = f"\\'{expression}\\'"
else:
@@ -4072,7 +4075,11 @@ def _jsonpathkey_sql(self, expression: exp.JSONPathKey) -> str:
return f".{this}"
this = self.json_path_part(this)
- return f"[{this}]" if self.JSON_PATH_BRACKETED_KEY_SUPPORTED else f".{this}"
+ return (
+ f"[{this}]"
+ if self._quote_json_path_key_using_brackets and self.JSON_PATH_BRACKETED_KEY_SUPPORTED
+ else f".{this}"
+ )
def _jsonpathsubscript_sql(self, expression: exp.JSONPathSubscript) -> str:
this = self.json_path_part(expression.this)
diff --git a/sqlglot/jsonpath.py b/sqlglot/jsonpath.py
index 911debe4b5..115bd15941 100644
--- a/sqlglot/jsonpath.py
+++ b/sqlglot/jsonpath.py
@@ -146,6 +146,28 @@ def _parse_bracket() -> exp.JSONPathPart:
return node
+ def _parse_var_text() -> str:
+ """
+ Consumes & returns the text for a var. In BigQuery it's valid to have a key with spaces
+ in it, e.g JSON_QUERY(..., '$. a b c ') should produce a single JSONPathKey(' a b c ').
+ This is done by merging "consecutive" vars until a key separator is found (dot, colon etc)
+ or the path string is exhausted.
+ """
+ prev_index = i - 2
+
+ while _match(TokenType.VAR):
+ pass
+
+ start = 0 if prev_index < 0 else tokens[prev_index].end + 1
+
+ if i >= len(tokens):
+ # This key is the last token for the path, so it's text is the remaining path
+ text = path[start:]
+ else:
+ text = path[start : tokens[i].start]
+
+ return text
+
# We canonicalize the JSON path AST so that it always starts with a
# "root" element, so paths like "field" will be generated as "$.field"
_match(TokenType.DOLLAR)
@@ -155,8 +177,10 @@ def _parse_bracket() -> exp.JSONPathPart:
if _match(TokenType.DOT) or _match(TokenType.COLON):
recursive = _prev().text == ".."
- if _match(TokenType.VAR) or _match(TokenType.IDENTIFIER):
- value: t.Optional[str | exp.JSONPathWildcard] = _prev().text
+ if _match(TokenType.VAR):
+ value: t.Optional[str | exp.JSONPathWildcard] = _parse_var_text()
+ elif _match(TokenType.IDENTIFIER):
+ value = _prev().text
elif _match(TokenType.STAR):
value = exp.JSONPathWildcard()
else:
@@ -170,7 +194,9 @@ def _parse_bracket() -> exp.JSONPathPart:
raise ParseError(_error("Expected key name or * after DOT"))
elif _match(TokenType.L_BRACKET):
expressions.append(_parse_bracket())
- elif _match(TokenType.VAR) or _match(TokenType.IDENTIFIER):
+ elif _match(TokenType.VAR):
+ expressions.append(exp.JSONPathKey(this=_parse_var_text()))
+ elif _match(TokenType.IDENTIFIER):
expressions.append(exp.JSONPathKey(this=_prev().text))
elif _match(TokenType.STAR):
expressions.append(exp.JSONPathWildcard())
| diff --git a/tests/dialects/test_bigquery.py b/tests/dialects/test_bigquery.py
index 366cadedbe..26b12a15fa 100644
--- a/tests/dialects/test_bigquery.py
+++ b/tests/dialects/test_bigquery.py
@@ -1574,14 +1574,6 @@ def test_bigquery(self):
"snowflake": "IFF((y) <> 0, (x) / (y), NULL)",
},
)
- self.validate_all(
- """SELECT JSON_QUERY('{"class": {"students": []}}', '$.class')""",
- write={
- "bigquery": """SELECT JSON_QUERY('{"class": {"students": []}}', '$.class')""",
- "duckdb": """SELECT '{"class": {"students": []}}' -> '$.class'""",
- "snowflake": """SELECT GET_PATH(PARSE_JSON('{"class": {"students": []}}'), 'class')""",
- },
- )
self.validate_all(
"""SELECT JSON_VALUE_ARRAY('{"arr": [1, "a"]}', '$.arr')""",
write={
@@ -2139,7 +2131,16 @@ def test_null_ordering(self):
},
)
- def test_json_extract_scalar(self):
+ def test_json_extract(self):
+ self.validate_all(
+ """SELECT JSON_QUERY('{"class": {"students": []}}', '$.class')""",
+ write={
+ "bigquery": """SELECT JSON_QUERY('{"class": {"students": []}}', '$.class')""",
+ "duckdb": """SELECT '{"class": {"students": []}}' -> '$.class'""",
+ "snowflake": """SELECT GET_PATH(PARSE_JSON('{"class": {"students": []}}'), 'class')""",
+ },
+ )
+
for func in ("JSON_EXTRACT_SCALAR", "JSON_VALUE"):
with self.subTest(f"Testing BigQuery's {func}"):
self.validate_all(
@@ -2164,6 +2165,18 @@ def test_json_extract_scalar(self):
self.parse_one(sql).sql("bigquery", normalize_functions="upper"), sql
)
+ # Test double quote escaping
+ for func in ("JSON_VALUE", "JSON_QUERY", "JSON_QUERY_ARRAY"):
+ self.validate_identity(
+ f"{func}(doc, '$. a b c .d')", f"""{func}(doc, '$." a b c ".d')"""
+ )
+
+ # Test single quote & bracket escaping
+ for func in ("JSON_EXTRACT", "JSON_EXTRACT_SCALAR", "JSON_EXTRACT_ARRAY"):
+ self.validate_identity(
+ f"{func}(doc, '$. a b c .d')", f"""{func}(doc, '$[\\' a b c \\'].d')"""
+ )
+
def test_json_extract_array(self):
for func in ("JSON_QUERY_ARRAY", "JSON_EXTRACT_ARRAY"):
with self.subTest(f"Testing BigQuery's {func}"):
| bigquery: json_value transpilation gives incorrect result or fails due to escaping
Noticed via sqlmesh, which recently updated `sqlglot (25.19.0 -> 25.31.4)`.
We have an expression like `select json_value('{"fu bar": 42}', "$.fu bar")` which runs correctly on bigquery yielding 42. Previously this was transpiled unchanged, but now results in an incorrect selection (note the added `.`s):
```
>>> sqlglot.__version__
'25.31.4'
>>> print(sqlglot.transpile("""select json_value('{"fu bar": 42}', "$.fu bar")""", read="bigquery", write="bigquery")[0])
SELECT json_value('{"fu bar": 42}', '$.fu.bar')
```
This query now yields `null` instead of 42 in bigquery.
The space-containing-key can be escaped in bigquery with double-quotes for [json_value](https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_value) which is apparently different from the bracketed escaping used by [json_extract](https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_extract)... 🤯 So this also works successfully in bigquery: `SELECT json_value('{"fu bar": 42}', '$."fu bar"')`.
But it now gets transpiled to SQL that throws an error, namely `Invalid token in JSONPath at: ['fu bar']`:
```
>>> print(sqlglot.transpile("""SELECT json_value('{"fu bar": 42}', '$."fu bar"')""", read="bigquery", write="bigquery")[0])
SELECT json_value('{"fu bar": 42}', '$[\'fu bar\']')
```
A workaround is apparently to use the deprecated json_extract[_scalar] functions; i.e., the query `SELECT json_extract('{"fu bar": 42}', '$[\'fu bar\']')` works correctly and is transpiled unchanged:
```
>>> print(sqlglot.transpile("""SELECT json_extract('{"fu bar": 42}', '$[\\'fu bar\\']')""", read="bigquery", write="bigquery")[0])
SELECT json_extract('{"fu bar": 42}', '$[\'fu bar\']')
```
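A hedged sketch of the dual escaping behaviour a fix needs, mirroring the BigQuery tests added alongside it (the legacy JSON_EXTRACT family keeps bracket/single-quote escaping, while JSON_VALUE/JSON_QUERY use double quotes):
```python
import sqlglot

print(sqlglot.transpile("SELECT JSON_VALUE(doc, '$. a b c .d')", read="bigquery", write="bigquery")[0])
# SELECT JSON_VALUE(doc, '$." a b c ".d')
print(sqlglot.transpile("SELECT JSON_EXTRACT(doc, '$. a b c .d')", read="bigquery", write="bigquery")[0])
# SELECT JSON_EXTRACT(doc, '$[\' a b c \'].d')
```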
| this is tricky, there are a couple of things we should do @VaggelisD / @georgesittas
1. make it so json tokenizer is sqlglot only, we can do that with a new constant USE_RS_TOKENIZER = USE_RS_TOKENIZER inside the tokenizer
2. Override _scan_var so that it doesn't do a strip() on the peek so it can accept spaces
3. modify _scan so that it doesn't strip white spaces
this is legal in bigquery... json_value('...', '$. a b c '), notice the leading and trailing spaces
we also need to override the escaping rules. for json_value, it uses double quotes to escape, and for json_extract it uses [ to escape. | 1,732,614,209,000 | [] | Bug Report | [
"sqlglot/generator.py:Generator.__init__",
"sqlglot/generator.py:Generator.json_path_part",
"sqlglot/generator.py:Generator._jsonpathkey_sql",
"sqlglot/jsonpath.py:parse"
] | [
"sqlglot/dialects/bigquery.py:_json_extract_sql"
] | 4 |
tobymao/sqlglot | tobymao__sqlglot-4434 | 38e2e19ac3e20224dc07128994a47340aa56e635 | diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py
index 1d2b246e5d..350f7773b5 100644
--- a/sqlglot/dialects/snowflake.py
+++ b/sqlglot/dialects/snowflake.py
@@ -465,6 +465,12 @@ class Parser(parser.Parser):
PROPERTY_PARSERS = {
**parser.Parser.PROPERTY_PARSERS,
"LOCATION": lambda self: self._parse_location_property(),
+ "TAG": lambda self: self._parse_dict_property(
+ this="TAG",
+ has_kind=False,
+ separator=(TokenType.COMMA, ","),
+ delimiter=(TokenType.EQ, "="),
+ ),
}
TYPE_CONVERTERS = {
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index a69e8362f9..de88fef6b8 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -2778,10 +2778,11 @@ class ClusteredByProperty(Property):
class DictProperty(Property):
- arg_types = {"this": True, "kind": True, "settings": False}
+ arg_types = {"this": True, "kind": False, "settings": False, "separator": False}
class DictSubProperty(Property):
+ arg_types = {"this": True, "value": True, "delimiter": False}
pass
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index ae9dc35e49..c20e5ed222 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -3720,9 +3720,16 @@ def tonumber_sql(self, expression: exp.ToNumber) -> str:
def dictproperty_sql(self, expression: exp.DictProperty) -> str:
this = self.sql(expression, "this")
kind = self.sql(expression, "kind")
- settings_sql = self.expressions(expression, key="settings", sep=" ")
- args = f"({self.sep('')}{settings_sql}{self.seg(')', sep='')}" if settings_sql else "()"
- return f"{this}({kind}{args})"
+ separator = self.sql(expression, "separator")
+ settings_sql = self.expressions(expression, key="settings", sep=f"{separator} ")
+ if kind:
+ settings_section = (
+ f"({self.sep('')}{settings_sql}{self.seg(')', sep='')}" if settings_sql else "()"
+ )
+ args = f"{kind}{settings_section}"
+ else:
+ args = f"{self.sep('')}{settings_sql}{self.sep(sep='')}"
+ return f"{this} ({args})"
def dictrange_sql(self, expression: exp.DictRange) -> str:
this = self.sql(expression, "this")
@@ -3731,7 +3738,8 @@ def dictrange_sql(self, expression: exp.DictRange) -> str:
return f"{this}(MIN {min} MAX {max})"
def dictsubproperty_sql(self, expression: exp.DictSubProperty) -> str:
- return f"{self.sql(expression, 'this')} {self.sql(expression, 'value')}"
+ delimiter = self.sql(expression, "delimiter") or " "
+ return f"{self.sql(expression, 'this')}{delimiter}{self.sql(expression, 'value')}"
def duplicatekeyproperty_sql(self, expression: exp.DuplicateKeyProperty) -> str:
return f"DUPLICATE KEY ({self.expressions(expression, flat=True)})"
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index cbbeffe5e1..860425825c 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -936,7 +936,7 @@ class Parser(metaclass=_Parser):
"INPUT": lambda self: self.expression(exp.InputModelProperty, this=self._parse_schema()),
"JOURNAL": lambda self, **kwargs: self._parse_journal(**kwargs),
"LANGUAGE": lambda self: self._parse_property_assignment(exp.LanguageProperty),
- "LAYOUT": lambda self: self._parse_dict_property(this="LAYOUT"),
+ "LAYOUT": lambda self: self._parse_dict_property(this="LAYOUT", has_kind=True),
"LIFETIME": lambda self: self._parse_dict_range(this="LIFETIME"),
"LIKE": lambda self: self._parse_create_like(),
"LOCATION": lambda self: self._parse_property_assignment(exp.LocationProperty),
@@ -973,7 +973,7 @@ class Parser(metaclass=_Parser):
"SETTINGS": lambda self: self._parse_settings_property(),
"SHARING": lambda self: self._parse_property_assignment(exp.SharingProperty),
"SORTKEY": lambda self: self._parse_sortkey(),
- "SOURCE": lambda self: self._parse_dict_property(this="SOURCE"),
+ "SOURCE": lambda self: self._parse_dict_property(this="SOURCE", has_kind=True),
"STABLE": lambda self: self.expression(
exp.StabilityProperty, this=exp.Literal.string("STABLE")
),
@@ -7108,21 +7108,39 @@ def _parse_as_command(self, start: Token) -> exp.Command:
self._warn_unsupported()
return exp.Command(this=text[:size], expression=text[size:])
- def _parse_dict_property(self, this: str) -> exp.DictProperty:
+ def _parse_dict_property(
+ self,
+ this: str,
+ has_kind: bool,
+ separator: t.Optional[t.Tuple[TokenType, str]] = None,
+ delimiter: t.Optional[t.Tuple[TokenType, str]] = None,
+ ) -> exp.DictProperty:
+ """
+ Parses a dictionary property, which is a key-value pair enclosed in parentheses.
+
+ Args:
+ this: The name of the property.
+ has_kind: Whether the property is labeled, e.g. is of the form `PROPERTY (KIND (k1=v1, k2=v2, ...))`.
+ separator: The separator token between key-value pairs, and its string representation.
+ delimiter: The delimiter token between key and value, and its string representation.
+ """
+ separator_token = None
+ separator_str = None
+ if separator:
+ separator_token, separator_str = separator
+
settings = []
self._match_l_paren()
- kind = self._parse_id_var()
- if self._match(TokenType.L_PAREN):
- while True:
- key = self._parse_id_var()
- value = self._parse_primary()
-
- if not key and value is None:
- break
- settings.append(self.expression(exp.DictSubProperty, this=key, value=value))
- self._match(TokenType.R_PAREN)
+ if has_kind:
+ kind = self._parse_id_var()
+ if self._match(TokenType.L_PAREN):
+ settings = self._parse_key_value_list(this, separator_token, delimiter)
+ self._match(TokenType.R_PAREN)
+ else:
+ kind = None
+ settings = self._parse_key_value_list(this, separator_token, delimiter)
self._match_r_paren()
@@ -7131,8 +7149,39 @@ def _parse_dict_property(self, this: str) -> exp.DictProperty:
this=this,
kind=kind.this if kind else None,
settings=settings,
+ separator=separator_str,
)
+ def _parse_key_value_list(
+ self,
+ this: str,
+ separator_token: t.Optional[TokenType],
+ delimiter: t.Optional[t.Tuple[TokenType, str]],
+ ) -> t.List[exp.DictSubProperty]:
+ delimiter_token = None
+ delimiter_str = None
+ if delimiter:
+ delimiter_token, delimiter_str = delimiter
+
+ lst = []
+
+ while True:
+ key = self._parse_id_var()
+ if delimiter_token:
+ self._match(delimiter_token)
+ value = self._parse_primary()
+
+ if not key and value is None:
+ break
+ lst.append(
+ self.expression(exp.DictSubProperty, this=key, value=value, delimiter=delimiter_str)
+ )
+
+ if separator_token:
+ self._match(separator_token)
+
+ return lst
+
def _parse_dict_range(self, this: str) -> exp.DictRange:
self._match_l_paren()
has_min = self._match_text_seq("MIN")
| diff --git a/tests/dialects/test_clickhouse.py b/tests/dialects/test_clickhouse.py
index 19b3ce3934..65b12bbd58 100644
--- a/tests/dialects/test_clickhouse.py
+++ b/tests/dialects/test_clickhouse.py
@@ -974,11 +974,11 @@ def test_ddl(self):
amount Float64
)
PRIMARY KEY (id)
-SOURCE(CLICKHOUSE(
+SOURCE (CLICKHOUSE(
TABLE 'discounts'
))
LIFETIME(MIN 1 MAX 1000)
-LAYOUT(RANGE_HASHED(
+LAYOUT (RANGE_HASHED(
range_lookup_strategy 'max'
))
RANGE(MIN discount_start_date MAX discount_end_date)""",
@@ -1004,10 +1004,10 @@ def test_ddl(self):
cca2 String DEFAULT '??'
)
PRIMARY KEY (prefix)
-SOURCE(CLICKHOUSE(
+SOURCE (CLICKHOUSE(
TABLE 'my_ip_addresses'
))
-LAYOUT(IP_TRIE())
+LAYOUT (IP_TRIE())
LIFETIME(MIN 0 MAX 3600)""",
},
pretty=True,
@@ -1030,10 +1030,10 @@ def test_ddl(self):
name String
)
PRIMARY KEY (key)
-SOURCE(CLICKHOUSE(
+SOURCE (CLICKHOUSE(
TABLE 'polygons_test_table'
))
-LAYOUT(POLYGON(
+LAYOUT (POLYGON(
STORE_POLYGON_KEY_COLUMN 1
))
LIFETIME(MIN 0 MAX 0)""",
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index 157947df46..8058bcf6de 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -1479,13 +1479,20 @@ def test_ddl(self):
"snowflake": "CREATE OR REPLACE TRANSIENT TABLE a (id INT)",
},
)
-
self.validate_all(
"CREATE TABLE a (b INT)",
read={"teradata": "CREATE MULTISET TABLE a (b INT)"},
write={"snowflake": "CREATE TABLE a (b INT)"},
)
+ self.validate_identity("CREATE TABLE a TAG (key1='value_1', key2='value_2')")
+ self.validate_all(
+ "CREATE TABLE a TAG (key1='value_1')",
+ read={
+ "snowflake": "CREATE TABLE a WITH TAG (key1='value_1')",
+ },
+ )
+
for action in ("SET", "DROP"):
with self.subTest(f"ALTER COLUMN {action} NOT NULL"):
self.validate_all(
| Support Snowflake WITH TAG syntax
**Is your feature request related to a problem? Please describe.**
This library does not support Snowflake's `WITH TAG` syntax for setting tags while creating an object.
For example,
```
sqlglot.parse_one("CREATE VIEW my_view WITH TAG (my_tag = 'tag')", dialect="snowflake")
'CREATE VIEW my_view WITH TAG (my_tag = 'tag')' contains unsupported syntax. Falling back to parsing as a 'Command'.
> Command(this=CREATE, expression=VIEW my_view WITH TAG (my_tag = 'tag'))
```
Note that multiple tags can be specified in the same statement.
**Describe the solution you'd like**
Parse the above statement into something like:
```
Create(
this=Table(
this=Identifier(this=my_view, quoted=False)),
kind=VIEW,
actions=[tag=[
Paren(
this=EQ(
this=Column(
this=Identifier(this=my_tag, quoted=False)),
expression=Literal(this=tag, is_string=True)))])
]
)
```
This was pulled from an `ALTER VIEW ... SET TAG` statement; I don't know if this is actually the correct Create object. My primary desire is to avoid the Command parsing fallback.
**Additional context**
Snowflake CREATE TABLE reference: https://docs.snowflake.com/en/sql-reference/sql/create-table
Example CREATE WAREHOUSE statement: https://docs.snowflake.com/en/user-guide/object-tagging
There are similar `WITH ...` syntaxes in snowflake, e.g. `WITH ROW ACCESS POLICY` and `WITH AGGREGATION POLICY`, or `WITH MASKING POLICY` and `WITH PROJECTION POLICY` for columns. This feature request is not specifically asking for support for that syntax.
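For reference, a hedged sketch of the behaviour the accompanying patch enables (per its new Snowflake tests); the resulting AST models the tags as a property rather than the hypothetical `actions=[tag=...]` shape above:
```python
import sqlglot

e = sqlglot.parse_one("CREATE TABLE a WITH TAG (key1='value_1', key2='value_2')", dialect="snowflake")
print(type(e).__name__)            # Create -- no more Command fallback
print(e.sql(dialect="snowflake"))  # CREATE TABLE a TAG (key1='value_1', key2='value_2')
```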
| This is low priority for us right now, but we'll be happy to accept a well-crafted & tested PR. Shouldn't be too hard to support. | 1,732,138,295,000 | [] | Feature Request | [
"sqlglot/generator.py:Generator.dictproperty_sql",
"sqlglot/generator.py:Generator.dictsubproperty_sql",
"sqlglot/parser.py:Parser._parse_dict_property"
] | [
"sqlglot/parser.py:Parser._parse_key_value_list"
] | 3 |
tobymao/sqlglot | tobymao__sqlglot-4431 | 69d4a8ccdf5954f293acbdf61c420b72dde5b8af | diff --git a/sqlglot/dialects/dialect.py b/sqlglot/dialects/dialect.py
index 38a3723f24..2fbd4e6864 100644
--- a/sqlglot/dialects/dialect.py
+++ b/sqlglot/dialects/dialect.py
@@ -397,6 +397,13 @@ class Dialect(metaclass=_Dialect):
ARRAY_AGG_INCLUDES_NULLS: t.Optional[bool] = True
"""Whether ArrayAgg needs to filter NULL values."""
+ PROMOTE_TO_INFERRED_DATETIME_TYPE = False
+ """
+ This flag is used in the optimizer's canonicalize rule and determines whether x will be promoted
+ to the literal's type in x::DATE < '2020-01-01 12:05:03' (i.e., DATETIME). When false, the literal
+ is cast to x's type to match it instead.
+ """
+
REGEXP_EXTRACT_DEFAULT_GROUP = 0
"""The default value for the capturing group."""
diff --git a/sqlglot/dialects/mysql.py b/sqlglot/dialects/mysql.py
index 1404f0fc32..ce266d5c11 100644
--- a/sqlglot/dialects/mysql.py
+++ b/sqlglot/dialects/mysql.py
@@ -145,6 +145,8 @@ def func(self: MySQL.Generator, expression: exp.Func) -> str:
class MySQL(Dialect):
+ PROMOTE_TO_INFERRED_DATETIME_TYPE = True
+
# https://dev.mysql.com/doc/refman/8.0/en/identifiers.html
IDENTIFIERS_CAN_START_WITH_DIGIT = True
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index a69e8362f9..3f771bc98d 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -7847,6 +7847,7 @@ def cast(
types_are_equivalent = type_mapping.get(
existing_cast_type, existing_cast_type.value
) == type_mapping.get(new_cast_type, new_cast_type.value)
+
if expr.is_type(data_type) or types_are_equivalent:
return expr
diff --git a/sqlglot/optimizer/canonicalize.py b/sqlglot/optimizer/canonicalize.py
index fd0026228b..b8a399505b 100644
--- a/sqlglot/optimizer/canonicalize.py
+++ b/sqlglot/optimizer/canonicalize.py
@@ -4,10 +4,12 @@
import typing as t
from sqlglot import exp
+from sqlglot.dialects.dialect import Dialect, DialectType
from sqlglot.helper import is_date_unit, is_iso_date, is_iso_datetime
+from sqlglot.optimizer.annotate_types import TypeAnnotator
-def canonicalize(expression: exp.Expression) -> exp.Expression:
+def canonicalize(expression: exp.Expression, dialect: DialectType = None) -> exp.Expression:
"""Converts a sql expression into a standard form.
This method relies on annotate_types because many of the
@@ -17,10 +19,12 @@ def canonicalize(expression: exp.Expression) -> exp.Expression:
expression: The expression to canonicalize.
"""
+ dialect = Dialect.get_or_raise(dialect)
+
def _canonicalize(expression: exp.Expression) -> exp.Expression:
expression = add_text_to_concat(expression)
expression = replace_date_funcs(expression)
- expression = coerce_type(expression)
+ expression = coerce_type(expression, dialect.PROMOTE_TO_INFERRED_DATETIME_TYPE)
expression = remove_redundant_casts(expression)
expression = ensure_bools(expression, _replace_int_predicate)
expression = remove_ascending_order(expression)
@@ -68,11 +72,11 @@ def replace_date_funcs(node: exp.Expression) -> exp.Expression:
)
-def coerce_type(node: exp.Expression) -> exp.Expression:
+def coerce_type(node: exp.Expression, promote_to_inferred_datetime_type: bool) -> exp.Expression:
if isinstance(node, COERCIBLE_DATE_OPS):
- _coerce_date(node.left, node.right)
+ _coerce_date(node.left, node.right, promote_to_inferred_datetime_type)
elif isinstance(node, exp.Between):
- _coerce_date(node.this, node.args["low"])
+ _coerce_date(node.this, node.args["low"], promote_to_inferred_datetime_type)
elif isinstance(node, exp.Extract) and not node.expression.type.is_type(
*exp.DataType.TEMPORAL_TYPES
):
@@ -128,17 +132,48 @@ def remove_ascending_order(expression: exp.Expression) -> exp.Expression:
return expression
-def _coerce_date(a: exp.Expression, b: exp.Expression) -> None:
+def _coerce_date(
+ a: exp.Expression,
+ b: exp.Expression,
+ promote_to_inferred_datetime_type: bool,
+) -> None:
for a, b in itertools.permutations([a, b]):
if isinstance(b, exp.Interval):
a = _coerce_timeunit_arg(a, b.unit)
+
+ a_type = a.type
if (
- a.type
- and a.type.this in exp.DataType.TEMPORAL_TYPES
- and b.type
- and b.type.this in exp.DataType.TEXT_TYPES
+ not a_type
+ or a_type.this not in exp.DataType.TEMPORAL_TYPES
+ or not b.type
+ or b.type.this not in exp.DataType.TEXT_TYPES
):
- _replace_cast(b, exp.DataType.Type.DATETIME)
+ continue
+
+ if promote_to_inferred_datetime_type:
+ if b.is_string:
+ date_text = b.name
+ if is_iso_date(date_text):
+ b_type = exp.DataType.Type.DATE
+ elif is_iso_datetime(date_text):
+ b_type = exp.DataType.Type.DATETIME
+ else:
+ b_type = a_type.this
+ else:
+ # If b is not a datetime string, we conservatively promote it to a DATETIME,
+ # in order to ensure there are no surprising truncations due to downcasting
+ b_type = exp.DataType.Type.DATETIME
+
+ target_type = (
+ b_type if b_type in TypeAnnotator.COERCES_TO.get(a_type.this, {}) else a_type
+ )
+ else:
+ target_type = a_type
+
+ if target_type != a_type:
+ _replace_cast(a, target_type)
+
+ _replace_cast(b, target_type)
def _coerce_timeunit_arg(arg: exp.Expression, unit: t.Optional[exp.Expression]) -> exp.Expression:
@@ -168,7 +203,7 @@ def _coerce_datediff_args(node: exp.DateDiff) -> None:
e.replace(exp.cast(e.copy(), to=exp.DataType.Type.DATETIME))
-def _replace_cast(node: exp.Expression, to: exp.DataType.Type) -> None:
+def _replace_cast(node: exp.Expression, to: exp.DATA_TYPE) -> None:
node.replace(exp.cast(node.copy(), to=to))
| diff --git a/tests/fixtures/optimizer/canonicalize.sql b/tests/fixtures/optimizer/canonicalize.sql
index 66c6c95c93..3610f1628d 100644
--- a/tests/fixtures/optimizer/canonicalize.sql
+++ b/tests/fixtures/optimizer/canonicalize.sql
@@ -2,7 +2,7 @@ SELECT w.d + w.e AS c FROM w AS w;
SELECT CONCAT("w"."d", "w"."e") AS "c" FROM "w" AS "w";
SELECT CAST(w.d AS DATE) > w.e AS a FROM w AS w;
-SELECT CAST("w"."d" AS DATE) > CAST("w"."e" AS DATETIME) AS "a" FROM "w" AS "w";
+SELECT CAST("w"."d" AS DATE) > CAST("w"."e" AS DATE) AS "a" FROM "w" AS "w";
SELECT CAST(1 AS VARCHAR) AS a FROM w AS w;
SELECT CAST(1 AS VARCHAR) AS "a" FROM "w" AS "w";
@@ -102,7 +102,7 @@ DATEDIFF('2023-01-01', '2023-01-02', DAY);
DATEDIFF(CAST('2023-01-01' AS DATETIME), CAST('2023-01-02' AS DATETIME), DAY);
SELECT "t"."d" > '2023-01-01' AS "d" FROM "temporal" AS "t";
-SELECT "t"."d" > CAST('2023-01-01' AS DATETIME) AS "d" FROM "temporal" AS "t";
+SELECT "t"."d" > CAST('2023-01-01' AS DATE) AS "d" FROM "temporal" AS "t";
SELECT "t"."d" > CAST('2023-01-01' AS DATETIME) AS "d" FROM "temporal" AS "t";
SELECT "t"."d" > CAST('2023-01-01' AS DATETIME) AS "d" FROM "temporal" AS "t";
@@ -110,6 +110,17 @@ SELECT "t"."d" > CAST('2023-01-01' AS DATETIME) AS "d" FROM "temporal" AS "t";
SELECT "t"."t" > '2023-01-01 00:00:01' AS "t" FROM "temporal" AS "t";
SELECT "t"."t" > CAST('2023-01-01 00:00:01' AS DATETIME) AS "t" FROM "temporal" AS "t";
+WITH "t" AS (SELECT CAST("ext"."created_at" AS TIMESTAMP) AS "created_at" FROM "ext" AS "ext") SELECT "t"."created_at" > '2024-10-01 12:05:02' AS "col" FROM "t" AS "t";
+WITH "t" AS (SELECT CAST("ext"."created_at" AS TIMESTAMP) AS "created_at" FROM "ext" AS "ext") SELECT "t"."created_at" > CAST('2024-10-01 12:05:02' AS TIMESTAMP) AS "col" FROM "t" AS "t";
+
+# dialect: mysql
+SELECT `t`.`d` < '2023-01-01 00:00:01' AS `col` FROM `temporal` AS `t`;
+SELECT CAST(`t`.`d` AS DATETIME) < CAST('2023-01-01 00:00:01' AS DATETIME) AS `col` FROM `temporal` AS `t`;
+
+# dialect: mysql
+SELECT CAST(`t`.`some_col` AS DATE) < CAST(`t`.`other_col` AS CHAR) AS `col` FROM `other_table` AS `t`;
+SELECT CAST(CAST(`t`.`some_col` AS DATE) AS DATETIME) < CAST(CAST(`t`.`other_col` AS CHAR) AS DATETIME) AS `col` FROM `other_table` AS `t`;
+
--------------------------------------
-- Remove redundant casts
--------------------------------------
diff --git a/tests/fixtures/optimizer/tpc-ds/tpc-ds.sql b/tests/fixtures/optimizer/tpc-ds/tpc-ds.sql
index 290d2760b8..59bc432fdd 100644
--- a/tests/fixtures/optimizer/tpc-ds/tpc-ds.sql
+++ b/tests/fixtures/optimizer/tpc-ds/tpc-ds.sql
@@ -736,8 +736,8 @@ WITH "salesreturns" AS (
"date_dim"."d_date" AS "d_date"
FROM "date_dim" AS "date_dim"
WHERE
- CAST("date_dim"."d_date" AS DATETIME) <= CAST('2002-09-05' AS DATE)
- AND CAST("date_dim"."d_date" AS DATETIME) >= CAST('2002-08-22' AS DATE)
+ CAST("date_dim"."d_date" AS DATE) <= CAST('2002-09-05' AS DATE)
+ AND CAST("date_dim"."d_date" AS DATE) >= CAST('2002-08-22' AS DATE)
), "ssr" AS (
SELECT
"store"."s_store_id" AS "s_store_id",
@@ -1853,8 +1853,8 @@ SELECT
FROM "web_sales" AS "web_sales"
JOIN "date_dim" AS "date_dim"
ON "date_dim"."d_date_sk" = "web_sales"."ws_sold_date_sk"
- AND CAST("date_dim"."d_date" AS DATETIME) <= CAST('2000-06-10' AS DATE)
- AND CAST("date_dim"."d_date" AS DATETIME) >= CAST('2000-05-11' AS DATE)
+ AND CAST("date_dim"."d_date" AS DATE) <= CAST('2000-06-10' AS DATE)
+ AND CAST("date_dim"."d_date" AS DATE) >= CAST('2000-05-11' AS DATE)
JOIN "item" AS "item"
ON "item"."i_category" IN ('Home', 'Men', 'Women')
AND "item"."i_item_sk" = "web_sales"."ws_item_sk"
@@ -2422,7 +2422,7 @@ JOIN "date_dim" AS "date_dim"
AND "date_dim"."d_date" >= '2002-3-01'
AND (
CAST('2002-3-01' AS DATE) + INTERVAL '60' DAY
- ) >= CAST("date_dim"."d_date" AS DATETIME)
+ ) >= CAST("date_dim"."d_date" AS DATE)
WHERE
"_u_3"."_u_4" IS NULL
AND ARRAY_ANY("_u_0"."_u_2", "_x" -> "cs1"."cs_warehouse_sk" <> "_x")
@@ -2731,8 +2731,8 @@ SELECT
FROM "catalog_sales" AS "catalog_sales"
JOIN "date_dim" AS "date_dim"
ON "catalog_sales"."cs_sold_date_sk" = "date_dim"."d_date_sk"
- AND CAST("date_dim"."d_date" AS DATETIME) <= CAST('2001-03-05' AS DATE)
- AND CAST("date_dim"."d_date" AS DATETIME) >= CAST('2001-02-03' AS DATE)
+ AND CAST("date_dim"."d_date" AS DATE) <= CAST('2001-03-05' AS DATE)
+ AND CAST("date_dim"."d_date" AS DATE) >= CAST('2001-02-03' AS DATE)
JOIN "item" AS "item"
ON "catalog_sales"."cs_item_sk" = "item"."i_item_sk"
AND "item"."i_category" IN ('Children', 'Women', 'Electronics')
@@ -2811,8 +2811,8 @@ WITH "x" AS (
FROM "inventory" AS "inventory"
JOIN "date_dim" AS "date_dim"
ON "date_dim"."d_date_sk" = "inventory"."inv_date_sk"
- AND CAST("date_dim"."d_date" AS DATETIME) <= CAST('2000-06-12' AS DATE)
- AND CAST("date_dim"."d_date" AS DATETIME) >= CAST('2000-04-13' AS DATE)
+ AND CAST("date_dim"."d_date" AS DATE) <= CAST('2000-06-12' AS DATE)
+ AND CAST("date_dim"."d_date" AS DATE) >= CAST('2000-04-13' AS DATE)
JOIN "item" AS "item"
ON "inventory"."inv_item_sk" = "item"."i_item_sk"
AND "item"."i_current_price" <= 1.49
@@ -3944,7 +3944,7 @@ WITH "catalog_sales_2" AS (
FROM "date_dim" AS "date_dim"
WHERE
"date_dim"."d_date" >= '2001-03-04'
- AND CAST("date_dim"."d_date" AS DATETIME) <= CAST('2001-06-02' AS DATE)
+ AND CAST("date_dim"."d_date" AS DATE) <= CAST('2001-06-02' AS DATE)
), "_u_0" AS (
SELECT
1.3 * AVG("catalog_sales"."cs_ext_discount_amt") AS "_col_0",
@@ -4510,8 +4510,8 @@ JOIN "inventory" AS "inventory"
AND "inventory"."inv_quantity_on_hand" >= 100
JOIN "date_dim" AS "date_dim"
ON "date_dim"."d_date_sk" = "inventory"."inv_date_sk"
- AND CAST("date_dim"."d_date" AS DATETIME) <= CAST('1999-05-05' AS DATE)
- AND CAST("date_dim"."d_date" AS DATETIME) >= CAST('1999-03-06' AS DATE)
+ AND CAST("date_dim"."d_date" AS DATE) <= CAST('1999-05-05' AS DATE)
+ AND CAST("date_dim"."d_date" AS DATE) >= CAST('1999-03-06' AS DATE)
WHERE
"item"."i_current_price" <= 50
AND "item"."i_current_price" >= 20
@@ -4787,8 +4787,8 @@ LEFT JOIN "catalog_returns" AS "catalog_returns"
AND "catalog_returns"."cr_order_number" = "catalog_sales"."cs_order_number"
JOIN "date_dim" AS "date_dim"
ON "catalog_sales"."cs_sold_date_sk" = "date_dim"."d_date_sk"
- AND CAST("date_dim"."d_date" AS DATETIME) <= CAST('2002-07-01' AS DATE)
- AND CAST("date_dim"."d_date" AS DATETIME) >= CAST('2002-05-02' AS DATE)
+ AND CAST("date_dim"."d_date" AS DATE) <= CAST('2002-07-01' AS DATE)
+ AND CAST("date_dim"."d_date" AS DATE) >= CAST('2002-05-02' AS DATE)
JOIN "item" AS "item"
ON "catalog_sales"."cs_item_sk" = "item"."i_item_sk"
AND "item"."i_current_price" <= 1.49
@@ -10318,8 +10318,8 @@ WITH "date_dim_2" AS (
"date_dim"."d_date" AS "d_date"
FROM "date_dim" AS "date_dim"
WHERE
- CAST("date_dim"."d_date" AS DATETIME) <= CAST('2001-09-15' AS DATE)
- AND CAST("date_dim"."d_date" AS DATETIME) >= CAST('2001-08-16' AS DATE)
+ CAST("date_dim"."d_date" AS DATE) <= CAST('2001-09-15' AS DATE)
+ AND CAST("date_dim"."d_date" AS DATE) >= CAST('2001-08-16' AS DATE)
), "store_2" AS (
SELECT
"store"."s_store_sk" AS "s_store_sk"
@@ -10828,8 +10828,8 @@ WITH "date_dim_2" AS (
"date_dim"."d_date" AS "d_date"
FROM "date_dim" AS "date_dim"
WHERE
- CAST("date_dim"."d_date" AS DATETIME) <= CAST('2000-09-25' AS DATE)
- AND CAST("date_dim"."d_date" AS DATETIME) >= CAST('2000-08-26' AS DATE)
+ CAST("date_dim"."d_date" AS DATE) <= CAST('2000-09-25' AS DATE)
+ AND CAST("date_dim"."d_date" AS DATE) >= CAST('2000-08-26' AS DATE)
), "item_2" AS (
SELECT
"item"."i_item_sk" AS "i_item_sk",
@@ -11109,8 +11109,8 @@ JOIN "store_sales" AS "store_sales"
ON "item"."i_item_sk" = "store_sales"."ss_item_sk"
JOIN "date_dim" AS "date_dim"
ON "date_dim"."d_date_sk" = "inventory"."inv_date_sk"
- AND CAST("date_dim"."d_date" AS DATETIME) <= CAST('1998-06-26' AS DATE)
- AND CAST("date_dim"."d_date" AS DATETIME) >= CAST('1998-04-27' AS DATE)
+ AND CAST("date_dim"."d_date" AS DATE) <= CAST('1998-06-26' AS DATE)
+ AND CAST("date_dim"."d_date" AS DATE) >= CAST('1998-04-27' AS DATE)
WHERE
"item"."i_current_price" <= 93
AND "item"."i_current_price" >= 63
@@ -12180,7 +12180,7 @@ WITH "web_sales_2" AS (
FROM "date_dim" AS "date_dim"
WHERE
"date_dim"."d_date" >= '2002-03-29'
- AND CAST("date_dim"."d_date" AS DATETIME) <= CAST('2002-06-27' AS DATE)
+ AND CAST("date_dim"."d_date" AS DATE) <= CAST('2002-06-27' AS DATE)
), "_u_0" AS (
SELECT
1.3 * AVG("web_sales"."ws_ext_discount_amt") AS "_col_0",
@@ -12321,7 +12321,7 @@ JOIN "date_dim" AS "date_dim"
AND "date_dim"."d_date_sk" = "ws1"."ws_ship_date_sk"
AND (
CAST('2000-3-01' AS DATE) + INTERVAL '60' DAY
- ) >= CAST("date_dim"."d_date" AS DATETIME)
+ ) >= CAST("date_dim"."d_date" AS DATE)
JOIN "web_site" AS "web_site"
ON "web_site"."web_company_name" = 'pri'
AND "web_site"."web_site_sk" = "ws1"."ws_web_site_sk"
@@ -12411,7 +12411,7 @@ JOIN "date_dim" AS "date_dim"
AND "date_dim"."d_date_sk" = "ws1"."ws_ship_date_sk"
AND (
CAST('2000-4-01' AS DATE) + INTERVAL '60' DAY
- ) >= CAST("date_dim"."d_date" AS DATETIME)
+ ) >= CAST("date_dim"."d_date" AS DATE)
JOIN "web_site" AS "web_site"
ON "web_site"."web_company_name" = 'pri'
AND "web_site"."web_site_sk" = "ws1"."ws_web_site_sk"
@@ -12595,8 +12595,8 @@ SELECT
FROM "store_sales" AS "store_sales"
JOIN "date_dim" AS "date_dim"
ON "date_dim"."d_date_sk" = "store_sales"."ss_sold_date_sk"
- AND CAST("date_dim"."d_date" AS DATETIME) <= CAST('2000-06-17' AS DATE)
- AND CAST("date_dim"."d_date" AS DATETIME) >= CAST('2000-05-18' AS DATE)
+ AND CAST("date_dim"."d_date" AS DATE) <= CAST('2000-06-17' AS DATE)
+ AND CAST("date_dim"."d_date" AS DATE) >= CAST('2000-05-18' AS DATE)
JOIN "item" AS "item"
ON "item"."i_category" IN ('Men', 'Home', 'Electronics')
AND "item"."i_item_sk" = "store_sales"."ss_item_sk"
diff --git a/tests/test_optimizer.py b/tests/test_optimizer.py
index 0fa4ff6ccc..4a41e4a774 100644
--- a/tests/test_optimizer.py
+++ b/tests/test_optimizer.py
@@ -132,7 +132,6 @@ def check_file(
func,
pretty=False,
execute=False,
- set_dialect=False,
only=None,
**kwargs,
):
@@ -158,7 +157,7 @@ def check_file(
validate_qualify_columns
)
- if set_dialect and dialect:
+ if dialect:
func_kwargs["dialect"] = dialect
future = pool.submit(parse_and_optimize, func, sql, dialect, **func_kwargs)
@@ -207,7 +206,6 @@ def test_optimize(self, logger):
pretty=True,
execute=True,
schema=schema,
- set_dialect=True,
)
def test_isolate_table_selects(self):
@@ -235,7 +233,6 @@ def test_qualify_tables(self):
optimizer.qualify_tables.qualify_tables,
db="db",
catalog="c",
- set_dialect=True,
)
def test_normalize(self):
@@ -446,11 +443,8 @@ def test_qualify_columns(self, logger):
qualify_columns,
execute=True,
schema=self.schema,
- set_dialect=True,
- )
- self.check_file(
- "qualify_columns_ddl", qualify_columns, schema=self.schema, set_dialect=True
)
+ self.check_file("qualify_columns_ddl", qualify_columns, schema=self.schema)
def test_qualify_columns__with_invisible(self):
schema = MappingSchema(self.schema, {"x": {"a"}, "y": {"b"}, "z": {"b"}})
@@ -475,7 +469,6 @@ def test_normalize_identifiers(self):
self.check_file(
"normalize_identifiers",
optimizer.normalize_identifiers.normalize_identifiers,
- set_dialect=True,
)
self.assertEqual(optimizer.normalize_identifiers.normalize_identifiers("a%").sql(), '"a%"')
@@ -484,14 +477,13 @@ def test_quote_identifiers(self):
self.check_file(
"quote_identifiers",
optimizer.qualify_columns.quote_identifiers,
- set_dialect=True,
)
def test_pushdown_projection(self):
self.check_file("pushdown_projections", pushdown_projections, schema=self.schema)
def test_simplify(self):
- self.check_file("simplify", simplify, set_dialect=True)
+ self.check_file("simplify", simplify)
expression = parse_one("SELECT a, c, b FROM table1 WHERE 1 = 1")
self.assertEqual(simplify(simplify(expression.find(exp.Where))).sql(), "WHERE TRUE")
| Bigquery annotate_types incorrectly sets TIMESTAMP as DATETIME even when schema type is provided
```python
import sqlglot
import sqlglot.errors
from sqlglot.optimizer import optimize
dialect = "bigquery"
schema = {"test": {"products": {"created_at": "TIMESTAMP"}}}
expression = sqlglot.parse_one("SELECT * FROM test.products WHERE created_at > '2024-10-01'", read=dialect)
optimized_expression = optimize(
expression,
dialect=dialect,
schema=schema,
)
formatted_sql = optimized_expression.sql(dialect=dialect, pretty=True)
print("@formatted_sql", formatted_sql)
```
formatted_sql is
```SQL
SELECT
`products`.`created_at` AS `created_at`
FROM `test`.`products` AS `products`
WHERE
`products`.`created_at` > CAST('2024-10-01' AS DATETIME)
```
which cannot be run due to
No matching signature for operator < for argument types: TIMESTAMP, DATETIME.
| ```
diff --git a/sqlglot/optimizer/canonicalize.py b/sqlglot/optimizer/canonicalize.py
index fd002622..4382dc70 100644
--- a/sqlglot/optimizer/canonicalize.py
+++ b/sqlglot/optimizer/canonicalize.py
@@ -138,7 +138,7 @@ def _coerce_date(a: exp.Expression, b: exp.Expression) -> None:
and b.type
and b.type.this in exp.DataType.TEXT_TYPES
):
- _replace_cast(b, exp.DataType.Type.DATETIME)
+ _replace_cast(b, a.type)
```
cc @barakalon | 1,732,096,964,000 | [] | Bug Report | [
"sqlglot/optimizer/canonicalize.py:canonicalize",
"sqlglot/optimizer/canonicalize.py:coerce_type",
"sqlglot/optimizer/canonicalize.py:_coerce_date",
"sqlglot/optimizer/canonicalize.py:_replace_cast"
] | [] | 4 |
tobymao/sqlglot | tobymao__sqlglot-4415 | 122ef5f41c4e29347026a81e6f6460ccf8e910ed | diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 5fa7d1ef13..6e7b21a6cc 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -5109,9 +5109,8 @@ def _parse_column_ops(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.
else:
field = self._parse_field(any_token=True, anonymous_func=True)
- if isinstance(field, exp.Func) and this:
- # bigquery allows function calls like x.y.count(...)
- # SAFE.SUBSTR(...)
+ if isinstance(field, (exp.Func, exp.Window)) and this:
+ # BQ & snowflake allow function calls like x.y.count(...), SAFE.SUBSTR(...) etc
# https://cloud.google.com/bigquery/docs/reference/standard-sql/functions-reference#function_call_rules
this = exp.replace_tree(
this,
@@ -5135,6 +5134,11 @@ def _parse_column_ops(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.
db=this.args.get("table"),
catalog=this.args.get("db"),
)
+ elif isinstance(field, exp.Window):
+ # Move the exp.Dot's to the window's function
+ window_func = self.expression(exp.Dot, this=this, expression=field.this)
+ field.set("this", window_func)
+ this = field
else:
this = self.expression(exp.Dot, this=this, expression=field)
| diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index 515a07c4f4..157947df46 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -2250,3 +2250,13 @@ def test_grant(self):
self.validate_identity(
"GRANT ALL PRIVILEGES ON FUNCTION mydb.myschema.ADD5(number) TO ROLE analyst"
)
+
+ def test_window_function_arg(self):
+ query = "SELECT * FROM TABLE(db.schema.FUNC(a) OVER ())"
+
+ ast = self.parse_one(query)
+ window = ast.find(exp.Window)
+
+ self.assertEqual(ast.sql("snowflake"), query)
+ self.assertEqual(len(list(ast.find_all(exp.Column))), 1)
+ self.assertEqual(window.this.sql("snowflake"), "db.schema.FUNC(a)")
| User defined function recognized as column (Snowflake dialect)
This query:
`select COL1,COL2 from some_table,TABLE(SOME_DB.SOME_SCHEMA.TABLE_FUNC(value1, value2) over (PARTITION BY value1))`
`p = sqlglot.parse_one(query, dialect=sqlglot.Dialects.SNOWFLAKE)`
creates this ast:
```
Select(
  expressions=[
    Column(
      this=Identifier(this=COL1, quoted=False)),
    Column(
      this=Identifier(this=COL2, quoted=False))],
  from=From(
    this=Table(
      this=Identifier(this=some_table, quoted=False))),
  joins=[
    Join(
      this=Table(
        this=Anonymous(
          this=TABLE,
          expressions=[
            Column(
              this=Window(
                this=Anonymous(
                  this=TABLE_FUNC,
                  expressions=[
                    Column(
                      this=Identifier(this=value1, quoted=False)),
                    Column(
                      this=Identifier(this=value2, quoted=False))]),
                partition_by=[
                  Column(
                    this=Identifier(this=value1, quoted=False))],
                over=OVER),
              table=Identifier(this=SOME_SCHEMA, quoted=False),
              db=Identifier(this=SOME_DB, quoted=False))])))])
```
Inside the anonymous TABLE (which is itself a function that returns values as a table you can select from), we can see that the expressions list contains only one expression, which should be another function call but is instead recognized as a Column.
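A quick way to see the misclassification — a sketch only, using the query from above:
```python
import sqlglot
from sqlglot import exp

sql = (
    "select COL1, COL2 from some_table, "
    "TABLE(SOME_DB.SOME_SCHEMA.TABLE_FUNC(value1, value2) over (PARTITION BY value1))"
)
ast = sqlglot.parse_one(sql, read="snowflake")

# The window-wrapped table function is parsed as an exp.Column, so it shows
# up alongside the real columns instead of as a dotted function call.
for column in ast.find_all(exp.Column):
    print(column.sql(dialect="snowflake"))
```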
Verified this still happens on 25.31.4.
dialect: Snowflake
| 1,731,927,378,000 | [] | Bug Report | [
"sqlglot/parser.py:Parser._parse_column_ops"
] | [] | 1 |
|
tobymao/sqlglot | tobymao__sqlglot-4390 | e7b67e0c280179188ce1bca650735978b758dca1 | diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index a2f118fb5c..5fa7d1ef13 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -1738,9 +1738,13 @@ def _parse_drop(self, exists: bool = False) -> exp.Drop | exp.Command:
concurrently = self._match_text_seq("CONCURRENTLY")
if_exists = exists or self._parse_exists()
- table = self._parse_table_parts(
- schema=True, is_db_reference=self._prev.token_type == TokenType.SCHEMA
- )
+
+ if kind == "COLUMN":
+ this = self._parse_column()
+ else:
+ this = self._parse_table_parts(
+ schema=True, is_db_reference=self._prev.token_type == TokenType.SCHEMA
+ )
cluster = self._parse_on_property() if self._match(TokenType.ON) else None
@@ -1752,7 +1756,7 @@ def _parse_drop(self, exists: bool = False) -> exp.Drop | exp.Command:
return self.expression(
exp.Drop,
exists=if_exists,
- this=table,
+ this=this,
expressions=expressions,
kind=self.dialect.CREATABLE_KIND_MAPPING.get(kind) or kind,
temporary=temporary,
| diff --git a/tests/test_parser.py b/tests/test_parser.py
index ba1240c4f7..b60d719341 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -879,3 +879,8 @@ def test_odbc_date_literals(self):
expr = parse_one(sql)
self.assertIsInstance(expr, exp.Insert)
self.assertIsInstance(expr.expression.expressions[0].expressions[0], cls)
+
+ def test_drop_column(self):
+ ast = parse_one("ALTER TABLE tbl DROP COLUMN col")
+ self.assertEqual(len(list(ast.find_all(exp.Table))), 1)
+ self.assertEqual(len(list(ast.find_all(exp.Column))), 1)
| Simple query, wrong tables list
Very simple query, wrong tables list! It considers `activity_id` as a table!
```python
sql='ALTER TABLE ride DROP COLUMN activity_id'
list(sqlglot.parse_one(sql, read='mysql').find_all(sqlglot.exp.Table))
# list(sqlglot.parse_one(sql, read='mysql').find_all(sqlglot.exp.Table))
# 0 = {Table} Table(\n this=Identifier(this=ride, quoted=False))
# 1 = {Table} Table(\n this=Identifier(this=activity_id, quoted=False))
```
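A sketch of the check the fix targets, mirroring the test added in this change:
```python
import sqlglot
from sqlglot import exp

ast = sqlglot.parse_one("ALTER TABLE ride DROP COLUMN activity_id", read="mysql")
# After the fix, the dropped column is parsed as a column, so only `ride`
# remains in the table list.
print(len(list(ast.find_all(exp.Table))))   # expected: 1
print(len(list(ast.find_all(exp.Column))))  # expected: 1
```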
| 1,731,576,547,000 | [] | Bug Report | [
"sqlglot/parser.py:Parser._parse_drop"
] | [] | 1 |
|
tobymao/sqlglot | tobymao__sqlglot-4377 | 358cafa56ce4b896e2473e8ba96c08ba36cdec80 | diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 613c3b39fc..f578d79ed1 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -301,7 +301,7 @@ def copy(self):
"""
return deepcopy(self)
- def add_comments(self, comments: t.Optional[t.List[str]] = None) -> None:
+ def add_comments(self, comments: t.Optional[t.List[str]] = None, prepend: bool = False) -> None:
if self.comments is None:
self.comments = []
@@ -313,7 +313,12 @@ def add_comments(self, comments: t.Optional[t.List[str]] = None) -> None:
k, *v = kv.split("=")
value = v[0].strip() if v else True
self.meta[k.strip()] = value
- self.comments.append(comment)
+
+ if not prepend:
+ self.comments.append(comment)
+
+ if prepend:
+ self.comments = comments + self.comments
def pop_comments(self) -> t.List[str]:
comments = self.comments or []
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index a55712c01e..498c4f549e 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -593,6 +593,7 @@ class Generator(metaclass=_Generator):
WITH_SEPARATED_COMMENTS: t.Tuple[t.Type[exp.Expression], ...] = (
exp.Command,
exp.Create,
+ exp.Describe,
exp.Delete,
exp.Drop,
exp.From,
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index df68d4a152..a2f118fb5c 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -806,7 +806,7 @@ class Parser(metaclass=_Parser):
kind=self._parse_var_from_options(self.USABLES, raise_unmatched=False),
this=self._parse_table(schema=False),
),
- TokenType.SEMICOLON: lambda self: self.expression(exp.Semicolon),
+ TokenType.SEMICOLON: lambda self: exp.Semicolon(),
}
UNARY_PARSERS = {
@@ -1715,7 +1715,10 @@ def _parse_statement(self) -> t.Optional[exp.Expression]:
return None
if self._match_set(self.STATEMENT_PARSERS):
- return self.STATEMENT_PARSERS[self._prev.token_type](self)
+ comments = self._prev_comments
+ stmt = self.STATEMENT_PARSERS[self._prev.token_type](self)
+ stmt.add_comments(comments, prepend=True)
+ return stmt
if self._match_set(self.dialect.tokenizer.COMMANDS):
return self._parse_command()
@@ -1748,7 +1751,6 @@ def _parse_drop(self, exists: bool = False) -> exp.Drop | exp.Command:
return self.expression(
exp.Drop,
- comments=start.comments,
exists=if_exists,
this=table,
expressions=expressions,
@@ -1772,7 +1774,6 @@ def _parse_exists(self, not_: bool = False) -> t.Optional[bool]:
def _parse_create(self) -> exp.Create | exp.Command:
# Note: this can't be None because we've matched a statement parser
start = self._prev
- comments = self._prev_comments
replace = (
start.token_type == TokenType.REPLACE
@@ -1919,7 +1920,6 @@ def extend_props(temp_props: t.Optional[exp.Properties]) -> None:
create_kind_text = create_token.text.upper()
return self.expression(
exp.Create,
- comments=comments,
this=this,
kind=self.dialect.CREATABLE_KIND_MAPPING.get(create_kind_text) or create_kind_text,
replace=replace,
@@ -2659,7 +2659,7 @@ def parse_conditional_insert() -> t.Optional[exp.ConditionalInsert]:
)
def _parse_insert(self) -> t.Union[exp.Insert, exp.MultitableInserts]:
- comments = ensure_list(self._prev_comments)
+ comments = []
hint = self._parse_hint()
overwrite = self._match(TokenType.OVERWRITE)
ignore = self._match(TokenType.IGNORE)
@@ -2845,7 +2845,6 @@ def _parse_delete(self) -> exp.Delete:
# This handles MySQL's "Multiple-Table Syntax"
# https://dev.mysql.com/doc/refman/8.0/en/delete.html
tables = None
- comments = self._prev_comments
if not self._match(TokenType.FROM, advance=False):
tables = self._parse_csv(self._parse_table) or None
@@ -2853,7 +2852,6 @@ def _parse_delete(self) -> exp.Delete:
return self.expression(
exp.Delete,
- comments=comments,
tables=tables,
this=self._match(TokenType.FROM) and self._parse_table(joins=True),
using=self._match(TokenType.USING) and self._parse_table(joins=True),
@@ -2864,13 +2862,11 @@ def _parse_delete(self) -> exp.Delete:
)
def _parse_update(self) -> exp.Update:
- comments = self._prev_comments
this = self._parse_table(joins=True, alias_tokens=self.UPDATE_ALIAS_TOKENS)
expressions = self._match(TokenType.SET) and self._parse_csv(self._parse_equality)
returning = self._parse_returning()
return self.expression(
exp.Update,
- comments=comments,
**{ # type: ignore
"this": this,
"expressions": expressions,
| diff --git a/tests/dialects/test_mysql.py b/tests/dialects/test_mysql.py
index 52b04eae84..d7aef4f8d9 100644
--- a/tests/dialects/test_mysql.py
+++ b/tests/dialects/test_mysql.py
@@ -81,6 +81,10 @@ def test_ddl(self):
self.validate_identity(
"CREATE OR REPLACE VIEW my_view AS SELECT column1 AS `boo`, column2 AS `foo` FROM my_table WHERE column3 = 'some_value' UNION SELECT q.* FROM fruits_table, JSON_TABLE(Fruits, '$[*]' COLUMNS(id VARCHAR(255) PATH '$.$id', value VARCHAR(255) PATH '$.value')) AS q",
)
+ self.validate_identity(
+ "/*left*/ EXPLAIN SELECT /*hint*/ col FROM t1 /*right*/",
+ "/* left */ DESCRIBE /* hint */ SELECT col FROM t1 /* right */",
+ )
self.validate_identity(
"CREATE TABLE t (name VARCHAR)",
"CREATE TABLE t (name TEXT)",
diff --git a/tests/fixtures/identity.sql b/tests/fixtures/identity.sql
index 33199de377..0e13a64fd6 100644
--- a/tests/fixtures/identity.sql
+++ b/tests/fixtures/identity.sql
@@ -882,4 +882,4 @@ GRANT SELECT ON TABLE tbl TO user
GRANT SELECT, INSERT ON FUNCTION tbl TO user
GRANT SELECT ON orders TO ROLE PUBLIC
GRANT SELECT ON nation TO alice WITH GRANT OPTION
-GRANT DELETE ON SCHEMA finance TO bob
\ No newline at end of file
+GRANT DELETE ON SCHEMA finance TO bob
| Issue with Comment Handling in `Describe` Output
Hi,
I encountered an issue with the [`Describe`](https://sqlglot.com/sqlglot/expressions.html#Describe) functionality in `sqlglot`. The comments in the SQL query are not being preserved as expected.
Here’s a minimal example to illustrate the problem:
```python
import os
# os.environ['SQLGLOTRS_TOKENIZER'] = '0'
import importlib.metadata
import sqlglot
package_name = 'sqlglot'
version = importlib.metadata.version(package_name)
print(f"{package_name} version: {version}\n")
sql = '/*left*/ EXPLAIN SELECT /*hint*/ col FROM t1 /*right*/'
ast = sqlglot.parse(sql, read='mysql')
print(ast)
print(list(map(lambda x: x.sql(dialect='mysql'), ast)))
```
### Output:
```
sqlglot version: 25.30.0
[Describe(
this=Select(
expressions=[
Column(
this=Identifier(this=col, quoted=False))],
from=From(
this=Table(
this=Identifier(this=t1, quoted=False),
_comments=[
right])),
_comments=[
hint]))]
['DESCRIBE /* hint */ SELECT col FROM t1 /* right */']
```
### Expected Output:
```sql
/*left*/ DESCRIBE /* hint */ SELECT col FROM t1 /* right */
```
### Actual Output:
```sql
DESCRIBE /* hint */ SELECT col FROM t1 /* right */
```
| 1,731,400,910,000 | [] | Bug Report | [
"sqlglot/expressions.py:Expression.add_comments",
"sqlglot/parser.py:Parser._parse_statement",
"sqlglot/parser.py:Parser._parse_drop",
"sqlglot/parser.py:Parser._parse_create",
"sqlglot/parser.py:Parser._parse_insert",
"sqlglot/parser.py:Parser._parse_delete",
"sqlglot/parser.py:Parser._parse_update"
] | [] | 7 |
|
tobymao/sqlglot | tobymao__sqlglot-4366 | 79c675a49fb44a6a7a97ea0de79822d8571724be | diff --git a/sqlglot/dialects/duckdb.py b/sqlglot/dialects/duckdb.py
index bf1abe2f1d..a183a883f5 100644
--- a/sqlglot/dialects/duckdb.py
+++ b/sqlglot/dialects/duckdb.py
@@ -156,18 +156,24 @@ def _struct_sql(self: DuckDB.Generator, expression: exp.Struct) -> str:
# BigQuery allows inline construction such as "STRUCT<a STRING, b INTEGER>('str', 1)" which is
# canonicalized to "ROW('str', 1) AS STRUCT(a TEXT, b INT)" in DuckDB
- # The transformation to ROW will take place if a cast to STRUCT / ARRAY of STRUCTs is found
+ # The transformation to ROW will take place if:
+ # 1. The STRUCT itself does not have proper fields (key := value) as a "proper" STRUCT would
+ # 2. A cast to STRUCT / ARRAY of STRUCTs is found
ancestor_cast = expression.find_ancestor(exp.Cast)
- is_struct_cast = ancestor_cast and any(
- casted_type.is_type(exp.DataType.Type.STRUCT)
- for casted_type in ancestor_cast.find_all(exp.DataType)
+ is_bq_inline_struct = (
+ (expression.find(exp.PropertyEQ) is None)
+ and ancestor_cast
+ and any(
+ casted_type.is_type(exp.DataType.Type.STRUCT)
+ for casted_type in ancestor_cast.find_all(exp.DataType)
+ )
)
for i, expr in enumerate(expression.expressions):
is_property_eq = isinstance(expr, exp.PropertyEQ)
value = expr.expression if is_property_eq else expr
- if is_struct_cast:
+ if is_bq_inline_struct:
args.append(self.sql(value))
else:
key = expr.name if is_property_eq else f"_{i}"
@@ -175,7 +181,7 @@ def _struct_sql(self: DuckDB.Generator, expression: exp.Struct) -> str:
csv_args = ", ".join(args)
- return f"ROW({csv_args})" if is_struct_cast else f"{{{csv_args}}}"
+ return f"ROW({csv_args})" if is_bq_inline_struct else f"{{{csv_args}}}"
def _datatype_sql(self: DuckDB.Generator, expression: exp.DataType) -> str:
| diff --git a/tests/dialects/test_duckdb.py b/tests/dialects/test_duckdb.py
index 5b3b2a4ff4..3d4fe9cc4a 100644
--- a/tests/dialects/test_duckdb.py
+++ b/tests/dialects/test_duckdb.py
@@ -1154,6 +1154,7 @@ def test_cast(self):
self.validate_identity("CAST(x AS BINARY)", "CAST(x AS BLOB)")
self.validate_identity("CAST(x AS VARBINARY)", "CAST(x AS BLOB)")
self.validate_identity("CAST(x AS LOGICAL)", "CAST(x AS BOOLEAN)")
+ self.validate_identity("""CAST({'i': 1, 's': 'foo'} AS STRUCT("s" TEXT, "i" INT))""")
self.validate_identity(
"CAST(ROW(1, ROW(1)) AS STRUCT(number BIGINT, row STRUCT(number BIGINT)))"
)
@@ -1163,11 +1164,11 @@ def test_cast(self):
)
self.validate_identity(
"CAST([[STRUCT_PACK(a := 1)]] AS STRUCT(a BIGINT)[][])",
- "CAST([[ROW(1)]] AS STRUCT(a BIGINT)[][])",
+ "CAST([[{'a': 1}]] AS STRUCT(a BIGINT)[][])",
)
self.validate_identity(
"CAST([STRUCT_PACK(a := 1)] AS STRUCT(a BIGINT)[])",
- "CAST([ROW(1)] AS STRUCT(a BIGINT)[])",
+ "CAST([{'a': 1}] AS STRUCT(a BIGINT)[])",
)
self.validate_identity(
"STRUCT_PACK(a := 'b')::json",
@@ -1175,7 +1176,7 @@ def test_cast(self):
)
self.validate_identity(
"STRUCT_PACK(a := 'b')::STRUCT(a TEXT)",
- "CAST(ROW('b') AS STRUCT(a TEXT))",
+ "CAST({'a': 'b'} AS STRUCT(a TEXT))",
)
self.validate_all(
| BUG(duckdb): Compiling STRUCTs throws away field name info, which is sometimes needed
**Fully reproducible code snippet**
```python
import sqlglot
print(sqlglot.__version__)
# 25.28.1.dev12
inp = """SELECT CAST({'i':1, 's':'foo'} AS STRUCT("s" TEXT, "i" INT));"""
e = sqlglot.parse_one(inp, "duckdb")
actual = e.sql("duckdb")
print(actual)
# SELECT CAST(ROW(1, 'foo') AS STRUCT("s" TEXT, "i" INT));
```
If you run `actual` on https://shell.duckdb.org/, you get "Conversion Error: Could not convert string 'foo' to INT32".
This is because the names of the fields are important, and the simple `ROW(1, 'foo')` that is currently given as output isn't adequate for this usage.
I would expect that `actual == inp`. I think this should work even if the struct values are columns instead of literals, e.g. `{'i':my_int_col, 's':my_str_col}` should roundtrip exactly.
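A hedged sketch of that column-valued case (the column and table names below are made up for illustration):
```python
import sqlglot

# Hypothetical column/table names, only for illustration.
sql = """SELECT CAST({'i': my_int_col, 's': my_str_col} AS STRUCT("s" TEXT, "i" INT)) FROM t"""
out = sqlglot.parse_one(sql, "duckdb").sql("duckdb")
# Expectation: the named {'i': ..., 's': ...} struct literal is preserved,
# so DuckDB can match fields by name when casting.
print(out)
```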
I originally found this in [this ibis issue](https://github.com/ibis-project/ibis/issues/10417), but I think this should be fixed at the sqlglot level. Curious if you will agree.
| 1,731,317,324,000 | [] | Bug Report | [
"sqlglot/dialects/duckdb.py:_struct_sql"
] | [] | 1 |
|
tobymao/sqlglot | tobymao__sqlglot-3901 | e0cd7e20298f84dc245676ecded6f174cf1c9c3e | diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index c6b5198977..11a58346aa 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -3267,8 +3267,10 @@ def _parse_join(
kwargs["on"] = self._parse_assignment()
elif self._match(TokenType.USING):
kwargs["using"] = self._parse_using_identifiers()
- elif not isinstance(kwargs["this"], exp.Unnest) and not (
- kind and kind.token_type == TokenType.CROSS
+ elif (
+ not (outer_apply or cross_apply)
+ and not isinstance(kwargs["this"], exp.Unnest)
+ and not (kind and kind.token_type == TokenType.CROSS)
):
index = self._index
joins: t.Optional[list] = list(self._parse_joins())
| diff --git a/tests/test_parser.py b/tests/test_parser.py
index f06f1e11b5..ff82e08cc5 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -699,77 +699,19 @@ def test_pivot_columns(self):
def test_parse_nested(self):
now = time.time()
- query = parse_one(
- """
- SELECT *
- FROM a
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- LEFT JOIN b ON a.id = b.id
- """
- )
-
+ query = parse_one("SELECT * FROM a " + ("LEFT JOIN b ON a.id = b.id " * 38))
self.assertIsNotNone(query)
+ self.assertLessEqual(time.time() - now, 0.1)
- query = parse_one(
- """
- SELECT *
- FROM a
- LEFT JOIN UNNEST(ARRAY[])
- LEFT JOIN UNNEST(ARRAY[])
- LEFT JOIN UNNEST(ARRAY[])
- LEFT JOIN UNNEST(ARRAY[])
- LEFT JOIN UNNEST(ARRAY[])
- LEFT JOIN UNNEST(ARRAY[])
- LEFT JOIN UNNEST(ARRAY[])
- LEFT JOIN UNNEST(ARRAY[])
- LEFT JOIN UNNEST(ARRAY[])
- LEFT JOIN UNNEST(ARRAY[])
- LEFT JOIN UNNEST(ARRAY[])
- LEFT JOIN UNNEST(ARRAY[])
- LEFT JOIN UNNEST(ARRAY[])
- LEFT JOIN UNNEST(ARRAY[])
- LEFT JOIN UNNEST(ARRAY[])
- """
- )
+ now = time.time()
+ query = parse_one("SELECT * FROM a " + ("LEFT JOIN UNNEST(ARRAY[]) " * 15))
+ self.assertIsNotNone(query)
+ self.assertLessEqual(time.time() - now, 0.1)
+ now = time.time()
+ query = parse_one("SELECT * FROM a " + ("OUTER APPLY (SELECT * FROM b) " * 30))
self.assertIsNotNone(query)
- self.assertLessEqual(time.time() - now, 0.2)
+ self.assertLessEqual(time.time() - now, 0.1)
def test_parse_properties(self):
self.assertEqual(
| Parsing APPLY doubles execution time per APPLY statement
SQLGlot versions: tested with 25.9.0 and 25.8.1
Dialect: "tsql"
Description: When parsing SQL using an APPLY statement, each successive APPLY seems to double parsing time.
I was parsing a (really badly written) SQL query and noticed my code sort of just stopped. I realized that the query had an excessive number of APPLY statements (a really ridiculous number). I broke it down and wrote a dummy query where I slowly started adding apply statements one by one and noticed the query times doubled every time I added an APPLY statement.
Some quick numbers slowly adding 0..15 APPLY statements to the query:
- Testing with 0 APPLY statements
- test_apply executed in 0.001636 seconds
- Testing with 1 APPLY statements
- test_apply executed in 0.002330 seconds
- Testing with 2 APPLY statements
- test_apply executed in 0.004563 seconds
- Testing with 3 APPLY statements
- test_apply executed in 0.009903 seconds
- Testing with 4 APPLY statements
- test_apply executed in 0.019378 seconds
- Testing with 5 APPLY statements
- test_apply executed in 0.035283 seconds
- Testing with 6 APPLY statements
- test_apply executed in 0.065637 seconds
- Testing with 7 APPLY statements
- test_apply executed in 0.128544 seconds
- Testing with 8 APPLY statements
- test_apply executed in 0.258765 seconds
- Testing with 9 APPLY statements
- test_apply executed in 0.499099 seconds
- Testing with 10 APPLY statements
- test_apply executed in 1.008456 seconds
- Testing with 11 APPLY statements
- test_apply executed in 2.009916 seconds
- Testing with 12 APPLY statements
- test_apply executed in 3.927725 seconds
- Testing with 13 APPLY statements
- test_apply executed in 7.998525 seconds
- Testing with 14 APPLY statements
- test_apply executed in 16.154987 seconds
- Testing with 15 APPLY statements
- test_apply executed in 31.792223 seconds
Each time I add one more APPLY, it takes 2x longer than the prior run.
I'm attaching my sample "apply generator" that just spins out N APPLY blocks and does some very basic timing.
```
from sqlglot import parse_one
import time
num_apply = 15
def generate_apply(apply_n=0):
apply_template = """
OUTER APPLY (
SELECT TOP 1
*
FROM my_other_catalog.my_other_schema.my_other_table_{i} b
WHERE
b.id = a.id
ORDER BY
b.a_date desc
) b{i}
"""
apply = ""
for i in range(apply_n):
apply = apply + eval(f'f"""{apply_template}"""')
query_template = f"""
SELECT
*
FROM
my_catalog.my_schema.my_table a
{apply}
"""
return query_template
def timing_decorator(func):
def wrapper(*args, **kwargs):
start_time = time.time()
result = func(*args, **kwargs)
end_time = time.time()
print(f"{func.__name__} executed in {end_time - start_time:.6f} seconds")
return result
return wrapper
@timing_decorator
def test_apply(sql):
x = parse_one(sql=sql, dialect="tsql")
for i in range(num_apply+1):
sql = generate_apply(apply_n=i)
print(f"Testing with {i} APPLY statements")
#print(sql)
test_apply(sql=sql)
```
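A more compact reproduction of the same blow-up, using the pattern that appears in the test patch above; timings are indicative only:
```python
import time

from sqlglot import parse_one

for n in (10, 20, 30):
    sql = "SELECT * FROM a " + ("OUTER APPLY (SELECT * FROM b) " * n)
    start = time.time()
    parse_one(sql, read="tsql")
    # Before the fix, each additional APPLY roughly doubles the parse time.
    print(n, round(time.time() - start, 3))
```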
| Also tested this using a LEFT JOIN LATERAL and dialect set to "databricks" - same timing
Thanks for the detailed report, I can reproduce these times. We'll take a look. | 1,723,554,402,000 | [] | Performance Issue | [
"sqlglot/parser.py:Parser._parse_join"
] | [] | 1 |
tobymao/sqlglot | tobymao__sqlglot-3436 | 30f9d30d8ab3727a43b1e6f363f28631cbfa7f92 | diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 229af18aec..8f5050ee31 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -1678,6 +1678,7 @@ def _parse_sequence_properties(self) -> t.Optional[exp.SequenceProperties]:
index = self._index
while self._curr:
+ self._match(TokenType.COMMA)
if self._match_text_seq("INCREMENT"):
self._match_text_seq("BY")
self._match_text_seq("=")
| diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index d03412cace..c9150ff3ee 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -1223,6 +1223,14 @@ def test_ddl(self):
"CREATE OR REPLACE FUNCTION my_udtf(foo BOOLEAN) RETURNS TABLE(col1 ARRAY(INT)) AS $$ WITH t AS (SELECT CAST([1, 2, 3] AS ARRAY(INT)) AS c) SELECT c FROM t $$",
"CREATE OR REPLACE FUNCTION my_udtf(foo BOOLEAN) RETURNS TABLE (col1 ARRAY(INT)) AS ' WITH t AS (SELECT CAST([1, 2, 3] AS ARRAY(INT)) AS c) SELECT c FROM t '",
)
+ self.validate_identity(
+ "CREATE SEQUENCE seq1 WITH START=1, INCREMENT=1 ORDER",
+ "CREATE SEQUENCE seq1 START=1 INCREMENT BY 1 ORDER",
+ )
+ self.validate_identity(
+ "CREATE SEQUENCE seq1 WITH START=1 INCREMENT=1 ORDER",
+ "CREATE SEQUENCE seq1 START=1 INCREMENT=1 ORDER",
+ )
self.validate_all(
"CREATE TABLE orders_clone CLONE orders",
| Allow comma-separated values for `CREATE SEQUENCE` properties (Snowflake)
Allow comma-separated values for `CREATE SEQUENCE` properties in Snowflake queries.
This query can be parsed successfully (results is an instance of `exp.Create`):
```
query = "CREATE SEQUENCE seq1 WITH START=1 INCREMENT=1 ORDER"
result = parse_one(query, dialect="snowflake")
print(type(result))
--> <class 'sqlglot.expressions.Create'>
```
... whereas the following query cannot be fully parsed and is returned as a generic `exp.Command` as fallback:
```
query = "CREATE SEQUENCE seq1 WITH START=1, INCREMENT=1 ORDER"
result = parse_one(query, dialect="snowflake")
print(type(result))
--> <class 'sqlglot.expressions.Command'>
```
(note - the only subtle difference is the comma `,` between the `START` and `INCREMENT` properties)
Note that the second query syntax may not be covered in the Snowflake docs, but it is indeed a valid syntax that can be executed against Snowflake (and some Snowflake client apps/SDKs are generating queries that are using this syntax, unfortunately).
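A sketch of the behaviour expected once comma-separated properties are accepted, mirroring the reporter's check:
```python
import sqlglot
from sqlglot import exp

for query in (
    "CREATE SEQUENCE seq1 WITH START=1 INCREMENT=1 ORDER",
    "CREATE SEQUENCE seq1 WITH START=1, INCREMENT=1 ORDER",
):
    result = sqlglot.parse_one(query, dialect="snowflake")
    # Both forms should parse to exp.Create instead of falling back to the
    # generic exp.Command.
    print(type(result).__name__, isinstance(result, exp.Create))
```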
| 1,715,204,998,000 | [] | Feature Request | [
"sqlglot/parser.py:Parser._parse_sequence_properties"
] | [] | 1 |
|
tobymao/sqlglot | tobymao__sqlglot-3417 | e1b6483d5e26d556f6a3dd82c6d35f475c189c2b | diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index b0a25167ba..51022bbe6b 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -5974,7 +5974,7 @@ def _parse_set_item_assignment(
if kind in ("GLOBAL", "SESSION") and self._match_text_seq("TRANSACTION"):
return self._parse_set_transaction(global_=kind == "GLOBAL")
- left = self._parse_primary() or self._parse_id_var()
+ left = self._parse_primary() or self._parse_column()
assignment_delimiter = self._match_texts(("=", "TO"))
if not left or (self.SET_REQUIRES_ASSIGNMENT_DELIMITER and not assignment_delimiter):
| diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index dae835574b..97b83ab930 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -10,14 +10,6 @@ class TestSnowflake(Validator):
dialect = "snowflake"
def test_snowflake(self):
- self.validate_identity(
- "MERGE INTO my_db AS ids USING (SELECT new_id FROM my_model WHERE NOT col IS NULL) AS new_ids ON ids.type = new_ids.type AND ids.source = new_ids.source WHEN NOT MATCHED THEN INSERT VALUES (new_ids.new_id)"
- )
- self.validate_identity("ALTER TABLE table1 CLUSTER BY (name DESC)")
- self.validate_identity(
- "INSERT OVERWRITE TABLE t SELECT 1", "INSERT OVERWRITE INTO t SELECT 1"
- )
- self.validate_identity("SELECT rename, replace")
expr = parse_one("SELECT APPROX_TOP_K(C4, 3, 5) FROM t")
expr.selects[0].assert_is(exp.AggFunc)
self.assertEqual(expr.sql(dialect="snowflake"), "SELECT APPROX_TOP_K(C4, 3, 5) FROM t")
@@ -43,6 +35,9 @@ def test_snowflake(self):
)""",
)
+ self.validate_identity("ALTER TABLE table1 CLUSTER BY (name DESC)")
+ self.validate_identity("SELECT rename, replace")
+ self.validate_identity("ALTER TABLE table1 SET TAG foo.bar = 'baz'")
self.validate_identity("SELECT TIMEADD(HOUR, 2, CAST('09:05:03' AS TIME))")
self.validate_identity("SELECT CAST(OBJECT_CONSTRUCT('a', 1) AS MAP(VARCHAR, INT))")
self.validate_identity("SELECT CAST(OBJECT_CONSTRUCT('a', 1) AS OBJECT(a CHAR NOT NULL))")
@@ -95,6 +90,12 @@ def test_snowflake(self):
self.validate_identity("SELECT CONVERT_TIMEZONE('UTC', 'America/Los_Angeles', col)")
self.validate_identity("ALTER TABLE a SWAP WITH b")
self.validate_identity("SELECT MATCH_CONDITION")
+ self.validate_identity(
+ "MERGE INTO my_db AS ids USING (SELECT new_id FROM my_model WHERE NOT col IS NULL) AS new_ids ON ids.type = new_ids.type AND ids.source = new_ids.source WHEN NOT MATCHED THEN INSERT VALUES (new_ids.new_id)"
+ )
+ self.validate_identity(
+ "INSERT OVERWRITE TABLE t SELECT 1", "INSERT OVERWRITE INTO t SELECT 1"
+ )
self.validate_identity(
'DESCRIBE TABLE "SNOWFLAKE_SAMPLE_DATA"."TPCDS_SF100TCL"."WEB_SITE" type=stage'
)
diff --git a/tests/test_parser.py b/tests/test_parser.py
index 6bcdb643a5..1b9639a785 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -503,7 +503,7 @@ def test_set_expression(self):
self.assertIsInstance(set_item, exp.SetItem)
self.assertIsInstance(set_item.this, exp.EQ)
- self.assertIsInstance(set_item.this.this, exp.Identifier)
+ self.assertIsInstance(set_item.this.this, exp.Column)
self.assertIsInstance(set_item.this.expression, exp.Literal)
self.assertEqual(set_item.args.get("kind"), "SESSION")
| SET TAG identifier containing dots is parsed as Command
Would love for an identifier containing dots to be parsed as a `SetItem`, i.e., like an identifier without dots.
Without dots, is parsed as SetItem:
```
❯ sqlglot.parse_one("ALTER TABLE table1 SET TAG foo='baz'", read="snowflake")
AlterTable(
this=Table(
this=Identifier(this=table1, quoted=False)),
actions=[
Set(
expressions=[
SetItem(
this=EQ(
this=Identifier(this=foo, quoted=False),
expression=Literal(this=baz, is_string=True)))],
unset=False,
tag=True)])
```
vs with dots:
```
❯ sqlglot.parse_one("ALTER TABLE table1 SET TAG foo.bar='baz'", read="snowflake")
AlterTable(
this=Table(
this=Identifier(this=table1, quoted=False)),
actions=[
Command(this=TAG, expression=foo.bar='baz')])
```
In sqlglot 15 this was parsed as SetTag:
```
❯ python -c 'import sqlglot; print(repr(sqlglot.parse_one("ALTER TABLE table1 SET TAG foo.bar=''baz''", read="snowflake")));'
(ALTERTABLE this:
(TABLE this:
(IDENTIFIER this: table1, quoted: False)), actions:
(SETTAG expressions:
(EQ this:
(COLUMN this:
(IDENTIFIER this: bar, quoted: False), table:
(IDENTIFIER this: foo, quoted: False)), expression:
(LITERAL this: baz, is_string: True)), unset: False))
```
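A sketch of the parse shape this request asks for, matching the test changes above (the dotted tag becomes a Column inside a SetItem rather than an opaque Command):
```python
import sqlglot
from sqlglot import exp

ast = sqlglot.parse_one("ALTER TABLE table1 SET TAG foo.bar='baz'", read="snowflake")
action = ast.args["actions"][0]
# Expected after the change: a Set/SetItem tree with the dotted tag parsed
# as an exp.Column; before the change this prints a Command and no Column.
print(type(action).__name__)
print(action.find(exp.Column))
```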
| 1,715,084,497,000 | [] | Feature Request | [
"sqlglot/parser.py:Parser._parse_set_item_assignment"
] | [] | 1 |
|
tobymao/sqlglot | tobymao__sqlglot-3167 | df4ce17f24bbb16a64172e351f4e27ac74de668a | diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py
index 6022132354..0ffe4cf0a3 100644
--- a/sqlglot/dialects/snowflake.py
+++ b/sqlglot/dialects/snowflake.py
@@ -48,10 +48,12 @@ def _builder(args: t.List) -> exp.Func:
return exp.UnixToTime(this=value, scale=seq_get(args, 1))
if not is_float(value.this):
return build_formatted_time(exp.StrToTime, "snowflake")(args)
- if kind == exp.DataType.Type.DATE and not int_value:
- formatted_exp = build_formatted_time(exp.TsOrDsToDate, "snowflake")(args)
- formatted_exp.set("safe", safe)
- return formatted_exp
+
+ if len(args) == 2 and kind == exp.DataType.Type.DATE:
+ formatted_exp = build_formatted_time(exp.TsOrDsToDate, "snowflake")(args)
+ formatted_exp.set("safe", safe)
+ return formatted_exp
+
return exp.Anonymous(this=name, expressions=args)
return _builder
| diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index 19a1eb58a9..00e6169ac0 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -985,6 +985,17 @@ def test_timestamps(self):
"SELECT CAST('2019-02-28' AS DATE) + INTERVAL '1 day, 1 year'",
)
+ self.validate_identity("DATE(x)").assert_is(exp.Anonymous)
+ self.validate_identity("TO_DATE(x)").assert_is(exp.Anonymous)
+ self.validate_identity("TRY_TO_DATE(x)").assert_is(exp.Anonymous)
+
+ self.validate_all(
+ "TO_DATE(x, 'MM-DD-YYYY')",
+ write={
+ "snowflake": "TO_DATE(x, 'mm-DD-yyyy')",
+ "duckdb": "CAST(STRPTIME(x, '%m-%d-%Y') AS DATE)",
+ },
+ )
self.validate_all(
"DATE('01-01-2000', 'MM-DD-YYYY')",
write={
| feat(snowflake): Adding support for DATE, TO_DATE, TRY_TO_DATE functions
Fixes #3152
Add support for the mentioned functions by:
- Converting all of them to `exp.TsOrDsToDate` expressions if the format is present _or_ to date casts otherwise
- Adding a new argument `safe` in `exp.TsOrDsToDate` to preserve the roundtrip of `TRY_TO_DATE`, which has a different error handling behavior compared to `TO_DATE / DATE`
For the transpilation issue #3152, no change was needed in DuckDB as it already transpiles the aforementioned expression to `strptime` (note that it produces a `TIMESTAMP`, thus the cast to `DATE`):
```
>>> import sqlglot
>>> sqlglot.transpile("SELECT try_to_date('01-01-1999', 'MM-DD-YYYY')", read="snowflake", write="duckdb")
SELECT CAST(STRPTIME('01-01-1999', '%m-%d-%Y') AS DATE)
```
Docs
-----------
1. [Snowflake TRY_TO_DATE](https://docs.snowflake.com/en/sql-reference/functions/try_to_date)
2. [Snowflake DATE / TO_DATE](https://docs.snowflake.com/en/sql-reference/functions/to_date)
3. [DuckDB strptime](https://duckdb.org/docs/sql/functions/dateformat.html#strptime-examples)
| the command above works great.
```
>>> import sqlglot
>>> sqlglot.transpile("SELECT try_to_date('01-01-1999', 'MM-DD-YYYY')", read="snowflake", write="duckdb")
SELECT CAST(STRPTIME('01-01-1999', '%m-%d-%Y') AS DATE)
```
but it seems this command still does not work when the parameter is a column name instead of a string
```
>>> sqlglot.transpile("SELECT try_to_date(x, 'MMDDYYYY')", read="snowflake", write="duckdb")
SELECT TRY_TO_DATE(x, 'MMDDYYYY')
```
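For reference, the column-plus-format case is what the tests in this PR target; a sketch of the intended result:
```python
import sqlglot

print(sqlglot.transpile(
    "SELECT TO_DATE(x, 'MM-DD-YYYY')", read="snowflake", write="duckdb"
)[0])
# Intended output (from the added test): SELECT CAST(STRPTIME(x, '%m-%d-%Y') AS DATE)
```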
@VaggelisD this is because we've reused the logic that we have for `TO_TIMESTAMP`. In that function you can basically have both a numeric expression and a string expression as the first argument, so we couldn't safely convert it to a `StrToTime` without having a literal. It seems though that for `TO_DATE` and `TRY_TO_DATE` there's only one variant with two arguments - the `<str>, <fmt>` one - so it should be safe to convert it accordingly. Wanna take a look? | 1,710,868,748,000 | [] | Feature Request | [
"sqlglot/dialects/snowflake.py:_build_datetime"
] | [] | 1 |
sphinx-doc/sphinx | sphinx-doc__sphinx-12206 | 6d6feb240fa670597229b7c42de74711cc42a680 | diff --git a/sphinx/builders/linkcheck.py b/sphinx/builders/linkcheck.py
index 9178458b140..7d75cac9885 100644
--- a/sphinx/builders/linkcheck.py
+++ b/sphinx/builders/linkcheck.py
@@ -13,7 +13,7 @@
from queue import PriorityQueue, Queue
from threading import Thread
from typing import TYPE_CHECKING, NamedTuple, cast
-from urllib.parse import unquote, urlparse, urlsplit, urlunparse
+from urllib.parse import quote, unquote, urlparse, urlsplit, urlunparse
from docutils import nodes
from requests.exceptions import ConnectionError, HTTPError, SSLError, TooManyRedirects
@@ -409,6 +409,7 @@ def _check_uri(self, uri: str, hyperlink: Hyperlink) -> tuple[str, str, int]:
if rex.match(req_url):
anchor = ''
break
+ anchor = unquote(anchor)
# handle non-ASCII URIs
try:
@@ -446,7 +447,7 @@ def _check_uri(self, uri: str, hyperlink: Hyperlink) -> tuple[str, str, int]:
) as response:
if (self.check_anchors and response.ok and anchor
and not contains_anchor(response, anchor)):
- raise Exception(__(f'Anchor {anchor!r} not found'))
+ raise Exception(__(f'Anchor {quote(anchor)!r} not found'))
# Copy data we need from the (closed) response
status_code = response.status_code
@@ -592,7 +593,7 @@ def _get_request_headers(
def contains_anchor(response: Response, anchor: str) -> bool:
"""Determine if an anchor is contained within an HTTP response."""
- parser = AnchorCheckParser(unquote(anchor))
+ parser = AnchorCheckParser(anchor)
# Read file in chunks. If we find a matching anchor, we break
# the loop early in hopes not to have to download the whole thing.
for chunk in response.iter_content(chunk_size=4096, decode_unicode=True):
| diff --git a/tests/roots/test-linkcheck-anchors-ignore-for-url/index.rst b/tests/roots/test-linkcheck-anchors-ignore-for-url/index.rst
index df287b4c425..02969b63e31 100644
--- a/tests/roots/test-linkcheck-anchors-ignore-for-url/index.rst
+++ b/tests/roots/test-linkcheck-anchors-ignore-for-url/index.rst
@@ -1,5 +1,6 @@
* `Example valid url, no anchor <http://localhost:7777/valid>`_
* `Example valid url, valid anchor <http://localhost:7777/valid#valid-anchor>`_
+* `Example valid url, valid quotable anchor <http://localhost:7777/valid#py:module::urllib.parse>`_
* `Example valid url, invalid anchor <http://localhost:7777/valid#invalid-anchor>`_
* `Example ignored url, no anchor <http://localhost:7777/ignored>`_
* `Example ignored url, invalid anchor <http://localhost:7777/ignored#invalid-anchor>`_
diff --git a/tests/test_builders/test_build_linkcheck.py b/tests/test_builders/test_build_linkcheck.py
index c8d8515af16..f3ff64c083e 100644
--- a/tests/test_builders/test_build_linkcheck.py
+++ b/tests/test_builders/test_build_linkcheck.py
@@ -295,7 +295,7 @@ def test_anchors_ignored_for_url(app):
attrs = ('filename', 'lineno', 'status', 'code', 'uri', 'info')
data = [json.loads(x) for x in content.splitlines()]
- assert len(data) == 7
+ assert len(data) == 8
assert all(all(attr in row for attr in attrs) for row in data)
# rows may be unsorted due to network latency or
@@ -304,6 +304,7 @@ def test_anchors_ignored_for_url(app):
assert rows[f'http://{address}/valid']['status'] == 'working'
assert rows[f'http://{address}/valid#valid-anchor']['status'] == 'working'
+ assert rows['http://localhost:7777/valid#py:module::urllib.parse']['status'] == 'broken'
assert rows[f'http://{address}/valid#invalid-anchor'] == {
'status': 'broken',
'info': "Anchor 'invalid-anchor' not found",
| linkcheck performance: downloading page multiple times when checking anchors
### Problem
- If my sphinx documentation contains multiple links with anchors to a web page with multiple anchors, it will download the page multiple times, once per anchor to check
- This scales very badly. If I have many hundreds or thousands of anchors (e.g. for automatically generated documentation), it might download several megabytes × the number of links. This can end up being multiple gigabytes
#### Procedure to reproduce the problem
- create a document with links to anchors on the same web page
- run the link checker; it will fetch the page multiple times
#### Expected results
- I would suggest that the link checker could cache the anchors on webpages, so that it only downloads each page once, and only checks each link once. It could build a dictionary of pages to check, and store the anchors as a list or dict within it (a rough sketch of this idea follows this list). Since we know up front which of our links have anchors, we can skip storing them when we know it's unnecessary.
- There may be other better ways of doing this; I'm not familiar with the internals of the link checker.
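A minimal sketch of the caching idea from the first bullet; names like `_anchor_cache` and `anchor_exists` are illustrative, not Sphinx internals:
```python
from html.parser import HTMLParser

import requests


class AnchorCollector(HTMLParser):
    """Collect every id/name attribute seen in a page."""

    def __init__(self) -> None:
        super().__init__()
        self.anchors: set[str] = set()

    def handle_starttag(self, tag, attrs):
        for key, value in attrs:
            if key in ("id", "name") and value:
                self.anchors.add(value)


_anchor_cache: dict[str, set[str]] = {}


def anchor_exists(page_url: str, anchor: str) -> bool:
    # Download and parse each page at most once; afterwards every anchor
    # check for that page is answered from the cached set.
    if page_url not in _anchor_cache:
        collector = AnchorCollector()
        collector.feed(requests.get(page_url, timeout=30).text)
        _anchor_cache[page_url] = collector.anchors
    return anchor in _anchor_cache[page_url]
```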
### Reproducible project / your project
- https://github.com/openmicroscopy/bioformats/tree/develop/docs/sphinx
- contains lots of links to https://www.openmicroscopy.org/Schemas/Documentation/Generated/OME-2016-06/ome_xsd.html
### Environment info
- OS: Any
- Python version: Any
- Sphinx version: Any
| cc @jayaddison | 1,711,393,478,000 | [
"builder:linkcheck"
] | Performance Issue | [
"sphinx/builders/linkcheck.py:HyperlinkAvailabilityCheckWorker._check_uri",
"sphinx/builders/linkcheck.py:contains_anchor"
] | [] | 2 |
prowler-cloud/prowler | prowler-cloud__prowler-6128 | 9c089756c3cc745f6cecb3aa1f091cf603834f10 | diff --git a/prowler/providers/aws/services/iam/iam_rotate_access_key_90_days/iam_rotate_access_key_90_days.py b/prowler/providers/aws/services/iam/iam_rotate_access_key_90_days/iam_rotate_access_key_90_days.py
index 3de61166c5a..225b92381df 100644
--- a/prowler/providers/aws/services/iam/iam_rotate_access_key_90_days/iam_rotate_access_key_90_days.py
+++ b/prowler/providers/aws/services/iam/iam_rotate_access_key_90_days/iam_rotate_access_key_90_days.py
@@ -49,7 +49,7 @@ def execute(self) -> Check_Report_AWS:
old_access_keys = True
report = Check_Report_AWS(self.metadata())
report.region = iam_client.region
- report.resource_id = user["user"]
+ report.resource_id = f"{user['user']}-access-key-1"
report.resource_arn = user["arn"]
report.resource_tags = user_tags
report.status = "FAIL"
@@ -66,7 +66,7 @@ def execute(self) -> Check_Report_AWS:
old_access_keys = True
report = Check_Report_AWS(self.metadata())
report.region = iam_client.region
- report.resource_id = user["user"]
+ report.resource_id = f"{user['user']}-access-key-2"
report.resource_arn = user["arn"]
report.resource_tags = user_tags
report.status = "FAIL"
| diff --git a/tests/providers/aws/services/iam/iam_rotate_access_key_90_days/iam_rotate_access_key_90_days_test.py b/tests/providers/aws/services/iam/iam_rotate_access_key_90_days/iam_rotate_access_key_90_days_test.py
index 57c697062f2..8205cd583c1 100644
--- a/tests/providers/aws/services/iam/iam_rotate_access_key_90_days/iam_rotate_access_key_90_days_test.py
+++ b/tests/providers/aws/services/iam/iam_rotate_access_key_90_days/iam_rotate_access_key_90_days_test.py
@@ -21,13 +21,16 @@ def test_user_no_access_keys(self):
aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])
- with mock.patch(
- "prowler.providers.common.provider.Provider.get_global_provider",
- return_value=aws_provider,
- ), mock.patch(
- "prowler.providers.aws.services.iam.iam_rotate_access_key_90_days.iam_rotate_access_key_90_days.iam_client",
- new=IAM(aws_provider),
- ) as service_client:
+ with (
+ mock.patch(
+ "prowler.providers.common.provider.Provider.get_global_provider",
+ return_value=aws_provider,
+ ),
+ mock.patch(
+ "prowler.providers.aws.services.iam.iam_rotate_access_key_90_days.iam_rotate_access_key_90_days.iam_client",
+ new=IAM(aws_provider),
+ ) as service_client,
+ ):
from prowler.providers.aws.services.iam.iam_rotate_access_key_90_days.iam_rotate_access_key_90_days import (
iam_rotate_access_key_90_days,
)
@@ -62,13 +65,16 @@ def test_user_access_key_1_not_rotated(self):
aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])
- with mock.patch(
- "prowler.providers.common.provider.Provider.get_global_provider",
- return_value=aws_provider,
- ), mock.patch(
- "prowler.providers.aws.services.iam.iam_rotate_access_key_90_days.iam_rotate_access_key_90_days.iam_client",
- new=IAM(aws_provider),
- ) as service_client:
+ with (
+ mock.patch(
+ "prowler.providers.common.provider.Provider.get_global_provider",
+ return_value=aws_provider,
+ ),
+ mock.patch(
+ "prowler.providers.aws.services.iam.iam_rotate_access_key_90_days.iam_rotate_access_key_90_days.iam_client",
+ new=IAM(aws_provider),
+ ) as service_client,
+ ):
from prowler.providers.aws.services.iam.iam_rotate_access_key_90_days.iam_rotate_access_key_90_days import (
iam_rotate_access_key_90_days,
)
@@ -86,7 +92,7 @@ def test_user_access_key_1_not_rotated(self):
result[0].status_extended
== f"User {user} has not rotated access key 1 in over 90 days (100 days)."
)
- assert result[0].resource_id == user
+ assert result[0].resource_id == f"{user}-access-key-1"
assert result[0].resource_arn == arn
assert result[0].region == AWS_REGION_US_EAST_1
assert result[0].resource_tags == [{"Key": "test-tag", "Value": "test"}]
@@ -106,13 +112,16 @@ def test_user_access_key_2_not_rotated(self):
aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])
- with mock.patch(
- "prowler.providers.common.provider.Provider.get_global_provider",
- return_value=aws_provider,
- ), mock.patch(
- "prowler.providers.aws.services.iam.iam_rotate_access_key_90_days.iam_rotate_access_key_90_days.iam_client",
- new=IAM(aws_provider),
- ) as service_client:
+ with (
+ mock.patch(
+ "prowler.providers.common.provider.Provider.get_global_provider",
+ return_value=aws_provider,
+ ),
+ mock.patch(
+ "prowler.providers.aws.services.iam.iam_rotate_access_key_90_days.iam_rotate_access_key_90_days.iam_client",
+ new=IAM(aws_provider),
+ ) as service_client,
+ ):
from prowler.providers.aws.services.iam.iam_rotate_access_key_90_days.iam_rotate_access_key_90_days import (
iam_rotate_access_key_90_days,
)
@@ -130,7 +139,7 @@ def test_user_access_key_2_not_rotated(self):
result[0].status_extended
== f"User {user} has not rotated access key 2 in over 90 days (100 days)."
)
- assert result[0].resource_id == user
+ assert result[0].resource_id == f"{user}-access-key-2"
assert result[0].resource_arn == arn
assert result[0].region == AWS_REGION_US_EAST_1
assert result[0].resource_tags == [{"Key": "test-tag", "Value": "test"}]
@@ -150,13 +159,16 @@ def test_user_both_access_keys_not_rotated(self):
aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])
- with mock.patch(
- "prowler.providers.common.provider.Provider.get_global_provider",
- return_value=aws_provider,
- ), mock.patch(
- "prowler.providers.aws.services.iam.iam_rotate_access_key_90_days.iam_rotate_access_key_90_days.iam_client",
- new=IAM(aws_provider),
- ) as service_client:
+ with (
+ mock.patch(
+ "prowler.providers.common.provider.Provider.get_global_provider",
+ return_value=aws_provider,
+ ),
+ mock.patch(
+ "prowler.providers.aws.services.iam.iam_rotate_access_key_90_days.iam_rotate_access_key_90_days.iam_client",
+ new=IAM(aws_provider),
+ ) as service_client,
+ ):
from prowler.providers.aws.services.iam.iam_rotate_access_key_90_days.iam_rotate_access_key_90_days import (
iam_rotate_access_key_90_days,
)
@@ -179,7 +191,7 @@ def test_user_both_access_keys_not_rotated(self):
result[0].status_extended
== f"User {user} has not rotated access key 1 in over 90 days (100 days)."
)
- assert result[0].resource_id == user
+ assert result[0].resource_id == f"{user}-access-key-1"
assert result[0].resource_arn == arn
assert result[0].region == AWS_REGION_US_EAST_1
assert result[0].resource_tags == [{"Key": "test-tag", "Value": "test"}]
@@ -188,7 +200,7 @@ def test_user_both_access_keys_not_rotated(self):
result[1].status_extended
== f"User {user} has not rotated access key 2 in over 90 days (100 days)."
)
- assert result[1].resource_id == user
+ assert result[1].resource_id == f"{user}-access-key-2"
assert result[1].resource_arn == arn
assert result[1].region == AWS_REGION_US_EAST_1
assert result[1].resource_tags == [{"Key": "test-tag", "Value": "test"}]
| `finding_uid` is not unique in `iam_rotate_access_key_90_days`
### Steps to Reproduce
python prowler.py aws -s iam
### Expected behavior
The program outputs a CSV file with the same finding_uid, like this:
prowler-aws-iam_rotate_access_key_90_days-<AWS ACCOUNT_ID>-us-east-1-<IAM_USER>
This IAM_USER has 2 access keys
### Actual Result with Screenshots or Logs
No log
### How did you install Prowler?
Cloning the repository from github.com (git clone)
### Environment Resource
8. Other (Local PC)
### OS used
2. MacOS
### Prowler version
Prowler 5.1.0 (You are running the latest version, yay!)
### Pip version
pip 23.3
### Context
The source code of [finding.py](https://github.com/prowler-cloud/prowler/blob/master/prowler/lib/outputs/finding.py#L238-L241):
```
output_data["uid"] = (
f"prowler-{provider.type}-{check_output.check_metadata.CheckID}-{output_data['account_uid']}-"
f"{output_data['region']}-{output_data['resource_name']}"
)
```
Do we need to add a UUID to the line above to avoid this problem?
| Hello @banv, that's a great catch!
The Prowler finding UID has the above format for historical reasons and can't be changed to a UUID, since that would introduce a breaking change. I think in this case the check needs to be adjusted to add an index for each access key, like `prowler-aws-iam_rotate_access_key_90_days--us-east-1-<IAM_USER>-access-key-[0,1]`.
With that we'll achieve uniqueness in the `finding_uid` (see the sketch after this entry). Let me know what you think about it.
Thanks for using Prowler! 🚀 | 1,733,864,257,000 | [
"provider/aws",
"was-backported",
"backport-to-v4.6",
"backport-to-v5.0"
] | Bug Report | [
"prowler/providers/aws/services/iam/iam_rotate_access_key_90_days/iam_rotate_access_key_90_days.py:iam_rotate_access_key_90_days.execute"
] | [] | 1 |
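To make the fix discussed in the entry above concrete, here is a small, self-contained sketch of why suffixing the resource name with the access-key index makes the derived `finding_uid` unique. The UID format follows the finding.py snippet quoted in the issue; the account, region, and user values are placeholders, not real data.

```python
# Build a finding UID the same way the quoted finding.py snippet does.
def build_finding_uid(provider_type, check_id, account_uid, region, resource_name):
    return f"prowler-{provider_type}-{check_id}-{account_uid}-{region}-{resource_name}"


check_id = "iam_rotate_access_key_90_days"
account, region, user = "123456789012", "us-east-1", "example-user"  # placeholders

# Before the fix: both access keys of the same user map to one and the same UID.
before = {build_finding_uid("aws", check_id, account, region, user) for _ in range(2)}
assert len(before) == 1

# After the fix: the per-key suffix on resource_id (as asserted in the updated
# tests, e.g. f"{user}-access-key-1") yields a distinct UID per access key.
after = {
    build_finding_uid("aws", check_id, account, region, f"{user}-access-key-{index}")
    for index in (1, 2)
}
assert len(after) == 2
print(sorted(after))
```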
prowler-cloud/prowler | prowler-cloud__prowler-6119 | b984f0423a8ad80fffd5b9dc4853b034f4d5211e | diff --git a/prowler/providers/aws/services/wafv2/wafv2_service.py b/prowler/providers/aws/services/wafv2/wafv2_service.py
index 85feed76f62..c867691b771 100644
--- a/prowler/providers/aws/services/wafv2/wafv2_service.py
+++ b/prowler/providers/aws/services/wafv2/wafv2_service.py
@@ -150,6 +150,22 @@ def _get_web_acl(self, acl: str):
else:
acl.rules.append(new_rule)
+ firewall_manager_managed_rg = get_web_acl.get("WebACL", {}).get(
+ "PreProcessFirewallManagerRuleGroups", []
+ ) + get_web_acl.get("WebACL", {}).get(
+ "PostProcessFirewallManagerRuleGroups", []
+ )
+
+ for rule in firewall_manager_managed_rg:
+ acl.rule_groups.append(
+ Rule(
+ name=rule.get("Name", ""),
+ cloudwatch_metrics_enabled=rule.get(
+ "VisibilityConfig", {}
+ ).get("CloudWatchMetricsEnabled", False),
+ )
+ )
+
except Exception as error:
logger.error(
f"{acl.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
@@ -193,13 +209,6 @@ class Rule(BaseModel):
cloudwatch_metrics_enabled: bool = False
-class FirewallManagerRuleGroup(BaseModel):
- """Model representing a rule group for the Web ACL."""
-
- name: str
- cloudwatch_metrics_enabled: bool = False
-
-
class WebAclv2(BaseModel):
"""Model representing a Web ACL for WAFv2."""
| diff --git a/tests/providers/aws/services/wafv2/wafv2_webacl_with_rules/wafv2_webacl_with_rules_test.py b/tests/providers/aws/services/wafv2/wafv2_webacl_with_rules/wafv2_webacl_with_rules_test.py
index 6d476d0f9be..19cde553a77 100644
--- a/tests/providers/aws/services/wafv2/wafv2_webacl_with_rules/wafv2_webacl_with_rules_test.py
+++ b/tests/providers/aws/services/wafv2/wafv2_webacl_with_rules/wafv2_webacl_with_rules_test.py
@@ -1,10 +1,61 @@
from unittest import mock
+from unittest.mock import patch
+import botocore
from boto3 import client
from moto import mock_aws
from tests.providers.aws.utils import AWS_REGION_US_EAST_1, set_mocked_aws_provider
+# Original botocore _make_api_call function
+orig = botocore.client.BaseClient._make_api_call
+
+FM_RG_NAME = "test-firewall-managed-rule-group"
+FM_RG_ARN = "arn:aws:wafv2:us-east-1:123456789012:regional/webacl/test-firewall-managed-rule-group"
+
+
+# Mocked botocore _make_api_call function
+def mock_make_api_call(self, operation_name, kwarg):
+ if operation_name == "ListWebACLs":
+ return {
+ "WebACLs": [
+ {
+ "Name": FM_RG_NAME,
+ "Id": FM_RG_NAME,
+ "ARN": FM_RG_ARN,
+ }
+ ]
+ }
+ elif operation_name == "GetWebACL":
+ return {
+ "WebACL": {
+ "PostProcessFirewallManagerRuleGroups": [
+ {
+ "Name": FM_RG_NAME,
+ "VisibilityConfig": {
+ "SampledRequestsEnabled": True,
+ "CloudWatchMetricsEnabled": True,
+ "MetricName": "web-acl-test-metric",
+ },
+ }
+ ]
+ }
+ }
+ elif operation_name == "ListResourcesForWebACL":
+ return {
+ "ResourceArns": [
+ FM_RG_ARN,
+ ]
+ }
+ elif operation_name == "ListTagsForResource":
+ return {
+ "TagInfoForResource": {
+ "ResourceARN": FM_RG_ARN,
+ "TagList": [{"Key": "Name", "Value": FM_RG_NAME}],
+ }
+ }
+ return orig(self, operation_name, kwarg)
+
class Test_wafv2_webacl_with_rules:
@mock_aws
@@ -13,12 +64,15 @@ def test_no_web_acls(self):
aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])
- with mock.patch(
- "prowler.providers.common.provider.Provider.get_global_provider",
- return_value=aws_provider,
- ), mock.patch(
- "prowler.providers.aws.services.wafv2.wafv2_webacl_with_rules.wafv2_webacl_with_rules.wafv2_client",
- new=WAFv2(aws_provider),
+ with (
+ mock.patch(
+ "prowler.providers.common.provider.Provider.get_global_provider",
+ return_value=aws_provider,
+ ),
+ mock.patch(
+ "prowler.providers.aws.services.wafv2.wafv2_webacl_with_rules.wafv2_webacl_with_rules.wafv2_client",
+ new=WAFv2(aws_provider),
+ ),
):
from prowler.providers.aws.services.wafv2.wafv2_webacl_with_rules.wafv2_webacl_with_rules import (
wafv2_webacl_with_rules,
@@ -69,12 +123,15 @@ def test_wafv2_web_acl_with_rule(self):
aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])
- with mock.patch(
- "prowler.providers.common.provider.Provider.get_global_provider",
- return_value=aws_provider,
- ), mock.patch(
- "prowler.providers.aws.services.wafv2.wafv2_webacl_with_rules.wafv2_webacl_with_rules.wafv2_client",
- new=WAFv2(aws_provider),
+ with (
+ mock.patch(
+ "prowler.providers.common.provider.Provider.get_global_provider",
+ return_value=aws_provider,
+ ),
+ mock.patch(
+ "prowler.providers.aws.services.wafv2.wafv2_webacl_with_rules.wafv2_webacl_with_rules.wafv2_client",
+ new=WAFv2(aws_provider),
+ ),
):
from prowler.providers.aws.services.wafv2.wafv2_webacl_with_rules.wafv2_webacl_with_rules import (
wafv2_webacl_with_rules,
@@ -137,12 +194,15 @@ def test_wafv2_web_acl_with_rule_group(self):
aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])
- with mock.patch(
- "prowler.providers.common.provider.Provider.get_global_provider",
- return_value=aws_provider,
- ), mock.patch(
- "prowler.providers.aws.services.wafv2.wafv2_webacl_with_rules.wafv2_webacl_with_rules.wafv2_client",
- new=WAFv2(aws_provider),
+ with (
+ mock.patch(
+ "prowler.providers.common.provider.Provider.get_global_provider",
+ return_value=aws_provider,
+ ),
+ mock.patch(
+ "prowler.providers.aws.services.wafv2.wafv2_webacl_with_rules.wafv2_webacl_with_rules.wafv2_client",
+ new=WAFv2(aws_provider),
+ ),
):
from prowler.providers.aws.services.wafv2.wafv2_webacl_with_rules.wafv2_webacl_with_rules import (
wafv2_webacl_with_rules,
@@ -161,6 +221,43 @@ def test_wafv2_web_acl_with_rule_group(self):
assert result[0].region == AWS_REGION_US_EAST_1
assert result[0].resource_tags == [{"Key": "Name", "Value": waf_name}]
+ @patch(
+ "botocore.client.BaseClient._make_api_call",
+ new=mock_make_api_call,
+ )
+ @mock_aws
+ def test_wafv2_web_acl_with_firewall_manager_managed_rule_group(self):
+ from prowler.providers.aws.services.wafv2.wafv2_service import WAFv2
+
+ aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])
+
+ with (
+ mock.patch(
+ "prowler.providers.common.provider.Provider.get_global_provider",
+ return_value=aws_provider,
+ ),
+ mock.patch(
+ "prowler.providers.aws.services.wafv2.wafv2_webacl_with_rules.wafv2_webacl_with_rules.wafv2_client",
+ new=WAFv2(aws_provider),
+ ),
+ ):
+ from prowler.providers.aws.services.wafv2.wafv2_webacl_with_rules.wafv2_webacl_with_rules import (
+ wafv2_webacl_with_rules,
+ )
+
+ check = wafv2_webacl_with_rules()
+ result = check.execute()
+ assert len(result) == 1
+ assert result[0].status == "PASS"
+ assert (
+ result[0].status_extended
+ == f"AWS WAFv2 Web ACL {FM_RG_NAME} does have rules or rule groups attached."
+ )
+ assert result[0].resource_id == FM_RG_NAME
+ assert result[0].resource_arn == FM_RG_ARN
+ assert result[0].region == AWS_REGION_US_EAST_1
+ assert result[0].resource_tags == [{"Key": "Name", "Value": FM_RG_NAME}]
+
@mock_aws
def test_wafv2_web_acl_without_rule_or_rule_group(self):
wafv2_client = client("wafv2", region_name=AWS_REGION_US_EAST_1)
@@ -184,12 +281,15 @@ def test_wafv2_web_acl_without_rule_or_rule_group(self):
aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])
- with mock.patch(
- "prowler.providers.common.provider.Provider.get_global_provider",
- return_value=aws_provider,
- ), mock.patch(
- "prowler.providers.aws.services.wafv2.wafv2_webacl_with_rules.wafv2_webacl_with_rules.wafv2_client",
- new=WAFv2(aws_provider),
+ with (
+ mock.patch(
+ "prowler.providers.common.provider.Provider.get_global_provider",
+ return_value=aws_provider,
+ ),
+ mock.patch(
+ "prowler.providers.aws.services.wafv2.wafv2_webacl_with_rules.wafv2_webacl_with_rules.wafv2_client",
+ new=WAFv2(aws_provider),
+ ),
):
from prowler.providers.aws.services.wafv2.wafv2_webacl_with_rules.wafv2_webacl_with_rules import (
wafv2_webacl_with_rules,
 | False positive on `wafv2_webacl_with_rules` when the ACL is provisioned with AWS Firewall Manager
### Steps to Reproduce
1. Deploy an ACL with AWS Firewall Manager, including any rule group
2. Run Prowler against the AWS account where the ACL is provisioned with Firewall Manager
3. Prowler will report that ACL as FAIL because its list of directly attached rules is empty, even though it has rules provisioned by AWS Firewall Manager
### Expected behavior
It's expected that the ACL is managed centrally and may not have rules added directly, so the check should not flag it (a short sketch follows this entry).
### Actual Result with Screenshots or Logs


### How did you install Prowler?
From pip package (pip install prowler)
### Environment Resource
4. EKS
### OS used
1. Amazon Linux 2023
### Prowler version
5.0.0
### Pip version
24.0
### Context
_No response_
 | Hello @ivan-morhun, we will review it as soon as possible and get back to you once we have an update.
Thanks for using Prowler 🚀 | 1,733,850,101,000 | [
"provider/aws",
"was-backported",
"backport-to-v4.6",
"backport-to-v5.0"
] | Bug Report | [
"prowler/providers/aws/services/wafv2/wafv2_service.py:WAFv2._get_web_acl"
] | [] | 1 |
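To illustrate the false positive described in the entry above and the shape of the service fix: a Web ACL managed by AWS Firewall Manager reports its rule groups under `PreProcessFirewallManagerRuleGroups` and `PostProcessFirewallManagerRuleGroups` rather than under `Rules`, so counting only `Rules` makes the ACL look empty. A minimal boto3 sketch of the corrected logic; the ACL name, id, scope, and region are placeholders and running it requires AWS credentials.

```python
import boto3

wafv2 = boto3.client("wafv2", region_name="us-east-1")

# Placeholders: substitute a real Web ACL name and id; use Scope="CLOUDFRONT" for global ACLs.
acl = wafv2.get_web_acl(Name="my-web-acl", Scope="REGIONAL", Id="my-web-acl-id")["WebACL"]

directly_attached = acl.get("Rules", [])
firewall_manager_managed = acl.get("PreProcessFirewallManagerRuleGroups", []) + acl.get(
    "PostProcessFirewallManagerRuleGroups", []
)

# Counting only `Rules` reproduces the false positive; counting both lists, as the
# patched _get_web_acl does, recognizes centrally managed ACLs as having rules attached.
has_rules = bool(directly_attached or firewall_manager_managed)
print(f"{acl['Name']}: rules or rule groups attached -> {has_rules}")
```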
prowler-cloud/prowler | prowler-cloud__prowler-6108 | 38a0d2d740e886f905a047791b22274fe741d60d | diff --git a/prowler/providers/aws/services/firehose/firehose_stream_encrypted_at_rest/firehose_stream_encrypted_at_rest.py b/prowler/providers/aws/services/firehose/firehose_stream_encrypted_at_rest/firehose_stream_encrypted_at_rest.py
index fe63558366a..0430b3d1847 100644
--- a/prowler/providers/aws/services/firehose/firehose_stream_encrypted_at_rest/firehose_stream_encrypted_at_rest.py
+++ b/prowler/providers/aws/services/firehose/firehose_stream_encrypted_at_rest/firehose_stream_encrypted_at_rest.py
@@ -31,10 +31,7 @@ def execute(self) -> List[Check_Report_AWS]:
f"Firehose Stream {stream.name} does have at rest encryption enabled."
)
- if (
- stream.kms_encryption != EncryptionStatus.ENABLED
- or not stream.kms_key_arn
- ):
+ if stream.kms_encryption != EncryptionStatus.ENABLED:
report.status = "FAIL"
report.status_extended = f"Firehose Stream {stream.name} does not have at rest encryption enabled."
| diff --git a/tests/providers/aws/services/firehose/firehose_stream_encrypted_at_rest/firehose_stream_encrypted_at_rest_test.py b/tests/providers/aws/services/firehose/firehose_stream_encrypted_at_rest/firehose_stream_encrypted_at_rest_test.py
index d2a1fa40a9a..da00aa89006 100644
--- a/tests/providers/aws/services/firehose/firehose_stream_encrypted_at_rest/firehose_stream_encrypted_at_rest_test.py
+++ b/tests/providers/aws/services/firehose/firehose_stream_encrypted_at_rest/firehose_stream_encrypted_at_rest_test.py
@@ -17,12 +17,15 @@ def test_no_streams(self):
aws_provider = set_mocked_aws_provider([AWS_REGION_EU_WEST_1])
- with mock.patch(
- "prowler.providers.common.provider.Provider.get_global_provider",
- return_value=aws_provider,
- ), mock.patch(
- "prowler.providers.aws.services.firehose.firehose_stream_encrypted_at_rest.firehose_stream_encrypted_at_rest.firehose_client",
- new=Firehose(aws_provider),
+ with (
+ mock.patch(
+ "prowler.providers.common.provider.Provider.get_global_provider",
+ return_value=aws_provider,
+ ),
+ mock.patch(
+ "prowler.providers.aws.services.firehose.firehose_stream_encrypted_at_rest.firehose_stream_encrypted_at_rest.firehose_client",
+ new=Firehose(aws_provider),
+ ),
):
# Test Check
from prowler.providers.aws.services.firehose.firehose_stream_encrypted_at_rest.firehose_stream_encrypted_at_rest import (
@@ -94,6 +97,65 @@ def test_stream_kms_encryption_enabled(self):
== f"Firehose Stream {stream_name} does have at rest encryption enabled."
)
+ @mock_aws
+ def test_stream_kms_encryption_enabled_aws_managed_key(self):
+ # Generate S3 client
+ s3_client = client("s3", region_name=AWS_REGION_EU_WEST_1)
+ s3_client.create_bucket(
+ Bucket="test-bucket",
+ CreateBucketConfiguration={"LocationConstraint": AWS_REGION_EU_WEST_1},
+ )
+
+ # Generate Firehose client
+ firehose_client = client("firehose", region_name=AWS_REGION_EU_WEST_1)
+ delivery_stream = firehose_client.create_delivery_stream(
+ DeliveryStreamName="test-delivery-stream",
+ DeliveryStreamType="DirectPut",
+ S3DestinationConfiguration={
+ "RoleARN": "arn:aws:iam::012345678901:role/firehose-role",
+ "BucketARN": "arn:aws:s3:::test-bucket",
+ "Prefix": "",
+ "BufferingHints": {"IntervalInSeconds": 300, "SizeInMBs": 5},
+ "CompressionFormat": "UNCOMPRESSED",
+ },
+ Tags=[{"Key": "key", "Value": "value"}],
+ )
+ arn = delivery_stream["DeliveryStreamARN"]
+ stream_name = arn.split("/")[-1]
+
+ firehose_client.start_delivery_stream_encryption(
+ DeliveryStreamName=stream_name,
+ DeliveryStreamEncryptionConfigurationInput={
+ "KeyType": "AWS_OWNED_CMK",
+ },
+ )
+
+ from prowler.providers.aws.services.firehose.firehose_service import Firehose
+
+ aws_provider = set_mocked_aws_provider([AWS_REGION_EU_WEST_1])
+ with mock.patch(
+ "prowler.providers.common.provider.Provider.get_global_provider",
+ return_value=aws_provider,
+ ):
+ with mock.patch(
+ "prowler.providers.aws.services.firehose.firehose_stream_encrypted_at_rest.firehose_stream_encrypted_at_rest.firehose_client",
+ new=Firehose(aws_provider),
+ ):
+ # Test Check
+ from prowler.providers.aws.services.firehose.firehose_stream_encrypted_at_rest.firehose_stream_encrypted_at_rest import (
+ firehose_stream_encrypted_at_rest,
+ )
+
+ check = firehose_stream_encrypted_at_rest()
+ result = check.execute()
+
+ assert len(result) == 1
+ assert result[0].status == "PASS"
+ assert (
+ result[0].status_extended
+ == f"Firehose Stream {stream_name} does have at rest encryption enabled."
+ )
+
@mock_aws
def test_stream_kms_encryption_not_enabled(self):
# Generate Firehose client
| False positive alert on firehose_stream_encrypted_at_rest
### Steps to Reproduce
Hi! There is a bug with the check `firehose_stream_encrypted_at_rest`
### Expected behavior
The check should not alert if Firehose server-side encryption (SSE) is enabled (illustrated in the sketch below).
### Actual Result with Screenshots or Logs

### How did you install Prowler?
Cloning the repository from github.com (git clone)
### Environment Resource
Workstation
### OS used
Windows
### Prowler version
Prowler 5.0.0 (You are running the latest version, yay!)
### Pip version
pip 24.2 (python 3.12)
### Context
_No response_
| Hello @serhii-ciq, we will review it as soon as possible and get back to you once we have an update.
Thanks for using Prowler 🚀 | 1,733,827,600,000 | [
"provider/aws",
"was-backported",
"backport-to-v4.6",
"backport-to-v5.0"
] | Bug Report | [
"prowler/providers/aws/services/firehose/firehose_stream_encrypted_at_rest/firehose_stream_encrypted_at_rest.py:firehose_stream_encrypted_at_rest.execute"
] | [] | 1 |
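To see why the `or not stream.kms_key_arn` clause removed in the diff above caused the false positive: a stream encrypted with the AWS-owned CMK (KeyType `AWS_OWNED_CMK`) reports an encryption `Status` of `ENABLED` but exposes no `KeyARN`, so the status alone should decide the finding. A small boto3 sketch; the stream name and region are placeholders and running it requires AWS credentials.

```python
import boto3

firehose = boto3.client("firehose", region_name="eu-west-1")

description = firehose.describe_delivery_stream(
    DeliveryStreamName="test-delivery-stream"  # placeholder
)["DeliveryStreamDescription"]

encryption = description.get("DeliveryStreamEncryptionConfiguration", {})

# Old condition: Status must be ENABLED *and* a KeyARN must be present -> FAIL for AWS_OWNED_CMK.
# Fixed condition: the encryption Status alone decides the result.
encrypted_at_rest = encryption.get("Status") == "ENABLED"
print(
    f"KeyType={encryption.get('KeyType')}, "
    f"KeyARN present={'KeyARN' in encryption}, "
    f"encrypted at rest: {encrypted_at_rest}"
)
```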
prowler-cloud/prowler | prowler-cloud__prowler-6004 | 32d8da213137fc6f050a824e185e9cd717f60465 | diff --git a/prowler/providers/azure/services/app/app_minimum_tls_version_12/app_minimum_tls_version_12.py b/prowler/providers/azure/services/app/app_minimum_tls_version_12/app_minimum_tls_version_12.py
index 32020e2ec2c..519335a016e 100644
--- a/prowler/providers/azure/services/app/app_minimum_tls_version_12/app_minimum_tls_version_12.py
+++ b/prowler/providers/azure/services/app/app_minimum_tls_version_12/app_minimum_tls_version_12.py
@@ -19,12 +19,11 @@ def execute(self) -> Check_Report_Azure:
report.location = app.location
report.status_extended = f"Minimum TLS version is not set to 1.2 for app '{app_name}' in subscription '{subscription_name}'."
- if (
- app.configurations
- and getattr(app.configurations, "min_tls_version", "") == "1.2"
- ):
+ if app.configurations and getattr(
+ app.configurations, "min_tls_version", ""
+ ) in ["1.2", "1.3"]:
report.status = "PASS"
- report.status_extended = f"Minimum TLS version is set to 1.2 for app '{app_name}' in subscription '{subscription_name}'."
+ report.status_extended = f"Minimum TLS version is set to {app.configurations.min_tls_version} for app '{app_name}' in subscription '{subscription_name}'."
findings.append(report)
| diff --git a/tests/providers/azure/services/app/app_minimum_tls_version_12/app_minimum_tls_version_12_test.py b/tests/providers/azure/services/app/app_minimum_tls_version_12/app_minimum_tls_version_12_test.py
index da0271cbc07..12a149015e3 100644
--- a/tests/providers/azure/services/app/app_minimum_tls_version_12/app_minimum_tls_version_12_test.py
+++ b/tests/providers/azure/services/app/app_minimum_tls_version_12/app_minimum_tls_version_12_test.py
@@ -171,3 +171,45 @@ def test_app_min_tls_version_10(self):
assert result[0].resource_name == "app_id-1"
assert result[0].subscription == AZURE_SUBSCRIPTION_ID
assert result[0].location == "West Europe"
+
+ def test_app_min_tls_version_13(self):
+ resource_id = f"/subscriptions/{uuid4()}"
+ app_client = mock.MagicMock
+
+ with mock.patch(
+ "prowler.providers.common.provider.Provider.get_global_provider",
+ return_value=set_mocked_azure_provider(),
+ ), mock.patch(
+ "prowler.providers.azure.services.app.app_minimum_tls_version_12.app_minimum_tls_version_12.app_client",
+ new=app_client,
+ ):
+ from prowler.providers.azure.services.app.app_minimum_tls_version_12.app_minimum_tls_version_12 import (
+ app_minimum_tls_version_12,
+ )
+ from prowler.providers.azure.services.app.app_service import WebApp
+
+ app_client.apps = {
+ AZURE_SUBSCRIPTION_ID: {
+ "app_id-1": WebApp(
+ resource_id=resource_id,
+ auth_enabled=False,
+ configurations=mock.MagicMock(min_tls_version="1.3"),
+ client_cert_mode="Ignore",
+ https_only=False,
+ identity=None,
+ location="West Europe",
+ )
+ }
+ }
+ check = app_minimum_tls_version_12()
+ result = check.execute()
+ assert len(result) == 1
+ assert result[0].status == "PASS"
+ assert (
+ result[0].status_extended
+ == f"Minimum TLS version is set to 1.3 for app 'app_id-1' in subscription '{AZURE_SUBSCRIPTION_ID}'."
+ )
+ assert result[0].resource_id == resource_id
+ assert result[0].resource_name == "app_id-1"
+ assert result[0].subscription == AZURE_SUBSCRIPTION_ID
+ assert result[0].location == "West Europe"
| Minimum TLS 1.2 fails for Azure Web App when TLS 1.3 is enabled
### Steps to Reproduce
1. prowler azure --subscription-ids xx-88ae-4fe8-901a-16e33871e7c7 xx-5c28-4e32-94df-591a5baedf69 --az-cli-auth
2. Azure Web App with TLS 1.3 enabled
### Expected behavior
In the file prowler/providers/azure/services/app/app_minimum_tls_version_12/app_minimum_tls_version_12.py,
when checking for minimum TLS 1.2, the check should not fail on TLS 1.3 (see the sketch below).
### Actual Result with Screenshots or Logs
"Minimum TLS version is not set to 1.2 for app"
```
if (
app.configurations
and getattr(app.configurations, "min_tls_version", "") == "1.2"
):
report.status = "PASS"
report.status_extended = f"Minimum TLS version is set to 1.2 for app '{app_name}' in subscription '{subscription_name}'."
```
### How did you install Prowler?
From pip package (pip install prowler)
### Environment Resource
Azure Cloud Shell
### OS used
Azure Cloud Shell
### Prowler version
4.5.3
### Pip version
pip 23.0.1 from /usr/lib/python3.9/site-packages/pip (python 3.9)
### Context
_No response_
| 1,733,241,577,000 | [
"provider/azure",
"was-backported",
"backport-to-v4.6",
"backport-to-v5.0"
] | Bug Report | [
"prowler/providers/azure/services/app/app_minimum_tls_version_12/app_minimum_tls_version_12.py:app_minimum_tls_version_12.execute"
] | [] | 1 |
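The patch above fixes the check by accepting both "1.2" and "1.3" as compliant values. A slightly more future-proof variant is to compare the configured minimum version numerically against the required floor; this is only a hedged sketch, not the code Prowler ships, and `min_tls_version` mirrors the app configuration attribute used in the diff.

```python
def meets_minimum_tls(min_tls_version, required="1.2"):
    """Return True if the configured minimum TLS version is at least the required floor."""
    try:
        configured = tuple(int(part) for part in str(min_tls_version).split("."))
        floor = tuple(int(part) for part in required.split("."))
        return configured >= floor
    except ValueError:
        # Missing or malformed setting: treat as non-compliant so the check reports FAIL.
        return False


assert meets_minimum_tls("1.2") and meets_minimum_tls("1.3")
assert not meets_minimum_tls("1.0") and not meets_minimum_tls(None)
```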