Untriex Programming
2021-03-17 08:57:57 +01:00
parent 339be0ccd8
commit ed6afdb5c9
3074 changed files with 423348 additions and 0 deletions

View File

@@ -0,0 +1,111 @@
# flake8: noqa
from . import dataclasses
from .annotated_types import create_model_from_namedtuple, create_model_from_typeddict
from .class_validators import root_validator, validator
from .decorator import validate_arguments
from .env_settings import BaseSettings
from .error_wrappers import ValidationError
from .errors import *
from .fields import Field, PrivateAttr, Required
from .main import *
from .networks import *
from .parse import Protocol
from .tools import *
from .types import *
from .version import VERSION
# WARNING __all__ from .errors is not included here, it will be removed as an export here in v2
# please use "from pydantic.errors import ..." instead
__all__ = [
# annotated types utils
'create_model_from_namedtuple',
'create_model_from_typeddict',
# dataclasses
'dataclasses',
# class_validators
'root_validator',
'validator',
# decorator
'validate_arguments',
# env_settings
'BaseSettings',
# error_wrappers
'ValidationError',
# fields
'Field',
'Required',
# main
'BaseConfig',
'BaseModel',
'Extra',
'compiled',
'create_model',
'validate_model',
# network
'AnyUrl',
'AnyHttpUrl',
'HttpUrl',
'stricturl',
'EmailStr',
'NameEmail',
'IPvAnyAddress',
'IPvAnyInterface',
'IPvAnyNetwork',
'PostgresDsn',
'RedisDsn',
'validate_email',
# parse
'Protocol',
# tools
'parse_file_as',
'parse_obj_as',
'parse_raw_as',
# types
'NoneStr',
'NoneBytes',
'StrBytes',
'NoneStrBytes',
'StrictStr',
'ConstrainedBytes',
'conbytes',
'ConstrainedList',
'conlist',
'ConstrainedSet',
'conset',
'ConstrainedStr',
'constr',
'PyObject',
'ConstrainedInt',
'conint',
'PositiveInt',
'NegativeInt',
'NonNegativeInt',
'NonPositiveInt',
'ConstrainedFloat',
'confloat',
'PositiveFloat',
'NegativeFloat',
'NonNegativeFloat',
'NonPositiveFloat',
'ConstrainedDecimal',
'condecimal',
'UUID1',
'UUID3',
'UUID4',
'UUID5',
'FilePath',
'DirectoryPath',
'Json',
'JsonWrapper',
'SecretStr',
'SecretBytes',
'StrictBool',
'StrictBytes',
'StrictInt',
'StrictFloat',
'PaymentCardNumber',
'PrivateAttr',
'ByteSize',
# version
'VERSION',
]
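For orientation, a minimal sketch of how the public API exported above is typically used (the `User` model and its fields are illustrative, not part of this file):

```python
from pydantic import BaseModel, Field, ValidationError

class User(BaseModel):
    id: int
    name: str = Field('Jane Doe', min_length=1)  # default value plus a constraint

try:
    User(id='not an int')
except ValidationError as e:
    print(e)  # reports that `id` is not a valid integer
```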

View File

@@ -0,0 +1,364 @@
"""
Register Hypothesis strategies for Pydantic custom types.
This enables fully-automatic generation of test data for most Pydantic classes.
Note that this module has *no* runtime impact on Pydantic itself; instead it
is registered as a setuptools entry point and Hypothesis will import it if
Pydantic is installed. See also:
https://hypothesis.readthedocs.io/en/latest/strategies.html#registering-strategies-via-setuptools-entry-points
https://hypothesis.readthedocs.io/en/latest/data.html#hypothesis.strategies.register_type_strategy
https://hypothesis.readthedocs.io/en/latest/strategies.html#interaction-with-pytest-cov
https://pydantic-docs.helpmanual.io/usage/types/#pydantic-types
Note that because our motivation is to *improve user experience*, the strategies
are always sound (never generate invalid data) but sacrifice completeness for
maintainability (ie may be unable to generate some tricky but valid data).
Finally, this module makes liberal use of `# type: ignore[<code>]` pragmas.
This is because Hypothesis annotates `register_type_strategy()` with
`(T, SearchStrategy[T])`, but in most cases we register e.g. `ConstrainedInt`
to generate instances of the builtin `int` type which match the constraints.
"""
import contextlib
import ipaddress
import json
import math
from fractions import Fraction
from typing import Callable, Dict, Type, Union, cast, overload
import hypothesis.strategies as st
import pydantic
import pydantic.color
import pydantic.types
# FilePath and DirectoryPath are explicitly unsupported, as we'd have to create
# them on-disk, and that's unsafe in general without being told *where* to do so.
#
# URLs are unsupported because it's easy for users to define their own strategy for
# "normal" URLs, and hard for us to define a general strategy which includes "weird"
# URLs but doesn't also have unpredictable performance problems.
#
# conlist() and conset() are unsupported for now, because the workarounds for
# Cython and Hypothesis to handle parametrized generic types are incompatible.
# Once Cython can support 'normal' generics we'll revisit this.
# Emails
try:
import email_validator
except ImportError: # pragma: no cover
pass
else:
def is_valid_email(s: str) -> bool:
# Hypothesis' st.emails() occasionally generates emails like 0@A0--0.ac
# that are invalid according to email-validator, so we filter those out.
try:
email_validator.validate_email(s, check_deliverability=False)
return True
except email_validator.EmailNotValidError: # pragma: no cover
return False
# Note that these strategies deliberately stay away from any tricky Unicode
# or other encoding issues; we're just trying to generate *something* valid.
st.register_type_strategy(pydantic.EmailStr, st.emails().filter(is_valid_email)) # type: ignore[arg-type]
st.register_type_strategy(
pydantic.NameEmail,
st.builds(
'{} <{}>'.format, # type: ignore[arg-type]
st.from_regex('[A-Za-z0-9_]+( [A-Za-z0-9_]+){0,5}', fullmatch=True),
st.emails().filter(is_valid_email),
),
)
# PyObject - dotted names, in this case taken from the math module.
st.register_type_strategy(
pydantic.PyObject, # type: ignore[arg-type]
st.sampled_from(
[cast(pydantic.PyObject, f'math.{name}') for name in sorted(vars(math)) if not name.startswith('_')]
),
)
# CSS3 Colors; as name, hex, rgb(a) tuples or strings, or hsl strings
_color_regexes = (
'|'.join(
(
pydantic.color.r_hex_short,
pydantic.color.r_hex_long,
pydantic.color.r_rgb,
pydantic.color.r_rgba,
pydantic.color.r_hsl,
pydantic.color.r_hsla,
)
)
# Use more precise regex patterns to avoid value-out-of-range errors
.replace(pydantic.color._r_sl, r'(?:(\d\d?(?:\.\d+)?|100(?:\.0+)?)%)')
.replace(pydantic.color._r_alpha, r'(?:(0(?:\.\d+)?|1(?:\.0+)?|\.\d+|\d{1,2}%))')
.replace(pydantic.color._r_255, r'(?:((?:\d|\d\d|[01]\d\d|2[0-4]\d|25[0-4])(?:\.\d+)?|255(?:\.0+)?))')
)
st.register_type_strategy(
pydantic.color.Color,
st.one_of(
st.sampled_from(sorted(pydantic.color.COLORS_BY_NAME)),
st.tuples(
st.integers(0, 255),
st.integers(0, 255),
st.integers(0, 255),
st.none() | st.floats(0, 1) | st.floats(0, 100).map('{}%'.format),
),
st.from_regex(_color_regexes, fullmatch=True),
),
)
# Card numbers, valid according to the Luhn algorithm
def add_luhn_digit(card_number: str) -> str:
# See https://en.wikipedia.org/wiki/Luhn_algorithm
for digit in '0123456789':
with contextlib.suppress(Exception):
pydantic.PaymentCardNumber.validate_luhn_check_digit(card_number + digit)
return card_number + digit
raise AssertionError('Unreachable') # pragma: no cover
card_patterns = (
# Note that these patterns omit the Luhn check digit; that's added by the function above
'4[0-9]{14}', # Visa
'5[12345][0-9]{13}', # Mastercard
'3[47][0-9]{12}', # American Express
'[0-26-9][0-9]{10,17}', # other (incomplete to avoid overlap)
)
st.register_type_strategy(
pydantic.PaymentCardNumber,
st.from_regex('|'.join(card_patterns), fullmatch=True).map(add_luhn_digit), # type: ignore[arg-type]
)
# UUIDs
st.register_type_strategy(pydantic.UUID1, st.uuids(version=1))
st.register_type_strategy(pydantic.UUID3, st.uuids(version=3))
st.register_type_strategy(pydantic.UUID4, st.uuids(version=4))
st.register_type_strategy(pydantic.UUID5, st.uuids(version=5))
# Secrets
st.register_type_strategy(pydantic.SecretBytes, st.binary().map(pydantic.SecretBytes))
st.register_type_strategy(pydantic.SecretStr, st.text().map(pydantic.SecretStr))
# IP addresses, networks, and interfaces
st.register_type_strategy(pydantic.IPvAnyAddress, st.ip_addresses())
st.register_type_strategy(
pydantic.IPvAnyInterface,
st.from_type(ipaddress.IPv4Interface) | st.from_type(ipaddress.IPv6Interface), # type: ignore[arg-type]
)
st.register_type_strategy(
pydantic.IPvAnyNetwork,
st.from_type(ipaddress.IPv4Network) | st.from_type(ipaddress.IPv6Network), # type: ignore[arg-type]
)
# We hook into the con***() functions and the ConstrainedNumberMeta metaclass,
# so here we only have to register subclasses for other constrained types which
# don't go via those mechanisms. Then there are the registration hooks below.
st.register_type_strategy(pydantic.StrictBool, st.booleans())
st.register_type_strategy(pydantic.StrictStr, st.text())
# Constrained-type resolver functions
#
# For these ones, we actually want to inspect the type in order to work out a
# satisfying strategy. First up, the machinery for tracking resolver functions:
RESOLVERS: Dict[type, Callable[[type], st.SearchStrategy]] = {} # type: ignore[type-arg]
@overload
def _registered(typ: Type[pydantic.types.T]) -> Type[pydantic.types.T]:
pass
@overload
def _registered(typ: pydantic.types.ConstrainedNumberMeta) -> pydantic.types.ConstrainedNumberMeta:
pass
def _registered(
typ: Union[Type[pydantic.types.T], pydantic.types.ConstrainedNumberMeta]
) -> Union[Type[pydantic.types.T], pydantic.types.ConstrainedNumberMeta]:
# This function replaces the version in `pydantic.types`, in order to
# effect the registration of new constrained types so that Hypothesis
# can generate valid examples.
pydantic.types._DEFINED_TYPES.add(typ)
for supertype, resolver in RESOLVERS.items():
if issubclass(typ, supertype):
st.register_type_strategy(typ, resolver(typ)) # type: ignore
return typ
raise NotImplementedError(f'Unknown type {typ!r} has no resolver to register') # pragma: no cover
def resolves(
typ: Union[type, pydantic.types.ConstrainedNumberMeta]
) -> Callable[[Callable[..., st.SearchStrategy]], Callable[..., st.SearchStrategy]]: # type: ignore[type-arg]
def inner(f): # type: ignore
assert f not in RESOLVERS
RESOLVERS[typ] = f
return f
return inner
# Type-to-strategy resolver functions
@resolves(pydantic.JsonWrapper)
def resolve_json(cls): # type: ignore[no-untyped-def]
try:
inner = st.none() if cls.inner_type is None else st.from_type(cls.inner_type)
except Exception: # pragma: no cover
finite = st.floats(allow_infinity=False, allow_nan=False)
inner = st.recursive(
base=st.one_of(st.none(), st.booleans(), st.integers(), finite, st.text()),
extend=lambda x: st.lists(x) | st.dictionaries(st.text(), x), # type: ignore
)
return st.builds(
json.dumps,
inner,
ensure_ascii=st.booleans(),
indent=st.none() | st.integers(0, 16),
sort_keys=st.booleans(),
)
@resolves(pydantic.ConstrainedBytes)
def resolve_conbytes(cls): # type: ignore[no-untyped-def] # pragma: no cover
min_size = cls.min_length or 0
max_size = cls.max_length
if not cls.strip_whitespace:
return st.binary(min_size=min_size, max_size=max_size)
# Fun with regex to ensure we neither start nor end with whitespace
repeats = '{{{},{}}}'.format(
min_size - 2 if min_size > 2 else 0,
max_size - 2 if (max_size or 0) > 2 else '',
)
if min_size >= 2:
pattern = rf'\S.{repeats}\S'
elif min_size == 1:
pattern = rf'\S(.{repeats}\S)?'
else:
assert min_size == 0
pattern = rf'(\S(.{repeats}\S)?)?'
return st.from_regex(pattern.encode(), fullmatch=True)
@resolves(pydantic.ConstrainedDecimal)
def resolve_condecimal(cls): # type: ignore[no-untyped-def]
min_value = cls.ge
max_value = cls.le
if cls.gt is not None:
assert min_value is None, 'Set `gt` or `ge`, but not both'
min_value = cls.gt
if cls.lt is not None:
assert max_value is None, 'Set `lt` or `le`, but not both'
max_value = cls.lt
s = st.decimals(min_value, max_value, allow_nan=False)
if cls.lt is not None:
s = s.filter(lambda d: d < cls.lt)
if cls.gt is not None:
s = s.filter(lambda d: cls.gt < d)
return s
@resolves(pydantic.ConstrainedFloat)
def resolve_confloat(cls): # type: ignore[no-untyped-def]
min_value = cls.ge
max_value = cls.le
exclude_min = False
exclude_max = False
if cls.gt is not None:
assert min_value is None, 'Set `gt` or `ge`, but not both'
min_value = cls.gt
exclude_min = True
if cls.lt is not None:
assert max_value is None, 'Set `lt` or `le`, but not both'
max_value = cls.lt
exclude_max = True
if cls.multiple_of is None:
return st.floats(min_value, max_value, exclude_min=exclude_min, exclude_max=exclude_max, allow_nan=False)
if min_value is not None:
min_value = math.ceil(min_value / cls.multiple_of)
if exclude_min:
min_value = min_value + 1
if max_value is not None:
assert max_value >= cls.multiple_of, 'Cannot build model with max value smaller than multiple of'
max_value = math.floor(max_value / cls.multiple_of)
if exclude_max:
max_value = max_value - 1
return st.integers(min_value, max_value).map(lambda x: x * cls.multiple_of)
@resolves(pydantic.ConstrainedInt)
def resolve_conint(cls): # type: ignore[no-untyped-def]
min_value = cls.ge
max_value = cls.le
if cls.gt is not None:
assert min_value is None, 'Set `gt` or `ge`, but not both'
min_value = cls.gt + 1
if cls.lt is not None:
assert max_value is None, 'Set `lt` or `le`, but not both'
max_value = cls.lt - 1
if cls.multiple_of is None or cls.multiple_of == 1:
return st.integers(min_value, max_value)
# These adjustments and the .map handle integer-valued multiples, while the
# .filter handles trickier cases as for confloat.
if min_value is not None:
min_value = math.ceil(Fraction(min_value) / Fraction(cls.multiple_of))
if max_value is not None:
max_value = math.floor(Fraction(max_value) / Fraction(cls.multiple_of))
return st.integers(min_value, max_value).map(lambda x: x * cls.multiple_of)
@resolves(pydantic.ConstrainedStr)
def resolve_constr(cls): # type: ignore[no-untyped-def] # pragma: no cover
min_size = cls.min_length or 0
max_size = cls.max_length
if cls.regex is None and not cls.strip_whitespace:
return st.text(min_size=min_size, max_size=max_size)
if cls.regex is not None:
strategy = st.from_regex(cls.regex)
if cls.strip_whitespace:
strategy = strategy.filter(lambda s: s == s.strip())
elif cls.strip_whitespace:
repeats = '{{{},{}}}'.format(
min_size - 2 if min_size > 2 else 0,
max_size - 2 if (max_size or 0) > 2 else '',
)
if min_size >= 2:
strategy = st.from_regex(rf'\S.{repeats}\S')
elif min_size == 1:
strategy = st.from_regex(rf'\S(.{repeats}\S)?')
else:
assert min_size == 0
strategy = st.from_regex(rf'(\S(.{repeats}\S)?)?')
if min_size == 0 and max_size is None:
return strategy
elif max_size is None:
return strategy.filter(lambda s: min_size <= len(s))
return strategy.filter(lambda s: min_size <= len(s) <= max_size)
# Finally, register all previously-defined types, and patch in our new function
for typ in pydantic.types._DEFINED_TYPES:
_registered(typ)
pydantic.types._registered = _registered
st.register_type_strategy(pydantic.Json, resolve_json)
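With this plugin registered as an entry point, Hypothesis can generate valid instances of models that use pydantic's custom types. A sketch of a property-based test (the model and test names are illustrative; `st.builds` resolves the model's generated signature):

```python
from hypothesis import given, strategies as st
from pydantic import BaseModel, PositiveInt

class Model(BaseModel):
    n: PositiveInt

@given(st.builds(Model))
def test_n_is_positive(instance: Model) -> None:
    # the registered strategies are sound, so generated data is always valid
    assert instance.n > 0

test_n_is_positive()  # runs the test body against many generated instances
```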

View File

@@ -0,0 +1,51 @@
from typing import TYPE_CHECKING, Any, Dict, FrozenSet, NamedTuple, Type
from .fields import Required
from .main import BaseModel, create_model
if TYPE_CHECKING:
class TypedDict(Dict[str, Any]):
__annotations__: Dict[str, Type[Any]]
__total__: bool
__required_keys__: FrozenSet[str]
__optional_keys__: FrozenSet[str]
def create_model_from_typeddict(typeddict_cls: Type['TypedDict'], **kwargs: Any) -> Type['BaseModel']:
"""
Create a `BaseModel` based on the fields of a `TypedDict`.
Since `typing.TypedDict` in Python 3.8 does not store runtime information about optional keys,
we raise an error if such a `TypedDict` is used (see https://bugs.python.org/issue38834).
"""
field_definitions: Dict[str, Any]
# Best case scenario: with python 3.9+ or when `TypedDict` is imported from `typing_extensions`
if not hasattr(typeddict_cls, '__required_keys__'):
raise TypeError(
'You should use `typing_extensions.TypedDict` instead of `typing.TypedDict`. '
'Without it, there is no way to differentiate required and optional fields when subclassed.'
)
field_definitions = {
field_name: (field_type, Required if field_name in typeddict_cls.__required_keys__ else None)
for field_name, field_type in typeddict_cls.__annotations__.items()
}
return create_model(typeddict_cls.__name__, **kwargs, **field_definitions)
def create_model_from_namedtuple(namedtuple_cls: Type['NamedTuple'], **kwargs: Any) -> Type['BaseModel']:
"""
Create a `BaseModel` based on the fields of a named tuple.
A named tuple can be created with `typing.NamedTuple` and declared annotations
but also with `collections.namedtuple`, in this case we consider all fields
to have type `Any`.
"""
namedtuple_annotations: Dict[str, Type[Any]] = getattr(
namedtuple_cls, '__annotations__', {k: Any for k in namedtuple_cls._fields}
)
field_definitions: Dict[str, Any] = {
field_name: (field_type, Required) for field_name, field_type in namedtuple_annotations.items()
}
return create_model(namedtuple_cls.__name__, **kwargs, **field_definitions)
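A short sketch of `create_model_from_typeddict` in use (the `Movie` TypedDict is illustrative; `typing_extensions.TypedDict` is needed so `__required_keys__` exists at runtime):

```python
from typing_extensions import TypedDict

from pydantic import ValidationError, create_model_from_typeddict

class Movie(TypedDict):
    name: str
    year: int

Model = create_model_from_typeddict(Movie)
print(Model(name='Dune', year=2021))
try:
    Model(name='Dune')  # `year` is a required key, so this fails validation
except ValidationError as e:
    print(e)
```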

View File

@@ -0,0 +1,334 @@
import warnings
from collections import ChainMap
from functools import wraps
from itertools import chain
from types import FunctionType
from typing import TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Type, Union, overload
from .errors import ConfigError
from .typing import AnyCallable
from .utils import ROOT_KEY, in_ipython
class Validator:
__slots__ = 'func', 'pre', 'each_item', 'always', 'check_fields', 'skip_on_failure'
def __init__(
self,
func: AnyCallable,
pre: bool = False,
each_item: bool = False,
always: bool = False,
check_fields: bool = False,
skip_on_failure: bool = False,
):
self.func = func
self.pre = pre
self.each_item = each_item
self.always = always
self.check_fields = check_fields
self.skip_on_failure = skip_on_failure
if TYPE_CHECKING:
from inspect import Signature
from .fields import ModelField
from .main import BaseConfig
from .types import ModelOrDc
ValidatorCallable = Callable[[Optional[ModelOrDc], Any, Dict[str, Any], ModelField, Type[BaseConfig]], Any]
ValidatorsList = List[ValidatorCallable]
ValidatorListDict = Dict[str, List[Validator]]
_FUNCS: Set[str] = set()
VALIDATOR_CONFIG_KEY = '__validator_config__'
ROOT_VALIDATOR_CONFIG_KEY = '__root_validator_config__'
def validator(
*fields: str,
pre: bool = False,
each_item: bool = False,
always: bool = False,
check_fields: bool = True,
whole: Optional[bool] = None,
allow_reuse: bool = False,
) -> Callable[[AnyCallable], classmethod]:
"""
Decorate methods on the class indicating that they should be used to validate fields
:param fields: which field(s) the method should be called on
:param pre: whether or not this validator should be called before the standard validators (else after)
:param each_item: for complex objects (sets, lists etc.) whether to validate individual elements rather than the
whole object
:param always: whether this method and other validators should be called even if the value is missing
:param check_fields: whether to check that the fields actually exist on the model
:param allow_reuse: whether to track and raise an error if another validator refers to the decorated function
"""
if not fields:
raise ConfigError('validator with no fields specified')
elif isinstance(fields[0], FunctionType):
raise ConfigError(
"validators should be used with fields and keyword arguments, not bare. " # noqa: Q000
"E.g. usage should be `@validator('<field_name>', ...)`"
)
if whole is not None:
warnings.warn(
'The "whole" keyword argument is deprecated, use "each_item" (inverse meaning, default False) instead',
DeprecationWarning,
)
assert each_item is False, '"each_item" and "whole" conflict, remove "whole"'
each_item = not whole
def dec(f: AnyCallable) -> classmethod:
f_cls = _prepare_validator(f, allow_reuse)
setattr(
f_cls,
VALIDATOR_CONFIG_KEY,
(
fields,
Validator(func=f_cls.__func__, pre=pre, each_item=each_item, always=always, check_fields=check_fields),
),
)
return f_cls
return dec
@overload
def root_validator(_func: AnyCallable) -> classmethod:
...
@overload
def root_validator(
*, pre: bool = False, allow_reuse: bool = False, skip_on_failure: bool = False
) -> Callable[[AnyCallable], classmethod]:
...
def root_validator(
_func: Optional[AnyCallable] = None, *, pre: bool = False, allow_reuse: bool = False, skip_on_failure: bool = False
) -> Union[classmethod, Callable[[AnyCallable], classmethod]]:
"""
Decorate methods on a model indicating that they should be used to validate (and perhaps modify) data either
before or after standard model parsing/validation is performed.
"""
if _func:
f_cls = _prepare_validator(_func, allow_reuse)
setattr(
f_cls, ROOT_VALIDATOR_CONFIG_KEY, Validator(func=f_cls.__func__, pre=pre, skip_on_failure=skip_on_failure)
)
return f_cls
def dec(f: AnyCallable) -> classmethod:
f_cls = _prepare_validator(f, allow_reuse)
setattr(
f_cls, ROOT_VALIDATOR_CONFIG_KEY, Validator(func=f_cls.__func__, pre=pre, skip_on_failure=skip_on_failure)
)
return f_cls
return dec
def _prepare_validator(function: AnyCallable, allow_reuse: bool) -> classmethod:
"""
Avoid validators with duplicated names since without this, validators can be overwritten silently
which generally isn't the intended behaviour, don't run in ipython (see #312) or if allow_reuse is False.
"""
f_cls = function if isinstance(function, classmethod) else classmethod(function)
if not in_ipython() and not allow_reuse:
ref = f_cls.__func__.__module__ + '.' + f_cls.__func__.__qualname__
if ref in _FUNCS:
raise ConfigError(f'duplicate validator function "{ref}"; if this is intended, set `allow_reuse=True`')
_FUNCS.add(ref)
return f_cls
class ValidatorGroup:
def __init__(self, validators: 'ValidatorListDict') -> None:
self.validators = validators
self.used_validators = {'*'}
def get_validators(self, name: str) -> Optional[Dict[str, Validator]]:
self.used_validators.add(name)
validators = self.validators.get(name, [])
if name != ROOT_KEY:
validators += self.validators.get('*', [])
if validators:
return {v.func.__name__: v for v in validators}
else:
return None
def check_for_unused(self) -> None:
unused_validators = set(
chain.from_iterable(
(v.func.__name__ for v in self.validators[f] if v.check_fields)
for f in (self.validators.keys() - self.used_validators)
)
)
if unused_validators:
fn = ', '.join(unused_validators)
raise ConfigError(
f"Validators defined with incorrect fields: {fn} " # noqa: Q000
f"(use check_fields=False if you're inheriting from the model and intended this)"
)
def extract_validators(namespace: Dict[str, Any]) -> Dict[str, List[Validator]]:
validators: Dict[str, List[Validator]] = {}
for var_name, value in namespace.items():
validator_config = getattr(value, VALIDATOR_CONFIG_KEY, None)
if validator_config:
fields, v = validator_config
for field in fields:
if field in validators:
validators[field].append(v)
else:
validators[field] = [v]
return validators
def extract_root_validators(namespace: Dict[str, Any]) -> Tuple[List[AnyCallable], List[Tuple[bool, AnyCallable]]]:
from inspect import signature
pre_validators: List[AnyCallable] = []
post_validators: List[Tuple[bool, AnyCallable]] = []
for name, value in namespace.items():
validator_config: Optional[Validator] = getattr(value, ROOT_VALIDATOR_CONFIG_KEY, None)
if validator_config:
sig = signature(validator_config.func)
args = list(sig.parameters.keys())
if args[0] == 'self':
raise ConfigError(
f'Invalid signature for root validator {name}: {sig}, "self" not permitted as first argument, '
f'should be: (cls, values).'
)
if len(args) != 2:
raise ConfigError(f'Invalid signature for root validator {name}: {sig}, should be: (cls, values).')
# check function signature
if validator_config.pre:
pre_validators.append(validator_config.func)
else:
post_validators.append((validator_config.skip_on_failure, validator_config.func))
return pre_validators, post_validators
def inherit_validators(base_validators: 'ValidatorListDict', validators: 'ValidatorListDict') -> 'ValidatorListDict':
for field, field_validators in base_validators.items():
if field not in validators:
validators[field] = []
validators[field] += field_validators
return validators
def make_generic_validator(validator: AnyCallable) -> 'ValidatorCallable':
"""
Make a generic function which calls a validator with the right arguments.
Unfortunately other approaches (eg. return a partial of a function that builds the arguments) is slow,
hence this laborious way of doing things.
It's done like this so validators don't all need **kwargs in their signature, eg. any combination of
the arguments "values", "fields" and/or "config" are permitted.
"""
from inspect import signature
sig = signature(validator)
args = list(sig.parameters.keys())
first_arg = args.pop(0)
if first_arg == 'self':
raise ConfigError(
f'Invalid signature for validator {validator}: {sig}, "self" not permitted as first argument, '
f'should be: (cls, value, values, config, field), "values", "config" and "field" are all optional.'
)
elif first_arg == 'cls':
# assume the second argument is value
return wraps(validator)(_generic_validator_cls(validator, sig, set(args[1:])))
else:
# assume the first argument was value which has already been removed
return wraps(validator)(_generic_validator_basic(validator, sig, set(args)))
def prep_validators(v_funcs: Iterable[AnyCallable]) -> 'ValidatorsList':
return [make_generic_validator(f) for f in v_funcs if f]
all_kwargs = {'values', 'field', 'config'}
def _generic_validator_cls(validator: AnyCallable, sig: 'Signature', args: Set[str]) -> 'ValidatorCallable':
# assume the first argument is value
has_kwargs = False
if 'kwargs' in args:
has_kwargs = True
args -= {'kwargs'}
if not args.issubset(all_kwargs):
raise ConfigError(
f'Invalid signature for validator {validator}: {sig}, should be: '
f'(cls, value, values, config, field), "values", "config" and "field" are all optional.'
)
if has_kwargs:
return lambda cls, v, values, field, config: validator(cls, v, values=values, field=field, config=config)
elif args == set():
return lambda cls, v, values, field, config: validator(cls, v)
elif args == {'values'}:
return lambda cls, v, values, field, config: validator(cls, v, values=values)
elif args == {'field'}:
return lambda cls, v, values, field, config: validator(cls, v, field=field)
elif args == {'config'}:
return lambda cls, v, values, field, config: validator(cls, v, config=config)
elif args == {'values', 'field'}:
return lambda cls, v, values, field, config: validator(cls, v, values=values, field=field)
elif args == {'values', 'config'}:
return lambda cls, v, values, field, config: validator(cls, v, values=values, config=config)
elif args == {'field', 'config'}:
return lambda cls, v, values, field, config: validator(cls, v, field=field, config=config)
else:
# args == {'values', 'field', 'config'}
return lambda cls, v, values, field, config: validator(cls, v, values=values, field=field, config=config)
def _generic_validator_basic(validator: AnyCallable, sig: 'Signature', args: Set[str]) -> 'ValidatorCallable':
has_kwargs = False
if 'kwargs' in args:
has_kwargs = True
args -= {'kwargs'}
if not args.issubset(all_kwargs):
raise ConfigError(
f'Invalid signature for validator {validator}: {sig}, should be: '
f'(value, values, config, field), "values", "config" and "field" are all optional.'
)
if has_kwargs:
return lambda cls, v, values, field, config: validator(v, values=values, field=field, config=config)
elif args == set():
return lambda cls, v, values, field, config: validator(v)
elif args == {'values'}:
return lambda cls, v, values, field, config: validator(v, values=values)
elif args == {'field'}:
return lambda cls, v, values, field, config: validator(v, field=field)
elif args == {'config'}:
return lambda cls, v, values, field, config: validator(v, config=config)
elif args == {'values', 'field'}:
return lambda cls, v, values, field, config: validator(v, values=values, field=field)
elif args == {'values', 'config'}:
return lambda cls, v, values, field, config: validator(v, values=values, config=config)
elif args == {'field', 'config'}:
return lambda cls, v, values, field, config: validator(v, field=field, config=config)
else:
# args == {'values', 'field', 'config'}
return lambda cls, v, values, field, config: validator(v, values=values, field=field, config=config)
def gather_all_validators(type_: 'ModelOrDc') -> Dict[str, classmethod]:
all_attributes = ChainMap(*[cls.__dict__ for cls in type_.__mro__])
return {
k: v
for k, v in all_attributes.items()
if hasattr(v, VALIDATOR_CONFIG_KEY) or hasattr(v, ROOT_VALIDATOR_CONFIG_KEY)
}
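A sketch of the two decorators defined above in use (model and validator names are illustrative):

```python
from pydantic import BaseModel, ValidationError, root_validator, validator

class UserModel(BaseModel):
    name: str
    password1: str
    password2: str

    @validator('name')
    def name_must_contain_space(cls, v):
        # a field validator receives the field's value (plus optional kwargs)
        if ' ' not in v:
            raise ValueError('must contain a space')
        return v.title()

    @root_validator
    def passwords_match(cls, values):
        # a root validator sees all (already field-validated) values at once
        if values.get('password1') != values.get('password2'):
            raise ValueError('passwords do not match')
        return values

try:
    UserModel(name='x', password1='a', password2='b')
except ValidationError as e:
    print(e)
```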

View File

@@ -0,0 +1,488 @@
"""
Color definitions are used as per CSS3 specification:
http://www.w3.org/TR/css3-color/#svg-color
A few colors have multiple names referring to the same colors, eg. `grey` and `gray` or `aqua` and `cyan`.
In these cases the LAST color when sorted alphabetically takes preference,
eg. Color((0, 255, 255)).as_named() == 'cyan' because "cyan" comes after "aqua".
"""
import math
import re
from colorsys import hls_to_rgb, rgb_to_hls
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union, cast
from .errors import ColorError
from .utils import Representation, almost_equal_floats
if TYPE_CHECKING:
from .typing import CallableGenerator, ReprArgs
ColorTuple = Union[Tuple[int, int, int], Tuple[int, int, int, float]]
ColorType = Union[ColorTuple, str]
HslColorTuple = Union[Tuple[float, float, float], Tuple[float, float, float, float]]
class RGBA:
"""
Internal use only as a representation of a color.
"""
__slots__ = 'r', 'g', 'b', 'alpha', '_tuple'
def __init__(self, r: float, g: float, b: float, alpha: Optional[float]):
self.r = r
self.g = g
self.b = b
self.alpha = alpha
self._tuple: Tuple[float, float, float, Optional[float]] = (r, g, b, alpha)
def __getitem__(self, item: Any) -> Any:
return self._tuple[item]
# these are not compiled here to avoid import slowdown, they'll be compiled the first time they're used, then cached
r_hex_short = r'\s*(?:#|0x)?([0-9a-f])([0-9a-f])([0-9a-f])([0-9a-f])?\s*'
r_hex_long = r'\s*(?:#|0x)?([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})?\s*'
_r_255 = r'(\d{1,3}(?:\.\d+)?)'
_r_comma = r'\s*,\s*'
r_rgb = fr'\s*rgb\(\s*{_r_255}{_r_comma}{_r_255}{_r_comma}{_r_255}\)\s*'
_r_alpha = r'(\d(?:\.\d+)?|\.\d+|\d{1,2}%)'
r_rgba = fr'\s*rgba\(\s*{_r_255}{_r_comma}{_r_255}{_r_comma}{_r_255}{_r_comma}{_r_alpha}\s*\)\s*'
_r_h = r'(-?\d+(?:\.\d+)?|-?\.\d+)(deg|rad|turn)?'
_r_sl = r'(\d{1,3}(?:\.\d+)?)%'
r_hsl = fr'\s*hsl\(\s*{_r_h}{_r_comma}{_r_sl}{_r_comma}{_r_sl}\s*\)\s*'
r_hsla = fr'\s*hsl\(\s*{_r_h}{_r_comma}{_r_sl}{_r_comma}{_r_sl}{_r_comma}{_r_alpha}\s*\)\s*'
# colors where the two hex characters are the same; if all channels match this, the short form of hex colors can be used
repeat_colors = {int(c * 2, 16) for c in '0123456789abcdef'}
rads = 2 * math.pi
class Color(Representation):
__slots__ = '_original', '_rgba'
def __init__(self, value: ColorType) -> None:
self._rgba: RGBA
self._original: ColorType
if isinstance(value, (tuple, list)):
self._rgba = parse_tuple(value)
elif isinstance(value, str):
self._rgba = parse_str(value)
elif isinstance(value, Color):
self._rgba = value._rgba
value = value._original
else:
raise ColorError(reason='value must be a tuple, list or string')
# if we've got here value must be a valid color
self._original = value
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='string', format='color')
def original(self) -> ColorType:
"""
Original value passed to Color
"""
return self._original
def as_named(self, *, fallback: bool = False) -> str:
if self._rgba.alpha is None:
rgb = cast(Tuple[int, int, int], self.as_rgb_tuple())
try:
return COLORS_BY_VALUE[rgb]
except KeyError as e:
if fallback:
return self.as_hex()
else:
raise ValueError('no named color found, use fallback=True, as_hex() or as_rgb()') from e
else:
return self.as_hex()
def as_hex(self) -> str:
"""
Hex string representing the color can be 3, 4, 6 or 8 characters depending on whether the string
a "short" representation of the color is possible and whether there's an alpha channel.
"""
values = [float_to_255(c) for c in self._rgba[:3]]
if self._rgba.alpha is not None:
values.append(float_to_255(self._rgba.alpha))
as_hex = ''.join(f'{v:02x}' for v in values)
if all(c in repeat_colors for c in values):
as_hex = ''.join(as_hex[c] for c in range(0, len(as_hex), 2))
return '#' + as_hex
def as_rgb(self) -> str:
"""
Color as an rgb(<r>, <g>, <b>) or rgba(<r>, <g>, <b>, <a>) string.
"""
if self._rgba.alpha is None:
return f'rgb({float_to_255(self._rgba.r)}, {float_to_255(self._rgba.g)}, {float_to_255(self._rgba.b)})'
else:
return (
f'rgba({float_to_255(self._rgba.r)}, {float_to_255(self._rgba.g)}, {float_to_255(self._rgba.b)}, '
f'{round(self._alpha_float(), 2)})'
)
def as_rgb_tuple(self, *, alpha: Optional[bool] = None) -> ColorTuple:
"""
Color as an RGB or RGBA tuple; red, green and blue are in the range 0 to 255, alpha if included is
in the range 0 to 1.
:param alpha: whether to include the alpha channel, options are
None - (default) include alpha only if it's set (i.e. not None)
True - always include alpha,
False - always omit alpha,
"""
r, g, b = [float_to_255(c) for c in self._rgba[:3]]
if alpha is None:
if self._rgba.alpha is None:
return r, g, b
else:
return r, g, b, self._alpha_float()
elif alpha:
return r, g, b, self._alpha_float()
else:
# alpha is False
return r, g, b
def as_hsl(self) -> str:
"""
Color as an hsl(<h>, <s>, <l>) or hsl(<h>, <s>, <l>, <a>) string.
"""
if self._rgba.alpha is None:
h, s, li = self.as_hsl_tuple(alpha=False) # type: ignore
return f'hsl({h * 360:0.0f}, {s:0.0%}, {li:0.0%})'
else:
h, s, li, a = self.as_hsl_tuple(alpha=True) # type: ignore
return f'hsl({h * 360:0.0f}, {s:0.0%}, {li:0.0%}, {round(a, 2)})'
def as_hsl_tuple(self, *, alpha: Optional[bool] = None) -> HslColorTuple:
"""
Color as an HSL or HSLA tuple, i.e. hue, saturation, lightness and optionally alpha; all elements are in
the range 0 to 1.
NOTE: this is HSL as used in HTML and most other places, not HLS as used in python's colorsys.
:param alpha: whether to include the alpha channel, options are
None - (default) include alpha only if it's set (i.e. not None)
True - always include alpha,
False - always omit alpha,
"""
h, l, s = rgb_to_hls(self._rgba.r, self._rgba.g, self._rgba.b)
if alpha is None:
if self._rgba.alpha is None:
return h, s, l
else:
return h, s, l, self._alpha_float()
if alpha:
return h, s, l, self._alpha_float()
else:
# alpha is False
return h, s, l
def _alpha_float(self) -> float:
return 1 if self._rgba.alpha is None else self._rgba.alpha
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls
def __str__(self) -> str:
return self.as_named(fallback=True)
def __repr_args__(self) -> 'ReprArgs':
return [(None, self.as_named(fallback=True))] + [('rgb', self.as_rgb_tuple())] # type: ignore
def parse_tuple(value: Tuple[Any, ...]) -> RGBA:
"""
Parse a tuple or list as a color.
"""
if len(value) == 3:
r, g, b = [parse_color_value(v) for v in value]
return RGBA(r, g, b, None)
elif len(value) == 4:
r, g, b = [parse_color_value(v) for v in value[:3]]
return RGBA(r, g, b, parse_float_alpha(value[3]))
else:
raise ColorError(reason='tuples must have length 3 or 4')
def parse_str(value: str) -> RGBA:
"""
Parse a string to an RGBA tuple, trying the following formats (in this order):
* named color, see COLORS_BY_NAME below
* hex short eg. `<prefix>fff` (prefix can be `#`, `0x` or nothing)
* hex long eg. `<prefix>ffffff` (prefix can be `#`, `0x` or nothing)
* `rgb(<r>, <g>, <b>)` or `rgba(<r>, <g>, <b>, <a>)`
* `hsl(<h>, <s>, <l>)` or `hsl(<h>, <s>, <l>, <a>)`
"""
value_lower = value.lower()
try:
r, g, b = COLORS_BY_NAME[value_lower]
except KeyError:
pass
else:
return ints_to_rgba(r, g, b, None)
m = re.fullmatch(r_hex_short, value_lower)
if m:
*rgb, a = m.groups()
r, g, b = [int(v * 2, 16) for v in rgb]
if a:
alpha: Optional[float] = int(a * 2, 16) / 255
else:
alpha = None
return ints_to_rgba(r, g, b, alpha)
m = re.fullmatch(r_hex_long, value_lower)
if m:
*rgb, a = m.groups()
r, g, b = [int(v, 16) for v in rgb]
if a:
alpha = int(a, 16) / 255
else:
alpha = None
return ints_to_rgba(r, g, b, alpha)
m = re.fullmatch(r_rgb, value_lower)
if m:
return ints_to_rgba(*m.groups(), None) # type: ignore
m = re.fullmatch(r_rgba, value_lower)
if m:
return ints_to_rgba(*m.groups()) # type: ignore
m = re.fullmatch(r_hsl, value_lower)
if m:
h, h_units, s, l_ = m.groups()
return parse_hsl(h, h_units, s, l_)
m = re.fullmatch(r_hsla, value_lower)
if m:
h, h_units, s, l_, a = m.groups()
return parse_hsl(h, h_units, s, l_, parse_float_alpha(a))
raise ColorError(reason='string not recognised as a valid color')
def ints_to_rgba(r: Union[int, str], g: Union[int, str], b: Union[int, str], alpha: Optional[float]) -> RGBA:
return RGBA(parse_color_value(r), parse_color_value(g), parse_color_value(b), parse_float_alpha(alpha))
def parse_color_value(value: Union[int, str], max_val: int = 255) -> float:
"""
Parse a value checking it's a valid int in the range 0 to max_val and divide by max_val to give a number
in the range 0 to 1
"""
try:
color = float(value)
except ValueError:
raise ColorError(reason='color values must be a valid number')
if 0 <= color <= max_val:
return color / max_val
else:
raise ColorError(reason=f'color values must be in the range 0 to {max_val}')
def parse_float_alpha(value: Union[None, str, float, int]) -> Optional[float]:
"""
Parse a value checking it's a valid float in the range 0 to 1
"""
if value is None:
return None
try:
if isinstance(value, str) and value.endswith('%'):
alpha = float(value[:-1]) / 100
else:
alpha = float(value)
except ValueError:
raise ColorError(reason='alpha values must be a valid float')
if almost_equal_floats(alpha, 1):
return None
elif 0 <= alpha <= 1:
return alpha
else:
raise ColorError(reason='alpha values must be in the range 0 to 1')
def parse_hsl(h: str, h_units: str, sat: str, light: str, alpha: Optional[float] = None) -> RGBA:
"""
Parse raw hue, saturation, lightness and alpha values and convert to RGBA.
"""
s_value, l_value = parse_color_value(sat, 100), parse_color_value(light, 100)
h_value = float(h)
if h_units in {None, 'deg'}:
h_value = h_value % 360 / 360
elif h_units == 'rad':
h_value = h_value % rads / rads
else:
# turns
h_value = h_value % 1
r, g, b = hls_to_rgb(h_value, l_value, s_value)
return RGBA(r, g, b, alpha)
def float_to_255(c: float) -> int:
return int(round(c * 255))
COLORS_BY_NAME = {
'aliceblue': (240, 248, 255),
'antiquewhite': (250, 235, 215),
'aqua': (0, 255, 255),
'aquamarine': (127, 255, 212),
'azure': (240, 255, 255),
'beige': (245, 245, 220),
'bisque': (255, 228, 196),
'black': (0, 0, 0),
'blanchedalmond': (255, 235, 205),
'blue': (0, 0, 255),
'blueviolet': (138, 43, 226),
'brown': (165, 42, 42),
'burlywood': (222, 184, 135),
'cadetblue': (95, 158, 160),
'chartreuse': (127, 255, 0),
'chocolate': (210, 105, 30),
'coral': (255, 127, 80),
'cornflowerblue': (100, 149, 237),
'cornsilk': (255, 248, 220),
'crimson': (220, 20, 60),
'cyan': (0, 255, 255),
'darkblue': (0, 0, 139),
'darkcyan': (0, 139, 139),
'darkgoldenrod': (184, 134, 11),
'darkgray': (169, 169, 169),
'darkgreen': (0, 100, 0),
'darkgrey': (169, 169, 169),
'darkkhaki': (189, 183, 107),
'darkmagenta': (139, 0, 139),
'darkolivegreen': (85, 107, 47),
'darkorange': (255, 140, 0),
'darkorchid': (153, 50, 204),
'darkred': (139, 0, 0),
'darksalmon': (233, 150, 122),
'darkseagreen': (143, 188, 143),
'darkslateblue': (72, 61, 139),
'darkslategray': (47, 79, 79),
'darkslategrey': (47, 79, 79),
'darkturquoise': (0, 206, 209),
'darkviolet': (148, 0, 211),
'deeppink': (255, 20, 147),
'deepskyblue': (0, 191, 255),
'dimgray': (105, 105, 105),
'dimgrey': (105, 105, 105),
'dodgerblue': (30, 144, 255),
'firebrick': (178, 34, 34),
'floralwhite': (255, 250, 240),
'forestgreen': (34, 139, 34),
'fuchsia': (255, 0, 255),
'gainsboro': (220, 220, 220),
'ghostwhite': (248, 248, 255),
'gold': (255, 215, 0),
'goldenrod': (218, 165, 32),
'gray': (128, 128, 128),
'green': (0, 128, 0),
'greenyellow': (173, 255, 47),
'grey': (128, 128, 128),
'honeydew': (240, 255, 240),
'hotpink': (255, 105, 180),
'indianred': (205, 92, 92),
'indigo': (75, 0, 130),
'ivory': (255, 255, 240),
'khaki': (240, 230, 140),
'lavender': (230, 230, 250),
'lavenderblush': (255, 240, 245),
'lawngreen': (124, 252, 0),
'lemonchiffon': (255, 250, 205),
'lightblue': (173, 216, 230),
'lightcoral': (240, 128, 128),
'lightcyan': (224, 255, 255),
'lightgoldenrodyellow': (250, 250, 210),
'lightgray': (211, 211, 211),
'lightgreen': (144, 238, 144),
'lightgrey': (211, 211, 211),
'lightpink': (255, 182, 193),
'lightsalmon': (255, 160, 122),
'lightseagreen': (32, 178, 170),
'lightskyblue': (135, 206, 250),
'lightslategray': (119, 136, 153),
'lightslategrey': (119, 136, 153),
'lightsteelblue': (176, 196, 222),
'lightyellow': (255, 255, 224),
'lime': (0, 255, 0),
'limegreen': (50, 205, 50),
'linen': (250, 240, 230),
'magenta': (255, 0, 255),
'maroon': (128, 0, 0),
'mediumaquamarine': (102, 205, 170),
'mediumblue': (0, 0, 205),
'mediumorchid': (186, 85, 211),
'mediumpurple': (147, 112, 219),
'mediumseagreen': (60, 179, 113),
'mediumslateblue': (123, 104, 238),
'mediumspringgreen': (0, 250, 154),
'mediumturquoise': (72, 209, 204),
'mediumvioletred': (199, 21, 133),
'midnightblue': (25, 25, 112),
'mintcream': (245, 255, 250),
'mistyrose': (255, 228, 225),
'moccasin': (255, 228, 181),
'navajowhite': (255, 222, 173),
'navy': (0, 0, 128),
'oldlace': (253, 245, 230),
'olive': (128, 128, 0),
'olivedrab': (107, 142, 35),
'orange': (255, 165, 0),
'orangered': (255, 69, 0),
'orchid': (218, 112, 214),
'palegoldenrod': (238, 232, 170),
'palegreen': (152, 251, 152),
'paleturquoise': (175, 238, 238),
'palevioletred': (219, 112, 147),
'papayawhip': (255, 239, 213),
'peachpuff': (255, 218, 185),
'peru': (205, 133, 63),
'pink': (255, 192, 203),
'plum': (221, 160, 221),
'powderblue': (176, 224, 230),
'purple': (128, 0, 128),
'red': (255, 0, 0),
'rosybrown': (188, 143, 143),
'royalblue': (65, 105, 225),
'saddlebrown': (139, 69, 19),
'salmon': (250, 128, 114),
'sandybrown': (244, 164, 96),
'seagreen': (46, 139, 87),
'seashell': (255, 245, 238),
'sienna': (160, 82, 45),
'silver': (192, 192, 192),
'skyblue': (135, 206, 235),
'slateblue': (106, 90, 205),
'slategray': (112, 128, 144),
'slategrey': (112, 128, 144),
'snow': (255, 250, 250),
'springgreen': (0, 255, 127),
'steelblue': (70, 130, 180),
'tan': (210, 180, 140),
'teal': (0, 128, 128),
'thistle': (216, 191, 216),
'tomato': (255, 99, 71),
'turquoise': (64, 224, 208),
'violet': (238, 130, 238),
'wheat': (245, 222, 179),
'white': (255, 255, 255),
'whitesmoke': (245, 245, 245),
'yellow': (255, 255, 0),
'yellowgreen': (154, 205, 50),
}
COLORS_BY_VALUE = {v: k for k, v in COLORS_BY_NAME.items()}
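A quick sketch of the `Color` type in use, exercising the parsing and output formats defined above:

```python
from pydantic.color import Color

c = Color('#09f')                        # short hex form
print(c.as_rgb_tuple())                  # (0, 153, 255)
print(c.as_hex())                        # '#09f' -- short form used when all channels allow it
print(c.as_hsl())                        # 'hsl(204, 100%, 50%)'
print(Color((0, 255, 255)).as_named())   # 'cyan' -- beats 'aqua' per the module docstring
print(Color('hsl(210, 100%, 50%)').as_rgb())
```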

View File

@@ -0,0 +1,267 @@
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Type, TypeVar, Union, overload
from .class_validators import gather_all_validators
from .error_wrappers import ValidationError
from .errors import DataclassTypeError
from .fields import Field, FieldInfo, Required, Undefined
from .main import create_model, validate_model
from .typing import resolve_annotations
from .utils import ClassAttribute
if TYPE_CHECKING:
from .main import BaseConfig, BaseModel # noqa: F401
from .typing import CallableGenerator, NoArgAnyCallable
DataclassT = TypeVar('DataclassT', bound='Dataclass')
class Dataclass:
__pydantic_model__: Type[BaseModel]
__initialised__: bool
__post_init_original__: Optional[Callable[..., None]]
__processed__: Optional[ClassAttribute]
__has_field_info_default__: bool # whether or not a `pydantic.Field` is used as default value
def __init__(self, *args: Any, **kwargs: Any) -> None:
pass
@classmethod
def __get_validators__(cls: Type['Dataclass']) -> 'CallableGenerator':
pass
@classmethod
def __validate__(cls: Type['DataclassT'], v: Any) -> 'DataclassT':
pass
def __call__(self: 'DataclassT', *args: Any, **kwargs: Any) -> 'DataclassT':
pass
def _validate_dataclass(cls: Type['DataclassT'], v: Any) -> 'DataclassT':
if isinstance(v, cls):
return v
elif isinstance(v, (list, tuple)):
return cls(*v)
elif isinstance(v, dict):
return cls(**v)
# In nested dataclasses, v can be of type `dataclasses.dataclass`.
# But to validate fields `cls` will be in fact a `pydantic.dataclasses.dataclass`,
# which inherits directly from the class of `v`.
elif is_builtin_dataclass(v) and cls.__bases__[0] is type(v):
import dataclasses
return cls(**dataclasses.asdict(v))
else:
raise DataclassTypeError(class_name=cls.__name__)
def _get_validators(cls: Type['Dataclass']) -> 'CallableGenerator':
yield cls.__validate__
def setattr_validate_assignment(self: 'Dataclass', name: str, value: Any) -> None:
if self.__initialised__:
d = dict(self.__dict__)
d.pop(name, None)
known_field = self.__pydantic_model__.__fields__.get(name, None)
if known_field:
value, error_ = known_field.validate(value, d, loc=name, cls=self.__class__)
if error_:
raise ValidationError([error_], self.__class__)
object.__setattr__(self, name, value)
def is_builtin_dataclass(_cls: Type[Any]) -> bool:
"""
`dataclasses.is_dataclass` is True if one of the class parents is a `dataclass`.
This is why we also add a class attribute `__processed__` to only consider 'direct' built-in dataclasses
"""
import dataclasses
return not hasattr(_cls, '__processed__') and dataclasses.is_dataclass(_cls)
def _generate_pydantic_post_init(
post_init_original: Optional[Callable[..., None]], post_init_post_parse: Optional[Callable[..., None]]
) -> Callable[..., None]:
def _pydantic_post_init(self: 'Dataclass', *initvars: Any) -> None:
if post_init_original is not None:
post_init_original(self, *initvars)
if getattr(self, '__has_field_info_default__', False):
# We need to remove `FieldInfo` values since they are not valid as input
# It's ok to do that because they are obviously the default values!
input_data = {k: v for k, v in self.__dict__.items() if not isinstance(v, FieldInfo)}
else:
input_data = self.__dict__
d, _, validation_error = validate_model(self.__pydantic_model__, input_data, cls=self.__class__)
if validation_error:
raise validation_error
object.__setattr__(self, '__dict__', d)
object.__setattr__(self, '__initialised__', True)
if post_init_post_parse is not None:
post_init_post_parse(self, *initvars)
return _pydantic_post_init
def _process_class(
_cls: Type[Any],
init: bool,
repr: bool,
eq: bool,
order: bool,
unsafe_hash: bool,
frozen: bool,
config: Optional[Type[Any]],
) -> Type['Dataclass']:
import dataclasses
post_init_original = getattr(_cls, '__post_init__', None)
if post_init_original and post_init_original.__name__ == '_pydantic_post_init':
post_init_original = None
if not post_init_original:
post_init_original = getattr(_cls, '__post_init_original__', None)
post_init_post_parse = getattr(_cls, '__post_init_post_parse__', None)
_pydantic_post_init = _generate_pydantic_post_init(post_init_original, post_init_post_parse)
# If the class is already a dataclass, __post_init__ will not be called automatically
# so no validation will be added.
# We hence create dynamically a new dataclass:
# ```
# @dataclasses.dataclass
# class NewClass(_cls):
# __post_init__ = _pydantic_post_init
# ```
# with the exact same fields as the base dataclass
# and register it on module level to address pickle problem:
# https://github.com/samuelcolvin/pydantic/issues/2111
if is_builtin_dataclass(_cls):
uniq_class_name = f'_Pydantic_{_cls.__name__}_{id(_cls)}'
_cls = type(
# for pretty output, the new class will keep the same name as the original
_cls.__name__,
(_cls,),
{
'__annotations__': resolve_annotations(_cls.__annotations__, _cls.__module__),
'__post_init__': _pydantic_post_init,
# attrs for pickle to find this class
'__module__': __name__,
'__qualname__': uniq_class_name,
},
)
globals()[uniq_class_name] = _cls
else:
_cls.__post_init__ = _pydantic_post_init
cls: Type['Dataclass'] = dataclasses.dataclass( # type: ignore
_cls, init=init, repr=repr, eq=eq, order=order, unsafe_hash=unsafe_hash, frozen=frozen
)
cls.__processed__ = ClassAttribute('__processed__', True)
field_definitions: Dict[str, Any] = {}
for field in dataclasses.fields(cls):
default: Any = Undefined
default_factory: Optional['NoArgAnyCallable'] = None
field_info: FieldInfo
if field.default is not dataclasses.MISSING:
default = field.default
# mypy issue 7020 and 708
elif field.default_factory is not dataclasses.MISSING: # type: ignore
default_factory = field.default_factory # type: ignore
else:
default = Required
if isinstance(default, FieldInfo):
field_info = default
cls.__has_field_info_default__ = True
else:
field_info = Field(default=default, default_factory=default_factory, **field.metadata)
field_definitions[field.name] = (field.type, field_info)
validators = gather_all_validators(cls)
cls.__pydantic_model__ = create_model(
cls.__name__, __config__=config, __module__=_cls.__module__, __validators__=validators, **field_definitions
)
cls.__initialised__ = False
cls.__validate__ = classmethod(_validate_dataclass) # type: ignore[assignment]
cls.__get_validators__ = classmethod(_get_validators) # type: ignore[assignment]
if post_init_original:
cls.__post_init_original__ = post_init_original
if cls.__pydantic_model__.__config__.validate_assignment and not frozen:
cls.__setattr__ = setattr_validate_assignment # type: ignore[assignment]
return cls
@overload
def dataclass(
*,
init: bool = True,
repr: bool = True,
eq: bool = True,
order: bool = False,
unsafe_hash: bool = False,
frozen: bool = False,
config: Type[Any] = None,
) -> Callable[[Type[Any]], Type['Dataclass']]:
...
@overload
def dataclass(
_cls: Type[Any],
*,
init: bool = True,
repr: bool = True,
eq: bool = True,
order: bool = False,
unsafe_hash: bool = False,
frozen: bool = False,
config: Type[Any] = None,
) -> Type['Dataclass']:
...
def dataclass(
_cls: Optional[Type[Any]] = None,
*,
init: bool = True,
repr: bool = True,
eq: bool = True,
order: bool = False,
unsafe_hash: bool = False,
frozen: bool = False,
config: Type[Any] = None,
) -> Union[Callable[[Type[Any]], Type['Dataclass']], Type['Dataclass']]:
"""
Like the python standard lib dataclasses but with type validation.
Arguments are the same as for standard dataclasses, except for the additional `config` argument,
which accepts a model `Config`-style class (controlling, for example, `validate_assignment`).
"""
def wrap(cls: Type[Any]) -> Type['Dataclass']:
return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen, config)
if _cls is None:
return wrap
return wrap(_cls)
def make_dataclass_validator(_cls: Type[Any], config: Type['BaseConfig']) -> 'CallableGenerator':
"""
Create a pydantic.dataclass from a builtin dataclass to add type validation
and yield the validators
It retrieves the parameters of the dataclass and forwards them to the newly created dataclass
"""
dataclass_params = _cls.__dataclass_params__
stdlib_dataclass_parameters = {param: getattr(dataclass_params, param) for param in dataclass_params.__slots__}
cls = dataclass(_cls, config=config, **stdlib_dataclass_parameters)
yield from _get_validators(cls)
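A sketch of the decorator in use (the `Point` dataclass is illustrative):

```python
import pydantic

@pydantic.dataclasses.dataclass
class Point:
    x: int
    y: int = 0

print(Point(x='1'))  # input is validated and coerced: Point(x=1, y=0)
try:
    Point(x='not a number')
except pydantic.ValidationError as e:
    print(e)
```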

View File

@@ -0,0 +1,241 @@
"""
Functions to parse datetime objects.
We're using regular expressions rather than time.strptime because:
- They provide both validation and parsing.
- They're more flexible for datetimes.
- The date/datetime/time constructors produce friendlier error messages.
Stolen from https://raw.githubusercontent.com/django/django/master/django/utils/dateparse.py at
9718fa2e8abe430c3526a9278dd976443d4ae3c6
Changed to:
* use standard python datetime types not django.utils.timezone
* raise ValueError when regex doesn't match rather than returning None
* support parsing unix timestamps for dates and datetimes
"""
import re
from datetime import date, datetime, time, timedelta, timezone
from typing import Dict, Optional, Type, Union
from . import errors
date_expr = r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
time_expr = (
r'(?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
r'(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$'
)
date_re = re.compile(f'{date_expr}$')
time_re = re.compile(time_expr)
datetime_re = re.compile(f'{date_expr}[T ]{time_expr}')
standard_duration_re = re.compile(
r'^'
r'(?:(?P<days>-?\d+) (days?, )?)?'
r'((?:(?P<hours>-?\d+):)(?=\d+:\d+))?'
r'(?:(?P<minutes>-?\d+):)?'
r'(?P<seconds>-?\d+)'
r'(?:\.(?P<microseconds>\d{1,6})\d{0,6})?'
r'$'
)
# Support the sections of ISO 8601 date representation that are accepted by timedelta
iso8601_duration_re = re.compile(
r'^(?P<sign>[-+]?)'
r'P'
r'(?:(?P<days>\d+(.\d+)?)D)?'
r'(?:T'
r'(?:(?P<hours>\d+(.\d+)?)H)?'
r'(?:(?P<minutes>\d+(.\d+)?)M)?'
r'(?:(?P<seconds>\d+(.\d+)?)S)?'
r')?'
r'$'
)
EPOCH = datetime(1970, 1, 1)
# if greater than this, the number is in ms, if less than or equal it's in seconds
# (in seconds this is 11th October 2603, in ms it's 20th August 1970)
MS_WATERSHED = int(2e10)
StrBytesIntFloat = Union[str, bytes, int, float]
def get_numeric(value: StrBytesIntFloat, native_expected_type: str) -> Union[None, int, float]:
if isinstance(value, (int, float)):
return value
try:
return float(value)
except ValueError:
return None
except TypeError:
raise TypeError(f'invalid type; expected {native_expected_type}, string, bytes, int or float')
def from_unix_seconds(seconds: Union[int, float]) -> datetime:
while abs(seconds) > MS_WATERSHED:
seconds /= 1000
dt = EPOCH + timedelta(seconds=seconds)
return dt.replace(tzinfo=timezone.utc)
def _parse_timezone(value: Optional[str], error: Type[Exception]) -> Union[None, int, timezone]:
if value == 'Z':
return timezone.utc
elif value is not None:
offset_mins = int(value[-2:]) if len(value) > 3 else 0
offset = 60 * int(value[1:3]) + offset_mins
if value[0] == '-':
offset = -offset
try:
return timezone(timedelta(minutes=offset))
except ValueError:
raise error()
else:
return None
def parse_date(value: Union[date, StrBytesIntFloat]) -> date:
"""
Parse a date/int/float/string and return a datetime.date.
Raise ValueError if the input is well formatted but not a valid date.
Raise ValueError if the input isn't well formatted.
"""
if isinstance(value, date):
if isinstance(value, datetime):
return value.date()
else:
return value
number = get_numeric(value, 'date')
if number is not None:
return from_unix_seconds(number).date()
if isinstance(value, bytes):
value = value.decode()
match = date_re.match(value) # type: ignore
if match is None:
raise errors.DateError()
kw = {k: int(v) for k, v in match.groupdict().items()}
try:
return date(**kw)
except ValueError:
raise errors.DateError()
def parse_time(value: Union[time, StrBytesIntFloat]) -> time:
"""
Parse a time/string and return a datetime.time.
Raise ValueError if the input is well formatted but not a valid time.
Raise ValueError if the input isn't well formatted.
"""
if isinstance(value, time):
return value
number = get_numeric(value, 'time')
if number is not None:
if number >= 86400:
# doesn't make sense since the time would loop back around to 0
raise errors.TimeError()
return (datetime.min + timedelta(seconds=number)).time()
if isinstance(value, bytes):
value = value.decode()
match = time_re.match(value) # type: ignore
if match is None:
raise errors.TimeError()
kw = match.groupdict()
if kw['microsecond']:
kw['microsecond'] = kw['microsecond'].ljust(6, '0')
tzinfo = _parse_timezone(kw.pop('tzinfo'), errors.TimeError)
kw_: Dict[str, Union[None, int, timezone]] = {k: int(v) for k, v in kw.items() if v is not None}
kw_['tzinfo'] = tzinfo
try:
return time(**kw_) # type: ignore
except ValueError:
raise errors.TimeError()
def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime:
"""
Parse a datetime/int/float/string and return a datetime.datetime.
This function supports time zone offsets. When the input contains one,
the output uses a timezone with a fixed offset from UTC.
Raise ValueError if the input is well formatted but not a valid datetime.
Raise ValueError if the input isn't well formatted.
"""
if isinstance(value, datetime):
return value
number = get_numeric(value, 'datetime')
if number is not None:
return from_unix_seconds(number)
if isinstance(value, bytes):
value = value.decode()
match = datetime_re.match(value) # type: ignore
if match is None:
raise errors.DateTimeError()
kw = match.groupdict()
if kw['microsecond']:
kw['microsecond'] = kw['microsecond'].ljust(6, '0')
tzinfo = _parse_timezone(kw.pop('tzinfo'), errors.DateTimeError)
kw_: Dict[str, Union[None, int, timezone]] = {k: int(v) for k, v in kw.items() if v is not None}
kw_['tzinfo'] = tzinfo
try:
return datetime(**kw_) # type: ignore
except ValueError:
raise errors.DateTimeError()
def parse_duration(value: StrBytesIntFloat) -> timedelta:
"""
Parse a duration int/float/string and return a datetime.timedelta.
The preferred format for durations in Django is '%d %H:%M:%S.%f'.
Also supports ISO 8601 representation.
"""
if isinstance(value, timedelta):
return value
if isinstance(value, (int, float)):
# below code requires a string
value = str(value)
elif isinstance(value, bytes):
value = value.decode()
try:
match = standard_duration_re.match(value) or iso8601_duration_re.match(value)
except TypeError:
raise TypeError('invalid type; expected timedelta, string, bytes, int or float')
if not match:
raise errors.DurationError()
kw = match.groupdict()
sign = -1 if kw.pop('sign', '+') == '-' else 1
if kw.get('microseconds'):
kw['microseconds'] = kw['microseconds'].ljust(6, '0')
if kw.get('seconds') and kw.get('microseconds') and kw['seconds'].startswith('-'):
kw['microseconds'] = '-' + kw['microseconds']
kw_ = {k: float(v) for k, v in kw.items() if v is not None}
return sign * timedelta(**kw_) # type: ignore
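Editor's sketch (added, not part of the original file): a hedged illustration of the three duration formats accepted above, assuming `standard_duration_re` and `iso8601_duration_re` are defined earlier in the module.

from datetime import timedelta

assert parse_duration(30) == timedelta(seconds=30)  # bare int/float: seconds
assert parse_duration('3 days, 10:11:12') == timedelta(days=3, hours=10, minutes=11, seconds=12)
assert parse_duration('P3DT10H11M12S') == timedelta(days=3, hours=10, minutes=11, seconds=12)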

View File

@@ -0,0 +1,265 @@
from functools import wraps
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
get_type_hints,
overload,
)
from . import validator
from .errors import ConfigError
from .main import BaseModel, Extra, create_model
from .utils import to_camel
__all__ = ('validate_arguments',)
if TYPE_CHECKING:
from .typing import AnyCallable
AnyCallableT = TypeVar('AnyCallableT', bound=AnyCallable)
ConfigType = Union[None, Type[Any], Dict[str, Any]]
@overload
def validate_arguments(func: None = None, *, config: 'ConfigType' = None) -> Callable[['AnyCallableT'], 'AnyCallableT']:
...
@overload
def validate_arguments(func: 'AnyCallableT') -> 'AnyCallableT':
...
def validate_arguments(func: Optional['AnyCallableT'] = None, *, config: 'ConfigType' = None) -> Any:
"""
Decorator to validate the arguments passed to a function.
"""
def validate(_func: 'AnyCallable') -> 'AnyCallable':
vd = ValidatedFunction(_func, config)
@wraps(_func)
def wrapper_function(*args: Any, **kwargs: Any) -> Any:
return vd.call(*args, **kwargs)
wrapper_function.vd = vd # type: ignore
wrapper_function.validate = vd.init_model_instance # type: ignore
wrapper_function.raw_function = vd.raw_function # type: ignore
wrapper_function.model = vd.model # type: ignore
return wrapper_function
if func:
return validate(func)
else:
return validate
ALT_V_ARGS = 'v__args'
ALT_V_KWARGS = 'v__kwargs'
V_POSITIONAL_ONLY_NAME = 'v__positional_only'
V_DUPLICATE_KWARGS = 'v__duplicate_kwargs'
class ValidatedFunction:
def __init__(self, function: 'AnyCallableT', config: 'ConfigType'): # noqa C901
from inspect import Parameter, signature
parameters: Mapping[str, Parameter] = signature(function).parameters
if parameters.keys() & {ALT_V_ARGS, ALT_V_KWARGS, V_POSITIONAL_ONLY_NAME, V_DUPLICATE_KWARGS}:
raise ConfigError(
f'"{ALT_V_ARGS}", "{ALT_V_KWARGS}", "{V_POSITIONAL_ONLY_NAME}" and "{V_DUPLICATE_KWARGS}" '
f'are not permitted as argument names when using the "{validate_arguments.__name__}" decorator'
)
self.raw_function = function
self.arg_mapping: Dict[int, str] = {}
self.positional_only_args = set()
self.v_args_name = 'args'
self.v_kwargs_name = 'kwargs'
type_hints = get_type_hints(function)
takes_args = False
takes_kwargs = False
fields: Dict[str, Tuple[Any, Any]] = {}
for i, (name, p) in enumerate(parameters.items()):
if p.annotation == p.empty:
annotation = Any
else:
annotation = type_hints[name]
default = ... if p.default == p.empty else p.default
if p.kind == Parameter.POSITIONAL_ONLY:
self.arg_mapping[i] = name
fields[name] = annotation, default
fields[V_POSITIONAL_ONLY_NAME] = List[str], None
self.positional_only_args.add(name)
elif p.kind == Parameter.POSITIONAL_OR_KEYWORD:
self.arg_mapping[i] = name
fields[name] = annotation, default
fields[V_DUPLICATE_KWARGS] = List[str], None
elif p.kind == Parameter.KEYWORD_ONLY:
fields[name] = annotation, default
elif p.kind == Parameter.VAR_POSITIONAL:
self.v_args_name = name
fields[name] = Tuple[annotation, ...], None
takes_args = True
else:
assert p.kind == Parameter.VAR_KEYWORD, p.kind
self.v_kwargs_name = name
fields[name] = Dict[str, annotation], None # type: ignore
takes_kwargs = True
# these checks avoid a clash between "args" and a field with that name
if not takes_args and self.v_args_name in fields:
self.v_args_name = ALT_V_ARGS
# same with "kwargs"
if not takes_kwargs and self.v_kwargs_name in fields:
self.v_kwargs_name = ALT_V_KWARGS
if not takes_args:
# we add the field so validation below can raise the correct exception
fields[self.v_args_name] = List[Any], None
if not takes_kwargs:
# same with kwargs
fields[self.v_kwargs_name] = Dict[Any, Any], None
self.create_model(fields, takes_args, takes_kwargs, config)
def init_model_instance(self, *args: Any, **kwargs: Any) -> BaseModel:
values = self.build_values(args, kwargs)
return self.model(**values)
def call(self, *args: Any, **kwargs: Any) -> Any:
m = self.init_model_instance(*args, **kwargs)
return self.execute(m)
def build_values(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Dict[str, Any]:
values: Dict[str, Any] = {}
if args:
arg_iter = enumerate(args)
while True:
try:
i, a = next(arg_iter)
except StopIteration:
break
arg_name = self.arg_mapping.get(i)
if arg_name is not None:
values[arg_name] = a
else:
values[self.v_args_name] = [a] + [a for _, a in arg_iter]
break
var_kwargs = {}
wrong_positional_args = []
duplicate_kwargs = []
non_var_fields = set(self.model.__fields__) - {self.v_args_name, self.v_kwargs_name}
for k, v in kwargs.items():
if k in non_var_fields:
if k in self.positional_only_args:
wrong_positional_args.append(k)
if k in values:
duplicate_kwargs.append(k)
values[k] = v
else:
var_kwargs[k] = v
if var_kwargs:
values[self.v_kwargs_name] = var_kwargs
if wrong_positional_args:
values[V_POSITIONAL_ONLY_NAME] = wrong_positional_args
if duplicate_kwargs:
values[V_DUPLICATE_KWARGS] = duplicate_kwargs
return values
def execute(self, m: BaseModel) -> Any:
d = {k: v for k, v in m._iter() if k in m.__fields_set__ or m.__fields__[k].default_factory}
var_kwargs = d.pop(self.v_kwargs_name, {})
if self.v_args_name in d:
args_: List[Any] = []
in_kwargs = False
kwargs = {}
for name, value in d.items():
if in_kwargs:
kwargs[name] = value
elif name == self.v_args_name:
args_ += value
in_kwargs = True
else:
args_.append(value)
return self.raw_function(*args_, **kwargs, **var_kwargs)
elif self.positional_only_args:
args_ = []
kwargs = {}
for name, value in d.items():
if name in self.positional_only_args:
args_.append(value)
else:
kwargs[name] = value
return self.raw_function(*args_, **kwargs, **var_kwargs)
else:
return self.raw_function(**d, **var_kwargs)
def create_model(self, fields: Dict[str, Any], takes_args: bool, takes_kwargs: bool, config: 'ConfigType') -> None:
pos_args = len(self.arg_mapping)
class CustomConfig:
pass
if not TYPE_CHECKING: # pragma: no branch
if isinstance(config, dict):
CustomConfig = type('Config', (), config) # noqa: F811
elif config is not None:
CustomConfig = config # noqa: F811
if hasattr(CustomConfig, 'fields') or hasattr(CustomConfig, 'alias_generator'):
raise ConfigError(
'Setting the "fields" and "alias_generator" property on custom Config for '
'@validate_arguments is not yet supported, please remove.'
)
class DecoratorBaseModel(BaseModel):
@validator(self.v_args_name, check_fields=False, allow_reuse=True)
def check_args(cls, v: List[Any]) -> List[Any]:
if takes_args:
return v
raise TypeError(f'{pos_args} positional arguments expected but {pos_args + len(v)} given')
@validator(self.v_kwargs_name, check_fields=False, allow_reuse=True)
def check_kwargs(cls, v: Dict[str, Any]) -> Dict[str, Any]:
if takes_kwargs:
return v
plural = '' if len(v) == 1 else 's'
keys = ', '.join(map(repr, v.keys()))
raise TypeError(f'unexpected keyword argument{plural}: {keys}')
@validator(V_POSITIONAL_ONLY_NAME, check_fields=False, allow_reuse=True)
def check_positional_only(cls, v: List[str]) -> None:
plural = '' if len(v) == 1 else 's'
keys = ', '.join(map(repr, v))
raise TypeError(f'positional-only argument{plural} passed as keyword argument{plural}: {keys}')
@validator(V_DUPLICATE_KWARGS, check_fields=False, allow_reuse=True)
def check_duplicate_kwargs(cls, v: List[str]) -> None:
plural = '' if len(v) == 1 else 's'
keys = ', '.join(map(repr, v))
raise TypeError(f'multiple values for argument{plural}: {keys}')
class Config(CustomConfig):
extra = Extra.forbid
self.model = create_model(to_camel(self.raw_function.__name__), __base__=DecoratorBaseModel, **fields)
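Editor's sketch (added, not part of the original file): a hedged usage example of the decorator above; `repeat` is an invented function, and `ValidationError` comes from pydantic's public API.

from pydantic import ValidationError, validate_arguments

@validate_arguments
def repeat(s: str, count: int, *, sep: str = ' ') -> str:
    return sep.join([s] * count)

assert repeat('x', '3') == 'x x x'          # '3' is coerced to int by the model
assert repeat.validate('x', 2).count == 2   # validate arguments without calling
try:
    repeat('x', 'not-a-number')
except ValidationError:
    pass                                    # count failed integer validation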

View File

@@ -0,0 +1,233 @@
import os
import warnings
from pathlib import Path
from typing import AbstractSet, Any, Callable, Dict, List, Mapping, Optional, Tuple, Union
from .fields import ModelField
from .main import BaseConfig, BaseModel, Extra
from .typing import display_as_type
from .utils import deep_update, path_type, sequence_like
env_file_sentinel = str(object())
SettingsSourceCallable = Callable[['BaseSettings'], Dict[str, Any]]
class SettingsError(ValueError):
pass
class BaseSettings(BaseModel):
"""
Base class for settings, allowing values to be overridden by environment variables.
This is useful in production for secrets you do not wish to save in code, it plays nicely with docker(-compose),
Heroku and any 12 factor app design.
"""
def __init__(
__pydantic_self__,
_env_file: Union[Path, str, None] = env_file_sentinel,
_env_file_encoding: Optional[str] = None,
_secrets_dir: Union[Path, str, None] = None,
**values: Any,
) -> None:
# Uses something other than `self` for the first arg to allow "self" as a settable attribute
super().__init__(
**__pydantic_self__._build_values(
values, _env_file=_env_file, _env_file_encoding=_env_file_encoding, _secrets_dir=_secrets_dir
)
)
def _build_values(
self,
init_kwargs: Dict[str, Any],
_env_file: Union[Path, str, None] = None,
_env_file_encoding: Optional[str] = None,
_secrets_dir: Union[Path, str, None] = None,
) -> Dict[str, Any]:
# Configure built-in sources
init_settings = InitSettingsSource(init_kwargs=init_kwargs)
env_settings = EnvSettingsSource(
env_file=(_env_file if _env_file != env_file_sentinel else self.__config__.env_file),
env_file_encoding=(
_env_file_encoding if _env_file_encoding is not None else self.__config__.env_file_encoding
),
)
file_secret_settings = SecretsSettingsSource(secrets_dir=_secrets_dir or self.__config__.secrets_dir)
# Provide a hook to set built-in sources priority and add / remove sources
sources = self.__config__.customise_sources(
init_settings=init_settings, env_settings=env_settings, file_secret_settings=file_secret_settings
)
if sources:
return deep_update(*reversed([source(self) for source in sources]))
else:
# no one should mean to do this, but I think returning an empty dict is marginally preferable
# to an informative error and much better than a confusing error
return {}
class Config(BaseConfig):
env_prefix = ''
env_file = None
env_file_encoding = None
secrets_dir = None
validate_all = True
extra = Extra.forbid
arbitrary_types_allowed = True
case_sensitive = False
@classmethod
def prepare_field(cls, field: ModelField) -> None:
env_names: Union[List[str], AbstractSet[str]]
field_info_from_config = cls.get_field_info(field.name)
env = field_info_from_config.get('env') or field.field_info.extra.get('env')
if env is None:
if field.has_alias:
warnings.warn(
'aliases are no longer used by BaseSettings to define which environment variables to read. '
'Instead use the "env" field setting. '
'See https://pydantic-docs.helpmanual.io/usage/settings/#environment-variable-names',
FutureWarning,
)
env_names = {cls.env_prefix + field.name}
elif isinstance(env, str):
env_names = {env}
elif isinstance(env, (set, frozenset)):
env_names = env
elif sequence_like(env):
env_names = list(env)
else:
raise TypeError(f'invalid field env: {env!r} ({display_as_type(env)}); should be string, list or set')
if not cls.case_sensitive:
env_names = env_names.__class__(n.lower() for n in env_names)
field.field_info.extra['env_names'] = env_names
@classmethod
def customise_sources(
cls,
init_settings: SettingsSourceCallable,
env_settings: SettingsSourceCallable,
file_secret_settings: SettingsSourceCallable,
) -> Tuple[SettingsSourceCallable, ...]:
return init_settings, env_settings, file_secret_settings
__config__: Config # type: ignore
class InitSettingsSource:
__slots__ = ('init_kwargs',)
def __init__(self, init_kwargs: Dict[str, Any]):
self.init_kwargs = init_kwargs
def __call__(self, settings: BaseSettings) -> Dict[str, Any]:
return self.init_kwargs
def __repr__(self) -> str:
return f'InitSettingsSource(init_kwargs={self.init_kwargs!r})'
class EnvSettingsSource:
__slots__ = ('env_file', 'env_file_encoding')
def __init__(self, env_file: Union[Path, str, None], env_file_encoding: Optional[str]):
self.env_file: Union[Path, str, None] = env_file
self.env_file_encoding: Optional[str] = env_file_encoding
def __call__(self, settings: BaseSettings) -> Dict[str, Any]:
"""
Build environment variables suitable for passing to the Model.
"""
d: Dict[str, Optional[str]] = {}
if settings.__config__.case_sensitive:
env_vars: Mapping[str, Optional[str]] = os.environ
else:
env_vars = {k.lower(): v for k, v in os.environ.items()}
if self.env_file is not None:
env_path = Path(self.env_file).expanduser()
if env_path.is_file():
env_vars = {
**read_env_file(
env_path, encoding=self.env_file_encoding, case_sensitive=settings.__config__.case_sensitive
),
**env_vars,
}
for field in settings.__fields__.values():
env_val: Optional[str] = None
for env_name in field.field_info.extra['env_names']:
env_val = env_vars.get(env_name)
if env_val is not None:
break
if env_val is None:
continue
if field.is_complex():
try:
env_val = settings.__config__.json_loads(env_val) # type: ignore
except ValueError as e:
raise SettingsError(f'error parsing JSON for "{env_name}"') from e
d[field.alias] = env_val
return d
def __repr__(self) -> str:
return f'EnvSettingsSource(env_file={self.env_file!r}, env_file_encoding={self.env_file_encoding!r})'
class SecretsSettingsSource:
__slots__ = ('secrets_dir',)
def __init__(self, secrets_dir: Union[Path, str, None]):
self.secrets_dir: Union[Path, str, None] = secrets_dir
def __call__(self, settings: BaseSettings) -> Dict[str, Any]:
"""
Build fields from "secrets" files.
"""
secrets: Dict[str, Optional[str]] = {}
if self.secrets_dir is None:
return secrets
secrets_path = Path(self.secrets_dir).expanduser()
if not secrets_path.exists():
warnings.warn(f'directory "{secrets_path}" does not exist')
return secrets
if not secrets_path.is_dir():
raise SettingsError(f'secrets_dir must reference a directory, not a {path_type(secrets_path)}')
for field in settings.__fields__.values():
for env_name in field.field_info.extra['env_names']:
path = secrets_path / env_name
if path.is_file():
secrets[field.alias] = path.read_text().strip()
elif path.exists():
warnings.warn(
f'attempted to load secret file "{path}" but found a {path_type(path)} instead.',
stacklevel=4,
)
return secrets
def __repr__(self) -> str:
return f'SecretsSettingsSource(secrets_dir={self.secrets_dir!r})'
def read_env_file(file_path: Path, *, encoding: str = None, case_sensitive: bool = False) -> Dict[str, Optional[str]]:
try:
from dotenv import dotenv_values
except ImportError as e:
raise ImportError('python-dotenv is not installed, run `pip install pydantic[dotenv]`') from e
file_vars: Dict[str, Optional[str]] = dotenv_values(file_path, encoding=encoding or 'utf8')
if not case_sensitive:
return {k.lower(): v for k, v in file_vars.items()}
else:
return file_vars
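Editor's sketch (added, not part of the original file): a hedged example of the sources above in action; `AppSettings` and the variable names are invented.

import os

class AppSettings(BaseSettings):
    api_key: str
    debug: bool = False

    class Config:
        env_prefix = 'myapp_'

os.environ['MYAPP_API_KEY'] = 'secret'  # matched case-insensitively by default
settings = AppSettings()
assert settings.api_key == 'secret' and settings.debug is False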

View File

@@ -0,0 +1,151 @@
import json
from typing import TYPE_CHECKING, Any, Dict, Generator, List, Optional, Sequence, Tuple, Type, Union
from .json import pydantic_encoder
from .utils import Representation
if TYPE_CHECKING:
from .main import BaseConfig # noqa: F401
from .types import ModelOrDc # noqa: F401
from .typing import ReprArgs
Loc = Tuple[Union[int, str], ...]
__all__ = 'ErrorWrapper', 'ValidationError'
class ErrorWrapper(Representation):
__slots__ = 'exc', '_loc'
def __init__(self, exc: Exception, loc: Union[str, 'Loc']) -> None:
self.exc = exc
self._loc = loc
def loc_tuple(self) -> 'Loc':
if isinstance(self._loc, tuple):
return self._loc
else:
return (self._loc,)
def __repr_args__(self) -> 'ReprArgs':
return [('exc', self.exc), ('loc', self.loc_tuple())]
# ErrorList is something like Union[List[Union[List[ErrorWrapper], ErrorWrapper]], ErrorWrapper]
# but recursive, therefore just use:
ErrorList = Union[Sequence[Any], ErrorWrapper]
class ValidationError(Representation, ValueError):
__slots__ = 'raw_errors', 'model', '_error_cache'
def __init__(self, errors: Sequence[ErrorList], model: 'ModelOrDc') -> None:
self.raw_errors = errors
self.model = model
self._error_cache: Optional[List[Dict[str, Any]]] = None
def errors(self) -> List[Dict[str, Any]]:
if self._error_cache is None:
try:
config = self.model.__config__ # type: ignore
except AttributeError:
config = self.model.__pydantic_model__.__config__ # type: ignore
self._error_cache = list(flatten_errors(self.raw_errors, config))
return self._error_cache
def json(self, *, indent: Union[None, int, str] = 2) -> str:
return json.dumps(self.errors(), indent=indent, default=pydantic_encoder)
def __str__(self) -> str:
errors = self.errors()
no_errors = len(errors)
return (
f'{no_errors} validation error{"" if no_errors == 1 else "s"} for {self.model.__name__}\n'
f'{display_errors(errors)}'
)
def __repr_args__(self) -> 'ReprArgs':
return [('model', self.model.__name__), ('errors', self.errors())]
def display_errors(errors: List[Dict[str, Any]]) -> str:
return '\n'.join(f'{_display_error_loc(e)}\n {e["msg"]} ({_display_error_type_and_ctx(e)})' for e in errors)
def _display_error_loc(error: Dict[str, Any]) -> str:
return ' -> '.join(str(e) for e in error['loc'])
def _display_error_type_and_ctx(error: Dict[str, Any]) -> str:
t = 'type=' + error['type']
ctx = error.get('ctx')
if ctx:
return t + ''.join(f'; {k}={v}' for k, v in ctx.items())
else:
return t
def flatten_errors(
errors: Sequence[Any], config: Type['BaseConfig'], loc: Optional['Loc'] = None
) -> Generator[Dict[str, Any], None, None]:
for error in errors:
if isinstance(error, ErrorWrapper):
if loc:
error_loc = loc + error.loc_tuple()
else:
error_loc = error.loc_tuple()
if isinstance(error.exc, ValidationError):
yield from flatten_errors(error.exc.raw_errors, config, error_loc)
else:
yield error_dict(error.exc, config, error_loc)
elif isinstance(error, list):
yield from flatten_errors(error, config, loc=loc)
else:
raise RuntimeError(f'Unknown error object: {error}')
def error_dict(exc: Exception, config: Type['BaseConfig'], loc: 'Loc') -> Dict[str, Any]:
type_ = get_exc_type(exc.__class__)
msg_template = config.error_msg_templates.get(type_) or getattr(exc, 'msg_template', None)
ctx = exc.__dict__
if msg_template:
msg = msg_template.format(**ctx)
else:
msg = str(exc)
d: Dict[str, Any] = {'loc': loc, 'msg': msg, 'type': type_}
if ctx:
d['ctx'] = ctx
return d
_EXC_TYPE_CACHE: Dict[Type[Exception], str] = {}
def get_exc_type(cls: Type[Exception]) -> str:
# slightly more efficient than using lru_cache since we don't need to worry about the cache filling up
try:
return _EXC_TYPE_CACHE[cls]
except KeyError:
r = _get_exc_type(cls)
_EXC_TYPE_CACHE[cls] = r
return r
def _get_exc_type(cls: Type[Exception]) -> str:
if issubclass(cls, AssertionError):
return 'assertion_error'
base_name = 'type_error' if issubclass(cls, TypeError) else 'value_error'
if cls in (TypeError, ValueError):
# just TypeError or ValueError, no extra code
return base_name
# if it's not a TypeError or ValueError, we just take the lowercase of the exception name
# no chaining or snake case logic, use "code" for more complex error types.
code = getattr(cls, 'code', None) or cls.__name__.replace('Error', '').lower()
return base_name + '.' + code
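Editor's sketch (added, not part of the original file): a hedged end-to-end example of how ErrorWrapper and flatten_errors surface through ValidationError; `User` is an invented model.

from pydantic import BaseModel

class User(BaseModel):
    age: int

try:
    User(age='twenty')
except ValidationError as e:
    assert e.errors() == [
        {'loc': ('age',), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'}
    ]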

View File

@@ -0,0 +1,583 @@
from decimal import Decimal
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Set, Tuple, Type, Union
from .typing import display_as_type
if TYPE_CHECKING:
from .typing import DictStrAny
# explicitly state exports to avoid "from .errors import *" also importing Decimal, Path etc.
__all__ = (
'PydanticTypeError',
'PydanticValueError',
'ConfigError',
'MissingError',
'ExtraError',
'NoneIsNotAllowedError',
'NoneIsAllowedError',
'WrongConstantError',
'NotNoneError',
'BoolError',
'BytesError',
'DictError',
'EmailError',
'UrlError',
'UrlSchemeError',
'UrlSchemePermittedError',
'UrlUserInfoError',
'UrlHostError',
'UrlHostTldError',
'UrlPortError',
'UrlExtraError',
'EnumError',
'IntEnumError',
'EnumMemberError',
'IntegerError',
'FloatError',
'PathError',
'_PathValueError',
'PathNotExistsError',
'PathNotAFileError',
'PathNotADirectoryError',
'PyObjectError',
'SequenceError',
'ListError',
'SetError',
'FrozenSetError',
'TupleError',
'TupleLengthError',
'ListMinLengthError',
'ListMaxLengthError',
'AnyStrMinLengthError',
'AnyStrMaxLengthError',
'StrError',
'StrRegexError',
'_NumberBoundError',
'NumberNotGtError',
'NumberNotGeError',
'NumberNotLtError',
'NumberNotLeError',
'NumberNotMultipleError',
'DecimalError',
'DecimalIsNotFiniteError',
'DecimalMaxDigitsError',
'DecimalMaxPlacesError',
'DecimalWholeDigitsError',
'DateTimeError',
'DateError',
'TimeError',
'DurationError',
'HashableError',
'UUIDError',
'UUIDVersionError',
'ArbitraryTypeError',
'ClassError',
'SubclassError',
'JsonError',
'JsonTypeError',
'PatternError',
'DataclassTypeError',
'CallableError',
'IPvAnyAddressError',
'IPvAnyInterfaceError',
'IPvAnyNetworkError',
'IPv4AddressError',
'IPv6AddressError',
'IPv4NetworkError',
'IPv6NetworkError',
'IPv4InterfaceError',
'IPv6InterfaceError',
'ColorError',
'StrictBoolError',
'NotDigitError',
'LuhnValidationError',
'InvalidLengthForBrand',
'InvalidByteSize',
'InvalidByteSizeUnit',
)
def cls_kwargs(cls: Type['PydanticErrorMixin'], ctx: 'DictStrAny') -> 'PydanticErrorMixin':
"""
For built-in exceptions like ValueError or TypeError, we need to implement
__reduce__ to override the default behaviour (instead of __getstate__/__setstate__).
By default, pickle protocol 2 calls `cls.__new__(cls, *args)`.
Since we only use kwargs, we need a little constructor to change that.
Note: the callable can't be a lambda as pickle looks in the namespace to find it
"""
return cls(**ctx)
class PydanticErrorMixin:
code: str
msg_template: str
def __init__(self, **ctx: Any) -> None:
self.__dict__ = ctx
def __str__(self) -> str:
return self.msg_template.format(**self.__dict__)
def __reduce__(self) -> Tuple[Callable[..., 'PydanticErrorMixin'], Tuple[Type['PydanticErrorMixin'], 'DictStrAny']]:
return cls_kwargs, (self.__class__, self.__dict__)
class PydanticTypeError(PydanticErrorMixin, TypeError):
pass
class PydanticValueError(PydanticErrorMixin, ValueError):
pass
class ConfigError(RuntimeError):
pass
class MissingError(PydanticValueError):
msg_template = 'field required'
class ExtraError(PydanticValueError):
msg_template = 'extra fields not permitted'
class NoneIsNotAllowedError(PydanticTypeError):
code = 'none.not_allowed'
msg_template = 'none is not an allowed value'
class NoneIsAllowedError(PydanticTypeError):
code = 'none.allowed'
msg_template = 'value is not none'
class WrongConstantError(PydanticValueError):
code = 'const'
def __str__(self) -> str:
permitted = ', '.join(repr(v) for v in self.permitted) # type: ignore
return f'unexpected value; permitted: {permitted}'
class NotNoneError(PydanticTypeError):
code = 'not_none'
msg_template = 'value is not None'
class BoolError(PydanticTypeError):
msg_template = 'value could not be parsed to a boolean'
class BytesError(PydanticTypeError):
msg_template = 'byte type expected'
class DictError(PydanticTypeError):
msg_template = 'value is not a valid dict'
class EmailError(PydanticValueError):
msg_template = 'value is not a valid email address'
class UrlError(PydanticValueError):
code = 'url'
class UrlSchemeError(UrlError):
code = 'url.scheme'
msg_template = 'invalid or missing URL scheme'
class UrlSchemePermittedError(UrlError):
code = 'url.scheme'
msg_template = 'URL scheme not permitted'
def __init__(self, allowed_schemes: Set[str]):
super().__init__(allowed_schemes=allowed_schemes)
class UrlUserInfoError(UrlError):
code = 'url.userinfo'
msg_template = 'userinfo required in URL but missing'
class UrlHostError(UrlError):
code = 'url.host'
msg_template = 'URL host invalid'
class UrlHostTldError(UrlError):
code = 'url.host'
msg_template = 'URL host invalid, top level domain required'
class UrlPortError(UrlError):
code = 'url.port'
msg_template = 'URL port invalid, port cannot exceed 65535'
class UrlExtraError(UrlError):
code = 'url.extra'
msg_template = 'URL invalid, extra characters found after valid URL: {extra!r}'
class EnumMemberError(PydanticTypeError):
code = 'enum'
def __str__(self) -> str:
permitted = ', '.join(repr(v.value) for v in self.enum_values) # type: ignore
return f'value is not a valid enumeration member; permitted: {permitted}'
class IntegerError(PydanticTypeError):
msg_template = 'value is not a valid integer'
class FloatError(PydanticTypeError):
msg_template = 'value is not a valid float'
class PathError(PydanticTypeError):
msg_template = 'value is not a valid path'
class _PathValueError(PydanticValueError):
def __init__(self, *, path: Path) -> None:
super().__init__(path=str(path))
class PathNotExistsError(_PathValueError):
code = 'path.not_exists'
msg_template = 'file or directory at path "{path}" does not exist'
class PathNotAFileError(_PathValueError):
code = 'path.not_a_file'
msg_template = 'path "{path}" does not point to a file'
class PathNotADirectoryError(_PathValueError):
code = 'path.not_a_directory'
msg_template = 'path "{path}" does not point to a directory'
class PyObjectError(PydanticTypeError):
msg_template = 'ensure this value contains valid import path or valid callable: {error_message}'
class SequenceError(PydanticTypeError):
msg_template = 'value is not a valid sequence'
class IterableError(PydanticTypeError):
msg_template = 'value is not a valid iterable'
class ListError(PydanticTypeError):
msg_template = 'value is not a valid list'
class SetError(PydanticTypeError):
msg_template = 'value is not a valid set'
class FrozenSetError(PydanticTypeError):
msg_template = 'value is not a valid frozenset'
class DequeError(PydanticTypeError):
msg_template = 'value is not a valid deque'
class TupleError(PydanticTypeError):
msg_template = 'value is not a valid tuple'
class TupleLengthError(PydanticValueError):
code = 'tuple.length'
msg_template = 'wrong tuple length {actual_length}, expected {expected_length}'
def __init__(self, *, actual_length: int, expected_length: int) -> None:
super().__init__(actual_length=actual_length, expected_length=expected_length)
class ListMinLengthError(PydanticValueError):
code = 'list.min_items'
msg_template = 'ensure this value has at least {limit_value} items'
def __init__(self, *, limit_value: int) -> None:
super().__init__(limit_value=limit_value)
class ListMaxLengthError(PydanticValueError):
code = 'list.max_items'
msg_template = 'ensure this value has at most {limit_value} items'
def __init__(self, *, limit_value: int) -> None:
super().__init__(limit_value=limit_value)
class SetMinLengthError(PydanticValueError):
code = 'set.min_items'
msg_template = 'ensure this value has at least {limit_value} items'
def __init__(self, *, limit_value: int) -> None:
super().__init__(limit_value=limit_value)
class SetMaxLengthError(PydanticValueError):
code = 'set.max_items'
msg_template = 'ensure this value has at most {limit_value} items'
def __init__(self, *, limit_value: int) -> None:
super().__init__(limit_value=limit_value)
class AnyStrMinLengthError(PydanticValueError):
code = 'any_str.min_length'
msg_template = 'ensure this value has at least {limit_value} characters'
def __init__(self, *, limit_value: int) -> None:
super().__init__(limit_value=limit_value)
class AnyStrMaxLengthError(PydanticValueError):
code = 'any_str.max_length'
msg_template = 'ensure this value has at most {limit_value} characters'
def __init__(self, *, limit_value: int) -> None:
super().__init__(limit_value=limit_value)
class StrError(PydanticTypeError):
msg_template = 'str type expected'
class StrRegexError(PydanticValueError):
code = 'str.regex'
msg_template = 'string does not match regex "{pattern}"'
def __init__(self, *, pattern: str) -> None:
super().__init__(pattern=pattern)
class _NumberBoundError(PydanticValueError):
def __init__(self, *, limit_value: Union[int, float, Decimal]) -> None:
super().__init__(limit_value=limit_value)
class NumberNotGtError(_NumberBoundError):
code = 'number.not_gt'
msg_template = 'ensure this value is greater than {limit_value}'
class NumberNotGeError(_NumberBoundError):
code = 'number.not_ge'
msg_template = 'ensure this value is greater than or equal to {limit_value}'
class NumberNotLtError(_NumberBoundError):
code = 'number.not_lt'
msg_template = 'ensure this value is less than {limit_value}'
class NumberNotLeError(_NumberBoundError):
code = 'number.not_le'
msg_template = 'ensure this value is less than or equal to {limit_value}'
class NumberNotMultipleError(PydanticValueError):
code = 'number.not_multiple'
msg_template = 'ensure this value is a multiple of {multiple_of}'
def __init__(self, *, multiple_of: Union[int, float, Decimal]) -> None:
super().__init__(multiple_of=multiple_of)
class DecimalError(PydanticTypeError):
msg_template = 'value is not a valid decimal'
class DecimalIsNotFiniteError(PydanticValueError):
code = 'decimal.not_finite'
msg_template = 'value is not a valid decimal'
class DecimalMaxDigitsError(PydanticValueError):
code = 'decimal.max_digits'
msg_template = 'ensure that there are no more than {max_digits} digits in total'
def __init__(self, *, max_digits: int) -> None:
super().__init__(max_digits=max_digits)
class DecimalMaxPlacesError(PydanticValueError):
code = 'decimal.max_places'
msg_template = 'ensure that there are no more than {decimal_places} decimal places'
def __init__(self, *, decimal_places: int) -> None:
super().__init__(decimal_places=decimal_places)
class DecimalWholeDigitsError(PydanticValueError):
code = 'decimal.whole_digits'
msg_template = 'ensure that there are no more than {whole_digits} digits before the decimal point'
def __init__(self, *, whole_digits: int) -> None:
super().__init__(whole_digits=whole_digits)
class DateTimeError(PydanticValueError):
msg_template = 'invalid datetime format'
class DateError(PydanticValueError):
msg_template = 'invalid date format'
class TimeError(PydanticValueError):
msg_template = 'invalid time format'
class DurationError(PydanticValueError):
msg_template = 'invalid duration format'
class HashableError(PydanticTypeError):
msg_template = 'value is not a valid hashable'
class UUIDError(PydanticTypeError):
msg_template = 'value is not a valid uuid'
class UUIDVersionError(PydanticValueError):
code = 'uuid.version'
msg_template = 'uuid version {required_version} expected'
def __init__(self, *, required_version: int) -> None:
super().__init__(required_version=required_version)
class ArbitraryTypeError(PydanticTypeError):
code = 'arbitrary_type'
msg_template = 'instance of {expected_arbitrary_type} expected'
def __init__(self, *, expected_arbitrary_type: Type[Any]) -> None:
super().__init__(expected_arbitrary_type=display_as_type(expected_arbitrary_type))
class ClassError(PydanticTypeError):
code = 'class'
msg_template = 'a class is expected'
class SubclassError(PydanticTypeError):
code = 'subclass'
msg_template = 'subclass of {expected_class} expected'
def __init__(self, *, expected_class: Type[Any]) -> None:
super().__init__(expected_class=display_as_type(expected_class))
class JsonError(PydanticValueError):
msg_template = 'Invalid JSON'
class JsonTypeError(PydanticTypeError):
code = 'json'
msg_template = 'JSON object must be str, bytes or bytearray'
class PatternError(PydanticValueError):
code = 'regex_pattern'
msg_template = 'Invalid regular expression'
class DataclassTypeError(PydanticTypeError):
code = 'dataclass'
msg_template = 'instance of {class_name}, tuple or dict expected'
class CallableError(PydanticTypeError):
msg_template = '{value} is not callable'
class EnumError(PydanticTypeError):
code = 'enum_instance'
msg_template = '{value} is not a valid Enum instance'
class IntEnumError(PydanticTypeError):
code = 'int_enum_instance'
msg_template = '{value} is not a valid IntEnum instance'
class IPvAnyAddressError(PydanticValueError):
msg_template = 'value is not a valid IPv4 or IPv6 address'
class IPvAnyInterfaceError(PydanticValueError):
msg_template = 'value is not a valid IPv4 or IPv6 interface'
class IPvAnyNetworkError(PydanticValueError):
msg_template = 'value is not a valid IPv4 or IPv6 network'
class IPv4AddressError(PydanticValueError):
msg_template = 'value is not a valid IPv4 address'
class IPv6AddressError(PydanticValueError):
msg_template = 'value is not a valid IPv6 address'
class IPv4NetworkError(PydanticValueError):
msg_template = 'value is not a valid IPv4 network'
class IPv6NetworkError(PydanticValueError):
msg_template = 'value is not a valid IPv6 network'
class IPv4InterfaceError(PydanticValueError):
msg_template = 'value is not a valid IPv4 interface'
class IPv6InterfaceError(PydanticValueError):
msg_template = 'value is not a valid IPv6 interface'
class ColorError(PydanticValueError):
msg_template = 'value is not a valid color: {reason}'
class StrictBoolError(PydanticValueError):
msg_template = 'value is not a valid boolean'
class NotDigitError(PydanticValueError):
code = 'payment_card_number.digits'
msg_template = 'card number is not all digits'
class LuhnValidationError(PydanticValueError):
code = 'payment_card_number.luhn_check'
msg_template = 'card number is not luhn valid'
class InvalidLengthForBrand(PydanticValueError):
code = 'payment_card_number.invalid_length_for_brand'
msg_template = 'Length for a {brand} card must be {required_length}'
class InvalidByteSize(PydanticValueError):
msg_template = 'could not parse value and unit from byte string'
class InvalidByteSizeUnit(PydanticValueError):
msg_template = 'could not interpret byte unit: {unit}'
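Editor's sketch (added, not part of the original file): a hedged example of defining a custom error on top of the mixin machinery above; the class and field names are invented.

class NotFibonacciError(PydanticValueError):
    code = 'number.not_fibonacci'
    msg_template = 'value is not a Fibonacci number, got {wrong_value}'

err = NotFibonacciError(wrong_value=4)
assert str(err) == 'value is not a Fibonacci number, got 4'
assert err.code == 'number.not_fibonacci'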

File diff suppressed because it is too large

View File

@@ -0,0 +1,266 @@
import sys
import typing
from typing import (
TYPE_CHECKING,
Any,
ClassVar,
Dict,
Generic,
Iterator,
List,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
cast,
get_type_hints,
)
from .class_validators import gather_all_validators
from .fields import DeferredType
from .main import BaseModel, create_model
from .typing import display_as_type, get_args, get_origin, typing_base
from .utils import all_identical, lenient_issubclass
_generic_types_cache: Dict[Tuple[Type[Any], Union[Any, Tuple[Any, ...]]], Type[BaseModel]] = {}
GenericModelT = TypeVar('GenericModelT', bound='GenericModel')
TypeVarType = Any # since mypy doesn't allow the use of TypeVar as a type
class GenericModel(BaseModel):
__slots__ = ()
__concrete__: ClassVar[bool] = False
if TYPE_CHECKING:
# Putting this in a TYPE_CHECKING block allows us to replace `if Generic not in cls.__bases__` with
# `not hasattr(cls, "__parameters__")`. This means we don't need to force non-concrete subclasses of
# `GenericModel` to also inherit from `Generic`, which would require changes to the use of `create_model` below.
__parameters__: ClassVar[Tuple[TypeVarType, ...]]
# Setting the return type as Type[Any] instead of Type[BaseModel] prevents PyCharm warnings
def __class_getitem__(cls: Type[GenericModelT], params: Union[Type[Any], Tuple[Type[Any], ...]]) -> Type[Any]:
"""Instantiates a new class from a generic class `cls` and type variables `params`.
:param params: Tuple of types the class is parameterized with. Given a generic class
`Model` with 2 type variables and a concrete model `Model[str, int]`,
the value `(str, int)` would be passed to `params`.
:return: New model class inheriting from `cls` with instantiated
types described by `params`. If no parameters are given, `cls` is
returned as is.
"""
cached = _generic_types_cache.get((cls, params))
if cached is not None:
return cached
if cls.__concrete__ and Generic not in cls.__bases__:
raise TypeError('Cannot parameterize a concrete instantiation of a generic model')
if not isinstance(params, tuple):
params = (params,)
if cls is GenericModel and any(isinstance(param, TypeVar) for param in params):
raise TypeError('Type parameters should be placed on typing.Generic, not GenericModel')
if not hasattr(cls, '__parameters__'):
raise TypeError(f'Type {cls.__name__} must inherit from typing.Generic before being parameterized')
check_parameters_count(cls, params)
# Build map from generic typevars to passed params
typevars_map: Dict[TypeVarType, Type[Any]] = dict(zip(cls.__parameters__, params))
if all_identical(typevars_map.keys(), typevars_map.values()) and typevars_map:
return cls # if arguments are equal to parameters it's the same object
# Create new model with original model as parent inserting fields with DeferredType.
model_name = cls.__concrete_name__(params)
validators = gather_all_validators(cls)
type_hints = get_type_hints(cls).items()
instance_type_hints = {k: v for k, v in type_hints if get_origin(v) is not ClassVar}
fields = {k: (DeferredType(), cls.__fields__[k].field_info) for k in instance_type_hints if k in cls.__fields__}
model_module, called_globally = get_caller_frame_info()
created_model = cast(
Type[GenericModel], # casting ensures mypy is aware of the __concrete__ and __parameters__ attributes
create_model(
model_name,
__module__=model_module or cls.__module__,
__base__=cls,
__config__=None,
__validators__=validators,
**fields,
),
)
if called_globally: # create global reference and therefore allow pickling
object_by_reference = None
reference_name = model_name
reference_module_globals = sys.modules[created_model.__module__].__dict__
while object_by_reference is not created_model:
object_by_reference = reference_module_globals.setdefault(reference_name, created_model)
reference_name += '_'
created_model.Config = cls.Config
# Find any typevars that are still present in the model.
# If none are left, the model is fully "concrete", otherwise the new
# class is a generic class as well taking the found typevars as
# parameters.
new_params = tuple(
{param: None for param in iter_contained_typevars(typevars_map.values())}
) # use dict as ordered set
created_model.__concrete__ = not new_params
if new_params:
created_model.__parameters__ = new_params
# Save created model in cache so we don't end up creating duplicate
# models that should be identical.
_generic_types_cache[(cls, params)] = created_model
if len(params) == 1:
_generic_types_cache[(cls, params[0])] = created_model
# Recursively walk class type hints and replace generic typevars
# with concrete types that were passed.
_prepare_model_fields(created_model, fields, instance_type_hints, typevars_map)
return created_model
@classmethod
def __concrete_name__(cls: Type[Any], params: Tuple[Type[Any], ...]) -> str:
"""Compute class name for child classes.
:param params: Tuple of types the class is parameterized with. Given a generic class
`Model` with 2 type variables and a concrete model `Model[str, int]`,
the value `(str, int)` would be passed to `params`.
:return: String representing the new class where `params` are
passed to `cls` as type variables.
This method can be overridden to achieve a custom naming scheme for GenericModels.
"""
param_names = [display_as_type(param) for param in params]
params_component = ', '.join(param_names)
return f'{cls.__name__}[{params_component}]'
def replace_types(type_: Any, type_map: Mapping[Any, Any]) -> Any:
"""Return type with all occurances of `type_map` keys recursively replaced with their values.
:param type_: Any type, class or generic alias
:param type_map: Mapping from `TypeVar` instance to concrete types.
:return: New type representing the basic structure of `type_` with all
`type_map` keys recursively replaced.
>>> replace_types(Tuple[str, Union[List[str], float]], {str: int})
Tuple[int, Union[List[int], float]]
"""
if not type_map:
return type_
type_args = get_args(type_)
origin_type = get_origin(type_)
# Having type args is a good indicator that this is a typing module
# class instantiation or a generic alias of some sort.
if type_args:
resolved_type_args = tuple(replace_types(arg, type_map) for arg in type_args)
if all_identical(type_args, resolved_type_args):
# If all arguments are the same, there is no need to modify the
# type or create a new object at all
return type_
if origin_type is not None and isinstance(type_, typing_base) and not isinstance(origin_type, typing_base):
# In python < 3.9 generic aliases don't exist so any of these like `list`,
# `type` or `collections.abc.Callable` need to be translated.
# See: https://www.python.org/dev/peps/pep-0585
origin_type = getattr(typing, type_._name)
return origin_type[resolved_type_args]
# We handle pydantic generic models separately as they don't have the same
# semantics as "typing" classes or generic aliases
if not origin_type and lenient_issubclass(type_, GenericModel) and not type_.__concrete__:
type_args = type_.__parameters__
resolved_type_args = tuple(replace_types(t, type_map) for t in type_args)
if all_identical(type_args, resolved_type_args):
return type_
return type_[resolved_type_args]
# Handle special case for typehints that can have lists as arguments.
# `typing.Callable[[int, str], int]` is an example for this.
if isinstance(type_, (List, list)):
resolved_list = list(replace_types(element, type_map) for element in type_)
if all_identical(type_, resolved_list):
return type_
return resolved_list
# If all else fails, we try to resolve the type directly and otherwise just
# return the input with no modifications.
return type_map.get(type_, type_)
def check_parameters_count(cls: Type[GenericModel], parameters: Tuple[Any, ...]) -> None:
actual = len(parameters)
expected = len(cls.__parameters__)
if actual != expected:
description = 'many' if actual > expected else 'few'
raise TypeError(f'Too {description} parameters for {cls.__name__}; actual {actual}, expected {expected}')
DictValues: Type[Any] = {}.values().__class__
def iter_contained_typevars(v: Any) -> Iterator[TypeVarType]:
"""Recursively iterate through all subtypes and type args of `v` and yield any typevars that are found."""
if isinstance(v, TypeVar):
yield v
elif hasattr(v, '__parameters__') and not get_origin(v) and lenient_issubclass(v, GenericModel):
yield from v.__parameters__
elif isinstance(v, (DictValues, list)):
for var in v:
yield from iter_contained_typevars(var)
else:
args = get_args(v)
for arg in args:
yield from iter_contained_typevars(arg)
def get_caller_frame_info() -> Tuple[Optional[str], bool]:
"""
Used inside a function to check whether it was called globally.
Will only work against non-compiled code, therefore it is used only in pydantic.generics.
:returns Tuple[module_name, called_globally]
"""
try:
previous_caller_frame = sys._getframe(2)
except ValueError as e:
raise RuntimeError('This function must be used inside another function') from e
except AttributeError: # sys module does not have _getframe function, so there's nothing we can do about it
return None, False
frame_globals = previous_caller_frame.f_globals
return frame_globals.get('__name__'), previous_caller_frame.f_locals is frame_globals
def _prepare_model_fields(
created_model: Type[GenericModel],
fields: Mapping[str, Any],
instance_type_hints: Mapping[str, type],
typevars_map: Mapping[Any, type],
) -> None:
"""
Replace DeferredType fields with concrete type hints and prepare them.
"""
for key, field in created_model.__fields__.items():
if key not in fields:
assert field.type_.__class__ is not DeferredType
# https://github.com/nedbat/coveragepy/issues/198
continue # pragma: no cover
assert field.type_.__class__ is DeferredType, field.type_.__class__
field_type_hint = instance_type_hints[key]
concrete_type = replace_types(field_type_hint, typevars_map)
field.type_ = concrete_type
field.outer_type_ = concrete_type
field.prepare()
created_model.__annotations__[key] = concrete_type
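Editor's sketch (added, not part of the original file): a hedged example of parameterizing a GenericModel; `Page` is an invented model.

from typing import Generic, List, TypeVar

T = TypeVar('T')

class Page(GenericModel, Generic[T]):
    items: List[T]
    total: int

concrete = Page[int](items=[1, 2, 3], total=3)
assert type(concrete).__name__ == 'Page[int]'   # via __concrete_name__
assert Page[int] is Page[int]                   # served from _generic_types_cache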

View File

@@ -0,0 +1,116 @@
import datetime
import re
import sys
from collections import deque
from decimal import Decimal
from enum import Enum
from ipaddress import IPv4Address, IPv4Interface, IPv4Network, IPv6Address, IPv6Interface, IPv6Network
from pathlib import Path
from types import GeneratorType
from typing import Any, Callable, Dict, Type, Union
from uuid import UUID
if sys.version_info >= (3, 7):
Pattern = re.Pattern
else:
# python 3.6
Pattern = re.compile('a').__class__
from .color import Color
from .types import SecretBytes, SecretStr
__all__ = 'pydantic_encoder', 'custom_pydantic_encoder', 'timedelta_isoformat'
def isoformat(o: Union[datetime.date, datetime.time]) -> str:
return o.isoformat()
def decimal_encoder(dec_value: Decimal) -> Union[int, float]:
"""
Encodes a Decimal as int if there's no exponent, otherwise as float.
This is useful when we use ConstrainedDecimal to represent Numeric(x,0)
where an integer (but not int typed) is used. Encoding this as a float
results in failed round-tripping between encode and parse.
Our Id type is a prime example of this.
>>> decimal_encoder(Decimal("1.0"))
1.0
>>> decimal_encoder(Decimal("1"))
1
"""
if dec_value.as_tuple().exponent >= 0:
return int(dec_value)
else:
return float(dec_value)
ENCODERS_BY_TYPE: Dict[Type[Any], Callable[[Any], Any]] = {
bytes: lambda o: o.decode(),
Color: str,
datetime.date: isoformat,
datetime.datetime: isoformat,
datetime.time: isoformat,
datetime.timedelta: lambda td: td.total_seconds(),
Decimal: decimal_encoder,
Enum: lambda o: o.value,
frozenset: list,
deque: list,
GeneratorType: list,
IPv4Address: str,
IPv4Interface: str,
IPv4Network: str,
IPv6Address: str,
IPv6Interface: str,
IPv6Network: str,
Path: str,
Pattern: lambda o: o.pattern,
SecretBytes: str,
SecretStr: str,
set: list,
UUID: str,
}
def pydantic_encoder(obj: Any) -> Any:
from dataclasses import asdict, is_dataclass
from .main import BaseModel
if isinstance(obj, BaseModel):
return obj.dict()
elif is_dataclass(obj):
return asdict(obj)
# Check the class type and its superclasses for a matching encoder
for base in obj.__class__.__mro__[:-1]:
try:
encoder = ENCODERS_BY_TYPE[base]
except KeyError:
continue
return encoder(obj)
else: # We have exited the for loop without finding a suitable encoder
raise TypeError(f"Object of type '{obj.__class__.__name__}' is not JSON serializable")
def custom_pydantic_encoder(type_encoders: Dict[Any, Callable[[Type[Any]], Any]], obj: Any) -> Any:
# Check the class type and its superclasses for a matching encoder
for base in obj.__class__.__mro__[:-1]:
try:
encoder = type_encoders[base]
except KeyError:
continue
return encoder(obj)
else: # We have exited the for loop without finding a suitable encoder
return pydantic_encoder(obj)
def timedelta_isoformat(td: datetime.timedelta) -> str:
"""
ISO 8601 encoding for timedeltas.
"""
minutes, seconds = divmod(td.seconds, 60)
hours, minutes = divmod(minutes, 60)
return f'P{td.days}DT{hours:d}H{minutes:d}M{seconds:d}.{td.microseconds:06d}S'
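Editor's sketch (added, not part of the original file): a hedged demonstration of the encoders above with the standard json module.

import json as _json

td = datetime.timedelta(days=1, hours=2, minutes=3, seconds=4, microseconds=5)
assert timedelta_isoformat(td) == 'P1DT2H3M4.000005S'

payload = {'when': datetime.datetime(2021, 3, 17, 8, 57, 57)}
assert _json.dumps(payload, default=pydantic_encoder) == '{"when": "2021-03-17T08:57:57"}'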

File diff suppressed because it is too large

View File

@@ -0,0 +1,690 @@
from configparser import ConfigParser
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type as TypingType, Union
from mypy.errorcodes import ErrorCode
from mypy.nodes import (
ARG_NAMED,
ARG_NAMED_OPT,
ARG_OPT,
ARG_POS,
ARG_STAR2,
MDEF,
Argument,
AssignmentStmt,
Block,
CallExpr,
ClassDef,
Context,
Decorator,
EllipsisExpr,
FuncBase,
FuncDef,
JsonDict,
MemberExpr,
NameExpr,
PassStmt,
PlaceholderNode,
RefExpr,
StrExpr,
SymbolNode,
SymbolTableNode,
TempNode,
TypeInfo,
TypeVarExpr,
Var,
)
from mypy.options import Options
from mypy.plugin import CheckerPluginInterface, ClassDefContext, MethodContext, Plugin, SemanticAnalyzerPluginInterface
from mypy.plugins import dataclasses
from mypy.semanal import set_callable_name # type: ignore
from mypy.server.trigger import make_wildcard_trigger
from mypy.types import (
AnyType,
CallableType,
Instance,
NoneType,
Type,
TypeOfAny,
TypeType,
TypeVarDef,
TypeVarType,
UnionType,
get_proper_type,
)
from mypy.typevars import fill_typevars
from mypy.util import get_unique_redefinition_name
CONFIGFILE_KEY = 'pydantic-mypy'
METADATA_KEY = 'pydantic-mypy-metadata'
BASEMODEL_FULLNAME = 'pydantic.main.BaseModel'
BASESETTINGS_FULLNAME = 'pydantic.env_settings.BaseSettings'
FIELD_FULLNAME = 'pydantic.fields.Field'
DATACLASS_FULLNAME = 'pydantic.dataclasses.dataclass'
def plugin(version: str) -> 'TypingType[Plugin]':
"""
`version` is the mypy version string
We might want to use this to print a warning if the mypy version being used is
newer, or especially older, than we expect (or need).
"""
return PydanticPlugin
class PydanticPlugin(Plugin):
def __init__(self, options: Options) -> None:
self.plugin_config = PydanticPluginConfig(options)
super().__init__(options)
def get_base_class_hook(self, fullname: str) -> 'Optional[Callable[[ClassDefContext], None]]':
sym = self.lookup_fully_qualified(fullname)
if sym and isinstance(sym.node, TypeInfo): # pragma: no branch
# No branching may occur if the mypy cache has not been cleared
if any(get_fullname(base) == BASEMODEL_FULLNAME for base in sym.node.mro):
return self._pydantic_model_class_maker_callback
return None
def get_method_hook(self, fullname: str) -> Optional[Callable[[MethodContext], Type]]:
if fullname.endswith('.from_orm'):
return from_orm_callback
return None
def get_class_decorator_hook(self, fullname: str) -> Optional[Callable[[ClassDefContext], None]]:
if fullname == DATACLASS_FULLNAME:
return dataclasses.dataclass_class_maker_callback
return None
def _pydantic_model_class_maker_callback(self, ctx: ClassDefContext) -> None:
transformer = PydanticModelTransformer(ctx, self.plugin_config)
transformer.transform()
class PydanticPluginConfig:
__slots__ = ('init_forbid_extra', 'init_typed', 'warn_required_dynamic_aliases', 'warn_untyped_fields')
init_forbid_extra: bool
init_typed: bool
warn_required_dynamic_aliases: bool
warn_untyped_fields: bool
def __init__(self, options: Options) -> None:
if options.config_file is None: # pragma: no cover
return
plugin_config = ConfigParser()
plugin_config.read(options.config_file)
for key in self.__slots__:
setting = plugin_config.getboolean(CONFIGFILE_KEY, key, fallback=False)
setattr(self, key, setting)
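# Editor's sketch (added, not part of the original plugin): the class above
# reads boolean flags from an ini-style mypy config section, e.g.
#   [pydantic-mypy]
#   init_typed = True
# A self-contained illustration of that ConfigParser lookup:
if __name__ == '__main__':
    from configparser import ConfigParser as _ConfigParser
    _cfg = _ConfigParser()
    _cfg.read_string('[pydantic-mypy]\ninit_typed = True\n')
    assert _cfg.getboolean('pydantic-mypy', 'init_typed', fallback=False) is True
    assert _cfg.getboolean('pydantic-mypy', 'init_forbid_extra', fallback=False) is False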
def from_orm_callback(ctx: MethodContext) -> Type:
"""
Raise an error if orm_mode is not enabled
"""
model_type: Instance
if isinstance(ctx.type, CallableType) and isinstance(ctx.type.ret_type, Instance):
model_type = ctx.type.ret_type # called on the class
elif isinstance(ctx.type, Instance):
model_type = ctx.type # called on an instance (unusual, but still valid)
else: # pragma: no cover
detail = f'ctx.type: {ctx.type} (of type {ctx.type.__class__.__name__})'
error_unexpected_behavior(detail, ctx.api, ctx.context)
return ctx.default_return_type
pydantic_metadata = model_type.type.metadata.get(METADATA_KEY)
if pydantic_metadata is None:
return ctx.default_return_type
orm_mode = pydantic_metadata.get('config', {}).get('orm_mode')
if orm_mode is not True:
error_from_orm(get_name(model_type.type), ctx.api, ctx.context)
return ctx.default_return_type
class PydanticModelTransformer:
tracked_config_fields: Set[str] = {
'extra',
'allow_mutation',
'frozen',
'orm_mode',
'allow_population_by_field_name',
'alias_generator',
}
def __init__(self, ctx: ClassDefContext, plugin_config: PydanticPluginConfig) -> None:
self._ctx = ctx
self.plugin_config = plugin_config
def transform(self) -> None:
"""
Configures the BaseModel subclass according to the plugin settings.
In particular:
* determines the model config and fields,
* adds a fields-aware signature for the initializer and construct methods
* freezes the class if allow_mutation = False or frozen = True
* stores the fields, config, and if the class is settings in the mypy metadata for access by subclasses
"""
ctx = self._ctx
info = self._ctx.cls.info
config = self.collect_config()
fields = self.collect_fields(config)
for field in fields:
if info[field.name].type is None:
if not ctx.api.final_iteration:
ctx.api.defer()
is_settings = any(get_fullname(base) == BASESETTINGS_FULLNAME for base in info.mro[:-1])
self.add_initializer(fields, config, is_settings)
self.add_construct_method(fields)
self.set_frozen(fields, frozen=config.allow_mutation is False or config.frozen is True)
info.metadata[METADATA_KEY] = {
'fields': {field.name: field.serialize() for field in fields},
'config': config.set_values_dict(),
}
def collect_config(self) -> 'ModelConfigData':
"""
Collects the values of the config attributes that are used by the plugin, accounting for parent classes.
"""
ctx = self._ctx
cls = ctx.cls
config = ModelConfigData()
for stmt in cls.defs.body:
if not isinstance(stmt, ClassDef):
continue
if stmt.name == 'Config':
for substmt in stmt.defs.body:
if not isinstance(substmt, AssignmentStmt):
continue
config.update(self.get_config_update(substmt))
if (
config.has_alias_generator
and not config.allow_population_by_field_name
and self.plugin_config.warn_required_dynamic_aliases
):
error_required_dynamic_aliases(ctx.api, stmt)
for info in cls.info.mro[1:]: # 0 is the current class
if METADATA_KEY not in info.metadata:
continue
# Each class depends on the set of fields in its ancestors
ctx.api.add_plugin_dependency(make_wildcard_trigger(get_fullname(info)))
for name, value in info.metadata[METADATA_KEY]['config'].items():
config.setdefault(name, value)
return config
def collect_fields(self, model_config: 'ModelConfigData') -> List['PydanticModelField']:
"""
Collects the fields for the model, accounting for parent classes
"""
# First, collect fields belonging to the current class.
ctx = self._ctx
cls = self._ctx.cls
fields = [] # type: List[PydanticModelField]
known_fields = set() # type: Set[str]
for stmt in cls.defs.body:
if not isinstance(stmt, AssignmentStmt): # `and stmt.new_syntax` to require annotation
continue
lhs = stmt.lvalues[0]
if not isinstance(lhs, NameExpr):
continue
if not stmt.new_syntax and self.plugin_config.warn_untyped_fields:
error_untyped_fields(ctx.api, stmt)
# if lhs.name == '__config__': # BaseConfig not well handled; I'm not sure why yet
# continue
sym = cls.info.names.get(lhs.name)
if sym is None: # pragma: no cover
# This is likely due to a star import (see the dataclasses plugin for a more detailed explanation)
# This is the same logic used in the dataclasses plugin
continue
node = sym.node
if isinstance(node, PlaceholderNode): # pragma: no cover
# See the PlaceholderNode docstring for more detail about how this can occur
# Basically, it is an edge case when dealing with complex import logic
# This is the same logic used in the dataclasses plugin
continue
if not isinstance(node, Var):
continue
# x: ClassVar[int] is ignored by dataclasses.
if node.is_classvar:
continue
is_required = self.get_is_required(cls, stmt, lhs)
alias, has_dynamic_alias = self.get_alias_info(stmt)
if (
has_dynamic_alias
and not model_config.allow_population_by_field_name
and self.plugin_config.warn_required_dynamic_aliases
):
error_required_dynamic_aliases(ctx.api, stmt)
fields.append(
PydanticModelField(
name=lhs.name,
is_required=is_required,
alias=alias,
has_dynamic_alias=has_dynamic_alias,
line=stmt.line,
column=stmt.column,
)
)
known_fields.add(lhs.name)
all_fields = fields.copy()
for info in cls.info.mro[1:]: # 0 is the current class, -2 is BaseModel, -1 is object
if METADATA_KEY not in info.metadata:
continue
superclass_fields = []
# Each class depends on the set of fields in its ancestors
ctx.api.add_plugin_dependency(make_wildcard_trigger(get_fullname(info)))
for name, data in info.metadata[METADATA_KEY]['fields'].items():
if name not in known_fields:
field = PydanticModelField.deserialize(info, data)
known_fields.add(name)
superclass_fields.append(field)
else:
(field,) = [a for a in all_fields if a.name == name]
all_fields.remove(field)
superclass_fields.append(field)
all_fields = superclass_fields + all_fields
return all_fields
def add_initializer(self, fields: List['PydanticModelField'], config: 'ModelConfigData', is_settings: bool) -> None:
"""
Adds a fields-aware `__init__` method to the class.
The added `__init__` will be annotated with types vs. all `Any` depending on the plugin settings.
"""
ctx = self._ctx
typed = self.plugin_config.init_typed
use_alias = config.allow_population_by_field_name is not True
force_all_optional = is_settings or bool(
config.has_alias_generator and not config.allow_population_by_field_name
)
init_arguments = self.get_field_arguments(
fields, typed=typed, force_all_optional=force_all_optional, use_alias=use_alias
)
if not self.should_init_forbid_extra(fields, config):
var = Var('kwargs')
init_arguments.append(Argument(var, AnyType(TypeOfAny.explicit), None, ARG_STAR2))
add_method(ctx, '__init__', init_arguments, NoneType())
def add_construct_method(self, fields: List['PydanticModelField']) -> None:
"""
Adds a fully typed `construct` classmethod to the class.
Similar to the fields-aware __init__ method, but always uses the field names (not aliases),
and does not treat settings fields as optional.
"""
ctx = self._ctx
set_str = ctx.api.named_type('__builtins__.set', [ctx.api.named_type('__builtins__.str')])
optional_set_str = UnionType([set_str, NoneType()])
fields_set_argument = Argument(Var('_fields_set', optional_set_str), optional_set_str, None, ARG_OPT)
construct_arguments = self.get_field_arguments(fields, typed=True, force_all_optional=False, use_alias=False)
construct_arguments = [fields_set_argument] + construct_arguments
obj_type = ctx.api.named_type('__builtins__.object')
self_tvar_name = '_PydanticBaseModel' # Make sure it does not conflict with other names in the class
tvar_fullname = ctx.cls.fullname + '.' + self_tvar_name
tvd = TypeVarDef(self_tvar_name, tvar_fullname, -1, [], obj_type)
self_tvar_expr = TypeVarExpr(self_tvar_name, tvar_fullname, [], obj_type)
ctx.cls.info.names[self_tvar_name] = SymbolTableNode(MDEF, self_tvar_expr)
self_type = TypeVarType(tvd)
add_method(
ctx,
'construct',
construct_arguments,
return_type=self_type,
self_type=self_type,
tvar_def=tvd,
is_classmethod=True,
)
def set_frozen(self, fields: List['PydanticModelField'], frozen: bool) -> None:
"""
Marks all fields as properties so that attempts to set them trigger mypy errors.
This is the same approach used by the attrs and dataclasses plugins.
"""
info = self._ctx.cls.info
for field in fields:
sym_node = info.names.get(field.name)
if sym_node is not None:
var = sym_node.node
assert isinstance(var, Var)
var.is_property = frozen
else:
var = field.to_var(info, use_alias=False)
var.info = info
var.is_property = frozen
var._fullname = get_fullname(info) + '.' + get_name(var)
info.names[get_name(var)] = SymbolTableNode(MDEF, var)
def get_config_update(self, substmt: AssignmentStmt) -> Optional['ModelConfigData']:
"""
Determines the config update due to a single statement in the Config class definition.
Warns if a tracked config attribute is set to a value the plugin doesn't know how to interpret (e.g., an int)
"""
lhs = substmt.lvalues[0]
if not (isinstance(lhs, NameExpr) and lhs.name in self.tracked_config_fields):
return None
if lhs.name == 'extra':
if isinstance(substmt.rvalue, StrExpr):
forbid_extra = substmt.rvalue.value == 'forbid'
elif isinstance(substmt.rvalue, MemberExpr):
forbid_extra = substmt.rvalue.name == 'forbid'
else:
error_invalid_config_value(lhs.name, self._ctx.api, substmt)
return None
return ModelConfigData(forbid_extra=forbid_extra)
if lhs.name == 'alias_generator':
has_alias_generator = True
if isinstance(substmt.rvalue, NameExpr) and substmt.rvalue.fullname == 'builtins.None':
has_alias_generator = False
return ModelConfigData(has_alias_generator=has_alias_generator)
if isinstance(substmt.rvalue, NameExpr) and substmt.rvalue.fullname in ('builtins.True', 'builtins.False'):
return ModelConfigData(**{lhs.name: substmt.rvalue.fullname == 'builtins.True'})
error_invalid_config_value(lhs.name, self._ctx.api, substmt)
return None
@staticmethod
def get_is_required(cls: ClassDef, stmt: AssignmentStmt, lhs: NameExpr) -> bool:
"""
Returns a boolean indicating whether the field defined in `stmt` is a required field.
"""
expr = stmt.rvalue
if isinstance(expr, TempNode):
# TempNode means annotation-only, so only non-required if Optional
value_type = get_proper_type(cls.info[lhs.name].type)
if isinstance(value_type, UnionType) and any(isinstance(item, NoneType) for item in value_type.items):
# Annotated as Optional, or otherwise having NoneType in the union
return False
return True
if isinstance(expr, CallExpr) and isinstance(expr.callee, RefExpr) and expr.callee.fullname == FIELD_FULLNAME:
# The "default value" is a call to `Field`; at this point, the field is
# only required if default is Ellipsis (i.e., `field_name: Annotation = Field(...)`)
return len(expr.args) > 0 and expr.args[0].__class__ is EllipsisExpr
# Only required if the "default value" is Ellipsis (i.e., `field_name: Annotation = ...`)
return isinstance(expr, EllipsisExpr)
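# Illustrative sketch (hedged, not part of the plugin) of the requiredness rules
# checked above, expressed as runtime pydantic behaviour:
#
#     class M(BaseModel):
#         a: int                # annotation only, not Optional  -> required
#         b: Optional[int]      # annotation only, Optional      -> not required
#         c: int = ...          # Ellipsis default               -> required
#         d: int = Field(...)   # Field(...) as default          -> required
#         e: int = 0            # concrete default               -> not required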
@staticmethod
def get_alias_info(stmt: AssignmentStmt) -> Tuple[Optional[str], bool]:
"""
Returns a pair (alias, has_dynamic_alias), extracted from the declaration of the field defined in `stmt`.
`has_dynamic_alias` is True if and only if an alias is provided, but not as a string literal.
If `has_dynamic_alias` is True, `alias` will be None.
"""
expr = stmt.rvalue
if isinstance(expr, TempNode):
# TempNode means annotation-only
return None, False
if not (
isinstance(expr, CallExpr) and isinstance(expr.callee, RefExpr) and expr.callee.fullname == FIELD_FULLNAME
):
# Assigned value is not a call to pydantic.fields.Field
return None, False
for i, arg_name in enumerate(expr.arg_names):
if arg_name != 'alias':
continue
arg = expr.args[i]
if isinstance(arg, StrExpr):
return arg.value, False
else:
return None, True
return None, False
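# Illustrative sketch (hedged) of the static/dynamic alias distinction drawn above;
# `make_alias` is a hypothetical helper:
#
#     class M(BaseModel):
#         x: int = Field(..., alias='x_alias')     # string literal -> ('x_alias', False)
#         y: int = Field(..., alias=make_alias())  # computed value -> (None, True),
#                                                  # i.e. unresolvable during static analysis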
def get_field_arguments(
self, fields: List['PydanticModelField'], typed: bool, force_all_optional: bool, use_alias: bool
) -> List[Argument]:
"""
Helper function used during the construction of the `__init__` and `construct` method signatures.
Returns a list of mypy Argument instances for use in the generated signatures.
"""
info = self._ctx.cls.info
arguments = [
field.to_argument(info, typed=typed, force_optional=force_all_optional, use_alias=use_alias)
for field in fields
if not (use_alias and field.has_dynamic_alias)
]
return arguments
def should_init_forbid_extra(self, fields: List['PydanticModelField'], config: 'ModelConfigData') -> bool:
"""
Indicates whether the generated `__init__` should forbid extra kwargs (i.e., omit the trailing `**kwargs` from its signature).
We disallow arbitrary kwargs if the extra config setting is "forbid", or if the plugin config says to,
*unless* a required dynamic alias is present (since then we can't determine a valid signature).
"""
if not config.allow_population_by_field_name:
if self.is_dynamic_alias_present(fields, bool(config.has_alias_generator)):
return False
if config.forbid_extra:
return True
return self.plugin_config.init_forbid_extra
@staticmethod
def is_dynamic_alias_present(fields: List['PydanticModelField'], has_alias_generator: bool) -> bool:
"""
Returns whether any fields on the model have a "dynamic alias", i.e., an alias that cannot be
determined during static analysis.
"""
for field in fields:
if field.has_dynamic_alias:
return True
if has_alias_generator:
for field in fields:
if field.alias is None:
return True
return False
class PydanticModelField:
def __init__(
self, name: str, is_required: bool, alias: Optional[str], has_dynamic_alias: bool, line: int, column: int
):
self.name = name
self.is_required = is_required
self.alias = alias
self.has_dynamic_alias = has_dynamic_alias
self.line = line
self.column = column
def to_var(self, info: TypeInfo, use_alias: bool) -> Var:
name = self.name
if use_alias and self.alias is not None:
name = self.alias
return Var(name, info[self.name].type)
def to_argument(self, info: TypeInfo, typed: bool, force_optional: bool, use_alias: bool) -> Argument:
if typed and info[self.name].type is not None:
type_annotation = info[self.name].type
else:
type_annotation = AnyType(TypeOfAny.explicit)
return Argument(
variable=self.to_var(info, use_alias),
type_annotation=type_annotation,
initializer=None,
kind=ARG_NAMED_OPT if force_optional or not self.is_required else ARG_NAMED,
)
def serialize(self) -> JsonDict:
return self.__dict__
@classmethod
def deserialize(cls, info: TypeInfo, data: JsonDict) -> 'PydanticModelField':
return cls(**data)
class ModelConfigData:
def __init__(
self,
forbid_extra: Optional[bool] = None,
allow_mutation: Optional[bool] = None,
frozen: Optional[bool] = None,
orm_mode: Optional[bool] = None,
allow_population_by_field_name: Optional[bool] = None,
has_alias_generator: Optional[bool] = None,
):
self.forbid_extra = forbid_extra
self.allow_mutation = allow_mutation
self.frozen = frozen
self.orm_mode = orm_mode
self.allow_population_by_field_name = allow_population_by_field_name
self.has_alias_generator = has_alias_generator
def set_values_dict(self) -> Dict[str, Any]:
return {k: v for k, v in self.__dict__.items() if v is not None}
def update(self, config: Optional['ModelConfigData']) -> None:
if config is None:
return
for k, v in config.set_values_dict().items():
setattr(self, k, v)
def setdefault(self, key: str, value: Any) -> None:
if getattr(self, key) is None:
setattr(self, key, value)
ERROR_ORM = ErrorCode('pydantic-orm', 'Invalid from_orm call', 'Pydantic')
ERROR_CONFIG = ErrorCode('pydantic-config', 'Invalid config value', 'Pydantic')
ERROR_ALIAS = ErrorCode('pydantic-alias', 'Dynamic alias disallowed', 'Pydantic')
ERROR_UNEXPECTED = ErrorCode('pydantic-unexpected', 'Unexpected behavior', 'Pydantic')
ERROR_UNTYPED = ErrorCode('pydantic-field', 'Untyped field disallowed', 'Pydantic')
def error_from_orm(model_name: str, api: CheckerPluginInterface, context: Context) -> None:
api.fail(f'"{model_name}" does not have orm_mode=True', context, code=ERROR_ORM)
def error_invalid_config_value(name: str, api: SemanticAnalyzerPluginInterface, context: Context) -> None:
api.fail(f'Invalid value for "Config.{name}"', context, code=ERROR_CONFIG)
def error_required_dynamic_aliases(api: SemanticAnalyzerPluginInterface, context: Context) -> None:
api.fail('Required dynamic aliases disallowed', context, code=ERROR_ALIAS)
def error_unexpected_behavior(detail: str, api: CheckerPluginInterface, context: Context) -> None: # pragma: no cover
# Can't think of a good way to test this, but I confirmed it renders as desired by adding to a non-error path
link = 'https://github.com/samuelcolvin/pydantic/issues/new/choose'
full_message = f'The pydantic mypy plugin ran into unexpected behavior: {detail}\n'
full_message += f'Please consider reporting this bug at {link} so we can try to fix it!'
api.fail(full_message, context, code=ERROR_UNEXPECTED)
def error_untyped_fields(api: SemanticAnalyzerPluginInterface, context: Context) -> None:
api.fail('Untyped fields disallowed', context, code=ERROR_UNTYPED)
def add_method(
ctx: ClassDefContext,
name: str,
args: List[Argument],
return_type: Type,
self_type: Optional[Type] = None,
tvar_def: Optional[TypeVarDef] = None,
is_classmethod: bool = False,
is_new: bool = False,
# is_staticmethod: bool = False,
) -> None:
"""
Adds a new method to a class.
This can be dropped if/when https://github.com/python/mypy/issues/7301 is resolved
"""
info = ctx.cls.info
# First remove any previously generated methods with the same name
# to avoid clashes and problems in the semantic analyzer.
if name in info.names:
sym = info.names[name]
if sym.plugin_generated and isinstance(sym.node, FuncDef):
ctx.cls.defs.body.remove(sym.node)
self_type = self_type or fill_typevars(info)
if is_classmethod or is_new:
first = [Argument(Var('_cls'), TypeType.make_normalized(self_type), None, ARG_POS)]
# elif is_staticmethod:
# first = []
else:
self_type = self_type or fill_typevars(info)
first = [Argument(Var('self'), self_type, None, ARG_POS)]
args = first + args
arg_types, arg_names, arg_kinds = [], [], []
for arg in args:
assert arg.type_annotation, 'All arguments must be fully typed.'
arg_types.append(arg.type_annotation)
arg_names.append(get_name(arg.variable))
arg_kinds.append(arg.kind)
function_type = ctx.api.named_type('__builtins__.function')
signature = CallableType(arg_types, arg_kinds, arg_names, return_type, function_type)
if tvar_def:
signature.variables = [tvar_def]
func = FuncDef(name, args, Block([PassStmt()]))
func.info = info
func.type = set_callable_name(signature, func)
func.is_class = is_classmethod
# func.is_static = is_staticmethod
func._fullname = get_fullname(info) + '.' + name
func.line = info.line
# NOTE: we would like the plugin generated node to dominate, but we still
# need to keep any existing definitions so they get semantically analyzed.
if name in info.names:
# Get a nice unique name instead.
r_name = get_unique_redefinition_name(name, info.names)
info.names[r_name] = info.names[name]
if is_classmethod: # or is_staticmethod:
func.is_decorated = True
v = Var(name, func.type)
v.info = info
v._fullname = func._fullname
# if is_classmethod:
v.is_classmethod = True
dec = Decorator(func, [NameExpr('classmethod')], v)
# else:
# v.is_staticmethod = True
# dec = Decorator(func, [NameExpr('staticmethod')], v)
dec.line = info.line
sym = SymbolTableNode(MDEF, dec)
else:
sym = SymbolTableNode(MDEF, func)
sym.plugin_generated = True
info.names[name] = sym
info.defn.defs.body.append(func)
def get_fullname(x: Union[FuncBase, SymbolNode]) -> str:
"""
Used for compatibility with mypy 0.740; can be dropped once support for 0.740 is dropped.
"""
fn = x.fullname
if callable(fn): # pragma: no cover
return fn()
return fn
def get_name(x: Union[FuncBase, SymbolNode]) -> str:
"""
Used for compatibility with mypy 0.740; can be dropped once support for 0.740 is dropped.
"""
fn = x.name
if callable(fn): # pragma: no cover
return fn()
return fn
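# Illustrative configuration sketch: the plugin settings read above (`init_typed`,
# `init_forbid_extra`, `warn_required_dynamic_aliases`) are supplied via the mypy
# config file; a minimal mypy.ini might look like this (hedged example):
#
#     [mypy]
#     plugins = pydantic.mypy
#
#     [pydantic-mypy]
#     init_typed = True
#     init_forbid_extra = True
#     warn_required_dynamic_aliases = True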


@@ -0,0 +1,498 @@
import re
from ipaddress import (
IPv4Address,
IPv4Interface,
IPv4Network,
IPv6Address,
IPv6Interface,
IPv6Network,
_BaseAddress,
_BaseNetwork,
)
from typing import (
TYPE_CHECKING,
Any,
Dict,
FrozenSet,
Generator,
Optional,
Pattern,
Set,
Tuple,
Type,
Union,
cast,
no_type_check,
)
from . import errors
from .utils import Representation, update_not_none
from .validators import constr_length_validator, str_validator
if TYPE_CHECKING:
import email_validator
from .fields import ModelField
from .main import BaseConfig # noqa: F401
from .typing import AnyCallable
CallableGenerator = Generator[AnyCallable, None, None]
else:
email_validator = None
NetworkType = Union[str, bytes, int, Tuple[Union[str, bytes, int], Union[str, int]]]
__all__ = [
'AnyUrl',
'AnyHttpUrl',
'HttpUrl',
'stricturl',
'EmailStr',
'NameEmail',
'IPvAnyAddress',
'IPvAnyInterface',
'IPvAnyNetwork',
'PostgresDsn',
'RedisDsn',
'validate_email',
]
_url_regex_cache = None
_ascii_domain_regex_cache = None
_int_domain_regex_cache = None
def url_regex() -> Pattern[str]:
global _url_regex_cache
if _url_regex_cache is None:
_url_regex_cache = re.compile(
r'(?:(?P<scheme>[a-z][a-z0-9+\-.]+)://)?' # scheme https://tools.ietf.org/html/rfc3986#appendix-A
r'(?:(?P<user>[^\s:/]*)(?::(?P<password>[^\s/]*))?@)?' # user info
r'(?:'
r'(?P<ipv4>(?:\d{1,3}\.){3}\d{1,3})|' # ipv4
r'(?P<ipv6>\[[A-F0-9]*:[A-F0-9:]+\])|' # ipv6
r'(?P<domain>[^\s/:?#]+)' # domain, validation occurs later
r')?'
r'(?::(?P<port>\d+))?' # port
r'(?P<path>/[^\s?#]*)?' # path
r'(?:\?(?P<query>[^\s#]+))?' # query
r'(?:#(?P<fragment>\S+))?', # fragment
re.IGNORECASE,
)
return _url_regex_cache
def ascii_domain_regex() -> Pattern[str]:
global _ascii_domain_regex_cache
if _ascii_domain_regex_cache is None:
ascii_chunk = r'[_0-9a-z](?:[-_0-9a-z]{0,61}[_0-9a-z])?'
ascii_domain_ending = r'(?P<tld>\.[a-z]{2,63})?\.?'
_ascii_domain_regex_cache = re.compile(
fr'(?:{ascii_chunk}\.)*?{ascii_chunk}{ascii_domain_ending}', re.IGNORECASE
)
return _ascii_domain_regex_cache
def int_domain_regex() -> Pattern[str]:
global _int_domain_regex_cache
if _int_domain_regex_cache is None:
int_chunk = r'[_0-9a-\U00040000](?:[-_0-9a-\U00040000]{0,61}[_0-9a-\U00040000])?'
int_domain_ending = r'(?P<tld>(\.[^\W\d_]{2,63})|(\.(?:xn--)[_0-9a-z-]{2,63}))?\.?'
_int_domain_regex_cache = re.compile(fr'(?:{int_chunk}\.)*?{int_chunk}{int_domain_ending}', re.IGNORECASE)
return _int_domain_regex_cache
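# A minimal usage sketch of the URL regex above; `_demo_url_regex` is a
# hypothetical helper, not part of the original module:
def _demo_url_regex() -> None:
    m = url_regex().match('https://sam:pw@example.com:8000/a/b?q=1#frag')
    assert m is not None
    parts = m.groupdict()  # named groups as consumed by AnyUrl.validate below
    assert parts['scheme'] == 'https' and parts['domain'] == 'example.com'
    assert parts['port'] == '8000' and parts['path'] == '/a/b'
    assert parts['query'] == 'q=1' and parts['fragment'] == 'frag'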
class AnyUrl(str):
strip_whitespace = True
min_length = 1
max_length = 2 ** 16
allowed_schemes: Optional[Set[str]] = None
tld_required: bool = False
user_required: bool = False
__slots__ = ('scheme', 'user', 'password', 'host', 'tld', 'host_type', 'port', 'path', 'query', 'fragment')
@no_type_check
def __new__(cls, url: Optional[str], **kwargs) -> object:
return str.__new__(cls, cls.build(**kwargs) if url is None else url)
def __init__(
self,
url: str,
*,
scheme: str,
user: Optional[str] = None,
password: Optional[str] = None,
host: str,
tld: Optional[str] = None,
host_type: str = 'domain',
port: Optional[str] = None,
path: Optional[str] = None,
query: Optional[str] = None,
fragment: Optional[str] = None,
) -> None:
str.__init__(url)
self.scheme = scheme
self.user = user
self.password = password
self.host = host
self.tld = tld
self.host_type = host_type
self.port = port
self.path = path
self.query = query
self.fragment = fragment
@classmethod
def build(
cls,
*,
scheme: str,
user: Optional[str] = None,
password: Optional[str] = None,
host: str,
port: Optional[str] = None,
path: Optional[str] = None,
query: Optional[str] = None,
fragment: Optional[str] = None,
**kwargs: str,
) -> str:
url = scheme + '://'
if user:
url += user
if password:
url += ':' + password
if user or password:
url += '@'
url += host
if port:
url += ':' + port
if path:
url += path
if query:
url += '?' + query
if fragment:
url += '#' + fragment
return url
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(field_schema, minLength=cls.min_length, maxLength=cls.max_length, format='uri')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
@classmethod
def validate(cls, value: Any, field: 'ModelField', config: 'BaseConfig') -> 'AnyUrl':
if value.__class__ == cls:
return value
value = str_validator(value)
if cls.strip_whitespace:
value = value.strip()
url: str = cast(str, constr_length_validator(value, field, config))
m = url_regex().match(url)
# the regex should always match; if it doesn't, please report it with details of the URL tried
assert m, 'URL regex failed unexpectedly'
parts = m.groupdict()
parts = cls.validate_parts(parts)
host, tld, host_type, rebuild = cls.validate_host(parts)
if m.end() != len(url):
raise errors.UrlExtraError(extra=url[m.end() :])
return cls(
None if rebuild else url,
scheme=parts['scheme'],
user=parts['user'],
password=parts['password'],
host=host,
tld=tld,
host_type=host_type,
port=parts['port'],
path=parts['path'],
query=parts['query'],
fragment=parts['fragment'],
)
@classmethod
def validate_parts(cls, parts: Dict[str, str]) -> Dict[str, str]:
"""
A method used to validate parts of a URL.
Could be overridden to set default values for parts if missing
"""
scheme = parts['scheme']
if scheme is None:
raise errors.UrlSchemeError()
if cls.allowed_schemes and scheme.lower() not in cls.allowed_schemes:
raise errors.UrlSchemePermittedError(cls.allowed_schemes)
port = parts['port']
if port is not None and int(port) > 65_535:
raise errors.UrlPortError()
user = parts['user']
if cls.user_required and user is None:
raise errors.UrlUserInfoError()
return parts
@classmethod
def validate_host(cls, parts: Dict[str, str]) -> Tuple[str, Optional[str], str, bool]:
host, tld, host_type, rebuild = None, None, None, False
for f in ('domain', 'ipv4', 'ipv6'):
host = parts[f]
if host:
host_type = f
break
if host is None:
raise errors.UrlHostError()
elif host_type == 'domain':
is_international = False
d = ascii_domain_regex().fullmatch(host)
if d is None:
d = int_domain_regex().fullmatch(host)
if d is None:
raise errors.UrlHostError()
is_international = True
tld = d.group('tld')
if tld is None and not is_international:
d = int_domain_regex().fullmatch(host)
tld = d.group('tld')
is_international = True
if tld is not None:
tld = tld[1:]
elif cls.tld_required:
raise errors.UrlHostTldError()
if is_international:
host_type = 'int_domain'
rebuild = True
host = host.encode('idna').decode('ascii')
if tld is not None:
tld = tld.encode('idna').decode('ascii')
return host, tld, host_type, rebuild # type: ignore
def __repr__(self) -> str:
extra = ', '.join(f'{n}={getattr(self, n)!r}' for n in self.__slots__ if getattr(self, n) is not None)
return f'{self.__class__.__name__}({super().__repr__()}, {extra})'
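# A minimal sketch of building a URL from parts; `_demo_anyurl_build` is a
# hypothetical helper. Note that `AnyUrl.build` returns the assembled string:
def _demo_anyurl_build() -> None:
    url = AnyUrl.build(
        scheme='https', user='sam', password='pw', host='example.com',
        port='8000', path='/a', query='q=1', fragment='f',
    )
    assert url == 'https://sam:pw@example.com:8000/a?q=1#f'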
class AnyHttpUrl(AnyUrl):
allowed_schemes = {'http', 'https'}
class HttpUrl(AnyUrl):
allowed_schemes = {'http', 'https'}
tld_required = True
# https://stackoverflow.com/questions/417142/what-is-the-maximum-length-of-a-url-in-different-browsers
max_length = 2083
class PostgresDsn(AnyUrl):
allowed_schemes = {'postgres', 'postgresql'}
user_required = True
class RedisDsn(AnyUrl):
allowed_schemes = {'redis', 'rediss'}
@classmethod
def validate_parts(cls, parts: Dict[str, str]) -> Dict[str, str]:
defaults = {
'domain': 'localhost' if not (parts['ipv4'] or parts['ipv6']) else '',
'port': '6379',
'path': '/0',
}
for key, value in defaults.items():
if not parts[key]:
parts[key] = value
return super().validate_parts(parts)
def stricturl(
*,
strip_whitespace: bool = True,
min_length: int = 1,
max_length: int = 2 ** 16,
tld_required: bool = True,
allowed_schemes: Optional[Union[FrozenSet[str], Set[str]]] = None,
) -> Type[AnyUrl]:
# use kwargs then define conf in a dict to aid with IDE type hinting
namespace = dict(
strip_whitespace=strip_whitespace,
min_length=min_length,
max_length=max_length,
tld_required=tld_required,
allowed_schemes=allowed_schemes,
)
return type('UrlValue', (AnyUrl,), namespace)
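# A minimal sketch of `stricturl`; `_demo_stricturl` is a hypothetical helper.
# The returned type is an `AnyUrl` subclass carrying the requested constraints:
def _demo_stricturl() -> None:
    ftp_url = stricturl(allowed_schemes={'ftp'}, tld_required=False)
    assert issubclass(ftp_url, AnyUrl)
    assert ftp_url.allowed_schemes == {'ftp'} and ftp_url.tld_required is False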
def import_email_validator() -> None:
global email_validator
try:
import email_validator
except ImportError as e:
raise ImportError('email-validator is not installed, run `pip install pydantic[email]`') from e
class EmailStr(str):
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='string', format='email')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
# included here and below so the error happens straight away
import_email_validator()
yield str_validator
yield cls.validate
@classmethod
def validate(cls, value: Union[str]) -> str:
return validate_email(value)[1]
class NameEmail(Representation):
__slots__ = 'name', 'email'
def __init__(self, name: str, email: str):
self.name = name
self.email = email
def __eq__(self, other: Any) -> bool:
return isinstance(other, NameEmail) and (self.name, self.email) == (other.name, other.email)
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='string', format='name-email')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
import_email_validator()
yield cls.validate
@classmethod
def validate(cls, value: Any) -> 'NameEmail':
if value.__class__ == cls:
return value
value = str_validator(value)
return cls(*validate_email(value))
def __str__(self) -> str:
return f'{self.name} <{self.email}>'
class IPvAnyAddress(_BaseAddress):
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='string', format='ipvanyaddress')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
@classmethod
def validate(cls, value: Union[str, bytes, int]) -> Union[IPv4Address, IPv6Address]:
try:
return IPv4Address(value)
except ValueError:
pass
try:
return IPv6Address(value)
except ValueError:
raise errors.IPvAnyAddressError()
class IPvAnyInterface(_BaseAddress):
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='string', format='ipvanyinterface')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
@classmethod
def validate(cls, value: NetworkType) -> Union[IPv4Interface, IPv6Interface]:
try:
return IPv4Interface(value)
except ValueError:
pass
try:
return IPv6Interface(value)
except ValueError:
raise errors.IPvAnyInterfaceError()
class IPvAnyNetwork(_BaseNetwork): # type: ignore
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='string', format='ipvanynetwork')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
@classmethod
def validate(cls, value: NetworkType) -> Union[IPv4Network, IPv6Network]:
# Assume the IP network is defined with the default value of the ``strict`` argument.
# Define your own class if you want to specify network address check strictness.
try:
return IPv4Network(value)
except ValueError:
pass
try:
return IPv6Network(value)
except ValueError:
raise errors.IPvAnyNetworkError()
pretty_email_regex = re.compile(r'([\w ]*?) *<(.*)> *')
def validate_email(value: Union[str]) -> Tuple[str, str]:
"""
Brutally simple email address validation. Note that, unlike most email address validation:
* raw IP address (literal) domain parts are not allowed.
* "John Doe <local_part@domain.com>" style "pretty" email addresses are processed
* the local part check is extremely basic. This raises the possibility of unicode spoofing, but no better
solution is really possible.
* spaces are stripped from the beginning and end of addresses, but no error is raised
See RFC 5322 but treat it with suspicion; there seems to be no universally acknowledged test for a valid email!
"""
if email_validator is None:
import_email_validator()
m = pretty_email_regex.fullmatch(value)
name: Optional[str] = None
if m:
name, value = m.groups()
email = value.strip()
try:
email_validator.validate_email(email, check_deliverability=False)
except email_validator.EmailNotValidError as e:
raise errors.EmailError() from e
at_index = email.index('@')
local_part = email[:at_index] # RFC 5321, local part must be case-sensitive.
global_part = email[at_index:].lower()
return name or local_part, local_part + global_part
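# A minimal sketch of the two return shapes of `validate_email`; requires the
# optional `email-validator` dependency. `_demo_validate_email` is a
# hypothetical helper:
def _demo_validate_email() -> None:
    name, email = validate_email('John Doe <john.doe@example.com>')
    assert (name, email) == ('John Doe', 'john.doe@example.com')
    name, email = validate_email('john.doe@example.com')
    assert name == 'john.doe'  # falls back to the local part when no display name is given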


@@ -0,0 +1,66 @@
import json
import pickle
from enum import Enum
from pathlib import Path
from typing import Any, Callable, Union
from .types import StrBytes
class Protocol(str, Enum):
json = 'json'
pickle = 'pickle'
def load_str_bytes(
b: StrBytes,
*,
content_type: str = None,
encoding: str = 'utf8',
proto: Protocol = None,
allow_pickle: bool = False,
json_loads: Callable[[str], Any] = json.loads,
) -> Any:
if proto is None and content_type:
if content_type.endswith(('json', 'javascript')):
pass
elif allow_pickle and content_type.endswith('pickle'):
proto = Protocol.pickle
else:
raise TypeError(f'Unknown content-type: {content_type}')
proto = proto or Protocol.json
if proto == Protocol.json:
if isinstance(b, bytes):
b = b.decode(encoding)
return json_loads(b)
elif proto == Protocol.pickle:
if not allow_pickle:
raise RuntimeError('Trying to decode with pickle with allow_pickle=False')
bb = b if isinstance(b, bytes) else b.encode()
return pickle.loads(bb)
else:
raise TypeError(f'Unknown protocol: {proto}')
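# A minimal sketch of the protocol selection above; `_demo_load_str_bytes` is a
# hypothetical helper. JSON is the default, and pickle must be opted into:
def _demo_load_str_bytes() -> None:
    assert load_str_bytes(b'{"a": 1}') == {'a': 1}
    assert load_str_bytes('[1, 2]', content_type='application/json') == [1, 2]
    try:
        load_str_bytes(b'', proto=Protocol.pickle)  # allow_pickle defaults to False
    except RuntimeError:
        pass  # expected: pickle decoding is refused unless explicitly allowed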
def load_file(
path: Union[str, Path],
*,
content_type: str = None,
encoding: str = 'utf8',
proto: Protocol = None,
allow_pickle: bool = False,
json_loads: Callable[[str], Any] = json.loads,
) -> Any:
path = Path(path)
b = path.read_bytes()
if content_type is None:
if path.suffix in ('.js', '.json'):
proto = Protocol.json
elif path.suffix == '.pkl':
proto = Protocol.pickle
return load_str_bytes(
b, proto=proto, content_type=content_type, encoding=encoding, allow_pickle=allow_pickle, json_loads=json_loads
)

File diff suppressed because it is too large


@@ -0,0 +1,79 @@
import json
from functools import lru_cache
from pathlib import Path
from typing import Any, Callable, Optional, Type, TypeVar, Union
from .parse import Protocol, load_file, load_str_bytes
from .types import StrBytes
from .typing import display_as_type
__all__ = ('parse_file_as', 'parse_obj_as', 'parse_raw_as')
NameFactory = Union[str, Callable[[Type[Any]], str]]
def _generate_parsing_type_name(type_: Any) -> str:
return f'ParsingModel[{display_as_type(type_)}]'
@lru_cache(maxsize=2048)
def _get_parsing_type(type_: Any, *, type_name: Optional[NameFactory] = None) -> Any:
from pydantic.main import create_model
if type_name is None:
type_name = _generate_parsing_type_name
if not isinstance(type_name, str):
type_name = type_name(type_)
return create_model(type_name, __root__=(type_, ...))
T = TypeVar('T')
def parse_obj_as(type_: Type[T], obj: Any, *, type_name: Optional[NameFactory] = None) -> T:
model_type = _get_parsing_type(type_, type_name=type_name) # type: ignore[arg-type]
return model_type(__root__=obj).__root__
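# A minimal sketch of parsing an arbitrary type without declaring a wrapper
# model by hand; `_demo_parse_obj_as` and `Item` are hypothetical:
def _demo_parse_obj_as() -> None:
    from typing import List
    from pydantic import BaseModel

    class Item(BaseModel):
        id: int

    items = parse_obj_as(List[Item], [{'id': 1}, {'id': '2'}])
    assert [item.id for item in items] == [1, 2]  # '2' is coerced to int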
def parse_file_as(
type_: Type[T],
path: Union[str, Path],
*,
content_type: str = None,
encoding: str = 'utf8',
proto: Protocol = None,
allow_pickle: bool = False,
json_loads: Callable[[str], Any] = json.loads,
type_name: Optional[NameFactory] = None,
) -> T:
obj = load_file(
path,
proto=proto,
content_type=content_type,
encoding=encoding,
allow_pickle=allow_pickle,
json_loads=json_loads,
)
return parse_obj_as(type_, obj, type_name=type_name)
def parse_raw_as(
type_: Type[T],
b: StrBytes,
*,
content_type: str = None,
encoding: str = 'utf8',
proto: Protocol = None,
allow_pickle: bool = False,
json_loads: Callable[[str], Any] = json.loads,
type_name: Optional[NameFactory] = None,
) -> T:
obj = load_str_bytes(
b,
proto=proto,
content_type=content_type,
encoding=encoding,
allow_pickle=allow_pickle,
json_loads=json_loads,
)
return parse_obj_as(type_, obj, type_name=type_name)

File diff suppressed because it is too large


@@ -0,0 +1,399 @@
import sys
from enum import Enum
from typing import ( # type: ignore
TYPE_CHECKING,
AbstractSet,
Any,
ClassVar,
Dict,
Generator,
List,
Mapping,
NewType,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
_eval_type,
cast,
)
from typing_extensions import Annotated, Literal
try:
from typing import _TypingBase as typing_base # type: ignore
except ImportError:
from typing import _Final as typing_base # type: ignore
try:
from typing import GenericAlias # type: ignore
except ImportError:
# python < 3.9 does not have GenericAlias (list[int], tuple[str, ...] and so on)
GenericAlias = ()
if sys.version_info < (3, 7):
if TYPE_CHECKING:
class ForwardRef:
def __init__(self, arg: Any):
pass
def _eval_type(self, globalns: Any, localns: Any) -> Any:
pass
else:
from typing import _ForwardRef as ForwardRef
else:
from typing import ForwardRef
if sys.version_info < (3, 7):
def evaluate_forwardref(type_: ForwardRef, globalns: Any, localns: Any) -> Any:
return type_._eval_type(globalns, localns)
elif sys.version_info < (3, 9):
def evaluate_forwardref(type_: ForwardRef, globalns: Any, localns: Any) -> Any:
return type_._evaluate(globalns, localns)
else:
def evaluate_forwardref(type_: ForwardRef, globalns: Any, localns: Any) -> Any:
# Even though it is the right signature for python 3.9, mypy complains with
# `error: Too many arguments for "_evaluate" of "ForwardRef"` hence the cast...
return cast(Any, type_)._evaluate(globalns, localns, set())
if sys.version_info < (3, 7):
from typing import Callable as Callable
AnyCallable = Callable[..., Any]
NoArgAnyCallable = Callable[[], Any]
else:
from collections.abc import Callable as Callable
from typing import Callable as TypingCallable
AnyCallable = TypingCallable[..., Any]
NoArgAnyCallable = TypingCallable[[], Any]
# Annotated[...] is implemented by returning an instance of one of these classes, depending on
# python/typing_extensions version.
AnnotatedTypeNames = {'AnnotatedMeta', '_AnnotatedAlias'}
if sys.version_info < (3, 8):
def get_origin(t: Type[Any]) -> Optional[Type[Any]]:
if type(t).__name__ in AnnotatedTypeNames:
return cast(Type[Any], Annotated) # mypy complains about _SpecialForm in py3.6
return getattr(t, '__origin__', None)
else:
from typing import get_origin as _typing_get_origin
def get_origin(tp: Type[Any]) -> Type[Any]:
"""
We can't directly use `typing.get_origin` since we need a fallback to support
custom generic classes like `ConstrainedList`.
It should become unnecessary once https://github.com/cython/cython/issues/3537 is
solved and https://github.com/samuelcolvin/pydantic/pull/1753 is merged.
"""
if type(tp).__name__ in AnnotatedTypeNames:
return cast(Type[Any], Annotated) # mypy complains about _SpecialForm
return _typing_get_origin(tp) or getattr(tp, '__origin__', None)
if sys.version_info < (3, 7): # noqa: C901 (ignore complexity)
def get_args(t: Type[Any]) -> Tuple[Any, ...]:
"""Simplest get_args compatibility layer possible.
The Python 3.6 typing module does not have `_GenericAlias` so
this won't work for everything. In particular this will not
support the `generics` module (we don't support generic models in
python 3.6).
"""
if type(t).__name__ in AnnotatedTypeNames:
return t.__args__ + t.__metadata__
return getattr(t, '__args__', ())
elif sys.version_info < (3, 8): # noqa: C901
from typing import _GenericAlias
def get_args(t: Type[Any]) -> Tuple[Any, ...]:
"""Compatibility version of get_args for python 3.7.
Mostly compatible with the python 3.8 `typing` module version
and able to handle almost all use cases.
"""
if type(t).__name__ in AnnotatedTypeNames:
return t.__args__ + t.__metadata__
if isinstance(t, _GenericAlias):
res = t.__args__
if t.__origin__ is Callable and res and res[0] is not Ellipsis:
res = (list(res[:-1]), res[-1])
return res
return getattr(t, '__args__', ())
else:
from typing import get_args as _typing_get_args
def _generic_get_args(tp: Type[Any]) -> Tuple[Any, ...]:
"""
In python 3.9, `typing.Dict`, `typing.List`, ...
do have an empty `__args__` by default (instead of the generic ~T for example).
In order to still support `Dict` for example and consider it as `Dict[Any, Any]`,
we retrieve the `_nparams` value that tells us how many parameters it needs.
"""
if hasattr(tp, '_nparams'):
return (Any,) * tp._nparams
return ()
def get_args(tp: Type[Any]) -> Tuple[Any, ...]:
"""Get type arguments with all substitutions performed.
For unions, basic simplifications used by Union constructor are performed.
Examples::
get_args(Dict[str, int]) == (str, int)
get_args(int) == ()
get_args(Union[int, Union[T, int], str][int]) == (int, str)
get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
get_args(Callable[[], T][int]) == ([], int)
"""
if type(tp).__name__ in AnnotatedTypeNames:
return tp.__args__ + tp.__metadata__
# the fallback is needed for the same reasons as `get_origin` (see above)
return _typing_get_args(tp) or getattr(tp, '__args__', ()) or _generic_get_args(tp)
if TYPE_CHECKING:
from .fields import ModelField
TupleGenerator = Generator[Tuple[str, Any], None, None]
DictStrAny = Dict[str, Any]
DictAny = Dict[Any, Any]
SetStr = Set[str]
ListStr = List[str]
IntStr = Union[int, str]
AbstractSetIntStr = AbstractSet[IntStr]
DictIntStrAny = Dict[IntStr, Any]
MappingIntStrAny = Mapping[IntStr, Any]
CallableGenerator = Generator[AnyCallable, None, None]
ReprArgs = Sequence[Tuple[Optional[str], Any]]
__all__ = (
'ForwardRef',
'Callable',
'AnyCallable',
'NoArgAnyCallable',
'NoneType',
'NONE_TYPES',
'display_as_type',
'resolve_annotations',
'is_callable_type',
'is_literal_type',
'all_literal_values',
'is_namedtuple',
'is_typeddict',
'is_new_type',
'new_type_supertype',
'is_classvar',
'update_field_forward_refs',
'TupleGenerator',
'DictStrAny',
'DictAny',
'SetStr',
'ListStr',
'IntStr',
'AbstractSetIntStr',
'DictIntStrAny',
'CallableGenerator',
'ReprArgs',
'GenericAlias',
'get_args',
'get_origin',
'typing_base',
)
NoneType = None.__class__
NONE_TYPES: Set[Any] = {None, NoneType, Literal[None]}
def display_as_type(v: Type[Any]) -> str:
if not isinstance(v, typing_base) and not isinstance(v, GenericAlias) and not isinstance(v, type):
v = v.__class__
if isinstance(v, type) and issubclass(v, Enum):
if issubclass(v, int):
return 'int'
elif issubclass(v, str):
return 'str'
else:
return 'enum'
if isinstance(v, GenericAlias):
# Generic alias are constructs like `list[int]`
return str(v).replace('typing.', '')
try:
return v.__name__
except AttributeError:
# happens with typing objects
return str(v).replace('typing.', '')
def resolve_annotations(raw_annotations: Dict[str, Type[Any]], module_name: Optional[str]) -> Dict[str, Type[Any]]:
"""
Partially taken from typing.get_type_hints.
Resolve string or ForwardRef annotations into type objects if possible.
"""
base_globals: Optional[Dict[str, Any]] = None
if module_name:
try:
module = sys.modules[module_name]
except KeyError:
# happens occasionally, see https://github.com/samuelcolvin/pydantic/issues/2363
pass
else:
base_globals = module.__dict__
annotations = {}
for name, value in raw_annotations.items():
if isinstance(value, str):
if sys.version_info >= (3, 7):
value = ForwardRef(value, is_argument=False)
else:
value = ForwardRef(value)
try:
value = _eval_type(value, base_globals, None)
except NameError:
# this is ok, it can be fixed with update_forward_refs
pass
annotations[name] = value
return annotations
def is_callable_type(type_: Type[Any]) -> bool:
return type_ is Callable or get_origin(type_) is Callable
if sys.version_info >= (3, 7):
def is_literal_type(type_: Type[Any]) -> bool:
return Literal is not None and get_origin(type_) is Literal
def literal_values(type_: Type[Any]) -> Tuple[Any, ...]:
return get_args(type_)
else:
def is_literal_type(type_: Type[Any]) -> bool:
return Literal is not None and hasattr(type_, '__values__') and type_ == Literal[type_.__values__]
def literal_values(type_: Type[Any]) -> Tuple[Any, ...]:
return type_.__values__
def all_literal_values(type_: Type[Any]) -> Tuple[Any, ...]:
"""
This method is used to retrieve all Literal values as
Literal can be used recursively (see https://www.python.org/dev/peps/pep-0586)
e.g. `Literal[Literal[Literal[1, 2, 3], "foo"], 5, None]`
"""
if not is_literal_type(type_):
return (type_,)
values = literal_values(type_)
return tuple(x for value in values for x in all_literal_values(value))
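# A minimal sketch of the recursive flattening, using the docstring's own
# example; `_demo_all_literal_values` is a hypothetical helper:
def _demo_all_literal_values() -> None:
    values = all_literal_values(Literal[Literal[Literal[1, 2, 3], 'foo'], 5, None])
    assert set(values) == {1, 2, 3, 'foo', 5, None}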
def is_namedtuple(type_: Type[Any]) -> bool:
"""
Check if a given class is a named tuple.
It can be either a `typing.NamedTuple` or `collections.namedtuple`
"""
from .utils import lenient_issubclass
return lenient_issubclass(type_, tuple) and hasattr(type_, '_fields')
def is_typeddict(type_: Type[Any]) -> bool:
"""
Check if a given class is a typed dict (from `typing` or `typing_extensions`)
In 3.10, there will be a public method (https://docs.python.org/3.10/library/typing.html#typing.is_typeddict)
"""
from .utils import lenient_issubclass
return lenient_issubclass(type_, dict) and hasattr(type_, '__total__')
test_type = NewType('test_type', str)
def is_new_type(type_: Type[Any]) -> bool:
"""
Check whether type_ was created using typing.NewType
"""
return isinstance(type_, test_type.__class__) and hasattr(type_, '__supertype__') # type: ignore
def new_type_supertype(type_: Type[Any]) -> Type[Any]:
while hasattr(type_, '__supertype__'):
type_ = type_.__supertype__
return type_
def _check_classvar(v: Optional[Type[Any]]) -> bool:
if v is None:
return False
return v.__class__ == ClassVar.__class__ and (sys.version_info < (3, 7) or getattr(v, '_name', None) == 'ClassVar')
def is_classvar(ann_type: Type[Any]) -> bool:
return _check_classvar(ann_type) or _check_classvar(get_origin(ann_type))
def update_field_forward_refs(field: 'ModelField', globalns: Any, localns: Any) -> None:
"""
Try to update ForwardRefs on fields based on this ModelField, globalns and localns.
"""
if field.type_.__class__ == ForwardRef:
field.type_ = evaluate_forwardref(field.type_, globalns, localns or None)
field.prepare()
if field.sub_fields:
for sub_f in field.sub_fields:
update_field_forward_refs(sub_f, globalns=globalns, localns=localns)
def get_class(type_: Type[Any]) -> Union[None, bool, Type[Any]]:
"""
Tries to get the class of a Type[T] annotation. Returns True if Type is used
without brackets. Otherwise returns None.
"""
try:
origin = get_origin(type_)
if origin is None: # Python 3.6
origin = type_
if issubclass(origin, Type): # type: ignore
if not get_args(type_) or not isinstance(get_args(type_)[0], type):
return True
return get_args(type_)[0]
except (AttributeError, TypeError):
pass
return None


@@ -0,0 +1,666 @@
import warnings
import weakref
from collections import OrderedDict, defaultdict, deque
from copy import deepcopy
from itertools import islice, zip_longest
from types import BuiltinFunctionType, CodeType, FunctionType, GeneratorType, LambdaType, ModuleType
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
Callable,
Dict,
Generator,
Iterable,
Iterator,
List,
Mapping,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
no_type_check,
)
from .typing import GenericAlias, NoneType, display_as_type
from .version import version_info
if TYPE_CHECKING:
from inspect import Signature
from pathlib import Path
from .dataclasses import Dataclass # noqa: F401
from .fields import ModelField # noqa: F401
from .main import BaseConfig, BaseModel # noqa: F401
from .typing import AbstractSetIntStr, DictIntStrAny, IntStr, MappingIntStrAny, ReprArgs # noqa: F401
__all__ = (
'import_string',
'sequence_like',
'validate_field_name',
'lenient_issubclass',
'in_ipython',
'deep_update',
'update_not_none',
'almost_equal_floats',
'get_model',
'to_camel',
'is_valid_field',
'smart_deepcopy',
'PyObjectStr',
'Representation',
'GetterDict',
'ValueItems',
'version_info', # required here to match behaviour in v1.3
'ClassAttribute',
'path_type',
'ROOT_KEY',
)
ROOT_KEY = '__root__'
# these are types that are returned unchanged by deepcopy
IMMUTABLE_NON_COLLECTIONS_TYPES: Set[Type[Any]] = {
int,
float,
complex,
str,
bool,
bytes,
type,
NoneType,
FunctionType,
BuiltinFunctionType,
LambdaType,
weakref.ref,
CodeType,
# note: including ModuleType here differs from the behaviour of deepcopy, which raises an error for modules.
# That might not be a good idea in general, but since this function is only used internally
# against default values of fields, it allows a field to actually have a module as its default value
ModuleType,
NotImplemented.__class__,
Ellipsis.__class__,
}
# these are types that if empty, might be copied with simple copy() instead of deepcopy()
BUILTIN_COLLECTIONS: Set[Type[Any]] = {
list,
set,
tuple,
frozenset,
dict,
OrderedDict,
defaultdict,
deque,
}
def import_string(dotted_path: str) -> Any:
"""
Stolen approximately from django. Import a dotted module path and return the attribute/class designated by the
last name in the path. Raise ImportError if the import fails.
"""
from importlib import import_module
try:
module_path, class_name = dotted_path.strip(' ').rsplit('.', 1)
except ValueError as e:
raise ImportError(f'"{dotted_path}" doesn\'t look like a module path') from e
module = import_module(module_path)
try:
return getattr(module, class_name)
except AttributeError as e:
raise ImportError(f'Module "{module_path}" does not define a "{class_name}" attribute') from e
def truncate(v: Union[str], *, max_len: int = 80) -> str:
"""
Truncate a value and add a unicode ellipsis (three dots) to the end if it was too long
"""
warnings.warn('`truncate` is no-longer used by pydantic and is deprecated', DeprecationWarning)
if isinstance(v, str) and len(v) > (max_len - 2):
# -3 so quote + string + … + quote has correct length
return (v[: (max_len - 3)] + '…').__repr__()
try:
v = v.__repr__()
except TypeError:
v = v.__class__.__repr__(v) # in case v is a type
if len(v) > max_len:
v = v[: max_len - 1] + '…'
return v
def sequence_like(v: Type[Any]) -> bool:
return isinstance(v, (list, tuple, set, frozenset, GeneratorType, deque))
def validate_field_name(bases: List[Type['BaseModel']], field_name: str) -> None:
"""
Ensure that the field's name does not shadow an existing attribute of the model.
"""
for base in bases:
if getattr(base, field_name, None):
raise NameError(
f'Field name "{field_name}" shadows a BaseModel attribute; '
f'use a different field name with "alias=\'{field_name}\'".'
)
def lenient_issubclass(cls: Any, class_or_tuple: Union[Type[Any], Tuple[Type[Any], ...]]) -> bool:
try:
return isinstance(cls, type) and issubclass(cls, class_or_tuple)
except TypeError:
if isinstance(cls, GenericAlias):
return False
raise # pragma: no cover
def in_ipython() -> bool:
"""
Check whether we're in an ipython environment, including jupyter notebooks.
"""
try:
eval('__IPYTHON__')
except NameError:
return False
else: # pragma: no cover
return True
KeyType = TypeVar('KeyType')
def deep_update(mapping: Dict[KeyType, Any], *updating_mappings: Dict[KeyType, Any]) -> Dict[KeyType, Any]:
updated_mapping = mapping.copy()
for updating_mapping in updating_mappings:
for k, v in updating_mapping.items():
if k in updated_mapping and isinstance(updated_mapping[k], dict) and isinstance(v, dict):
updated_mapping[k] = deep_update(updated_mapping[k], v)
else:
updated_mapping[k] = v
return updated_mapping
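# A minimal sketch of `deep_update`; `_demo_deep_update` is a hypothetical
# helper. Nested dicts are merged recursively, other values are replaced, and
# the input mapping itself is left untouched:
def _demo_deep_update() -> None:
    base = {'a': {'x': 1, 'y': 2}, 'b': 1}
    merged = deep_update(base, {'a': {'y': 3}, 'c': 4})
    assert merged == {'a': {'x': 1, 'y': 3}, 'b': 1, 'c': 4}
    assert base == {'a': {'x': 1, 'y': 2}, 'b': 1}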
def update_not_none(mapping: Dict[Any, Any], **update: Any) -> None:
mapping.update({k: v for k, v in update.items() if v is not None})
def almost_equal_floats(value_1: float, value_2: float, *, delta: float = 1e-8) -> bool:
"""
Return True if two floats are almost equal
"""
return abs(value_1 - value_2) <= delta
def generate_model_signature(
init: Callable[..., None], fields: Dict[str, 'ModelField'], config: Type['BaseConfig']
) -> 'Signature':
"""
Generate signature for model based on its fields
"""
from inspect import Parameter, Signature, signature
present_params = signature(init).parameters.values()
merged_params: Dict[str, Parameter] = {}
var_kw = None
use_var_kw = False
for param in islice(present_params, 1, None): # skip self arg
if param.kind is param.VAR_KEYWORD:
var_kw = param
continue
merged_params[param.name] = param
if var_kw: # if custom init has no var_kw, fields which are not declared in it cannot be passed through
allow_names = config.allow_population_by_field_name
for field_name, field in fields.items():
param_name = field.alias
if field_name in merged_params or param_name in merged_params:
continue
elif not param_name.isidentifier():
if allow_names and field_name.isidentifier():
param_name = field_name
else:
use_var_kw = True
continue
# TODO: replace annotation with actual expected types once #1055 solved
kwargs = {'default': field.default} if not field.required else {}
merged_params[param_name] = Parameter(
param_name, Parameter.KEYWORD_ONLY, annotation=field.outer_type_, **kwargs
)
if config.extra is config.extra.allow:
use_var_kw = True
if var_kw and use_var_kw:
# Make sure the parameter for extra kwargs
# does not have the same name as a field
default_model_signature = [
('__pydantic_self__', Parameter.POSITIONAL_OR_KEYWORD),
('data', Parameter.VAR_KEYWORD),
]
if [(p.name, p.kind) for p in present_params] == default_model_signature:
# if this is the standard model signature, use extra_data as the extra args name
var_kw_name = 'extra_data'
else:
# else start from var_kw
var_kw_name = var_kw.name
# generate a name that's definitely unique
while var_kw_name in fields:
var_kw_name += '_'
merged_params[var_kw_name] = var_kw.replace(name=var_kw_name)
return Signature(parameters=list(merged_params.values()), return_annotation=None)
def get_model(obj: Union[Type['BaseModel'], Type['Dataclass']]) -> Type['BaseModel']:
from .main import BaseModel # noqa: F811
try:
model_cls = obj.__pydantic_model__ # type: ignore
except AttributeError:
model_cls = obj
if not issubclass(model_cls, BaseModel):
raise TypeError('Unsupported type, must be either BaseModel or dataclass')
return model_cls
def to_camel(string: str) -> str:
return ''.join(word.capitalize() for word in string.split('_'))
T = TypeVar('T')
def unique_list(input_list: Union[List[T], Tuple[T, ...]]) -> List[T]:
"""
Make a list unique while maintaining order.
"""
result = []
unique_set = set()
for v in input_list:
if v not in unique_set:
unique_set.add(v)
result.append(v)
return result
def update_normalized_all(
item: Union['AbstractSetIntStr', 'MappingIntStrAny'],
all_items: Union['AbstractSetIntStr', 'MappingIntStrAny'],
) -> Union['AbstractSetIntStr', 'MappingIntStrAny']:
"""
Update item based on what all items contains.
The update is done based on these cases:
- if both arguments are dicts then each key-value pair existing in ``all_items`` is merged into ``item``,
while the rest of the key-value pairs are updated recursively with this function.
- if both arguments are sets then they are just merged.
- if ``item`` is a dictionary and ``all_items`` is a set then all values of it are added to ``item`` as
``key: ...``.
- if ``item`` is set and ``all_items`` is a dictionary, then ``item`` is converted to a dictionary and then the
key-value pairs of ``all_items`` are merged in it.
During recursive calls, there is a case where ``all_items`` can be an Ellipsis, in which case the ``item`` is
returned as is.
"""
if not item:
return all_items
if isinstance(item, dict) and isinstance(all_items, dict):
item = dict(item)
item.update({k: update_normalized_all(item[k], v) for k, v in all_items.items() if k in item})
item.update({k: v for k, v in all_items.items() if k not in item})
return item
if isinstance(item, set) and isinstance(all_items, set):
item = set(item)
item.update(all_items)
return item
if isinstance(item, dict) and isinstance(all_items, set):
item = dict(item)
item.update({k: ... for k in all_items if k not in item})
return item
if isinstance(item, set) and isinstance(all_items, dict):
item = {k: ... for k in item}
item.update({k: v for k, v in all_items.items() if k not in item})
return item
# Case when item or all_items is ... (in recursive calls).
return item
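# A minimal sketch of the merge cases enumerated in the docstring above;
# `_demo_update_normalized_all` is a hypothetical helper:
def _demo_update_normalized_all() -> None:
    assert update_normalized_all({'a'}, {'b'}) == {'a', 'b'}                       # set + set
    assert update_normalized_all({'a': ...}, {'b': ...}) == {'a': ..., 'b': ...}   # dict + dict
    assert update_normalized_all({'a'}, {'b': ...}) == {'a': ..., 'b': ...}        # set + dict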
class PyObjectStr(str):
"""
String class where repr doesn't include quotes. Useful with Representation when you want to return a string
representation of something that is valid (or pseudo-valid) Python.
"""
def __repr__(self) -> str:
return str(self)
class Representation:
"""
Mixin to provide __str__, __repr__, and __pretty__ methods. See #884 for more details.
__pretty__ is used by [devtools](https://python-devtools.helpmanual.io/) to provide human readable representations
of objects.
"""
__slots__: Tuple[str, ...] = tuple()
def __repr_args__(self) -> 'ReprArgs':
"""
Returns the attributes to show in __str__, __repr__, and __pretty__; this is generally overridden.
Can either return:
* name - value pairs, e.g.: `[('foo_name', 'foo'), ('bar_name', ['b', 'a', 'r'])]`
* or, just values, e.g.: `[(None, 'foo'), (None, ['b', 'a', 'r'])]`
"""
attrs = ((s, getattr(self, s)) for s in self.__slots__)
return [(a, v) for a, v in attrs if v is not None]
def __repr_name__(self) -> str:
"""
Name of the instance's class, used in __repr__.
"""
return self.__class__.__name__
def __repr_str__(self, join_str: str) -> str:
return join_str.join(repr(v) if a is None else f'{a}={v!r}' for a, v in self.__repr_args__())
def __pretty__(self, fmt: Callable[[Any], Any], **kwargs: Any) -> Generator[Any, None, None]:
"""
Used by devtools (https://python-devtools.helpmanual.io/) to provide human readable representations of objects
"""
yield self.__repr_name__() + '('
yield 1
for name, value in self.__repr_args__():
if name is not None:
yield name + '='
yield fmt(value)
yield ','
yield 0
yield -1
yield ')'
def __str__(self) -> str:
return self.__repr_str__(' ')
def __repr__(self) -> str:
return f'{self.__repr_name__()}({self.__repr_str__(", ")})'
class GetterDict(Representation):
"""
Hack to make objects smell just enough like dicts for validate_model.
We can't inherit from Mapping[str, Any] because it upsets cython, so we have to implement all methods ourselves.
"""
__slots__ = ('_obj',)
def __init__(self, obj: Any):
self._obj = obj
def __getitem__(self, key: str) -> Any:
try:
return getattr(self._obj, key)
except AttributeError as e:
raise KeyError(key) from e
def get(self, key: Any, default: Any = None) -> Any:
return getattr(self._obj, key, default)
def extra_keys(self) -> Set[Any]:
"""
We don't want to get any other attributes of obj if the model didn't explicitly ask for them
"""
return set()
def keys(self) -> List[Any]:
"""
Keys of the pseudo dictionary; uses a list rather than a set so order information can be maintained, like python
dictionaries.
"""
return list(self)
def values(self) -> List[Any]:
return [self[k] for k in self]
def items(self) -> Iterator[Tuple[str, Any]]:
for k in self:
yield k, self.get(k)
def __iter__(self) -> Iterator[str]:
for name in dir(self._obj):
if not name.startswith('_'):
yield name
def __len__(self) -> int:
return sum(1 for _ in self)
def __contains__(self, item: Any) -> bool:
return item in self.keys()
def __eq__(self, other: Any) -> bool:
return dict(self) == dict(other.items())
def __repr_args__(self) -> 'ReprArgs':
return [(None, dict(self))]
def __repr_name__(self) -> str:
return f'GetterDict[{display_as_type(self._obj)}]'
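# A minimal sketch of wrapping an arbitrary object; `_demo_getter_dict` and
# `Point` are hypothetical:
def _demo_getter_dict() -> None:
    class Point:
        def __init__(self) -> None:
            self.x, self.y = 1, 2

    gd = GetterDict(Point())
    assert gd['x'] == 1 and gd.get('z', 0) == 0  # missing attributes fall back to the default
    assert set(gd.keys()) == {'x', 'y'}  # only public attribute names are exposed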
class ValueItems(Representation):
"""
Class for more convenient calculation of excluded or included fields on values.
"""
__slots__ = ('_items', '_type')
def __init__(self, value: Any, items: Union['AbstractSetIntStr', 'MappingIntStrAny']) -> None:
if TYPE_CHECKING:
self._items: Union['AbstractSetIntStr', 'MappingIntStrAny']
self._type: Type[Union[set, dict]] # type: ignore
# For further type checks speed-up
if isinstance(items, Mapping):
self._type = dict
elif isinstance(items, AbstractSet):
self._type = set
else:
raise TypeError(f'Unexpected type of exclude value {items.__class__}')
if isinstance(value, (list, tuple)):
items = self._normalize_indexes(items, len(value))
self._items = items
@no_type_check
def is_excluded(self, item: Any) -> bool:
"""
Check if item is fully excluded
(a value is considered excluded if self._type is set and item is contained in self._items,
or if self._type is dict and self._items.get(item) is ...)
:param item: key or index of a value
"""
if self._type is set:
return item in self._items
return self._items.get(item) is ...
@no_type_check
def is_included(self, item: Any) -> bool:
"""
Check if value is contained in self._items
:param item: key or index of value
"""
return item in self._items
@no_type_check
def for_element(self, e: 'IntStr') -> Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']]:
"""
:param e: key or index of element in value
:return: raw values for element if self._items is a dict and contains the needed element
"""
if self._type is dict:
item = self._items.get(e)
return item if item is not ... else None
return None
@no_type_check
def _normalize_indexes(
self, items: Union['AbstractSetIntStr', 'MappingIntStrAny'], v_length: int
) -> Union['AbstractSetIntStr', 'DictIntStrAny']:
"""
:param items: dict or set of indexes which will be normalized
:param v_length: length of the sequence whose indexes will be normalized
>>> self._normalize_indexes({0, -2, -1}, 4)
{0, 2, 3}
>>> self._normalize_indexes({'__all__'}, 4)
{0, 1, 2, 3}
"""
if any(not isinstance(i, int) and i != '__all__' for i in items):
raise TypeError(
'Excluding fields from a sequence of sub-models or dicts must be performed index-wise: '
'expected integer keys or keyword "__all__"'
)
if self._type is set:
if '__all__' in items:
if items != {'__all__'}:
raise ValueError('set with keyword "__all__" must not contain other elements')
return {i for i in range(v_length)}
return {v_length + i if i < 0 else i for i in items}
else:
all_items = items.get('__all__')
for i, v in items.items():
if not (isinstance(v, Mapping) or isinstance(v, AbstractSet) or v is ...):
raise TypeError(f'Unexpected type of exclude value for index "{i}" {v.__class__}')
normalized_items = {v_length + i if i < 0 else i: v for i, v in items.items() if i != '__all__'}
if all_items:
default: Type[Union[Set[Any], Dict[Any, Any]]]
if isinstance(all_items, Mapping):
default = dict
elif isinstance(all_items, AbstractSet):
default = set
else:
for i in range(v_length):
normalized_items.setdefault(i, ...)
return normalized_items
for i in range(v_length):
normalized_item = normalized_items.setdefault(i, default())
if normalized_item is not ...:
normalized_items[i] = update_normalized_all(normalized_item, all_items)
return normalized_items
def __repr_args__(self) -> 'ReprArgs':
return [(None, self._items)]
class ClassAttribute:
"""
Hide class attribute from its instances
"""
__slots__ = (
'name',
'value',
)
def __init__(self, name: str, value: Any) -> None:
self.name = name
self.value = value
def __get__(self, instance: Any, owner: Type[Any]) -> Any:
if instance is None:
return self.value
raise AttributeError(f'{self.name!r} attribute of {owner.__name__!r} is class-only')
path_types = {
'is_dir': 'directory',
'is_file': 'file',
'is_mount': 'mount point',
'is_symlink': 'symlink',
'is_block_device': 'block device',
'is_char_device': 'char device',
'is_fifo': 'FIFO',
'is_socket': 'socket',
}
def path_type(p: 'Path') -> str:
"""
Find out what sort of thing a path is.
"""
assert p.exists(), 'path does not exist'
for method, name in path_types.items():
if getattr(p, method)():
return name
return 'unknown'
Obj = TypeVar('Obj')
def smart_deepcopy(obj: Obj) -> Obj:
"""
Return type as is for immutable built-in types
Use obj.copy() for built-in empty collections
Use copy.deepcopy() for non-empty collections and unknown objects
"""
obj_type = obj.__class__
if obj_type in IMMUTABLE_NON_COLLECTIONS_TYPES:
return obj # fastest case: obj is immutable and not collection therefore will not be copied anyway
elif not obj and obj_type in BUILTIN_COLLECTIONS:
# faster way for empty collections, no need to copy its members
return obj if obj_type is tuple else obj.copy() # type: ignore # tuple doesn't have copy method
return deepcopy(obj) # slowest way when we actually might need a deepcopy
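# A minimal sketch of the three branches above; `_demo_smart_deepcopy` is a
# hypothetical helper:
def _demo_smart_deepcopy() -> None:
    s = 'abc'
    assert smart_deepcopy(s) is s  # immutable non-collection: returned as is
    empty: Dict[str, Any] = {}
    assert smart_deepcopy(empty) is not empty  # empty builtin collection: cheap .copy()
    nested = [[1]]
    copied = smart_deepcopy(nested)
    assert copied == nested and copied[0] is not nested[0]  # non-empty: real deepcopy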
def is_valid_field(name: str) -> bool:
if not name.startswith('_'):
return True
return ROOT_KEY == name
def is_valid_private_name(name: str) -> bool:
return not is_valid_field(name) and name not in {
'__annotations__',
'__classcell__',
'__doc__',
'__module__',
'__orig_bases__',
'__qualname__',
}
_EMPTY = object()
def all_identical(left: Iterable[Any], right: Iterable[Any]) -> bool:
"""
Check that the items of `left` are the same objects as those in `right`.
>>> a, b = object(), object()
>>> all_identical([a, b, a], [a, b, a])
True
>>> all_identical([a, b, [a]], [a, b, [a]]) # new list object, while "equal" is not "identical"
False
"""
for left_item, right_item in zip_longest(left, right, fillvalue=_EMPTY):
if left_item is not right_item:
return False
return True


@@ -0,0 +1,715 @@
import re
from collections import OrderedDict, deque
from collections.abc import Hashable
from datetime import date, datetime, time, timedelta
from decimal import Decimal, DecimalException
from enum import Enum, IntEnum
from ipaddress import IPv4Address, IPv4Interface, IPv4Network, IPv6Address, IPv6Interface, IPv6Network
from pathlib import Path
from typing import (
TYPE_CHECKING,
Any,
Callable,
Deque,
Dict,
FrozenSet,
Generator,
List,
NamedTuple,
Pattern,
Set,
Tuple,
Type,
TypeVar,
Union,
)
from uuid import UUID
from typing_extensions import Literal
from . import errors
from .datetime_parse import parse_date, parse_datetime, parse_duration, parse_time
from .typing import (
NONE_TYPES,
AnyCallable,
ForwardRef,
all_literal_values,
display_as_type,
get_class,
is_callable_type,
is_literal_type,
is_namedtuple,
is_typeddict,
)
from .utils import almost_equal_floats, lenient_issubclass, sequence_like
if TYPE_CHECKING:
from .annotated_types import TypedDict
from .fields import ModelField
from .main import BaseConfig
from .types import ConstrainedDecimal, ConstrainedFloat, ConstrainedInt
ConstrainedNumber = Union[ConstrainedDecimal, ConstrainedFloat, ConstrainedInt]
AnyOrderedDict = OrderedDict[Any, Any]
Number = Union[int, float, Decimal]
StrBytes = Union[str, bytes]
def str_validator(v: Any) -> str:
    if isinstance(v, str):
        if isinstance(v, Enum):
            # an Enum that subclasses str: return the plain string value, not the member
            return v.value
        else:
            return v
elif isinstance(v, (float, int, Decimal)):
# is there anything else we want to add here? If you think so, create an issue.
return str(v)
elif isinstance(v, (bytes, bytearray)):
return v.decode()
else:
raise errors.StrError()
def strict_str_validator(v: Any) -> str:
if isinstance(v, str):
return v
raise errors.StrError()
def bytes_validator(v: Any) -> bytes:
if isinstance(v, bytes):
return v
elif isinstance(v, bytearray):
return bytes(v)
elif isinstance(v, str):
return v.encode()
elif isinstance(v, (float, int, Decimal)):
return str(v).encode()
else:
raise errors.BytesError()
def strict_bytes_validator(v: Any) -> bytes:
if isinstance(v, bytes):
return v
elif isinstance(v, bytearray):
return bytes(v)
else:
raise errors.BytesError()
BOOL_FALSE = {0, '0', 'off', 'f', 'false', 'n', 'no'}
BOOL_TRUE = {1, '1', 'on', 't', 'true', 'y', 'yes'}
def bool_validator(v: Any) -> bool:
if v is True or v is False:
return v
if isinstance(v, bytes):
v = v.decode()
if isinstance(v, str):
v = v.lower()
try:
if v in BOOL_TRUE:
return True
if v in BOOL_FALSE:
return False
    except TypeError:
        # unhashable values (e.g. a list) can't be looked up in the sets above
        raise errors.BoolError()
raise errors.BoolError()
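# Illustrative (not part of the library): a few inputs through bool_validator --
#     bool_validator('YES')  # True  ('yes' after lower-casing, found in BOOL_TRUE)
#     bool_validator(0)      # False (0 is in BOOL_FALSE)
#     bool_validator('2')    # raises errors.BoolError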
def int_validator(v: Any) -> int:
if isinstance(v, int) and not (v is True or v is False):
return v
try:
return int(v)
except (TypeError, ValueError):
raise errors.IntegerError()
def strict_int_validator(v: Any) -> int:
if isinstance(v, int) and not (v is True or v is False):
return v
raise errors.IntegerError()
def float_validator(v: Any) -> float:
if isinstance(v, float):
return v
try:
return float(v)
except (TypeError, ValueError):
raise errors.FloatError()
def strict_float_validator(v: Any) -> float:
if isinstance(v, float):
return v
raise errors.FloatError()
def number_multiple_validator(v: 'Number', field: 'ModelField') -> 'Number':
field_type: ConstrainedNumber = field.type_
if field_type.multiple_of is not None:
mod = float(v) / float(field_type.multiple_of) % 1
if not almost_equal_floats(mod, 0.0) and not almost_equal_floats(mod, 1.0):
raise errors.NumberNotMultipleError(multiple_of=field_type.multiple_of)
return v
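# Why ``mod`` is compared with both 0.0 and 1.0 above: floating-point division can
# land just below a whole number. Illustrative: 1.0 / 0.1 % 1 evaluates to about
# 0.9999999999999998 even though 1.0 is mathematically a multiple of 0.1, and
# almost_equal_floats(mod, 1.0) accepts that case.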
def number_size_validator(v: 'Number', field: 'ModelField') -> 'Number':
field_type: ConstrainedNumber = field.type_
if field_type.gt is not None and not v > field_type.gt:
raise errors.NumberNotGtError(limit_value=field_type.gt)
elif field_type.ge is not None and not v >= field_type.ge:
raise errors.NumberNotGeError(limit_value=field_type.ge)
if field_type.lt is not None and not v < field_type.lt:
raise errors.NumberNotLtError(limit_value=field_type.lt)
if field_type.le is not None and not v <= field_type.le:
raise errors.NumberNotLeError(limit_value=field_type.le)
return v
def constant_validator(v: 'Any', field: 'ModelField') -> 'Any':
"""Validate ``const`` fields.
The value provided for a ``const`` field must be equal to the default value
of the field. This is to support the keyword of the same name in JSON
Schema.
"""
if v != field.default:
raise errors.WrongConstantError(given=v, permitted=[field.default])
return v
def anystr_length_validator(v: 'StrBytes', config: 'BaseConfig') -> 'StrBytes':
v_len = len(v)
min_length = config.min_anystr_length
if min_length is not None and v_len < min_length:
raise errors.AnyStrMinLengthError(limit_value=min_length)
max_length = config.max_anystr_length
if max_length is not None and v_len > max_length:
raise errors.AnyStrMaxLengthError(limit_value=max_length)
return v
def anystr_strip_whitespace(v: 'StrBytes') -> 'StrBytes':
return v.strip()
def anystr_lower(v: 'StrBytes') -> 'StrBytes':
return v.lower()
def ordered_dict_validator(v: Any) -> 'AnyOrderedDict':
if isinstance(v, OrderedDict):
return v
try:
return OrderedDict(v)
except (TypeError, ValueError):
raise errors.DictError()
def dict_validator(v: Any) -> Dict[Any, Any]:
if isinstance(v, dict):
return v
try:
return dict(v)
except (TypeError, ValueError):
raise errors.DictError()
def list_validator(v: Any) -> List[Any]:
if isinstance(v, list):
return v
elif sequence_like(v):
return list(v)
else:
raise errors.ListError()
def tuple_validator(v: Any) -> Tuple[Any, ...]:
if isinstance(v, tuple):
return v
elif sequence_like(v):
return tuple(v)
else:
raise errors.TupleError()
def set_validator(v: Any) -> Set[Any]:
if isinstance(v, set):
return v
elif sequence_like(v):
return set(v)
else:
raise errors.SetError()
def frozenset_validator(v: Any) -> FrozenSet[Any]:
if isinstance(v, frozenset):
return v
elif sequence_like(v):
return frozenset(v)
else:
raise errors.FrozenSetError()
def deque_validator(v: Any) -> Deque[Any]:
if isinstance(v, deque):
return v
elif sequence_like(v):
return deque(v)
else:
raise errors.DequeError()
def enum_member_validator(v: Any, field: 'ModelField', config: 'BaseConfig') -> Enum:
try:
enum_v = field.type_(v)
except ValueError:
# field.type_ should be an enum, so will be iterable
raise errors.EnumMemberError(enum_values=list(field.type_))
return enum_v.value if config.use_enum_values else enum_v
def uuid_validator(v: Any, field: 'ModelField') -> UUID:
try:
if isinstance(v, str):
v = UUID(v)
elif isinstance(v, (bytes, bytearray)):
try:
v = UUID(v.decode())
except ValueError:
                # not a valid UUID string; assume 16 raw bytes in big-endian
                # order, i.e. the ``bytes`` argument to UUID
v = UUID(bytes=v)
except ValueError:
raise errors.UUIDError()
if not isinstance(v, UUID):
raise errors.UUIDError()
required_version = getattr(field.type_, '_required_version', None)
if required_version and v.version != required_version:
raise errors.UUIDVersionError(required_version=required_version)
return v
def decimal_validator(v: Any) -> Decimal:
if isinstance(v, Decimal):
return v
elif isinstance(v, (bytes, bytearray)):
v = v.decode()
v = str(v).strip()
try:
v = Decimal(v)
except DecimalException:
raise errors.DecimalError()
if not v.is_finite():
raise errors.DecimalIsNotFiniteError()
return v
def hashable_validator(v: Any) -> Hashable:
if isinstance(v, Hashable):
return v
raise errors.HashableError()
def ip_v4_address_validator(v: Any) -> IPv4Address:
if isinstance(v, IPv4Address):
return v
try:
return IPv4Address(v)
except ValueError:
raise errors.IPv4AddressError()
def ip_v6_address_validator(v: Any) -> IPv6Address:
if isinstance(v, IPv6Address):
return v
try:
return IPv6Address(v)
except ValueError:
raise errors.IPv6AddressError()
def ip_v4_network_validator(v: Any) -> IPv4Network:
"""
    Assume ``IPv4Network`` is initialised with the default ``strict`` argument.
See more:
https://docs.python.org/library/ipaddress.html#ipaddress.IPv4Network
"""
if isinstance(v, IPv4Network):
return v
try:
return IPv4Network(v)
except ValueError:
raise errors.IPv4NetworkError()
def ip_v6_network_validator(v: Any) -> IPv6Network:
"""
    Assume ``IPv6Network`` is initialised with the default ``strict`` argument.
See more:
https://docs.python.org/library/ipaddress.html#ipaddress.IPv6Network
"""
if isinstance(v, IPv6Network):
return v
try:
return IPv6Network(v)
except ValueError:
raise errors.IPv6NetworkError()
def ip_v4_interface_validator(v: Any) -> IPv4Interface:
if isinstance(v, IPv4Interface):
return v
try:
return IPv4Interface(v)
except ValueError:
raise errors.IPv4InterfaceError()
def ip_v6_interface_validator(v: Any) -> IPv6Interface:
if isinstance(v, IPv6Interface):
return v
try:
return IPv6Interface(v)
except ValueError:
raise errors.IPv6InterfaceError()
def path_validator(v: Any) -> Path:
if isinstance(v, Path):
return v
try:
return Path(v)
except TypeError:
raise errors.PathError()
def path_exists_validator(v: Any) -> Path:
if not v.exists():
raise errors.PathNotExistsError(path=v)
return v
def callable_validator(v: Any) -> AnyCallable:
"""
    Perform a simple check that the value is callable.
    Note: full matching of argument type hints and return types is not performed
"""
if callable(v):
return v
raise errors.CallableError(value=v)
def enum_validator(v: Any) -> Enum:
if isinstance(v, Enum):
return v
raise errors.EnumError(value=v)
def int_enum_validator(v: Any) -> IntEnum:
if isinstance(v, IntEnum):
return v
raise errors.IntEnumError(value=v)
def make_literal_validator(type_: Any) -> Callable[[Any], Any]:
permitted_choices = all_literal_values(type_)
    # To get O(1) lookups and still return one of the values set inside the `Literal`,
    # we create a dict keyed by those values (a set causes some problems with the way intersection works).
    # In some cases the set value and the checked value can indeed be different (see `test_literal_validator_str_enum`)
allowed_choices = {v: v for v in permitted_choices}
def literal_validator(v: Any) -> Any:
try:
return allowed_choices[v]
except KeyError:
raise errors.WrongConstantError(given=v, permitted=permitted_choices)
return literal_validator
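# Illustrative sketch (not pydantic code): for Literal['a', 'b'] the generated
# validator is a plain dict lookup --
#     validate = make_literal_validator(Literal['a', 'b'])
#     validate('a')  # -> 'a'
#     validate('c')  # raises errors.WrongConstantError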
def constr_length_validator(v: 'StrBytes', field: 'ModelField', config: 'BaseConfig') -> 'StrBytes':
v_len = len(v)
min_length = field.type_.min_length or config.min_anystr_length
if min_length is not None and v_len < min_length:
raise errors.AnyStrMinLengthError(limit_value=min_length)
max_length = field.type_.max_length or config.max_anystr_length
if max_length is not None and v_len > max_length:
raise errors.AnyStrMaxLengthError(limit_value=max_length)
return v
def constr_strip_whitespace(v: 'StrBytes', field: 'ModelField', config: 'BaseConfig') -> 'StrBytes':
strip_whitespace = field.type_.strip_whitespace or config.anystr_strip_whitespace
if strip_whitespace:
v = v.strip()
return v
def constr_lower(v: 'StrBytes', field: 'ModelField', config: 'BaseConfig') -> 'StrBytes':
lower = field.type_.to_lower or config.anystr_lower
if lower:
v = v.lower()
return v
def validate_json(v: Any, config: 'BaseConfig') -> Any:
if v is None:
# pass None through to other validators
return v
try:
return config.json_loads(v) # type: ignore
except ValueError:
raise errors.JsonError()
except TypeError:
raise errors.JsonTypeError()
T = TypeVar('T')
def make_arbitrary_type_validator(type_: Type[T]) -> Callable[[T], T]:
def arbitrary_type_validator(v: Any) -> T:
if isinstance(v, type_):
return v
raise errors.ArbitraryTypeError(expected_arbitrary_type=type_)
return arbitrary_type_validator
def make_class_validator(type_: Type[T]) -> Callable[[Any], Type[T]]:
def class_validator(v: Any) -> Type[T]:
if lenient_issubclass(v, type_):
return v
raise errors.SubclassError(expected_class=type_)
return class_validator
def any_class_validator(v: Any) -> Type[T]:
if isinstance(v, type):
return v
raise errors.ClassError()
def none_validator(v: Any) -> 'Literal[None]':
if v is None:
return v
raise errors.NotNoneError()
def pattern_validator(v: Any) -> Pattern[str]:
if isinstance(v, Pattern):
return v
str_value = str_validator(v)
try:
return re.compile(str_value)
except re.error:
raise errors.PatternError()
NamedTupleT = TypeVar('NamedTupleT', bound=NamedTuple)
def make_namedtuple_validator(namedtuple_cls: Type[NamedTupleT]) -> Callable[[Tuple[Any, ...]], NamedTupleT]:
from .annotated_types import create_model_from_namedtuple
NamedTupleModel = create_model_from_namedtuple(namedtuple_cls)
namedtuple_cls.__pydantic_model__ = NamedTupleModel # type: ignore[attr-defined]
def namedtuple_validator(values: Tuple[Any, ...]) -> NamedTupleT:
annotations = NamedTupleModel.__annotations__
if len(values) > len(annotations):
raise errors.ListMaxLengthError(limit_value=len(annotations))
dict_values: Dict[str, Any] = dict(zip(annotations, values))
validated_dict_values: Dict[str, Any] = dict(NamedTupleModel(**dict_values))
return namedtuple_cls(**validated_dict_values)
return namedtuple_validator
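# Illustrative sketch (hypothetical ``Point`` class): the generated validator
# routes the tuple through the derived model, coercing each field --
#     class Point(NamedTuple):
#         x: int
#         y: int
#     validate = make_namedtuple_validator(Point)
#     validate(('1', 2))  # -> Point(x=1, y=2)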
def make_typeddict_validator(
typeddict_cls: Type['TypedDict'], config: Type['BaseConfig']
) -> Callable[[Any], Dict[str, Any]]:
from .annotated_types import create_model_from_typeddict
TypedDictModel = create_model_from_typeddict(typeddict_cls, __config__=config)
typeddict_cls.__pydantic_model__ = TypedDictModel # type: ignore[attr-defined]
def typeddict_validator(values: 'TypedDict') -> Dict[str, Any]:
return TypedDictModel.parse_obj(values).dict(exclude_unset=True)
return typeddict_validator
class IfConfig:
def __init__(self, validator: AnyCallable, *config_attr_names: str) -> None:
self.validator = validator
self.config_attr_names = config_attr_names
def check(self, config: Type['BaseConfig']) -> bool:
return any(getattr(config, name) not in {None, False} for name in self.config_attr_names)
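# Illustrative sketch (hypothetical ``Cfg`` config): IfConfig gates a validator on
# config attributes --
#     class Cfg(BaseConfig):
#         anystr_lower = True
#     IfConfig(anystr_lower, 'anystr_lower').check(Cfg)  # True, validator is used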
# order is important here; for example, bool is a subclass of int so has to come first,
# likewise datetime before date, IPv4Interface before IPv4Address, etc
_VALIDATORS: List[Tuple[Type[Any], List[Any]]] = [
(IntEnum, [int_validator, enum_member_validator]),
(Enum, [enum_member_validator]),
(
str,
[
str_validator,
IfConfig(anystr_strip_whitespace, 'anystr_strip_whitespace'),
IfConfig(anystr_lower, 'anystr_lower'),
IfConfig(anystr_length_validator, 'min_anystr_length', 'max_anystr_length'),
],
),
(
bytes,
[
bytes_validator,
IfConfig(anystr_strip_whitespace, 'anystr_strip_whitespace'),
IfConfig(anystr_lower, 'anystr_lower'),
IfConfig(anystr_length_validator, 'min_anystr_length', 'max_anystr_length'),
],
),
(bool, [bool_validator]),
(int, [int_validator]),
(float, [float_validator]),
(Path, [path_validator]),
(datetime, [parse_datetime]),
(date, [parse_date]),
(time, [parse_time]),
(timedelta, [parse_duration]),
(OrderedDict, [ordered_dict_validator]),
(dict, [dict_validator]),
(list, [list_validator]),
(tuple, [tuple_validator]),
(set, [set_validator]),
(frozenset, [frozenset_validator]),
(deque, [deque_validator]),
(UUID, [uuid_validator]),
(Decimal, [decimal_validator]),
(IPv4Interface, [ip_v4_interface_validator]),
(IPv6Interface, [ip_v6_interface_validator]),
(IPv4Address, [ip_v4_address_validator]),
(IPv6Address, [ip_v6_address_validator]),
(IPv4Network, [ip_v4_network_validator]),
(IPv6Network, [ip_v6_network_validator]),
]
def find_validators( # noqa: C901 (ignore complexity)
type_: Type[Any], config: Type['BaseConfig']
) -> Generator[AnyCallable, None, None]:
from .dataclasses import is_builtin_dataclass, make_dataclass_validator
if type_ is Any:
return
type_type = type_.__class__
if type_type == ForwardRef or type_type == TypeVar:
return
if type_ in NONE_TYPES:
yield none_validator
return
if type_ is Pattern:
yield pattern_validator
return
if type_ is Hashable:
yield hashable_validator
return
if is_callable_type(type_):
yield callable_validator
return
if is_literal_type(type_):
yield make_literal_validator(type_)
return
if is_builtin_dataclass(type_):
yield from make_dataclass_validator(type_, config)
return
if type_ is Enum:
yield enum_validator
return
if type_ is IntEnum:
yield int_enum_validator
return
if is_namedtuple(type_):
yield tuple_validator
yield make_namedtuple_validator(type_)
return
if is_typeddict(type_):
yield make_typeddict_validator(type_, config)
return
class_ = get_class(type_)
if class_ is not None:
if isinstance(class_, type):
yield make_class_validator(class_)
else:
yield any_class_validator
return
for val_type, validators in _VALIDATORS:
try:
if issubclass(type_, val_type):
for v in validators:
if isinstance(v, IfConfig):
if v.check(config):
yield v.validator
else:
yield v
return
except TypeError:
raise RuntimeError(f'error checking inheritance of {type_!r} (type: {display_as_type(type_)})')
if config.arbitrary_types_allowed:
yield make_arbitrary_type_validator(type_)
else:
raise RuntimeError(f'no validator found for {type_}, see `arbitrary_types_allowed` in Config')
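# Illustrative (not pydantic code): resolving the validator chain for a builtin --
#     from pydantic import BaseConfig
#     list(find_validators(int, BaseConfig))  # -> [int_validator]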

View File

@@ -0,0 +1,30 @@
__all__ = 'VERSION', 'version_info'
VERSION = '1.8.1'
def version_info() -> str:
import platform
import sys
from importlib import import_module
from pathlib import Path
from .main import compiled
optional_deps = []
for p in ('devtools', 'dotenv', 'email-validator', 'typing-extensions'):
try:
import_module(p.replace('-', '_'))
except ImportError:
continue
optional_deps.append(p)
info = {
'pydantic version': VERSION,
'pydantic compiled': compiled,
'install path': Path(__file__).resolve().parent,
'python version': sys.version,
'platform': platform.platform(),
'optional deps. installed': optional_deps,
}
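    # e.g. (illustrative) '{:>30} {}'.format('pydantic version:', '1.8.1')
    # gives '             pydantic version: 1.8.1' (key right-aligned to width 30)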
return '\n'.join('{:>30} {}'.format(k + ':', str(v).replace('\n', ' ')) for k, v in info.items())