diff --git a/.venv/bin/Activate.ps1 b/.venv/bin/Activate.ps1 new file mode 100644 index 0000000..b49d77b --- /dev/null +++ b/.venv/bin/Activate.ps1 @@ -0,0 +1,247 @@ +<# +.Synopsis +Activate a Python virtual environment for the current PowerShell session. + +.Description +Pushes the python executable for a virtual environment to the front of the +$Env:PATH environment variable and sets the prompt to signify that you are +in a Python virtual environment. Makes use of the command line switches as +well as the `pyvenv.cfg` file values present in the virtual environment. + +.Parameter VenvDir +Path to the directory that contains the virtual environment to activate. The +default value for this is the parent of the directory that the Activate.ps1 +script is located within. + +.Parameter Prompt +The prompt prefix to display when this virtual environment is activated. By +default, this prompt is the name of the virtual environment folder (VenvDir) +surrounded by parentheses and followed by a single space (ie. '(.venv) '). + +.Example +Activate.ps1 +Activates the Python virtual environment that contains the Activate.ps1 script. + +.Example +Activate.ps1 -Verbose +Activates the Python virtual environment that contains the Activate.ps1 script, +and shows extra information about the activation as it executes. + +.Example +Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv +Activates the Python virtual environment located in the specified location. + +.Example +Activate.ps1 -Prompt "MyPython" +Activates the Python virtual environment that contains the Activate.ps1 script, +and prefixes the current prompt with the specified string (surrounded in +parentheses) while the virtual environment is active. + +.Notes +On Windows, it may be required to enable this Activate.ps1 script by setting the +execution policy for the user. You can do this by issuing the following PowerShell +command: + +PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser + +For more information on Execution Policies: +https://go.microsoft.com/fwlink/?LinkID=135170 + +#> +Param( + [Parameter(Mandatory = $false)] + [String] + $VenvDir, + [Parameter(Mandatory = $false)] + [String] + $Prompt +) + +<# Function declarations --------------------------------------------------- #> + +<# +.Synopsis +Remove all shell session elements added by the Activate script, including the +addition of the virtual environment's Python executable from the beginning of +the PATH variable. + +.Parameter NonDestructive +If present, do not remove this function from the global namespace for the +session. + +#> +function global:deactivate ([switch]$NonDestructive) { + # Revert to original values + + # The prior prompt: + if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) { + Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt + Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT + } + + # The prior PYTHONHOME: + if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) { + Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME + Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME + } + + # The prior PATH: + if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) { + Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH + Remove-Item -Path Env:_OLD_VIRTUAL_PATH + } + + # Just remove the VIRTUAL_ENV altogether: + if (Test-Path -Path Env:VIRTUAL_ENV) { + Remove-Item -Path env:VIRTUAL_ENV + } + + # Just remove VIRTUAL_ENV_PROMPT altogether. 
+    if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) {
+        Remove-Item -Path env:VIRTUAL_ENV_PROMPT
+    }
+
+    # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether:
+    if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) {
+        Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force
+    }
+
+    # Leave deactivate function in the global namespace if requested:
+    if (-not $NonDestructive) {
+        Remove-Item -Path function:deactivate
+    }
+}
+
+<#
+.Description
+Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the
+given folder, and returns them in a map.
+
+For each line in the pyvenv.cfg file, if that line can be parsed into exactly
+two strings separated by `=` (with any amount of whitespace surrounding the =)
+then it is considered a `key = value` line. The left hand string is the key,
+the right hand is the value.
+
+If the value starts with a `'` or a `"` then the first and last character is
+stripped from the value before being captured.
+
+.Parameter ConfigDir
+Path to the directory that contains the `pyvenv.cfg` file.
+#>
+function Get-PyVenvConfig(
+    [String]
+    $ConfigDir
+) {
+    Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg"
+
+    # Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue).
+    $pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue
+
+    # An empty map will be returned if no config file is found.
+    $pyvenvConfig = @{ }
+
+    if ($pyvenvConfigPath) {
+
+        Write-Verbose "File exists, parse `key = value` lines"
+        $pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath
+
+        $pyvenvConfigContent | ForEach-Object {
+            $keyval = $PSItem -split "\s*=\s*", 2
+            if ($keyval[0] -and $keyval[1]) {
+                $val = $keyval[1]
+
+                # Remove extraneous quotations around a string value.
+                if ("'""".Contains($val.Substring(0, 1))) {
+                    $val = $val.Substring(1, $val.Length - 2)
+                }
+
+                $pyvenvConfig[$keyval[0]] = $val
+                Write-Verbose "Adding Key: '$($keyval[0])'='$val'"
+            }
+        }
+    }
+    return $pyvenvConfig
+}
+
+
+<# Begin Activate script --------------------------------------------------- #>
+
+# Determine the containing directory of this script
+$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition
+$VenvExecDir = Get-Item -Path $VenvExecPath
+
+Write-Verbose "Activation script is located in path: '$VenvExecPath'"
+Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)'"
+Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)'"
+
+# Set values required in priority: CmdLine, ConfigFile, Default
+# First, get the location of the virtual environment, it might not be
+# VenvExecDir if specified on the command line.
+if ($VenvDir) {
+    Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values"
+}
+else {
+    Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir."
+    $VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/")
+    Write-Verbose "VenvDir=$VenvDir"
+}
+
+# Next, read the `pyvenv.cfg` file to determine any required value such
+# as `prompt`.
+$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir
+
+# Next, set the prompt from the command line, or the config file, or
+# just use the name of the virtual environment folder.
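+# Illustrative only: a typical pyvenv.cfg that Get-PyVenvConfig above would
+# parse looks like this (values are examples, not taken from this repo):
+#
+#   home = /home/user/.pyenv/versions/3.11.8/bin
+#   include-system-site-packages = false
+#   version = 3.11.8
+#   prompt = 'my-project'
+#
+# The function returns a hashtable mapping each key to its value, with the
+# surrounding quotes stripped from 'my-project'; the $pyvenvCfg['prompt']
+# lookup just below relies on that stripping.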
+if ($Prompt) { + Write-Verbose "Prompt specified as argument, using '$Prompt'" +} +else { + Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value" + if ($pyvenvCfg -and $pyvenvCfg['prompt']) { + Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'" + $Prompt = $pyvenvCfg['prompt']; + } + else { + Write-Verbose " Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virtual environment)" + Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'" + $Prompt = Split-Path -Path $venvDir -Leaf + } +} + +Write-Verbose "Prompt = '$Prompt'" +Write-Verbose "VenvDir='$VenvDir'" + +# Deactivate any currently active virtual environment, but leave the +# deactivate function in place. +deactivate -nondestructive + +# Now set the environment variable VIRTUAL_ENV, used by many tools to determine +# that there is an activated venv. +$env:VIRTUAL_ENV = $VenvDir + +if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) { + + Write-Verbose "Setting prompt to '$Prompt'" + + # Set the prompt to include the env name + # Make sure _OLD_VIRTUAL_PROMPT is global + function global:_OLD_VIRTUAL_PROMPT { "" } + Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT + New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt + + function global:prompt { + Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) " + _OLD_VIRTUAL_PROMPT + } + $env:VIRTUAL_ENV_PROMPT = $Prompt +} + +# Clear PYTHONHOME +if (Test-Path -Path Env:PYTHONHOME) { + Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME + Remove-Item -Path Env:PYTHONHOME +} + +# Add the venv to the PATH +Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH +$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH" diff --git a/.venv/bin/about b/.venv/bin/about new file mode 100755 index 0000000..b995ed5 --- /dev/null +++ b/.venv/bin/about @@ -0,0 +1,8 @@ +#!/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from attributecode.cmd import about +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(about()) diff --git a/.venv/bin/activate b/.venv/bin/activate new file mode 100644 index 0000000..2da4891 --- /dev/null +++ b/.venv/bin/activate @@ -0,0 +1,63 @@ +# This file must be used with "source bin/activate" *from bash* +# you cannot run it directly + +deactivate () { + # reset old environment variables + if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then + PATH="${_OLD_VIRTUAL_PATH:-}" + export PATH + unset _OLD_VIRTUAL_PATH + fi + if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then + PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}" + export PYTHONHOME + unset _OLD_VIRTUAL_PYTHONHOME + fi + + # Call hash to forget past commands. Without forgetting + # past commands the $PATH changes we made may not be respected + hash -r 2> /dev/null + + if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then + PS1="${_OLD_VIRTUAL_PS1:-}" + export PS1 + unset _OLD_VIRTUAL_PS1 + fi + + unset VIRTUAL_ENV + unset VIRTUAL_ENV_PROMPT + if [ ! "${1:-}" = "nondestructive" ] ; then + # Self destruct! 
+        unset -f deactivate
+    fi
+}
+
+# unset irrelevant variables
+deactivate nondestructive
+
+VIRTUAL_ENV="/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv"
+export VIRTUAL_ENV
+
+_OLD_VIRTUAL_PATH="$PATH"
+PATH="$VIRTUAL_ENV/bin:$PATH"
+export PATH
+
+# unset PYTHONHOME if set
+# this will fail if PYTHONHOME is set to the empty string (which is bad anyway)
+# could use `if (set -u; : $PYTHONHOME) ;` in bash
+if [ -n "${PYTHONHOME:-}" ] ; then
+    _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}"
+    unset PYTHONHOME
+fi
+
+if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then
+    _OLD_VIRTUAL_PS1="${PS1:-}"
+    PS1="(.venv) ${PS1:-}"
+    export PS1
+    VIRTUAL_ENV_PROMPT="(.venv) "
+    export VIRTUAL_ENV_PROMPT
+fi
+
+# Call hash to forget past commands. Without forgetting
+# past commands the $PATH changes we made may not be respected
+hash -r 2> /dev/null
diff --git a/.venv/bin/activate.csh b/.venv/bin/activate.csh
new file mode 100644
index 0000000..b7f7447
--- /dev/null
+++ b/.venv/bin/activate.csh
@@ -0,0 +1,26 @@
+# This file must be used with "source bin/activate.csh" *from csh*.
+# You cannot run it directly.
+# Created by Davide Di Blasi <davidedb@gmail.com>.
+# Ported to Python 3.3 venv by Andrew Svetlov <andrew.svetlov@gmail.com>
+
+alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; unsetenv VIRTUAL_ENV_PROMPT; test "\!:*" != "nondestructive" && unalias deactivate'
+
+# Unset irrelevant variables.
+deactivate nondestructive
+
+setenv VIRTUAL_ENV "/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv"
+
+set _OLD_VIRTUAL_PATH="$PATH"
+setenv PATH "$VIRTUAL_ENV/bin:$PATH"
+
+
+set _OLD_VIRTUAL_PROMPT="$prompt"
+
+if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then
+    set prompt = "(.venv) $prompt"
+    setenv VIRTUAL_ENV_PROMPT "(.venv) "
+endif
+
+alias pydoc python -m pydoc
+
+rehash
diff --git a/.venv/bin/activate.fish b/.venv/bin/activate.fish
new file mode 100644
index 0000000..68f7f7e
--- /dev/null
+++ b/.venv/bin/activate.fish
@@ -0,0 +1,69 @@
+# This file must be used with "source <venv>/bin/activate.fish" *from fish*
+# (https://fishshell.com/); you cannot run it directly.
+
+function deactivate -d "Exit virtual environment and return to normal shell environment"
+    # reset old environment variables
+    if test -n "$_OLD_VIRTUAL_PATH"
+        set -gx PATH $_OLD_VIRTUAL_PATH
+        set -e _OLD_VIRTUAL_PATH
+    end
+    if test -n "$_OLD_VIRTUAL_PYTHONHOME"
+        set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME
+        set -e _OLD_VIRTUAL_PYTHONHOME
+    end
+
+    if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
+        set -e _OLD_FISH_PROMPT_OVERRIDE
+        # prevents error when using nested fish instances (Issue #93858)
+        if functions -q _old_fish_prompt
+            functions -e fish_prompt
+            functions -c _old_fish_prompt fish_prompt
+            functions -e _old_fish_prompt
+        end
+    end
+
+    set -e VIRTUAL_ENV
+    set -e VIRTUAL_ENV_PROMPT
+    if test "$argv[1]" != "nondestructive"
+        # Self-destruct!
+        functions -e deactivate
+    end
+end
+
+# Unset irrelevant variables.
+deactivate nondestructive
+
+set -gx VIRTUAL_ENV "/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv"
+
+set -gx _OLD_VIRTUAL_PATH $PATH
+set -gx PATH "$VIRTUAL_ENV/bin" $PATH
+
+# Unset PYTHONHOME if set.
+if set -q PYTHONHOME
+    set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
+    set -e PYTHONHOME
+end
+
+if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
+    # fish uses a function instead of an env var to generate the prompt.
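+    # In outline: the original fish_prompt is copied away under a new name,
+    # replaced by a wrapper that prints the "(.venv) " prefix, and chained
+    # back to. The `echo "exit $old_status" | .` line re-establishes $status
+    # before the saved prompt runs, so a prompt that displays the last
+    # command's exit code keeps working.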
+ + # Save the current fish_prompt function as the function _old_fish_prompt. + functions -c fish_prompt _old_fish_prompt + + # With the original prompt function renamed, we can override with our own. + function fish_prompt + # Save the return status of the last command. + set -l old_status $status + + # Output the venv prompt; color taken from the blue of the Python logo. + printf "%s%s%s" (set_color 4B8BBE) "(.venv) " (set_color normal) + + # Restore the return status of the previous command. + echo "exit $old_status" | . + # Output the original/"old" prompt. + _old_fish_prompt + end + + set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV" + set -gx VIRTUAL_ENV_PROMPT "(.venv) " +end diff --git a/.venv/bin/black b/.venv/bin/black new file mode 100755 index 0000000..07582f6 --- /dev/null +++ b/.venv/bin/black @@ -0,0 +1,8 @@ +#!/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from black import patched_main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(patched_main()) diff --git a/.venv/bin/blackd b/.venv/bin/blackd new file mode 100755 index 0000000..60b837f --- /dev/null +++ b/.venv/bin/blackd @@ -0,0 +1,8 @@ +#!/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from blackd import patched_main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(patched_main()) diff --git a/.venv/bin/docutils b/.venv/bin/docutils new file mode 100755 index 0000000..2af76a2 --- /dev/null +++ b/.venv/bin/docutils @@ -0,0 +1,8 @@ +#!/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from docutils.__main__ import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/.venv/bin/isort b/.venv/bin/isort new file mode 100755 index 0000000..3017dcf --- /dev/null +++ b/.venv/bin/isort @@ -0,0 +1,8 @@ +#!/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from isort.main import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/.venv/bin/isort-identify-imports b/.venv/bin/isort-identify-imports new file mode 100755 index 0000000..c07854a --- /dev/null +++ b/.venv/bin/isort-identify-imports @@ -0,0 +1,8 @@ +#!/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from isort.main import identify_imports_main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(identify_imports_main()) diff --git a/.venv/bin/keyring b/.venv/bin/keyring new file mode 100755 index 0000000..3aaa178 --- /dev/null +++ b/.venv/bin/keyring @@ -0,0 +1,8 @@ +#!/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from keyring.cli import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/.venv/bin/markdown-it b/.venv/bin/markdown-it new file mode 100755 index 0000000..a235416 --- /dev/null +++ b/.venv/bin/markdown-it @@ -0,0 +1,8 @@ 
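+# Every wrapper script in .venv/bin follows the same pip-generated pattern:
+# a shebang pinned to the venv interpreter, argv[0] normalised by stripping a
+# trailing `-script.pyw` or `.exe` (suffixes the Windows launchers append),
+# and the package's console-script entry point invoked with its return value
+# as the process exit code. A minimal hand-written equivalent -- a sketch
+# only, for a hypothetical package `mytool` exposing `mytool.cli:main` --
+# would be:
+#
+#     #!/path/to/.venv/bin/python
+#     import re
+#     import sys
+#     from mytool.cli import main
+#     if __name__ == '__main__':
+#         sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+#         sys.exit(main())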
+#!/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from markdown_it.cli.parse import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/.venv/bin/normalizer b/.venv/bin/normalizer new file mode 100755 index 0000000..588a27d --- /dev/null +++ b/.venv/bin/normalizer @@ -0,0 +1,8 @@ +#!/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from charset_normalizer.cli import cli_detect +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(cli_detect()) diff --git a/.venv/bin/pip b/.venv/bin/pip new file mode 100755 index 0000000..61f5c64 --- /dev/null +++ b/.venv/bin/pip @@ -0,0 +1,8 @@ +#!/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from pip._internal.cli.main import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/.venv/bin/pip3 b/.venv/bin/pip3 new file mode 100755 index 0000000..61f5c64 --- /dev/null +++ b/.venv/bin/pip3 @@ -0,0 +1,8 @@ +#!/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from pip._internal.cli.main import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/.venv/bin/pip3.11 b/.venv/bin/pip3.11 new file mode 100755 index 0000000..61f5c64 --- /dev/null +++ b/.venv/bin/pip3.11 @@ -0,0 +1,8 @@ +#!/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from pip._internal.cli.main import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/.venv/bin/pkginfo b/.venv/bin/pkginfo new file mode 100755 index 0000000..1c0ea20 --- /dev/null +++ b/.venv/bin/pkginfo @@ -0,0 +1,8 @@ +#!/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from pkginfo.commandline import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/.venv/bin/py.test b/.venv/bin/py.test new file mode 100755 index 0000000..e1829dc --- /dev/null +++ b/.venv/bin/py.test @@ -0,0 +1,8 @@ +#!/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from pytest import console_main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(console_main()) diff --git a/.venv/bin/pycodestyle b/.venv/bin/pycodestyle new file mode 100755 index 0000000..7260fb7 --- /dev/null +++ b/.venv/bin/pycodestyle @@ -0,0 +1,8 @@ +#!/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from pycodestyle import _main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(_main()) diff --git a/.venv/bin/pygmentize b/.venv/bin/pygmentize new file mode 100755 index 0000000..4fc6e78 --- /dev/null +++ b/.venv/bin/pygmentize @@ -0,0 +1,8 @@ 
+#!/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from pygments.cmdline import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/.venv/bin/pytest b/.venv/bin/pytest new file mode 100755 index 0000000..e1829dc --- /dev/null +++ b/.venv/bin/pytest @@ -0,0 +1,8 @@ +#!/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from pytest import console_main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(console_main()) diff --git a/.venv/bin/python b/.venv/bin/python new file mode 120000 index 0000000..b116143 --- /dev/null +++ b/.venv/bin/python @@ -0,0 +1 @@ +/home/diwa-audi/.pyenv/versions/3.11.8/bin/python \ No newline at end of file diff --git a/.venv/bin/python3 b/.venv/bin/python3 new file mode 120000 index 0000000..d8654aa --- /dev/null +++ b/.venv/bin/python3 @@ -0,0 +1 @@ +python \ No newline at end of file diff --git a/.venv/bin/python3.11 b/.venv/bin/python3.11 new file mode 120000 index 0000000..d8654aa --- /dev/null +++ b/.venv/bin/python3.11 @@ -0,0 +1 @@ +python \ No newline at end of file diff --git a/.venv/bin/rst2html.py b/.venv/bin/rst2html.py new file mode 100755 index 0000000..1eb7caf --- /dev/null +++ b/.venv/bin/rst2html.py @@ -0,0 +1,23 @@ +#!/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv/bin/python + +# $Id: rst2html.py 8927 2022-01-03 23:50:05Z milde $ +# Author: David Goodger +# Copyright: This module has been placed in the public domain. + +""" +A minimal front end to the Docutils Publisher, producing HTML. +""" + +try: + import locale + locale.setlocale(locale.LC_ALL, '') +except: + pass + +from docutils.core import publish_cmdline, default_description + + +description = ('Generates (X)HTML documents from standalone reStructuredText ' + 'sources. ' + default_description) + +publish_cmdline(writer_name='html', description=description) diff --git a/.venv/bin/rst2html4.py b/.venv/bin/rst2html4.py new file mode 100755 index 0000000..db4f5cc --- /dev/null +++ b/.venv/bin/rst2html4.py @@ -0,0 +1,26 @@ +#!/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv/bin/python + +# $Id: rst2html4.py 8927 2022-01-03 23:50:05Z milde $ +# Author: David Goodger +# Copyright: This module has been placed in the public domain. + +""" +A minimal front end to the Docutils Publisher, producing (X)HTML. + +The output conforms to XHTML 1.0 transitional +and almost to HTML 4.01 transitional (except for closing empty tags). +""" + +try: + import locale + locale.setlocale(locale.LC_ALL, '') +except: + pass + +from docutils.core import publish_cmdline, default_description + + +description = ('Generates (X)HTML documents from standalone reStructuredText ' + 'sources. ' + default_description) + +publish_cmdline(writer_name='html4', description=description) diff --git a/.venv/bin/rst2html5.py b/.venv/bin/rst2html5.py new file mode 100755 index 0000000..1805a88 --- /dev/null +++ b/.venv/bin/rst2html5.py @@ -0,0 +1,33 @@ +#!/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv/bin/python +# :Copyright: © 2015 Günter Milde. 
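+# (All of the rst2*.py front ends in this bin/ directory share one pattern:
+# set the locale, build a description string, then hand off to docutils'
+# publish_cmdline with the writer to use. Invocation is the same for each,
+# e.g. `rst2html5.py README.rst README.html` -- a usage sketch, filenames
+# illustrative.)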
+# :License: Released under the terms of the `2-Clause BSD license`_, in short:
+#
+#    Copying and distribution of this file, with or without modification,
+#    are permitted in any medium without royalty provided the copyright
+#    notice and this notice are preserved.
+#    This file is offered as-is, without any warranty.
+#
+# .. _2-Clause BSD license: https://opensource.org/licenses/BSD-2-Clause
+#
+# Revision: $Revision: 9021 $
+# Date: $Date: 2022-03-04 16:54:22 +0100 (Fr, 04. Mär 2022) $
+
+"""
+A minimal front end to the Docutils Publisher, producing HTML 5 documents.
+
+The output is also valid XML.
+"""
+
+try:
+    import locale  # module missing in Jython
+    locale.setlocale(locale.LC_ALL, '')
+except locale.Error:
+    pass
+
+from docutils.core import publish_cmdline, default_description
+
+description = ('Generates HTML5 documents from standalone '
+               'reStructuredText sources.\n'
+               + default_description)
+
+publish_cmdline(writer_name='html5', description=description)
diff --git a/.venv/bin/rst2latex.py b/.venv/bin/rst2latex.py
new file mode 100755
index 0000000..e467546
--- /dev/null
+++ b/.venv/bin/rst2latex.py
@@ -0,0 +1,26 @@
+#!/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv/bin/python
+
+# $Id: rst2latex.py 8956 2022-01-20 10:11:44Z milde $
+# Author: David Goodger
+# Copyright: This module has been placed in the public domain.
+
+"""
+A minimal front end to the Docutils Publisher, producing LaTeX.
+"""
+
+try:
+    import locale
+    locale.setlocale(locale.LC_ALL, '')
+except:
+    pass
+
+from docutils.core import publish_cmdline
+
+description = ('Generates LaTeX documents from standalone reStructuredText '
+               'sources. '
+               'Reads from <source> (default is stdin) and writes to '
+               '<destination> (default is stdout). See '
+               '<https://docutils.sourceforge.io/docs/user/latex.html> for '
+               'the full reference.')
+
+publish_cmdline(writer_name='latex', description=description)
diff --git a/.venv/bin/rst2man.py b/.venv/bin/rst2man.py
new file mode 100755
index 0000000..7a8c2b5
--- /dev/null
+++ b/.venv/bin/rst2man.py
@@ -0,0 +1,27 @@
+#!/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv/bin/python
+
+# Author:
+# Contact: grubert@users.sf.net
+# Copyright: This module has been placed in the public domain.
+
+"""
+man.py
+======
+
+This module provides a simple command line interface that uses the
+man page writer to output from ReStructuredText source.
+"""
+
+import locale
+try:
+    locale.setlocale(locale.LC_ALL, '')
+except:
+    pass
+
+from docutils.core import publish_cmdline, default_description
+from docutils.writers import manpage
+
+description = ("Generates plain unix manual documents. "
+               + default_description)
+
+publish_cmdline(writer=manpage.Writer(), description=description)
diff --git a/.venv/bin/rst2odt.py b/.venv/bin/rst2odt.py
new file mode 100755
index 0000000..bffe171
--- /dev/null
+++ b/.venv/bin/rst2odt.py
@@ -0,0 +1,28 @@
+#!/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv/bin/python
+
+# $Id: rst2odt.py 8994 2022-01-29 16:28:17Z milde $
+# Author: Dave Kuhlman
+# Copyright: This module has been placed in the public domain.
+
+"""
+A front end to the Docutils Publisher, producing OpenOffice documents.
+"""
+
+try:
+    import locale
+    locale.setlocale(locale.LC_ALL, '')
+except:
+    pass
+
+from docutils.core import publish_cmdline_to_binary, default_description
+from docutils.writers.odf_odt import Writer, Reader
+
+
+description = ('Generates OpenDocument/OpenOffice/ODF documents from '
+               'standalone reStructuredText sources. 
' + default_description) + + +writer = Writer() +reader = Reader() +output = publish_cmdline_to_binary(reader=reader, writer=writer, + description=description) diff --git a/.venv/bin/rst2odt_prepstyles.py b/.venv/bin/rst2odt_prepstyles.py new file mode 100755 index 0000000..9adba67 --- /dev/null +++ b/.venv/bin/rst2odt_prepstyles.py @@ -0,0 +1,65 @@ +#!/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv/bin/python + +# $Id: rst2odt_prepstyles.py 8932 2022-01-05 14:59:31Z milde $ +# Author: Dave Kuhlman +# Copyright: This module has been placed in the public domain. + +""" +Fix a word-processor-generated styles.odt for odtwriter use: Drop page size +specifications from styles.xml in STYLE_FILE.odt. +""" + +# Author: Michael Schutte + +from lxml import etree +import sys +import zipfile +from tempfile import mkstemp +import shutil +import os + +NAMESPACES = { + "style": "urn:oasis:names:tc:opendocument:xmlns:style:1.0", + "fo": "urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0" +} + + +def prepstyle(filename): + + zin = zipfile.ZipFile(filename) + styles = zin.read("styles.xml") + + root = etree.fromstring(styles) + for el in root.xpath("//style:page-layout-properties", + namespaces=NAMESPACES): + for attr in el.attrib: + if attr.startswith("{%s}" % NAMESPACES["fo"]): + del el.attrib[attr] + + tempname = mkstemp() + zout = zipfile.ZipFile(os.fdopen(tempname[0], "w"), "w", + zipfile.ZIP_DEFLATED) + + for item in zin.infolist(): + if item.filename == "styles.xml": + zout.writestr(item, etree.tostring(root)) + else: + zout.writestr(item, zin.read(item.filename)) + + zout.close() + zin.close() + shutil.move(tempname[1], filename) + + +def main(): + args = sys.argv[1:] + if len(args) != 1 or args[0] in ('-h', '--help'): + print(__doc__, file=sys.stderr) + print("Usage: %s STYLE_FILE.odt\n" % sys.argv[0], file=sys.stderr) + sys.exit(1) + filename = args[0] + prepstyle(filename) + + +if __name__ == '__main__': + main() diff --git a/.venv/bin/rst2pseudoxml.py b/.venv/bin/rst2pseudoxml.py new file mode 100755 index 0000000..27f2809 --- /dev/null +++ b/.venv/bin/rst2pseudoxml.py @@ -0,0 +1,23 @@ +#!/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv/bin/python + +# $Id: rst2pseudoxml.py 8927 2022-01-03 23:50:05Z milde $ +# Author: David Goodger +# Copyright: This module has been placed in the public domain. + +""" +A minimal front end to the Docutils Publisher, producing pseudo-XML. +""" + +try: + import locale + locale.setlocale(locale.LC_ALL, '') +except: + pass + +from docutils.core import publish_cmdline, default_description + + +description = ('Generates pseudo-XML from standalone reStructuredText ' + 'sources (for testing purposes). ' + default_description) + +publish_cmdline(description=description) diff --git a/.venv/bin/rst2s5.py b/.venv/bin/rst2s5.py new file mode 100755 index 0000000..9e051d5 --- /dev/null +++ b/.venv/bin/rst2s5.py @@ -0,0 +1,24 @@ +#!/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv/bin/python + +# $Id: rst2s5.py 8927 2022-01-03 23:50:05Z milde $ +# Author: Chris Liechti +# Copyright: This module has been placed in the public domain. + +""" +A minimal front end to the Docutils Publisher, producing HTML slides using +the S5 template system. +""" + +try: + import locale + locale.setlocale(locale.LC_ALL, '') +except: + pass + +from docutils.core import publish_cmdline, default_description + + +description = ('Generates S5 (X)HTML slideshow documents from standalone ' + 'reStructuredText sources. 
' + default_description)
+
+publish_cmdline(writer_name='s5', description=description)
diff --git a/.venv/bin/rst2xetex.py b/.venv/bin/rst2xetex.py
new file mode 100755
index 0000000..7163a8d
--- /dev/null
+++ b/.venv/bin/rst2xetex.py
@@ -0,0 +1,27 @@
+#!/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv/bin/python
+
+# $Id: rst2xetex.py 8956 2022-01-20 10:11:44Z milde $
+# Author: Guenter Milde
+# Copyright: This module has been placed in the public domain.
+
+"""
+A minimal front end to the Docutils Publisher, producing Lua/XeLaTeX code.
+"""
+
+try:
+    import locale
+    locale.setlocale(locale.LC_ALL, '')
+except:
+    pass
+
+from docutils.core import publish_cmdline
+
+description = ('Generates LaTeX documents from standalone reStructuredText '
+               'sources for compilation with the Unicode-aware TeX variants '
+               'XeLaTeX or LuaLaTeX. '
+               'Reads from <source> (default is stdin) and writes to '
+               '<destination> (default is stdout). See '
+               '<https://docutils.sourceforge.io/docs/user/latex.html> for '
+               'the full reference.')
+
+publish_cmdline(writer_name='xetex', description=description)
diff --git a/.venv/bin/rst2xml.py b/.venv/bin/rst2xml.py
new file mode 100755
index 0000000..8d91f39
--- /dev/null
+++ b/.venv/bin/rst2xml.py
@@ -0,0 +1,23 @@
+#!/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv/bin/python
+
+# $Id: rst2xml.py 8927 2022-01-03 23:50:05Z milde $
+# Author: David Goodger
+# Copyright: This module has been placed in the public domain.
+
+"""
+A minimal front end to the Docutils Publisher, producing Docutils XML.
+"""
+
+try:
+    import locale
+    locale.setlocale(locale.LC_ALL, '')
+except:
+    pass
+
+from docutils.core import publish_cmdline, default_description
+
+
+description = ('Generates Docutils-native XML from standalone '
+               'reStructuredText sources. ' + default_description)
+
+publish_cmdline(writer_name='xml', description=description)
diff --git a/.venv/bin/rstpep2html.py b/.venv/bin/rstpep2html.py
new file mode 100755
index 0000000..5c25e58
--- /dev/null
+++ b/.venv/bin/rstpep2html.py
@@ -0,0 +1,25 @@
+#!/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv/bin/python
+
+# $Id: rstpep2html.py 8927 2022-01-03 23:50:05Z milde $
+# Author: David Goodger
+# Copyright: This module has been placed in the public domain.
+
+"""
+A minimal front end to the Docutils Publisher, producing HTML from PEP
+(Python Enhancement Proposal) documents.
+"""
+
+try:
+    import locale
+    locale.setlocale(locale.LC_ALL, '')
+except:
+    pass
+
+from docutils.core import publish_cmdline, default_description
+
+
+description = ('Generates (X)HTML from reStructuredText-format PEP files. 
' + + default_description) + +publish_cmdline(reader_name='pep', writer_name='pep_html', + description=description) diff --git a/.venv/bin/twine b/.venv/bin/twine new file mode 100755 index 0000000..af42829 --- /dev/null +++ b/.venv/bin/twine @@ -0,0 +1,8 @@ +#!/home/diwa-audi/Documents/codes/openSource/contri_2/commoncode/.venv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from twine.__main__ import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/.venv/lib/python3.11/site-packages/6b397dd64e00b5aff23d__mypyc.cpython-311-x86_64-linux-gnu.so b/.venv/lib/python3.11/site-packages/6b397dd64e00b5aff23d__mypyc.cpython-311-x86_64-linux-gnu.so new file mode 100755 index 0000000..79e5015 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/6b397dd64e00b5aff23d__mypyc.cpython-311-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.11/site-packages/Jinja2-3.1.2.dist-info/INSTALLER b/.venv/lib/python3.11/site-packages/Jinja2-3.1.2.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/Jinja2-3.1.2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/lib/python3.11/site-packages/Jinja2-3.1.2.dist-info/LICENSE.rst b/.venv/lib/python3.11/site-packages/Jinja2-3.1.2.dist-info/LICENSE.rst new file mode 100644 index 0000000..c37cae4 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/Jinja2-3.1.2.dist-info/LICENSE.rst @@ -0,0 +1,28 @@ +Copyright 2007 Pallets + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/.venv/lib/python3.11/site-packages/Jinja2-3.1.2.dist-info/METADATA b/.venv/lib/python3.11/site-packages/Jinja2-3.1.2.dist-info/METADATA new file mode 100644 index 0000000..f54bb5c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/Jinja2-3.1.2.dist-info/METADATA @@ -0,0 +1,113 @@ +Metadata-Version: 2.1 +Name: Jinja2 +Version: 3.1.2 +Summary: A very fast and expressive template engine. 
+Home-page: https://palletsprojects.com/p/jinja/
+Author: Armin Ronacher
+Author-email: armin.ronacher@active-4.com
+Maintainer: Pallets
+Maintainer-email: contact@palletsprojects.com
+License: BSD-3-Clause
+Project-URL: Donate, https://palletsprojects.com/donate
+Project-URL: Documentation, https://jinja.palletsprojects.com/
+Project-URL: Changes, https://jinja.palletsprojects.com/changes/
+Project-URL: Source Code, https://github.com/pallets/jinja/
+Project-URL: Issue Tracker, https://github.com/pallets/jinja/issues/
+Project-URL: Twitter, https://twitter.com/PalletsTeam
+Project-URL: Chat, https://discord.gg/pallets
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Classifier: Topic :: Text Processing :: Markup :: HTML
+Requires-Python: >=3.7
+Description-Content-Type: text/x-rst
+License-File: LICENSE.rst
+Requires-Dist: MarkupSafe (>=2.0)
+Provides-Extra: i18n
+Requires-Dist: Babel (>=2.7) ; extra == 'i18n'
+
+Jinja
+=====
+
+Jinja is a fast, expressive, extensible templating engine. Special
+placeholders in the template allow writing code similar to Python
+syntax. Then the template is passed data to render the final document.
+
+It includes:
+
+- Template inheritance and inclusion.
+- Define and import macros within templates.
+- HTML templates can use autoescaping to prevent XSS from untrusted
+  user input.
+- A sandboxed environment can safely render untrusted templates.
+- AsyncIO support for generating templates and calling async
+  functions.
+- I18N support with Babel.
+- Templates are compiled to optimized Python code just-in-time and
+  cached, or can be compiled ahead-of-time.
+- Exceptions point to the correct line in templates to make debugging
+  easier.
+- Extensible filters, tests, functions, and even syntax.
+
+Jinja's philosophy is that while application logic belongs in Python if
+possible, it shouldn't make the template designer's job difficult by
+restricting functionality too much.
+
+
+Installing
+----------
+
+Install and update using `pip`_:
+
+.. code-block:: text
+
+    $ pip install -U Jinja2
+
+.. _pip: https://pip.pypa.io/en/stable/getting-started/
+
+
+In A Nutshell
+-------------
+
+.. code-block:: jinja
+
+    {% extends "base.html" %}
+    {% block title %}Members{% endblock %}
+    {% block content %}
+      <ul>
+      {% for user in users %}
+        <li><a href="{{ user.url }}">{{ user.username }}</a></li>
+      {% endfor %}
+      </ul>
+    {% endblock %}
+
+
+Donate
+------
+
+The Pallets organization develops and supports Jinja and other popular
+packages. In order to grow the community of contributors and users, and
+allow the maintainers to devote more time to the projects, `please
+donate today`_.
+
+.. 
_please donate today: https://palletsprojects.com/donate + + +Links +----- + +- Documentation: https://jinja.palletsprojects.com/ +- Changes: https://jinja.palletsprojects.com/changes/ +- PyPI Releases: https://pypi.org/project/Jinja2/ +- Source Code: https://github.com/pallets/jinja/ +- Issue Tracker: https://github.com/pallets/jinja/issues/ +- Website: https://palletsprojects.com/p/jinja/ +- Twitter: https://twitter.com/PalletsTeam +- Chat: https://discord.gg/pallets + + diff --git a/.venv/lib/python3.11/site-packages/Jinja2-3.1.2.dist-info/RECORD b/.venv/lib/python3.11/site-packages/Jinja2-3.1.2.dist-info/RECORD new file mode 100644 index 0000000..b145ed2 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/Jinja2-3.1.2.dist-info/RECORD @@ -0,0 +1,59 @@ +Jinja2-3.1.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +Jinja2-3.1.2.dist-info/LICENSE.rst,sha256=O0nc7kEF6ze6wQ-vG-JgQI_oXSUrjp3y4JefweCUQ3s,1475 +Jinja2-3.1.2.dist-info/METADATA,sha256=PZ6v2SIidMNixR7MRUX9f7ZWsPwtXanknqiZUmRbh4U,3539 +Jinja2-3.1.2.dist-info/RECORD,, +Jinja2-3.1.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +Jinja2-3.1.2.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92 +Jinja2-3.1.2.dist-info/entry_points.txt,sha256=zRd62fbqIyfUpsRtU7EVIFyiu1tPwfgO7EvPErnxgTE,59 +Jinja2-3.1.2.dist-info/top_level.txt,sha256=PkeVWtLb3-CqjWi1fO29OCbj55EhX_chhKrCdrVe_zs,7 +jinja2/__init__.py,sha256=8vGduD8ytwgD6GDSqpYc2m3aU-T7PKOAddvVXgGr_Fs,1927 +jinja2/__pycache__/__init__.cpython-311.pyc,, +jinja2/__pycache__/_identifier.cpython-311.pyc,, +jinja2/__pycache__/async_utils.cpython-311.pyc,, +jinja2/__pycache__/bccache.cpython-311.pyc,, +jinja2/__pycache__/compiler.cpython-311.pyc,, +jinja2/__pycache__/constants.cpython-311.pyc,, +jinja2/__pycache__/debug.cpython-311.pyc,, +jinja2/__pycache__/defaults.cpython-311.pyc,, +jinja2/__pycache__/environment.cpython-311.pyc,, +jinja2/__pycache__/exceptions.cpython-311.pyc,, +jinja2/__pycache__/ext.cpython-311.pyc,, +jinja2/__pycache__/filters.cpython-311.pyc,, +jinja2/__pycache__/idtracking.cpython-311.pyc,, +jinja2/__pycache__/lexer.cpython-311.pyc,, +jinja2/__pycache__/loaders.cpython-311.pyc,, +jinja2/__pycache__/meta.cpython-311.pyc,, +jinja2/__pycache__/nativetypes.cpython-311.pyc,, +jinja2/__pycache__/nodes.cpython-311.pyc,, +jinja2/__pycache__/optimizer.cpython-311.pyc,, +jinja2/__pycache__/parser.cpython-311.pyc,, +jinja2/__pycache__/runtime.cpython-311.pyc,, +jinja2/__pycache__/sandbox.cpython-311.pyc,, +jinja2/__pycache__/tests.cpython-311.pyc,, +jinja2/__pycache__/utils.cpython-311.pyc,, +jinja2/__pycache__/visitor.cpython-311.pyc,, +jinja2/_identifier.py,sha256=_zYctNKzRqlk_murTNlzrju1FFJL7Va_Ijqqd7ii2lU,1958 +jinja2/async_utils.py,sha256=dHlbTeaxFPtAOQEYOGYh_PHcDT0rsDaUJAFDl_0XtTg,2472 +jinja2/bccache.py,sha256=mhz5xtLxCcHRAa56azOhphIAe19u1we0ojifNMClDio,14061 +jinja2/compiler.py,sha256=Gs-N8ThJ7OWK4-reKoO8Wh1ZXz95MVphBKNVf75qBr8,72172 +jinja2/constants.py,sha256=GMoFydBF_kdpaRKPoM5cl5MviquVRLVyZtfp5-16jg0,1433 +jinja2/debug.py,sha256=iWJ432RadxJNnaMOPrjIDInz50UEgni3_HKuFXi2vuQ,6299 +jinja2/defaults.py,sha256=boBcSw78h-lp20YbaXSJsqkAI2uN_mD_TtCydpeq5wU,1267 +jinja2/environment.py,sha256=6uHIcc7ZblqOMdx_uYNKqRnnwAF0_nzbyeMP9FFtuh4,61349 +jinja2/exceptions.py,sha256=ioHeHrWwCWNaXX1inHmHVblvc4haO7AXsjCp3GfWvx0,5071 +jinja2/ext.py,sha256=ivr3P7LKbddiXDVez20EflcO3q2aHQwz9P_PgWGHVqE,31502 +jinja2/filters.py,sha256=9js1V-h2RlyW90IhLiBGLM2U-k6SCy2F4BUUMgB3K9Q,53509 
+jinja2/idtracking.py,sha256=GfNmadir4oDALVxzn3DL9YInhJDr69ebXeA2ygfuCGA,10704 +jinja2/lexer.py,sha256=DW2nX9zk-6MWp65YR2bqqj0xqCvLtD-u9NWT8AnFRxQ,29726 +jinja2/loaders.py,sha256=BfptfvTVpClUd-leMkHczdyPNYFzp_n7PKOJ98iyHOg,23207 +jinja2/meta.py,sha256=GNPEvifmSaU3CMxlbheBOZjeZ277HThOPUTf1RkppKQ,4396 +jinja2/nativetypes.py,sha256=DXgORDPRmVWgy034H0xL8eF7qYoK3DrMxs-935d0Fzk,4226 +jinja2/nodes.py,sha256=i34GPRAZexXMT6bwuf5SEyvdmS-bRCy9KMjwN5O6pjk,34550 +jinja2/optimizer.py,sha256=tHkMwXxfZkbfA1KmLcqmBMSaz7RLIvvItrJcPoXTyD8,1650 +jinja2/parser.py,sha256=nHd-DFHbiygvfaPtm9rcQXJChZG7DPsWfiEsqfwKerY,39595 +jinja2/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jinja2/runtime.py,sha256=5CmD5BjbEJxSiDNTFBeKCaq8qU4aYD2v6q2EluyExms,33476 +jinja2/sandbox.py,sha256=Y0xZeXQnH6EX5VjaV2YixESxoepnRbW_3UeQosaBU3M,14584 +jinja2/tests.py,sha256=Am5Z6Lmfr2XaH_npIfJJ8MdXtWsbLjMULZJulTAj30E,5905 +jinja2/utils.py,sha256=u9jXESxGn8ATZNVolwmkjUVu4SA-tLgV0W7PcSfPfdQ,23965 +jinja2/visitor.py,sha256=MH14C6yq24G_KVtWzjwaI7Wg14PCJIYlWW1kpkxYak0,3568 diff --git a/.venv/lib/python3.11/site-packages/Jinja2-3.1.2.dist-info/REQUESTED b/.venv/lib/python3.11/site-packages/Jinja2-3.1.2.dist-info/REQUESTED new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.11/site-packages/Jinja2-3.1.2.dist-info/WHEEL b/.venv/lib/python3.11/site-packages/Jinja2-3.1.2.dist-info/WHEEL new file mode 100644 index 0000000..becc9a6 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/Jinja2-3.1.2.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/.venv/lib/python3.11/site-packages/Jinja2-3.1.2.dist-info/entry_points.txt b/.venv/lib/python3.11/site-packages/Jinja2-3.1.2.dist-info/entry_points.txt new file mode 100644 index 0000000..7b9666c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/Jinja2-3.1.2.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[babel.extractors] +jinja2 = jinja2.ext:babel_extract[i18n] diff --git a/.venv/lib/python3.11/site-packages/Jinja2-3.1.2.dist-info/top_level.txt b/.venv/lib/python3.11/site-packages/Jinja2-3.1.2.dist-info/top_level.txt new file mode 100644 index 0000000..7f7afbf --- /dev/null +++ b/.venv/lib/python3.11/site-packages/Jinja2-3.1.2.dist-info/top_level.txt @@ -0,0 +1 @@ +jinja2 diff --git a/.venv/lib/python3.11/site-packages/MarkupSafe-2.1.2.dist-info/INSTALLER b/.venv/lib/python3.11/site-packages/MarkupSafe-2.1.2.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/MarkupSafe-2.1.2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/lib/python3.11/site-packages/MarkupSafe-2.1.2.dist-info/LICENSE.rst b/.venv/lib/python3.11/site-packages/MarkupSafe-2.1.2.dist-info/LICENSE.rst new file mode 100644 index 0000000..9d227a0 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/MarkupSafe-2.1.2.dist-info/LICENSE.rst @@ -0,0 +1,28 @@ +Copyright 2010 Pallets + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +3. 
Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/.venv/lib/python3.11/site-packages/MarkupSafe-2.1.2.dist-info/METADATA b/.venv/lib/python3.11/site-packages/MarkupSafe-2.1.2.dist-info/METADATA new file mode 100644 index 0000000..4a34999 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/MarkupSafe-2.1.2.dist-info/METADATA @@ -0,0 +1,98 @@ +Metadata-Version: 2.1 +Name: MarkupSafe +Version: 2.1.2 +Summary: Safely add untrusted strings to HTML/XML markup. +Home-page: https://palletsprojects.com/p/markupsafe/ +Author: Armin Ronacher +Author-email: armin.ronacher@active-4.com +Maintainer: Pallets +Maintainer-email: contact@palletsprojects.com +License: BSD-3-Clause +Project-URL: Donate, https://palletsprojects.com/donate +Project-URL: Documentation, https://markupsafe.palletsprojects.com/ +Project-URL: Changes, https://markupsafe.palletsprojects.com/changes/ +Project-URL: Source Code, https://github.com/pallets/markupsafe/ +Project-URL: Issue Tracker, https://github.com/pallets/markupsafe/issues/ +Project-URL: Twitter, https://twitter.com/PalletsTeam +Project-URL: Chat, https://discord.gg/pallets +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Web Environment +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content +Classifier: Topic :: Text Processing :: Markup :: HTML +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst +License-File: LICENSE.rst + +MarkupSafe +========== + +MarkupSafe implements a text object that escapes characters so it is +safe to use in HTML and XML. Characters that have special meanings are +replaced so that they display as the actual characters. This mitigates +injection attacks, meaning untrusted user input can safely be displayed +on a page. + + +Installing +---------- + +Install and update using `pip`_: + +.. code-block:: text + + pip install -U MarkupSafe + +.. _pip: https://pip.pypa.io/en/stable/getting-started/ + + +Examples +-------- + +.. 
code-block:: pycon
+
+    >>> from markupsafe import Markup, escape
+
+    >>> # escape replaces special characters and wraps in Markup
+    >>> escape("<script>alert(document.cookie);</script>")
+    Markup('&lt;script&gt;alert(document.cookie);&lt;/script&gt;')
+
+    >>> # wrap in Markup to mark text "safe" and prevent escaping
+    >>> Markup("<strong>Hello</strong>")
+    Markup('<strong>hello</strong>')
+
+    >>> escape(Markup("<strong>Hello</strong>"))
+    Markup('<strong>hello</strong>')
+
+    >>> # Markup is a str subclass
+    >>> # methods and operators escape their arguments
+    >>> template = Markup("Hello <em>{name}</em>")
+    >>> template.format(name='"World"')
+    Markup('Hello <em>&#34;World&#34;</em>')
+
+
+Donate
+------
+
+The Pallets organization develops and supports MarkupSafe and other
+popular packages. In order to grow the community of contributors and
+users, and allow the maintainers to devote more time to the projects,
+`please donate today`_.
+
+.. _please donate today: https://palletsprojects.com/donate
+
+
+Links
+-----
+
+- Documentation: https://markupsafe.palletsprojects.com/
+- Changes: https://markupsafe.palletsprojects.com/changes/
+- PyPI Releases: https://pypi.org/project/MarkupSafe/
+- Source Code: https://github.com/pallets/markupsafe/
+- Issue Tracker: https://github.com/pallets/markupsafe/issues/
+- Website: https://palletsprojects.com/p/markupsafe/
+- Twitter: https://twitter.com/PalletsTeam
+- Chat: https://discord.gg/pallets
diff --git a/.venv/lib/python3.11/site-packages/MarkupSafe-2.1.2.dist-info/RECORD b/.venv/lib/python3.11/site-packages/MarkupSafe-2.1.2.dist-info/RECORD
new file mode 100644
index 0000000..d8ef0c0
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/MarkupSafe-2.1.2.dist-info/RECORD
@@ -0,0 +1,15 @@
+MarkupSafe-2.1.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+MarkupSafe-2.1.2.dist-info/LICENSE.rst,sha256=SJqOEQhQntmKN7uYPhHg9-HTHwvY-Zp5yESOf_N9B-o,1475
+MarkupSafe-2.1.2.dist-info/METADATA,sha256=jPw4iOiZg6adxZ5bdvjZapeNmPMINMGG2q2v2rI4SqA,3222
+MarkupSafe-2.1.2.dist-info/RECORD,,
+MarkupSafe-2.1.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+MarkupSafe-2.1.2.dist-info/WHEEL,sha256=zcODeksSPXfEVqloqbuxG5DqZN6qJmVdi0EN9bjsiGQ,152
+MarkupSafe-2.1.2.dist-info/top_level.txt,sha256=qy0Plje5IJuvsCBjejJyhDCjEAdcDLK_2agVcex8Z6U,11
+markupsafe/__init__.py,sha256=LtjnhQ6AHmAgHl37cev2oQBXjr4xOF-QhdXgsCAL3-0,9306
+markupsafe/__pycache__/__init__.cpython-311.pyc,,
+markupsafe/__pycache__/_native.cpython-311.pyc,,
+markupsafe/_native.py,sha256=GR86Qvo_GcgKmKreA1WmYN9ud17OFwkww8E-fiW-57s,1713
+markupsafe/_speedups.c,sha256=X2XvQVtIdcK4Usz70BvkzoOfjTCmQlDkkjYSn-swE0g,7083
+markupsafe/_speedups.cpython-311-x86_64-linux-gnu.so,sha256=oxS10ynvGJxxD2aGbEyu_VASpUD-XwDsjQOlaw6cSzs,53656
+markupsafe/_speedups.pyi,sha256=vfMCsOgbAXRNLUXkyuyonG8uEWKYU4PDqNuMaDELAYw,229
+markupsafe/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
diff --git a/.venv/lib/python3.11/site-packages/MarkupSafe-2.1.2.dist-info/REQUESTED b/.venv/lib/python3.11/site-packages/MarkupSafe-2.1.2.dist-info/REQUESTED
new file mode 100644
index 0000000..e69de29
diff --git a/.venv/lib/python3.11/site-packages/MarkupSafe-2.1.2.dist-info/WHEEL b/.venv/lib/python3.11/site-packages/MarkupSafe-2.1.2.dist-info/WHEEL
new file mode 100644
index 0000000..9a9cfe1
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/MarkupSafe-2.1.2.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.38.4)
+Root-Is-Purelib: false
+Tag: cp311-cp311-manylinux_2_17_x86_64
+Tag: cp311-cp311-manylinux2014_x86_64
+
diff --git 
a/.venv/lib/python3.11/site-packages/MarkupSafe-2.1.2.dist-info/top_level.txt b/.venv/lib/python3.11/site-packages/MarkupSafe-2.1.2.dist-info/top_level.txt new file mode 100644 index 0000000..75bf729 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/MarkupSafe-2.1.2.dist-info/top_level.txt @@ -0,0 +1 @@ +markupsafe diff --git a/.venv/lib/python3.11/site-packages/Pygments-2.14.0.dist-info/AUTHORS b/.venv/lib/python3.11/site-packages/Pygments-2.14.0.dist-info/AUTHORS new file mode 100644 index 0000000..18a32c6 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/Pygments-2.14.0.dist-info/AUTHORS @@ -0,0 +1,264 @@ +Pygments is written and maintained by Georg Brandl . + +Major developers are Tim Hatch and Armin Ronacher +. + +Other contributors, listed alphabetically, are: + +* Sam Aaron -- Ioke lexer +* Jean Abou Samra -- LilyPond lexer +* João Abecasis -- JSLT lexer +* Ali Afshar -- image formatter +* Thomas Aglassinger -- Easytrieve, JCL, Rexx, Transact-SQL and VBScript + lexers +* Muthiah Annamalai -- Ezhil lexer +* Kumar Appaiah -- Debian control lexer +* Andreas Amann -- AppleScript lexer +* Timothy Armstrong -- Dart lexer fixes +* Jeffrey Arnold -- R/S, Rd, BUGS, Jags, and Stan lexers +* Eiríkr Åsheim -- Uxntal lexer +* Jeremy Ashkenas -- CoffeeScript lexer +* José Joaquín Atria -- Praat lexer +* Stefan Matthias Aust -- Smalltalk lexer +* Lucas Bajolet -- Nit lexer +* Ben Bangert -- Mako lexers +* Max Battcher -- Darcs patch lexer +* Thomas Baruchel -- APL lexer +* Tim Baumann -- (Literate) Agda lexer +* Paul Baumgart, 280 North, Inc. -- Objective-J lexer +* Michael Bayer -- Myghty lexers +* Thomas Beale -- Archetype lexers +* John Benediktsson -- Factor lexer +* Trevor Bergeron -- mIRC formatter +* Vincent Bernat -- LessCSS lexer +* Christopher Bertels -- Fancy lexer +* Sébastien Bigaret -- QVT Operational lexer +* Jarrett Billingsley -- MiniD lexer +* Adam Blinkinsop -- Haskell, Redcode lexers +* Stéphane Blondon -- Procfile, SGF and Sieve lexers +* Frits van Bommel -- assembler lexers +* Pierre Bourdon -- bugfixes +* Martijn Braam -- Kernel log lexer, BARE lexer +* Matthias Bussonnier -- ANSI style handling for terminal-256 formatter +* chebee7i -- Python traceback lexer improvements +* Hiram Chirino -- Scaml and Jade lexers +* Mauricio Caceres -- SAS and Stata lexers. +* Ian Cooper -- VGL lexer +* David Corbett -- Inform, Jasmin, JSGF, Snowball, and TADS 3 lexers +* Leaf Corcoran -- MoonScript lexer +* Christopher Creutzig -- MuPAD lexer +* Daniël W. Crompton -- Pike lexer +* Pete Curry -- bugfixes +* Bryan Davis -- EBNF lexer +* Bruno Deferrari -- Shen lexer +* Walter Dörwald -- UL4 lexer +* Luke Drummond -- Meson lexer +* Giedrius Dubinskas -- HTML formatter improvements +* Owen Durni -- Haxe lexer +* Alexander Dutton, Oxford University Computing Services -- SPARQL lexer +* James Edwards -- Terraform lexer +* Nick Efford -- Python 3 lexer +* Sven Efftinge -- Xtend lexer +* Artem Egorkine -- terminal256 formatter +* Matthew Fernandez -- CAmkES lexer +* Paweł Fertyk -- GDScript lexer, HTML formatter improvements +* Michael Ficarra -- CPSA lexer +* James H. Fisher -- PostScript lexer +* William S. 
Fulton -- SWIG lexer +* Carlos Galdino -- Elixir and Elixir Console lexers +* Michael Galloy -- IDL lexer +* Naveen Garg -- Autohotkey lexer +* Simon Garnotel -- FreeFem++ lexer +* Laurent Gautier -- R/S lexer +* Alex Gaynor -- PyPy log lexer +* Richard Gerkin -- Igor Pro lexer +* Alain Gilbert -- TypeScript lexer +* Alex Gilding -- BlitzBasic lexer +* GitHub, Inc -- DASM16, Augeas, TOML, and Slash lexers +* Bertrand Goetzmann -- Groovy lexer +* Krzysiek Goj -- Scala lexer +* Rostyslav Golda -- FloScript lexer +* Andrey Golovizin -- BibTeX lexers +* Matt Good -- Genshi, Cheetah lexers +* Michał Górny -- vim modeline support +* Alex Gosse -- TrafficScript lexer +* Patrick Gotthardt -- PHP namespaces support +* Hubert Gruniaux -- C and C++ lexer improvements +* Olivier Guibe -- Asymptote lexer +* Phil Hagelberg -- Fennel lexer +* Florian Hahn -- Boogie lexer +* Martin Harriman -- SNOBOL lexer +* Matthew Harrison -- SVG formatter +* Steven Hazel -- Tcl lexer +* Dan Michael Heggø -- Turtle lexer +* Aslak Hellesøy -- Gherkin lexer +* Greg Hendershott -- Racket lexer +* Justin Hendrick -- ParaSail lexer +* Jordi Gutiérrez Hermoso -- Octave lexer +* David Hess, Fish Software, Inc. -- Objective-J lexer +* Ken Hilton -- Typographic Number Theory and Arrow lexers +* Varun Hiremath -- Debian control lexer +* Rob Hoelz -- Perl 6 lexer +* Doug Hogan -- Mscgen lexer +* Ben Hollis -- Mason lexer +* Max Horn -- GAP lexer +* Fred Hornsey -- OMG IDL Lexer +* Alastair Houghton -- Lexer inheritance facility +* Tim Howard -- BlitzMax lexer +* Dustin Howett -- Logos lexer +* Ivan Inozemtsev -- Fantom lexer +* Hiroaki Itoh -- Shell console rewrite, Lexers for PowerShell session, + MSDOS session, BC, WDiff +* Brian R. Jackson -- Tea lexer +* Christian Jann -- ShellSession lexer +* Dennis Kaarsemaker -- sources.list lexer +* Dmitri Kabak -- Inferno Limbo lexer +* Igor Kalnitsky -- vhdl lexer +* Colin Kennedy - USD lexer +* Alexander Kit -- MaskJS lexer +* Pekka Klärck -- Robot Framework lexer +* Gerwin Klein -- Isabelle lexer +* Eric Knibbe -- Lasso lexer +* Stepan Koltsov -- Clay lexer +* Oliver Kopp - Friendly grayscale style +* Adam Koprowski -- Opa lexer +* Benjamin Kowarsch -- Modula-2 lexer +* Domen Kožar -- Nix lexer +* Oleh Krekel -- Emacs Lisp lexer +* Alexander Kriegisch -- Kconfig and AspectJ lexers +* Marek Kubica -- Scheme lexer +* Jochen Kupperschmidt -- Markdown processor +* Gerd Kurzbach -- Modelica lexer +* Jon Larimer, Google Inc. 
-- Smali lexer +* Olov Lassus -- Dart lexer +* Matt Layman -- TAP lexer +* Kristian Lyngstøl -- Varnish lexers +* Sylvestre Ledru -- Scilab lexer +* Chee Sing Lee -- Flatline lexer +* Mark Lee -- Vala lexer +* Pete Lomax -- Phix lexer +* Valentin Lorentz -- C++ lexer improvements +* Ben Mabey -- Gherkin lexer +* Angus MacArthur -- QML lexer +* Louis Mandel -- X10 lexer +* Louis Marchand -- Eiffel lexer +* Simone Margaritelli -- Hybris lexer +* Tim Martin - World of Warcraft TOC lexer +* Kirk McDonald -- D lexer +* Gordon McGregor -- SystemVerilog lexer +* Stephen McKamey -- Duel/JBST lexer +* Brian McKenna -- F# lexer +* Charles McLaughlin -- Puppet lexer +* Kurt McKee -- Tera Term macro lexer, PostgreSQL updates, MySQL overhaul, JSON lexer +* Joe Eli McIlvain -- Savi lexer +* Lukas Meuser -- BBCode formatter, Lua lexer +* Cat Miller -- Pig lexer +* Paul Miller -- LiveScript lexer +* Hong Minhee -- HTTP lexer +* Michael Mior -- Awk lexer +* Bruce Mitchener -- Dylan lexer rewrite +* Reuben Morais -- SourcePawn lexer +* Jon Morton -- Rust lexer +* Paulo Moura -- Logtalk lexer +* Mher Movsisyan -- DTD lexer +* Dejan Muhamedagic -- Crmsh lexer +* Ana Nelson -- Ragel, ANTLR, R console lexers +* Kurt Neufeld -- Markdown lexer +* Nam T. Nguyen -- Monokai style +* Jesper Noehr -- HTML formatter "anchorlinenos" +* Mike Nolta -- Julia lexer +* Avery Nortonsmith -- Pointless lexer +* Jonas Obrist -- BBCode lexer +* Edward O'Callaghan -- Cryptol lexer +* David Oliva -- Rebol lexer +* Pat Pannuto -- nesC lexer +* Jon Parise -- Protocol buffers and Thrift lexers +* Benjamin Peterson -- Test suite refactoring +* Ronny Pfannschmidt -- BBCode lexer +* Dominik Picheta -- Nimrod lexer +* Andrew Pinkham -- RTF Formatter Refactoring +* Clément Prévost -- UrbiScript lexer +* Tanner Prynn -- cmdline -x option and loading lexers from files +* Oleh Prypin -- Crystal lexer (based on Ruby lexer) +* Nick Psaris -- K and Q lexers +* Xidorn Quan -- Web IDL lexer +* Elias Rabel -- Fortran fixed form lexer +* raichoo -- Idris lexer +* Daniel Ramirez -- GDScript lexer +* Kashif Rasul -- CUDA lexer +* Nathan Reed -- HLSL lexer +* Justin Reidy -- MXML lexer +* Norman Richards -- JSON lexer +* Corey Richardson -- Rust lexer updates +* Fabrizio Riguzzi -- cplint leder +* Lubomir Rintel -- GoodData MAQL and CL lexers +* Andre Roberge -- Tango style +* Georg Rollinger -- HSAIL lexer +* Michiel Roos -- TypoScript lexer +* Konrad Rudolph -- LaTeX formatter enhancements +* Mario Ruggier -- Evoque lexers +* Miikka Salminen -- Lovelace style, Hexdump lexer, lexer enhancements +* Stou Sandalski -- NumPy, FORTRAN, tcsh and XSLT lexers +* Matteo Sasso -- Common Lisp lexer +* Joe Schafer -- Ada lexer +* Max Schillinger -- TiddlyWiki5 lexer +* Ken Schutte -- Matlab lexers +* René Schwaiger -- Rainbow Dash style +* Sebastian Schweizer -- Whiley lexer +* Tassilo Schweyer -- Io, MOOCode lexers +* Pablo Seminario -- PromQL lexer +* Ted Shaw -- AutoIt lexer +* Joerg Sieker -- ABAP lexer +* Robert Simmons -- Standard ML lexer +* Kirill Simonov -- YAML lexer +* Corbin Simpson -- Monte lexer +* Ville Skyttä -- ASCII armored lexer +* Alexander Smishlajev -- Visual FoxPro lexer +* Steve Spigarelli -- XQuery lexer +* Jerome St-Louis -- eC lexer +* Camil Staps -- Clean and NuSMV lexers; Solarized style +* James Strachan -- Kotlin lexer +* Tom Stuart -- Treetop lexer +* Colin Sullivan -- SuperCollider lexer +* Ben Swift -- Extempore lexer +* tatt61880 -- Kuin lexer +* Edoardo Tenani -- Arduino lexer +* Tiberius Teng -- default style overhaul +* 
Jeremy Thurgood -- Erlang, Squid config lexers +* Brian Tiffin -- OpenCOBOL lexer +* Bob Tolbert -- Hy lexer +* Doug Torrance -- Macaulay2 lexer +* Matthias Trute -- Forth lexer +* Tuoa Spi T4 -- Bdd lexer +* Erick Tryzelaar -- Felix lexer +* Alexander Udalov -- Kotlin lexer improvements +* Thomas Van Doren -- Chapel lexer +* Daniele Varrazzo -- PostgreSQL lexers +* Abe Voelker -- OpenEdge ABL lexer +* Pepijn de Vos -- HTML formatter CTags support +* Matthias Vallentin -- Bro lexer +* Benoît Vinot -- AMPL lexer +* Linh Vu Hong -- RSL lexer +* Immanuel Washington -- Smithy lexer +* Nathan Weizenbaum -- Haml and Sass lexers +* Nathan Whetsell -- Csound lexers +* Dietmar Winkler -- Modelica lexer +* Nils Winter -- Smalltalk lexer +* Davy Wybiral -- Clojure lexer +* Whitney Young -- ObjectiveC lexer +* Diego Zamboni -- CFengine3 lexer +* Enrique Zamudio -- Ceylon lexer +* Alex Zimin -- Nemerle lexer +* Rob Zimmerman -- Kal lexer +* Vincent Zurczak -- Roboconf lexer +* Hubert Gruniaux -- C and C++ lexer improvements +* Thomas Symalla -- AMDGPU Lexer +* 15b3 -- Image Formatter improvements +* Fabian Neumann -- CDDL lexer +* Thomas Duboucher -- CDDL lexer +* Philipp Imhof -- Pango Markup formatter +* Thomas Voss -- Sed lexer +* Martin Fischer -- WCAG contrast testing +* Marc Auberer -- Spice lexer + +Many thanks for all contributions! diff --git a/.venv/lib/python3.11/site-packages/Pygments-2.14.0.dist-info/INSTALLER b/.venv/lib/python3.11/site-packages/Pygments-2.14.0.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/Pygments-2.14.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/lib/python3.11/site-packages/Pygments-2.14.0.dist-info/LICENSE b/.venv/lib/python3.11/site-packages/Pygments-2.14.0.dist-info/LICENSE new file mode 100644 index 0000000..446a1a8 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/Pygments-2.14.0.dist-info/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2006-2022 by the respective authors (see AUTHORS file). +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/.venv/lib/python3.11/site-packages/Pygments-2.14.0.dist-info/METADATA b/.venv/lib/python3.11/site-packages/Pygments-2.14.0.dist-info/METADATA new file mode 100644 index 0000000..0ba0902 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/Pygments-2.14.0.dist-info/METADATA @@ -0,0 +1,38 @@ +Metadata-Version: 2.1 +Name: Pygments +Version: 2.14.0 +Summary: Pygments is a syntax highlighting package written in Python. +Home-page: https://pygments.org/ +Author: Georg Brandl +Author-email: georg@python.org +License: BSD-2-Clause +Project-URL: Documentation, https://pygments.org/docs/ +Project-URL: Source, https://github.com/pygments/pygments +Project-URL: Bug Tracker, https://github.com/pygments/pygments/issues +Project-URL: Changelog, https://github.com/pygments/pygments/blob/master/CHANGES +Keywords: syntax highlighting +Platform: any +Classifier: Development Status :: 6 - Mature +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: End Users/Desktop +Classifier: Intended Audience :: System Administrators +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Text Processing :: Filters +Classifier: Topic :: Utilities +Requires-Python: >=3.6 +Description-Content-Type: text/x-rst +License-File: LICENSE +License-File: AUTHORS +Provides-Extra: plugins +Requires-Dist: importlib-metadata ; (python_version < "3.8") and extra == 'plugins' + diff --git a/.venv/lib/python3.11/site-packages/Pygments-2.14.0.dist-info/RECORD b/.venv/lib/python3.11/site-packages/Pygments-2.14.0.dist-info/RECORD new file mode 100644 index 0000000..9214587 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/Pygments-2.14.0.dist-info/RECORD @@ -0,0 +1,608 @@ +../../../bin/pygmentize,sha256=X8XJo2AvwrXPwXq7UHJtJKhAfcd21krqX3S4VkTNaM0,278 +Pygments-2.14.0.dist-info/AUTHORS,sha256=OzlKwZii64dQrA-dyL7ayg_ogm5_OXUi_uwMmFwC3Xo,9737 +Pygments-2.14.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +Pygments-2.14.0.dist-info/LICENSE,sha256=qdZvHVJt8C4p3Oc0NtNOVuhjL0bCdbvf_HBWnogvnxc,1331 +Pygments-2.14.0.dist-info/METADATA,sha256=SnXak0f9bFv55eBI1WcwoHxo06g8XQC13OzWyskwwxk,1606 +Pygments-2.14.0.dist-info/RECORD,, +Pygments-2.14.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +Pygments-2.14.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92 +Pygments-2.14.0.dist-info/entry_points.txt,sha256=uUXw-XhMKBEX4pWcCtpuTTnPhL3h7OEE2jWi51VQsa8,53 +Pygments-2.14.0.dist-info/top_level.txt,sha256=RjKKqrVIStoebLHdbs0yZ2Lk4rS7cxGguXsLCYvZ2Ak,9 +pygments/__init__.py,sha256=G6KeoinsUh_ixOUfvUm9YQbF853QdZ4xxinie2AxBi0,2975 +pygments/__main__.py,sha256=H4FKgvsbzW61IXZbQ7n6JYJg674Z5MrYCWl4SvdXl4k,348 +pygments/__pycache__/__init__.cpython-311.pyc,, +pygments/__pycache__/__main__.cpython-311.pyc,, +pygments/__pycache__/cmdline.cpython-311.pyc,, +pygments/__pycache__/console.cpython-311.pyc,, +pygments/__pycache__/filter.cpython-311.pyc,, +pygments/__pycache__/formatter.cpython-311.pyc,, 
+pygments/__pycache__/lexer.cpython-311.pyc,, +pygments/__pycache__/modeline.cpython-311.pyc,, +pygments/__pycache__/plugin.cpython-311.pyc,, +pygments/__pycache__/regexopt.cpython-311.pyc,, +pygments/__pycache__/scanner.cpython-311.pyc,, +pygments/__pycache__/sphinxext.cpython-311.pyc,, +pygments/__pycache__/style.cpython-311.pyc,, +pygments/__pycache__/token.cpython-311.pyc,, +pygments/__pycache__/unistring.cpython-311.pyc,, +pygments/__pycache__/util.cpython-311.pyc,, +pygments/cmdline.py,sha256=fHBl85_v0FRqjw-Krp9-QcjIjwoHsq7OcVhHAkUJDvA,23530 +pygments/console.py,sha256=hQfqCFuOlGk7DW2lPQYepsw-wkOH1iNt9ylNA1eRymM,1697 +pygments/filter.py,sha256=NglMmMPTRRv-zuRSE_QbWid7JXd2J4AvwjCW2yWALXU,1938 +pygments/filters/__init__.py,sha256=Te1zPTE-avGuVnGrBVrOuLP89Q_g8trWgJvsPR5A00A,40338 +pygments/filters/__pycache__/__init__.cpython-311.pyc,, +pygments/formatter.py,sha256=5mKfYy6YY9mvQ-t4vqMNuNqRBIeEIJCSdRixbniZ68E,2893 +pygments/formatters/__init__.py,sha256=DjQIdYN6tqUG-yaGE88vdRO84MORotwpDonds73WNtY,4764 +pygments/formatters/__pycache__/__init__.cpython-311.pyc,, +pygments/formatters/__pycache__/_mapping.cpython-311.pyc,, +pygments/formatters/__pycache__/bbcode.cpython-311.pyc,, +pygments/formatters/__pycache__/groff.cpython-311.pyc,, +pygments/formatters/__pycache__/html.cpython-311.pyc,, +pygments/formatters/__pycache__/img.cpython-311.pyc,, +pygments/formatters/__pycache__/irc.cpython-311.pyc,, +pygments/formatters/__pycache__/latex.cpython-311.pyc,, +pygments/formatters/__pycache__/other.cpython-311.pyc,, +pygments/formatters/__pycache__/pangomarkup.cpython-311.pyc,, +pygments/formatters/__pycache__/rtf.cpython-311.pyc,, +pygments/formatters/__pycache__/svg.cpython-311.pyc,, +pygments/formatters/__pycache__/terminal.cpython-311.pyc,, +pygments/formatters/__pycache__/terminal256.cpython-311.pyc,, +pygments/formatters/_mapping.py,sha256=fCZgvsM6UEuZUG7J6lr47eVss5owKd_JyaNbDfxeqmQ,4104 +pygments/formatters/bbcode.py,sha256=aHDvFf02NTvo1oyFh7d7Nc-diEuJENhYHfgO5ni8y-c,3290 +pygments/formatters/groff.py,sha256=YwId80AP8EkfwEJHmKhHC7cGSugSLROhALR6si4TaOI,5062 +pygments/formatters/html.py,sha256=hXjnogf8HKeUOU-Xfklt52ult8dWKbSIvswmrfQGqc8,35565 +pygments/formatters/img.py,sha256=ErNknc6pWtb20k3OmOmbx9Yk2ltn3gJfIf3kztIfnAg,21914 +pygments/formatters/irc.py,sha256=BvaE_RpG1sWZY-HsV3rSOB6yfHiursKer-z4FoMAplE,4945 +pygments/formatters/latex.py,sha256=hu2im0X4ORQ1ctOwWbHsDqyJwBMD7QQVps0IkgIz6yk,19303 +pygments/formatters/other.py,sha256=dLf0bcqCxQmSk0NdEUdQMjDBw6l-Uqr3ioLbM-xE8kQ,5025 +pygments/formatters/pangomarkup.py,sha256=zTQdtK1097fcGfOM4NOItdp966y448deui0pxveauHQ,2200 +pygments/formatters/rtf.py,sha256=VrqLrq7dGZpxCkydzWbE0PWz7CS3h8rcrNXgNotQ178,4990 +pygments/formatters/svg.py,sha256=hrRO4p-XoOGxf9_6y2SVysO-o1h_AdPMsVwaNbKf1Ww,7299 +pygments/formatters/terminal.py,sha256=ExPmcdOucmPNXeXGxHZe1gs1ihoDtiPVn8eswy0CTiM,4626 +pygments/formatters/terminal256.py,sha256=YWmNHtnycxCPe0IeIxSrUziquIzejubuZMkG9a4Mubw,11717 +pygments/lexer.py,sha256=pG-6VE6DB8oXjxmG9SgDBTsIpf4afTDG6USMFBlRhRY,31987 +pygments/lexers/__init__.py,sha256=na2cMfV3HpzF0xweVrzn3OT-AusS1xJv_SnOkcBBb6s,11116 +pygments/lexers/__pycache__/__init__.cpython-311.pyc,, +pygments/lexers/__pycache__/_ada_builtins.cpython-311.pyc,, +pygments/lexers/__pycache__/_asy_builtins.cpython-311.pyc,, +pygments/lexers/__pycache__/_cl_builtins.cpython-311.pyc,, +pygments/lexers/__pycache__/_cocoa_builtins.cpython-311.pyc,, +pygments/lexers/__pycache__/_csound_builtins.cpython-311.pyc,, +pygments/lexers/__pycache__/_css_builtins.cpython-311.pyc,, 
+pygments/lexers/__pycache__/_julia_builtins.cpython-311.pyc,, +pygments/lexers/__pycache__/_lasso_builtins.cpython-311.pyc,, +pygments/lexers/__pycache__/_lilypond_builtins.cpython-311.pyc,, +pygments/lexers/__pycache__/_lua_builtins.cpython-311.pyc,, +pygments/lexers/__pycache__/_mapping.cpython-311.pyc,, +pygments/lexers/__pycache__/_mql_builtins.cpython-311.pyc,, +pygments/lexers/__pycache__/_mysql_builtins.cpython-311.pyc,, +pygments/lexers/__pycache__/_openedge_builtins.cpython-311.pyc,, +pygments/lexers/__pycache__/_php_builtins.cpython-311.pyc,, +pygments/lexers/__pycache__/_postgres_builtins.cpython-311.pyc,, +pygments/lexers/__pycache__/_qlik_builtins.cpython-311.pyc,, +pygments/lexers/__pycache__/_scheme_builtins.cpython-311.pyc,, +pygments/lexers/__pycache__/_scilab_builtins.cpython-311.pyc,, +pygments/lexers/__pycache__/_sourcemod_builtins.cpython-311.pyc,, +pygments/lexers/__pycache__/_stan_builtins.cpython-311.pyc,, +pygments/lexers/__pycache__/_stata_builtins.cpython-311.pyc,, +pygments/lexers/__pycache__/_tsql_builtins.cpython-311.pyc,, +pygments/lexers/__pycache__/_usd_builtins.cpython-311.pyc,, +pygments/lexers/__pycache__/_vbscript_builtins.cpython-311.pyc,, +pygments/lexers/__pycache__/_vim_builtins.cpython-311.pyc,, +pygments/lexers/__pycache__/actionscript.cpython-311.pyc,, +pygments/lexers/__pycache__/ada.cpython-311.pyc,, +pygments/lexers/__pycache__/agile.cpython-311.pyc,, +pygments/lexers/__pycache__/algebra.cpython-311.pyc,, +pygments/lexers/__pycache__/ambient.cpython-311.pyc,, +pygments/lexers/__pycache__/amdgpu.cpython-311.pyc,, +pygments/lexers/__pycache__/ampl.cpython-311.pyc,, +pygments/lexers/__pycache__/apdlexer.cpython-311.pyc,, +pygments/lexers/__pycache__/apl.cpython-311.pyc,, +pygments/lexers/__pycache__/archetype.cpython-311.pyc,, +pygments/lexers/__pycache__/arrow.cpython-311.pyc,, +pygments/lexers/__pycache__/arturo.cpython-311.pyc,, +pygments/lexers/__pycache__/asc.cpython-311.pyc,, +pygments/lexers/__pycache__/asm.cpython-311.pyc,, +pygments/lexers/__pycache__/automation.cpython-311.pyc,, +pygments/lexers/__pycache__/bare.cpython-311.pyc,, +pygments/lexers/__pycache__/basic.cpython-311.pyc,, +pygments/lexers/__pycache__/bdd.cpython-311.pyc,, +pygments/lexers/__pycache__/berry.cpython-311.pyc,, +pygments/lexers/__pycache__/bibtex.cpython-311.pyc,, +pygments/lexers/__pycache__/boa.cpython-311.pyc,, +pygments/lexers/__pycache__/business.cpython-311.pyc,, +pygments/lexers/__pycache__/c_cpp.cpython-311.pyc,, +pygments/lexers/__pycache__/c_like.cpython-311.pyc,, +pygments/lexers/__pycache__/capnproto.cpython-311.pyc,, +pygments/lexers/__pycache__/cddl.cpython-311.pyc,, +pygments/lexers/__pycache__/chapel.cpython-311.pyc,, +pygments/lexers/__pycache__/clean.cpython-311.pyc,, +pygments/lexers/__pycache__/comal.cpython-311.pyc,, +pygments/lexers/__pycache__/compiled.cpython-311.pyc,, +pygments/lexers/__pycache__/configs.cpython-311.pyc,, +pygments/lexers/__pycache__/console.cpython-311.pyc,, +pygments/lexers/__pycache__/cplint.cpython-311.pyc,, +pygments/lexers/__pycache__/crystal.cpython-311.pyc,, +pygments/lexers/__pycache__/csound.cpython-311.pyc,, +pygments/lexers/__pycache__/css.cpython-311.pyc,, +pygments/lexers/__pycache__/d.cpython-311.pyc,, +pygments/lexers/__pycache__/dalvik.cpython-311.pyc,, +pygments/lexers/__pycache__/data.cpython-311.pyc,, +pygments/lexers/__pycache__/devicetree.cpython-311.pyc,, +pygments/lexers/__pycache__/diff.cpython-311.pyc,, +pygments/lexers/__pycache__/dotnet.cpython-311.pyc,, 
+pygments/lexers/__pycache__/dsls.cpython-311.pyc,, +pygments/lexers/__pycache__/dylan.cpython-311.pyc,, +pygments/lexers/__pycache__/ecl.cpython-311.pyc,, +pygments/lexers/__pycache__/eiffel.cpython-311.pyc,, +pygments/lexers/__pycache__/elm.cpython-311.pyc,, +pygments/lexers/__pycache__/elpi.cpython-311.pyc,, +pygments/lexers/__pycache__/email.cpython-311.pyc,, +pygments/lexers/__pycache__/erlang.cpython-311.pyc,, +pygments/lexers/__pycache__/esoteric.cpython-311.pyc,, +pygments/lexers/__pycache__/ezhil.cpython-311.pyc,, +pygments/lexers/__pycache__/factor.cpython-311.pyc,, +pygments/lexers/__pycache__/fantom.cpython-311.pyc,, +pygments/lexers/__pycache__/felix.cpython-311.pyc,, +pygments/lexers/__pycache__/fift.cpython-311.pyc,, +pygments/lexers/__pycache__/floscript.cpython-311.pyc,, +pygments/lexers/__pycache__/forth.cpython-311.pyc,, +pygments/lexers/__pycache__/fortran.cpython-311.pyc,, +pygments/lexers/__pycache__/foxpro.cpython-311.pyc,, +pygments/lexers/__pycache__/freefem.cpython-311.pyc,, +pygments/lexers/__pycache__/func.cpython-311.pyc,, +pygments/lexers/__pycache__/functional.cpython-311.pyc,, +pygments/lexers/__pycache__/futhark.cpython-311.pyc,, +pygments/lexers/__pycache__/gcodelexer.cpython-311.pyc,, +pygments/lexers/__pycache__/gdscript.cpython-311.pyc,, +pygments/lexers/__pycache__/go.cpython-311.pyc,, +pygments/lexers/__pycache__/grammar_notation.cpython-311.pyc,, +pygments/lexers/__pycache__/graph.cpython-311.pyc,, +pygments/lexers/__pycache__/graphics.cpython-311.pyc,, +pygments/lexers/__pycache__/graphviz.cpython-311.pyc,, +pygments/lexers/__pycache__/gsql.cpython-311.pyc,, +pygments/lexers/__pycache__/haskell.cpython-311.pyc,, +pygments/lexers/__pycache__/haxe.cpython-311.pyc,, +pygments/lexers/__pycache__/hdl.cpython-311.pyc,, +pygments/lexers/__pycache__/hexdump.cpython-311.pyc,, +pygments/lexers/__pycache__/html.cpython-311.pyc,, +pygments/lexers/__pycache__/idl.cpython-311.pyc,, +pygments/lexers/__pycache__/igor.cpython-311.pyc,, +pygments/lexers/__pycache__/inferno.cpython-311.pyc,, +pygments/lexers/__pycache__/installers.cpython-311.pyc,, +pygments/lexers/__pycache__/int_fiction.cpython-311.pyc,, +pygments/lexers/__pycache__/iolang.cpython-311.pyc,, +pygments/lexers/__pycache__/j.cpython-311.pyc,, +pygments/lexers/__pycache__/javascript.cpython-311.pyc,, +pygments/lexers/__pycache__/jmespath.cpython-311.pyc,, +pygments/lexers/__pycache__/jslt.cpython-311.pyc,, +pygments/lexers/__pycache__/jsonnet.cpython-311.pyc,, +pygments/lexers/__pycache__/julia.cpython-311.pyc,, +pygments/lexers/__pycache__/jvm.cpython-311.pyc,, +pygments/lexers/__pycache__/kuin.cpython-311.pyc,, +pygments/lexers/__pycache__/lilypond.cpython-311.pyc,, +pygments/lexers/__pycache__/lisp.cpython-311.pyc,, +pygments/lexers/__pycache__/macaulay2.cpython-311.pyc,, +pygments/lexers/__pycache__/make.cpython-311.pyc,, +pygments/lexers/__pycache__/markup.cpython-311.pyc,, +pygments/lexers/__pycache__/math.cpython-311.pyc,, +pygments/lexers/__pycache__/matlab.cpython-311.pyc,, +pygments/lexers/__pycache__/maxima.cpython-311.pyc,, +pygments/lexers/__pycache__/meson.cpython-311.pyc,, +pygments/lexers/__pycache__/mime.cpython-311.pyc,, +pygments/lexers/__pycache__/minecraft.cpython-311.pyc,, +pygments/lexers/__pycache__/mips.cpython-311.pyc,, +pygments/lexers/__pycache__/ml.cpython-311.pyc,, +pygments/lexers/__pycache__/modeling.cpython-311.pyc,, +pygments/lexers/__pycache__/modula2.cpython-311.pyc,, +pygments/lexers/__pycache__/monte.cpython-311.pyc,, 
+pygments/lexers/__pycache__/mosel.cpython-311.pyc,, +pygments/lexers/__pycache__/ncl.cpython-311.pyc,, +pygments/lexers/__pycache__/nimrod.cpython-311.pyc,, +pygments/lexers/__pycache__/nit.cpython-311.pyc,, +pygments/lexers/__pycache__/nix.cpython-311.pyc,, +pygments/lexers/__pycache__/oberon.cpython-311.pyc,, +pygments/lexers/__pycache__/objective.cpython-311.pyc,, +pygments/lexers/__pycache__/ooc.cpython-311.pyc,, +pygments/lexers/__pycache__/other.cpython-311.pyc,, +pygments/lexers/__pycache__/parasail.cpython-311.pyc,, +pygments/lexers/__pycache__/parsers.cpython-311.pyc,, +pygments/lexers/__pycache__/pascal.cpython-311.pyc,, +pygments/lexers/__pycache__/pawn.cpython-311.pyc,, +pygments/lexers/__pycache__/perl.cpython-311.pyc,, +pygments/lexers/__pycache__/phix.cpython-311.pyc,, +pygments/lexers/__pycache__/php.cpython-311.pyc,, +pygments/lexers/__pycache__/pointless.cpython-311.pyc,, +pygments/lexers/__pycache__/pony.cpython-311.pyc,, +pygments/lexers/__pycache__/praat.cpython-311.pyc,, +pygments/lexers/__pycache__/procfile.cpython-311.pyc,, +pygments/lexers/__pycache__/prolog.cpython-311.pyc,, +pygments/lexers/__pycache__/promql.cpython-311.pyc,, +pygments/lexers/__pycache__/python.cpython-311.pyc,, +pygments/lexers/__pycache__/q.cpython-311.pyc,, +pygments/lexers/__pycache__/qlik.cpython-311.pyc,, +pygments/lexers/__pycache__/qvt.cpython-311.pyc,, +pygments/lexers/__pycache__/r.cpython-311.pyc,, +pygments/lexers/__pycache__/rdf.cpython-311.pyc,, +pygments/lexers/__pycache__/rebol.cpython-311.pyc,, +pygments/lexers/__pycache__/resource.cpython-311.pyc,, +pygments/lexers/__pycache__/ride.cpython-311.pyc,, +pygments/lexers/__pycache__/rita.cpython-311.pyc,, +pygments/lexers/__pycache__/rnc.cpython-311.pyc,, +pygments/lexers/__pycache__/roboconf.cpython-311.pyc,, +pygments/lexers/__pycache__/robotframework.cpython-311.pyc,, +pygments/lexers/__pycache__/ruby.cpython-311.pyc,, +pygments/lexers/__pycache__/rust.cpython-311.pyc,, +pygments/lexers/__pycache__/sas.cpython-311.pyc,, +pygments/lexers/__pycache__/savi.cpython-311.pyc,, +pygments/lexers/__pycache__/scdoc.cpython-311.pyc,, +pygments/lexers/__pycache__/scripting.cpython-311.pyc,, +pygments/lexers/__pycache__/sgf.cpython-311.pyc,, +pygments/lexers/__pycache__/shell.cpython-311.pyc,, +pygments/lexers/__pycache__/sieve.cpython-311.pyc,, +pygments/lexers/__pycache__/slash.cpython-311.pyc,, +pygments/lexers/__pycache__/smalltalk.cpython-311.pyc,, +pygments/lexers/__pycache__/smithy.cpython-311.pyc,, +pygments/lexers/__pycache__/smv.cpython-311.pyc,, +pygments/lexers/__pycache__/snobol.cpython-311.pyc,, +pygments/lexers/__pycache__/solidity.cpython-311.pyc,, +pygments/lexers/__pycache__/sophia.cpython-311.pyc,, +pygments/lexers/__pycache__/special.cpython-311.pyc,, +pygments/lexers/__pycache__/spice.cpython-311.pyc,, +pygments/lexers/__pycache__/sql.cpython-311.pyc,, +pygments/lexers/__pycache__/srcinfo.cpython-311.pyc,, +pygments/lexers/__pycache__/stata.cpython-311.pyc,, +pygments/lexers/__pycache__/supercollider.cpython-311.pyc,, +pygments/lexers/__pycache__/tal.cpython-311.pyc,, +pygments/lexers/__pycache__/tcl.cpython-311.pyc,, +pygments/lexers/__pycache__/teal.cpython-311.pyc,, +pygments/lexers/__pycache__/templates.cpython-311.pyc,, +pygments/lexers/__pycache__/teraterm.cpython-311.pyc,, +pygments/lexers/__pycache__/testing.cpython-311.pyc,, +pygments/lexers/__pycache__/text.cpython-311.pyc,, +pygments/lexers/__pycache__/textedit.cpython-311.pyc,, +pygments/lexers/__pycache__/textfmts.cpython-311.pyc,, 
+pygments/lexers/__pycache__/theorem.cpython-311.pyc,, +pygments/lexers/__pycache__/thingsdb.cpython-311.pyc,, +pygments/lexers/__pycache__/tlb.cpython-311.pyc,, +pygments/lexers/__pycache__/tnt.cpython-311.pyc,, +pygments/lexers/__pycache__/trafficscript.cpython-311.pyc,, +pygments/lexers/__pycache__/typoscript.cpython-311.pyc,, +pygments/lexers/__pycache__/ul4.cpython-311.pyc,, +pygments/lexers/__pycache__/unicon.cpython-311.pyc,, +pygments/lexers/__pycache__/urbi.cpython-311.pyc,, +pygments/lexers/__pycache__/usd.cpython-311.pyc,, +pygments/lexers/__pycache__/varnish.cpython-311.pyc,, +pygments/lexers/__pycache__/verification.cpython-311.pyc,, +pygments/lexers/__pycache__/web.cpython-311.pyc,, +pygments/lexers/__pycache__/webassembly.cpython-311.pyc,, +pygments/lexers/__pycache__/webidl.cpython-311.pyc,, +pygments/lexers/__pycache__/webmisc.cpython-311.pyc,, +pygments/lexers/__pycache__/whiley.cpython-311.pyc,, +pygments/lexers/__pycache__/wowtoc.cpython-311.pyc,, +pygments/lexers/__pycache__/wren.cpython-311.pyc,, +pygments/lexers/__pycache__/x10.cpython-311.pyc,, +pygments/lexers/__pycache__/xorg.cpython-311.pyc,, +pygments/lexers/__pycache__/yang.cpython-311.pyc,, +pygments/lexers/__pycache__/zig.cpython-311.pyc,, +pygments/lexers/_ada_builtins.py,sha256=9BqorV9ID_Z5RLBeVbQn9aKg6j7TCk-knBgfDn9D2Ec,1543 +pygments/lexers/_asy_builtins.py,sha256=quZMwEPSom6Fs9yC9kVxUdKuSaNiwDgwzR_XiuEDE2I,27287 +pygments/lexers/_cl_builtins.py,sha256=750nWBIkaITu_ox67sfmcwEx7AuV1_m-GvbUI3im_cs,13994 +pygments/lexers/_cocoa_builtins.py,sha256=6DmhiEpFVh6TQwfLNh1c5MQ3oQAPiPRHcNpIFPSbBII,105182 +pygments/lexers/_csound_builtins.py,sha256=tmvd0MF7849I7LncGtKGIAovDW-oOQNnLU-coNqoTxI,18414 +pygments/lexers/_css_builtins.py,sha256=fghSPBeEm_bOYuzDZH03s9YymLIGzGzKOnCFDMZPeW0,12446 +pygments/lexers/_julia_builtins.py,sha256=hNIvB343zzkLq2SEnlpx7lzOj5fwYZN6Dv6upo3gus4,11883 +pygments/lexers/_lasso_builtins.py,sha256=1FXcRTixcTHwYyUTlRGebtcfsn-m7_UbKSfoAtPMtY4,134510 +pygments/lexers/_lilypond_builtins.py,sha256=YtYhV518MbcENY59RQQDygsO7MBLDMD9A2NQc4lKbfw,106781 +pygments/lexers/_lua_builtins.py,sha256=D4C-HLWIHNfq-zKdI0h6ihL9psB-Vas8HH2r7Pgr0rI,8080 +pygments/lexers/_mapping.py,sha256=lSx8z1rjmGROaWOVpDJ6PcxYkgVVnlsXfErWRwgVXcg,64980 +pygments/lexers/_mql_builtins.py,sha256=g54kSJXLwIsMGQFUbvP-bB03_S0ypjt5P8_wOmIf_3s,24713 +pygments/lexers/_mysql_builtins.py,sha256=stNurNB2CsukMyF9InuKa1zstRdbVRy0SrngsNqBsjk,25806 +pygments/lexers/_openedge_builtins.py,sha256=b9Jaw-Ji90vnrlykHfHL5rre9Lu7A3Yp1_ICTQWraLE,49398 +pygments/lexers/_php_builtins.py,sha256=8gc7AiFhgs9f1RDcMj3v7f0jtVoZZ6snHC9zJCkWgSg,107876 +pygments/lexers/_postgres_builtins.py,sha256=pr7tgitRL_ramhQCIOHIoIV9okWgrnDg2DPtAbb0lMU,12316 +pygments/lexers/_qlik_builtins.py,sha256=Z3NWn8hW3SgjsKseHjIy4n_U-Ogli0JaXSf9AAO0WDc,12595 +pygments/lexers/_scheme_builtins.py,sha256=1jEwdJB0KQhsYX9zBicy7ny0UEb0C9087kofxHW0S2s,32564 +pygments/lexers/_scilab_builtins.py,sha256=oDqV7i2FzHfn_ac6Cope9C6FRO5UC1X9aMaiCju_ZcI,52377 +pygments/lexers/_sourcemod_builtins.py,sha256=Sg7_eD5LsZxmiAn5lP0I65Fg0xffjBW_jOQRxuTL73g,26745 +pygments/lexers/_stan_builtins.py,sha256=CYZl5FHihyFyL3DT_CBePc-6JsUYx5byZuykq6gwWik,13445 +pygments/lexers/_stata_builtins.py,sha256=FlTQXy1mTrU4dtDwHLn_BgjfXgLJHCjT5ppgfSk3-9k,27227 +pygments/lexers/_tsql_builtins.py,sha256=RVFZu9_cQBjE-6tbR32zPTmf9EbAmOfcEcscCjwvv-o,15460 +pygments/lexers/_usd_builtins.py,sha256=zzudqdYKn7_bsD2UimVTGCR8oW4O9ks2k0KVw-j8hkU,1658 
+pygments/lexers/_vbscript_builtins.py,sha256=0zOPR5Slupgp8ZqWTXu7FHwMDgJYzT8x44ZbRGNtVpU,4225 +pygments/lexers/_vim_builtins.py,sha256=aKqubOlfPOl0F5ge8HWjdz7WAeV_sWNyEOx94wcQQCc,57066 +pygments/lexers/actionscript.py,sha256=PjwYIKgYMtaBbzMYkLxlxcVNoGrkwVd5O63nOiKwEuE,11676 +pygments/lexers/ada.py,sha256=93CBleaaRa6uL5LcXgNKSXmggNZ74bZK4ndwjuTNlms,5320 +pygments/lexers/agile.py,sha256=95de0QM13cXvQYPrDmy-A0hfNxsqq-nSsJtCpY3DCIY,876 +pygments/lexers/algebra.py,sha256=DOKWvYgOwGlLQVJ3xKd1gym5Vte21MH2C15V6C0r3ZA,9873 +pygments/lexers/ambient.py,sha256=8f7H7fqB5lWYRt7WRiI0S8ECbOwbthnL4qS6cbJc65s,2606 +pygments/lexers/amdgpu.py,sha256=UT5-b6ililJ6BwMpoBDvjBT2vQw0jeObH57c18KzfUg,1603 +pygments/lexers/ampl.py,sha256=VxuaBx5r5ilMLY9HxzGcKr2g4FyWl9xgd_IKOrorIHc,4177 +pygments/lexers/apdlexer.py,sha256=9ZO8SSyfTqQhkDzjbOflO-kALnheHiT5lWb_S1gW-2Q,26654 +pygments/lexers/apl.py,sha256=N_9pYe_LVWI6-0HD31VmBZsF-6or3h4Q2QYp0jxY-vU,3405 +pygments/lexers/archetype.py,sha256=55YRoEaaFiehkC_UMgUYOGxDFIgbXRXfUwMzG49ufCU,11469 +pygments/lexers/arrow.py,sha256=ed8-CWv3vVPw2UDDutxGBTFeEsnX-aI2a-LRtdE0EWg,3565 +pygments/lexers/arturo.py,sha256=xrtbS2mrQPqABQSo2plP0wpSvfxoteC8l-ivs5epitA,11417 +pygments/lexers/asc.py,sha256=1JaNmliU60TOhdRtVWDNEWz2AyQavIz-lj8HiypdwZ4,1621 +pygments/lexers/asm.py,sha256=ZrFEGjh2sM7HXPcFj3GiERjVLkhrgP57SJvhONxnsOo,41243 +pygments/lexers/automation.py,sha256=MVjc_Y-gtjhF9AnMzTt1gy8ZmLmpzqzMRrLyVomL-zs,19815 +pygments/lexers/bare.py,sha256=3aSBX6Y80dSblHZkfKSch8f4rBHC9kxPC5l5t4XWtbY,3021 +pygments/lexers/basic.py,sha256=3zrIqhU10IzBV3_BSNmkvijOhvFRjHoRRWyQZmi-ORQ,27923 +pygments/lexers/bdd.py,sha256=qBXpTsbDA-4oQtFmFEUrNON5N5jDMcDCS1xPMx6V4E8,1652 +pygments/lexers/berry.py,sha256=GW5guQj9TKMhiZWusJtA8ln9uqMOwyPx-RlhULEp20c,3211 +pygments/lexers/bibtex.py,sha256=zF5J72nLbshzVi9C-bnd0ZAp6wRApFG4y-opcqCPo3k,4723 +pygments/lexers/boa.py,sha256=uU2IZb1Q0Eg38dkCvJI0sUCeptv8nyLO8svJp8VXjXY,3915 +pygments/lexers/business.py,sha256=_oNhVsxHMz8o6VSM8QNId-LgbUBpt2jQYYX4Iv73P6s,28112 +pygments/lexers/c_cpp.py,sha256=Dpu955YDV1bFzgG0vUis76w6jiL-1SJDUGzt3aVW7LA,17791 +pygments/lexers/c_like.py,sha256=2L0E2kXTDdgdxeK4MG12wbEDCz1XYrV4PqHfr7mYJeU,29206 +pygments/lexers/capnproto.py,sha256=RJMrCyFnGC-2G8ABritL4n75GYeQSDEahe3S0nJ6k-I,2175 +pygments/lexers/cddl.py,sha256=toE1wOpZM03lDcJabkcdGtnbRtzYnfaDPsls5tcCHCM,5182 +pygments/lexers/chapel.py,sha256=JqJ5oLEdg-tkOSAxxzT11dwRYGWT9asNH_5VTgd0oWk,5014 +pygments/lexers/clean.py,sha256=lVUtk21Oj4qGIbfxmNkifuVvlMvAwv_-EM0p6V3lqUY,6395 +pygments/lexers/comal.py,sha256=6jEqPDl9tB4y3mnUa-op7u5p-4fjhcJ9jZfj8pMU8tU,3156 +pygments/lexers/compiled.py,sha256=vS3om47ex-h22RRuVCuV75235Q3Mw86UJ6qJy7fQZ5Q,1407 +pygments/lexers/configs.py,sha256=p28OShOMl53sG1EQ42p9-3XN98CgseQd6bLhhvjDoR8,41825 +pygments/lexers/console.py,sha256=AWdMlp5AZ42FS3P_VUkz50bDbVjLnHmks49hXTbt_IU,4148 +pygments/lexers/cplint.py,sha256=ImcRkFnOVtvBR67V23GgXBPXZrPeCqDXEAoTnDxTFbI,1390 +pygments/lexers/crystal.py,sha256=G0f2vAs1hb1I7rVCHpcDmlmGbiFyBfBB_vn7XMOLHEY,15756 +pygments/lexers/csound.py,sha256=J3y_XkCo3YkBccdVTm51SqNWsSdQY3PqNKeDAJrdVBw,16994 +pygments/lexers/css.py,sha256=BD55SDuaHybbq2_c4CQUGLpSsF9bwZ4WxCDs1rWlwT8,25314 +pygments/lexers/d.py,sha256=p2CPwH1nlmRkL9ZOuI-ngPsOgLahHY8jsXJa47g0f6M,9875 +pygments/lexers/dalvik.py,sha256=FPrqh0Utlf2rObbwAYlMy2NHdTUWsmoZO19NfxYvkpQ,4607 +pygments/lexers/data.py,sha256=mtCLYefrhV_9DOn4C4ccwSJHHOMXof3iY1LONYBRO5s,26940 +pygments/lexers/devicetree.py,sha256=eta85yUbVbdFz2qGxQpwEWgiP_oVwL8WlLlFibswJWI,4020 
+pygments/lexers/diff.py,sha256=N24UjVL9y1CAdotLVWLCsNONm6ze6zJN7Obgz5Twvkg,5164 +pygments/lexers/dotnet.py,sha256=ajyH5dwlp2phHF9HtrqryLlrEgmovHmdUlxQ46YK6tg,29696 +pygments/lexers/dsls.py,sha256=rOq1dt09hd_9bLtwNVZNy0UGzu30OFe3r0SLCCGXnJw,36774 +pygments/lexers/dylan.py,sha256=fOgYUc9mXkf6tRGd82wUrsEOAroXa2gYxzMhJzDGZsU,10380 +pygments/lexers/ecl.py,sha256=udGTA3tPw86hP_v0J5YodBURQkMx2MeMd9ixPcWdOP0,6372 +pygments/lexers/eiffel.py,sha256=S6-eiKz9qjZXk_VJE9xLa8NrEO3V0F7hdbkVtu4Zt_w,2690 +pygments/lexers/elm.py,sha256=Ic3AK6wH19iQtETVAU71cvq2bS_x9F__NIm4KlOsBXg,3152 +pygments/lexers/elpi.py,sha256=wN0qfDD2ezOt6UdaNc66la9I7bmqsW7hfNTx6ZANNc4,6370 +pygments/lexers/email.py,sha256=Ieu8fO3GapDB0lV2dI_ZnNj-us7Ts-CVlbLQrVdYXoc,4742 +pygments/lexers/erlang.py,sha256=JTjs9wXOuydAy7gNM2Uw38_o3lFRHwV7c7Cpye_J3iI,19170 +pygments/lexers/esoteric.py,sha256=72umCmQ9PjY0Clo6dZW7mnY_XnDyDExAcwicyJaiN4U,10396 +pygments/lexers/ezhil.py,sha256=ozD2yYcKYuZy8Y3vkbKEQH9TlEMcgcUsmza5D1ORHcw,3273 +pygments/lexers/factor.py,sha256=wxWEioRLplqZ3ZJpibJCctp_rEuVJVZ2Z9YcWc8wqsA,19531 +pygments/lexers/fantom.py,sha256=lunJPZfkUF9-OAi0tNcy_JR0mudGYtHVM0y0HWsJ8Vo,10197 +pygments/lexers/felix.py,sha256=H5ttiqj4qDENOPl3g0hZUDuplxA_M16JAXstmbXWALI,9646 +pygments/lexers/fift.py,sha256=7R0YDRX1XmPPzFa1Tre1VIFxh89TkACkviAMK-voCyM,1621 +pygments/lexers/floscript.py,sha256=TKZZDq6wihSZwkjk3DrRZ2TXq0bOyEgqHycaReyHseY,2668 +pygments/lexers/forth.py,sha256=UFjgCW4XsvnQYO-DXKEISPp2RvbBB2JVSd6sBrM_Sbo,7194 +pygments/lexers/fortran.py,sha256=avYkb_TXUX8QWt7rrZGyFq3Z_Oug67_x8tnh88G0HhU,10336 +pygments/lexers/foxpro.py,sha256=s1qwixRW9cCywqsVnZJ0ik_1B17lxPjEF4T0k6Xz7xo,26212 +pygments/lexers/freefem.py,sha256=5QwQX6ciIm2dW63VlAF9uD09Elsgy6drU0CCvdsv-OU,26914 +pygments/lexers/func.py,sha256=uFfm5KAI09ahrDYb6HOnIaUxjdFQ4-OhWFEGCdGxwuY,3622 +pygments/lexers/functional.py,sha256=XPPWLCDQt1LYwLXZxpTfAatB0BkjiubUiVOIHQvCjQ0,674 +pygments/lexers/futhark.py,sha256=5RlltUlCtYzAB9iAWvobVsEV4S6X_qFypT0ZxiB3tXY,3732 +pygments/lexers/gcodelexer.py,sha256=8kvQasYLSeFa7yjipOx8e0zgGGFQOJgFWLz4PtXpLt4,826 +pygments/lexers/gdscript.py,sha256=HpLvRVPYswGG2aSDSUq-JtOvDCtdyj6wkDxK-wIB9SU,7543 +pygments/lexers/go.py,sha256=m9ZZlulHhx_5D-tyrVrqOng9maHrPLYBnxxVg61PvhI,3761 +pygments/lexers/grammar_notation.py,sha256=LjsPCa5mtiVfagjiLuf9bEHZQTFW6nsbDeQYLC0kCa8,7980 +pygments/lexers/graph.py,sha256=Cm_dBin3iW31jXPy1yRq9dU0-3Zksyy-IKVlKLJF8Nc,3861 +pygments/lexers/graphics.py,sha256=m-uomUeUeQIxZAIcza4OrZ5y7UuDK9akXWVyvyxLzcI,38931 +pygments/lexers/graphviz.py,sha256=ZujNvllszspMI0BD6BaZgxO1UKZHTCM8WmQ-OmKube4,1935 +pygments/lexers/gsql.py,sha256=tbOWBBzp17gicHj5CXzY1SP8d218ubqhl56bga6bFUE,3991 +pygments/lexers/haskell.py,sha256=bsWwYungJg1QEZALjPJP3rlY2pCRSmbBYpDoNGC6FTA,32898 +pygments/lexers/haxe.py,sha256=aBXcC389HlgCTjP0cfzFlC9t0lHrXwW4uerZ4HQy3NM,30976 +pygments/lexers/hdl.py,sha256=xlunVpnsP05fMYaL1XDOFJ8lK9I_TtE48G4CI3p0uQg,22520 +pygments/lexers/hexdump.py,sha256=kuh1bgrn_zrulUZyc8vv1-7ZBppKVOZN8BaK7vTqjwU,3603 +pygments/lexers/html.py,sha256=B9LiFqh9VB9CL5BWhdi3_VcFE6tCs3__owyh6cqJP_0,19879 +pygments/lexers/idl.py,sha256=1vtdhCVNzDel2Z-xAXKfteKRPYzjqOzmVqWV2xyunVE,15450 +pygments/lexers/igor.py,sha256=cEGrCOODpPGVRJ5mQlfyOcvi0qUHwWro6X1Mm1G_1P4,30631 +pygments/lexers/inferno.py,sha256=jd1m9w-NuLYi_X-xTx_lBoXloTZ2UDVoPUKyndBgaHg,3136 +pygments/lexers/installers.py,sha256=HBZL4pkolvNL78X9hF5WFoDWA_LY9Ev0oOoBAKAVNcw,13178 +pygments/lexers/int_fiction.py,sha256=qeYrFla6FqxJedcw0IXzOciKbJP1kV2QmDpEJ7rpkPU,57119 
+pygments/lexers/iolang.py,sha256=5wdWeVVgybETc94uPmLGUcsedr5viXQ0bnpNTVua0Bo,1906 +pygments/lexers/j.py,sha256=Yut2H1YqXaIobVCPd0vHPGVrqfVmDpsjAoHqvnVWPpo,4854 +pygments/lexers/javascript.py,sha256=c4Shx5iwskXf2crPPE8xwi_yEwQE-TWJfbtF_NovFlw,62859 +pygments/lexers/jmespath.py,sha256=OtJKUxa_heEFHEOcFkxfFW4iKkp9FQX3thwxJV7yeK4,2059 +pygments/lexers/jslt.py,sha256=ihdYKXtQGA6ODksKVTuujbv9pZNI_VkejgX2IRQEuq4,3701 +pygments/lexers/jsonnet.py,sha256=a2RSW6jmVHJ967m4g_ScEXDEvfVnqGQa-3feIqntgS8,5635 +pygments/lexers/julia.py,sha256=_BUPw1GQL-HlGcoXsldV6IIMfliqSqhaExxwJiDHMWY,11646 +pygments/lexers/jvm.py,sha256=GWd3TIJO4vxbkagY6P2PnlDsqBPdyssXi89XdSyiX_8,72929 +pygments/lexers/kuin.py,sha256=S-BE0wZXAixkA4HkgXEs7CX9AWS89msaeqNxacQ7Ij4,11406 +pygments/lexers/lilypond.py,sha256=C8Kaipx58ESHp25shlS0x3m1AVO6Nlmy78b8CsRjeg8,9753 +pygments/lexers/lisp.py,sha256=tvWRtYFrCuC7ZI4HEbK9hhnj_DlRARjqs1_tYBntfN4,144039 +pygments/lexers/macaulay2.py,sha256=NnhQW3v-jRTSgupYv-pCKpixQBxR8vMfPvsQAErGnNQ,31914 +pygments/lexers/make.py,sha256=XhIe6D0hx1gNm-K8lr7fFhu6Shv-kEZZubzK6wZw1VQ,7550 +pygments/lexers/markup.py,sha256=Oj38Jny-qi6fpMkx6Ww5cYAtJeK-jzKU_U2dmUYhbe0,26797 +pygments/lexers/math.py,sha256=QG4321ZoGwGMptGIUZbVkVP6nrYiKLKMocWEiYemdUI,676 +pygments/lexers/matlab.py,sha256=BmYtM5CkDfuIWVgFfLLTuzZEu2NN9P6h9Nj4zL-gvFM,132852 +pygments/lexers/maxima.py,sha256=sp3epFxSmL9aMtavhkPxHObjHCo9CUayrE5GCwPBOVc,2716 +pygments/lexers/meson.py,sha256=KGfRzEHGeurl6nmyCyvLPBp6sKgH9-dmGe89elmGzTA,4337 +pygments/lexers/mime.py,sha256=BuvfQO-eDIXWRJePQnjd_U3WQ__ViSoNHPG_nOsYi7w,7538 +pygments/lexers/minecraft.py,sha256=iw2qLM8gFrXh0XkmrmygqO1JV4UrYgBllgUMb4BnQao,13846 +pygments/lexers/mips.py,sha256=Byt2XnCaAkkLsFdutzj_JjgoHPOUSnb0IonjFDN6XbU,4604 +pygments/lexers/ml.py,sha256=X0VM9cbykg3QpAY6GyAcIFLt5BftnTMUS4VzgZWKj1k,35324 +pygments/lexers/modeling.py,sha256=-ZV9DRyDQcCcJJG0yvXD_PS_KoRd-u9zfJYnHX0d_lk,13524 +pygments/lexers/modula2.py,sha256=eeuDGPFGFTdnGRXgyvI0HGpO8Iakadc2ZOOXbeRT9Yc,53073 +pygments/lexers/monte.py,sha256=4l8-KCATmILYp2q_5Sa0gUfiwj37y_ttw99ILapY1e4,6290 +pygments/lexers/mosel.py,sha256=StaXhfFdQTKB0mSCStxmA58VSD88sJnkjwNxHmk5qmI,9187 +pygments/lexers/ncl.py,sha256=XJaGVsSHlAavTRuP8dUB9Fn7duwmTsiNRKKVL9pmhlQ,63962 +pygments/lexers/nimrod.py,sha256=kwqLEglbD7KvOcYPa4lGM2sRKCMmUXQN93Zc1WDtPls,6416 +pygments/lexers/nit.py,sha256=6hXbZnP3JabZ_VW0XrZDwruqvxavvwYwwsWqyCog0Cs,2726 +pygments/lexers/nix.py,sha256=t9hU1L9H703AAaYAJjbA24C2LRXNxOl0_TZKw3ZpcVw,4015 +pygments/lexers/oberon.py,sha256=ilMvauo5aCL_qmp0D4dOF_1QHuNNBbsJzPUwo_R1Zks,4169 +pygments/lexers/objective.py,sha256=a15atNurUMQVN-16fPVnVGk9BRwhXUOb8N_ghIwb_PY,22961 +pygments/lexers/ooc.py,sha256=0xLNAdkdaBA7ntHA5i3MKjOs0mh-9c77VQu3ELeYU4E,2982 +pygments/lexers/other.py,sha256=9-OZdK9kQ3rHtEBOnyZ6YYn0FL8evjU1X2YiDMdr72I,1744 +pygments/lexers/parasail.py,sha256=et4cbhlJ2tFgBpYPC6x_dUNHoHUVoQZEVL8cjSTuPHQ,2720 +pygments/lexers/parsers.py,sha256=lP8WV60UmS910YydAGg3utDFH6XmXMxV4YY4Ydvf8Tw,25904 +pygments/lexers/pascal.py,sha256=Gdxz8judFa9SNdaQddXbePCd6Efs-y4PhhE9gbhQuQY,30880 +pygments/lexers/pawn.py,sha256=lJXk4GDxSAmf17l9YTuogxJa2QLZP_1ClvTH1Ijx_wQ,8146 +pygments/lexers/perl.py,sha256=daB5xbHa2KNy8iPjCsQBVc0ITO3j85Tj7T5JovHmrtM,39170 +pygments/lexers/phix.py,sha256=r607k8eFtYRWRcd3jF8SOYVN76PFs7wASFZCUdshOhQ,23252 +pygments/lexers/php.py,sha256=8kg3Ag31akIHkL0jyTS0zcea0tolK6V6HhL1jPTL3tE,12505 +pygments/lexers/pointless.py,sha256=FoxCSiJKEaYKKvbZ9kA6JqUZ-v3bDcadr9KaMhoNy-U,1975 
+pygments/lexers/pony.py,sha256=spaEuSzStQqCwAsMOyj8PCzkZ7yt2kzQVGUyLF89CuI,3244 +pygments/lexers/praat.py,sha256=Lni_aar1kVyTX4mjmpPJphNKCZv4Y5NiwNmHkozo-_4,12677 +pygments/lexers/procfile.py,sha256=LZO6OtDRwqaC4H0qI7ixRxOKrugOEdj-Ea6rarRfpoI,1156 +pygments/lexers/prolog.py,sha256=xtXLU_SDHAWZ47PcpW_LdxdJTQQbIt0i1YZRWiAuQ9k,12351 +pygments/lexers/promql.py,sha256=DYp4nDL_XcOqv98aProsERw1WoE_IyZdXhIAKk2oCYk,4715 +pygments/lexers/python.py,sha256=YgccSRDRVwTiSipcoGTI3JukSFokKb6psKNRSgDICbQ,53524 +pygments/lexers/q.py,sha256=GXhzLT6qbsc0LzuERBD2Gj_evoXsGZJNBhVrjOs4T0Q,6932 +pygments/lexers/qlik.py,sha256=vLcdNqAz5fGmc0sRzY4USgip-IfXUI_pq151LepCR48,3665 +pygments/lexers/qvt.py,sha256=6n_eR2ewHMH4FbXHQYMVlKxTwzlpA47vZHi6lmC774M,6072 +pygments/lexers/r.py,sha256=rVeYGy-2QPLUFYxPKsOdmKJHto226A-9zoQawR7ATiY,6185 +pygments/lexers/rdf.py,sha256=xTL6e309sP14fufIEenM2xgdYVH1MSAwUPFIPmQOCfw,15790 +pygments/lexers/rebol.py,sha256=JQZ51eBw8l5oB5-mJU8a5MghibS7on0HX3opXVMQlNI,18600 +pygments/lexers/resource.py,sha256=c5tALpcF4NXW0aaVDqMPW52mA3iA1L82QD6JUqat9sg,2902 +pygments/lexers/ride.py,sha256=05W-azkVXs-vDcUj7rPjHa0dgYADNsVrFEw-iW4Pdl8,5056 +pygments/lexers/rita.py,sha256=7e4bKm6GJ0xJVxOrxRXyWdfBvnYcWcvk4Ldnv-e-WvE,1128 +pygments/lexers/rnc.py,sha256=myQvBz1qIMlXCsk89e5bUSt9xzhWwB8xIOQ0loIZjmU,1973 +pygments/lexers/roboconf.py,sha256=7tqDyH39xHWW4BwFW3Om9EGIEKFE_Ni536-jiyU86Hw,1962 +pygments/lexers/robotframework.py,sha256=yGrjLaHIasu61f_t3YNcjU783VGoU2tS2AGrHjznsq0,18449 +pygments/lexers/ruby.py,sha256=Vx_knTQ2iQoBAlR_desRWdMQvT0De30AuHcSTnaZvvI,22775 +pygments/lexers/rust.py,sha256=vO6yuMivmnS-gMFPsjkZF3PMinR8m_kZzhiT_7zMCNs,8216 +pygments/lexers/sas.py,sha256=Qz-lJKNVIRtOnvLcApjMz3ZZnwny55EJft7nK1MRd8g,9400 +pygments/lexers/savi.py,sha256=i5i3tzl96ZElfzYlCaT8kROG7BYCOTigocHAnzDyZ28,4645 +pygments/lexers/scdoc.py,sha256=NpJ2F0Orr_ayE3OoNrF0R8o_4aCquBtqAVBiqZNvCvg,2239 +pygments/lexers/scripting.py,sha256=b4tK7psshicd8sEVPP-REXHE8Qbi_F12s_u317rAig0,70014 +pygments/lexers/sgf.py,sha256=mQxTXsHkgL3hvY1zdwppghJXVN4icKGGWqMK8_dOKzw,1986 +pygments/lexers/shell.py,sha256=MolJW_OgLwseg-9F3k3ckps0ao6T6HwoDM9lF2AlDnc,36344 +pygments/lexers/sieve.py,sha256=gYZmeIRE-RcEeJt8hELzhVKHdOKwkPaB3uFqlc5Le6M,2441 +pygments/lexers/slash.py,sha256=qfYTQ3WWkn8Pe1NmWAvehyWwJbT1juA5WUS4R3VCjmQ,8482 +pygments/lexers/smalltalk.py,sha256=JYnNJBqtimRyR9ykEsQkFplJiCLGoXW7VvA4BKXqCiQ,7206 +pygments/lexers/smithy.py,sha256=0QhvgEQ101fF1yBSBdqJFBxSGUZFy43phlMNfCTFBRs,2660 +pygments/lexers/smv.py,sha256=bXloPLxcyDntLqvKqh_Po5qe0cqYe_DOpiLRFKEV5oo,2773 +pygments/lexers/snobol.py,sha256=YBwLeRfXaT8dZTMqnzeuAsBeu7Pix0vfUlpTSLye4XY,2732 +pygments/lexers/solidity.py,sha256=9tkwj6imMH9GMG5g7iyC2rCuTMEUURW7bRxt77EgWh8,3127 +pygments/lexers/sophia.py,sha256=jHWK3Cnznj7rqbSpEIZS_GE5a_TNfNv0OC1Afy8YtFE,3330 +pygments/lexers/special.py,sha256=IwOkIlL8a555xHR7hMLWiuYVWTb-Gix19BoZ0d24LaQ,3414 +pygments/lexers/spice.py,sha256=g5f8iqkKG6UV_8BcclZWVYH-btNk6nvZW7aSyiq2UF8,2694 +pygments/lexers/sql.py,sha256=dGo7jbOcqbLSBPElnH45AkeGKhjZAvGf67euotbbGtw,34151 +pygments/lexers/srcinfo.py,sha256=7csqeQayNYiNT6HYRwfH7xVM5ZzYlrtHGDBXpojzN3A,1693 +pygments/lexers/stata.py,sha256=Bprs-_8e6BhgNuO4xl5toeL2-cPnr7TobR0X-l8svhU,6416 +pygments/lexers/supercollider.py,sha256=5TxrBNAERHERxWLeEcQoc65a0eQdk3UmMRwhpQP3CwE,3698 +pygments/lexers/tal.py,sha256=-VVpJ8R0YenDzPZjW47x8zuRkc3wGuXiynHD5_U5TaQ,2639 +pygments/lexers/tcl.py,sha256=G52CxD_OkWm4WtRc5mr51RMyngU0UoMT4DssseRdi1c,5513 +pygments/lexers/teal.py,sha256=m4yWBEetrTC-brmLt5fv6t1GEnFF8Wlb1RFYkRK5fA8,3523 
+pygments/lexers/templates.py,sha256=R6O_ATWGVvMLiQJW45Z5ZHqN1mkyG-bM2ofC_y1NiOk,72695 +pygments/lexers/teraterm.py,sha256=dipxLoZEBB9L4-QPH3hcbgjr4puRO9a5_jzCz8tcoPI,9719 +pygments/lexers/testing.py,sha256=Oz6NAbYkthNO8pVbqXoFCCyg7EHZ-KziTXCGMJ8VMDI,10767 +pygments/lexers/text.py,sha256=T598C2HVyjqr3SLJOyrSaOAyvWMdRkh0KJVto4Reey4,1029 +pygments/lexers/textedit.py,sha256=9BqDTqb_yyW4kLXG8o9JJZraHuRY3G3Rjp7-m2cnGFI,7609 +pygments/lexers/textfmts.py,sha256=tB12c6K46HNlPnMkYJsOkHq1WgOU5Q9iJBRk43jR2RY,15192 +pygments/lexers/theorem.py,sha256=nudVRoSM49oG7lIqr8vtQuijaUL291YA0TlMljsZVaY,20157 +pygments/lexers/thingsdb.py,sha256=OoxmXDAFqdjDoFCTMpRFaTkcFsb2ysNqf6xu6RnnufY,4228 +pygments/lexers/tlb.py,sha256=WISgfhh5GezvA1GbEQR8PPhZTuDy4m8F9r0tmZrGR4w,1377 +pygments/lexers/tnt.py,sha256=yelC-PhBToBfpQIZiDbzKHlQasNCETMvrczpRGg2oCE,10457 +pygments/lexers/trafficscript.py,sha256=w5Mdqgijll6h8P8VfFEHtxrwgDoQVpjFQ_M9026QjBw,1474 +pygments/lexers/typoscript.py,sha256=M3CIQyFyoWtWnL4qtCgcYN6W4_Qt_oOX2tOt564q2rc,8207 +pygments/lexers/ul4.py,sha256=kAtCB6j6zZ6dSttxGyabWRBoik-Vy4TU5kIT-7zEYqg,8956 +pygments/lexers/unicon.py,sha256=UuyTkgoXTq3Kl-J5FW7rka1yrwBCvO_AFhWomNDxbR8,18512 +pygments/lexers/urbi.py,sha256=kz7TX7UPHOr_liD_FQRpBn3268KD4InI9SmPIp7BlhY,6037 +pygments/lexers/usd.py,sha256=N9MUFsDqJZzw8ZdDWYaMrX_XIKxk9T2M0402pJZpYTE,3513 +pygments/lexers/varnish.py,sha256=eHQM2dbFYi-TI_413YOFB33YhldIjrNa5uefHOPFojs,7273 +pygments/lexers/verification.py,sha256=A8BIr2VUYLJCGjKgfH5-aAxqWoph_iTzg5mbpXihMpY,3885 +pygments/lexers/web.py,sha256=hgpw-BBgmZfROb8xFI5ZNBLF12MP9fRmlI2DQ79TRYM,894 +pygments/lexers/webassembly.py,sha256=4s2yUHCgioi2KhbRiB3MCphhGxh0IJatCPKmRLp3cVY,5699 +pygments/lexers/webidl.py,sha256=-XzNoWNfCbFk7Os7y8cvjR0U7iek9cdDZrcxkxxTzCU,10517 +pygments/lexers/webmisc.py,sha256=kPeKj6PdhqEWUs5jpfRmKJQwDPDx2PbIFXwOuARx6dg,40549 +pygments/lexers/whiley.py,sha256=xbY5vJKypVrRNnfgqDAVhzSbtyFEmOnnG9YrXDnhEPc,4018 +pygments/lexers/wowtoc.py,sha256=QZD9uT1Npm1gmhsx-Lg7HuLZUYmsf_DdPlQZvqJam18,4021 +pygments/lexers/wren.py,sha256=oQaw4I2-VTnSLlQ5SDDxMT62HVZrekJbsPUbvhpkxJM,3239 +pygments/lexers/x10.py,sha256=slBniBkTAoeARUp-8Pq0Uf_P7Xw0_ZdmKPjYaazZ92Q,1920 +pygments/lexers/xorg.py,sha256=HyrsUM9WU0ktPnMn-7DYgyrPyb2xQHpClPl5O5XWLkk,902 +pygments/lexers/yang.py,sha256=360AyTrEZNSFFN4xACs4xsAls4XqgjqOvVGOX7uJJ1c,4500 +pygments/lexers/zig.py,sha256=5pzDDrS-b5QRa9LC9bxO19e7JR6nCan7JAJa1FJdbhM,3953 +pygments/modeline.py,sha256=gIbMSYrjSWPk0oATz7W9vMBYkUyTK2OcdVyKjioDRvA,986 +pygments/plugin.py,sha256=0JlFSNPQ2SXzB4IEQDnHeKlmjhsT-0ONajPUHCbShb4,2579 +pygments/regexopt.py,sha256=c6xcXGpGgvCET_3VWawJJqAnOp0QttFpQEdOPNY2Py0,3072 +pygments/scanner.py,sha256=F2T2G6cpkj-yZtzGQr-sOBw5w5-96UrJWveZN6va2aM,3092 +pygments/sphinxext.py,sha256=ZZtT7K-3_CV2894C3m-K1rZcsZPo94pcZuZfvLQy2ns,6816 +pygments/style.py,sha256=DkiPE6Gj8YmBigEnlln1cI9QVEOHbi8IY-RyVSwQbYE,6245 +pygments/styles/__init__.py,sha256=RvZV2e6sBYYz5DJ-Z3bNeMm1UEBAJBQbxkIf1-w1BAI,3395 +pygments/styles/__pycache__/__init__.cpython-311.pyc,, +pygments/styles/__pycache__/abap.cpython-311.pyc,, +pygments/styles/__pycache__/algol.cpython-311.pyc,, +pygments/styles/__pycache__/algol_nu.cpython-311.pyc,, +pygments/styles/__pycache__/arduino.cpython-311.pyc,, +pygments/styles/__pycache__/autumn.cpython-311.pyc,, +pygments/styles/__pycache__/borland.cpython-311.pyc,, +pygments/styles/__pycache__/bw.cpython-311.pyc,, +pygments/styles/__pycache__/colorful.cpython-311.pyc,, +pygments/styles/__pycache__/default.cpython-311.pyc,, 
+pygments/styles/__pycache__/dracula.cpython-311.pyc,, +pygments/styles/__pycache__/emacs.cpython-311.pyc,, +pygments/styles/__pycache__/friendly.cpython-311.pyc,, +pygments/styles/__pycache__/friendly_grayscale.cpython-311.pyc,, +pygments/styles/__pycache__/fruity.cpython-311.pyc,, +pygments/styles/__pycache__/gh_dark.cpython-311.pyc,, +pygments/styles/__pycache__/gruvbox.cpython-311.pyc,, +pygments/styles/__pycache__/igor.cpython-311.pyc,, +pygments/styles/__pycache__/inkpot.cpython-311.pyc,, +pygments/styles/__pycache__/lilypond.cpython-311.pyc,, +pygments/styles/__pycache__/lovelace.cpython-311.pyc,, +pygments/styles/__pycache__/manni.cpython-311.pyc,, +pygments/styles/__pycache__/material.cpython-311.pyc,, +pygments/styles/__pycache__/monokai.cpython-311.pyc,, +pygments/styles/__pycache__/murphy.cpython-311.pyc,, +pygments/styles/__pycache__/native.cpython-311.pyc,, +pygments/styles/__pycache__/nord.cpython-311.pyc,, +pygments/styles/__pycache__/onedark.cpython-311.pyc,, +pygments/styles/__pycache__/paraiso_dark.cpython-311.pyc,, +pygments/styles/__pycache__/paraiso_light.cpython-311.pyc,, +pygments/styles/__pycache__/pastie.cpython-311.pyc,, +pygments/styles/__pycache__/perldoc.cpython-311.pyc,, +pygments/styles/__pycache__/rainbow_dash.cpython-311.pyc,, +pygments/styles/__pycache__/rrt.cpython-311.pyc,, +pygments/styles/__pycache__/sas.cpython-311.pyc,, +pygments/styles/__pycache__/solarized.cpython-311.pyc,, +pygments/styles/__pycache__/staroffice.cpython-311.pyc,, +pygments/styles/__pycache__/stata_dark.cpython-311.pyc,, +pygments/styles/__pycache__/stata_light.cpython-311.pyc,, +pygments/styles/__pycache__/tango.cpython-311.pyc,, +pygments/styles/__pycache__/trac.cpython-311.pyc,, +pygments/styles/__pycache__/vim.cpython-311.pyc,, +pygments/styles/__pycache__/vs.cpython-311.pyc,, +pygments/styles/__pycache__/xcode.cpython-311.pyc,, +pygments/styles/__pycache__/zenburn.cpython-311.pyc,, +pygments/styles/abap.py,sha256=7QsS8lOQh_M3dr76KQ3-RaYIBfEWeNosEgMsBvu_2QU,705 +pygments/styles/algol.py,sha256=kz5ILXpiYKJF11_4xOPxpG91Wm8zGt6RBqiL3sH3oRQ,2216 +pygments/styles/algol_nu.py,sha256=_-Pl6edSeuJDt5XLQLwXPhy85nYhnjKiMTSFtMZ-2O8,2231 +pygments/styles/arduino.py,sha256=xydJymOW4pLjX_ZUuyRpKNL0Cpbi-dGNT3-VQUW9wu4,4443 +pygments/styles/autumn.py,sha256=Se4tPjreFfhX3XskKNGUEdvfobNL-itd5qO33-zsrZg,2096 +pygments/styles/borland.py,sha256=6hivq7_FxLDJoXXy-2HBsojFca8b7ulblJCYt8LbTUs,1514 +pygments/styles/bw.py,sha256=QcIDiosv20OYBe0D5mu1KZx6ll6L7w06__IUsivjvH0,1308 +pygments/styles/colorful.py,sha256=K2uHGWEd3fO5bAhHbp_5Z3FBTioHlZFSbPsvvvCKjbc,2730 +pygments/styles/default.py,sha256=Y2OCPkQo96Nhut9JJ5gKiSUdw34jZyF0mztYKt8sdHk,2488 +pygments/styles/dracula.py,sha256=n5y1aKUaRVeFAyQUhzCYfAYAYb_mB0qGNNHbOoEgMvI,3314 +pygments/styles/emacs.py,sha256=hPlFDlCx5uOxQS2-lfJ_iWp7t6NzKzV8pG-nRL4QaDY,2439 +pygments/styles/friendly.py,sha256=lk-aZIv6oU6BcP1nuzk7YnCyfT8N09zvGPLTijZaptU,2502 +pygments/styles/friendly_grayscale.py,sha256=jU9YccoY0WEZbLnyQ6Ms_GD9UU9nIxyYJ01cOK3vK74,2707 +pygments/styles/fruity.py,sha256=YSLME-2ldk806w2q8KwPEwJG_lUwbDjgUOON6GWqPOQ,1274 +pygments/styles/gh_dark.py,sha256=1uTXQQLge1c0sSlYdWpO8E-oHZpA3cZtp8lg697V_5c,3481 +pygments/styles/gruvbox.py,sha256=wXEvbg9oUVIberD9NK09HqxXZF9hJZU_wWID6N91auM,3230 +pygments/styles/igor.py,sha256=HShSIPfYFBjb7-_OKS1ybrJ7LGFAYf-mIbbFVORONvg,692 +pygments/styles/inkpot.py,sha256=PuYymWU-qaLZlJUvm9IV8apx0cud8JyTUMMc7yKVZes,2302 +pygments/styles/lilypond.py,sha256=fvNi0XzR4Y_nikorTzBBiACkrgbb6XKWrElT4x6fwhQ,2016 
+pygments/styles/lovelace.py,sha256=O1B_qFnjWqMvjPkmrdJjP6L14C1nodQst_TkbcsQPzQ,3117 +pygments/styles/manni.py,sha256=0wijm_0LctgoBpeJFR_spuriSafmxEm_r8AQLGwiMJc,2350 +pygments/styles/material.py,sha256=DenZF_lrw1qy9vYyb78VJMRQd9WvjgZvkuquG7Pjcno,4083 +pygments/styles/monokai.py,sha256=WXuj6DE33quDFva-6iG2h0UPXJd9AttKRsQCKzsFFvA,5063 +pygments/styles/murphy.py,sha256=XBQ8cnbj8-UJ1BPSrVZ71RJLg4cnG_27G3JMEJhH3q8,2703 +pygments/styles/native.py,sha256=4dM251hEDZSHpn3TDmKESTw4ipkKhJbjUEGoioiqsdU,1948 +pygments/styles/nord.py,sha256=XgccTxZtXlQg04u4ZTrsFBviCoqqCEp2REiAj5flQbM,5244 +pygments/styles/onedark.py,sha256=to_IrmgcfyPsjRiikCDi3ApkDXMrtmv1C_zA7DmF4Ok,1664 +pygments/styles/paraiso_dark.py,sha256=wqFMXgju9IpuODurCHwdRpyE0vzZCh3sjvEfIy_elvU,5526 +pygments/styles/paraiso_light.py,sha256=1EbYrcy_zP081a0iad2x2jE7789F28T8TSVD_A5eb0Y,5530 +pygments/styles/pastie.py,sha256=hT4VrEWQiTK26ewereBf124SQ6nvBRXQU1QQa8DGb-I,2425 +pygments/styles/perldoc.py,sha256=3hw_EUgbnm_JhvMhDZq7B54oRhIM1N8hsovlZIqUF_k,2128 +pygments/styles/rainbow_dash.py,sha256=tRcpKBqufUNlbtbxWuZhlXEMg98kUcWlIfIvBvgt098,2432 +pygments/styles/rrt.py,sha256=MgLkoeLMjoyqEd52hZSutH54LzPQG7fPAcyAG8iDo9I,874 +pygments/styles/sas.py,sha256=NhE8FA1mbbleQlJnC6m7sQVBKrOE1vSqbPc3v7c6Lno,1393 +pygments/styles/solarized.py,sha256=fgBJq12PRG1UspBft3r0fMdZxFjI9Etm8XfiTNdiZRg,4078 +pygments/styles/staroffice.py,sha256=YLgK5QjuZndITzDTlzbQs5tztp-p2-5MbUJ2XY7aX00,770 +pygments/styles/stata_dark.py,sha256=tWF-PomZTIodRJaZVbv1gZK_PAkFr86qQwzAq0_xC4M,1198 +pygments/styles/stata_light.py,sha256=VBSqRQQJJWOHOwpInZ0ClSRETRCcTRARJ3-PWzZ92D0,1227 +pygments/styles/tango.py,sha256=rAVsYXJtnDwf1Zpy8JCC5zasEiUx-XNemZGe8kqBGC8,7039 +pygments/styles/trac.py,sha256=Ota0CWC6xlZmXWoDWlhQAe71ruww_ZURrOAXZSI6Xfk,1885 +pygments/styles/vim.py,sha256=Z3D9OFwGm8LhgIgzlwL4nBcke8t2YtL1R93k3mx_RJk,1922 +pygments/styles/vs.py,sha256=rZHvA7M5hjZ1ZHB1zMP06Ktn6MU55_8z6AyHCTFXxxM,1026 +pygments/styles/xcode.py,sha256=SyX3Rh4_g5F_Vz9ntWDhSFuBerPp_tj4j3VrMSG2zcE,1453 +pygments/styles/zenburn.py,sha256=1EF8-K8mbl4Ooe0idwQo0rZOVXSaZK5m3HJrVT7HkZk,2148 +pygments/token.py,sha256=vA2yNHGJBHfq4jNQSah7C9DmIOp34MmYHPA8P-cYAHI,6184 +pygments/unistring.py,sha256=gP3gK-6C4oAFjjo9HvoahsqzuV4Qz0jl0E0OxfDerHI,63187 +pygments/util.py,sha256=KgwpWWC3By5AiNwxGTI7oI9aXupH2TyZWukafBJe0Mg,9110 diff --git a/.venv/lib/python3.11/site-packages/Pygments-2.14.0.dist-info/REQUESTED b/.venv/lib/python3.11/site-packages/Pygments-2.14.0.dist-info/REQUESTED new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.11/site-packages/Pygments-2.14.0.dist-info/WHEEL b/.venv/lib/python3.11/site-packages/Pygments-2.14.0.dist-info/WHEEL new file mode 100644 index 0000000..57e3d84 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/Pygments-2.14.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.38.4) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/.venv/lib/python3.11/site-packages/Pygments-2.14.0.dist-info/entry_points.txt b/.venv/lib/python3.11/site-packages/Pygments-2.14.0.dist-info/entry_points.txt new file mode 100644 index 0000000..15498e3 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/Pygments-2.14.0.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[console_scripts] +pygmentize = pygments.cmdline:main diff --git a/.venv/lib/python3.11/site-packages/Pygments-2.14.0.dist-info/top_level.txt b/.venv/lib/python3.11/site-packages/Pygments-2.14.0.dist-info/top_level.txt new file mode 100644 index 0000000..a9f49e0 --- /dev/null +++ 
b/.venv/lib/python3.11/site-packages/Pygments-2.14.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+pygments
diff --git a/.venv/lib/python3.11/site-packages/SecretStorage-3.3.3.dist-info/INSTALLER b/.venv/lib/python3.11/site-packages/SecretStorage-3.3.3.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/SecretStorage-3.3.3.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/.venv/lib/python3.11/site-packages/SecretStorage-3.3.3.dist-info/LICENSE b/.venv/lib/python3.11/site-packages/SecretStorage-3.3.3.dist-info/LICENSE
new file mode 100644
index 0000000..a343a62
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/SecretStorage-3.3.3.dist-info/LICENSE
@@ -0,0 +1,25 @@
+Copyright 2012-2018 Dmitry Shachnev <mitya57@gmail.com>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+3. Neither the name of the University nor the names of its contributors may be
+   used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/.venv/lib/python3.11/site-packages/SecretStorage-3.3.3.dist-info/METADATA b/.venv/lib/python3.11/site-packages/SecretStorage-3.3.3.dist-info/METADATA
new file mode 100644
index 0000000..f0323fd
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/SecretStorage-3.3.3.dist-info/METADATA
@@ -0,0 +1,114 @@
+Metadata-Version: 2.1
+Name: SecretStorage
+Version: 3.3.3
+Summary: Python bindings to FreeDesktop.org Secret Service API
+Home-page: https://github.com/mitya57/secretstorage
+Author: Dmitry Shachnev
+Author-email: mitya57@gmail.com
+License: BSD 3-Clause License
+Platform: Linux
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: POSIX
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Topic :: Security
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Requires-Python: >=3.6
+Description-Content-Type: text/x-rst
+License-File: LICENSE
+Requires-Dist: cryptography (>=2.0)
+Requires-Dist: jeepney (>=0.6)
+
+.. image:: https://github.com/mitya57/secretstorage/workflows/tests/badge.svg
+   :target: https://github.com/mitya57/secretstorage/actions
+   :alt: GitHub Actions status
+.. image:: https://codecov.io/gh/mitya57/secretstorage/branch/master/graph/badge.svg
+   :target: https://codecov.io/gh/mitya57/secretstorage
+   :alt: Coverage status
+.. image:: https://readthedocs.org/projects/secretstorage/badge/?version=latest
+   :target: https://secretstorage.readthedocs.io/en/latest/
+   :alt: ReadTheDocs status
+
+Module description
+==================
+
+This module provides a way to store passwords and other secrets securely.
+
+It uses the D-Bus `Secret Service`_ API, which is supported by GNOME Keyring,
+KWallet (since version 5.97) and KeePassXC.
+
+The main classes provided are ``secretstorage.Item``, representing a secret
+item (which has a *label*, a *secret* and some *attributes*), and
+``secretstorage.Collection``, a place where items are stored.
+
+SecretStorage supports most of the functions provided by Secret Service,
+including creating and deleting items and collections, editing items, and
+locking and unlocking collections (asynchronous unlocking is also supported).
+
+The documentation can be found on `secretstorage.readthedocs.io`_.
+
+.. _`Secret Service`: https://specifications.freedesktop.org/secret-service/
+.. _`secretstorage.readthedocs.io`: https://secretstorage.readthedocs.io/en/latest/
+
+Building the module
+===================
+
+.. note::
+   SecretStorage 3.x supports Python 3.6 and newer versions.
+   If you have an older version of Python, install SecretStorage 2.x::
+
+       pip install "SecretStorage < 3"
+
+SecretStorage requires these packages to work:
+
+* Jeepney_
+* `python-cryptography`_
+
+To build SecretStorage, use this command::
+
+    python3 setup.py build
+
+If you have Sphinx_ installed, you can also build the documentation::
+
+    python3 setup.py build_sphinx
+
+.. _Jeepney: https://pypi.org/project/jeepney/
+.. _`python-cryptography`: https://pypi.org/project/cryptography/
+.. _Sphinx: http://sphinx-doc.org/
+
+Testing the module
+==================
+
+First, make sure that you have the Secret Service daemon installed.
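Once it is, a minimal round-trip through the classes described above makes a
quick smoke test. This sketch is not part of the package or its test suite;
it assumes a running, unlockable Secret Service daemon on the session bus,
and the label, attributes, and secret shown are purely illustrative::

    import secretstorage

    # Connect to the session D-Bus and open the default collection.
    connection = secretstorage.dbus_init()
    collection = secretstorage.get_default_collection(connection)
    if collection.is_locked():
        collection.unlock()

    # Store a secret item with a label and searchable attributes.
    attributes = {'application': 'myapp', 'user': 'alice'}
    item = collection.create_item('myapp credentials', attributes, b'hunter2')

    # Items are looked up by their attributes; the secret comes back as bytes.
    for found in collection.search_items(attributes):
        print(found.get_label(), found.get_secret())
    item.delete()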
+The `GNOME Keyring`_ is the reference server-side implementation for the +Secret Service specification. + +.. _`GNOME Keyring`: https://download.gnome.org/sources/gnome-keyring/ + +Then, start the daemon and unlock the ``default`` collection, if needed. +The testsuite will fail to run if the ``default`` collection exists and is +locked. If it does not exist, the testsuite can also use the temporary +``session`` collection, as provided by the GNOME Keyring. + +Then, run the Python unittest module:: + + python3 -m unittest discover -s tests + +If you want to run the tests in an isolated or headless environment, run +this command in a D-Bus session:: + + dbus-run-session -- python3 -m unittest discover -s tests + +Get the code +============ + +SecretStorage is available under BSD license. The source code can be found +on GitHub_. + +.. _GitHub: https://github.com/mitya57/secretstorage diff --git a/.venv/lib/python3.11/site-packages/SecretStorage-3.3.3.dist-info/RECORD b/.venv/lib/python3.11/site-packages/SecretStorage-3.3.3.dist-info/RECORD new file mode 100644 index 0000000..63b1c6f --- /dev/null +++ b/.venv/lib/python3.11/site-packages/SecretStorage-3.3.3.dist-info/RECORD @@ -0,0 +1,22 @@ +SecretStorage-3.3.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +SecretStorage-3.3.3.dist-info/LICENSE,sha256=cPa_yndjPDXvohgyjtpUhtcFTCkU1hggmA43h5dSCiU,1504 +SecretStorage-3.3.3.dist-info/METADATA,sha256=ZScD5voEgjme04wlw9OZigESMxLa2xG_eaIeZ_IMqJI,4027 +SecretStorage-3.3.3.dist-info/RECORD,, +SecretStorage-3.3.3.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +SecretStorage-3.3.3.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92 +SecretStorage-3.3.3.dist-info/top_level.txt,sha256=hveSi1OWGaEt3kEVbjmZ0M-ASPxi6y-nTPVa-d3c0B4,14 +secretstorage/__init__.py,sha256=W1p-HB1Qh12Dv6K8ml0Wj_MzN09_dEeezJjQZAHf-O4,3364 +secretstorage/__pycache__/__init__.cpython-311.pyc,, +secretstorage/__pycache__/collection.cpython-311.pyc,, +secretstorage/__pycache__/defines.cpython-311.pyc,, +secretstorage/__pycache__/dhcrypto.cpython-311.pyc,, +secretstorage/__pycache__/exceptions.cpython-311.pyc,, +secretstorage/__pycache__/item.cpython-311.pyc,, +secretstorage/__pycache__/util.cpython-311.pyc,, +secretstorage/collection.py,sha256=lHwSOkFO5sRspgcUBoBI8ZG2au2bcUSO6X64ksVdnsQ,9436 +secretstorage/defines.py,sha256=DzUrEWzSvBlN8kK2nVXnLGlCZv7HWNyfN1AYqRmjKGE,807 +secretstorage/dhcrypto.py,sha256=BiuDoNvNmd8i7Ul4ENouRnbqFE3SUmTUSAn6RVvn7Tg,2578 +secretstorage/exceptions.py,sha256=1uUZXTua4jRZf4PKDIT2SVWcSKP2lP97s8r3eJZudio,1655 +secretstorage/item.py,sha256=3niFSjOzwrB2hV1jrg78RXgBsTrpw44852VpZHXUpeE,5813 +secretstorage/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +secretstorage/util.py,sha256=vHu01QaooMQ5sRdRDFX9pg7rrzfJWF9vg0plm3Zg0Po,6755 diff --git a/.venv/lib/python3.11/site-packages/SecretStorage-3.3.3.dist-info/REQUESTED b/.venv/lib/python3.11/site-packages/SecretStorage-3.3.3.dist-info/REQUESTED new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.11/site-packages/SecretStorage-3.3.3.dist-info/WHEEL b/.venv/lib/python3.11/site-packages/SecretStorage-3.3.3.dist-info/WHEEL new file mode 100644 index 0000000..becc9a6 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/SecretStorage-3.3.3.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/.venv/lib/python3.11/site-packages/SecretStorage-3.3.3.dist-info/top_level.txt 
b/.venv/lib/python3.11/site-packages/SecretStorage-3.3.3.dist-info/top_level.txt new file mode 100644 index 0000000..0ec6ae8 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/SecretStorage-3.3.3.dist-info/top_level.txt @@ -0,0 +1 @@ +secretstorage diff --git a/.venv/lib/python3.11/site-packages/_black_version.py b/.venv/lib/python3.11/site-packages/_black_version.py new file mode 100644 index 0000000..358412d --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_black_version.py @@ -0,0 +1 @@ +version = "23.1.0" diff --git a/.venv/lib/python3.11/site-packages/_cffi_backend.cpython-311-x86_64-linux-gnu.so b/.venv/lib/python3.11/site-packages/_cffi_backend.cpython-311-x86_64-linux-gnu.so new file mode 100755 index 0000000..d940aac Binary files /dev/null and b/.venv/lib/python3.11/site-packages/_cffi_backend.cpython-311-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.11/site-packages/_distutils_hack/__init__.py b/.venv/lib/python3.11/site-packages/_distutils_hack/__init__.py new file mode 100644 index 0000000..f987a53 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_distutils_hack/__init__.py @@ -0,0 +1,222 @@ +# don't import any costly modules +import sys +import os + + +is_pypy = '__pypy__' in sys.builtin_module_names + + +def warn_distutils_present(): + if 'distutils' not in sys.modules: + return + if is_pypy and sys.version_info < (3, 7): + # PyPy for 3.6 unconditionally imports distutils, so bypass the warning + # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250 + return + import warnings + + warnings.warn( + "Distutils was imported before Setuptools, but importing Setuptools " + "also replaces the `distutils` module in `sys.modules`. This may lead " + "to undesirable behaviors or errors. To avoid these issues, avoid " + "using distutils directly, ensure that setuptools is installed in the " + "traditional way (e.g. not an editable install), and/or make sure " + "that setuptools is always imported before distutils." + ) + + +def clear_distutils(): + if 'distutils' not in sys.modules: + return + import warnings + + warnings.warn("Setuptools is replacing distutils.") + mods = [ + name + for name in sys.modules + if name == "distutils" or name.startswith("distutils.") + ] + for name in mods: + del sys.modules[name] + + +def enabled(): + """ + Allow selection of distutils by environment variable. + """ + which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local') + return which == 'local' + + +def ensure_local_distutils(): + import importlib + + clear_distutils() + + # With the DistutilsMetaFinder in place, + # perform an import to cause distutils to be + # loaded from setuptools._distutils. Ref #2906. + with shim(): + importlib.import_module('distutils') + + # check that submodules load as expected + core = importlib.import_module('distutils.core') + assert '_distutils' in core.__file__, core.__file__ + assert 'setuptools._distutils.log' not in sys.modules + + +def do_override(): + """ + Ensure that the local copy of distutils is preferred over stdlib. + + See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401 + for more motivation. 
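    In practice this is wired up at interpreter startup: setuptools ships a
    ``distutils-precedence.pth`` file whose one-liner does, roughly (a
    sketch, not the literal file contents)::

        import os
        if os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local') == 'local':
            __import__('_distutils_hack').add_shim()

    and importing ``_distutils_hack.override`` (see below) calls
    ``do_override()`` directly.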
+ """ + if enabled(): + warn_distutils_present() + ensure_local_distutils() + + +class _TrivialRe: + def __init__(self, *patterns): + self._patterns = patterns + + def match(self, string): + return all(pat in string for pat in self._patterns) + + +class DistutilsMetaFinder: + def find_spec(self, fullname, path, target=None): + # optimization: only consider top level modules and those + # found in the CPython test suite. + if path is not None and not fullname.startswith('test.'): + return + + method_name = 'spec_for_{fullname}'.format(**locals()) + method = getattr(self, method_name, lambda: None) + return method() + + def spec_for_distutils(self): + if self.is_cpython(): + return + + import importlib + import importlib.abc + import importlib.util + + try: + mod = importlib.import_module('setuptools._distutils') + except Exception: + # There are a couple of cases where setuptools._distutils + # may not be present: + # - An older Setuptools without a local distutils is + # taking precedence. Ref #2957. + # - Path manipulation during sitecustomize removes + # setuptools from the path but only after the hook + # has been loaded. Ref #2980. + # In either case, fall back to stdlib behavior. + return + + class DistutilsLoader(importlib.abc.Loader): + def create_module(self, spec): + mod.__name__ = 'distutils' + return mod + + def exec_module(self, module): + pass + + return importlib.util.spec_from_loader( + 'distutils', DistutilsLoader(), origin=mod.__file__ + ) + + @staticmethod + def is_cpython(): + """ + Suppress supplying distutils for CPython (build and tests). + Ref #2965 and #3007. + """ + return os.path.isfile('pybuilddir.txt') + + def spec_for_pip(self): + """ + Ensure stdlib distutils when running under pip. + See pypa/pip#8761 for rationale. + """ + if self.pip_imported_during_build(): + return + clear_distutils() + self.spec_for_distutils = lambda: None + + @classmethod + def pip_imported_during_build(cls): + """ + Detect if pip is being imported in a build script. Ref #2355. + """ + import traceback + + return any( + cls.frame_file_is_setup(frame) for frame, line in traceback.walk_stack(None) + ) + + @staticmethod + def frame_file_is_setup(frame): + """ + Return True if the indicated frame suggests a setup.py file. + """ + # some frames may not have __file__ (#2940) + return frame.f_globals.get('__file__', '').endswith('setup.py') + + def spec_for_sensitive_tests(self): + """ + Ensure stdlib distutils when running select tests under CPython. 
+ + python/cpython#91169 + """ + clear_distutils() + self.spec_for_distutils = lambda: None + + sensitive_tests = ( + [ + 'test.test_distutils', + 'test.test_peg_generator', + 'test.test_importlib', + ] + if sys.version_info < (3, 10) + else [ + 'test.test_distutils', + ] + ) + + +for name in DistutilsMetaFinder.sensitive_tests: + setattr( + DistutilsMetaFinder, + f'spec_for_{name}', + DistutilsMetaFinder.spec_for_sensitive_tests, + ) + + +DISTUTILS_FINDER = DistutilsMetaFinder() + + +def add_shim(): + DISTUTILS_FINDER in sys.meta_path or insert_shim() + + +class shim: + def __enter__(self): + insert_shim() + + def __exit__(self, exc, value, tb): + remove_shim() + + +def insert_shim(): + sys.meta_path.insert(0, DISTUTILS_FINDER) + + +def remove_shim(): + try: + sys.meta_path.remove(DISTUTILS_FINDER) + except ValueError: + pass diff --git a/.venv/lib/python3.11/site-packages/_distutils_hack/override.py b/.venv/lib/python3.11/site-packages/_distutils_hack/override.py new file mode 100644 index 0000000..2cc433a --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_distutils_hack/override.py @@ -0,0 +1 @@ +__import__('_distutils_hack').do_override() diff --git a/.venv/lib/python3.11/site-packages/_pytest/__init__.py b/.venv/lib/python3.11/site-packages/_pytest/__init__.py new file mode 100644 index 0000000..8eb8ec9 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/__init__.py @@ -0,0 +1,13 @@ +from __future__ import annotations + + +__all__ = ["__version__", "version_tuple"] + +try: + from ._version import version as __version__ + from ._version import version_tuple +except ImportError: # pragma: no cover + # broken installation, we don't even try + # unknown only works because we do poor man's version compare + __version__ = "unknown" + version_tuple = (0, 0, "unknown") diff --git a/.venv/lib/python3.11/site-packages/_pytest/_argcomplete.py b/.venv/lib/python3.11/site-packages/_pytest/_argcomplete.py new file mode 100644 index 0000000..59426ef --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/_argcomplete.py @@ -0,0 +1,117 @@ +"""Allow bash-completion for argparse with argcomplete if installed. + +Needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail +to find the magic string, so _ARGCOMPLETE env. var is never set, and +this does not need special code). + +Function try_argcomplete(parser) should be called directly before +the call to ArgumentParser.parse_args(). + +The filescompleter is what you normally would use on the positional +arguments specification, in order to get "dirname/" after "dirn" +instead of the default "dirname ": + + optparser.add_argument(Config._file_or_dir, nargs='*').completer=filescompleter + +Other, application specific, completers should go in the file +doing the add_argument calls as they need to be specified as .completer +attributes as well. (If argcomplete is not installed, the function the +attribute points to will not be used). + +SPEEDUP +======= + +The generic argcomplete script for bash-completion +(/etc/bash_completion.d/python-argcomplete.sh) +uses a python program to determine the startup script generated by pip. +You can speed up completion somewhat by changing this script to include + # PYTHON_ARGCOMPLETE_OK +so the python-argcomplete-check-easy-install-script does not +need to be called to find the entry point of the code and see if that is +marked with PYTHON_ARGCOMPLETE_OK.
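A concrete sketch of the wiring described above (the parser and the
positional argument are hypothetical; ``try_argcomplete`` and
``filescompleter`` are the names defined in this module)::

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('file_or_dir', nargs='*').completer = filescompleter
    try_argcomplete(parser)  # must run directly before parse_args()
    args = parser.parse_args()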
+ +INSTALL/DEBUGGING +================= + +To include this support in another application that has setup.py generated +scripts: + +- Add the line: + # PYTHON_ARGCOMPLETE_OK + near the top of the main python entry point. + +- Include in the file calling parse_args(): + from _argcomplete import try_argcomplete, filescompleter + Call try_argcomplete just before parse_args(), and optionally add + filescompleter to the positional arguments' add_argument(). + +If things do not work right away: + +- Switch on argcomplete debugging with (also helpful when doing custom + completers): + export _ARC_DEBUG=1 + +- Run: + python-argcomplete-check-easy-install-script $(which appname) + echo $? + will echo 0 if the magic line has been found, 1 if not. + +- Sometimes it helps to find errors early on using: + _ARGCOMPLETE=1 _ARC_DEBUG=1 appname + which should throw a KeyError: 'COMPLINE' (which is properly set by the + global argcomplete script). +""" + +from __future__ import annotations + +import argparse +from glob import glob +import os +import sys +from typing import Any + + +class FastFilesCompleter: + """Fast file completer class.""" + + def __init__(self, directories: bool = True) -> None: + self.directories = directories + + def __call__(self, prefix: str, **kwargs: Any) -> list[str]: + # Only called on non-option completions. + if os.sep in prefix[1:]: + prefix_dir = len(os.path.dirname(prefix) + os.sep) + else: + prefix_dir = 0 + completion = [] + globbed = [] + if "*" not in prefix and "?" not in prefix: + # We are on unix, otherwise no bash. + if not prefix or prefix[-1] == os.sep: + globbed.extend(glob(prefix + ".*")) + prefix += "*" + globbed.extend(glob(prefix)) + for x in sorted(globbed): + if os.path.isdir(x): + x += "/" + # Append stripping the prefix (like bash, not like compgen).
+ completion.append(x[prefix_dir:]) + return completion + + +if os.environ.get("_ARGCOMPLETE"): + try: + import argcomplete.completers + except ImportError: + sys.exit(-1) + filescompleter: FastFilesCompleter | None = FastFilesCompleter() + + def try_argcomplete(parser: argparse.ArgumentParser) -> None: + argcomplete.autocomplete(parser, always_complete_options=False) + +else: + + def try_argcomplete(parser: argparse.ArgumentParser) -> None: + pass + + filescompleter = None diff --git a/.venv/lib/python3.11/site-packages/_pytest/_code/__init__.py b/.venv/lib/python3.11/site-packages/_pytest/_code/__init__.py new file mode 100644 index 0000000..7f67a2e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/_code/__init__.py @@ -0,0 +1,26 @@ +"""Python inspection/code generation API.""" + +from __future__ import annotations + +from .code import Code +from .code import ExceptionInfo +from .code import filter_traceback +from .code import Frame +from .code import getfslineno +from .code import Traceback +from .code import TracebackEntry +from .source import getrawcode +from .source import Source + + +__all__ = [ + "Code", + "ExceptionInfo", + "Frame", + "Source", + "Traceback", + "TracebackEntry", + "filter_traceback", + "getfslineno", + "getrawcode", +] diff --git a/.venv/lib/python3.11/site-packages/_pytest/_code/code.py b/.venv/lib/python3.11/site-packages/_pytest/_code/code.py new file mode 100644 index 0000000..f1241f1 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/_code/code.py @@ -0,0 +1,1567 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +import ast +from collections.abc import Callable +from collections.abc import Iterable +from collections.abc import Mapping +from collections.abc import Sequence +import dataclasses +import inspect +from inspect import CO_VARARGS +from inspect import CO_VARKEYWORDS +from io import StringIO +import os +from pathlib import Path +import re +import sys +from traceback import extract_tb +from traceback import format_exception +from traceback import format_exception_only +from traceback import FrameSummary +from types import CodeType +from types import FrameType +from types import TracebackType +from typing import Any +from typing import ClassVar +from typing import Final +from typing import final +from typing import Generic +from typing import Literal +from typing import overload +from typing import SupportsIndex +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +import pluggy + +import _pytest +from _pytest._code.source import findsource +from _pytest._code.source import getrawcode +from _pytest._code.source import getstatementrange_ast +from _pytest._code.source import Source +from _pytest._io import TerminalWriter +from _pytest._io.saferepr import safeformat +from _pytest._io.saferepr import saferepr +from _pytest.compat import get_real_func +from _pytest.deprecated import check_ispytest +from _pytest.pathlib import absolutepath +from _pytest.pathlib import bestrelpath + + +if sys.version_info < (3, 11): + from exceptiongroup import BaseExceptionGroup + +TracebackStyle = Literal["long", "short", "line", "no", "native", "value", "auto"] + +EXCEPTION_OR_MORE = Union[type[BaseException], tuple[type[BaseException], ...]] + + +class Code: + """Wrapper around Python code objects.""" + + __slots__ = ("raw",) + + def __init__(self, obj: CodeType) -> None: + self.raw = obj + + @classmethod + def from_function(cls, obj: object) -> Code: + return cls(getrawcode(obj)) + + def 
__eq__(self, other): + return self.raw == other.raw + + # Ignore type because of https://github.com/python/mypy/issues/4266. + __hash__ = None # type: ignore + + @property + def firstlineno(self) -> int: + return self.raw.co_firstlineno - 1 + + @property + def name(self) -> str: + return self.raw.co_name + + @property + def path(self) -> Path | str: + """Return a path object pointing to source code, or an ``str`` in + case of ``OSError`` / non-existing file.""" + if not self.raw.co_filename: + return "" + try: + p = absolutepath(self.raw.co_filename) + # maybe don't try this checking + if not p.exists(): + raise OSError("path check failed.") + return p + except OSError: + # XXX maybe try harder like the weird logic + # in the standard lib [linecache.updatecache] does? + return self.raw.co_filename + + @property + def fullsource(self) -> Source | None: + """Return a _pytest._code.Source object for the full source file of the code.""" + full, _ = findsource(self.raw) + return full + + def source(self) -> Source: + """Return a _pytest._code.Source object for the code object's source only.""" + # return source only for that part of code + return Source(self.raw) + + def getargs(self, var: bool = False) -> tuple[str, ...]: + """Return a tuple with the argument names for the code object. + + If 'var' is set True also return the names of the variable and + keyword arguments when present. + """ + # Handy shortcut for getting args. + raw = self.raw + argcount = raw.co_argcount + if var: + argcount += raw.co_flags & CO_VARARGS + argcount += raw.co_flags & CO_VARKEYWORDS + return raw.co_varnames[:argcount] + + +class Frame: + """Wrapper around a Python frame holding f_locals and f_globals + in which expressions can be evaluated.""" + + __slots__ = ("raw",) + + def __init__(self, frame: FrameType) -> None: + self.raw = frame + + @property + def lineno(self) -> int: + return self.raw.f_lineno - 1 + + @property + def f_globals(self) -> dict[str, Any]: + return self.raw.f_globals + + @property + def f_locals(self) -> dict[str, Any]: + return self.raw.f_locals + + @property + def code(self) -> Code: + return Code(self.raw.f_code) + + @property + def statement(self) -> Source: + """Statement this frame is at.""" + if self.code.fullsource is None: + return Source("") + return self.code.fullsource.getstatement(self.lineno) + + def eval(self, code, **vars): + """Evaluate 'code' in the frame. + + 'vars' are optional additional local variables. + + Returns the result of the evaluation. + """ + f_locals = self.f_locals.copy() + f_locals.update(vars) + return eval(code, self.f_globals, f_locals) + + def repr(self, object: object) -> str: + """Return a 'safe' (non-recursive, one-line) string repr for 'object'.""" + return saferepr(object) + + def getargs(self, var: bool = False): + """Return a list of tuples (name, value) for all arguments. + + If 'var' is set True, also include the variable and keyword arguments + when present. 
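        A small illustrative sketch (assumes ``sys`` is imported)::

            def f(a, *args, **kwargs):
                return Frame(sys._getframe()).getargs(var=True)

            f(1, 2)  # -> [('a', 1), ('args', (2,)), ('kwargs', {})]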
+ """ + retval = [] + for arg in self.code.getargs(var): + try: + retval.append((arg, self.f_locals[arg])) + except KeyError: + pass # this can occur when using Psyco + return retval + + +class TracebackEntry: + """A single entry in a Traceback.""" + + __slots__ = ("_rawentry", "_repr_style") + + def __init__( + self, + rawentry: TracebackType, + repr_style: Literal["short", "long"] | None = None, + ) -> None: + self._rawentry: Final = rawentry + self._repr_style: Final = repr_style + + def with_repr_style( + self, repr_style: Literal["short", "long"] | None + ) -> TracebackEntry: + return TracebackEntry(self._rawentry, repr_style) + + @property + def lineno(self) -> int: + return self._rawentry.tb_lineno - 1 + + def get_python_framesummary(self) -> FrameSummary: + # Python's built-in traceback module implements all the nitty gritty + # details to get column numbers of out frames. + stack_summary = extract_tb(self._rawentry, limit=1) + return stack_summary[0] + + # Column and end line numbers introduced in python 3.11 + if sys.version_info < (3, 11): + + @property + def end_lineno_relative(self) -> int | None: + return None + + @property + def colno(self) -> int | None: + return None + + @property + def end_colno(self) -> int | None: + return None + else: + + @property + def end_lineno_relative(self) -> int | None: + frame_summary = self.get_python_framesummary() + if frame_summary.end_lineno is None: # pragma: no cover + return None + return frame_summary.end_lineno - 1 - self.frame.code.firstlineno + + @property + def colno(self) -> int | None: + """Starting byte offset of the expression in the traceback entry.""" + return self.get_python_framesummary().colno + + @property + def end_colno(self) -> int | None: + """Ending byte offset of the expression in the traceback entry.""" + return self.get_python_framesummary().end_colno + + @property + def frame(self) -> Frame: + return Frame(self._rawentry.tb_frame) + + @property + def relline(self) -> int: + return self.lineno - self.frame.code.firstlineno + + def __repr__(self) -> str: + return f"" + + @property + def statement(self) -> Source: + """_pytest._code.Source object for the current statement.""" + source = self.frame.code.fullsource + assert source is not None + return source.getstatement(self.lineno) + + @property + def path(self) -> Path | str: + """Path to the source code.""" + return self.frame.code.path + + @property + def locals(self) -> dict[str, Any]: + """Locals of underlying frame.""" + return self.frame.f_locals + + def getfirstlinesource(self) -> int: + return self.frame.code.firstlineno + + def getsource( + self, astcache: dict[str | Path, ast.AST] | None = None + ) -> Source | None: + """Return failing source code.""" + # we use the passed in astcache to not reparse asttrees + # within exception info printing + source = self.frame.code.fullsource + if source is None: + return None + key = astnode = None + if astcache is not None: + key = self.frame.code.path + if key is not None: + astnode = astcache.get(key, None) + start = self.getfirstlinesource() + try: + astnode, _, end = getstatementrange_ast( + self.lineno, source, astnode=astnode + ) + except SyntaxError: + end = self.lineno + 1 + else: + if key is not None and astcache is not None: + astcache[key] = astnode + return source[start:end] + + source = property(getsource) + + def ishidden(self, excinfo: ExceptionInfo[BaseException] | None) -> bool: + """Return True if the current frame has a var __tracebackhide__ + resolving to True. 
+ + If __tracebackhide__ is a callable, it gets called with the + ExceptionInfo instance and can decide whether to hide the traceback. + + Mostly for internal use. + """ + tbh: bool | Callable[[ExceptionInfo[BaseException] | None], bool] = False + for maybe_ns_dct in (self.frame.f_locals, self.frame.f_globals): + # in normal cases, f_locals and f_globals are dictionaries + # however via `exec(...)` / `eval(...)` they can be other types + # (even incorrect types!). + # as such, we suppress all exceptions while accessing __tracebackhide__ + try: + tbh = maybe_ns_dct["__tracebackhide__"] + except Exception: + pass + else: + break + if tbh and callable(tbh): + return tbh(excinfo) + return tbh + + def __str__(self) -> str: + name = self.frame.code.name + try: + line = str(self.statement).lstrip() + except KeyboardInterrupt: + raise + except BaseException: + line = "???" + # This output does not quite match Python's repr for traceback entries, + # but changing it to do so would break certain plugins. See + # https://github.com/pytest-dev/pytest/pull/7535/ for details. + return f" File '{self.path}':{self.lineno + 1} in {name}\n {line}\n" + + @property + def name(self) -> str: + """co_name of underlying code.""" + return self.frame.code.raw.co_name + + +class Traceback(list[TracebackEntry]): + """Traceback objects encapsulate and offer higher level access to Traceback entries.""" + + def __init__( + self, + tb: TracebackType | Iterable[TracebackEntry], + ) -> None: + """Initialize from given python traceback object and ExceptionInfo.""" + if isinstance(tb, TracebackType): + + def f(cur: TracebackType) -> Iterable[TracebackEntry]: + cur_: TracebackType | None = cur + while cur_ is not None: + yield TracebackEntry(cur_) + cur_ = cur_.tb_next + + super().__init__(f(tb)) + else: + super().__init__(tb) + + def cut( + self, + path: os.PathLike[str] | str | None = None, + lineno: int | None = None, + firstlineno: int | None = None, + excludepath: os.PathLike[str] | None = None, + ) -> Traceback: + """Return a Traceback instance wrapping part of this Traceback. + + By providing any combination of path, lineno and firstlineno, the + first frame to start the to-be-returned traceback is determined. + + This allows cutting the first part of a Traceback instance e.g. + for formatting reasons (removing some uninteresting bits that deal + with handling of the exception/traceback). + """ + path_ = None if path is None else os.fspath(path) + excludepath_ = None if excludepath is None else os.fspath(excludepath) + for x in self: + code = x.frame.code + codepath = code.path + if path is not None and str(codepath) != path_: + continue + if ( + excludepath is not None + and isinstance(codepath, Path) + and excludepath_ in (str(p) for p in codepath.parents) # type: ignore[operator] + ): + continue + if lineno is not None and x.lineno != lineno: + continue + if firstlineno is not None and x.frame.code.firstlineno != firstlineno: + continue + return Traceback(x._rawentry) + return self + + @overload + def __getitem__(self, key: SupportsIndex) -> TracebackEntry: ... + + @overload + def __getitem__(self, key: slice) -> Traceback: ... + + def __getitem__(self, key: SupportsIndex | slice) -> TracebackEntry | Traceback: + if isinstance(key, slice): + return self.__class__(super().__getitem__(key)) + else: + return super().__getitem__(key) + + def filter( + self, + excinfo_or_fn: ExceptionInfo[BaseException] | Callable[[TracebackEntry], bool], + /, + ) -> Traceback: + """Return a Traceback instance with certain items removed. 
+ + If the filter is an `ExceptionInfo`, removes all the ``TracebackEntry``s + which are hidden (see ishidden() above). + + Otherwise, the filter is a function that gets a single argument, a + ``TracebackEntry`` instance, and should return True when the item should + be added to the ``Traceback``, False when not. + """ + if isinstance(excinfo_or_fn, ExceptionInfo): + fn = lambda x: not x.ishidden(excinfo_or_fn) # noqa: E731 + else: + fn = excinfo_or_fn + return Traceback(filter(fn, self)) + + def recursionindex(self) -> int | None: + """Return the index of the frame/TracebackEntry where recursion originates if + appropriate, None if no recursion occurred.""" + cache: dict[tuple[Any, int, int], list[dict[str, Any]]] = {} + for i, entry in enumerate(self): + # id for the code.raw is needed to work around + # the strange metaprogramming in the decorator lib from pypi + # which generates code objects that have hash/value equality + # XXX needs a test + key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno + values = cache.setdefault(key, []) + # Since Python 3.13 f_locals is a proxy, freeze it. + loc = dict(entry.frame.f_locals) + if values: + for otherloc in values: + if otherloc == loc: + return i + values.append(loc) + return None + + +def stringify_exception( + exc: BaseException, include_subexception_msg: bool = True +) -> str: + try: + notes = getattr(exc, "__notes__", []) + except KeyError: + # Workaround for https://github.com/python/cpython/issues/98778 on + # Python <= 3.9, and some 3.10 and 3.11 patch versions. + HTTPError = getattr(sys.modules.get("urllib.error", None), "HTTPError", ()) + if sys.version_info < (3, 12) and isinstance(exc, HTTPError): + notes = [] + else: # pragma: no cover + # exception not related to above bug, reraise + raise + if not include_subexception_msg and isinstance(exc, BaseExceptionGroup): + message = exc.message + else: + message = str(exc) + + return "\n".join( + [ + message, + *notes, + ] + ) + + +E = TypeVar("E", bound=BaseException, covariant=True) + + +@final +@dataclasses.dataclass +class ExceptionInfo(Generic[E]): + """Wraps sys.exc_info() objects and offers help for navigating the traceback.""" + + _assert_start_repr: ClassVar = "AssertionError('assert " + + _excinfo: tuple[type[E], E, TracebackType] | None + _striptext: str + _traceback: Traceback | None + + def __init__( + self, + excinfo: tuple[type[E], E, TracebackType] | None, + striptext: str = "", + traceback: Traceback | None = None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + self._excinfo = excinfo + self._striptext = striptext + self._traceback = traceback + + @classmethod + def from_exception( + cls, + # Ignoring error: "Cannot use a covariant type variable as a parameter". + # This is OK to ignore because this class is (conceptually) readonly. + # See https://github.com/python/mypy/issues/7049. + exception: E, # type: ignore[misc] + exprinfo: str | None = None, + ) -> ExceptionInfo[E]: + """Return an ExceptionInfo for an existing exception. + + The exception must have a non-``None`` ``__traceback__`` attribute, + otherwise this function fails with an assertion error. This means that + the exception must have been raised, or have had a traceback added with + the :py:meth:`~BaseException.with_traceback()` method. + + :param exprinfo: + A text string helping to determine if we should strip + ``AssertionError`` from the output. Defaults to the exception + message/``__str__()``. + + ..
versionadded:: 7.4 + """ + assert exception.__traceback__, ( + "Exceptions passed to ExcInfo.from_exception(...)" + " must have a non-None __traceback__." + ) + exc_info = (type(exception), exception, exception.__traceback__) + return cls.from_exc_info(exc_info, exprinfo) + + @classmethod + def from_exc_info( + cls, + exc_info: tuple[type[E], E, TracebackType], + exprinfo: str | None = None, + ) -> ExceptionInfo[E]: + """Like :func:`from_exception`, but using old-style exc_info tuple.""" + _striptext = "" + if exprinfo is None and isinstance(exc_info[1], AssertionError): + exprinfo = getattr(exc_info[1], "msg", None) + if exprinfo is None: + exprinfo = saferepr(exc_info[1]) + if exprinfo and exprinfo.startswith(cls._assert_start_repr): + _striptext = "AssertionError: " + + return cls(exc_info, _striptext, _ispytest=True) + + @classmethod + def from_current(cls, exprinfo: str | None = None) -> ExceptionInfo[BaseException]: + """Return an ExceptionInfo matching the current traceback. + + .. warning:: + + Experimental API + + :param exprinfo: + A text string helping to determine if we should strip + ``AssertionError`` from the output. Defaults to the exception + message/``__str__()``. + """ + tup = sys.exc_info() + assert tup[0] is not None, "no current exception" + assert tup[1] is not None, "no current exception" + assert tup[2] is not None, "no current exception" + exc_info = (tup[0], tup[1], tup[2]) + return ExceptionInfo.from_exc_info(exc_info, exprinfo) + + @classmethod + def for_later(cls) -> ExceptionInfo[E]: + """Return an unfilled ExceptionInfo.""" + return cls(None, _ispytest=True) + + def fill_unfilled(self, exc_info: tuple[type[E], E, TracebackType]) -> None: + """Fill an unfilled ExceptionInfo created with ``for_later()``.""" + assert self._excinfo is None, "ExceptionInfo was already filled" + self._excinfo = exc_info + + @property + def type(self) -> type[E]: + """The exception class.""" + assert self._excinfo is not None, ( + ".type can only be used after the context manager exits" + ) + return self._excinfo[0] + + @property + def value(self) -> E: + """The exception value.""" + assert self._excinfo is not None, ( + ".value can only be used after the context manager exits" + ) + return self._excinfo[1] + + @property + def tb(self) -> TracebackType: + """The exception raw traceback.""" + assert self._excinfo is not None, ( + ".tb can only be used after the context manager exits" + ) + return self._excinfo[2] + + @property + def typename(self) -> str: + """The type name of the exception.""" + assert self._excinfo is not None, ( + ".typename can only be used after the context manager exits" + ) + return self.type.__name__ + + @property + def traceback(self) -> Traceback: + """The traceback.""" + if self._traceback is None: + self._traceback = Traceback(self.tb) + return self._traceback + + @traceback.setter + def traceback(self, value: Traceback) -> None: + self._traceback = value + + def __repr__(self) -> str: + if self._excinfo is None: + return "<ExceptionInfo for raised exception>" + return f"<{self.__class__.__name__} {saferepr(self._excinfo[1])} tblen={len(self.traceback)}>" + + def exconly(self, tryshort: bool = False) -> str: + """Return the exception as a string. + + When 'tryshort' resolves to True, and the exception is an + AssertionError, only the actual exception part of the exception + representation is returned (so 'AssertionError: ' is removed from + the beginning).
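        A sketch of the difference, for an assertion failure under pytest's
        assertion rewriting::

            with pytest.raises(AssertionError) as excinfo:
                assert 1 == 2
            excinfo.exconly()               # "AssertionError: assert 1 == 2"
            excinfo.exconly(tryshort=True)  # "assert 1 == 2"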
+ """ + + def _get_single_subexc( + eg: BaseExceptionGroup[BaseException], + ) -> BaseException | None: + if len(eg.exceptions) != 1: + return None + if isinstance(e := eg.exceptions[0], BaseExceptionGroup): + return _get_single_subexc(e) + return e + + if ( + tryshort + and isinstance(self.value, BaseExceptionGroup) + and (subexc := _get_single_subexc(self.value)) is not None + ): + return f"{subexc!r} [single exception in {type(self.value).__name__}]" + + lines = format_exception_only(self.type, self.value) + text = "".join(lines) + text = text.rstrip() + if tryshort: + if text.startswith(self._striptext): + text = text[len(self._striptext) :] + return text + + def errisinstance(self, exc: EXCEPTION_OR_MORE) -> bool: + """Return True if the exception is an instance of exc. + + Consider using ``isinstance(excinfo.value, exc)`` instead. + """ + return isinstance(self.value, exc) + + def _getreprcrash(self) -> ReprFileLocation | None: + # Find last non-hidden traceback entry that led to the exception of the + # traceback, or None if all hidden. + for i in range(-1, -len(self.traceback) - 1, -1): + entry = self.traceback[i] + if not entry.ishidden(self): + path, lineno = entry.frame.code.raw.co_filename, entry.lineno + exconly = self.exconly(tryshort=True) + return ReprFileLocation(path, lineno + 1, exconly) + return None + + def getrepr( + self, + showlocals: bool = False, + style: TracebackStyle = "long", + abspath: bool = False, + tbfilter: bool | Callable[[ExceptionInfo[BaseException]], Traceback] = True, + funcargs: bool = False, + truncate_locals: bool = True, + truncate_args: bool = True, + chain: bool = True, + ) -> ReprExceptionInfo | ExceptionChainRepr: + """Return str()able representation of this exception info. + + :param bool showlocals: + Show locals per traceback entry. + Ignored if ``style=="native"``. + + :param str style: + long|short|line|no|native|value traceback style. + + :param bool abspath: + If paths should be changed to absolute or left unchanged. + + :param tbfilter: + A filter for traceback entries. + + * If false, don't hide any entries. + * If true, hide internal entries and entries that contain a local + variable ``__tracebackhide__ = True``. + * If a callable, delegates the filtering to the callable. + + Ignored if ``style`` is ``"native"``. + + :param bool funcargs: + Show fixtures ("funcargs" for legacy purposes) per traceback entry. + + :param bool truncate_locals: + With ``showlocals==True``, make sure locals can be safely represented as strings. + + :param bool truncate_args: + With ``showargs==True``, make sure args can be safely represented as strings. + + :param bool chain: + If chained exceptions in Python 3 should be shown. + + .. versionchanged:: 3.9 + + Added the ``chain`` parameter. + """ + if style == "native": + return ReprExceptionInfo( + reprtraceback=ReprTracebackNative( + format_exception( + self.type, + self.value, + self.traceback[0]._rawentry if self.traceback else None, + ) + ), + reprcrash=self._getreprcrash(), + ) + + fmt = FormattedExcinfo( + showlocals=showlocals, + style=style, + abspath=abspath, + tbfilter=tbfilter, + funcargs=funcargs, + truncate_locals=truncate_locals, + truncate_args=truncate_args, + chain=chain, + ) + return fmt.repr_excinfo(self) + + def match(self, regexp: str | re.Pattern[str]) -> Literal[True]: + """Check whether the regular expression `regexp` matches the string + representation of the exception using :func:`python:re.search`. + + If it matches `True` is returned, otherwise an `AssertionError` is raised. 
+ """ + __tracebackhide__ = True + value = stringify_exception(self.value) + msg = f"Regex pattern did not match.\n Regex: {regexp!r}\n Input: {value!r}" + if regexp == value: + msg += "\n Did you mean to `re.escape()` the regex?" + assert re.search(regexp, value), msg + # Return True to allow for "assert excinfo.match()". + return True + + def _group_contains( + self, + exc_group: BaseExceptionGroup[BaseException], + expected_exception: EXCEPTION_OR_MORE, + match: str | re.Pattern[str] | None, + target_depth: int | None = None, + current_depth: int = 1, + ) -> bool: + """Return `True` if a `BaseExceptionGroup` contains a matching exception.""" + if (target_depth is not None) and (current_depth > target_depth): + # already descended past the target depth + return False + for exc in exc_group.exceptions: + if isinstance(exc, BaseExceptionGroup): + if self._group_contains( + exc, expected_exception, match, target_depth, current_depth + 1 + ): + return True + if (target_depth is not None) and (current_depth != target_depth): + # not at the target depth, no match + continue + if not isinstance(exc, expected_exception): + continue + if match is not None: + value = stringify_exception(exc) + if not re.search(match, value): + continue + return True + return False + + def group_contains( + self, + expected_exception: EXCEPTION_OR_MORE, + *, + match: str | re.Pattern[str] | None = None, + depth: int | None = None, + ) -> bool: + """Check whether a captured exception group contains a matching exception. + + :param Type[BaseException] | Tuple[Type[BaseException]] expected_exception: + The expected exception type, or a tuple if one of multiple possible + exception types are expected. + + :param str | re.Pattern[str] | None match: + If specified, a string containing a regular expression, + or a regular expression object, that is tested against the string + representation of the exception and its `PEP-678 ` `__notes__` + using :func:`re.search`. + + To match a literal string that may contain :ref:`special characters + `, the pattern can first be escaped with :func:`re.escape`. + + :param Optional[int] depth: + If `None`, will search for a matching exception at any nesting depth. + If >= 1, will only match an exception if it's at the specified depth (depth = 1 being + the exceptions contained within the topmost exception group). + + .. versionadded:: 8.0 + + .. warning:: + This helper makes it easy to check for the presence of specific exceptions, + but it is very bad for checking that the group does *not* contain + *any other exceptions*. + You should instead consider using :class:`pytest.RaisesGroup` + + """ + msg = "Captured exception is not an instance of `BaseExceptionGroup`" + assert isinstance(self.value, BaseExceptionGroup), msg + msg = "`depth` must be >= 1 if specified" + assert (depth is None) or (depth >= 1), msg + return self._group_contains(self.value, expected_exception, match, depth) + + +if TYPE_CHECKING: + from typing_extensions import TypeAlias + + # Type alias for the `tbfilter` setting: + # bool: If True, it should be filtered using Traceback.filter() + # callable: A callable that takes an ExceptionInfo and returns the filtered traceback. 
+ TracebackFilter: TypeAlias = Union[ + bool, Callable[[ExceptionInfo[BaseException]], Traceback] + ] + + +@dataclasses.dataclass +class FormattedExcinfo: + """Presenting information about failing Functions and Generators.""" + + # for traceback entries + flow_marker: ClassVar = ">" + fail_marker: ClassVar = "E" + + showlocals: bool = False + style: TracebackStyle = "long" + abspath: bool = True + tbfilter: TracebackFilter = True + funcargs: bool = False + truncate_locals: bool = True + truncate_args: bool = True + chain: bool = True + astcache: dict[str | Path, ast.AST] = dataclasses.field( + default_factory=dict, init=False, repr=False + ) + + def _getindent(self, source: Source) -> int: + # Figure out indent for the given source. + try: + s = str(source.getstatement(len(source) - 1)) + except KeyboardInterrupt: + raise + except BaseException: + try: + s = str(source[-1]) + except KeyboardInterrupt: + raise + except BaseException: + return 0 + return 4 + (len(s) - len(s.lstrip())) + + def _getentrysource(self, entry: TracebackEntry) -> Source | None: + source = entry.getsource(self.astcache) + if source is not None: + source = source.deindent() + return source + + def repr_args(self, entry: TracebackEntry) -> ReprFuncArgs | None: + if self.funcargs: + args = [] + for argname, argvalue in entry.frame.getargs(var=True): + if self.truncate_args: + str_repr = saferepr(argvalue) + else: + str_repr = saferepr(argvalue, maxsize=None) + args.append((argname, str_repr)) + return ReprFuncArgs(args) + return None + + def get_source( + self, + source: Source | None, + line_index: int = -1, + excinfo: ExceptionInfo[BaseException] | None = None, + short: bool = False, + end_line_index: int | None = None, + colno: int | None = None, + end_colno: int | None = None, + ) -> list[str]: + """Return formatted and marked up source lines.""" + lines = [] + if source is not None and line_index < 0: + line_index += len(source) + if source is None or line_index >= len(source.lines) or line_index < 0: + # `line_index` could still be outside `range(len(source.lines))` if + # we're processing AST with pathological position attributes. + source = Source("???") + line_index = 0 + space_prefix = " " + if short: + lines.append(space_prefix + source.lines[line_index].strip()) + lines.extend( + self.get_highlight_arrows_for_line( + raw_line=source.raw_lines[line_index], + line=source.lines[line_index].strip(), + lineno=line_index, + end_lineno=end_line_index, + colno=colno, + end_colno=end_colno, + ) + ) + else: + for line in source.lines[:line_index]: + lines.append(space_prefix + line) + lines.append(self.flow_marker + " " + source.lines[line_index]) + lines.extend( + self.get_highlight_arrows_for_line( + raw_line=source.raw_lines[line_index], + line=source.lines[line_index], + lineno=line_index, + end_lineno=end_line_index, + colno=colno, + end_colno=end_colno, + ) + ) + for line in source.lines[line_index + 1 :]: + lines.append(space_prefix + line) + if excinfo is not None: + indent = 4 if short else self._getindent(source) + lines.extend(self.get_exconly(excinfo, indent=indent, markall=True)) + return lines + + def get_highlight_arrows_for_line( + self, + line: str, + raw_line: str, + lineno: int | None, + end_lineno: int | None, + colno: int | None, + end_colno: int | None, + ) -> list[str]: + """Return characters highlighting a source line. 
+ + Example with colno and end_colno pointing to the bar expression: + "foo() + bar()" + returns " ^^^^^" + """ + if lineno != end_lineno: + # Don't handle expressions that span multiple lines. + return [] + if colno is None or end_colno is None: + # Can't do anything without column information. + return [] + + num_stripped_chars = len(raw_line) - len(line) + + start_char_offset = _byte_offset_to_character_offset(raw_line, colno) + end_char_offset = _byte_offset_to_character_offset(raw_line, end_colno) + num_carets = end_char_offset - start_char_offset + # If the highlight would span the whole line, it is redundant, don't + # show it. + if num_carets >= len(line.strip()): + return [] + + highlights = " " + highlights += " " * (start_char_offset - num_stripped_chars + 1) + highlights += "^" * num_carets + return [highlights] + + def get_exconly( + self, + excinfo: ExceptionInfo[BaseException], + indent: int = 4, + markall: bool = False, + ) -> list[str]: + lines = [] + indentstr = " " * indent + # Get the real exception information out. + exlines = excinfo.exconly(tryshort=True).split("\n") + failindent = self.fail_marker + indentstr[1:] + for line in exlines: + lines.append(failindent + line) + if not markall: + failindent = indentstr + return lines + + def repr_locals(self, locals: Mapping[str, object]) -> ReprLocals | None: + if self.showlocals: + lines = [] + keys = [loc for loc in locals if loc[0] != "@"] + keys.sort() + for name in keys: + value = locals[name] + if name == "__builtins__": + lines.append("__builtins__ = <builtins>") + else: + # This formatting could all be handled by the + # _repr() function, which is only reprlib.Repr in + # disguise, so is very configurable. + if self.truncate_locals: + str_repr = saferepr(value) + else: + str_repr = safeformat(value) + # if len(str_repr) < 70 or not isinstance(value, (list, tuple, dict)): + lines.append(f"{name:<10} = {str_repr}") + # else: + # self._line("%-10s =\\" % (name,)) + # # XXX + # pprint.pprint(value, stream=self.excinfowriter) + return ReprLocals(lines) + return None + + def repr_traceback_entry( + self, + entry: TracebackEntry | None, + excinfo: ExceptionInfo[BaseException] | None = None, + ) -> ReprEntry: + lines: list[str] = [] + style = ( + entry._repr_style + if entry is not None and entry._repr_style is not None + else self.style + ) + if style in ("short", "long") and entry is not None: + source = self._getentrysource(entry) + if source is None: + source = Source("???") + line_index = 0 + end_line_index, colno, end_colno = None, None, None + else: + line_index = entry.relline + end_line_index = entry.end_lineno_relative + colno = entry.colno + end_colno = entry.end_colno + short = style == "short" + reprargs = self.repr_args(entry) if not short else None + s = self.get_source( + source=source, + line_index=line_index, + excinfo=excinfo, + short=short, + end_line_index=end_line_index, + colno=colno, + end_colno=end_colno, + ) + lines.extend(s) + if short: + message = f"in {entry.name}" + else: + message = (excinfo and excinfo.typename) or "" + entry_path = entry.path + path = self._makepath(entry_path) + reprfileloc = ReprFileLocation(path, entry.lineno + 1, message) + localsrepr = self.repr_locals(entry.locals) + return ReprEntry(lines, reprargs, localsrepr, reprfileloc, style) + elif style == "value": + if excinfo: + lines.extend(str(excinfo.value).split("\n")) + return ReprEntry(lines, None, None, None, style) + else: + if excinfo: + lines.extend(self.get_exconly(excinfo, indent=4)) + return ReprEntry(lines, None, None,
None, style) + + def _makepath(self, path: Path | str) -> str: + if not self.abspath and isinstance(path, Path): + try: + np = bestrelpath(Path.cwd(), path) + except OSError: + return str(path) + if len(np) < len(str(path)): + return np + return str(path) + + def repr_traceback(self, excinfo: ExceptionInfo[BaseException]) -> ReprTraceback: + traceback = filter_excinfo_traceback(self.tbfilter, excinfo) + + if isinstance(excinfo.value, RecursionError): + traceback, extraline = self._truncate_recursive_traceback(traceback) + else: + extraline = None + + if not traceback: + if extraline is None: + extraline = "All traceback entries are hidden. Pass `--full-trace` to see hidden and internal frames." + entries = [self.repr_traceback_entry(None, excinfo)] + return ReprTraceback(entries, extraline, style=self.style) + + last = traceback[-1] + if self.style == "value": + entries = [self.repr_traceback_entry(last, excinfo)] + return ReprTraceback(entries, None, style=self.style) + + entries = [ + self.repr_traceback_entry(entry, excinfo if last == entry else None) + for entry in traceback + ] + return ReprTraceback(entries, extraline, style=self.style) + + def _truncate_recursive_traceback( + self, traceback: Traceback + ) -> tuple[Traceback, str | None]: + """Truncate the given recursive traceback trying to find the starting + point of the recursion. + + The detection is done by going through each traceback entry and + finding the point in which the locals of the frame are equal to the + locals of a previous frame (see ``recursionindex()``). + + Handle the situation where the recursion process might raise an + exception (for example comparing numpy arrays using equality raises a + TypeError), in which case we do our best to warn the user of the + error and show a limited traceback. + """ + try: + recursionindex = traceback.recursionindex() + except Exception as e: + max_frames = 10 + extraline: str | None = ( + "!!! Recursion error detected, but an error occurred locating the origin of recursion.\n" + " The following exception happened when comparing locals in the stack frame:\n" + f" {type(e).__name__}: {e!s}\n" + f" Displaying first and last {max_frames} stack frames out of {len(traceback)}." + ) + # Type ignored because adding two instances of a List subtype + # currently incorrectly has type List instead of the subtype. + traceback = traceback[:max_frames] + traceback[-max_frames:] # type: ignore + else: + if recursionindex is not None: + extraline = "!!! Recursion detected (same locals & position)" + traceback = traceback[: recursionindex + 1] + else: + extraline = None + + return traceback, extraline + + def repr_excinfo(self, excinfo: ExceptionInfo[BaseException]) -> ExceptionChainRepr: + repr_chain: list[tuple[ReprTraceback, ReprFileLocation | None, str | None]] = [] + e: BaseException | None = excinfo.value + excinfo_: ExceptionInfo[BaseException] | None = excinfo + descr = None + seen: set[int] = set() + while e is not None and id(e) not in seen: + seen.add(id(e)) + + if excinfo_: + # Fall back to native traceback as a temporary workaround until + # full support for exception groups added to ExceptionInfo. 
+ # See https://github.com/pytest-dev/pytest/issues/9159 + reprtraceback: ReprTraceback | ReprTracebackNative + if isinstance(e, BaseExceptionGroup): + # don't filter any sub-exceptions since they shouldn't have any internal frames + traceback = filter_excinfo_traceback(self.tbfilter, excinfo) + reprtraceback = ReprTracebackNative( + format_exception( + type(excinfo.value), + excinfo.value, + traceback[0]._rawentry, + ) + ) + else: + reprtraceback = self.repr_traceback(excinfo_) + reprcrash = excinfo_._getreprcrash() + else: + # Fallback to native repr if the exception doesn't have a traceback: + # ExceptionInfo objects require a full traceback to work. + reprtraceback = ReprTracebackNative(format_exception(type(e), e, None)) + reprcrash = None + repr_chain += [(reprtraceback, reprcrash, descr)] + + if e.__cause__ is not None and self.chain: + e = e.__cause__ + excinfo_ = ExceptionInfo.from_exception(e) if e.__traceback__ else None + descr = "The above exception was the direct cause of the following exception:" + elif ( + e.__context__ is not None and not e.__suppress_context__ and self.chain + ): + e = e.__context__ + excinfo_ = ExceptionInfo.from_exception(e) if e.__traceback__ else None + descr = "During handling of the above exception, another exception occurred:" + else: + e = None + repr_chain.reverse() + return ExceptionChainRepr(repr_chain) + + +@dataclasses.dataclass(eq=False) +class TerminalRepr: + def __str__(self) -> str: + # FYI this is called from pytest-xdist's serialization of exception + # information. + io = StringIO() + tw = TerminalWriter(file=io) + self.toterminal(tw) + return io.getvalue().strip() + + def __repr__(self) -> str: + return f"<{self.__class__} instance at {id(self):0x}>" + + def toterminal(self, tw: TerminalWriter) -> None: + raise NotImplementedError() + + +# This class is abstract -- only subclasses are instantiated. +@dataclasses.dataclass(eq=False) +class ExceptionRepr(TerminalRepr): + # Provided by subclasses. + reprtraceback: ReprTraceback + reprcrash: ReprFileLocation | None + sections: list[tuple[str, str, str]] = dataclasses.field( + init=False, default_factory=list + ) + + def addsection(self, name: str, content: str, sep: str = "-") -> None: + self.sections.append((name, content, sep)) + + def toterminal(self, tw: TerminalWriter) -> None: + for name, content, sep in self.sections: + tw.sep(sep, name) + tw.line(content) + + +@dataclasses.dataclass(eq=False) +class ExceptionChainRepr(ExceptionRepr): + chain: Sequence[tuple[ReprTraceback, ReprFileLocation | None, str | None]] + + def __init__( + self, + chain: Sequence[tuple[ReprTraceback, ReprFileLocation | None, str | None]], + ) -> None: + # reprcrash and reprtraceback of the outermost (the newest) exception + # in the chain. 
+ super().__init__( + reprtraceback=chain[-1][0], + reprcrash=chain[-1][1], + ) + self.chain = chain + + def toterminal(self, tw: TerminalWriter) -> None: + for element in self.chain: + element[0].toterminal(tw) + if element[2] is not None: + tw.line("") + tw.line(element[2], yellow=True) + super().toterminal(tw) + + +@dataclasses.dataclass(eq=False) +class ReprExceptionInfo(ExceptionRepr): + reprtraceback: ReprTraceback + reprcrash: ReprFileLocation | None + + def toterminal(self, tw: TerminalWriter) -> None: + self.reprtraceback.toterminal(tw) + super().toterminal(tw) + + +@dataclasses.dataclass(eq=False) +class ReprTraceback(TerminalRepr): + reprentries: Sequence[ReprEntry | ReprEntryNative] + extraline: str | None + style: TracebackStyle + + entrysep: ClassVar = "_ " + + def toterminal(self, tw: TerminalWriter) -> None: + # The entries might have different styles. + for i, entry in enumerate(self.reprentries): + if entry.style == "long": + tw.line("") + entry.toterminal(tw) + if i < len(self.reprentries) - 1: + next_entry = self.reprentries[i + 1] + if entry.style == "long" or ( + entry.style == "short" and next_entry.style == "long" + ): + tw.sep(self.entrysep) + + if self.extraline: + tw.line(self.extraline) + + +class ReprTracebackNative(ReprTraceback): + def __init__(self, tblines: Sequence[str]) -> None: + self.reprentries = [ReprEntryNative(tblines)] + self.extraline = None + self.style = "native" + + +@dataclasses.dataclass(eq=False) +class ReprEntryNative(TerminalRepr): + lines: Sequence[str] + + style: ClassVar[TracebackStyle] = "native" + + def toterminal(self, tw: TerminalWriter) -> None: + tw.write("".join(self.lines)) + + +@dataclasses.dataclass(eq=False) +class ReprEntry(TerminalRepr): + lines: Sequence[str] + reprfuncargs: ReprFuncArgs | None + reprlocals: ReprLocals | None + reprfileloc: ReprFileLocation | None + style: TracebackStyle + + def _write_entry_lines(self, tw: TerminalWriter) -> None: + """Write the source code portions of a list of traceback entries with syntax highlighting. + + Usually entries are lines like these: + + " x = 1" + "> assert x == 2" + "E assert 1 == 2" + + This function takes care of rendering the "source" portions of it (the lines without + the "E" prefix) using syntax highlighting, taking care not to highlight the ">" + character, as doing so might break line continuations.
+ """ + if not self.lines: + return + + if self.style == "value": + # Using tw.write instead of tw.line for testing purposes due to TWMock implementation; + # lines written with TWMock.line and TWMock._write_source cannot be distinguished + # from each other, whereas lines written with TWMock.write are marked with TWMock.WRITE + for line in self.lines: + tw.write(line) + tw.write("\n") + return + + # separate indents and source lines that are not failures: we want to + # highlight the code but not the indentation, which may contain markers + # such as "> assert 0" + fail_marker = f"{FormattedExcinfo.fail_marker} " + indent_size = len(fail_marker) + indents: list[str] = [] + source_lines: list[str] = [] + failure_lines: list[str] = [] + for index, line in enumerate(self.lines): + is_failure_line = line.startswith(fail_marker) + if is_failure_line: + # from this point on all lines are considered part of the failure + failure_lines.extend(self.lines[index:]) + break + else: + indents.append(line[:indent_size]) + source_lines.append(line[indent_size:]) + + tw._write_source(source_lines, indents) + + # failure lines are always completely red and bold + for line in failure_lines: + tw.line(line, bold=True, red=True) + + def toterminal(self, tw: TerminalWriter) -> None: + if self.style == "short": + if self.reprfileloc: + self.reprfileloc.toterminal(tw) + self._write_entry_lines(tw) + if self.reprlocals: + self.reprlocals.toterminal(tw, indent=" " * 8) + return + + if self.reprfuncargs: + self.reprfuncargs.toterminal(tw) + + self._write_entry_lines(tw) + + if self.reprlocals: + tw.line("") + self.reprlocals.toterminal(tw) + if self.reprfileloc: + if self.lines: + tw.line("") + self.reprfileloc.toterminal(tw) + + def __str__(self) -> str: + return "{}\n{}\n{}".format( + "\n".join(self.lines), self.reprlocals, self.reprfileloc + ) + + +@dataclasses.dataclass(eq=False) +class ReprFileLocation(TerminalRepr): + path: str + lineno: int + message: str + + def __post_init__(self) -> None: + self.path = str(self.path) + + def toterminal(self, tw: TerminalWriter) -> None: + # Filename and lineno output for each entry, using an output format + # that most editors understand. + msg = self.message + i = msg.find("\n") + if i != -1: + msg = msg[:i] + tw.write(self.path, bold=True, red=True) + tw.line(f":{self.lineno}: {msg}") + + +@dataclasses.dataclass(eq=False) +class ReprLocals(TerminalRepr): + lines: Sequence[str] + + def toterminal(self, tw: TerminalWriter, indent="") -> None: + for line in self.lines: + tw.line(indent + line) + + +@dataclasses.dataclass(eq=False) +class ReprFuncArgs(TerminalRepr): + args: Sequence[tuple[str, object]] + + def toterminal(self, tw: TerminalWriter) -> None: + if self.args: + linesofar = "" + for name, value in self.args: + ns = f"{name} = {value}" + if len(ns) + len(linesofar) + 2 > tw.fullwidth: + if linesofar: + tw.line(linesofar) + linesofar = ns + else: + if linesofar: + linesofar += ", " + ns + else: + linesofar = ns + if linesofar: + tw.line(linesofar) + tw.line("") + + +def getfslineno(obj: object) -> tuple[str | Path, int]: + """Return source location (path, lineno) for the given object. + + If the source cannot be determined return ("", -1). + + The line number is 0-based. + """ + # xxx let decorators etc specify a sane ordering + # NOTE: this used to be done in _pytest.compat.getfslineno, initially added + # in 6ec13a2b9. It ("place_as") appears to be something very custom. 
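+    # A hypothetical call, to make the contract concrete (names and values
+    # are illustrative only):
+    #
+    #     path, lineno = getfslineno(my_test_function)
+    #     # -> (Path("/project/test_example.py"), 7), the 0-based "def" line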
+ obj = get_real_func(obj) + if hasattr(obj, "place_as"): + obj = obj.place_as + + try: + code = Code.from_function(obj) + except TypeError: + try: + fn = inspect.getsourcefile(obj) or inspect.getfile(obj) # type: ignore[arg-type] + except TypeError: + return "", -1 + + fspath = (fn and absolutepath(fn)) or "" + lineno = -1 + if fspath: + try: + _, lineno = findsource(obj) + except OSError: + pass + return fspath, lineno + + return code.path, code.firstlineno + + +def _byte_offset_to_character_offset(str, offset): + """Converts a byte based offset in a string to a code-point.""" + as_utf8 = str.encode("utf-8") + return len(as_utf8[:offset].decode("utf-8", errors="replace")) + + +# Relative paths that we use to filter traceback entries from appearing to the user; +# see filter_traceback. +# note: if we need to add more paths than what we have now we should probably use a list +# for better maintenance. + +_PLUGGY_DIR = Path(pluggy.__file__.rstrip("oc")) +# pluggy is either a package or a single module depending on the version +if _PLUGGY_DIR.name == "__init__.py": + _PLUGGY_DIR = _PLUGGY_DIR.parent +_PYTEST_DIR = Path(_pytest.__file__).parent + + +def filter_traceback(entry: TracebackEntry) -> bool: + """Return True if a TracebackEntry instance should be included in tracebacks. + + We hide traceback entries of: + + * dynamically generated code (no code to show up for it); + * internal traceback from pytest or its internal libraries, py and pluggy. + """ + # entry.path might sometimes return a str object when the entry + # points to dynamically generated code. + # See https://bitbucket.org/pytest-dev/py/issues/71. + raw_filename = entry.frame.code.raw.co_filename + is_generated = "<" in raw_filename and ">" in raw_filename + if is_generated: + return False + + # entry.path might point to a non-existing file, in which case it will + # also return a str object. See #1133. + p = Path(entry.path) + + parents = p.parents + if _PLUGGY_DIR in parents: + return False + if _PYTEST_DIR in parents: + return False + + return True + + +def filter_excinfo_traceback( + tbfilter: TracebackFilter, excinfo: ExceptionInfo[BaseException] +) -> Traceback: + """Filter the exception traceback in ``excinfo`` according to ``tbfilter``.""" + if callable(tbfilter): + return tbfilter(excinfo) + elif tbfilter: + return excinfo.traceback.filter(excinfo) + else: + return excinfo.traceback diff --git a/.venv/lib/python3.11/site-packages/_pytest/_code/source.py b/.venv/lib/python3.11/site-packages/_pytest/_code/source.py new file mode 100644 index 0000000..a8f7201 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/_code/source.py @@ -0,0 +1,225 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +import ast +from bisect import bisect_right +from collections.abc import Iterable +from collections.abc import Iterator +import inspect +import textwrap +import tokenize +import types +from typing import overload +import warnings + + +class Source: + """An immutable object holding a source code fragment. + + When using Source(...), the source lines are deindented. 
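+
+    For example (an illustrative sketch)::
+
+        Source("    if x:\n        pass").lines == ["if x:", "    pass"]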
+    """
+
+    def __init__(self, obj: object = None) -> None:
+        if not obj:
+            self.lines: list[str] = []
+            self.raw_lines: list[str] = []
+        elif isinstance(obj, Source):
+            self.lines = obj.lines
+            self.raw_lines = obj.raw_lines
+        elif isinstance(obj, (tuple, list)):
+            self.lines = deindent(x.rstrip("\n") for x in obj)
+            self.raw_lines = list(x.rstrip("\n") for x in obj)
+        elif isinstance(obj, str):
+            self.lines = deindent(obj.split("\n"))
+            self.raw_lines = obj.split("\n")
+        else:
+            try:
+                rawcode = getrawcode(obj)
+                src = inspect.getsource(rawcode)
+            except TypeError:
+                src = inspect.getsource(obj)  # type: ignore[arg-type]
+            self.lines = deindent(src.split("\n"))
+            self.raw_lines = src.split("\n")
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, Source):
+            return NotImplemented
+        return self.lines == other.lines
+
+    # Ignore type because of https://github.com/python/mypy/issues/4266.
+    __hash__ = None  # type: ignore
+
+    @overload
+    def __getitem__(self, key: int) -> str: ...
+
+    @overload
+    def __getitem__(self, key: slice) -> Source: ...
+
+    def __getitem__(self, key: int | slice) -> str | Source:
+        if isinstance(key, int):
+            return self.lines[key]
+        else:
+            if key.step not in (None, 1):
+                raise IndexError("cannot slice a Source with a step")
+            newsource = Source()
+            newsource.lines = self.lines[key.start : key.stop]
+            newsource.raw_lines = self.raw_lines[key.start : key.stop]
+            return newsource
+
+    def __iter__(self) -> Iterator[str]:
+        return iter(self.lines)
+
+    def __len__(self) -> int:
+        return len(self.lines)
+
+    def strip(self) -> Source:
+        """Return new Source object with trailing and leading blank lines removed."""
+        start, end = 0, len(self)
+        while start < end and not self.lines[start].strip():
+            start += 1
+        while end > start and not self.lines[end - 1].strip():
+            end -= 1
+        source = Source()
+        source.raw_lines = self.raw_lines
+        source.lines[:] = self.lines[start:end]
+        return source
+
+    def indent(self, indent: str = " " * 4) -> Source:
+        """Return a copy of the source object with all lines indented by the
+        given indent-string."""
+        newsource = Source()
+        newsource.raw_lines = self.raw_lines
+        newsource.lines = [(indent + line) for line in self.lines]
+        return newsource
+
+    def getstatement(self, lineno: int) -> Source:
+        """Return Source statement which contains the given linenumber
+        (counted from 0)."""
+        start, end = self.getstatementrange(lineno)
+        return self[start:end]
+
+    def getstatementrange(self, lineno: int) -> tuple[int, int]:
+        """Return (start, end) tuple which spans the minimal statement region
+        containing the given lineno."""
+        if not (0 <= lineno < len(self)):
+            raise IndexError("lineno out of range")
+        ast, start, end = getstatementrange_ast(lineno, self)
+        return start, end
+
+    def deindent(self) -> Source:
+        """Return a new Source object deindented."""
+        newsource = Source()
+        newsource.lines[:] = deindent(self.lines)
+        newsource.raw_lines = self.raw_lines
+        return newsource
+
+    def __str__(self) -> str:
+        return "\n".join(self.lines)
+
+
+#
+# helper functions
+#
+
+
+def findsource(obj) -> tuple[Source | None, int]:
+    try:
+        sourcelines, lineno = inspect.findsource(obj)
+    except Exception:
+        return None, -1
+    source = Source()
+    source.lines = [line.rstrip() for line in sourcelines]
+    source.raw_lines = sourcelines
+    return source, lineno
+
+
+def getrawcode(obj: object, trycall: bool = True) -> types.CodeType:
+    """Return code object for given function."""
+    try:
+        return obj.__code__  # type: ignore[attr-defined,no-any-return]
+    except AttributeError:
+        pass
+    if trycall:
+        call = getattr(obj, "__call__", None)
+        if call and not isinstance(obj, type):
+            return getrawcode(call, trycall=False)
+    raise TypeError(f"could not get code object for {obj!r}")
+
+
+def deindent(lines: Iterable[str]) -> list[str]:
+    return textwrap.dedent("\n".join(lines)).splitlines()
+
+
+def get_statement_startend2(lineno: int, node: ast.AST) -> tuple[int, int | None]:
+    # Flatten all statements and except handlers into one lineno-list.
+    # AST's line numbers start indexing at 1.
+    values: list[int] = []
+    for x in ast.walk(node):
+        if isinstance(x, (ast.stmt, ast.ExceptHandler)):
+            # The lineno points to the class/def, so need to include the decorators.
+            if isinstance(x, (ast.ClassDef, ast.FunctionDef, ast.AsyncFunctionDef)):
+                for d in x.decorator_list:
+                    values.append(d.lineno - 1)
+            values.append(x.lineno - 1)
+            for name in ("finalbody", "orelse"):
+                val: list[ast.stmt] | None = getattr(x, name, None)
+                if val:
+                    # Treat the finally/orelse part as its own statement.
+                    values.append(val[0].lineno - 1 - 1)
+    values.sort()
+    insert_index = bisect_right(values, lineno)
+    start = values[insert_index - 1]
+    if insert_index >= len(values):
+        end = None
+    else:
+        end = values[insert_index]
+    return start, end
+
+
+def getstatementrange_ast(
+    lineno: int,
+    source: Source,
+    assertion: bool = False,
+    astnode: ast.AST | None = None,
+) -> tuple[ast.AST, int, int]:
+    if astnode is None:
+        content = str(source)
+        # See #4260:
+        # Don't produce duplicate warnings when compiling source to find AST.
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore")
+            astnode = ast.parse(content, "source", "exec")
+
+    start, end = get_statement_startend2(lineno, astnode)
+    # We need to correct the end:
+    # - ast-parsing strips comments
+    # - there might be empty lines
+    # - there might be less-indented code blocks at the end
+    if end is None:
+        end = len(source.lines)
+
+    if end > start + 1:
+        # Make sure we don't span differently indented code blocks
+        # by using the BlockFinder helper that inspect.getsource() itself uses.
+        block_finder = inspect.BlockFinder()
+        # If we start with an indented line, put the block finder into "started" mode.
+        block_finder.started = (
+            bool(source.lines[start]) and source.lines[start][0].isspace()
+        )
+        it = ((x + "\n") for x in source.lines[start:end])
+        try:
+            for tok in tokenize.generate_tokens(lambda: next(it)):
+                block_finder.tokeneater(*tok)
+        except (inspect.EndOfBlock, IndentationError):
+            end = block_finder.last + start
+        except Exception:
+            pass
+
+    # The end might still point to a comment or empty line, correct it.
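+    # For instance (an illustrative trace): if the statement spans lines
+    # 0-1 and is followed by "" and "# done", the loop below walks ``end``
+    # back from 4 to 2 so the range covers only real code.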
+ while end: + line = source.lines[end - 1].lstrip() + if line.startswith("#") or not line: + end -= 1 + else: + break + return astnode, start, end diff --git a/.venv/lib/python3.11/site-packages/_pytest/_io/__init__.py b/.venv/lib/python3.11/site-packages/_pytest/_io/__init__.py new file mode 100644 index 0000000..b0155b1 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/_io/__init__.py @@ -0,0 +1,10 @@ +from __future__ import annotations + +from .terminalwriter import get_terminal_width +from .terminalwriter import TerminalWriter + + +__all__ = [ + "TerminalWriter", + "get_terminal_width", +] diff --git a/.venv/lib/python3.11/site-packages/_pytest/_io/pprint.py b/.venv/lib/python3.11/site-packages/_pytest/_io/pprint.py new file mode 100644 index 0000000..28f0690 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/_io/pprint.py @@ -0,0 +1,673 @@ +# mypy: allow-untyped-defs +# This module was imported from the cpython standard library +# (https://github.com/python/cpython/) at commit +# c5140945c723ae6c4b7ee81ff720ac8ea4b52cfd (python3.12). +# +# +# Original Author: Fred L. Drake, Jr. +# fdrake@acm.org +# +# This is a simple little module I wrote to make life easier. I didn't +# see anything quite like it in the library, though I may have overlooked +# something. I wrote this when I was trying to read some heavily nested +# tuples with fairly non-descriptive content. This is modeled very much +# after Lisp/Scheme - style pretty-printing of lists. If you find it +# useful, thank small children who sleep at night. +from __future__ import annotations + +import collections as _collections +from collections.abc import Callable +from collections.abc import Iterator +import dataclasses as _dataclasses +from io import StringIO as _StringIO +import re +import types as _types +from typing import Any +from typing import IO + + +class _safe_key: + """Helper function for key functions when sorting unorderable objects. + + The wrapped-object will fallback to a Py2.x style comparison for + unorderable types (sorting first comparing the type name and then by + the obj ids). Does not work recursively, so dict.items() must have + _safe_key applied to both the key and the value. + + """ + + __slots__ = ["obj"] + + def __init__(self, obj): + self.obj = obj + + def __lt__(self, other): + try: + return self.obj < other.obj + except TypeError: + return (str(type(self.obj)), id(self.obj)) < ( + str(type(other.obj)), + id(other.obj), + ) + + +def _safe_tuple(t): + """Helper function for comparing 2-tuples""" + return _safe_key(t[0]), _safe_key(t[1]) + + +class PrettyPrinter: + def __init__( + self, + indent: int = 4, + width: int = 80, + depth: int | None = None, + ) -> None: + """Handle pretty printing operations onto a stream using a set of + configured parameters. + + indent + Number of spaces to indent for each level of nesting. + + width + Attempted maximum number of columns in the output. + + depth + The maximum depth to print out nested structures. 
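+
+        For example (illustrative; ``data`` stands for any object)::
+
+            PrettyPrinter(indent=4, width=40, depth=2).pformat(data)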
+ + """ + if indent < 0: + raise ValueError("indent must be >= 0") + if depth is not None and depth <= 0: + raise ValueError("depth must be > 0") + if not width: + raise ValueError("width must be != 0") + self._depth = depth + self._indent_per_level = indent + self._width = width + + def pformat(self, object: Any) -> str: + sio = _StringIO() + self._format(object, sio, 0, 0, set(), 0) + return sio.getvalue() + + def _format( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + objid = id(object) + if objid in context: + stream.write(_recursion(object)) + return + + p = self._dispatch.get(type(object).__repr__, None) + if p is not None: + context.add(objid) + p(self, object, stream, indent, allowance, context, level + 1) + context.remove(objid) + elif ( + _dataclasses.is_dataclass(object) + and not isinstance(object, type) + and object.__dataclass_params__.repr # type:ignore[attr-defined] + and + # Check dataclass has generated repr method. + hasattr(object.__repr__, "__wrapped__") + and "__create_fn__" in object.__repr__.__wrapped__.__qualname__ + ): + context.add(objid) + self._pprint_dataclass( + object, stream, indent, allowance, context, level + 1 + ) + context.remove(objid) + else: + stream.write(self._repr(object, context, level)) + + def _pprint_dataclass( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + cls_name = object.__class__.__name__ + items = [ + (f.name, getattr(object, f.name)) + for f in _dataclasses.fields(object) + if f.repr + ] + stream.write(cls_name + "(") + self._format_namespace_items(items, stream, indent, allowance, context, level) + stream.write(")") + + _dispatch: dict[ + Callable[..., str], + Callable[[PrettyPrinter, Any, IO[str], int, int, set[int], int], None], + ] = {} + + def _pprint_dict( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + write = stream.write + write("{") + items = sorted(object.items(), key=_safe_tuple) + self._format_dict_items(items, stream, indent, allowance, context, level) + write("}") + + _dispatch[dict.__repr__] = _pprint_dict + + def _pprint_ordered_dict( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + if not len(object): + stream.write(repr(object)) + return + cls = object.__class__ + stream.write(cls.__name__ + "(") + self._pprint_dict(object, stream, indent, allowance, context, level) + stream.write(")") + + _dispatch[_collections.OrderedDict.__repr__] = _pprint_ordered_dict + + def _pprint_list( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + stream.write("[") + self._format_items(object, stream, indent, allowance, context, level) + stream.write("]") + + _dispatch[list.__repr__] = _pprint_list + + def _pprint_tuple( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + stream.write("(") + self._format_items(object, stream, indent, allowance, context, level) + stream.write(")") + + _dispatch[tuple.__repr__] = _pprint_tuple + + def _pprint_set( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + if not len(object): + stream.write(repr(object)) + return + typ = object.__class__ + if typ is set: + stream.write("{") + 
endchar = "}" + else: + stream.write(typ.__name__ + "({") + endchar = "})" + object = sorted(object, key=_safe_key) + self._format_items(object, stream, indent, allowance, context, level) + stream.write(endchar) + + _dispatch[set.__repr__] = _pprint_set + _dispatch[frozenset.__repr__] = _pprint_set + + def _pprint_str( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + write = stream.write + if not len(object): + write(repr(object)) + return + chunks = [] + lines = object.splitlines(True) + if level == 1: + indent += 1 + allowance += 1 + max_width1 = max_width = self._width - indent + for i, line in enumerate(lines): + rep = repr(line) + if i == len(lines) - 1: + max_width1 -= allowance + if len(rep) <= max_width1: + chunks.append(rep) + else: + # A list of alternating (non-space, space) strings + parts = re.findall(r"\S*\s*", line) + assert parts + assert not parts[-1] + parts.pop() # drop empty last part + max_width2 = max_width + current = "" + for j, part in enumerate(parts): + candidate = current + part + if j == len(parts) - 1 and i == len(lines) - 1: + max_width2 -= allowance + if len(repr(candidate)) > max_width2: + if current: + chunks.append(repr(current)) + current = part + else: + current = candidate + if current: + chunks.append(repr(current)) + if len(chunks) == 1: + write(rep) + return + if level == 1: + write("(") + for i, rep in enumerate(chunks): + if i > 0: + write("\n" + " " * indent) + write(rep) + if level == 1: + write(")") + + _dispatch[str.__repr__] = _pprint_str + + def _pprint_bytes( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + write = stream.write + if len(object) <= 4: + write(repr(object)) + return + parens = level == 1 + if parens: + indent += 1 + allowance += 1 + write("(") + delim = "" + for rep in _wrap_bytes_repr(object, self._width - indent, allowance): + write(delim) + write(rep) + if not delim: + delim = "\n" + " " * indent + if parens: + write(")") + + _dispatch[bytes.__repr__] = _pprint_bytes + + def _pprint_bytearray( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + write = stream.write + write("bytearray(") + self._pprint_bytes( + bytes(object), stream, indent + 10, allowance + 1, context, level + 1 + ) + write(")") + + _dispatch[bytearray.__repr__] = _pprint_bytearray + + def _pprint_mappingproxy( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + stream.write("mappingproxy(") + self._format(object.copy(), stream, indent, allowance, context, level) + stream.write(")") + + _dispatch[_types.MappingProxyType.__repr__] = _pprint_mappingproxy + + def _pprint_simplenamespace( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + if type(object) is _types.SimpleNamespace: + # The SimpleNamespace repr is "namespace" instead of the class + # name, so we do the same here. For subclasses; use the class name. 
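+            # e.g. (illustrative): types.SimpleNamespace(a=1) pretty-prints
+            # starting with "namespace(", while a subclass named ``Point``
+            # would start with "Point(".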
+ cls_name = "namespace" + else: + cls_name = object.__class__.__name__ + items = object.__dict__.items() + stream.write(cls_name + "(") + self._format_namespace_items(items, stream, indent, allowance, context, level) + stream.write(")") + + _dispatch[_types.SimpleNamespace.__repr__] = _pprint_simplenamespace + + def _format_dict_items( + self, + items: list[tuple[Any, Any]], + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + if not items: + return + + write = stream.write + item_indent = indent + self._indent_per_level + delimnl = "\n" + " " * item_indent + for key, ent in items: + write(delimnl) + write(self._repr(key, context, level)) + write(": ") + self._format(ent, stream, item_indent, 1, context, level) + write(",") + + write("\n" + " " * indent) + + def _format_namespace_items( + self, + items: list[tuple[Any, Any]], + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + if not items: + return + + write = stream.write + item_indent = indent + self._indent_per_level + delimnl = "\n" + " " * item_indent + for key, ent in items: + write(delimnl) + write(key) + write("=") + if id(ent) in context: + # Special-case representation of recursion to match standard + # recursive dataclass repr. + write("...") + else: + self._format( + ent, + stream, + item_indent + len(key) + 1, + 1, + context, + level, + ) + + write(",") + + write("\n" + " " * indent) + + def _format_items( + self, + items: list[Any], + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + if not items: + return + + write = stream.write + item_indent = indent + self._indent_per_level + delimnl = "\n" + " " * item_indent + + for item in items: + write(delimnl) + self._format(item, stream, item_indent, 1, context, level) + write(",") + + write("\n" + " " * indent) + + def _repr(self, object: Any, context: set[int], level: int) -> str: + return self._safe_repr(object, context.copy(), self._depth, level) + + def _pprint_default_dict( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + rdf = self._repr(object.default_factory, context, level) + stream.write(f"{object.__class__.__name__}({rdf}, ") + self._pprint_dict(object, stream, indent, allowance, context, level) + stream.write(")") + + _dispatch[_collections.defaultdict.__repr__] = _pprint_default_dict + + def _pprint_counter( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + stream.write(object.__class__.__name__ + "(") + + if object: + stream.write("{") + items = object.most_common() + self._format_dict_items(items, stream, indent, allowance, context, level) + stream.write("}") + + stream.write(")") + + _dispatch[_collections.Counter.__repr__] = _pprint_counter + + def _pprint_chain_map( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + if not len(object.maps) or (len(object.maps) == 1 and not len(object.maps[0])): + stream.write(repr(object)) + return + + stream.write(object.__class__.__name__ + "(") + self._format_items(object.maps, stream, indent, allowance, context, level) + stream.write(")") + + _dispatch[_collections.ChainMap.__repr__] = _pprint_chain_map + + def _pprint_deque( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + 
stream.write(object.__class__.__name__ + "(") + if object.maxlen is not None: + stream.write(f"maxlen={object.maxlen}, ") + stream.write("[") + + self._format_items(object, stream, indent, allowance + 1, context, level) + stream.write("])") + + _dispatch[_collections.deque.__repr__] = _pprint_deque + + def _pprint_user_dict( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + self._format(object.data, stream, indent, allowance, context, level - 1) + + _dispatch[_collections.UserDict.__repr__] = _pprint_user_dict + + def _pprint_user_list( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + self._format(object.data, stream, indent, allowance, context, level - 1) + + _dispatch[_collections.UserList.__repr__] = _pprint_user_list + + def _pprint_user_string( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + self._format(object.data, stream, indent, allowance, context, level - 1) + + _dispatch[_collections.UserString.__repr__] = _pprint_user_string + + def _safe_repr( + self, object: Any, context: set[int], maxlevels: int | None, level: int + ) -> str: + typ = type(object) + if typ in _builtin_scalars: + return repr(object) + + r = getattr(typ, "__repr__", None) + + if issubclass(typ, dict) and r is dict.__repr__: + if not object: + return "{}" + objid = id(object) + if maxlevels and level >= maxlevels: + return "{...}" + if objid in context: + return _recursion(object) + context.add(objid) + components: list[str] = [] + append = components.append + level += 1 + for k, v in sorted(object.items(), key=_safe_tuple): + krepr = self._safe_repr(k, context, maxlevels, level) + vrepr = self._safe_repr(v, context, maxlevels, level) + append(f"{krepr}: {vrepr}") + context.remove(objid) + return "{{{}}}".format(", ".join(components)) + + if (issubclass(typ, list) and r is list.__repr__) or ( + issubclass(typ, tuple) and r is tuple.__repr__ + ): + if issubclass(typ, list): + if not object: + return "[]" + format = "[%s]" + elif len(object) == 1: + format = "(%s,)" + else: + if not object: + return "()" + format = "(%s)" + objid = id(object) + if maxlevels and level >= maxlevels: + return format % "..." 
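+            # e.g. (illustrative): with maxlevels=1, [1, [2]] yields
+            # "[1, [...]]": scalars print normally, while the nested
+            # list collapses at the depth limit.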
+            if objid in context:
+                return _recursion(object)
+            context.add(objid)
+            components = []
+            append = components.append
+            level += 1
+            for o in object:
+                orepr = self._safe_repr(o, context, maxlevels, level)
+                append(orepr)
+            context.remove(objid)
+            return format % ", ".join(components)
+
+        return repr(object)
+
+
+_builtin_scalars = frozenset(
+    {str, bytes, bytearray, float, complex, bool, type(None), int}
+)
+
+
+def _recursion(object: Any) -> str:
+    return f"<Recursion on {type(object).__name__} with id={id(object)}>"
+
+
+def _wrap_bytes_repr(object: Any, width: int, allowance: int) -> Iterator[str]:
+    current = b""
+    last = len(object) // 4 * 4
+    for i in range(0, len(object), 4):
+        part = object[i : i + 4]
+        candidate = current + part
+        if i == last:
+            width -= allowance
+        if len(repr(candidate)) > width:
+            if current:
+                yield repr(current)
+            current = part
+        else:
+            current = candidate
+    if current:
+        yield repr(current)
diff --git a/.venv/lib/python3.11/site-packages/_pytest/_io/saferepr.py b/.venv/lib/python3.11/site-packages/_pytest/_io/saferepr.py
new file mode 100644
index 0000000..cee70e3
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/_pytest/_io/saferepr.py
@@ -0,0 +1,130 @@
+from __future__ import annotations
+
+import pprint
+import reprlib
+
+
+def _try_repr_or_str(obj: object) -> str:
+    try:
+        return repr(obj)
+    except (KeyboardInterrupt, SystemExit):
+        raise
+    except BaseException:
+        return f'{type(obj).__name__}("{obj}")'
+
+
+def _format_repr_exception(exc: BaseException, obj: object) -> str:
+    try:
+        exc_info = _try_repr_or_str(exc)
+    except (KeyboardInterrupt, SystemExit):
+        raise
+    except BaseException as inner_exc:
+        exc_info = f"unpresentable exception ({_try_repr_or_str(inner_exc)})"
+    return (
+        f"<[{exc_info} raised in repr()] {type(obj).__name__} object at 0x{id(obj):x}>"
+    )
+
+
+def _ellipsize(s: str, maxsize: int) -> str:
+    if len(s) > maxsize:
+        i = max(0, (maxsize - 3) // 2)
+        j = max(0, maxsize - 3 - i)
+        return s[:i] + "..." + s[len(s) - j :]
+    return s
+
+
+class SafeRepr(reprlib.Repr):
+    """
+    A ``reprlib.Repr`` subclass that limits the resulting size of repr() and
+    includes information on exceptions raised during the call.
+    """
+
+    def __init__(self, maxsize: int | None, use_ascii: bool = False) -> None:
+        """
+        :param maxsize:
+            If not None, will truncate the resulting repr to that specific size, using ellipsis
+            somewhere in the middle to hide the extra text.
+            If None, will not impose any size limits on the returned repr.
+        """
+        super().__init__()
+        # ``maxstring`` is used by the superclass, and needs to be an int; using a
+        # very large number in case maxsize is None, meaning we want to disable
+        # truncation.
+        self.maxstring = maxsize if maxsize is not None else 1_000_000_000
+        self.maxsize = maxsize
+        self.use_ascii = use_ascii
+
+    def repr(self, x: object) -> str:
+        try:
+            if self.use_ascii:
+                s = ascii(x)
+            else:
+                s = super().repr(x)
+        except (KeyboardInterrupt, SystemExit):
+            raise
+        except BaseException as exc:
+            s = _format_repr_exception(exc, x)
+        if self.maxsize is not None:
+            s = _ellipsize(s, self.maxsize)
+        return s
+
+    def repr_instance(self, x: object, level: int) -> str:
+        try:
+            s = repr(x)
+        except (KeyboardInterrupt, SystemExit):
+            raise
+        except BaseException as exc:
+            s = _format_repr_exception(exc, x)
+        if self.maxsize is not None:
+            s = _ellipsize(s, self.maxsize)
+        return s
+
+
+def safeformat(obj: object) -> str:
+    """Return a pretty printed string for the given object.
+
+    Failing __repr__ functions of user instances will be represented
+    with a short exception info.
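+
+    For example (illustrative): if ``Foo.__repr__`` raises
+    ``ValueError("boom")``, the result is roughly
+    ``<[ValueError('boom') raised in repr()] Foo object at 0x...>``.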
+    """
+    try:
+        return pprint.pformat(obj)
+    except Exception as exc:
+        return _format_repr_exception(exc, obj)
+
+
+# Maximum size of overall repr of objects to display during assertion errors.
+DEFAULT_REPR_MAX_SIZE = 240
+
+
+def saferepr(
+    obj: object, maxsize: int | None = DEFAULT_REPR_MAX_SIZE, use_ascii: bool = False
+) -> str:
+    """Return a size-limited safe repr-string for the given object.
+
+    Failing __repr__ functions of user instances will be represented
+    with a short exception info and 'saferepr' generally takes
+    care to never raise exceptions itself.
+
+    This function is a wrapper around the Repr/reprlib functionality of the
+    stdlib.
+    """
+    return SafeRepr(maxsize, use_ascii).repr(obj)
+
+
+def saferepr_unlimited(obj: object, use_ascii: bool = True) -> str:
+    """Return an unlimited-size safe repr-string for the given object.
+
+    As with saferepr, failing __repr__ functions of user instances
+    will be represented with a short exception info.
+
+    This function is a wrapper around simple repr.
+
+    Note: a cleaner solution would be to alter ``saferepr`` this way
+    when maxsize=None, but that might affect some other code.
+    """
+    try:
+        if use_ascii:
+            return ascii(obj)
+        return repr(obj)
+    except Exception as exc:
+        return _format_repr_exception(exc, obj)
diff --git a/.venv/lib/python3.11/site-packages/_pytest/_io/terminalwriter.py b/.venv/lib/python3.11/site-packages/_pytest/_io/terminalwriter.py
new file mode 100644
index 0000000..fd808f8
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/_pytest/_io/terminalwriter.py
@@ -0,0 +1,254 @@
+"""Helper functions for writing to terminals and files."""
+
+from __future__ import annotations
+
+from collections.abc import Sequence
+import os
+import shutil
+import sys
+from typing import final
+from typing import Literal
+from typing import TextIO
+
+import pygments
+from pygments.formatters.terminal import TerminalFormatter
+from pygments.lexer import Lexer
+from pygments.lexers.diff import DiffLexer
+from pygments.lexers.python import PythonLexer
+
+from ..compat import assert_never
+from .wcwidth import wcswidth
+
+
+# This code was initially copied from py 1.8.1, file _io/terminalwriter.py.
+
+
+def get_terminal_width() -> int:
+    width, _ = shutil.get_terminal_size(fallback=(80, 24))
+
+    # The width reported by Windows' get_terminal_size may be bogus, so sanitize it a bit.
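+    # e.g. a misreported width of 0 or 30 is bumped to 80 by the check
+    # below, while a plausible 120 is kept as-is.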
+ if width < 40: + width = 80 + + return width + + +def should_do_markup(file: TextIO) -> bool: + if os.environ.get("PY_COLORS") == "1": + return True + if os.environ.get("PY_COLORS") == "0": + return False + if os.environ.get("NO_COLOR"): + return False + if os.environ.get("FORCE_COLOR"): + return True + return ( + hasattr(file, "isatty") and file.isatty() and os.environ.get("TERM") != "dumb" + ) + + +@final +class TerminalWriter: + _esctable = dict( + black=30, + red=31, + green=32, + yellow=33, + blue=34, + purple=35, + cyan=36, + white=37, + Black=40, + Red=41, + Green=42, + Yellow=43, + Blue=44, + Purple=45, + Cyan=46, + White=47, + bold=1, + light=2, + blink=5, + invert=7, + ) + + def __init__(self, file: TextIO | None = None) -> None: + if file is None: + file = sys.stdout + if hasattr(file, "isatty") and file.isatty() and sys.platform == "win32": + try: + import colorama + except ImportError: + pass + else: + file = colorama.AnsiToWin32(file).stream + assert file is not None + self._file = file + self.hasmarkup = should_do_markup(file) + self._current_line = "" + self._terminal_width: int | None = None + self.code_highlight = True + + @property + def fullwidth(self) -> int: + if self._terminal_width is not None: + return self._terminal_width + return get_terminal_width() + + @fullwidth.setter + def fullwidth(self, value: int) -> None: + self._terminal_width = value + + @property + def width_of_current_line(self) -> int: + """Return an estimate of the width so far in the current line.""" + return wcswidth(self._current_line) + + def markup(self, text: str, **markup: bool) -> str: + for name in markup: + if name not in self._esctable: + raise ValueError(f"unknown markup: {name!r}") + if self.hasmarkup: + esc = [self._esctable[name] for name, on in markup.items() if on] + if esc: + text = "".join(f"\x1b[{cod}m" for cod in esc) + text + "\x1b[0m" + return text + + def sep( + self, + sepchar: str, + title: str | None = None, + fullwidth: int | None = None, + **markup: bool, + ) -> None: + if fullwidth is None: + fullwidth = self.fullwidth + # The goal is to have the line be as long as possible + # under the condition that len(line) <= fullwidth. + if sys.platform == "win32": + # If we print in the last column on windows we are on a + # new line but there is no way to verify/neutralize this + # (we may not know the exact line width). + # So let's be defensive to avoid empty lines in the output. + fullwidth -= 1 + if title is not None: + # we want 2 + 2*len(fill) + len(title) <= fullwidth + # i.e. 2 + 2*len(sepchar)*N + len(title) <= fullwidth + # 2*len(sepchar)*N <= fullwidth - len(title) - 2 + # N <= (fullwidth - len(title) - 2) // (2*len(sepchar)) + N = max((fullwidth - len(title) - 2) // (2 * len(sepchar)), 1) + fill = sepchar * N + line = f"{fill} {title} {fill}" + else: + # we want len(sepchar)*N <= fullwidth + # i.e. N <= fullwidth // len(sepchar) + line = sepchar * (fullwidth // len(sepchar)) + # In some situations there is room for an extra sepchar at the right, + # in particular if we consider that with a sepchar like "_ " the + # trailing space is not important at the end of the line. 
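+        # Worked example (illustrative, non-Windows): sep("=", "title") with
+        # fullwidth 20 gives N = (20 - 5 - 2) // 2 = 6 and
+        # line = "====== title ======" (19 columns); the check below then
+        # appends one more "=" to reach 20.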
+ if len(line) + len(sepchar.rstrip()) <= fullwidth: + line += sepchar.rstrip() + + self.line(line, **markup) + + def write(self, msg: str, *, flush: bool = False, **markup: bool) -> None: + if msg: + current_line = msg.rsplit("\n", 1)[-1] + if "\n" in msg: + self._current_line = current_line + else: + self._current_line += current_line + + msg = self.markup(msg, **markup) + + try: + self._file.write(msg) + except UnicodeEncodeError: + # Some environments don't support printing general Unicode + # strings, due to misconfiguration or otherwise; in that case, + # print the string escaped to ASCII. + # When the Unicode situation improves we should consider + # letting the error propagate instead of masking it (see #7475 + # for one brief attempt). + msg = msg.encode("unicode-escape").decode("ascii") + self._file.write(msg) + + if flush: + self.flush() + + def line(self, s: str = "", **markup: bool) -> None: + self.write(s, **markup) + self.write("\n") + + def flush(self) -> None: + self._file.flush() + + def _write_source(self, lines: Sequence[str], indents: Sequence[str] = ()) -> None: + """Write lines of source code possibly highlighted. + + Keeping this private for now because the API is clunky. We should discuss how + to evolve the terminal writer so we can have more precise color support, for example + being able to write part of a line in one color and the rest in another, and so on. + """ + if indents and len(indents) != len(lines): + raise ValueError( + f"indents size ({len(indents)}) should have same size as lines ({len(lines)})" + ) + if not indents: + indents = [""] * len(lines) + source = "\n".join(lines) + new_lines = self._highlight(source).splitlines() + for indent, new_line in zip(indents, new_lines): + self.line(indent + new_line) + + def _get_pygments_lexer(self, lexer: Literal["python", "diff"]) -> Lexer: + if lexer == "python": + return PythonLexer() + elif lexer == "diff": + return DiffLexer() + else: + assert_never(lexer) + + def _get_pygments_formatter(self) -> TerminalFormatter: + from _pytest.config.exceptions import UsageError + + theme = os.getenv("PYTEST_THEME") + theme_mode = os.getenv("PYTEST_THEME_MODE", "dark") + + try: + return TerminalFormatter(bg=theme_mode, style=theme) + except pygments.util.ClassNotFound as e: + raise UsageError( + f"PYTEST_THEME environment variable has an invalid value: '{theme}'. " + "Hint: See available pygments styles with `pygmentize -L styles`." + ) from e + except pygments.util.OptionError as e: + raise UsageError( + f"PYTEST_THEME_MODE environment variable has an invalid value: '{theme_mode}'. " + "The allowed values are 'dark' (default) and 'light'." + ) from e + + def _highlight( + self, source: str, lexer: Literal["diff", "python"] = "python" + ) -> str: + """Highlight the given source if we have markup support.""" + if not source or not self.hasmarkup or not self.code_highlight: + return source + + pygments_lexer = self._get_pygments_lexer(lexer) + pygments_formatter = self._get_pygments_formatter() + + highlighted: str = pygments.highlight( + source, pygments_lexer, pygments_formatter + ) + # pygments terminal formatter may add a newline when there wasn't one. + # We don't want this, remove. + if highlighted[-1] == "\n" and source[-1] != "\n": + highlighted = highlighted[:-1] + + # Some lexers will not set the initial color explicitly + # which may lead to the previous color being propagated to the + # start of the expression, so reset first. 
+ highlighted = "\x1b[0m" + highlighted + + return highlighted diff --git a/.venv/lib/python3.11/site-packages/_pytest/_io/wcwidth.py b/.venv/lib/python3.11/site-packages/_pytest/_io/wcwidth.py new file mode 100644 index 0000000..23886ff --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/_io/wcwidth.py @@ -0,0 +1,57 @@ +from __future__ import annotations + +from functools import lru_cache +import unicodedata + + +@lru_cache(100) +def wcwidth(c: str) -> int: + """Determine how many columns are needed to display a character in a terminal. + + Returns -1 if the character is not printable. + Returns 0, 1 or 2 for other characters. + """ + o = ord(c) + + # ASCII fast path. + if 0x20 <= o < 0x07F: + return 1 + + # Some Cf/Zp/Zl characters which should be zero-width. + if ( + o == 0x0000 + or 0x200B <= o <= 0x200F + or 0x2028 <= o <= 0x202E + or 0x2060 <= o <= 0x2063 + ): + return 0 + + category = unicodedata.category(c) + + # Control characters. + if category == "Cc": + return -1 + + # Combining characters with zero width. + if category in ("Me", "Mn"): + return 0 + + # Full/Wide east asian characters. + if unicodedata.east_asian_width(c) in ("F", "W"): + return 2 + + return 1 + + +def wcswidth(s: str) -> int: + """Determine how many columns are needed to display a string in a terminal. + + Returns -1 if the string contains non-printable characters. + """ + width = 0 + for c in unicodedata.normalize("NFC", s): + wc = wcwidth(c) + if wc < 0: + return -1 + width += wc + return width diff --git a/.venv/lib/python3.11/site-packages/_pytest/_py/__init__.py b/.venv/lib/python3.11/site-packages/_pytest/_py/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.11/site-packages/_pytest/_py/error.py b/.venv/lib/python3.11/site-packages/_pytest/_py/error.py new file mode 100644 index 0000000..dace237 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/_py/error.py @@ -0,0 +1,119 @@ +"""create errno-specific classes for IO or os calls.""" + +from __future__ import annotations + +from collections.abc import Callable +import errno +import os +import sys +from typing import TYPE_CHECKING +from typing import TypeVar + + +if TYPE_CHECKING: + from typing_extensions import ParamSpec + + P = ParamSpec("P") + +R = TypeVar("R") + + +class Error(EnvironmentError): + def __repr__(self) -> str: + return "{}.{} {!r}: {} ".format( + self.__class__.__module__, + self.__class__.__name__, + self.__class__.__doc__, + " ".join(map(str, self.args)), + # repr(self.args) + ) + + def __str__(self) -> str: + s = "[{}]: {}".format( + self.__class__.__doc__, + " ".join(map(str, self.args)), + ) + return s + + +_winerrnomap = { + 2: errno.ENOENT, + 3: errno.ENOENT, + 17: errno.EEXIST, + 18: errno.EXDEV, + 13: errno.EBUSY, # empty cd drive, but ENOMEDIUM seems unavailable + 22: errno.ENOTDIR, + 20: errno.ENOTDIR, + 267: errno.ENOTDIR, + 5: errno.EACCES, # anything better? +} + + +class ErrorMaker: + """lazily provides Exception classes for each possible POSIX errno + (as defined per the 'errno' module). All such instances + subclass EnvironmentError. 
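+
+    Example (illustrative, assuming ``from _pytest._py import error``;
+    the classes are created lazily on first attribute access)::
+
+        error.ENOENT                                  # an Error subclass
+        error.checked_call(os.remove, "missing.txt")  # raises error.ENOENT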
+ """ + + _errno2class: dict[int, type[Error]] = {} + + def __getattr__(self, name: str) -> type[Error]: + if name[0] == "_": + raise AttributeError(name) + eno = getattr(errno, name) + cls = self._geterrnoclass(eno) + setattr(self, name, cls) + return cls + + def _geterrnoclass(self, eno: int) -> type[Error]: + try: + return self._errno2class[eno] + except KeyError: + clsname = errno.errorcode.get(eno, f"UnknownErrno{eno}") + errorcls = type( + clsname, + (Error,), + {"__module__": "py.error", "__doc__": os.strerror(eno)}, + ) + self._errno2class[eno] = errorcls + return errorcls + + def checked_call( + self, func: Callable[P, R], *args: P.args, **kwargs: P.kwargs + ) -> R: + """Call a function and raise an errno-exception if applicable.""" + __tracebackhide__ = True + try: + return func(*args, **kwargs) + except Error: + raise + except OSError as value: + if not hasattr(value, "errno"): + raise + if sys.platform == "win32": + try: + # error: Invalid index type "Optional[int]" for "dict[int, int]"; expected type "int" [index] + # OK to ignore because we catch the KeyError below. + cls = self._geterrnoclass(_winerrnomap[value.errno]) # type:ignore[index] + except KeyError: + raise value + else: + # we are not on Windows, or we got a proper OSError + if value.errno is None: + cls = type( + "UnknownErrnoNone", + (Error,), + {"__module__": "py.error", "__doc__": None}, + ) + else: + cls = self._geterrnoclass(value.errno) + + raise cls(f"{func.__name__}{args!r}") + + +_error_maker = ErrorMaker() +checked_call = _error_maker.checked_call + + +def __getattr__(attr: str) -> type[Error]: + return getattr(_error_maker, attr) # type: ignore[no-any-return] diff --git a/.venv/lib/python3.11/site-packages/_pytest/_py/path.py b/.venv/lib/python3.11/site-packages/_pytest/_py/path.py new file mode 100644 index 0000000..e353c1a --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/_py/path.py @@ -0,0 +1,1475 @@ +# mypy: allow-untyped-defs +"""local path implementation.""" + +from __future__ import annotations + +import atexit +from collections.abc import Callable +from contextlib import contextmanager +import fnmatch +import importlib.util +import io +import os +from os.path import abspath +from os.path import dirname +from os.path import exists +from os.path import isabs +from os.path import isdir +from os.path import isfile +from os.path import islink +from os.path import normpath +import posixpath +from stat import S_ISDIR +from stat import S_ISLNK +from stat import S_ISREG +import sys +from typing import Any +from typing import cast +from typing import Literal +from typing import overload +from typing import TYPE_CHECKING +import uuid +import warnings + +from . import error + + +# Moved from local.py. +iswin32 = sys.platform == "win32" or (getattr(os, "_name", False) == "nt") + + +class Checkers: + _depend_on_existence = "exists", "link", "dir", "file" + + def __init__(self, path): + self.path = path + + def dotfile(self): + return self.path.basename.startswith(".") + + def ext(self, arg): + if not arg.startswith("."): + arg = "." 
+ arg + return self.path.ext == arg + + def basename(self, arg): + return self.path.basename == arg + + def basestarts(self, arg): + return self.path.basename.startswith(arg) + + def relto(self, arg): + return self.path.relto(arg) + + def fnmatch(self, arg): + return self.path.fnmatch(arg) + + def endswith(self, arg): + return str(self.path).endswith(arg) + + def _evaluate(self, kw): + from .._code.source import getrawcode + + for name, value in kw.items(): + invert = False + meth = None + try: + meth = getattr(self, name) + except AttributeError: + if name[:3] == "not": + invert = True + try: + meth = getattr(self, name[3:]) + except AttributeError: + pass + if meth is None: + raise TypeError(f"no {name!r} checker available for {self.path!r}") + try: + if getrawcode(meth).co_argcount > 1: + if (not meth(value)) ^ invert: + return False + else: + if bool(value) ^ bool(meth()) ^ invert: + return False + except (error.ENOENT, error.ENOTDIR, error.EBUSY): + # EBUSY feels not entirely correct, + # but its kind of necessary since ENOMEDIUM + # is not accessible in python + for name in self._depend_on_existence: + if name in kw: + if kw.get(name): + return False + name = "not" + name + if name in kw: + if not kw.get(name): + return False + return True + + _statcache: Stat + + def _stat(self) -> Stat: + try: + return self._statcache + except AttributeError: + try: + self._statcache = self.path.stat() + except error.ELOOP: + self._statcache = self.path.lstat() + return self._statcache + + def dir(self): + return S_ISDIR(self._stat().mode) + + def file(self): + return S_ISREG(self._stat().mode) + + def exists(self): + return self._stat() + + def link(self): + st = self.path.lstat() + return S_ISLNK(st.mode) + + +class NeverRaised(Exception): + pass + + +class Visitor: + def __init__(self, fil, rec, ignore, bf, sort): + if isinstance(fil, str): + fil = FNMatcher(fil) + if isinstance(rec, str): + self.rec: Callable[[LocalPath], bool] = FNMatcher(rec) + elif not hasattr(rec, "__call__") and rec: + self.rec = lambda path: True + else: + self.rec = rec + self.fil = fil + self.ignore = ignore + self.breadthfirst = bf + self.optsort = cast(Callable[[Any], Any], sorted) if sort else (lambda x: x) + + def gen(self, path): + try: + entries = path.listdir() + except self.ignore: + return + rec = self.rec + dirs = self.optsort( + [p for p in entries if p.check(dir=1) and (rec is None or rec(p))] + ) + if not self.breadthfirst: + for subdir in dirs: + yield from self.gen(subdir) + for p in self.optsort(entries): + if self.fil is None or self.fil(p): + yield p + if self.breadthfirst: + for subdir in dirs: + yield from self.gen(subdir) + + +class FNMatcher: + def __init__(self, pattern): + self.pattern = pattern + + def __call__(self, path): + pattern = self.pattern + + if ( + pattern.find(path.sep) == -1 + and iswin32 + and pattern.find(posixpath.sep) != -1 + ): + # Running on Windows, the pattern has no Windows path separators, + # and the pattern has one or more Posix path separators. Replace + # the Posix path separators with the Windows path separator. + pattern = pattern.replace(posixpath.sep, path.sep) + + if pattern.find(path.sep) == -1: + name = path.basename + else: + name = str(path) # path.strpath # XXX svn? + if not os.path.isabs(pattern): + pattern = "*" + path.sep + pattern + return fnmatch.fnmatch(name, pattern) + + +def map_as_list(func, iter): + return list(map(func, iter)) + + +class Stat: + if TYPE_CHECKING: + + @property + def size(self) -> int: ... 
+ + @property + def mtime(self) -> float: ... + + def __getattr__(self, name: str) -> Any: + return getattr(self._osstatresult, "st_" + name) + + def __init__(self, path, osstatresult): + self.path = path + self._osstatresult = osstatresult + + @property + def owner(self): + if iswin32: + raise NotImplementedError("XXX win32") + import pwd + + entry = error.checked_call(pwd.getpwuid, self.uid) # type:ignore[attr-defined,unused-ignore] + return entry[0] + + @property + def group(self): + """Return group name of file.""" + if iswin32: + raise NotImplementedError("XXX win32") + import grp + + entry = error.checked_call(grp.getgrgid, self.gid) # type:ignore[attr-defined,unused-ignore] + return entry[0] + + def isdir(self): + return S_ISDIR(self._osstatresult.st_mode) + + def isfile(self): + return S_ISREG(self._osstatresult.st_mode) + + def islink(self): + self.path.lstat() + return S_ISLNK(self._osstatresult.st_mode) + + +def getuserid(user): + import pwd + + if not isinstance(user, int): + user = pwd.getpwnam(user)[2] # type:ignore[attr-defined,unused-ignore] + return user + + +def getgroupid(group): + import grp + + if not isinstance(group, int): + group = grp.getgrnam(group)[2] # type:ignore[attr-defined,unused-ignore] + return group + + +class LocalPath: + """Object oriented interface to os.path and other local filesystem + related information. + """ + + class ImportMismatchError(ImportError): + """raised on pyimport() if there is a mismatch of __file__'s""" + + sep = os.sep + + def __init__(self, path=None, expanduser=False): + """Initialize and return a local Path instance. + + Path can be relative to the current directory. + If path is None it defaults to the current working directory. + If expanduser is True, tilde-expansion is performed. + Note that Path instances always carry an absolute path. + Note also that passing in a local path object will simply return + the exact same path object. Use new() to get a new copy. + """ + if path is None: + self.strpath = error.checked_call(os.getcwd) + else: + try: + path = os.fspath(path) + except TypeError: + raise ValueError( + "can only pass None, Path instances " + "or non-empty strings to LocalPath" + ) + if expanduser: + path = os.path.expanduser(path) + self.strpath = abspath(path) + + if sys.platform != "win32": + + def chown(self, user, group, rec=0): + """Change ownership to the given user and group. + user and group may be specified by a number or + by a name. if rec is True change ownership + recursively. 
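+
+            Example (illustrative)::
+
+                LocalPath("/srv/data").chown("alice", "staff", rec=1)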
+ """ + uid = getuserid(user) + gid = getgroupid(group) + if rec: + for x in self.visit(rec=lambda x: x.check(link=0)): + if x.check(link=0): + error.checked_call(os.chown, str(x), uid, gid) + error.checked_call(os.chown, str(self), uid, gid) + + def readlink(self) -> str: + """Return value of a symbolic link.""" + # https://github.com/python/mypy/issues/12278 + return error.checked_call(os.readlink, self.strpath) # type: ignore[arg-type,return-value,unused-ignore] + + def mklinkto(self, oldname): + """Posix style hard link to another name.""" + error.checked_call(os.link, str(oldname), str(self)) + + def mksymlinkto(self, value, absolute=1): + """Create a symbolic link with the given value (pointing to another name).""" + if absolute: + error.checked_call(os.symlink, str(value), self.strpath) + else: + base = self.common(value) + # with posix local paths '/' is always a common base + relsource = self.__class__(value).relto(base) + reldest = self.relto(base) + n = reldest.count(self.sep) + target = self.sep.join(("..",) * n + (relsource,)) + error.checked_call(os.symlink, target, self.strpath) + + def __div__(self, other): + return self.join(os.fspath(other)) + + __truediv__ = __div__ # py3k + + @property + def basename(self): + """Basename part of path.""" + return self._getbyspec("basename")[0] + + @property + def dirname(self): + """Dirname part of path.""" + return self._getbyspec("dirname")[0] + + @property + def purebasename(self): + """Pure base name of the path.""" + return self._getbyspec("purebasename")[0] + + @property + def ext(self): + """Extension of the path (including the '.').""" + return self._getbyspec("ext")[0] + + def read_binary(self): + """Read and return a bytestring from reading the path.""" + with self.open("rb") as f: + return f.read() + + def read_text(self, encoding): + """Read and return a Unicode string from reading the path.""" + with self.open("r", encoding=encoding) as f: + return f.read() + + def read(self, mode="r"): + """Read and return a bytestring from reading the path.""" + with self.open(mode) as f: + return f.read() + + def readlines(self, cr=1): + """Read and return a list of lines from the path. if cr is False, the + newline will be removed from the end of each line.""" + mode = "r" + + if not cr: + content = self.read(mode) + return content.split("\n") + else: + f = self.open(mode) + try: + return f.readlines() + finally: + f.close() + + def load(self): + """(deprecated) return object unpickled from self.read()""" + f = self.open("rb") + try: + import pickle + + return error.checked_call(pickle.load, f) + finally: + f.close() + + def move(self, target): + """Move this path to target.""" + if target.relto(self): + raise error.EINVAL(target, "cannot move path into a subdirectory of itself") + try: + self.rename(target) + except error.EXDEV: # invalid cross-device link + self.copy(target) + self.remove() + + def fnmatch(self, pattern): + """Return true if the basename/fullname matches the glob-'pattern'. + + valid pattern characters:: + + * matches everything + ? matches any single character + [seq] matches any character in seq + [!seq] matches any char not in seq + + If the pattern contains a path-separator then the full path + is used for pattern matching and a '*' is prepended to the + pattern. + + if the pattern doesn't contain a path-separator the pattern + is only matched against the basename. 
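+
+        Example (illustrative)::
+
+            LocalPath("/tmp/foo.py").fnmatch("*.py")      # True (basename)
+            LocalPath("/tmp/foo.py").fnmatch("tmp/*.py")  # True (full path)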
+        """
+        return FNMatcher(pattern)(self)
+
+    def relto(self, relpath):
+        """Return a string which is the relative part of the path
+        to the given 'relpath'.
+        """
+        if not isinstance(relpath, (str, LocalPath)):
+            raise TypeError(f"{relpath!r}: not a string or path object")
+        strrelpath = str(relpath)
+        if strrelpath and strrelpath[-1] != self.sep:
+            strrelpath += self.sep
+        # assert strrelpath[-1] == self.sep
+        # assert strrelpath[-2] != self.sep
+        strself = self.strpath
+        if sys.platform == "win32" or getattr(os, "_name", None) == "nt":
+            if os.path.normcase(strself).startswith(os.path.normcase(strrelpath)):
+                return strself[len(strrelpath) :]
+        elif strself.startswith(strrelpath):
+            return strself[len(strrelpath) :]
+        return ""
+
+    def ensure_dir(self, *args):
+        """Ensure the path joined with args is a directory."""
+        return self.ensure(*args, dir=True)
+
+    def bestrelpath(self, dest):
+        """Return a string which is a relative path from self
+        (assumed to be a directory) to dest such that
+        self.join(bestrelpath) == dest. If no such
+        path can be determined, return dest.
+        """
+        try:
+            if self == dest:
+                return os.curdir
+            base = self.common(dest)
+            if not base:  # can be the case on windows
+                return str(dest)
+            self2base = self.relto(base)
+            reldest = dest.relto(base)
+            if self2base:
+                n = self2base.count(self.sep) + 1
+            else:
+                n = 0
+            lst = [os.pardir] * n
+            if reldest:
+                lst.append(reldest)
+            target = dest.sep.join(lst)
+            return target
+        except AttributeError:
+            return str(dest)
+
+    def exists(self):
+        return self.check()
+
+    def isdir(self):
+        return self.check(dir=1)
+
+    def isfile(self):
+        return self.check(file=1)
+
+    def parts(self, reverse=False):
+        """Return a root-first list of all ancestor directories
+        plus the path itself.
+        """
+        current = self
+        lst = [self]
+        while 1:
+            last = current
+            current = current.dirpath()
+            if last == current:
+                break
+            lst.append(current)
+        if not reverse:
+            lst.reverse()
+        return lst
+
+    def common(self, other):
+        """Return the common part shared with the other path
+        or None if there is no common part.
+        """
+        last = None
+        for x, y in zip(self.parts(), other.parts()):
+            if x != y:
+                return last
+            last = x
+        return last
+
+    def __add__(self, other):
+        """Return new path object with 'other' added to the basename"""
+        return self.new(basename=self.basename + str(other))
+
+    def visit(self, fil=None, rec=None, ignore=NeverRaised, bf=False, sort=False):
+        """Yields all paths below the current one.
+
+        fil is a filter (glob pattern or callable); paths that do not
+        match it are not yielded; defaults to None (everything is
+        returned)
+
+        rec is a filter (glob pattern or callable) that controls whether
+        a node is descended, defaulting to None
+
+        ignore is an Exception class that is ignored when calling listdir()
+        on any of the paths (by default, all exceptions are reported)
+
+        bf if True will cause a breadth-first search instead of the
+        default depth-first. Default: False
+
+        sort if True will sort entries within each directory level.
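+
+        Example (illustrative)::
+
+            for p in path.visit(fil="*.py", rec=lambda p: p.basename != ".git"):
+                print(p)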
+ """ + yield from Visitor(fil, rec, ignore, bf, sort).gen(self) + + def _sortlist(self, res, sort): + if sort: + if hasattr(sort, "__call__"): + warnings.warn( + DeprecationWarning( + "listdir(sort=callable) is deprecated and breaks on python3" + ), + stacklevel=3, + ) + res.sort(sort) + else: + res.sort() + + def __fspath__(self): + return self.strpath + + def __hash__(self): + s = self.strpath + if iswin32: + s = s.lower() + return hash(s) + + def __eq__(self, other): + s1 = os.fspath(self) + try: + s2 = os.fspath(other) + except TypeError: + return False + if iswin32: + s1 = s1.lower() + try: + s2 = s2.lower() + except AttributeError: + return False + return s1 == s2 + + def __ne__(self, other): + return not (self == other) + + def __lt__(self, other): + return os.fspath(self) < os.fspath(other) + + def __gt__(self, other): + return os.fspath(self) > os.fspath(other) + + def samefile(self, other): + """Return True if 'other' references the same file as 'self'.""" + other = os.fspath(other) + if not isabs(other): + other = abspath(other) + if self == other: + return True + if not hasattr(os.path, "samefile"): + return False + return error.checked_call(os.path.samefile, self.strpath, other) + + def remove(self, rec=1, ignore_errors=False): + """Remove a file or directory (or a directory tree if rec=1). + if ignore_errors is True, errors while removing directories will + be ignored. + """ + if self.check(dir=1, link=0): + if rec: + # force remove of readonly files on windows + if iswin32: + self.chmod(0o700, rec=1) + import shutil + + error.checked_call( + shutil.rmtree, self.strpath, ignore_errors=ignore_errors + ) + else: + error.checked_call(os.rmdir, self.strpath) + else: + if iswin32: + self.chmod(0o700) + error.checked_call(os.remove, self.strpath) + + def computehash(self, hashtype="md5", chunksize=524288): + """Return hexdigest of hashvalue for this file.""" + try: + try: + import hashlib as mod + except ImportError: + if hashtype == "sha1": + hashtype = "sha" + mod = __import__(hashtype) + hash = getattr(mod, hashtype)() + except (AttributeError, ImportError): + raise ValueError(f"Don't know how to compute {hashtype!r} hash") + f = self.open("rb") + try: + while 1: + buf = f.read(chunksize) + if not buf: + return hash.hexdigest() + hash.update(buf) + finally: + f.close() + + def new(self, **kw): + """Create a modified version of this path. + the following keyword arguments modify various path parts:: + + a:/some/path/to/a/file.ext + xx drive + xxxxxxxxxxxxxxxxx dirname + xxxxxxxx basename + xxxx purebasename + xxx ext + """ + obj = object.__new__(self.__class__) + if not kw: + obj.strpath = self.strpath + return obj + drive, dirname, basename, purebasename, ext = self._getbyspec( + "drive,dirname,basename,purebasename,ext" + ) + if "basename" in kw: + if "purebasename" in kw or "ext" in kw: + raise ValueError(f"invalid specification {kw!r}") + else: + pb = kw.setdefault("purebasename", purebasename) + try: + ext = kw["ext"] + except KeyError: + pass + else: + if ext and not ext.startswith("."): + ext = "." 
+ ext + kw["basename"] = pb + ext + + if "dirname" in kw and not kw["dirname"]: + kw["dirname"] = drive + else: + kw.setdefault("dirname", dirname) + kw.setdefault("sep", self.sep) + obj.strpath = normpath("{dirname}{sep}{basename}".format(**kw)) + return obj + + def _getbyspec(self, spec: str) -> list[str]: + """See new for what 'spec' can be.""" + res = [] + parts = self.strpath.split(self.sep) + + args = filter(None, spec.split(",")) + for name in args: + if name == "drive": + res.append(parts[0]) + elif name == "dirname": + res.append(self.sep.join(parts[:-1])) + else: + basename = parts[-1] + if name == "basename": + res.append(basename) + else: + i = basename.rfind(".") + if i == -1: + purebasename, ext = basename, "" + else: + purebasename, ext = basename[:i], basename[i:] + if name == "purebasename": + res.append(purebasename) + elif name == "ext": + res.append(ext) + else: + raise ValueError(f"invalid part specification {name!r}") + return res + + def dirpath(self, *args, **kwargs): + """Return the directory path joined with any given path arguments.""" + if not kwargs: + path = object.__new__(self.__class__) + path.strpath = dirname(self.strpath) + if args: + path = path.join(*args) + return path + return self.new(basename="").join(*args, **kwargs) + + def join(self, *args: os.PathLike[str], abs: bool = False) -> LocalPath: + """Return a new path by appending all 'args' as path + components. if abs=1 is used restart from root if any + of the args is an absolute path. + """ + sep = self.sep + strargs = [os.fspath(arg) for arg in args] + strpath = self.strpath + if abs: + newargs: list[str] = [] + for arg in reversed(strargs): + if isabs(arg): + strpath = arg + strargs = newargs + break + newargs.insert(0, arg) + # special case for when we have e.g. strpath == "/" + actual_sep = "" if strpath.endswith(sep) else sep + for arg in strargs: + arg = arg.strip(sep) + if iswin32: + # allow unix style paths even on windows. + arg = arg.strip("/") + arg = arg.replace("/", sep) + strpath = strpath + actual_sep + arg + actual_sep = sep + obj = object.__new__(self.__class__) + obj.strpath = normpath(strpath) + return obj + + def open(self, mode="r", ensure=False, encoding=None): + """Return an opened file with the given mode. + + If ensure is True, create parent directories if needed. + """ + if ensure: + self.dirpath().ensure(dir=1) + if encoding: + return error.checked_call( + io.open, + self.strpath, + mode, + encoding=encoding, + ) + return error.checked_call(open, self.strpath, mode) + + def _fastjoin(self, name): + child = object.__new__(self.__class__) + child.strpath = self.strpath + self.sep + name + return child + + def islink(self): + return islink(self.strpath) + + def check(self, **kw): + """Check a path for existence and properties. + + Without arguments, return True if the path exists, otherwise False. 
+ + valid checkers:: + + file = 1 # is a file + file = 0 # is not a file (may not even exist) + dir = 1 # is a dir + link = 1 # is a link + exists = 1 # exists + + You can specify multiple checker definitions, for example:: + + path.check(file=1, link=1) # a link pointing to a file + """ + if not kw: + return exists(self.strpath) + if len(kw) == 1: + if "dir" in kw: + return not kw["dir"] ^ isdir(self.strpath) + if "file" in kw: + return not kw["file"] ^ isfile(self.strpath) + if not kw: + kw = {"exists": 1} + return Checkers(self)._evaluate(kw) + + _patternchars = set("*?[" + os.sep) + + def listdir(self, fil=None, sort=None): + """List directory contents, possibly filter by the given fil func + and possibly sorted. + """ + if fil is None and sort is None: + names = error.checked_call(os.listdir, self.strpath) + return map_as_list(self._fastjoin, names) + if isinstance(fil, str): + if not self._patternchars.intersection(fil): + child = self._fastjoin(fil) + if exists(child.strpath): + return [child] + return [] + fil = FNMatcher(fil) + names = error.checked_call(os.listdir, self.strpath) + res = [] + for name in names: + child = self._fastjoin(name) + if fil is None or fil(child): + res.append(child) + self._sortlist(res, sort) + return res + + def size(self) -> int: + """Return size of the underlying file object""" + return self.stat().size + + def mtime(self) -> float: + """Return last modification time of the path.""" + return self.stat().mtime + + def copy(self, target, mode=False, stat=False): + """Copy path to target. + + If mode is True, will copy permission from path to target. + If stat is True, copy permission, last modification + time, last access time, and flags from path to target. + """ + if self.check(file=1): + if target.check(dir=1): + target = target.join(self.basename) + assert self != target + copychunked(self, target) + if mode: + copymode(self.strpath, target.strpath) + if stat: + copystat(self, target) + else: + + def rec(p): + return p.check(link=0) + + for x in self.visit(rec=rec): + relpath = x.relto(self) + newx = target.join(relpath) + newx.dirpath().ensure(dir=1) + if x.check(link=1): + newx.mksymlinkto(x.readlink()) + continue + elif x.check(file=1): + copychunked(x, newx) + elif x.check(dir=1): + newx.ensure(dir=1) + if mode: + copymode(x.strpath, newx.strpath) + if stat: + copystat(x, newx) + + def rename(self, target): + """Rename this path to target.""" + target = os.fspath(target) + return error.checked_call(os.rename, self.strpath, target) + + def dump(self, obj, bin=1): + """Pickle object into path location""" + f = self.open("wb") + import pickle + + try: + error.checked_call(pickle.dump, obj, f, bin) + finally: + f.close() + + def mkdir(self, *args): + """Create & return the directory joined with args.""" + p = self.join(*args) + error.checked_call(os.mkdir, os.fspath(p)) + return p + + def write_binary(self, data, ensure=False): + """Write binary data into path. If ensure is True create + missing parent directories. + """ + if ensure: + self.dirpath().ensure(dir=1) + with self.open("wb") as f: + f.write(data) + + def write_text(self, data, encoding, ensure=False): + """Write text data into path using the specified encoding. + If ensure is True create missing parent directories. + """ + if ensure: + self.dirpath().ensure(dir=1) + with self.open("w", encoding=encoding) as f: + f.write(data) + + def write(self, data, mode="w", ensure=False): + """Write data into path. If ensure is True create + missing parent directories. 
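+ For example (an illustrative sketch; the path is hypothetical)::
+
+ p = local("/tmp/out/notes.txt")
+ p.write("hello", ensure=True) # creates /tmp/out first if needed
+ p.write(b"\x01\x02", mode="wb") # binary modes require bytes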
+ """ + if ensure: + self.dirpath().ensure(dir=1) + if "b" in mode: + if not isinstance(data, bytes): + raise ValueError("can only process bytes") + else: + if not isinstance(data, str): + if not isinstance(data, bytes): + data = str(data) + else: + data = data.decode(sys.getdefaultencoding()) + f = self.open(mode) + try: + f.write(data) + finally: + f.close() + + def _ensuredirs(self): + parent = self.dirpath() + if parent == self: + return self + if parent.check(dir=0): + parent._ensuredirs() + if self.check(dir=0): + try: + self.mkdir() + except error.EEXIST: + # race condition: file/dir created by another thread/process. + # complain if it is not a dir + if self.check(dir=0): + raise + return self + + def ensure(self, *args, **kwargs): + """Ensure that an args-joined path exists (by default as + a file). if you specify a keyword argument 'dir=True' + then the path is forced to be a directory path. + """ + p = self.join(*args) + if kwargs.get("dir", 0): + return p._ensuredirs() + else: + p.dirpath()._ensuredirs() + if not p.check(file=1): + p.open("wb").close() + return p + + @overload + def stat(self, raising: Literal[True] = ...) -> Stat: ... + + @overload + def stat(self, raising: Literal[False]) -> Stat | None: ... + + def stat(self, raising: bool = True) -> Stat | None: + """Return an os.stat() tuple.""" + if raising: + return Stat(self, error.checked_call(os.stat, self.strpath)) + try: + return Stat(self, os.stat(self.strpath)) + except KeyboardInterrupt: + raise + except Exception: + return None + + def lstat(self) -> Stat: + """Return an os.lstat() tuple.""" + return Stat(self, error.checked_call(os.lstat, self.strpath)) + + def setmtime(self, mtime=None): + """Set modification time for the given path. if 'mtime' is None + (the default) then the file's mtime is set to current time. + + Note that the resolution for 'mtime' is platform dependent. + """ + if mtime is None: + return error.checked_call(os.utime, self.strpath, mtime) + try: + return error.checked_call(os.utime, self.strpath, (-1, mtime)) + except error.EINVAL: + return error.checked_call(os.utime, self.strpath, (self.atime(), mtime)) + + def chdir(self): + """Change directory to self and return old current directory""" + try: + old = self.__class__() + except error.ENOENT: + old = None + error.checked_call(os.chdir, self.strpath) + return old + + @contextmanager + def as_cwd(self): + """ + Return a context manager, which changes to the path's dir during the + managed "with" context. + On __enter__ it returns the old dir, which might be ``None``. + """ + old = self.chdir() + try: + yield old + finally: + if old is not None: + old.chdir() + + def realpath(self): + """Return a new path which contains no symbolic links.""" + return self.__class__(os.path.realpath(self.strpath)) + + def atime(self): + """Return last access time of the path.""" + return self.stat().atime + + def __repr__(self): + return f"local({self.strpath!r})" + + def __str__(self): + """Return string representation of the Path.""" + return self.strpath + + def chmod(self, mode, rec=0): + """Change permissions to the given mode. If mode is an + integer it directly encodes the os-specific modes. + if rec is True perform recursively. 
+ """ + if not isinstance(mode, int): + raise TypeError(f"mode {mode!r} must be an integer") + if rec: + for x in self.visit(rec=rec): + error.checked_call(os.chmod, str(x), mode) + error.checked_call(os.chmod, self.strpath, mode) + + def pypkgpath(self): + """Return the Python package path by looking for the last + directory upwards which still contains an __init__.py. + Return None if a pkgpath cannot be determined. + """ + pkgpath = None + for parent in self.parts(reverse=True): + if parent.isdir(): + if not parent.join("__init__.py").exists(): + break + if not isimportable(parent.basename): + break + pkgpath = parent + return pkgpath + + def _ensuresyspath(self, ensuremode, path): + if ensuremode: + s = str(path) + if ensuremode == "append": + if s not in sys.path: + sys.path.append(s) + else: + if s != sys.path[0]: + sys.path.insert(0, s) + + def pyimport(self, modname=None, ensuresyspath=True): + """Return path as an imported python module. + + If modname is None, look for the containing package + and construct an according module name. + The module will be put/looked up in sys.modules. + if ensuresyspath is True then the root dir for importing + the file (taking __init__.py files into account) will + be prepended to sys.path if it isn't there already. + If ensuresyspath=="append" the root dir will be appended + if it isn't already contained in sys.path. + if ensuresyspath is False no modification of syspath happens. + + Special value of ensuresyspath=="importlib" is intended + purely for using in pytest, it is capable only of importing + separate .py files outside packages, e.g. for test suite + without any __init__.py file. It effectively allows having + same-named test modules in different places and offers + mild opt-in via this option. Note that it works only in + recent versions of python. + """ + if not self.check(): + raise error.ENOENT(self) + + if ensuresyspath == "importlib": + if modname is None: + modname = self.purebasename + spec = importlib.util.spec_from_file_location(modname, str(self)) + if spec is None or spec.loader is None: + raise ImportError(f"Can't find module {modname} at location {self!s}") + mod = importlib.util.module_from_spec(spec) + spec.loader.exec_module(mod) + return mod + + pkgpath = None + if modname is None: + pkgpath = self.pypkgpath() + if pkgpath is not None: + pkgroot = pkgpath.dirpath() + names = self.new(ext="").relto(pkgroot).split(self.sep) + if names[-1] == "__init__": + names.pop() + modname = ".".join(names) + else: + pkgroot = self.dirpath() + modname = self.purebasename + + self._ensuresyspath(ensuresyspath, pkgroot) + __import__(modname) + mod = sys.modules[modname] + if self.basename == "__init__.py": + return mod # we don't check anything as we might + # be in a namespace package ... 
too icky to check
+ modfile = mod.__file__
+ assert modfile is not None
+ if modfile[-4:] in (".pyc", ".pyo"):
+ modfile = modfile[:-1]
+ elif modfile.endswith("$py.class"):
+ modfile = modfile[:-9] + ".py"
+ if modfile.endswith(os.sep + "__init__.py"):
+ if self.basename != "__init__.py":
+ modfile = modfile[:-12]
+ try:
+ issame = self.samefile(modfile)
+ except error.ENOENT:
+ issame = False
+ if not issame:
+ ignore = os.getenv("PY_IGNORE_IMPORTMISMATCH")
+ if ignore != "1":
+ raise self.ImportMismatchError(modname, modfile, self)
+ return mod
+ else:
+ try:
+ return sys.modules[modname]
+ except KeyError:
+ # we have a custom modname, do a pseudo-import
+ import types
+
+ mod = types.ModuleType(modname)
+ mod.__file__ = str(self)
+ sys.modules[modname] = mod
+ try:
+ with open(str(self), "rb") as f:
+ exec(f.read(), mod.__dict__)
+ except BaseException:
+ del sys.modules[modname]
+ raise
+ return mod
+
+ def sysexec(self, *argv: os.PathLike[str], **popen_opts: Any) -> str:
+ """Return stdout text from executing a system child process,
+ where the 'self' path points to the executable.
+ The process is directly invoked and not through a system shell.
+ """
+ from subprocess import PIPE
+ from subprocess import Popen
+
+ popen_opts.pop("stdout", None)
+ popen_opts.pop("stderr", None)
+ proc = Popen(
+ [str(self)] + [str(arg) for arg in argv],
+ **popen_opts,
+ stdout=PIPE,
+ stderr=PIPE,
+ )
+ stdout: str | bytes
+ stdout, stderr = proc.communicate()
+ ret = proc.wait()
+ if isinstance(stdout, bytes):
+ stdout = stdout.decode(sys.getdefaultencoding())
+ if ret != 0:
+ if isinstance(stderr, bytes):
+ stderr = stderr.decode(sys.getdefaultencoding())
+ raise RuntimeError(
+ ret,
+ ret,
+ str(self),
+ stdout,
+ stderr,
+ )
+ return stdout
+
+ @classmethod
+ def sysfind(cls, name, checker=None, paths=None):
+ """Return a path object found by looking at the system's
+ underlying PATH specification. If the checker is not None
+ it will be invoked to filter matching paths. If a binary
+ cannot be found, None is returned.
+ Note: this probably does not work on plain win32 systems
+ but may work on cygwin.
+ """
+ if isabs(name):
+ p = local(name)
+ if p.check(file=1):
+ return p
+ else:
+ if paths is None:
+ if iswin32:
+ paths = os.environ["Path"].split(";")
+ if "" not in paths and "." not in paths:
+ paths.append(".")
+ try:
+ systemroot = os.environ["SYSTEMROOT"]
+ except KeyError:
+ pass
+ else:
+ paths = [
+ path.replace("%SystemRoot%", systemroot) for path in paths
+ ]
+ else:
+ paths = os.environ["PATH"].split(":")
+ tryadd = []
+ if iswin32:
+ tryadd += os.environ["PATHEXT"].split(os.pathsep)
+ tryadd.append("")
+
+ for x in paths:
+ for addext in tryadd:
+ p = local(x).join(name, abs=True) + addext
+ try:
+ if p.check(file=1):
+ if checker:
+ if not checker(p):
+ continue
+ return p
+ except error.EACCES:
+ pass
+ return None
+
+ @classmethod
+ def _gethomedir(cls):
+ try:
+ x = os.environ["HOME"]
+ except KeyError:
+ try:
+ x = os.environ["HOMEDRIVE"] + os.environ["HOMEPATH"]
+ except KeyError:
+ return None
+ return cls(x)
+
+ # """
+ # special class constructors for local filesystem paths
+ # """
+ @classmethod
+ def get_temproot(cls):
+ """Return the system's temporary directory
+ (where tempfiles are usually created)
+ """
+ import tempfile
+
+ return local(tempfile.gettempdir())
+
+ @classmethod
+ def mkdtemp(cls, rootdir=None):
+ """Return a Path object pointing to a fresh temporary directory
+ (which we created ourselves).
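+ For example (an illustrative sketch)::
+
+ tmp = local.mkdtemp() # e.g. /tmp/tmpa1b2c3
+ tmp.join("scratch.txt").write("data")
+ tmp.remove(rec=1) # clean up when done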
+ """ + import tempfile + + if rootdir is None: + rootdir = cls.get_temproot() + path = error.checked_call(tempfile.mkdtemp, dir=str(rootdir)) + return cls(path) + + @classmethod + def make_numbered_dir( + cls, prefix="session-", rootdir=None, keep=3, lock_timeout=172800 + ): # two days + """Return unique directory with a number greater than the current + maximum one. The number is assumed to start directly after prefix. + if keep is true directories with a number less than (maxnum-keep) + will be removed. If .lock files are used (lock_timeout non-zero), + algorithm is multi-process safe. + """ + if rootdir is None: + rootdir = cls.get_temproot() + + nprefix = prefix.lower() + + def parse_num(path): + """Parse the number out of a path (if it matches the prefix)""" + nbasename = path.basename.lower() + if nbasename.startswith(nprefix): + try: + return int(nbasename[len(nprefix) :]) + except ValueError: + pass + + def create_lockfile(path): + """Exclusively create lockfile. Throws when failed""" + mypid = os.getpid() + lockfile = path.join(".lock") + if hasattr(lockfile, "mksymlinkto"): + lockfile.mksymlinkto(str(mypid)) + else: + fd = error.checked_call( + os.open, str(lockfile), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644 + ) + with os.fdopen(fd, "w") as f: + f.write(str(mypid)) + return lockfile + + def atexit_remove_lockfile(lockfile): + """Ensure lockfile is removed at process exit""" + mypid = os.getpid() + + def try_remove_lockfile(): + # in a fork() situation, only the last process should + # remove the .lock, otherwise the other processes run the + # risk of seeing their temporary dir disappear. For now + # we remove the .lock in the parent only (i.e. we assume + # that the children finish before the parent). + if os.getpid() != mypid: + return + try: + lockfile.remove() + except error.Error: + pass + + atexit.register(try_remove_lockfile) + + # compute the maximum number currently in use with the prefix + lastmax = None + while True: + maxnum = -1 + for path in rootdir.listdir(): + num = parse_num(path) + if num is not None: + maxnum = max(maxnum, num) + + # make the new directory + try: + udir = rootdir.mkdir(prefix + str(maxnum + 1)) + if lock_timeout: + lockfile = create_lockfile(udir) + atexit_remove_lockfile(lockfile) + except (error.EEXIST, error.ENOENT, error.EBUSY): + # race condition (1): another thread/process created the dir + # in the meantime - try again + # race condition (2): another thread/process spuriously acquired + # lock treating empty directory as candidate + # for removal - try again + # race condition (3): another thread/process tried to create the lock at + # the same time (happened in Python 3.3 on Windows) + # https://ci.appveyor.com/project/pytestbot/py/build/1.0.21/job/ffi85j4c0lqwsfwa + if lastmax == maxnum: + raise + lastmax = maxnum + continue + break + + def get_mtime(path): + """Read file modification time""" + try: + return path.lstat().mtime + except error.Error: + pass + + garbage_prefix = prefix + "garbage-" + + def is_garbage(path): + """Check if path denotes directory scheduled for removal""" + bn = path.basename + return bn.startswith(garbage_prefix) + + # prune old directories + udir_time = get_mtime(udir) + if keep and udir_time: + for path in rootdir.listdir(): + num = parse_num(path) + if num is not None and num <= (maxnum - keep): + try: + # try acquiring lock to remove directory as exclusive user + if lock_timeout: + create_lockfile(path) + except (error.EEXIST, error.ENOENT, error.EBUSY): + path_time = get_mtime(path) + if not 
path_time: + # assume directory doesn't exist now + continue + if abs(udir_time - path_time) < lock_timeout: + # assume directory with lockfile exists + # and lock timeout hasn't expired yet + continue + + # path dir locked for exclusive use + # and scheduled for removal to avoid another thread/process + # treating it as a new directory or removal candidate + garbage_path = rootdir.join(garbage_prefix + str(uuid.uuid4())) + try: + path.rename(garbage_path) + garbage_path.remove(rec=1) + except KeyboardInterrupt: + raise + except Exception: # this might be error.Error, WindowsError ... + pass + if is_garbage(path): + try: + path.remove(rec=1) + except KeyboardInterrupt: + raise + except Exception: # this might be error.Error, WindowsError ... + pass + + # make link... + try: + username = os.environ["USER"] # linux, et al + except KeyError: + try: + username = os.environ["USERNAME"] # windows + except KeyError: + username = "current" + + src = str(udir) + dest = src[: src.rfind("-")] + "-" + username + try: + os.unlink(dest) + except OSError: + pass + try: + os.symlink(src, dest) + except (OSError, AttributeError, NotImplementedError): + pass + + return udir + + +def copymode(src, dest): + """Copy permission from src to dst.""" + import shutil + + shutil.copymode(src, dest) + + +def copystat(src, dest): + """Copy permission, last modification time, + last access time, and flags from src to dst.""" + import shutil + + shutil.copystat(str(src), str(dest)) + + +def copychunked(src, dest): + chunksize = 524288 # half a meg of bytes + fsrc = src.open("rb") + try: + fdest = dest.open("wb") + try: + while 1: + buf = fsrc.read(chunksize) + if not buf: + break + fdest.write(buf) + finally: + fdest.close() + finally: + fsrc.close() + + +def isimportable(name): + if name and (name[0].isalpha() or name[0] == "_"): + name = name.replace("_", "") + return not name or name.isalnum() + + +local = LocalPath diff --git a/.venv/lib/python3.11/site-packages/_pytest/_version.py b/.venv/lib/python3.11/site-packages/_pytest/_version.py new file mode 100644 index 0000000..1eaa448 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/_version.py @@ -0,0 +1,34 @@ +# file generated by setuptools-scm +# don't change, don't track in version control + +__all__ = [ + "__version__", + "__version_tuple__", + "version", + "version_tuple", + "__commit_id__", + "commit_id", +] + +TYPE_CHECKING = False +if TYPE_CHECKING: + from typing import Tuple + from typing import Union + + VERSION_TUPLE = Tuple[Union[int, str], ...] 
+ COMMIT_ID = Union[str, None] +else: + VERSION_TUPLE = object + COMMIT_ID = object + +version: str +__version__: str +__version_tuple__: VERSION_TUPLE +version_tuple: VERSION_TUPLE +commit_id: COMMIT_ID +__commit_id__: COMMIT_ID + +__version__ = version = '8.4.2' +__version_tuple__ = version_tuple = (8, 4, 2) + +__commit_id__ = commit_id = None diff --git a/.venv/lib/python3.11/site-packages/_pytest/assertion/__init__.py b/.venv/lib/python3.11/site-packages/_pytest/assertion/__init__.py new file mode 100644 index 0000000..22f3ca8 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/assertion/__init__.py @@ -0,0 +1,208 @@ +# mypy: allow-untyped-defs +"""Support for presenting detailed information in failing assertions.""" + +from __future__ import annotations + +from collections.abc import Generator +import sys +from typing import Any +from typing import Protocol +from typing import TYPE_CHECKING + +from _pytest.assertion import rewrite +from _pytest.assertion import truncate +from _pytest.assertion import util +from _pytest.assertion.rewrite import assertstate_key +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.nodes import Item + + +if TYPE_CHECKING: + from _pytest.main import Session + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("debugconfig") + group.addoption( + "--assert", + action="store", + dest="assertmode", + choices=("rewrite", "plain"), + default="rewrite", + metavar="MODE", + help=( + "Control assertion debugging tools.\n" + "'plain' performs no assertion debugging.\n" + "'rewrite' (the default) rewrites assert statements in test modules" + " on import to provide assert expression information." + ), + ) + parser.addini( + "enable_assertion_pass_hook", + type="bool", + default=False, + help="Enables the pytest_assertion_pass hook. " + "Make sure to delete any previously generated pyc cache files.", + ) + + parser.addini( + "truncation_limit_lines", + default=None, + help="Set threshold of LINES after which truncation will take effect", + ) + parser.addini( + "truncation_limit_chars", + default=None, + help=("Set threshold of CHARS after which truncation will take effect"), + ) + + Config._add_verbosity_ini( + parser, + Config.VERBOSITY_ASSERTIONS, + help=( + "Specify a verbosity level for assertions, overriding the main level. " + "Higher levels will provide more detailed explanation when an assertion fails." + ), + ) + + +def register_assert_rewrite(*names: str) -> None: + """Register one or more module names to be rewritten on import. + + This function will make sure that this module or all modules inside + the package will get their assert statements rewritten. + Thus you should make sure to call this before the module is + actually imported, usually in your __init__.py if you are a plugin + using a package. + + :param names: The module names to register. + """ + for name in names: + if not isinstance(name, str): + msg = "expected module names as *args, got {0} instead" # type: ignore[unreachable] + raise TypeError(msg.format(repr(names))) + rewrite_hook: RewriteHook + for hook in sys.meta_path: + if isinstance(hook, rewrite.AssertionRewritingHook): + rewrite_hook = hook + break + else: + rewrite_hook = DummyRewriteHook() + rewrite_hook.mark_rewrite(*names) + + +class RewriteHook(Protocol): + def mark_rewrite(self, *names: str) -> None: ... 
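+
+# For instance, a plugin shipped as a package would typically call
+# pytest.register_assert_rewrite from its own __init__.py, before importing
+# the submodules whose asserts should be rewritten (a minimal sketch;
+# "myplugin" and "helpers" are hypothetical names):
+#
+# import pytest
+# pytest.register_assert_rewrite("myplugin.helpers")
+# from myplugin import helpers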
+
+
+class DummyRewriteHook:
+ """A no-op import hook for when rewriting is disabled."""
+
+ def mark_rewrite(self, *names: str) -> None:
+ pass
+
+
+class AssertionState:
+ """State for the assertion plugin."""
+
+ def __init__(self, config: Config, mode) -> None:
+ self.mode = mode
+ self.trace = config.trace.root.get("assertion")
+ self.hook: rewrite.AssertionRewritingHook | None = None
+
+
+def install_importhook(config: Config) -> rewrite.AssertionRewritingHook:
+ """Try to install the rewrite hook, raise SystemError if it fails."""
+ config.stash[assertstate_key] = AssertionState(config, "rewrite")
+ config.stash[assertstate_key].hook = hook = rewrite.AssertionRewritingHook(config)
+ sys.meta_path.insert(0, hook)
+ config.stash[assertstate_key].trace("installed rewrite import hook")
+
+ def undo() -> None:
+ hook = config.stash[assertstate_key].hook
+ if hook is not None and hook in sys.meta_path:
+ sys.meta_path.remove(hook)
+
+ config.add_cleanup(undo)
+ return hook
+
+
+def pytest_collection(session: Session) -> None:
+ # This hook is only called when test modules are collected
+ # so for example not in the managing process of pytest-xdist
+ # (which does not collect test modules).
+ assertstate = session.config.stash.get(assertstate_key, None)
+ if assertstate:
+ if assertstate.hook is not None:
+ assertstate.hook.set_session(session)
+
+
+@hookimpl(wrapper=True, tryfirst=True)
+def pytest_runtest_protocol(item: Item) -> Generator[None, object, object]:
+ """Set up the pytest_assertrepr_compare and pytest_assertion_pass hooks.
+
+ The rewrite module will use util._reprcompare, if it exists, for custom
+ reporting via the pytest_assertrepr_compare hook. This sets up that custom
+ comparison for the test.
+ """
+ ihook = item.ihook
+
+ def callbinrepr(op, left: object, right: object) -> str | None:
+ """Call the pytest_assertrepr_compare hook and prepare the result.
+
+ This uses the first result from the hook and then ensures the
+ following:
+ * Overly verbose explanations are truncated unless configured otherwise
+ (e.g. if running in verbose mode).
+ * Embedded newlines are escaped to help util.format_explanation()
+ later.
+ * If the rewrite mode is used, embedded %-characters are replaced
+ to protect later % formatting.
+
+ The result can be formatted by util.format_explanation() for
+ pretty printing.
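+ A conftest.py implementation feeding this hook might look like
+ (a minimal sketch; the Money class is hypothetical)::
+
+ def pytest_assertrepr_compare(config, op, left, right):
+ if isinstance(left, Money) and isinstance(right, Money) and op == "==":
+ return ["comparing Money instances:", f" amounts: {left.amount} != {right.amount}"]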
+ """ + hook_result = ihook.pytest_assertrepr_compare( + config=item.config, op=op, left=left, right=right + ) + for new_expl in hook_result: + if new_expl: + new_expl = truncate.truncate_if_required(new_expl, item) + new_expl = [line.replace("\n", "\\n") for line in new_expl] + res = "\n~".join(new_expl) + if item.config.getvalue("assertmode") == "rewrite": + res = res.replace("%", "%%") + return res + return None + + saved_assert_hooks = util._reprcompare, util._assertion_pass + util._reprcompare = callbinrepr + util._config = item.config + + if ihook.pytest_assertion_pass.get_hookimpls(): + + def call_assertion_pass_hook(lineno: int, orig: str, expl: str) -> None: + ihook.pytest_assertion_pass(item=item, lineno=lineno, orig=orig, expl=expl) + + util._assertion_pass = call_assertion_pass_hook + + try: + return (yield) + finally: + util._reprcompare, util._assertion_pass = saved_assert_hooks + util._config = None + + +def pytest_sessionfinish(session: Session) -> None: + assertstate = session.config.stash.get(assertstate_key, None) + if assertstate: + if assertstate.hook is not None: + assertstate.hook.set_session(None) + + +def pytest_assertrepr_compare( + config: Config, op: str, left: Any, right: Any +) -> list[str] | None: + return util.assertrepr_compare(config=config, op=op, left=left, right=right) diff --git a/.venv/lib/python3.11/site-packages/_pytest/assertion/rewrite.py b/.venv/lib/python3.11/site-packages/_pytest/assertion/rewrite.py new file mode 100644 index 0000000..c4782c7 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/assertion/rewrite.py @@ -0,0 +1,1216 @@ +"""Rewrite assertion AST to produce nice error messages.""" + +from __future__ import annotations + +import ast +from collections import defaultdict +from collections.abc import Callable +from collections.abc import Iterable +from collections.abc import Iterator +from collections.abc import Sequence +import errno +import functools +import importlib.abc +import importlib.machinery +import importlib.util +import io +import itertools +import marshal +import os +from pathlib import Path +from pathlib import PurePath +import struct +import sys +import tokenize +import types +from typing import IO +from typing import TYPE_CHECKING + +from _pytest._io.saferepr import DEFAULT_REPR_MAX_SIZE +from _pytest._io.saferepr import saferepr +from _pytest._io.saferepr import saferepr_unlimited +from _pytest._version import version +from _pytest.assertion import util +from _pytest.config import Config +from _pytest.fixtures import FixtureFunctionDefinition +from _pytest.main import Session +from _pytest.pathlib import absolutepath +from _pytest.pathlib import fnmatch_ex +from _pytest.stash import StashKey + + +# fmt: off +from _pytest.assertion.util import format_explanation as _format_explanation # noqa:F401, isort:skip +# fmt:on + +if TYPE_CHECKING: + from _pytest.assertion import AssertionState + + +class Sentinel: + pass + + +assertstate_key = StashKey["AssertionState"]() + +# pytest caches rewritten pycs in pycache dirs +PYTEST_TAG = f"{sys.implementation.cache_tag}-pytest-{version}" +PYC_EXT = ".py" + ((__debug__ and "c") or "o") +PYC_TAIL = "." 
+ PYTEST_TAG + PYC_EXT + +# Special marker that denotes we have just left a scope definition +_SCOPE_END_MARKER = Sentinel() + + +class AssertionRewritingHook(importlib.abc.MetaPathFinder, importlib.abc.Loader): + """PEP302/PEP451 import hook which rewrites asserts.""" + + def __init__(self, config: Config) -> None: + self.config = config + try: + self.fnpats = config.getini("python_files") + except ValueError: + self.fnpats = ["test_*.py", "*_test.py"] + self.session: Session | None = None + self._rewritten_names: dict[str, Path] = {} + self._must_rewrite: set[str] = set() + # flag to guard against trying to rewrite a pyc file while we are already writing another pyc file, + # which might result in infinite recursion (#3506) + self._writing_pyc = False + self._basenames_to_check_rewrite = {"conftest"} + self._marked_for_rewrite_cache: dict[str, bool] = {} + self._session_paths_checked = False + + def set_session(self, session: Session | None) -> None: + self.session = session + self._session_paths_checked = False + + # Indirection so we can mock calls to find_spec originated from the hook during testing + _find_spec = importlib.machinery.PathFinder.find_spec + + def find_spec( + self, + name: str, + path: Sequence[str | bytes] | None = None, + target: types.ModuleType | None = None, + ) -> importlib.machinery.ModuleSpec | None: + if self._writing_pyc: + return None + state = self.config.stash[assertstate_key] + if self._early_rewrite_bailout(name, state): + return None + state.trace(f"find_module called for: {name}") + + # Type ignored because mypy is confused about the `self` binding here. + spec = self._find_spec(name, path) # type: ignore + + if spec is None and path is not None: + # With --import-mode=importlib, PathFinder cannot find spec without modifying `sys.path`, + # causing inability to assert rewriting (#12659). + # At this point, try using the file path to find the module spec. + for _path_str in path: + spec = importlib.util.spec_from_file_location(name, _path_str) + if spec is not None: + break + + if ( + # the import machinery could not find a file to import + spec is None + # this is a namespace package (without `__init__.py`) + # there's nothing to rewrite there + or spec.origin is None + # we can only rewrite source files + or not isinstance(spec.loader, importlib.machinery.SourceFileLoader) + # if the file doesn't exist, we can't rewrite it + or not os.path.exists(spec.origin) + ): + return None + else: + fn = spec.origin + + if not self._should_rewrite(name, fn, state): + return None + + return importlib.util.spec_from_file_location( + name, + fn, + loader=self, + submodule_search_locations=spec.submodule_search_locations, + ) + + def create_module( + self, spec: importlib.machinery.ModuleSpec + ) -> types.ModuleType | None: + return None # default behaviour is fine + + def exec_module(self, module: types.ModuleType) -> None: + assert module.__spec__ is not None + assert module.__spec__.origin is not None + fn = Path(module.__spec__.origin) + state = self.config.stash[assertstate_key] + + self._rewritten_names[module.__name__] = fn + + # The requested module looks like a test file, so rewrite it. This is + # the most magical part of the process: load the source, rewrite the + # asserts, and load the rewritten source. We also cache the rewritten + # module code in a special pyc. We must be aware of the possibility of + # concurrent pytest processes rewriting and loading pycs. 
To avoid + # tricky race conditions, we maintain the following invariant: The + # cached pyc is always a complete, valid pyc. Operations on it must be + # atomic. POSIX's atomic rename comes in handy. + write = not sys.dont_write_bytecode + cache_dir = get_cache_dir(fn) + if write: + ok = try_makedirs(cache_dir) + if not ok: + write = False + state.trace(f"read only directory: {cache_dir}") + + cache_name = fn.name[:-3] + PYC_TAIL + pyc = cache_dir / cache_name + # Notice that even if we're in a read-only directory, I'm going + # to check for a cached pyc. This may not be optimal... + co = _read_pyc(fn, pyc, state.trace) + if co is None: + state.trace(f"rewriting {fn!r}") + source_stat, co = _rewrite_test(fn, self.config) + if write: + self._writing_pyc = True + try: + _write_pyc(state, co, source_stat, pyc) + finally: + self._writing_pyc = False + else: + state.trace(f"found cached rewritten pyc for {fn}") + exec(co, module.__dict__) + + def _early_rewrite_bailout(self, name: str, state: AssertionState) -> bool: + """A fast way to get out of rewriting modules. + + Profiling has shown that the call to PathFinder.find_spec (inside of + the find_spec from this class) is a major slowdown, so, this method + tries to filter what we're sure won't be rewritten before getting to + it. + """ + if self.session is not None and not self._session_paths_checked: + self._session_paths_checked = True + for initial_path in self.session._initialpaths: + # Make something as c:/projects/my_project/path.py -> + # ['c:', 'projects', 'my_project', 'path.py'] + parts = str(initial_path).split(os.sep) + # add 'path' to basenames to be checked. + self._basenames_to_check_rewrite.add(os.path.splitext(parts[-1])[0]) + + # Note: conftest already by default in _basenames_to_check_rewrite. + parts = name.split(".") + if parts[-1] in self._basenames_to_check_rewrite: + return False + + # For matching the name it must be as if it was a filename. 
+ path = PurePath(*parts).with_suffix(".py") + + for pat in self.fnpats: + # if the pattern contains subdirectories ("tests/**.py" for example) we can't bail out based + # on the name alone because we need to match against the full path + if os.path.dirname(pat): + return False + if fnmatch_ex(pat, path): + return False + + if self._is_marked_for_rewrite(name, state): + return False + + state.trace(f"early skip of rewriting module: {name}") + return True + + def _should_rewrite(self, name: str, fn: str, state: AssertionState) -> bool: + # always rewrite conftest files + if os.path.basename(fn) == "conftest.py": + state.trace(f"rewriting conftest file: {fn!r}") + return True + + if self.session is not None: + if self.session.isinitpath(absolutepath(fn)): + state.trace(f"matched test file (was specified on cmdline): {fn!r}") + return True + + # modules not passed explicitly on the command line are only + # rewritten if they match the naming convention for test files + fn_path = PurePath(fn) + for pat in self.fnpats: + if fnmatch_ex(pat, fn_path): + state.trace(f"matched test file {fn!r}") + return True + + return self._is_marked_for_rewrite(name, state) + + def _is_marked_for_rewrite(self, name: str, state: AssertionState) -> bool: + try: + return self._marked_for_rewrite_cache[name] + except KeyError: + for marked in self._must_rewrite: + if name == marked or name.startswith(marked + "."): + state.trace(f"matched marked file {name!r} (from {marked!r})") + self._marked_for_rewrite_cache[name] = True + return True + + self._marked_for_rewrite_cache[name] = False + return False + + def mark_rewrite(self, *names: str) -> None: + """Mark import names as needing to be rewritten. + + The named module or package as well as any nested modules will + be rewritten on import. + """ + already_imported = ( + set(names).intersection(sys.modules).difference(self._rewritten_names) + ) + for name in already_imported: + mod = sys.modules[name] + if not AssertionRewriter.is_rewrite_disabled( + mod.__doc__ or "" + ) and not isinstance(mod.__loader__, type(self)): + self._warn_already_imported(name) + self._must_rewrite.update(names) + self._marked_for_rewrite_cache.clear() + + def _warn_already_imported(self, name: str) -> None: + from _pytest.warning_types import PytestAssertRewriteWarning + + self.config.issue_config_time_warning( + PytestAssertRewriteWarning( + f"Module already imported so cannot be rewritten; {name}" + ), + stacklevel=5, + ) + + def get_data(self, pathname: str | bytes) -> bytes: + """Optional PEP302 get_data API.""" + with open(pathname, "rb") as f: + return f.read() + + if sys.version_info >= (3, 10): + if sys.version_info >= (3, 12): + from importlib.resources.abc import TraversableResources + else: + from importlib.abc import TraversableResources + + def get_resource_reader(self, name: str) -> TraversableResources: + if sys.version_info < (3, 11): + from importlib.readers import FileReader + else: + from importlib.resources.readers import FileReader + + return FileReader(types.SimpleNamespace(path=self._rewritten_names[name])) + + +def _write_pyc_fp( + fp: IO[bytes], source_stat: os.stat_result, co: types.CodeType +) -> None: + # Technically, we don't have to have the same pyc format as + # (C)Python, since these "pycs" should never be seen by builtin + # import. However, there's little reason to deviate. 
+ fp.write(importlib.util.MAGIC_NUMBER)
+ # https://www.python.org/dev/peps/pep-0552/
+ flags = b"\x00\x00\x00\x00"
+ fp.write(flags)
+ # as of now, bytecode header expects 32-bit numbers for size and mtime (#4903)
+ mtime = int(source_stat.st_mtime) & 0xFFFFFFFF
+ size = source_stat.st_size & 0xFFFFFFFF
+ # "<LL" stands for 2 unsigned longs, little-endian.
+ fp.write(struct.pack("<LL", mtime, size))
+ fp.write(marshal.dumps(co))
+
+
+def _write_pyc(
+ state: AssertionState,
+ co: types.CodeType,
+ source_stat: os.stat_result,
+ pyc: Path,
+) -> bool:
+ proc_pyc = f"{pyc}.{os.getpid()}"
+ try:
+ with open(proc_pyc, "wb") as fp:
+ _write_pyc_fp(fp, source_stat, co)
+ except OSError as e:
+ state.trace(f"error writing pyc file at {proc_pyc}: errno={e.errno}")
+ return False
+
+ try:
+ os.replace(proc_pyc, pyc)
+ except OSError as e:
+ state.trace(f"error writing pyc file at {pyc}: {e}")
+ # we ignore any failure to write the cache file
+ # there are many reasons, permission-denied, pycache dir being a
+ # file etc.
+ return False
+ return True
+
+
+def _rewrite_test(fn: Path, config: Config) -> tuple[os.stat_result, types.CodeType]:
+ """Read and rewrite *fn* and return the code object."""
+ stat = os.stat(fn)
+ source = fn.read_bytes()
+ strfn = str(fn)
+ tree = ast.parse(source, filename=strfn)
+ rewrite_asserts(tree, source, strfn, config)
+ co = compile(tree, strfn, "exec", dont_inherit=True)
+ return stat, co
+
+
+def _read_pyc(
+ source: Path, pyc: Path, trace: Callable[[str], None] = lambda x: None
+) -> types.CodeType | None:
+ """Possibly read a pytest pyc containing rewritten code.
+
+ Return rewritten code if successful or None if not.
+ """
+ try:
+ fp = open(pyc, "rb")
+ except OSError:
+ return None
+ with fp:
+ try:
+ stat_result = os.stat(source)
+ mtime = int(stat_result.st_mtime)
+ size = stat_result.st_size
+ data = fp.read(16)
+ except OSError as e:
+ trace(f"_read_pyc({source}): OSError {e}")
+ return None
+ # Check for invalid or out of date pyc file.
+ if len(data) != (16):
+ trace(f"_read_pyc({source}): invalid pyc (too short)")
+ return None
+ if data[:4] != importlib.util.MAGIC_NUMBER:
+ trace(f"_read_pyc({source}): invalid pyc (bad magic number)")
+ return None
+ if data[4:8] != b"\x00\x00\x00\x00":
+ trace(f"_read_pyc({source}): invalid pyc (unsupported flags)")
+ return None
+ mtime_data = data[8:12]
+ if int.from_bytes(mtime_data, "little") != mtime & 0xFFFFFFFF:
+ trace(f"_read_pyc({source}): out of date")
+ return None
+ size_data = data[12:16]
+ if int.from_bytes(size_data, "little") != size & 0xFFFFFFFF:
+ trace(f"_read_pyc({source}): invalid pyc (incorrect size)")
+ return None
+ try:
+ co = marshal.load(fp)
+ except Exception as e:
+ trace(f"_read_pyc({source}): marshal.load error {e}")
+ return None
+ if not isinstance(co, types.CodeType):
+ trace(f"_read_pyc({source}): not a code object")
+ return None
+ return co
+
+
+def rewrite_asserts(
+ mod: ast.Module,
+ source: bytes,
+ module_path: str | None = None,
+ config: Config | None = None,
+) -> None:
+ """Rewrite the assert statements in mod."""
+ AssertionRewriter(module_path, config, source).run(mod)
+
+
+def _saferepr(obj: object) -> str:
+ r"""Get a safe repr of an object for assertion error messages.
+
+ The assertion formatting (util.format_explanation()) requires
+ newlines to be escaped since they are a special character for it.
+ Normally assertion.util.format_explanation() does this, but a
+ custom repr may itself contain one of the special escape
+ sequences; in particular '\n{' and '\n}' are likely to be present in
+ JSON reprs.
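+ For example (an illustrative sketch)::
+
+ class Widget:
+ def __repr__(self):
+ return "Widget(\n a=1\n)"
+
+ _saferepr(Widget()) # 'Widget(\\n a=1\\n)' -- newlines escaped for the formatter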
+ """ + if isinstance(obj, types.MethodType): + # for bound methods, skip redundant information + return obj.__name__ + + maxsize = _get_maxsize_for_saferepr(util._config) + if not maxsize: + return saferepr_unlimited(obj).replace("\n", "\\n") + return saferepr(obj, maxsize=maxsize).replace("\n", "\\n") + + +def _get_maxsize_for_saferepr(config: Config | None) -> int | None: + """Get `maxsize` configuration for saferepr based on the given config object.""" + if config is None: + verbosity = 0 + else: + verbosity = config.get_verbosity(Config.VERBOSITY_ASSERTIONS) + if verbosity >= 2: + return None + if verbosity >= 1: + return DEFAULT_REPR_MAX_SIZE * 10 + return DEFAULT_REPR_MAX_SIZE + + +def _format_assertmsg(obj: object) -> str: + r"""Format the custom assertion message given. + + For strings this simply replaces newlines with '\n~' so that + util.format_explanation() will preserve them instead of escaping + newlines. For other objects saferepr() is used first. + """ + # reprlib appears to have a bug which means that if a string + # contains a newline it gets escaped, however if an object has a + # .__repr__() which contains newlines it does not get escaped. + # However in either case we want to preserve the newline. + replaces = [("\n", "\n~"), ("%", "%%")] + if not isinstance(obj, str): + obj = saferepr(obj, _get_maxsize_for_saferepr(util._config)) + replaces.append(("\\n", "\n~")) + + for r1, r2 in replaces: + obj = obj.replace(r1, r2) + + return obj + + +def _should_repr_global_name(obj: object) -> bool: + if callable(obj): + # For pytest fixtures the __repr__ method provides more information than the function name. + return isinstance(obj, FixtureFunctionDefinition) + + try: + return not hasattr(obj, "__name__") + except Exception: + return True + + +def _format_boolop(explanations: Iterable[str], is_or: bool) -> str: + explanation = "(" + ((is_or and " or ") or " and ").join(explanations) + ")" + return explanation.replace("%", "%%") + + +def _call_reprcompare( + ops: Sequence[str], + results: Sequence[bool], + expls: Sequence[str], + each_obj: Sequence[object], +) -> str: + for i, res, expl in zip(range(len(ops)), results, expls): + try: + done = not res + except Exception: + done = True + if done: + break + if util._reprcompare is not None: + custom = util._reprcompare(ops[i], each_obj[i], each_obj[i + 1]) + if custom is not None: + return custom + return expl + + +def _call_assertion_pass(lineno: int, orig: str, expl: str) -> None: + if util._assertion_pass is not None: + util._assertion_pass(lineno, orig, expl) + + +def _check_if_assertion_pass_impl() -> bool: + """Check if any plugins implement the pytest_assertion_pass hook + in order not to generate explanation unnecessarily (might be expensive).""" + return True if util._assertion_pass else False + + +UNARY_MAP = {ast.Not: "not %s", ast.Invert: "~%s", ast.USub: "-%s", ast.UAdd: "+%s"} + +BINOP_MAP = { + ast.BitOr: "|", + ast.BitXor: "^", + ast.BitAnd: "&", + ast.LShift: "<<", + ast.RShift: ">>", + ast.Add: "+", + ast.Sub: "-", + ast.Mult: "*", + ast.Div: "/", + ast.FloorDiv: "//", + ast.Mod: "%%", # escaped for string formatting + ast.Eq: "==", + ast.NotEq: "!=", + ast.Lt: "<", + ast.LtE: "<=", + ast.Gt: ">", + ast.GtE: ">=", + ast.Pow: "**", + ast.Is: "is", + ast.IsNot: "is not", + ast.In: "in", + ast.NotIn: "not in", + ast.MatMult: "@", +} + + +def traverse_node(node: ast.AST) -> Iterator[ast.AST]: + """Recursively yield node and all its children in depth-first order.""" + yield node + for child in 
ast.iter_child_nodes(node): + yield from traverse_node(child) + + +@functools.lru_cache(maxsize=1) +def _get_assertion_exprs(src: bytes) -> dict[int, str]: + """Return a mapping from {lineno: "assertion test expression"}.""" + ret: dict[int, str] = {} + + depth = 0 + lines: list[str] = [] + assert_lineno: int | None = None + seen_lines: set[int] = set() + + def _write_and_reset() -> None: + nonlocal depth, lines, assert_lineno, seen_lines + assert assert_lineno is not None + ret[assert_lineno] = "".join(lines).rstrip().rstrip("\\") + depth = 0 + lines = [] + assert_lineno = None + seen_lines = set() + + tokens = tokenize.tokenize(io.BytesIO(src).readline) + for tp, source, (lineno, offset), _, line in tokens: + if tp == tokenize.NAME and source == "assert": + assert_lineno = lineno + elif assert_lineno is not None: + # keep track of depth for the assert-message `,` lookup + if tp == tokenize.OP and source in "([{": + depth += 1 + elif tp == tokenize.OP and source in ")]}": + depth -= 1 + + if not lines: + lines.append(line[offset:]) + seen_lines.add(lineno) + # a non-nested comma separates the expression from the message + elif depth == 0 and tp == tokenize.OP and source == ",": + # one line assert with message + if lineno in seen_lines and len(lines) == 1: + offset_in_trimmed = offset + len(lines[-1]) - len(line) + lines[-1] = lines[-1][:offset_in_trimmed] + # multi-line assert with message + elif lineno in seen_lines: + lines[-1] = lines[-1][:offset] + # multi line assert with escaped newline before message + else: + lines.append(line[:offset]) + _write_and_reset() + elif tp in {tokenize.NEWLINE, tokenize.ENDMARKER}: + _write_and_reset() + elif lines and lineno not in seen_lines: + lines.append(line) + seen_lines.add(lineno) + + return ret + + +class AssertionRewriter(ast.NodeVisitor): + """Assertion rewriting implementation. + + The main entrypoint is to call .run() with an ast.Module instance, + this will then find all the assert statements and rewrite them to + provide intermediate values and a detailed assertion error. See + http://pybites.blogspot.be/2011/07/behind-scenes-of-pytests-new-assertion.html + for an overview of how this works. + + The entry point here is .run() which will iterate over all the + statements in an ast.Module and for each ast.Assert statement it + finds call .visit() with it. Then .visit_Assert() takes over and + is responsible for creating new ast statements to replace the + original assert statement: it rewrites the test of an assertion + to provide intermediate values and replace it with an if statement + which raises an assertion error with a detailed explanation in + case the expression is false and calls pytest_assertion_pass hook + if expression is true. + + For this .visit_Assert() uses the visitor pattern to visit all the + AST nodes of the ast.Assert.test field, each visit call returning + an AST node and the corresponding explanation string. During this + state is kept in several instance attributes: + + :statements: All the AST statements which will replace the assert + statement. + + :variables: This is populated by .variable() with each variable + used by the statements so that they can all be set to None at + the end of the statements. + + :variable_counter: Counter to create new unique variables needed + by statements. Variables are created using .variable() and + have the form of "@py_assert0". + + :expl_stmts: The AST statements which will be executed to get + data from the assertion. 
This is the code which will construct + the detailed assertion message that is used in the AssertionError + or for the pytest_assertion_pass hook. + + :explanation_specifiers: A dict filled by .explanation_param() + with %-formatting placeholders and their corresponding + expressions to use in the building of an assertion message. + This is used by .pop_format_context() to build a message. + + :stack: A stack of the explanation_specifiers dicts maintained by + .push_format_context() and .pop_format_context() which allows + to build another %-formatted string while already building one. + + :scope: A tuple containing the current scope used for variables_overwrite. + + :variables_overwrite: A dict filled with references to variables + that change value within an assert. This happens when a variable is + reassigned with the walrus operator + + This state, except the variables_overwrite, is reset on every new assert + statement visited and used by the other visitors. + """ + + def __init__( + self, module_path: str | None, config: Config | None, source: bytes + ) -> None: + super().__init__() + self.module_path = module_path + self.config = config + if config is not None: + self.enable_assertion_pass_hook = config.getini( + "enable_assertion_pass_hook" + ) + else: + self.enable_assertion_pass_hook = False + self.source = source + self.scope: tuple[ast.AST, ...] = () + self.variables_overwrite: defaultdict[tuple[ast.AST, ...], dict[str, str]] = ( + defaultdict(dict) + ) + + def run(self, mod: ast.Module) -> None: + """Find all assert statements in *mod* and rewrite them.""" + if not mod.body: + # Nothing to do. + return + + # We'll insert some special imports at the top of the module, but after any + # docstrings and __future__ imports, so first figure out where that is. + doc = getattr(mod, "docstring", None) + expect_docstring = doc is None + if doc is not None and self.is_rewrite_disabled(doc): + return + pos = 0 + item = None + for item in mod.body: + if ( + expect_docstring + and isinstance(item, ast.Expr) + and isinstance(item.value, ast.Constant) + and isinstance(item.value.value, str) + ): + doc = item.value.value + if self.is_rewrite_disabled(doc): + return + expect_docstring = False + elif ( + isinstance(item, ast.ImportFrom) + and item.level == 0 + and item.module == "__future__" + ): + pass + else: + break + pos += 1 + # Special case: for a decorated function, set the lineno to that of the + # first decorator, not the `def`. Issue #4984. + if isinstance(item, ast.FunctionDef) and item.decorator_list: + lineno = item.decorator_list[0].lineno + else: + lineno = item.lineno + # Now actually insert the special imports. + if sys.version_info >= (3, 10): + aliases = [ + ast.alias("builtins", "@py_builtins", lineno=lineno, col_offset=0), + ast.alias( + "_pytest.assertion.rewrite", + "@pytest_ar", + lineno=lineno, + col_offset=0, + ), + ] + else: + aliases = [ + ast.alias("builtins", "@py_builtins"), + ast.alias("_pytest.assertion.rewrite", "@pytest_ar"), + ] + imports = [ + ast.Import([alias], lineno=lineno, col_offset=0) for alias in aliases + ] + mod.body[pos:pos] = imports + + # Collect asserts. 
+ self.scope = (mod,) + nodes: list[ast.AST | Sentinel] = [mod] + while nodes: + node = nodes.pop() + if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)): + self.scope = tuple((*self.scope, node)) + nodes.append(_SCOPE_END_MARKER) + if node == _SCOPE_END_MARKER: + self.scope = self.scope[:-1] + continue + assert isinstance(node, ast.AST) + for name, field in ast.iter_fields(node): + if isinstance(field, list): + new: list[ast.AST] = [] + for i, child in enumerate(field): + if isinstance(child, ast.Assert): + # Transform assert. + new.extend(self.visit(child)) + else: + new.append(child) + if isinstance(child, ast.AST): + nodes.append(child) + setattr(node, name, new) + elif ( + isinstance(field, ast.AST) + # Don't recurse into expressions as they can't contain + # asserts. + and not isinstance(field, ast.expr) + ): + nodes.append(field) + + @staticmethod + def is_rewrite_disabled(docstring: str) -> bool: + return "PYTEST_DONT_REWRITE" in docstring + + def variable(self) -> str: + """Get a new variable.""" + # Use a character invalid in python identifiers to avoid clashing. + name = "@py_assert" + str(next(self.variable_counter)) + self.variables.append(name) + return name + + def assign(self, expr: ast.expr) -> ast.Name: + """Give *expr* a name.""" + name = self.variable() + self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr)) + return ast.copy_location(ast.Name(name, ast.Load()), expr) + + def display(self, expr: ast.expr) -> ast.expr: + """Call saferepr on the expression.""" + return self.helper("_saferepr", expr) + + def helper(self, name: str, *args: ast.expr) -> ast.expr: + """Call a helper in this module.""" + py_name = ast.Name("@pytest_ar", ast.Load()) + attr = ast.Attribute(py_name, name, ast.Load()) + return ast.Call(attr, list(args), []) + + def builtin(self, name: str) -> ast.Attribute: + """Return the builtin called *name*.""" + builtin_name = ast.Name("@py_builtins", ast.Load()) + return ast.Attribute(builtin_name, name, ast.Load()) + + def explanation_param(self, expr: ast.expr) -> str: + """Return a new named %-formatting placeholder for expr. + + This creates a %-formatting placeholder for expr in the + current formatting context, e.g. ``%(py0)s``. The placeholder + and expr are placed in the current format context so that it + can be used on the next call to .pop_format_context(). + """ + specifier = "py" + str(next(self.variable_counter)) + self.explanation_specifiers[specifier] = expr + return "%(" + specifier + ")s" + + def push_format_context(self) -> None: + """Create a new formatting context. + + The format context is used for when an explanation wants to + have a variable value formatted in the assertion message. In + this case the value required can be added using + .explanation_param(). Finally .pop_format_context() is used + to format a string of %-formatted values as added by + .explanation_param(). + """ + self.explanation_specifiers: dict[str, ast.expr] = {} + self.stack.append(self.explanation_specifiers) + + def pop_format_context(self, expl_expr: ast.expr) -> ast.Name: + """Format the %-formatted string with current format context. + + The expl_expr should be an str ast.expr instance constructed from + the %-placeholders created by .explanation_param(). This will + add the required code to format said string to .expl_stmts and + return the ast.Name instance of the formatted string. 
+ """ + current = self.stack.pop() + if self.stack: + self.explanation_specifiers = self.stack[-1] + keys: list[ast.expr | None] = [ast.Constant(key) for key in current.keys()] + format_dict = ast.Dict(keys, list(current.values())) + form = ast.BinOp(expl_expr, ast.Mod(), format_dict) + name = "@py_format" + str(next(self.variable_counter)) + if self.enable_assertion_pass_hook: + self.format_variables.append(name) + self.expl_stmts.append(ast.Assign([ast.Name(name, ast.Store())], form)) + return ast.Name(name, ast.Load()) + + def generic_visit(self, node: ast.AST) -> tuple[ast.Name, str]: + """Handle expressions we don't have custom code for.""" + assert isinstance(node, ast.expr) + res = self.assign(node) + return res, self.explanation_param(self.display(res)) + + def visit_Assert(self, assert_: ast.Assert) -> list[ast.stmt]: + """Return the AST statements to replace the ast.Assert instance. + + This rewrites the test of an assertion to provide + intermediate values and replace it with an if statement which + raises an assertion error with a detailed explanation in case + the expression is false. + """ + if isinstance(assert_.test, ast.Tuple) and len(assert_.test.elts) >= 1: + import warnings + + from _pytest.warning_types import PytestAssertRewriteWarning + + # TODO: This assert should not be needed. + assert self.module_path is not None + warnings.warn_explicit( + PytestAssertRewriteWarning( + "assertion is always true, perhaps remove parentheses?" + ), + category=None, + filename=self.module_path, + lineno=assert_.lineno, + ) + + self.statements: list[ast.stmt] = [] + self.variables: list[str] = [] + self.variable_counter = itertools.count() + + if self.enable_assertion_pass_hook: + self.format_variables: list[str] = [] + + self.stack: list[dict[str, ast.expr]] = [] + self.expl_stmts: list[ast.stmt] = [] + self.push_format_context() + # Rewrite assert into a bunch of statements. 
+ top_condition, explanation = self.visit(assert_.test) + + negation = ast.UnaryOp(ast.Not(), top_condition) + + if self.enable_assertion_pass_hook: # Experimental pytest_assertion_pass hook + msg = self.pop_format_context(ast.Constant(explanation)) + + # Failed + if assert_.msg: + assertmsg = self.helper("_format_assertmsg", assert_.msg) + gluestr = "\n>assert " + else: + assertmsg = ast.Constant("") + gluestr = "assert " + err_explanation = ast.BinOp(ast.Constant(gluestr), ast.Add(), msg) + err_msg = ast.BinOp(assertmsg, ast.Add(), err_explanation) + err_name = ast.Name("AssertionError", ast.Load()) + fmt = self.helper("_format_explanation", err_msg) + exc = ast.Call(err_name, [fmt], []) + raise_ = ast.Raise(exc, None) + statements_fail = [] + statements_fail.extend(self.expl_stmts) + statements_fail.append(raise_) + + # Passed + fmt_pass = self.helper("_format_explanation", msg) + orig = _get_assertion_exprs(self.source)[assert_.lineno] + hook_call_pass = ast.Expr( + self.helper( + "_call_assertion_pass", + ast.Constant(assert_.lineno), + ast.Constant(orig), + fmt_pass, + ) + ) + # Run only if any hooks implement the assertion-pass hook + hook_impl_test = ast.If( + self.helper("_check_if_assertion_pass_impl"), + [*self.expl_stmts, hook_call_pass], + [], + ) + statements_pass: list[ast.stmt] = [hook_impl_test] + + # Test for assertion condition + main_test = ast.If(negation, statements_fail, statements_pass) + self.statements.append(main_test) + if self.format_variables: + variables: list[ast.expr] = [ + ast.Name(name, ast.Store()) for name in self.format_variables + ] + clear_format = ast.Assign(variables, ast.Constant(None)) + self.statements.append(clear_format) + + else: # Original assertion rewriting + # Create failure message. + body = self.expl_stmts + self.statements.append(ast.If(negation, body, [])) + if assert_.msg: + assertmsg = self.helper("_format_assertmsg", assert_.msg) + explanation = "\n>assert " + explanation + else: + assertmsg = ast.Constant("") + explanation = "assert " + explanation + template = ast.BinOp(assertmsg, ast.Add(), ast.Constant(explanation)) + msg = self.pop_format_context(template) + fmt = self.helper("_format_explanation", msg) + err_name = ast.Name("AssertionError", ast.Load()) + exc = ast.Call(err_name, [fmt], []) + raise_ = ast.Raise(exc, None) + + body.append(raise_) + + # Clear temporary variables by setting them to None. + if self.variables: + variables = [ast.Name(name, ast.Store()) for name in self.variables] + clear = ast.Assign(variables, ast.Constant(None)) + self.statements.append(clear) + # Fix locations (line numbers/column offsets). + for stmt in self.statements: + for node in traverse_node(stmt): + if getattr(node, "lineno", None) is None: + # apply the assertion location to all generated ast nodes without source location + # and preserve the location of existing nodes or generated nodes with a correct location. + ast.copy_location(node, assert_) + return self.statements + + def visit_NamedExpr(self, name: ast.NamedExpr) -> tuple[ast.NamedExpr, str]: + # This method handles the 'walrus operator' repr of the target + # name if it's a local variable or _should_repr_global_name() + # thinks it's acceptable.
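For context, a sketch of the walrus-operator case this method handles (test name hypothetical): the target name is bound while the assert itself is being evaluated, so the rewriter has to choose between repr'ing it via locals() and falling back to the bare name, as the code below does.

```python
# Hypothetical test exercising visit_NamedExpr:
def test_walrus_in_assert() -> None:
    items = [1, 2]
    # `n` only comes into existence during evaluation of this assert.
    assert (n := len(items)) == 2
```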
+ locs = ast.Call(self.builtin("locals"), [], []) + target_id = name.target.id + inlocs = ast.Compare(ast.Constant(target_id), [ast.In()], [locs]) + dorepr = self.helper("_should_repr_global_name", name) + test = ast.BoolOp(ast.Or(), [inlocs, dorepr]) + expr = ast.IfExp(test, self.display(name), ast.Constant(target_id)) + return name, self.explanation_param(expr) + + def visit_Name(self, name: ast.Name) -> tuple[ast.Name, str]: + # Display the repr of the name if it's a local variable or + # _should_repr_global_name() thinks it's acceptable. + locs = ast.Call(self.builtin("locals"), [], []) + inlocs = ast.Compare(ast.Constant(name.id), [ast.In()], [locs]) + dorepr = self.helper("_should_repr_global_name", name) + test = ast.BoolOp(ast.Or(), [inlocs, dorepr]) + expr = ast.IfExp(test, self.display(name), ast.Constant(name.id)) + return name, self.explanation_param(expr) + + def visit_BoolOp(self, boolop: ast.BoolOp) -> tuple[ast.Name, str]: + res_var = self.variable() + expl_list = self.assign(ast.List([], ast.Load())) + app = ast.Attribute(expl_list, "append", ast.Load()) + is_or = int(isinstance(boolop.op, ast.Or)) + body = save = self.statements + fail_save = self.expl_stmts + levels = len(boolop.values) - 1 + self.push_format_context() + # Process each operand, short-circuiting if needed. + for i, v in enumerate(boolop.values): + if i: + fail_inner: list[ast.stmt] = [] + # cond is set in a prior loop iteration below + self.expl_stmts.append(ast.If(cond, fail_inner, [])) # noqa: F821 + self.expl_stmts = fail_inner + # Check if the left operand is a ast.NamedExpr and the value has already been visited + if ( + isinstance(v, ast.Compare) + and isinstance(v.left, ast.NamedExpr) + and v.left.target.id + in [ + ast_expr.id + for ast_expr in boolop.values[:i] + if hasattr(ast_expr, "id") + ] + ): + pytest_temp = self.variable() + self.variables_overwrite[self.scope][v.left.target.id] = v.left # type:ignore[assignment] + v.left.target.id = pytest_temp + self.push_format_context() + res, expl = self.visit(v) + body.append(ast.Assign([ast.Name(res_var, ast.Store())], res)) + expl_format = self.pop_format_context(ast.Constant(expl)) + call = ast.Call(app, [expl_format], []) + self.expl_stmts.append(ast.Expr(call)) + if i < levels: + cond: ast.expr = res + if is_or: + cond = ast.UnaryOp(ast.Not(), cond) + inner: list[ast.stmt] = [] + self.statements.append(ast.If(cond, inner, [])) + self.statements = body = inner + self.statements = save + self.expl_stmts = fail_save + expl_template = self.helper("_format_boolop", expl_list, ast.Constant(is_or)) + expl = self.pop_format_context(expl_template) + return ast.Name(res_var, ast.Load()), self.explanation_param(expl) + + def visit_UnaryOp(self, unary: ast.UnaryOp) -> tuple[ast.Name, str]: + pattern = UNARY_MAP[unary.op.__class__] + operand_res, operand_expl = self.visit(unary.operand) + res = self.assign(ast.copy_location(ast.UnaryOp(unary.op, operand_res), unary)) + return res, pattern % (operand_expl,) + + def visit_BinOp(self, binop: ast.BinOp) -> tuple[ast.Name, str]: + symbol = BINOP_MAP[binop.op.__class__] + left_expr, left_expl = self.visit(binop.left) + right_expr, right_expl = self.visit(binop.right) + explanation = f"({left_expl} {symbol} {right_expl})" + res = self.assign( + ast.copy_location(ast.BinOp(left_expr, binop.op, right_expr), binop) + ) + return res, explanation + + def visit_Call(self, call: ast.Call) -> tuple[ast.Name, str]: + new_func, func_expl = self.visit(call.func) + arg_expls = [] + new_args = [] + new_kwargs = [] + for arg in 
call.args: + if isinstance(arg, ast.Name) and arg.id in self.variables_overwrite.get( + self.scope, {} + ): + arg = self.variables_overwrite[self.scope][arg.id] # type:ignore[assignment] + res, expl = self.visit(arg) + arg_expls.append(expl) + new_args.append(res) + for keyword in call.keywords: + if isinstance( + keyword.value, ast.Name + ) and keyword.value.id in self.variables_overwrite.get(self.scope, {}): + keyword.value = self.variables_overwrite[self.scope][keyword.value.id] # type:ignore[assignment] + res, expl = self.visit(keyword.value) + new_kwargs.append(ast.keyword(keyword.arg, res)) + if keyword.arg: + arg_expls.append(keyword.arg + "=" + expl) + else: # **args have `arg` keywords with an .arg of None + arg_expls.append("**" + expl) + + expl = "{}({})".format(func_expl, ", ".join(arg_expls)) + new_call = ast.copy_location(ast.Call(new_func, new_args, new_kwargs), call) + res = self.assign(new_call) + res_expl = self.explanation_param(self.display(res)) + outer_expl = f"{res_expl}\n{{{res_expl} = {expl}\n}}" + return res, outer_expl + + def visit_Starred(self, starred: ast.Starred) -> tuple[ast.Starred, str]: + # A Starred node can appear in a function call. + res, expl = self.visit(starred.value) + new_starred = ast.Starred(res, starred.ctx) + return new_starred, "*" + expl + + def visit_Attribute(self, attr: ast.Attribute) -> tuple[ast.Name, str]: + if not isinstance(attr.ctx, ast.Load): + return self.generic_visit(attr) + value, value_expl = self.visit(attr.value) + res = self.assign( + ast.copy_location(ast.Attribute(value, attr.attr, ast.Load()), attr) + ) + res_expl = self.explanation_param(self.display(res)) + pat = "%s\n{%s = %s.%s\n}" + expl = pat % (res_expl, res_expl, value_expl, attr.attr) + return res, expl + + def visit_Compare(self, comp: ast.Compare) -> tuple[ast.expr, str]: + self.push_format_context() + # We first check if we have overwritten a variable in the previous assert + if isinstance( + comp.left, ast.Name + ) and comp.left.id in self.variables_overwrite.get(self.scope, {}): + comp.left = self.variables_overwrite[self.scope][comp.left.id] # type:ignore[assignment] + if isinstance(comp.left, ast.NamedExpr): + self.variables_overwrite[self.scope][comp.left.target.id] = comp.left # type:ignore[assignment] + left_res, left_expl = self.visit(comp.left) + if isinstance(comp.left, (ast.Compare, ast.BoolOp)): + left_expl = f"({left_expl})" + res_variables = [self.variable() for i in range(len(comp.ops))] + load_names: list[ast.expr] = [ast.Name(v, ast.Load()) for v in res_variables] + store_names = [ast.Name(v, ast.Store()) for v in res_variables] + it = zip(range(len(comp.ops)), comp.ops, comp.comparators) + expls: list[ast.expr] = [] + syms: list[ast.expr] = [] + results = [left_res] + for i, op, next_operand in it: + if ( + isinstance(next_operand, ast.NamedExpr) + and isinstance(left_res, ast.Name) + and next_operand.target.id == left_res.id + ): + next_operand.target.id = self.variable() + self.variables_overwrite[self.scope][left_res.id] = next_operand # type:ignore[assignment] + next_res, next_expl = self.visit(next_operand) + if isinstance(next_operand, (ast.Compare, ast.BoolOp)): + next_expl = f"({next_expl})" + results.append(next_res) + sym = BINOP_MAP[op.__class__] + syms.append(ast.Constant(sym)) + expl = f"{left_expl} {sym} {next_expl}" + expls.append(ast.Constant(expl)) + res_expr = ast.copy_location(ast.Compare(left_res, [op], [next_res]), comp) + self.statements.append(ast.Assign([store_names[i]], res_expr)) + left_res, left_expl = next_res, 
next_expl + # Use pytest.assertion.util._reprcompare if that's available. + expl_call = self.helper( + "_call_reprcompare", + ast.Tuple(syms, ast.Load()), + ast.Tuple(load_names, ast.Load()), + ast.Tuple(expls, ast.Load()), + ast.Tuple(results, ast.Load()), + ) + if len(comp.ops) > 1: + res: ast.expr = ast.BoolOp(ast.And(), load_names) + else: + res = load_names[0] + + return res, self.explanation_param(self.pop_format_context(expl_call)) + + +def try_makedirs(cache_dir: Path) -> bool: + """Attempt to create the given directory and any missing sub-directories. + + Returns True if successful or if the directory already exists. + """ + try: + os.makedirs(cache_dir, exist_ok=True) + except (FileNotFoundError, NotADirectoryError, FileExistsError): + # One of the path components was not a directory: + # - we're in a zip file + # - it is a file + return False + except PermissionError: + return False + except OSError as e: + # as of now, EROFS doesn't have an equivalent OSError-subclass + # + # squashfuse_ll returns ENOSYS "OSError: [Errno 38] Function not + # implemented" for a read-only error + if e.errno in {errno.EROFS, errno.ENOSYS}: + return False + raise + return True + + +def get_cache_dir(file_path: Path) -> Path: + """Return the cache directory to write .pyc files for the given .py file path.""" + if sys.pycache_prefix: + # given: + # prefix = '/tmp/pycs' + # path = '/home/user/proj/test_app.py' + # we want: + # '/tmp/pycs/home/user/proj' + return Path(sys.pycache_prefix) / Path(*file_path.parts[1:-1]) + else: + # classic pycache directory + return file_path.parent / "__pycache__" diff --git a/.venv/lib/python3.11/site-packages/_pytest/assertion/truncate.py b/.venv/lib/python3.11/site-packages/_pytest/assertion/truncate.py new file mode 100644 index 0000000..4854a62 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/assertion/truncate.py @@ -0,0 +1,137 @@ +"""Utilities for truncating assertion output. + +By default, assertion explanations are truncated to a fixed number of +terminal lines, unless running with an assertions verbosity level of at least 2 or running on CI. +""" + +from __future__ import annotations + +from _pytest.assertion import util +from _pytest.config import Config +from _pytest.nodes import Item + + +DEFAULT_MAX_LINES = 8 +DEFAULT_MAX_CHARS = DEFAULT_MAX_LINES * 80 +USAGE_MSG = "use '-vv' to show" + + +def truncate_if_required(explanation: list[str], item: Item) -> list[str]: + """Truncate this assertion explanation if the given test item is eligible.""" + should_truncate, max_lines, max_chars = _get_truncation_parameters(item) + if should_truncate: + return _truncate_explanation( + explanation, + max_lines=max_lines, + max_chars=max_chars, + ) + return explanation + + +def _get_truncation_parameters(item: Item) -> tuple[bool, int, int]: + """Return the truncation parameters related to the given item, as (should truncate, max lines, max chars).""" + # We do not need to truncate if one of these conditions is met: + # 1. Verbosity level is 2 or more; + # 2. Test is being run in a CI environment; + # 3. Both truncation_limit_lines and truncation_limit_chars + # .ini parameters are set to 0 explicitly.
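A standalone sketch of the decision the rest of this function implements, mirroring the three conditions in the comment above (the function name and values are illustrative, not part of this module):

```python
def should_truncate(verbose: int, on_ci: bool, max_lines: int, max_chars: int) -> bool:
    # Truncate only below -vv verbosity, off CI, and with at least one positive limit.
    return verbose < 2 and not on_ci and (max_lines > 0 or max_chars > 0)


assert should_truncate(verbose=0, on_ci=False, max_lines=8, max_chars=640)
assert not should_truncate(verbose=2, on_ci=False, max_lines=8, max_chars=640)  # -vv
assert not should_truncate(verbose=0, on_ci=False, max_lines=0, max_chars=0)  # limits off
```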
+ max_lines = item.config.getini("truncation_limit_lines") + max_lines = int(max_lines if max_lines is not None else DEFAULT_MAX_LINES) + + max_chars = item.config.getini("truncation_limit_chars") + max_chars = int(max_chars if max_chars is not None else DEFAULT_MAX_CHARS) + + verbose = item.config.get_verbosity(Config.VERBOSITY_ASSERTIONS) + + should_truncate = verbose < 2 and not util.running_on_ci() + should_truncate = should_truncate and (max_lines > 0 or max_chars > 0) + + return should_truncate, max_lines, max_chars + + +def _truncate_explanation( + input_lines: list[str], + max_lines: int, + max_chars: int, +) -> list[str]: + """Truncate the given list of strings that makes up the assertion explanation. + + Truncates to either max_lines or max_chars - whichever limit the input + reaches first, taking the truncation explanation into account. The removed + lines are replaced by a usage message. + """ + # Check if truncation is required + input_char_count = len("".join(input_lines)) + # The length of the truncation explanation depends on the number of lines + # removed but is at least 68 characters: + # The real value is + # 64 (for the base message: + # '...\n...Full output truncated (1 line hidden), use '-vv' to show")' + # ) + # + 1 (for plural) + # + int(math.log10(len(input_lines) - max_lines)) (number of hidden lines, at least 1) + # + 3 for the '...' added to the truncated line + # But if there are more than 100 lines it's very likely that we're going to + # truncate, so we don't need the exact value using log10. + tolerable_max_chars = ( + max_chars + 70 # 64 + 1 (for plural) + 2 (for '99') + 3 for '...' + ) + # The truncation explanation adds two lines to the output + tolerable_max_lines = max_lines + 2 + if ( + len(input_lines) <= tolerable_max_lines + and input_char_count <= tolerable_max_chars + ): + return input_lines + # Truncate first to max_lines, and then truncate to max_chars if necessary + if max_lines > 0: + truncated_explanation = input_lines[:max_lines] + else: + truncated_explanation = input_lines + truncated_char = True + # We reevaluate the need to truncate chars following removal of some lines + if len("".join(truncated_explanation)) > tolerable_max_chars and max_chars > 0: + truncated_explanation = _truncate_by_char_count( + truncated_explanation, max_chars + ) + else: + truncated_char = False + + if truncated_explanation == input_lines: + # No truncation happened, so we do not need to add any explanations + return truncated_explanation + + truncated_line_count = len(input_lines) - len(truncated_explanation) + if truncated_explanation[-1]: + # Add ellipsis and take into account part-truncated final line + truncated_explanation[-1] = truncated_explanation[-1] + "..." + if truncated_char: + # It's possible that we did not remove any char from this line + truncated_line_count += 1 + else: + # Add proper ellipsis when we were able to fit a full line exactly + truncated_explanation[-1] = "..."
+ return [ + *truncated_explanation, + "", + f"...Full output truncated ({truncated_line_count} line" + f"{'' if truncated_line_count == 1 else 's'} hidden), {USAGE_MSG}", + ] + + +def _truncate_by_char_count(input_lines: list[str], max_chars: int) -> list[str]: + # Find the point at which the input length exceeds the total allowed length + iterated_char_count = 0 + for iterated_index, input_line in enumerate(input_lines): + if iterated_char_count + len(input_line) > max_chars: + break + iterated_char_count += len(input_line) + + # Create truncated explanation with modified final line + truncated_result = input_lines[:iterated_index] + final_line = input_lines[iterated_index] + if final_line: + final_line_truncate_point = max_chars - iterated_char_count + final_line = final_line[:final_line_truncate_point] + truncated_result.append(final_line) + return truncated_result diff --git a/.venv/lib/python3.11/site-packages/_pytest/assertion/util.py b/.venv/lib/python3.11/site-packages/_pytest/assertion/util.py new file mode 100644 index 0000000..c545e6c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/assertion/util.py @@ -0,0 +1,621 @@ +# mypy: allow-untyped-defs +"""Utilities for assertion debugging.""" + +from __future__ import annotations + +import collections.abc +from collections.abc import Callable +from collections.abc import Iterable +from collections.abc import Mapping +from collections.abc import Sequence +from collections.abc import Set as AbstractSet +import os +import pprint +from typing import Any +from typing import Literal +from typing import Protocol +from unicodedata import normalize + +from _pytest import outcomes +import _pytest._code +from _pytest._io.pprint import PrettyPrinter +from _pytest._io.saferepr import saferepr +from _pytest._io.saferepr import saferepr_unlimited +from _pytest.config import Config + + +# The _reprcompare attribute on the util module is used by the new assertion +# interpretation code and assertion rewriter to detect that this plugin was +# loaded and in turn call the hooks defined here as part of the +# DebugInterpreter. +_reprcompare: Callable[[str, object, object], str | None] | None = None + +# Works similarly to the _reprcompare attribute. It is populated with the hook call +# when pytest_runtest_setup is called. +_assertion_pass: Callable[[int, str, str], None] | None = None + +# Config object which is assigned during pytest_runtest_protocol. +_config: Config | None = None + + +class _HighlightFunc(Protocol): + def __call__(self, source: str, lexer: Literal["diff", "python"] = "python") -> str: + """Apply highlighting to the given source.""" + + +def dummy_highlighter(source: str, lexer: Literal["diff", "python"] = "python") -> str: + """Dummy highlighter that returns the text unprocessed. + + Needed for _notin_text, as the diff gets post-processed to only show the "+" part. + """ + return source + + +def format_explanation(explanation: str) -> str: + r"""Format an explanation. + + Normally all embedded newlines are escaped, however there are + three exceptions: \n{, \n} and \n~. The first two are intended to + cover nested explanations, see function and attribute explanations + for examples (.visit_Call(), visit_Attribute()). The last one is + for when one explanation needs to span multiple lines, e.g. when + displaying diffs. + """ + lines = _split_explanation(explanation) + result = _format_lines(lines) + return "\n".join(result) + + +def _split_explanation(explanation: str) -> list[str]: + r"""Return a list of individual lines in the explanation.
+ + This will return a list of lines split on '\n{', '\n}' and '\n~'. + Any other newlines will be escaped and appear in the line as the + literal '\n' characters. + """ + raw_lines = (explanation or "").split("\n") + lines = [raw_lines[0]] + for values in raw_lines[1:]: + if values and values[0] in ["{", "}", "~", ">"]: + lines.append(values) + else: + lines[-1] += "\\n" + values + return lines + + +def _format_lines(lines: Sequence[str]) -> list[str]: + """Format the individual lines. + + This will replace the '{', '}' and '~' characters of our mini formatting + language with the proper 'where ...', 'and ...' and ' + ...' text, taking + care of indentation along the way. + + Return a list of formatted lines. + """ + result = list(lines[:1]) + stack = [0] + stackcnt = [0] + for line in lines[1:]: + if line.startswith("{"): + if stackcnt[-1]: + s = "and " + else: + s = "where " + stack.append(len(result)) + stackcnt[-1] += 1 + stackcnt.append(0) + result.append(" +" + " " * (len(stack) - 1) + s + line[1:]) + elif line.startswith("}"): + stack.pop() + stackcnt.pop() + result[stack[-1]] += line[1:] + else: + assert line[0] in ["~", ">"] + stack[-1] += 1 + indent = len(stack) if line.startswith("~") else len(stack) - 1 + result.append(" " * indent + line[1:]) + assert len(stack) == 1 + return result + + +def issequence(x: Any) -> bool: + return isinstance(x, collections.abc.Sequence) and not isinstance(x, str) + + +def istext(x: Any) -> bool: + return isinstance(x, str) + + +def isdict(x: Any) -> bool: + return isinstance(x, dict) + + +def isset(x: Any) -> bool: + return isinstance(x, (set, frozenset)) + + +def isnamedtuple(obj: Any) -> bool: + return isinstance(obj, tuple) and getattr(obj, "_fields", None) is not None + + +def isdatacls(obj: Any) -> bool: + return getattr(obj, "__dataclass_fields__", None) is not None + + +def isattrs(obj: Any) -> bool: + return getattr(obj, "__attrs_attrs__", None) is not None + + +def isiterable(obj: Any) -> bool: + try: + iter(obj) + return not istext(obj) + except Exception: + return False + + +def has_default_eq( + obj: object, +) -> bool: + """Check if an instance of an object uses the default __eq__. + + First, we check if the object's __eq__ attribute has __code__; + if so, we check the equality of the method's code filename (__code__.co_filename) + to the default one generated by the dataclass and attr modules. + For dataclasses the default co_filename is "<string>"; for attrs classes, + the __eq__ should contain "attrs eq generated". + """ + # inspired by https://github.com/willmcgugan/rich/blob/07d51ffc1aee6f16bd2e5a25b4e82850fb9ed778/rich/pretty.py#L68 + if hasattr(obj.__eq__, "__code__") and hasattr(obj.__eq__.__code__, "co_filename"): + code_filename = obj.__eq__.__code__.co_filename + + if isattrs(obj): + return "attrs generated " in code_filename + + return code_filename == "<string>" # data class + return True + + +def assertrepr_compare( + config, op: str, left: Any, right: Any, use_ascii: bool = False +) -> list[str] | None: + """Return specialised explanations for some operators/operands.""" + verbose = config.get_verbosity(Config.VERBOSITY_ASSERTIONS) + + # Strings which normalize equal are often hard to distinguish when printed; use ascii() to make this easier. + # See issue #3246.
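A small, runnable illustration of why ascii() is used for such strings (standard library only):

```python
from unicodedata import normalize

composed = "caf\u00e9"     # 'é' as a single code point
decomposed = "cafe\u0301"  # 'e' followed by a combining acute accent

assert composed != decomposed
assert normalize("NFD", composed) == normalize("NFD", decomposed)
# The two strings print identically, but ascii() exposes the difference:
#   ascii(composed)   -> "'caf\\xe9'"
#   ascii(decomposed) -> "'cafe\\u0301'"
```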
+ use_ascii = ( + isinstance(left, str) + and isinstance(right, str) + and normalize("NFD", left) == normalize("NFD", right) + ) + + if verbose > 1: + left_repr = saferepr_unlimited(left, use_ascii=use_ascii) + right_repr = saferepr_unlimited(right, use_ascii=use_ascii) + else: + # XXX: "15 chars indentation" is wrong + # ("E AssertionError: assert "); should use term width. + maxsize = ( + 80 - 15 - len(op) - 2 + ) // 2 # 15 chars indentation, 1 space around op + + left_repr = saferepr(left, maxsize=maxsize, use_ascii=use_ascii) + right_repr = saferepr(right, maxsize=maxsize, use_ascii=use_ascii) + + summary = f"{left_repr} {op} {right_repr}" + highlighter = config.get_terminal_writer()._highlight + + explanation = None + try: + if op == "==": + explanation = _compare_eq_any(left, right, highlighter, verbose) + elif op == "not in": + if istext(left) and istext(right): + explanation = _notin_text(left, right, verbose) + elif op == "!=": + if isset(left) and isset(right): + explanation = ["Both sets are equal"] + elif op == ">=": + if isset(left) and isset(right): + explanation = _compare_gte_set(left, right, highlighter, verbose) + elif op == "<=": + if isset(left) and isset(right): + explanation = _compare_lte_set(left, right, highlighter, verbose) + elif op == ">": + if isset(left) and isset(right): + explanation = _compare_gt_set(left, right, highlighter, verbose) + elif op == "<": + if isset(left) and isset(right): + explanation = _compare_lt_set(left, right, highlighter, verbose) + + except outcomes.Exit: + raise + except Exception: + repr_crash = _pytest._code.ExceptionInfo.from_current()._getreprcrash() + explanation = [ + f"(pytest_assertion plugin: representation of details failed: {repr_crash}.", + " Probably an object has a faulty __repr__.)", + ] + + if not explanation: + return None + + if explanation[0] != "": + explanation = ["", *explanation] + return [summary, *explanation] + + +def _compare_eq_any( + left: Any, right: Any, highlighter: _HighlightFunc, verbose: int = 0 +) -> list[str]: + explanation = [] + if istext(left) and istext(right): + explanation = _diff_text(left, right, highlighter, verbose) + else: + from _pytest.python_api import ApproxBase + + if isinstance(left, ApproxBase) or isinstance(right, ApproxBase): + # Although the common order should be obtained == expected, this ensures both ways + approx_side = left if isinstance(left, ApproxBase) else right + other_side = right if isinstance(left, ApproxBase) else left + + explanation = approx_side._repr_compare(other_side) + elif type(left) is type(right) and ( + isdatacls(left) or isattrs(left) or isnamedtuple(left) + ): + # Note: unlike dataclasses/attrs, namedtuples compare only the + # field values, not the type or field names. But this branch + # intentionally only handles the same-type case, which was often + # used in older code bases before dataclasses/attrs were available. 
+ explanation = _compare_eq_cls(left, right, highlighter, verbose) + elif issequence(left) and issequence(right): + explanation = _compare_eq_sequence(left, right, highlighter, verbose) + elif isset(left) and isset(right): + explanation = _compare_eq_set(left, right, highlighter, verbose) + elif isdict(left) and isdict(right): + explanation = _compare_eq_dict(left, right, highlighter, verbose) + + if isiterable(left) and isiterable(right): + expl = _compare_eq_iterable(left, right, highlighter, verbose) + explanation.extend(expl) + + return explanation + + +def _diff_text( + left: str, right: str, highlighter: _HighlightFunc, verbose: int = 0 +) -> list[str]: + """Return the explanation for the diff between text. + + Unless --verbose is used this will skip leading and trailing + characters which are identical to keep the diff minimal. + """ + from difflib import ndiff + + explanation: list[str] = [] + + if verbose < 1: + i = 0 # just in case left or right has zero length + for i in range(min(len(left), len(right))): + if left[i] != right[i]: + break + if i > 42: + i -= 10 # Provide some context + explanation = [ + f"Skipping {i} identical leading characters in diff, use -v to show" + ] + left = left[i:] + right = right[i:] + if len(left) == len(right): + for i in range(len(left)): + if left[-i] != right[-i]: + break + if i > 42: + i -= 10 # Provide some context + explanation += [ + f"Skipping {i} identical trailing " + "characters in diff, use -v to show" + ] + left = left[:-i] + right = right[:-i] + keepends = True + if left.isspace() or right.isspace(): + left = repr(str(left)) + right = repr(str(right)) + explanation += ["Strings contain only whitespace, escaping them using repr()"] + # "right" is the expected base against which we compare "left", + # see https://github.com/pytest-dev/pytest/issues/3333 + explanation.extend( + highlighter( + "\n".join( + line.strip("\n") + for line in ndiff(right.splitlines(keepends), left.splitlines(keepends)) + ), + lexer="diff", + ).splitlines() + ) + return explanation + + +def _compare_eq_iterable( + left: Iterable[Any], + right: Iterable[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> list[str]: + if verbose <= 0 and not running_on_ci(): + return ["Use -v to get more diff"] + # dynamic import to speedup pytest + import difflib + + left_formatting = PrettyPrinter().pformat(left).splitlines() + right_formatting = PrettyPrinter().pformat(right).splitlines() + + explanation = ["", "Full diff:"] + # "right" is the expected base against which we compare "left", + # see https://github.com/pytest-dev/pytest/issues/3333 + explanation.extend( + highlighter( + "\n".join( + line.rstrip() + for line in difflib.ndiff(right_formatting, left_formatting) + ), + lexer="diff", + ).splitlines() + ) + return explanation + + +def _compare_eq_sequence( + left: Sequence[Any], + right: Sequence[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> list[str]: + comparing_bytes = isinstance(left, bytes) and isinstance(right, bytes) + explanation: list[str] = [] + len_left = len(left) + len_right = len(right) + for i in range(min(len_left, len_right)): + if left[i] != right[i]: + if comparing_bytes: + # when comparing bytes, we want to see their ascii representation + # instead of their numeric values (#5260) + # using a slice gives us the ascii representation: + # >>> s = b'foo' + # >>> s[0] + # 102 + # >>> s[0:1] + # b'f' + left_value = left[i : i + 1] + right_value = right[i : i + 1] + else: + left_value = left[i] + right_value = right[i] + + 
explanation.append( + f"At index {i} diff:" + f" {highlighter(repr(left_value))} != {highlighter(repr(right_value))}" + ) + break + + if comparing_bytes: + # when comparing bytes, it doesn't help to show the "sides contain one or more + # items" longer explanation, so skip it + + return explanation + + len_diff = len_left - len_right + if len_diff: + if len_diff > 0: + dir_with_more = "Left" + extra = saferepr(left[len_right]) + else: + len_diff = 0 - len_diff + dir_with_more = "Right" + extra = saferepr(right[len_left]) + + if len_diff == 1: + explanation += [ + f"{dir_with_more} contains one more item: {highlighter(extra)}" + ] + else: + explanation += [ + f"{dir_with_more} contains {len_diff} more items, first extra item: {highlighter(extra)}" + ] + return explanation + + +def _compare_eq_set( + left: AbstractSet[Any], + right: AbstractSet[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> list[str]: + explanation = [] + explanation.extend(_set_one_sided_diff("left", left, right, highlighter)) + explanation.extend(_set_one_sided_diff("right", right, left, highlighter)) + return explanation + + +def _compare_gt_set( + left: AbstractSet[Any], + right: AbstractSet[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> list[str]: + explanation = _compare_gte_set(left, right, highlighter) + if not explanation: + return ["Both sets are equal"] + return explanation + + +def _compare_lt_set( + left: AbstractSet[Any], + right: AbstractSet[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> list[str]: + explanation = _compare_lte_set(left, right, highlighter) + if not explanation: + return ["Both sets are equal"] + return explanation + + +def _compare_gte_set( + left: AbstractSet[Any], + right: AbstractSet[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> list[str]: + return _set_one_sided_diff("right", right, left, highlighter) + + +def _compare_lte_set( + left: AbstractSet[Any], + right: AbstractSet[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> list[str]: + return _set_one_sided_diff("left", left, right, highlighter) + + +def _set_one_sided_diff( + posn: str, + set1: AbstractSet[Any], + set2: AbstractSet[Any], + highlighter: _HighlightFunc, +) -> list[str]: + explanation = [] + diff = set1 - set2 + if diff: + explanation.append(f"Extra items in the {posn} set:") + for item in diff: + explanation.append(highlighter(saferepr(item))) + return explanation + + +def _compare_eq_dict( + left: Mapping[Any, Any], + right: Mapping[Any, Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> list[str]: + explanation: list[str] = [] + set_left = set(left) + set_right = set(right) + common = set_left.intersection(set_right) + same = {k: left[k] for k in common if left[k] == right[k]} + if same and verbose < 2: + explanation += [f"Omitting {len(same)} identical items, use -vv to show"] + elif same: + explanation += ["Common items:"] + explanation += highlighter(pprint.pformat(same)).splitlines() + diff = {k for k in common if left[k] != right[k]} + if diff: + explanation += ["Differing items:"] + for k in diff: + explanation += [ + highlighter(saferepr({k: left[k]})) + + " != " + + highlighter(saferepr({k: right[k]})) + ] + extra_left = set_left - set_right + len_extra_left = len(extra_left) + if len_extra_left: + explanation.append( + f"Left contains {len_extra_left} more item{'' if len_extra_left == 1 else 's'}:" + ) + explanation.extend( + highlighter(pprint.pformat({k: left[k] for k in extra_left})).splitlines() + ) + extra_right = 
set_right - set_left + len_extra_right = len(extra_right) + if len_extra_right: + explanation.append( + f"Right contains {len_extra_right} more item{'' if len_extra_right == 1 else 's'}:" + ) + explanation.extend( + highlighter(pprint.pformat({k: right[k] for k in extra_right})).splitlines() + ) + return explanation + + +def _compare_eq_cls( + left: Any, right: Any, highlighter: _HighlightFunc, verbose: int +) -> list[str]: + if not has_default_eq(left): + return [] + if isdatacls(left): + import dataclasses + + all_fields = dataclasses.fields(left) + fields_to_check = [info.name for info in all_fields if info.compare] + elif isattrs(left): + all_fields = left.__attrs_attrs__ + fields_to_check = [field.name for field in all_fields if getattr(field, "eq")] + elif isnamedtuple(left): + fields_to_check = left._fields + else: + assert False + + indent = " " + same = [] + diff = [] + for field in fields_to_check: + if getattr(left, field) == getattr(right, field): + same.append(field) + else: + diff.append(field) + + explanation = [] + if same or diff: + explanation += [""] + if same and verbose < 2: + explanation.append(f"Omitting {len(same)} identical items, use -vv to show") + elif same: + explanation += ["Matching attributes:"] + explanation += highlighter(pprint.pformat(same)).splitlines() + if diff: + explanation += ["Differing attributes:"] + explanation += highlighter(pprint.pformat(diff)).splitlines() + for field in diff: + field_left = getattr(left, field) + field_right = getattr(right, field) + explanation += [ + "", + f"Drill down into differing attribute {field}:", + f"{indent}{field}: {highlighter(repr(field_left))} != {highlighter(repr(field_right))}", + ] + explanation += [ + indent + line + for line in _compare_eq_any( + field_left, field_right, highlighter, verbose + ) + ] + return explanation + + +def _notin_text(term: str, text: str, verbose: int = 0) -> list[str]: + index = text.find(term) + head = text[:index] + tail = text[index + len(term) :] + correct_text = head + tail + diff = _diff_text(text, correct_text, dummy_highlighter, verbose) + newdiff = [f"{saferepr(term, maxsize=42)} is contained here:"] + for line in diff: + if line.startswith("Skipping"): + continue + if line.startswith("- "): + continue + if line.startswith("+ "): + newdiff.append(" " + line[2:]) + else: + newdiff.append(line) + return newdiff + + +def running_on_ci() -> bool: + """Check if we're currently running on a CI system.""" + env_vars = ["CI", "BUILD_NUMBER"] + return any(var in os.environ for var in env_vars) diff --git a/.venv/lib/python3.11/site-packages/_pytest/cacheprovider.py b/.venv/lib/python3.11/site-packages/_pytest/cacheprovider.py new file mode 100644 index 0000000..dea6010 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/cacheprovider.py @@ -0,0 +1,625 @@ +# mypy: allow-untyped-defs +"""Implementation of the cache provider.""" + +# This plugin was not named "cache" to avoid conflicts with the external +# pytest-cache version. 
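As a usage sketch of what this plugin provides, grounded in the `Cache.get`/`Cache.set` docstrings further down (the key name is hypothetical):

```python
def test_expensive_value(cache) -> None:
    # Keys are '/'-separated; prefix with your plugin/app name to avoid clashes.
    value = cache.get("example/answer", None)
    if value is None:
        value = 42  # stand-in for an expensive computation
        cache.set("example/answer", value)
    assert value == 42
```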
+from __future__ import annotations + +from collections.abc import Generator +from collections.abc import Iterable +import dataclasses +import errno +import json +import os +from pathlib import Path +import tempfile +from typing import final + +from .pathlib import resolve_from_str +from .pathlib import rm_rf +from .reports import CollectReport +from _pytest import nodes +from _pytest._io import TerminalWriter +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.main import Session +from _pytest.nodes import Directory +from _pytest.nodes import File +from _pytest.reports import TestReport + + +README_CONTENT = """\ +# pytest cache directory # + +This directory contains data from the pytest's cache plugin, +which provides the `--lf` and `--ff` options, as well as the `cache` fixture. + +**Do not** commit this to version control. + +See [the docs](https://docs.pytest.org/en/stable/how-to/cache.html) for more information. +""" + +CACHEDIR_TAG_CONTENT = b"""\ +Signature: 8a477f597d28d172789f06886806bc55 +# This file is a cache directory tag created by pytest. +# For information about cache directory tags, see: +# https://bford.info/cachedir/spec.html +""" + + +@final +@dataclasses.dataclass +class Cache: + """Instance of the `cache` fixture.""" + + _cachedir: Path = dataclasses.field(repr=False) + _config: Config = dataclasses.field(repr=False) + + # Sub-directory under cache-dir for directories created by `mkdir()`. + _CACHE_PREFIX_DIRS = "d" + + # Sub-directory under cache-dir for values created by `set()`. + _CACHE_PREFIX_VALUES = "v" + + def __init__( + self, cachedir: Path, config: Config, *, _ispytest: bool = False + ) -> None: + check_ispytest(_ispytest) + self._cachedir = cachedir + self._config = config + + @classmethod + def for_config(cls, config: Config, *, _ispytest: bool = False) -> Cache: + """Create the Cache instance for a Config. + + :meta private: + """ + check_ispytest(_ispytest) + cachedir = cls.cache_dir_from_config(config, _ispytest=True) + if config.getoption("cacheclear") and cachedir.is_dir(): + cls.clear_cache(cachedir, _ispytest=True) + return cls(cachedir, config, _ispytest=True) + + @classmethod + def clear_cache(cls, cachedir: Path, _ispytest: bool = False) -> None: + """Clear the sub-directories used to hold cached directories and values. + + :meta private: + """ + check_ispytest(_ispytest) + for prefix in (cls._CACHE_PREFIX_DIRS, cls._CACHE_PREFIX_VALUES): + d = cachedir / prefix + if d.is_dir(): + rm_rf(d) + + @staticmethod + def cache_dir_from_config(config: Config, *, _ispytest: bool = False) -> Path: + """Get the path to the cache directory for a Config. + + :meta private: + """ + check_ispytest(_ispytest) + return resolve_from_str(config.getini("cache_dir"), config.rootpath) + + def warn(self, fmt: str, *, _ispytest: bool = False, **args: object) -> None: + """Issue a cache warning. 
+ + :meta private: + """ + check_ispytest(_ispytest) + import warnings + + from _pytest.warning_types import PytestCacheWarning + + warnings.warn( + PytestCacheWarning(fmt.format(**args) if args else fmt), + self._config.hook, + stacklevel=3, + ) + + def _mkdir(self, path: Path) -> None: + self._ensure_cache_dir_and_supporting_files() + path.mkdir(exist_ok=True, parents=True) + + def mkdir(self, name: str) -> Path: + """Return a directory path object with the given name. + + If the directory does not yet exist, it will be created. You can use + it to manage files to e.g. store/retrieve database dumps across test + sessions. + + .. versionadded:: 7.0 + + :param name: + Must be a string not containing a ``/`` separator. + Make sure the name contains your plugin or application + identifiers to prevent clashes with other cache users. + """ + path = Path(name) + if len(path.parts) > 1: + raise ValueError("name is not allowed to contain path separators") + res = self._cachedir.joinpath(self._CACHE_PREFIX_DIRS, path) + self._mkdir(res) + return res + + def _getvaluepath(self, key: str) -> Path: + return self._cachedir.joinpath(self._CACHE_PREFIX_VALUES, Path(key)) + + def get(self, key: str, default): + """Return the cached value for the given key. + + If no value was yet cached or the value cannot be read, the specified + default is returned. + + :param key: + Must be a ``/`` separated value. Usually the first + name is the name of your plugin or your application. + :param default: + The value to return in case of a cache-miss or invalid cache value. + """ + path = self._getvaluepath(key) + try: + with path.open("r", encoding="UTF-8") as f: + return json.load(f) + except (ValueError, OSError): + return default + + def set(self, key: str, value: object) -> None: + """Save value for the given key. + + :param key: + Must be a ``/`` separated value. Usually the first + name is the name of your plugin or your application. + :param value: + Must be of any combination of basic python types, + including nested types like lists of dictionaries. + """ + path = self._getvaluepath(key) + try: + self._mkdir(path.parent) + except OSError as exc: + self.warn( + f"could not create cache path {path}: {exc}", + _ispytest=True, + ) + return + data = json.dumps(value, ensure_ascii=False, indent=2) + try: + f = path.open("w", encoding="UTF-8") + except OSError as exc: + self.warn( + f"cache could not write path {path}: {exc}", + _ispytest=True, + ) + else: + with f: + f.write(data) + + def _ensure_cache_dir_and_supporting_files(self) -> None: + """Create the cache dir and its supporting files.""" + if self._cachedir.is_dir(): + return + + self._cachedir.parent.mkdir(parents=True, exist_ok=True) + with tempfile.TemporaryDirectory( + prefix="pytest-cache-files-", + dir=self._cachedir.parent, + ) as newpath: + path = Path(newpath) + + # Reset permissions to the default, see #12308. + # Note: there's no way to get the current umask atomically, eek. + umask = os.umask(0o022) + os.umask(umask) + path.chmod(0o777 - umask) + + with open(path.joinpath("README.md"), "x", encoding="UTF-8") as f: + f.write(README_CONTENT) + with open(path.joinpath(".gitignore"), "x", encoding="UTF-8") as f: + f.write("# Created by pytest automatically.\n*\n") + with open(path.joinpath("CACHEDIR.TAG"), "xb") as f: + f.write(CACHEDIR_TAG_CONTENT) + + try: + path.rename(self._cachedir) + except OSError as e: + # If 2 concurrent pytests both race to the rename, the loser + # gets "Directory not empty" from the rename. 
In this case, + # everything is handled so just continue (while letting the + # temporary directory be cleaned up). + # On Windows, the error is a FileExistsError which translates to EEXIST. + if e.errno not in (errno.ENOTEMPTY, errno.EEXIST): + raise + else: + # Create a directory in place of the one we just moved so that + # `TemporaryDirectory`'s cleanup doesn't complain. + # + # TODO: pass ignore_cleanup_errors=True when we no longer support python < 3.10. + # See https://github.com/python/cpython/issues/74168. Note that passing + # delete=False would do the wrong thing in case of errors and isn't supported + # until python 3.12. + path.mkdir() + + +class LFPluginCollWrapper: + def __init__(self, lfplugin: LFPlugin) -> None: + self.lfplugin = lfplugin + self._collected_at_least_one_failure = False + + @hookimpl(wrapper=True) + def pytest_make_collect_report( + self, collector: nodes.Collector + ) -> Generator[None, CollectReport, CollectReport]: + res = yield + if isinstance(collector, (Session, Directory)): + # Sort any lf-paths to the beginning. + lf_paths = self.lfplugin._last_failed_paths + + # Use stable sort to prioritize last failed. + def sort_key(node: nodes.Item | nodes.Collector) -> bool: + return node.path in lf_paths + + res.result = sorted( + res.result, + key=sort_key, + reverse=True, + ) + + elif isinstance(collector, File): + if collector.path in self.lfplugin._last_failed_paths: + result = res.result + lastfailed = self.lfplugin.lastfailed + + # Only filter with known failures. + if not self._collected_at_least_one_failure: + if not any(x.nodeid in lastfailed for x in result): + return res + self.lfplugin.config.pluginmanager.register( + LFPluginCollSkipfiles(self.lfplugin), "lfplugin-collskip" + ) + self._collected_at_least_one_failure = True + + session = collector.session + result[:] = [ + x + for x in result + if x.nodeid in lastfailed + # Include any passed arguments (not trivial to filter). + or session.isinitpath(x.path) + # Keep all sub-collectors. 
+ or isinstance(x, nodes.Collector) + ] + + return res + + +class LFPluginCollSkipfiles: + def __init__(self, lfplugin: LFPlugin) -> None: + self.lfplugin = lfplugin + + @hookimpl + def pytest_make_collect_report( + self, collector: nodes.Collector + ) -> CollectReport | None: + if isinstance(collector, File): + if collector.path not in self.lfplugin._last_failed_paths: + self.lfplugin._skipped_files += 1 + + return CollectReport( + collector.nodeid, "passed", longrepr=None, result=[] + ) + return None + + +class LFPlugin: + """Plugin which implements the --lf (run last-failing) option.""" + + def __init__(self, config: Config) -> None: + self.config = config + active_keys = "lf", "failedfirst" + self.active = any(config.getoption(key) for key in active_keys) + assert config.cache + self.lastfailed: dict[str, bool] = config.cache.get("cache/lastfailed", {}) + self._previously_failed_count: int | None = None + self._report_status: str | None = None + self._skipped_files = 0 # count skipped files during collection due to --lf + + if config.getoption("lf"): + self._last_failed_paths = self.get_last_failed_paths() + config.pluginmanager.register( + LFPluginCollWrapper(self), "lfplugin-collwrapper" + ) + + def get_last_failed_paths(self) -> set[Path]: + """Return a set with all Paths of the previously failed nodeids and + their parents.""" + rootpath = self.config.rootpath + result = set() + for nodeid in self.lastfailed: + path = rootpath / nodeid.split("::")[0] + result.add(path) + result.update(path.parents) + return {x for x in result if x.exists()} + + def pytest_report_collectionfinish(self) -> str | None: + if self.active and self.config.get_verbosity() >= 0: + return f"run-last-failure: {self._report_status}" + return None + + def pytest_runtest_logreport(self, report: TestReport) -> None: + if (report.when == "call" and report.passed) or report.skipped: + self.lastfailed.pop(report.nodeid, None) + elif report.failed: + self.lastfailed[report.nodeid] = True + + def pytest_collectreport(self, report: CollectReport) -> None: + passed = report.outcome in ("passed", "skipped") + if passed: + if report.nodeid in self.lastfailed: + self.lastfailed.pop(report.nodeid) + self.lastfailed.update((item.nodeid, True) for item in report.result) + else: + self.lastfailed[report.nodeid] = True + + @hookimpl(wrapper=True, tryfirst=True) + def pytest_collection_modifyitems( + self, config: Config, items: list[nodes.Item] + ) -> Generator[None]: + res = yield + + if not self.active: + return res + + if self.lastfailed: + previously_failed = [] + previously_passed = [] + for item in items: + if item.nodeid in self.lastfailed: + previously_failed.append(item) + else: + previously_passed.append(item) + self._previously_failed_count = len(previously_failed) + + if not previously_failed: + # Running a subset of all tests with recorded failures + # only outside of it. 
+ self._report_status = ( + f"{len(self.lastfailed)} known failures not in selected tests" + ) + else: + if self.config.getoption("lf"): + items[:] = previously_failed + config.hook.pytest_deselected(items=previously_passed) + else: # --failedfirst + items[:] = previously_failed + previously_passed + + noun = "failure" if self._previously_failed_count == 1 else "failures" + suffix = " first" if self.config.getoption("failedfirst") else "" + self._report_status = ( + f"rerun previous {self._previously_failed_count} {noun}{suffix}" + ) + + if self._skipped_files > 0: + files_noun = "file" if self._skipped_files == 1 else "files" + self._report_status += f" (skipped {self._skipped_files} {files_noun})" + else: + self._report_status = "no previously failed tests, " + if self.config.getoption("last_failed_no_failures") == "none": + self._report_status += "deselecting all items." + config.hook.pytest_deselected(items=items[:]) + items[:] = [] + else: + self._report_status += "not deselecting items." + + return res + + def pytest_sessionfinish(self, session: Session) -> None: + config = self.config + if config.getoption("cacheshow") or hasattr(config, "workerinput"): + return + + assert config.cache is not None + saved_lastfailed = config.cache.get("cache/lastfailed", {}) + if saved_lastfailed != self.lastfailed: + config.cache.set("cache/lastfailed", self.lastfailed) + + +class NFPlugin: + """Plugin which implements the --nf (run new-first) option.""" + + def __init__(self, config: Config) -> None: + self.config = config + self.active = config.option.newfirst + assert config.cache is not None + self.cached_nodeids = set(config.cache.get("cache/nodeids", [])) + + @hookimpl(wrapper=True, tryfirst=True) + def pytest_collection_modifyitems(self, items: list[nodes.Item]) -> Generator[None]: + res = yield + + if self.active: + new_items: dict[str, nodes.Item] = {} + other_items: dict[str, nodes.Item] = {} + for item in items: + if item.nodeid not in self.cached_nodeids: + new_items[item.nodeid] = item + else: + other_items[item.nodeid] = item + + items[:] = self._get_increasing_order( + new_items.values() + ) + self._get_increasing_order(other_items.values()) + self.cached_nodeids.update(new_items) + else: + self.cached_nodeids.update(item.nodeid for item in items) + + return res + + def _get_increasing_order(self, items: Iterable[nodes.Item]) -> list[nodes.Item]: + return sorted(items, key=lambda item: item.path.stat().st_mtime, reverse=True) + + def pytest_sessionfinish(self) -> None: + config = self.config + if config.getoption("cacheshow") or hasattr(config, "workerinput"): + return + + if config.getoption("collectonly"): + return + + assert config.cache is not None + config.cache.set("cache/nodeids", sorted(self.cached_nodeids)) + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group.addoption( + "--lf", + "--last-failed", + action="store_true", + dest="lf", + help="Rerun only the tests that failed at the last run (or all if none failed)", + ) + group.addoption( + "--ff", + "--failed-first", + action="store_true", + dest="failedfirst", + help="Run all tests, but run the last failures first. 
" + "This may re-order tests and thus lead to " + "repeated fixture setup/teardown.", + ) + group.addoption( + "--nf", + "--new-first", + action="store_true", + dest="newfirst", + help="Run tests from new files first, then the rest of the tests " + "sorted by file mtime", + ) + group.addoption( + "--cache-show", + action="append", + nargs="?", + dest="cacheshow", + help=( + "Show cache contents, don't perform collection or tests. " + "Optional argument: glob (default: '*')." + ), + ) + group.addoption( + "--cache-clear", + action="store_true", + dest="cacheclear", + help="Remove all cache contents at start of test run", + ) + cache_dir_default = ".pytest_cache" + if "TOX_ENV_DIR" in os.environ: + cache_dir_default = os.path.join(os.environ["TOX_ENV_DIR"], cache_dir_default) + parser.addini("cache_dir", default=cache_dir_default, help="Cache directory path") + group.addoption( + "--lfnf", + "--last-failed-no-failures", + action="store", + dest="last_failed_no_failures", + choices=("all", "none"), + default="all", + help="With ``--lf``, determines whether to execute tests when there " + "are no previously (known) failures or when no " + "cached ``lastfailed`` data was found. " + "``all`` (the default) runs the full test suite again. " + "``none`` just emits a message about no known failures and exits successfully.", + ) + + +def pytest_cmdline_main(config: Config) -> int | ExitCode | None: + if config.option.cacheshow and not config.option.help: + from _pytest.main import wrap_session + + return wrap_session(config, cacheshow) + return None + + +@hookimpl(tryfirst=True) +def pytest_configure(config: Config) -> None: + config.cache = Cache.for_config(config, _ispytest=True) + config.pluginmanager.register(LFPlugin(config), "lfplugin") + config.pluginmanager.register(NFPlugin(config), "nfplugin") + + +@fixture +def cache(request: FixtureRequest) -> Cache: + """Return a cache object that can persist state between testing sessions. + + cache.get(key, default) + cache.set(key, value) + + Keys must be ``/`` separated strings, where the first part is usually the + name of your plugin or application to avoid clashes with other cache users. + + Values can be any object handled by the json stdlib module. + """ + assert request.config.cache is not None + return request.config.cache + + +def pytest_report_header(config: Config) -> str | None: + """Display cachedir with --cache-show and if non-default.""" + if config.option.verbose > 0 or config.getini("cache_dir") != ".pytest_cache": + assert config.cache is not None + cachedir = config.cache._cachedir + # TODO: evaluate generating upward relative paths + # starting with .., ../.. 
if sensible + + try: + displaypath = cachedir.relative_to(config.rootpath) + except ValueError: + displaypath = cachedir + return f"cachedir: {displaypath}" + return None + + +def cacheshow(config: Config, session: Session) -> int: + from pprint import pformat + + assert config.cache is not None + + tw = TerminalWriter() + tw.line("cachedir: " + str(config.cache._cachedir)) + if not config.cache._cachedir.is_dir(): + tw.line("cache is empty") + return 0 + + glob = config.option.cacheshow[0] + if glob is None: + glob = "*" + + dummy = object() + basedir = config.cache._cachedir + vdir = basedir / Cache._CACHE_PREFIX_VALUES + tw.sep("-", f"cache values for {glob!r}") + for valpath in sorted(x for x in vdir.rglob(glob) if x.is_file()): + key = str(valpath.relative_to(vdir)) + val = config.cache.get(key, dummy) + if val is dummy: + tw.line(f"{key} contains unreadable content, will be ignored") + else: + tw.line(f"{key} contains:") + for line in pformat(val).splitlines(): + tw.line(" " + line) + + ddir = basedir / Cache._CACHE_PREFIX_DIRS + if ddir.is_dir(): + contents = sorted(ddir.rglob(glob)) + tw.sep("-", f"cache directories for {glob!r}") + for p in contents: + # if p.is_dir(): + # print("%s/" % p.relative_to(basedir)) + if p.is_file(): + key = str(p.relative_to(basedir)) + tw.line(f"{key} is a file of length {p.stat().st_size}") + return 0 diff --git a/.venv/lib/python3.11/site-packages/_pytest/capture.py b/.venv/lib/python3.11/site-packages/_pytest/capture.py new file mode 100644 index 0000000..6d98676 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/capture.py @@ -0,0 +1,1144 @@ +# mypy: allow-untyped-defs +"""Per-test stdout/stderr capturing mechanism.""" + +from __future__ import annotations + +import abc +import collections +from collections.abc import Generator +from collections.abc import Iterable +from collections.abc import Iterator +import contextlib +import io +from io import UnsupportedOperation +import os +import sys +from tempfile import TemporaryFile +from types import TracebackType +from typing import Any +from typing import AnyStr +from typing import BinaryIO +from typing import cast +from typing import Final +from typing import final +from typing import Generic +from typing import Literal +from typing import NamedTuple +from typing import TextIO +from typing import TYPE_CHECKING + + +if TYPE_CHECKING: + from typing_extensions import Self + +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import SubRequest +from _pytest.nodes import Collector +from _pytest.nodes import File +from _pytest.nodes import Item +from _pytest.reports import CollectReport + + +_CaptureMethod = Literal["fd", "sys", "no", "tee-sys"] + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group.addoption( + "--capture", + action="store", + default="fd", + metavar="method", + choices=["fd", "sys", "no", "tee-sys"], + help="Per-test capturing method: one of fd|sys|no|tee-sys", + ) + group._addoption( # private to use reserved lower-case short option + "-s", + action="store_const", + const="no", + dest="capture", + help="Shortcut for --capture=no", + ) + + +def _colorama_workaround() -> None: + """Ensure colorama is imported so that it attaches to the correct stdio + handles on Windows. + + colorama uses the terminal on import time. 
So if something does the + first import of colorama while I/O capture is active, colorama will + fail in various ways. + """ + if sys.platform.startswith("win32"): + try: + import colorama # noqa: F401 + except ImportError: + pass + + +def _readline_workaround() -> None: + """Ensure readline is imported early so it attaches to the correct stdio handles. + + This isn't a problem with the default GNU readline implementation, but in + some configurations, Python uses libedit instead (on macOS, and for prebuilt + binaries such as used by uv). + + In theory this is only needed if readline.backend == "libedit", but the + workaround consists of importing readline here, so we already worked around + the issue by the time we could check if we need to. + """ + try: + import readline # noqa: F401 + except ImportError: + pass + + +def _windowsconsoleio_workaround(stream: TextIO) -> None: + """Workaround for Windows Unicode console handling. + + Python 3.6 implemented Unicode console handling for Windows. This works + by reading/writing to the raw console handle using + ``{Read,Write}ConsoleW``. + + The problem is that we are going to ``dup2`` over the stdio file + descriptors when doing ``FDCapture`` and this will ``CloseHandle`` the + handles used by Python to write to the console. Though there is still some + weirdness and the console handle seems to only be closed randomly and not + on the first call to ``CloseHandle``, or maybe it gets reopened with the + same handle value when we suspend capturing. + + The workaround in this case will reopen stdio with a different fd which + also means a different handle by replicating the logic in + "Py_lifecycle.c:initstdio/create_stdio". + + :param stream: + In practice ``sys.stdout`` or ``sys.stderr``, but given + here as parameter for unittesting purposes. + + See https://github.com/pytest-dev/py/issues/103. + """ + if not sys.platform.startswith("win32") or hasattr(sys, "pypy_version_info"): + return + + # Bail out if ``stream`` doesn't seem like a proper ``io`` stream (#2666). + if not hasattr(stream, "buffer"): # type: ignore[unreachable,unused-ignore] + return + + raw_stdout = stream.buffer.raw if hasattr(stream.buffer, "raw") else stream.buffer + + if not isinstance(raw_stdout, io._WindowsConsoleIO): # type: ignore[attr-defined,unused-ignore] + return + + def _reopen_stdio(f, mode): + if not hasattr(stream.buffer, "raw") and mode[0] == "w": + buffering = 0 + else: + buffering = -1 + + return io.TextIOWrapper( + open(os.dup(f.fileno()), mode, buffering), + f.encoding, + f.errors, + f.newlines, + f.line_buffering, + ) + + sys.stdin = _reopen_stdio(sys.stdin, "rb") + sys.stdout = _reopen_stdio(sys.stdout, "wb") + sys.stderr = _reopen_stdio(sys.stderr, "wb") + + +@hookimpl(wrapper=True) +def pytest_load_initial_conftests(early_config: Config) -> Generator[None]: + ns = early_config.known_args_namespace + if ns.capture == "fd": + _windowsconsoleio_workaround(sys.stdout) + _colorama_workaround() + _readline_workaround() + pluginmanager = early_config.pluginmanager + capman = CaptureManager(ns.capture) + pluginmanager.register(capman, "capturemanager") + + # Make sure that capturemanager is properly reset at final shutdown. + early_config.add_cleanup(capman.stop_global_capturing) + + # Finally trigger conftest loading but while capturing (issue #93). 
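+ # Note (descriptive comment, added for clarity): if conftest loading (the + # yield below) raises, the output captured so far is flushed back to the + # real streams before re-raising, so startup errors remain visible.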
+ capman.start_global_capturing() + try: + try: + yield + finally: + capman.suspend_global_capture() + except BaseException: + out, err = capman.read_global_capture() + sys.stdout.write(out) + sys.stderr.write(err) + raise + + +# IO Helpers. + + +class EncodedFile(io.TextIOWrapper): + __slots__ = () + + @property + def name(self) -> str: + # Ensure that file.name is a string. Workaround for a Python bug + # fixed in >=3.7.4: https://bugs.python.org/issue36015 + return repr(self.buffer) + + @property + def mode(self) -> str: + # TextIOWrapper doesn't expose a mode, but at least some of our + # tests check it. + assert hasattr(self.buffer, "mode") + return cast(str, self.buffer.mode.replace("b", "")) + + +class CaptureIO(io.TextIOWrapper): + def __init__(self) -> None: + super().__init__(io.BytesIO(), encoding="UTF-8", newline="", write_through=True) + + def getvalue(self) -> str: + assert isinstance(self.buffer, io.BytesIO) + return self.buffer.getvalue().decode("UTF-8") + + +class TeeCaptureIO(CaptureIO): + def __init__(self, other: TextIO) -> None: + self._other = other + super().__init__() + + def write(self, s: str) -> int: + super().write(s) + return self._other.write(s) + + +class DontReadFromInput(TextIO): + @property + def encoding(self) -> str: + assert sys.__stdin__ is not None + return sys.__stdin__.encoding + + def read(self, size: int = -1) -> str: + raise OSError( + "pytest: reading from stdin while output is captured! Consider using `-s`." + ) + + readline = read + + def __next__(self) -> str: + return self.readline() + + def readlines(self, hint: int | None = -1) -> list[str]: + raise OSError( + "pytest: reading from stdin while output is captured! Consider using `-s`." + ) + + def __iter__(self) -> Iterator[str]: + return self + + def fileno(self) -> int: + raise UnsupportedOperation("redirected stdin is pseudofile, has no fileno()") + + def flush(self) -> None: + raise UnsupportedOperation("redirected stdin is pseudofile, has no flush()") + + def isatty(self) -> bool: + return False + + def close(self) -> None: + pass + + def readable(self) -> bool: + return False + + def seek(self, offset: int, whence: int = 0) -> int: + raise UnsupportedOperation("redirected stdin is pseudofile, has no seek(int)") + + def seekable(self) -> bool: + return False + + def tell(self) -> int: + raise UnsupportedOperation("redirected stdin is pseudofile, has no tell()") + + def truncate(self, size: int | None = None) -> int: + raise UnsupportedOperation("cannot truncate stdin") + + def write(self, data: str) -> int: + raise UnsupportedOperation("cannot write to stdin") + + def writelines(self, lines: Iterable[str]) -> None: + raise UnsupportedOperation("Cannot write to stdin") + + def writable(self) -> bool: + return False + + def __enter__(self) -> Self: + return self + + def __exit__( + self, + type: type[BaseException] | None, + value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + pass + + @property + def buffer(self) -> BinaryIO: + # The str/bytes doesn't actually matter in this type, so OK to fake. + return self # type: ignore[return-value] + + +# Capture classes. 
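+# The concrete capture classes below share a small state machine enforced by +# _assert_state(): "initialized" --start()--> "started", suspend()/resume() +# toggle between "started" and "suspended", and done() moves to the terminal +# "done" state. A minimal usage sketch (illustrative only -- pytest normally +# drives these classes itself): +# +# cap = FDCapture(1) # capture OS-level fd 1 (stdout) +# cap.start() +# print("hello") +# out = cap.snap() # "hello\n"; snap() also resets the buffer +# cap.done() # restore the original fd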
+ + +class CaptureBase(abc.ABC, Generic[AnyStr]): + EMPTY_BUFFER: AnyStr + + @abc.abstractmethod + def __init__(self, fd: int) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def start(self) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def done(self) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def suspend(self) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def resume(self) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def writeorg(self, data: AnyStr) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def snap(self) -> AnyStr: + raise NotImplementedError() + + +patchsysdict = {0: "stdin", 1: "stdout", 2: "stderr"} + + +class NoCapture(CaptureBase[str]): + EMPTY_BUFFER = "" + + def __init__(self, fd: int) -> None: + pass + + def start(self) -> None: + pass + + def done(self) -> None: + pass + + def suspend(self) -> None: + pass + + def resume(self) -> None: + pass + + def snap(self) -> str: + return "" + + def writeorg(self, data: str) -> None: + pass + + +class SysCaptureBase(CaptureBase[AnyStr]): + def __init__( + self, fd: int, tmpfile: TextIO | None = None, *, tee: bool = False + ) -> None: + name = patchsysdict[fd] + self._old: TextIO = getattr(sys, name) + self.name = name + if tmpfile is None: + if name == "stdin": + tmpfile = DontReadFromInput() + else: + tmpfile = CaptureIO() if not tee else TeeCaptureIO(self._old) + self.tmpfile = tmpfile + self._state = "initialized" + + def repr(self, class_name: str) -> str: + return "<{} {} _old={} _state={!r} tmpfile={!r}>".format( + class_name, + self.name, + (hasattr(self, "_old") and repr(self._old)) or "", + self._state, + self.tmpfile, + ) + + def __repr__(self) -> str: + return "<{} {} _old={} _state={!r} tmpfile={!r}>".format( + self.__class__.__name__, + self.name, + (hasattr(self, "_old") and repr(self._old)) or "", + self._state, + self.tmpfile, + ) + + def _assert_state(self, op: str, states: tuple[str, ...]) -> None: + assert self._state in states, ( + "cannot {} in state {!r}: expected one of {}".format( + op, self._state, ", ".join(states) + ) + ) + + def start(self) -> None: + self._assert_state("start", ("initialized",)) + setattr(sys, self.name, self.tmpfile) + self._state = "started" + + def done(self) -> None: + self._assert_state("done", ("initialized", "started", "suspended", "done")) + if self._state == "done": + return + setattr(sys, self.name, self._old) + del self._old + self.tmpfile.close() + self._state = "done" + + def suspend(self) -> None: + self._assert_state("suspend", ("started", "suspended")) + setattr(sys, self.name, self._old) + self._state = "suspended" + + def resume(self) -> None: + self._assert_state("resume", ("started", "suspended")) + if self._state == "started": + return + setattr(sys, self.name, self.tmpfile) + self._state = "started" + + +class SysCaptureBinary(SysCaptureBase[bytes]): + EMPTY_BUFFER = b"" + + def snap(self) -> bytes: + self._assert_state("snap", ("started", "suspended")) + self.tmpfile.seek(0) + res = self.tmpfile.buffer.read() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res + + def writeorg(self, data: bytes) -> None: + self._assert_state("writeorg", ("started", "suspended")) + self._old.flush() + self._old.buffer.write(data) + self._old.buffer.flush() + + +class SysCapture(SysCaptureBase[str]): + EMPTY_BUFFER = "" + + def snap(self) -> str: + self._assert_state("snap", ("started", "suspended")) + assert isinstance(self.tmpfile, CaptureIO) + res = 
self.tmpfile.getvalue() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res + + def writeorg(self, data: str) -> None: + self._assert_state("writeorg", ("started", "suspended")) + self._old.write(data) + self._old.flush() + + +class FDCaptureBase(CaptureBase[AnyStr]): + def __init__(self, targetfd: int) -> None: + self.targetfd = targetfd + + try: + os.fstat(targetfd) + except OSError: + # FD capturing is conceptually simple -- create a temporary file, + # redirect the FD to it, redirect back when done. But when the + # target FD is invalid it throws a wrench into this lovely scheme. + # + # Tests themselves shouldn't care if the FD is valid, FD capturing + # should work regardless of external circumstances. So falling back + # to just sys capturing is not a good option. + # + # Further complications are the need to support suspend() and the + # possibility of FD reuse (e.g. the tmpfile getting the very same + # target FD). The following approach is robust, I believe. + self.targetfd_invalid: int | None = os.open(os.devnull, os.O_RDWR) + os.dup2(self.targetfd_invalid, targetfd) + else: + self.targetfd_invalid = None + self.targetfd_save = os.dup(targetfd) + + if targetfd == 0: + self.tmpfile = open(os.devnull, encoding="utf-8") + self.syscapture: CaptureBase[str] = SysCapture(targetfd) + else: + self.tmpfile = EncodedFile( + TemporaryFile(buffering=0), + encoding="utf-8", + errors="replace", + newline="", + write_through=True, + ) + if targetfd in patchsysdict: + self.syscapture = SysCapture(targetfd, self.tmpfile) + else: + self.syscapture = NoCapture(targetfd) + + self._state = "initialized" + + def __repr__(self) -> str: + return ( + f"<{self.__class__.__name__} {self.targetfd} oldfd={self.targetfd_save} " + f"_state={self._state!r} tmpfile={self.tmpfile!r}>" + ) + + def _assert_state(self, op: str, states: tuple[str, ...]) -> None: + assert self._state in states, ( + "cannot {} in state {!r}: expected one of {}".format( + op, self._state, ", ".join(states) + ) + ) + + def start(self) -> None: + """Start capturing on targetfd using memorized tmpfile.""" + self._assert_state("start", ("initialized",)) + os.dup2(self.tmpfile.fileno(), self.targetfd) + self.syscapture.start() + self._state = "started" + + def done(self) -> None: + """Stop capturing, restore streams, return original capture file, + seeked to position zero.""" + self._assert_state("done", ("initialized", "started", "suspended", "done")) + if self._state == "done": + return + os.dup2(self.targetfd_save, self.targetfd) + os.close(self.targetfd_save) + if self.targetfd_invalid is not None: + if self.targetfd_invalid != self.targetfd: + os.close(self.targetfd) + os.close(self.targetfd_invalid) + self.syscapture.done() + self.tmpfile.close() + self._state = "done" + + def suspend(self) -> None: + self._assert_state("suspend", ("started", "suspended")) + if self._state == "suspended": + return + self.syscapture.suspend() + os.dup2(self.targetfd_save, self.targetfd) + self._state = "suspended" + + def resume(self) -> None: + self._assert_state("resume", ("started", "suspended")) + if self._state == "started": + return + self.syscapture.resume() + os.dup2(self.tmpfile.fileno(), self.targetfd) + self._state = "started" + + +class FDCaptureBinary(FDCaptureBase[bytes]): + """Capture IO to/from a given OS-level file descriptor. + + snap() produces `bytes`. 
+ """ + + EMPTY_BUFFER = b"" + + def snap(self) -> bytes: + self._assert_state("snap", ("started", "suspended")) + self.tmpfile.seek(0) + res = self.tmpfile.buffer.read() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res # type: ignore[return-value] + + def writeorg(self, data: bytes) -> None: + """Write to original file descriptor.""" + self._assert_state("writeorg", ("started", "suspended")) + os.write(self.targetfd_save, data) + + +class FDCapture(FDCaptureBase[str]): + """Capture IO to/from a given OS-level file descriptor. + + snap() produces text. + """ + + EMPTY_BUFFER = "" + + def snap(self) -> str: + self._assert_state("snap", ("started", "suspended")) + self.tmpfile.seek(0) + res = self.tmpfile.read() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res + + def writeorg(self, data: str) -> None: + """Write to original file descriptor.""" + self._assert_state("writeorg", ("started", "suspended")) + # XXX use encoding of original stream + os.write(self.targetfd_save, data.encode("utf-8")) + + +# MultiCapture + + +# Generic NamedTuple only supported since Python 3.11. +if sys.version_info >= (3, 11) or TYPE_CHECKING: + + @final + class CaptureResult(NamedTuple, Generic[AnyStr]): + """The result of :method:`caplog.readouterr() `.""" + + out: AnyStr + err: AnyStr + +else: + + class CaptureResult( + collections.namedtuple("CaptureResult", ["out", "err"]), # noqa: PYI024 + Generic[AnyStr], + ): + """The result of :method:`caplog.readouterr() `.""" + + __slots__ = () + + +class MultiCapture(Generic[AnyStr]): + _state = None + _in_suspended = False + + def __init__( + self, + in_: CaptureBase[AnyStr] | None, + out: CaptureBase[AnyStr] | None, + err: CaptureBase[AnyStr] | None, + ) -> None: + self.in_: CaptureBase[AnyStr] | None = in_ + self.out: CaptureBase[AnyStr] | None = out + self.err: CaptureBase[AnyStr] | None = err + + def __repr__(self) -> str: + return ( + f"" + ) + + def start_capturing(self) -> None: + self._state = "started" + if self.in_: + self.in_.start() + if self.out: + self.out.start() + if self.err: + self.err.start() + + def pop_outerr_to_orig(self) -> tuple[AnyStr, AnyStr]: + """Pop current snapshot out/err capture and flush to orig streams.""" + out, err = self.readouterr() + if out: + assert self.out is not None + self.out.writeorg(out) + if err: + assert self.err is not None + self.err.writeorg(err) + return out, err + + def suspend_capturing(self, in_: bool = False) -> None: + self._state = "suspended" + if self.out: + self.out.suspend() + if self.err: + self.err.suspend() + if in_ and self.in_: + self.in_.suspend() + self._in_suspended = True + + def resume_capturing(self) -> None: + self._state = "started" + if self.out: + self.out.resume() + if self.err: + self.err.resume() + if self._in_suspended: + assert self.in_ is not None + self.in_.resume() + self._in_suspended = False + + def stop_capturing(self) -> None: + """Stop capturing and reset capturing streams.""" + if self._state == "stopped": + raise ValueError("was already stopped") + self._state = "stopped" + if self.out: + self.out.done() + if self.err: + self.err.done() + if self.in_: + self.in_.done() + + def is_started(self) -> bool: + """Whether actively capturing -- not suspended or stopped.""" + return self._state == "started" + + def readouterr(self) -> CaptureResult[AnyStr]: + out = self.out.snap() if self.out else "" + err = self.err.snap() if self.err else "" + # TODO: This type error is real, need to fix. 
+ return CaptureResult(out, err) # type: ignore[arg-type] + + +def _get_multicapture(method: _CaptureMethod) -> MultiCapture[str]: + if method == "fd": + return MultiCapture(in_=FDCapture(0), out=FDCapture(1), err=FDCapture(2)) + elif method == "sys": + return MultiCapture(in_=SysCapture(0), out=SysCapture(1), err=SysCapture(2)) + elif method == "no": + return MultiCapture(in_=None, out=None, err=None) + elif method == "tee-sys": + return MultiCapture( + in_=None, out=SysCapture(1, tee=True), err=SysCapture(2, tee=True) + ) + raise ValueError(f"unknown capturing method: {method!r}") + + +# CaptureManager and CaptureFixture + + +class CaptureManager: + """The capture plugin. + + Ensures that the appropriate capture method is enabled/disabled during + collection and each test phase (setup, call, teardown). After each of + those points, the captured output is obtained and attached to the + collection/runtest report. + + There are two levels of capture: + + * global: enabled by default and can be suppressed by the ``-s`` + option. This is always enabled/disabled during collection and each test + phase. + + * fixture: when a test function or one of its fixtures depends on the + ``capsys`` or ``capfd`` fixtures. In this case special handling is + needed to ensure the fixtures take precedence over the global capture. + """ + + def __init__(self, method: _CaptureMethod) -> None: + self._method: Final = method + self._global_capturing: MultiCapture[str] | None = None + self._capture_fixture: CaptureFixture[Any] | None = None + + def __repr__(self) -> str: + return ( + f"<CaptureManager _method={self._method!r} _global_capturing={self._global_capturing!r} " + f"_capture_fixture={self._capture_fixture!r}>" + ) + + def is_capturing(self) -> str | bool: + if self.is_globally_capturing(): + return "global" + if self._capture_fixture: + return f"fixture {self._capture_fixture.request.fixturename}" + return False + + # Global capturing control + + def is_globally_capturing(self) -> bool: + return self._method != "no" + + def start_global_capturing(self) -> None: + assert self._global_capturing is None + self._global_capturing = _get_multicapture(self._method) + self._global_capturing.start_capturing() + + def stop_global_capturing(self) -> None: + if self._global_capturing is not None: + self._global_capturing.pop_outerr_to_orig() + self._global_capturing.stop_capturing() + self._global_capturing = None + + def resume_global_capture(self) -> None: + # During teardown of the Python process, and on rare occasions, capture + # attributes can be `None` while trying to resume global capture. + if self._global_capturing is not None: + self._global_capturing.resume_capturing() + + def suspend_global_capture(self, in_: bool = False) -> None: + if self._global_capturing is not None: + self._global_capturing.suspend_capturing(in_=in_) + + def suspend(self, in_: bool = False) -> None: + # Need to undo local capsys-et-al if it exists before disabling global capture.
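+ # Order matters: the fixture capture is layered on top of the global + # capture, so the fixture is suspended first here; resume() below + # restores them in the opposite order.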
+ self.suspend_fixture() + self.suspend_global_capture(in_) + + def resume(self) -> None: + self.resume_global_capture() + self.resume_fixture() + + def read_global_capture(self) -> CaptureResult[str]: + assert self._global_capturing is not None + return self._global_capturing.readouterr() + + # Fixture Control + + def set_fixture(self, capture_fixture: CaptureFixture[Any]) -> None: + if self._capture_fixture: + current_fixture = self._capture_fixture.request.fixturename + requested_fixture = capture_fixture.request.fixturename + capture_fixture.request.raiseerror( + f"cannot use {requested_fixture} and {current_fixture} at the same time" + ) + self._capture_fixture = capture_fixture + + def unset_fixture(self) -> None: + self._capture_fixture = None + + def activate_fixture(self) -> None: + """If the current item is using ``capsys`` or ``capfd``, activate + them so they take precedence over the global capture.""" + if self._capture_fixture: + self._capture_fixture._start() + + def deactivate_fixture(self) -> None: + """Deactivate the ``capsys`` or ``capfd`` fixture of this item, if any.""" + if self._capture_fixture: + self._capture_fixture.close() + + def suspend_fixture(self) -> None: + if self._capture_fixture: + self._capture_fixture._suspend() + + def resume_fixture(self) -> None: + if self._capture_fixture: + self._capture_fixture._resume() + + # Helper context managers + + @contextlib.contextmanager + def global_and_fixture_disabled(self) -> Generator[None]: + """Context manager to temporarily disable global and current fixture capturing.""" + do_fixture = self._capture_fixture and self._capture_fixture._is_started() + if do_fixture: + self.suspend_fixture() + do_global = self._global_capturing and self._global_capturing.is_started() + if do_global: + self.suspend_global_capture() + try: + yield + finally: + if do_global: + self.resume_global_capture() + if do_fixture: + self.resume_fixture() + + @contextlib.contextmanager + def item_capture(self, when: str, item: Item) -> Generator[None]: + self.resume_global_capture() + self.activate_fixture() + try: + yield + finally: + self.deactivate_fixture() + self.suspend_global_capture(in_=False) + + out, err = self.read_global_capture() + item.add_report_section(when, "stdout", out) + item.add_report_section(when, "stderr", err) + + # Hooks + + @hookimpl(wrapper=True) + def pytest_make_collect_report( + self, collector: Collector + ) -> Generator[None, CollectReport, CollectReport]: + if isinstance(collector, File): + self.resume_global_capture() + try: + rep = yield + finally: + self.suspend_global_capture() + out, err = self.read_global_capture() + if out: + rep.sections.append(("Captured stdout", out)) + if err: + rep.sections.append(("Captured stderr", err)) + else: + rep = yield + return rep + + @hookimpl(wrapper=True) + def pytest_runtest_setup(self, item: Item) -> Generator[None]: + with self.item_capture("setup", item): + return (yield) + + @hookimpl(wrapper=True) + def pytest_runtest_call(self, item: Item) -> Generator[None]: + with self.item_capture("call", item): + return (yield) + + @hookimpl(wrapper=True) + def pytest_runtest_teardown(self, item: Item) -> Generator[None]: + with self.item_capture("teardown", item): + return (yield) + + @hookimpl(tryfirst=True) + def pytest_keyboard_interrupt(self) -> None: + self.stop_global_capturing() + + @hookimpl(tryfirst=True) + def pytest_internalerror(self) -> None: + self.stop_global_capturing() + + +class CaptureFixture(Generic[AnyStr]): + """Object returned by the :fixture:`capsys`, 
:fixture:`capsysbinary`, + :fixture:`capfd` and :fixture:`capfdbinary` fixtures.""" + + def __init__( + self, + captureclass: type[CaptureBase[AnyStr]], + request: SubRequest, + *, + config: dict[str, Any] | None = None, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + self.captureclass: type[CaptureBase[AnyStr]] = captureclass + self.request = request + self._config = config if config else {} + self._capture: MultiCapture[AnyStr] | None = None + self._captured_out: AnyStr = self.captureclass.EMPTY_BUFFER + self._captured_err: AnyStr = self.captureclass.EMPTY_BUFFER + + def _start(self) -> None: + if self._capture is None: + self._capture = MultiCapture( + in_=None, + out=self.captureclass(1, **self._config), + err=self.captureclass(2, **self._config), + ) + self._capture.start_capturing() + + def close(self) -> None: + if self._capture is not None: + out, err = self._capture.pop_outerr_to_orig() + self._captured_out += out + self._captured_err += err + self._capture.stop_capturing() + self._capture = None + + def readouterr(self) -> CaptureResult[AnyStr]: + """Read and return the captured output so far, resetting the internal + buffer. + + :returns: + The captured content as a namedtuple with ``out`` and ``err`` + string attributes. + """ + captured_out, captured_err = self._captured_out, self._captured_err + if self._capture is not None: + out, err = self._capture.readouterr() + captured_out += out + captured_err += err + self._captured_out = self.captureclass.EMPTY_BUFFER + self._captured_err = self.captureclass.EMPTY_BUFFER + return CaptureResult(captured_out, captured_err) + + def _suspend(self) -> None: + """Suspend this fixture's own capturing temporarily.""" + if self._capture is not None: + self._capture.suspend_capturing() + + def _resume(self) -> None: + """Resume this fixture's own capturing temporarily.""" + if self._capture is not None: + self._capture.resume_capturing() + + def _is_started(self) -> bool: + """Whether actively capturing -- not disabled or closed.""" + if self._capture is not None: + return self._capture.is_started() + return False + + @contextlib.contextmanager + def disabled(self) -> Generator[None]: + """Temporarily disable capturing while inside the ``with`` block.""" + capmanager: CaptureManager = self.request.config.pluginmanager.getplugin( + "capturemanager" + ) + with capmanager.global_and_fixture_disabled(): + yield + + +# The fixtures. + + +@fixture +def capsys(request: SubRequest) -> Generator[CaptureFixture[str]]: + r"""Enable text capturing of writes to ``sys.stdout`` and ``sys.stderr``. + + The captured output is made available via ``capsys.readouterr()`` method + calls, which return a ``(out, err)`` namedtuple. + ``out`` and ``err`` will be ``text`` objects. + + Returns an instance of :class:`CaptureFixture[str] <pytest.CaptureFixture>`. + + Example: + + .. code-block:: python + + def test_output(capsys): + print("hello") + captured = capsys.readouterr() + assert captured.out == "hello\n" + """ + capman: CaptureManager = request.config.pluginmanager.getplugin("capturemanager") + capture_fixture = CaptureFixture(SysCapture, request, _ispytest=True) + capman.set_fixture(capture_fixture) + capture_fixture._start() + yield capture_fixture + capture_fixture.close() + capman.unset_fixture() + + +@fixture +def capteesys(request: SubRequest) -> Generator[CaptureFixture[str]]: + r"""Enable simultaneous text capturing and pass-through of writes + to ``sys.stdout`` and ``sys.stderr`` as defined by ``--capture=``.
+ + + The captured output is made available via ``capteesys.readouterr()`` method + calls, which return a ``(out, err)`` namedtuple. + ``out`` and ``err`` will be ``text`` objects. + + The output is also passed through, allowing it to be "live-printed", + reported, or both as defined by ``--capture=``. + + Returns an instance of :class:`CaptureFixture[str] <pytest.CaptureFixture>`. + + Example: + + .. code-block:: python + + def test_output(capteesys): + print("hello") + captured = capteesys.readouterr() + assert captured.out == "hello\n" + """ + capman: CaptureManager = request.config.pluginmanager.getplugin("capturemanager") + capture_fixture = CaptureFixture( + SysCapture, request, config=dict(tee=True), _ispytest=True + ) + capman.set_fixture(capture_fixture) + capture_fixture._start() + yield capture_fixture + capture_fixture.close() + capman.unset_fixture() + + +@fixture +def capsysbinary(request: SubRequest) -> Generator[CaptureFixture[bytes]]: + r"""Enable bytes capturing of writes to ``sys.stdout`` and ``sys.stderr``. + + The captured output is made available via ``capsysbinary.readouterr()`` + method calls, which return a ``(out, err)`` namedtuple. + ``out`` and ``err`` will be ``bytes`` objects. + + Returns an instance of :class:`CaptureFixture[bytes] <pytest.CaptureFixture>`. + + Example: + + .. code-block:: python + + def test_output(capsysbinary): + print("hello") + captured = capsysbinary.readouterr() + assert captured.out == b"hello\n" + """ + capman: CaptureManager = request.config.pluginmanager.getplugin("capturemanager") + capture_fixture = CaptureFixture(SysCaptureBinary, request, _ispytest=True) + capman.set_fixture(capture_fixture) + capture_fixture._start() + yield capture_fixture + capture_fixture.close() + capman.unset_fixture() + + +@fixture +def capfd(request: SubRequest) -> Generator[CaptureFixture[str]]: + r"""Enable text capturing of writes to file descriptors ``1`` and ``2``. + + The captured output is made available via ``capfd.readouterr()`` method + calls, which return a ``(out, err)`` namedtuple. + ``out`` and ``err`` will be ``text`` objects. + + Returns an instance of :class:`CaptureFixture[str] <pytest.CaptureFixture>`. + + Example: + + .. code-block:: python + + def test_system_echo(capfd): + os.system('echo "hello"') + captured = capfd.readouterr() + assert captured.out == "hello\n" + """ + capman: CaptureManager = request.config.pluginmanager.getplugin("capturemanager") + capture_fixture = CaptureFixture(FDCapture, request, _ispytest=True) + capman.set_fixture(capture_fixture) + capture_fixture._start() + yield capture_fixture + capture_fixture.close() + capman.unset_fixture() + + +@fixture +def capfdbinary(request: SubRequest) -> Generator[CaptureFixture[bytes]]: + r"""Enable bytes capturing of writes to file descriptors ``1`` and ``2``. + + The captured output is made available via ``capfdbinary.readouterr()`` method + calls, which return a ``(out, err)`` namedtuple. + ``out`` and ``err`` will be ``bytes`` objects. + + Returns an instance of :class:`CaptureFixture[bytes] <pytest.CaptureFixture>`. + + Example: + + ..
code-block:: python + + def test_system_echo(capfdbinary): + os.system('echo "hello"') + captured = capfdbinary.readouterr() + assert captured.out == b"hello\n" + + """ + capman: CaptureManager = request.config.pluginmanager.getplugin("capturemanager") + capture_fixture = CaptureFixture(FDCaptureBinary, request, _ispytest=True) + capman.set_fixture(capture_fixture) + capture_fixture._start() + yield capture_fixture + capture_fixture.close() + capman.unset_fixture() diff --git a/.venv/lib/python3.11/site-packages/_pytest/compat.py b/.venv/lib/python3.11/site-packages/_pytest/compat.py new file mode 100644 index 0000000..bef8c31 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/compat.py @@ -0,0 +1,333 @@ +# mypy: allow-untyped-defs +"""Python version compatibility code.""" + +from __future__ import annotations + +from collections.abc import Callable +import enum +import functools +import inspect +from inspect import Parameter +from inspect import Signature +import os +from pathlib import Path +import sys +from typing import Any +from typing import Final +from typing import NoReturn + +import py + + +if sys.version_info >= (3, 14): + from annotationlib import Format + + +#: constant to prepare valuing pylib path replacements/lazy proxies later on +# intended for removal in pytest 8.0 or 9.0 + +# fmt: off +# intentional space to create a fake difference for the verification +LEGACY_PATH = py.path. local +# fmt: on + + +def legacy_path(path: str | os.PathLike[str]) -> LEGACY_PATH: + """Internal wrapper to prepare lazy proxies for legacy_path instances""" + return LEGACY_PATH(path) + + +# fmt: off +# Singleton type for NOTSET, as described in: +# https://www.python.org/dev/peps/pep-0484/#support-for-singleton-types-in-unions +class NotSetType(enum.Enum): + token = 0 +NOTSET: Final = NotSetType.token +# fmt: on + + +def iscoroutinefunction(func: object) -> bool: + """Return True if func is a coroutine function (a function defined with async + def syntax, and doesn't contain yield), or a function decorated with + @asyncio.coroutine. + + Note: copied and modified from Python 3.5's builtin coroutines.py to avoid + importing asyncio directly, which in turns also initializes the "logging" + module as a side-effect (see issue #8). 
+ """ + return inspect.iscoroutinefunction(func) or getattr(func, "_is_coroutine", False) + + +def is_async_function(func: object) -> bool: + """Return True if the given function seems to be an async function or + an async generator.""" + return iscoroutinefunction(func) or inspect.isasyncgenfunction(func) + + +def signature(obj: Callable[..., Any]) -> Signature: + """Return signature without evaluating annotations.""" + if sys.version_info >= (3, 14): + return inspect.signature(obj, annotation_format=Format.STRING) + return inspect.signature(obj) + + +def getlocation(function, curdir: str | os.PathLike[str] | None = None) -> str: + function = get_real_func(function) + fn = Path(inspect.getfile(function)) + lineno = function.__code__.co_firstlineno + if curdir is not None: + try: + relfn = fn.relative_to(curdir) + except ValueError: + pass + else: + return f"{relfn}:{lineno + 1}" + return f"{fn}:{lineno + 1}" + + +def num_mock_patch_args(function) -> int: + """Return number of arguments used up by mock arguments (if any).""" + patchings = getattr(function, "patchings", None) + if not patchings: + return 0 + + mock_sentinel = getattr(sys.modules.get("mock"), "DEFAULT", object()) + ut_mock_sentinel = getattr(sys.modules.get("unittest.mock"), "DEFAULT", object()) + + return len( + [ + p + for p in patchings + if not p.attribute_name + and (p.new is mock_sentinel or p.new is ut_mock_sentinel) + ] + ) + + +def getfuncargnames( + function: Callable[..., object], + *, + name: str = "", + cls: type | None = None, +) -> tuple[str, ...]: + """Return the names of a function's mandatory arguments. + + Should return the names of all function arguments that: + * Aren't bound to an instance or type as in instance or class methods. + * Don't have default values. + * Aren't bound with functools.partial. + * Aren't replaced with mocks. + + The cls arguments indicate that the function should be treated as a bound + method even though it's not unless the function is a static method. + + The name parameter should be the original name in which the function was collected. + """ + # TODO(RonnyPfannschmidt): This function should be refactored when we + # revisit fixtures. The fixture mechanism should ask the node for + # the fixture names, and not try to obtain directly from the + # function object well after collection has occurred. + + # The parameters attribute of a Signature object contains an + # ordered mapping of parameter names to Parameter instances. This + # creates a tuple of the names of the parameters that don't have + # defaults. + try: + parameters = signature(function).parameters.values() + except (ValueError, TypeError) as e: + from _pytest.outcomes import fail + + fail( + f"Could not determine arguments of {function!r}: {e}", + pytrace=False, + ) + + arg_names = tuple( + p.name + for p in parameters + if ( + p.kind is Parameter.POSITIONAL_OR_KEYWORD + or p.kind is Parameter.KEYWORD_ONLY + ) + and p.default is Parameter.empty + ) + if not name: + name = function.__name__ + + # If this function should be treated as a bound method even though + # it's passed as an unbound method or function, and its first parameter + # wasn't defined as positional only, remove the first parameter name. + if not any(p.kind is Parameter.POSITIONAL_ONLY for p in parameters) and ( + # Not using `getattr` because we don't want to resolve the staticmethod. + # Not using `cls.__dict__` because we want to check the entire MRO. 
+ cls + and not isinstance( + inspect.getattr_static(cls, name, default=None), staticmethod + ) + ): + arg_names = arg_names[1:] + # Remove any names that will be replaced with mocks. + if hasattr(function, "__wrapped__"): + arg_names = arg_names[num_mock_patch_args(function) :] + return arg_names + + +def get_default_arg_names(function: Callable[..., Any]) -> tuple[str, ...]: + # Note: this code intentionally mirrors the code at the beginning of + # getfuncargnames, to get the arguments which were excluded from its result + # because they had default values. + return tuple( + p.name + for p in signature(function).parameters.values() + if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY) + and p.default is not Parameter.empty + ) + + +_non_printable_ascii_translate_table = { + i: f"\\x{i:02x}" for i in range(128) if i not in range(32, 127) +} +_non_printable_ascii_translate_table.update( + {ord("\t"): "\\t", ord("\r"): "\\r", ord("\n"): "\\n"} +) + + +def ascii_escaped(val: bytes | str) -> str: + r"""If val is pure ASCII, return it as an str, otherwise, escape + bytes objects into a sequence of escaped bytes: + + b'\xc3\xb4\xc5\xd6' -> r'\xc3\xb4\xc5\xd6' + + and escapes strings into a sequence of escaped unicode ids, e.g.: + + r'4\nV\U00043efa\x0eMXWB\x1e\u3028\u15fd\xcd\U0007d944' + + Note: + The obvious "v.decode('unicode-escape')" will return + valid UTF-8 unicode if it finds them in bytes, but we + want to return escaped bytes for any byte, even if they match + a UTF-8 string. + """ + if isinstance(val, bytes): + ret = val.decode("ascii", "backslashreplace") + else: + ret = val.encode("unicode_escape").decode("ascii") + return ret.translate(_non_printable_ascii_translate_table) + + +def get_real_func(obj): + """Get the real function object of the (possibly) wrapped object by + :func:`functools.wraps`, or :func:`functools.partial`.""" + obj = inspect.unwrap(obj) + + if isinstance(obj, functools.partial): + obj = obj.func + return obj + + +def getimfunc(func): + try: + return func.__func__ + except AttributeError: + return func + + +def safe_getattr(object: Any, name: str, default: Any) -> Any: + """Like getattr but return default upon any Exception or any OutcomeException. + + Attribute access can potentially fail for 'evil' Python objects. + See issue #214. + It catches OutcomeException because of #2490 (issue #580), new outcomes + are derived from BaseException instead of Exception (for more details + check #2707). + """ + from _pytest.outcomes import TEST_OUTCOME + + try: + return getattr(object, name, default) + except TEST_OUTCOME: + return default + + +def safe_isclass(obj: object) -> bool: + """Ignore any exception via isinstance on Python 3.""" + try: + return inspect.isclass(obj) + except Exception: + return False + + +def get_user_id() -> int | None: + """Return the current process's real user id or None if it could not be + determined. + + :return: The user id or None if it could not be determined. + """ + # mypy follows the version and platform checking expectation of PEP 484: + # https://mypy.readthedocs.io/en/stable/common_issues.html?highlight=platform#python-version-and-system-platform-checks + # Containment checks are too complex for mypy v1.5.0 and cause failure. + if sys.platform == "win32" or sys.platform == "emscripten": + # win32 does not have a getuid() function. + # Emscripten has a return 0 stub. 
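+ # Either way no meaningful uid is available, so report None.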
+ return None + else: + # On other platforms, a return value of -1 is assumed to indicate that + # the current process's real user id could not be determined. + ERROR = -1 + uid = os.getuid() + return uid if uid != ERROR else None + + +# Perform exhaustiveness checking. +# +# Consider this example: +# +# MyUnion = Union[int, str] +# +# def handle(x: MyUnion) -> int: +# if isinstance(x, int): +# return 1 +# elif isinstance(x, str): +# return 2 +# else: +# raise Exception('unreachable') +# +# Now suppose we add a new variant: +# +# MyUnion = Union[int, str, bytes] +# +# After doing this, we must remember to go and update the handle +# function to handle the new variant. +# +# With `assert_never` we can do better: +# +# # raise Exception('unreachable') +# return assert_never(x) +# +# Now, if we forget to handle the new variant, the type-checker will emit a +# compile-time error, instead of the runtime error we would have gotten +# previously. +# +# This also works for Enums (if you use `is` to compare) and Literals. +def assert_never(value: NoReturn) -> NoReturn: + assert False, f"Unhandled value: {value} ({type(value).__name__})" + + +class CallableBool: + """ + A bool-like object that can also be called, returning its true/false value. + + Used for backwards compatibility in cases where something was supposed to be a method + but was implemented as a simple attribute by mistake (see `TerminalReporter.isatty`). + + Do not use in new code. + """ + + def __init__(self, value: bool) -> None: + self._value = value + + def __bool__(self) -> bool: + return self._value + + def __call__(self) -> bool: + return self._value diff --git a/.venv/lib/python3.11/site-packages/_pytest/config/__init__.py b/.venv/lib/python3.11/site-packages/_pytest/config/__init__.py new file mode 100644 index 0000000..468018f --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/config/__init__.py @@ -0,0 +1,2029 @@ +# mypy: allow-untyped-defs +"""Command line options, ini-file and conftest.py processing.""" + +from __future__ import annotations + +import argparse +import collections.abc +from collections.abc import Callable +from collections.abc import Generator +from collections.abc import Iterable +from collections.abc import Iterator +from collections.abc import Sequence +import contextlib +import copy +import dataclasses +import enum +from functools import lru_cache +import glob +import importlib.metadata +import inspect +import os +import pathlib +import re +import shlex +import sys +from textwrap import dedent +import types +from types import FunctionType +from typing import Any +from typing import cast +from typing import Final +from typing import final +from typing import IO +from typing import TextIO +from typing import TYPE_CHECKING +import warnings + +import pluggy +from pluggy import HookimplMarker +from pluggy import HookimplOpts +from pluggy import HookspecMarker +from pluggy import HookspecOpts +from pluggy import PluginManager + +from .compat import PathAwareHookProxy +from .exceptions import PrintHelp as PrintHelp +from .exceptions import UsageError as UsageError +from .findpaths import determine_setup +from _pytest import __version__ +import _pytest._code +from _pytest._code import ExceptionInfo +from _pytest._code import filter_traceback +from _pytest._code.code import TracebackStyle +from _pytest._io import TerminalWriter +from _pytest.config.argparsing import Argument +from _pytest.config.argparsing import Parser +import _pytest.deprecated +import _pytest.hookspec +from
_pytest.outcomes import fail +from _pytest.outcomes import Skipped +from _pytest.pathlib import absolutepath +from _pytest.pathlib import bestrelpath +from _pytest.pathlib import import_path +from _pytest.pathlib import ImportMode +from _pytest.pathlib import resolve_package_path +from _pytest.pathlib import safe_exists +from _pytest.stash import Stash +from _pytest.warning_types import PytestConfigWarning +from _pytest.warning_types import warn_explicit_for + + +if TYPE_CHECKING: + from _pytest.assertion.rewrite import AssertionRewritingHook + from _pytest.cacheprovider import Cache + from _pytest.terminal import TerminalReporter + +_PluggyPlugin = object +"""A type to represent plugin objects. + +Plugins can be any namespace, so we can't narrow it down much, but we use an +alias to make the intent clear. + +Ideally this type would be provided by pluggy itself. +""" + + +hookimpl = HookimplMarker("pytest") +hookspec = HookspecMarker("pytest") + + +@final +class ExitCode(enum.IntEnum): + """Encodes the valid exit codes by pytest. + + Currently users and plugins may supply other exit codes as well. + + .. versionadded:: 5.0 + """ + + #: Tests passed. + OK = 0 + #: Tests failed. + TESTS_FAILED = 1 + #: pytest was interrupted. + INTERRUPTED = 2 + #: An internal error got in the way. + INTERNAL_ERROR = 3 + #: pytest was misused. + USAGE_ERROR = 4 + #: pytest couldn't find tests. + NO_TESTS_COLLECTED = 5 + + +class ConftestImportFailure(Exception): + def __init__( + self, + path: pathlib.Path, + *, + cause: Exception, + ) -> None: + self.path = path + self.cause = cause + + def __str__(self) -> str: + return f"{type(self.cause).__name__}: {self.cause} (from {self.path})" + + +def filter_traceback_for_conftest_import_failure( + entry: _pytest._code.TracebackEntry, +) -> bool: + """Filter tracebacks entries which point to pytest internals or importlib. + + Make a special case for importlib because we use it to import test modules and conftest files + in _pytest.pathlib.import_path. + """ + return filter_traceback(entry) and "importlib" not in str(entry.path).split(os.sep) + + +def main( + args: list[str] | os.PathLike[str] | None = None, + plugins: Sequence[str | _PluggyPlugin] | None = None, +) -> int | ExitCode: + """Perform an in-process test run. + + :param args: + List of command line arguments. If `None` or not given, defaults to reading + arguments directly from the process command line (:data:`sys.argv`). + :param plugins: List of plugin objects to be auto-registered during initialization. + + :returns: An exit code. 
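+ + Example (an illustrative sketch; the ``tests/`` directory is hypothetical): + + .. code-block:: python + + import pytest + + exit_code = pytest.main(["-q", "tests/"]) + if exit_code == pytest.ExitCode.OK: + print("all tests passed")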
+ """ + old_pytest_version = os.environ.get("PYTEST_VERSION") + try: + os.environ["PYTEST_VERSION"] = __version__ + try: + config = _prepareconfig(args, plugins) + except ConftestImportFailure as e: + exc_info = ExceptionInfo.from_exception(e.cause) + tw = TerminalWriter(sys.stderr) + tw.line(f"ImportError while loading conftest '{e.path}'.", red=True) + exc_info.traceback = exc_info.traceback.filter( + filter_traceback_for_conftest_import_failure + ) + exc_repr = ( + exc_info.getrepr(style="short", chain=False) + if exc_info.traceback + else exc_info.exconly() + ) + formatted_tb = str(exc_repr) + for line in formatted_tb.splitlines(): + tw.line(line.rstrip(), red=True) + return ExitCode.USAGE_ERROR + else: + try: + ret: ExitCode | int = config.hook.pytest_cmdline_main(config=config) + try: + return ExitCode(ret) + except ValueError: + return ret + finally: + config._ensure_unconfigure() + except UsageError as e: + tw = TerminalWriter(sys.stderr) + for msg in e.args: + tw.line(f"ERROR: {msg}\n", red=True) + return ExitCode.USAGE_ERROR + finally: + if old_pytest_version is None: + os.environ.pop("PYTEST_VERSION", None) + else: + os.environ["PYTEST_VERSION"] = old_pytest_version + + +def console_main() -> int: + """The CLI entry point of pytest. + + This function is not meant for programmable use; use `main()` instead. + """ + # https://docs.python.org/3/library/signal.html#note-on-sigpipe + try: + code = main() + sys.stdout.flush() + return code + except BrokenPipeError: + # Python flushes standard streams on exit; redirect remaining output + # to devnull to avoid another BrokenPipeError at shutdown + devnull = os.open(os.devnull, os.O_WRONLY) + os.dup2(devnull, sys.stdout.fileno()) + return 1 # Python exits with error code 1 on EPIPE + + +class cmdline: # compatibility namespace + main = staticmethod(main) + + +def filename_arg(path: str, optname: str) -> str: + """Argparse type validator for filename arguments. + + :path: Path of filename. + :optname: Name of the option. + """ + if os.path.isdir(path): + raise UsageError(f"{optname} must be a filename, given: {path}") + return path + + +def directory_arg(path: str, optname: str) -> str: + """Argparse type validator for directory arguments. + + :path: Path of directory. + :optname: Name of the option. + """ + if not os.path.isdir(path): + raise UsageError(f"{optname} must be a directory, given: {path}") + return path + + +# Plugins that cannot be disabled via "-p no:X" currently. +essential_plugins = ( + "mark", + "main", + "runner", + "fixtures", + "helpconfig", # Provides -p. +) + +default_plugins = ( + *essential_plugins, + "python", + "terminal", + "debugging", + "unittest", + "capture", + "skipping", + "legacypath", + "tmpdir", + "monkeypatch", + "recwarn", + "pastebin", + "assertion", + "junitxml", + "doctest", + "cacheprovider", + "freeze_support", + "setuponly", + "setupplan", + "stepwise", + "unraisableexception", + "threadexception", + "warnings", + "logging", + "reports", + "faulthandler", +) + +builtin_plugins = { + *default_plugins, + "pytester", + "pytester_assertions", +} + + +def get_config( + args: list[str] | None = None, + plugins: Sequence[str | _PluggyPlugin] | None = None, +) -> Config: + # subsequent calls to main will create a fresh instance + pluginmanager = PytestPluginManager() + config = Config( + pluginmanager, + invocation_params=Config.InvocationParams( + args=args or (), + plugins=plugins, + dir=pathlib.Path.cwd(), + ), + ) + + if args is not None: + # Handle any "-p no:plugin" args. 
+ pluginmanager.consider_preparse(args, exclude_only=True) + + for spec in default_plugins: + pluginmanager.import_plugin(spec) + + return config + + +def get_plugin_manager() -> PytestPluginManager: + """Obtain a new instance of the + :py:class:`pytest.PytestPluginManager`, with default plugins + already loaded. + + This function can be used by integration with other tools, like hooking + into pytest to run tests into an IDE. + """ + return get_config().pluginmanager + + +def _prepareconfig( + args: list[str] | os.PathLike[str] | None = None, + plugins: Sequence[str | _PluggyPlugin] | None = None, +) -> Config: + if args is None: + args = sys.argv[1:] + elif isinstance(args, os.PathLike): + args = [os.fspath(args)] + elif not isinstance(args, list): + msg = ( # type:ignore[unreachable] + "`args` parameter expected to be a list of strings, got: {!r} (type: {})" + ) + raise TypeError(msg.format(args, type(args))) + + config = get_config(args, plugins) + pluginmanager = config.pluginmanager + try: + if plugins: + for plugin in plugins: + if isinstance(plugin, str): + pluginmanager.consider_pluginarg(plugin) + else: + pluginmanager.register(plugin) + config = pluginmanager.hook.pytest_cmdline_parse( + pluginmanager=pluginmanager, args=args + ) + return config + except BaseException: + config._ensure_unconfigure() + raise + + +def _get_directory(path: pathlib.Path) -> pathlib.Path: + """Get the directory of a path - itself if already a directory.""" + if path.is_file(): + return path.parent + else: + return path + + +def _get_legacy_hook_marks( + method: Any, + hook_type: str, + opt_names: tuple[str, ...], +) -> dict[str, bool]: + if TYPE_CHECKING: + # abuse typeguard from importlib to avoid massive method type union that's lacking an alias + assert inspect.isroutine(method) + known_marks: set[str] = {m.name for m in getattr(method, "pytestmark", [])} + must_warn: list[str] = [] + opts: dict[str, bool] = {} + for opt_name in opt_names: + opt_attr = getattr(method, opt_name, AttributeError) + if opt_attr is not AttributeError: + must_warn.append(f"{opt_name}={opt_attr}") + opts[opt_name] = True + elif opt_name in known_marks: + must_warn.append(f"{opt_name}=True") + opts[opt_name] = True + else: + opts[opt_name] = False + if must_warn: + hook_opts = ", ".join(must_warn) + message = _pytest.deprecated.HOOK_LEGACY_MARKING.format( + type=hook_type, + fullname=method.__qualname__, + hook_opts=hook_opts, + ) + warn_explicit_for(cast(FunctionType, method), message) + return opts + + +@final +class PytestPluginManager(PluginManager): + """A :py:class:`pluggy.PluginManager ` with + additional pytest-specific functionality: + + * Loading plugins from the command line, ``PYTEST_PLUGINS`` env variable and + ``pytest_plugins`` global variables found in plugins being loaded. + * ``conftest.py`` loading during start-up. + """ + + def __init__(self) -> None: + from _pytest.assertion import DummyRewriteHook + from _pytest.assertion import RewriteHook + + super().__init__("pytest") + + # -- State related to local conftest plugins. + # All loaded conftest modules. + self._conftest_plugins: set[types.ModuleType] = set() + # All conftest modules applicable for a directory. + # This includes the directory's own conftest modules as well + # as those of its parent directories. + self._dirpath2confmods: dict[pathlib.Path, list[types.ModuleType]] = {} + # Cutoff directory above which conftests are no longer discovered. + self._confcutdir: pathlib.Path | None = None + # If set, conftest loading is skipped. 
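+ # (Assigned from the `noconftest` argument of _set_initial_conftests below.)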
+ self._noconftest = False + + # _getconftestmodules()'s call to _get_directory() causes a stat + # storm when it's called potentially thousands of times in a test + # session (#9478), often with the same path, so cache it. + self._get_directory = lru_cache(256)(_get_directory) + + # plugins that were explicitly skipped with pytest.skip + # list of (module name, skip reason) + # previously we would issue a warning when a plugin was skipped, but + # since we refactored warnings as first citizens of Config, they are + # just stored here to be used later. + self.skipped_plugins: list[tuple[str, str]] = [] + + self.add_hookspecs(_pytest.hookspec) + self.register(self) + if os.environ.get("PYTEST_DEBUG"): + err: IO[str] = sys.stderr + encoding: str = getattr(err, "encoding", "utf8") + try: + err = open( + os.dup(err.fileno()), + mode=err.mode, + buffering=1, + encoding=encoding, + ) + except Exception: + pass + self.trace.root.setwriter(err.write) + self.enable_tracing() + + # Config._consider_importhook will set a real object if required. + self.rewrite_hook: RewriteHook = DummyRewriteHook() + # Used to know when we are importing conftests after the pytest_configure stage. + self._configured = False + + def parse_hookimpl_opts( + self, plugin: _PluggyPlugin, name: str + ) -> HookimplOpts | None: + """:meta private:""" + # pytest hooks are always prefixed with "pytest_", + # so we avoid accessing possibly non-readable attributes + # (see issue #1073). + if not name.startswith("pytest_"): + return None + # Ignore names which cannot be hooks. + if name == "pytest_plugins": + return None + + opts = super().parse_hookimpl_opts(plugin, name) + if opts is not None: + return opts + + method = getattr(plugin, name) + # Consider only actual functions for hooks (#3775). + if not inspect.isroutine(method): + return None + # Collect unmarked hooks as long as they have the `pytest_' prefix. + legacy = _get_legacy_hook_marks( + method, "impl", ("tryfirst", "trylast", "optionalhook", "hookwrapper") + ) + return cast(HookimplOpts, legacy) + + def parse_hookspec_opts(self, module_or_class, name: str) -> HookspecOpts | None: + """:meta private:""" + opts = super().parse_hookspec_opts(module_or_class, name) + if opts is None: + method = getattr(module_or_class, name) + if name.startswith("pytest_"): + legacy = _get_legacy_hook_marks( + method, "spec", ("firstresult", "historic") + ) + opts = cast(HookspecOpts, legacy) + return opts + + def register(self, plugin: _PluggyPlugin, name: str | None = None) -> str | None: + if name in _pytest.deprecated.DEPRECATED_EXTERNAL_PLUGINS: + warnings.warn( + PytestConfigWarning( + "{} plugin has been merged into the core, " + "please remove it from your requirements.".format( + name.replace("_", "-") + ) + ) + ) + return None + plugin_name = super().register(plugin, name) + if plugin_name is not None: + self.hook.pytest_plugin_registered.call_historic( + kwargs=dict( + plugin=plugin, + plugin_name=plugin_name, + manager=self, + ) + ) + + if isinstance(plugin, types.ModuleType): + self.consider_module(plugin) + return plugin_name + + def getplugin(self, name: str): + # Support deprecated naming because plugins (xdist e.g.) use it. 
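+ # getplugin() simply delegates to pluggy's get_plugin().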
+ plugin: _PluggyPlugin | None = self.get_plugin(name) + return plugin + + def hasplugin(self, name: str) -> bool: + """Return whether a plugin with the given name is registered.""" + return bool(self.get_plugin(name)) + + def pytest_configure(self, config: Config) -> None: + """:meta private:""" + # XXX now that the pluginmanager exposes hookimpl(tryfirst...) + # we should remove tryfirst/trylast as markers. + config.addinivalue_line( + "markers", + "tryfirst: mark a hook implementation function such that the " + "plugin machinery will try to call it first/as early as possible. " + "DEPRECATED, use @pytest.hookimpl(tryfirst=True) instead.", + ) + config.addinivalue_line( + "markers", + "trylast: mark a hook implementation function such that the " + "plugin machinery will try to call it last/as late as possible. " + "DEPRECATED, use @pytest.hookimpl(trylast=True) instead.", + ) + self._configured = True + + # + # Internal API for local conftest plugin handling. + # + def _set_initial_conftests( + self, + args: Sequence[str | pathlib.Path], + pyargs: bool, + noconftest: bool, + rootpath: pathlib.Path, + confcutdir: pathlib.Path | None, + invocation_dir: pathlib.Path, + importmode: ImportMode | str, + *, + consider_namespace_packages: bool, + ) -> None: + """Load initial conftest files given a preparsed "namespace". + + As conftest files may add their own command line options which have + arguments ('--my-opt somepath') we might get some false positives. + All builtin and 3rd party plugins will have been loaded, however, so + common options will not confuse our logic here. + """ + self._confcutdir = ( + absolutepath(invocation_dir / confcutdir) if confcutdir else None + ) + self._noconftest = noconftest + self._using_pyargs = pyargs + foundanchor = False + for initial_path in args: + path = str(initial_path) + # remove node-id syntax + i = path.find("::") + if i != -1: + path = path[:i] + anchor = absolutepath(invocation_dir / path) + + # Ensure we do not break if what appears to be an anchor + # is in fact a very long option (#10169, #11394). + if safe_exists(anchor): + self._try_load_conftest( + anchor, + importmode, + rootpath, + consider_namespace_packages=consider_namespace_packages, + ) + foundanchor = True + if not foundanchor: + self._try_load_conftest( + invocation_dir, + importmode, + rootpath, + consider_namespace_packages=consider_namespace_packages, + ) + + def _is_in_confcutdir(self, path: pathlib.Path) -> bool: + """Whether to consider the given path to load conftests from.""" + if self._confcutdir is None: + return True + # The semantics here are literally: + # Do not load a conftest if it is found upwards from confcut dir. + # But this is *not* the same as: + # Load only conftests from confcutdir or below. + # At first glance they might seem the same thing, however we do support use cases where + # we want to load conftests that are not found in confcutdir or below, but are found + # in completely different directory hierarchies like packages installed + # in out-of-source trees. + # (see #9767 for a regression where the logic was inverted). 
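+ # Illustration (hypothetical paths), with confcutdir=/repo/tests: + # /repo/conftest.py -> rejected (found upwards from confcutdir) + # /repo/tests/unit/conftest.py -> loaded + # /opt/other/pkg/conftest.py -> loaded (separate directory hierarchy)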
+ return path not in self._confcutdir.parents + + def _try_load_conftest( + self, + anchor: pathlib.Path, + importmode: str | ImportMode, + rootpath: pathlib.Path, + *, + consider_namespace_packages: bool, + ) -> None: + self._loadconftestmodules( + anchor, + importmode, + rootpath, + consider_namespace_packages=consider_namespace_packages, + ) + # let's also consider test* subdirs + if anchor.is_dir(): + for x in anchor.glob("test*"): + if x.is_dir(): + self._loadconftestmodules( + x, + importmode, + rootpath, + consider_namespace_packages=consider_namespace_packages, + ) + + def _loadconftestmodules( + self, + path: pathlib.Path, + importmode: str | ImportMode, + rootpath: pathlib.Path, + *, + consider_namespace_packages: bool, + ) -> None: + if self._noconftest: + return + + directory = self._get_directory(path) + + # Optimization: avoid repeated searches in the same directory. + # Assumes always called with same importmode and rootpath. + if directory in self._dirpath2confmods: + return + + clist = [] + for parent in reversed((directory, *directory.parents)): + if self._is_in_confcutdir(parent): + conftestpath = parent / "conftest.py" + if conftestpath.is_file(): + mod = self._importconftest( + conftestpath, + importmode, + rootpath, + consider_namespace_packages=consider_namespace_packages, + ) + clist.append(mod) + self._dirpath2confmods[directory] = clist + + def _getconftestmodules(self, path: pathlib.Path) -> Sequence[types.ModuleType]: + directory = self._get_directory(path) + return self._dirpath2confmods.get(directory, ()) + + def _rget_with_confmod( + self, + name: str, + path: pathlib.Path, + ) -> tuple[types.ModuleType, Any]: + modules = self._getconftestmodules(path) + for mod in reversed(modules): + try: + return mod, getattr(mod, name) + except AttributeError: + continue + raise KeyError(name) + + def _importconftest( + self, + conftestpath: pathlib.Path, + importmode: str | ImportMode, + rootpath: pathlib.Path, + *, + consider_namespace_packages: bool, + ) -> types.ModuleType: + conftestpath_plugin_name = str(conftestpath) + existing = self.get_plugin(conftestpath_plugin_name) + if existing is not None: + return cast(types.ModuleType, existing) + + # conftest.py files there are not in a Python package all have module + # name "conftest", and thus conflict with each other. Clear the existing + # before loading the new one, otherwise the existing one will be + # returned from the module cache. + pkgpath = resolve_package_path(conftestpath) + if pkgpath is None: + try: + del sys.modules[conftestpath.stem] + except KeyError: + pass + + try: + mod = import_path( + conftestpath, + mode=importmode, + root=rootpath, + consider_namespace_packages=consider_namespace_packages, + ) + except Exception as e: + assert e.__traceback__ is not None + raise ConftestImportFailure(conftestpath, cause=e) from e + + self._check_non_top_pytest_plugins(mod, conftestpath) + + self._conftest_plugins.add(mod) + dirpath = conftestpath.parent + if dirpath in self._dirpath2confmods: + for path, mods in self._dirpath2confmods.items(): + if dirpath in path.parents or path == dirpath: + if mod in mods: + raise AssertionError( + f"While trying to load conftest path {conftestpath!s}, " + f"found that the module {mod} is already loaded with path {mod.__file__}. " + "This is not supposed to happen. Please report this issue to pytest." 
+ )
+ mods.append(mod)
+ self.trace(f"loading conftestmodule {mod!r}")
+ self.consider_conftest(mod, registration_name=conftestpath_plugin_name)
+ return mod
+
+ def _check_non_top_pytest_plugins(
+ self,
+ mod: types.ModuleType,
+ conftestpath: pathlib.Path,
+ ) -> None:
+ if (
+ hasattr(mod, "pytest_plugins")
+ and self._configured
+ and not self._using_pyargs
+ ):
+ msg = (
+ "Defining 'pytest_plugins' in a non-top-level conftest is no longer supported:\n"
+ "It affects the entire test suite instead of just below the conftest as expected.\n"
+ " {}\n"
+ "Please move it to a top level conftest file at the rootdir:\n"
+ " {}\n"
+ "For more information, visit:\n"
+ " https://docs.pytest.org/en/stable/deprecations.html#pytest-plugins-in-non-top-level-conftest-files"
+ )
+ fail(msg.format(conftestpath, self._confcutdir), pytrace=False)
+
+ #
+ # API for bootstrapping plugin loading
+ #
+ #
+
+ def consider_preparse(
+ self, args: Sequence[str], *, exclude_only: bool = False
+ ) -> None:
+ """:meta private:"""
+ i = 0
+ n = len(args)
+ while i < n:
+ opt = args[i]
+ i += 1
+ if isinstance(opt, str):
+ if opt == "-p":
+ try:
+ parg = args[i]
+ except IndexError:
+ return
+ i += 1
+ elif opt.startswith("-p"):
+ parg = opt[2:]
+ else:
+ continue
+ parg = parg.strip()
+ if exclude_only and not parg.startswith("no:"):
+ continue
+ self.consider_pluginarg(parg)
+
+ def consider_pluginarg(self, arg: str) -> None:
+ """:meta private:"""
+ if arg.startswith("no:"):
+ name = arg[3:]
+ if name in essential_plugins:
+ raise UsageError(f"plugin {name} cannot be disabled")
+
+ # PR #4304: remove stepwise if cacheprovider is blocked.
+ if name == "cacheprovider":
+ self.set_blocked("stepwise")
+ self.set_blocked("pytest_stepwise")
+
+ self.set_blocked(name)
+ if not name.startswith("pytest_"):
+ self.set_blocked("pytest_" + name)
+ else:
+ name = arg
+ # Unblock the plugin.
+ self.unblock(name)
+ if not name.startswith("pytest_"):
+ self.unblock("pytest_" + name)
+ self.import_plugin(arg, consider_entry_points=True)
+
+ def consider_conftest(
+ self, conftestmodule: types.ModuleType, registration_name: str
+ ) -> None:
+ """:meta private:"""
+ self.register(conftestmodule, name=registration_name)
+
+ def consider_env(self) -> None:
+ """:meta private:"""
+ self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS"))
+
+ def consider_module(self, mod: types.ModuleType) -> None:
+ """:meta private:"""
+ self._import_plugin_specs(getattr(mod, "pytest_plugins", []))
+
+ def _import_plugin_specs(
+ self, spec: None | types.ModuleType | str | Sequence[str]
+ ) -> None:
+ plugins = _get_plugin_specs_as_list(spec)
+ for import_spec in plugins:
+ self.import_plugin(import_spec)
+
+ def import_plugin(self, modname: str, consider_entry_points: bool = False) -> None:
+ """Import a plugin with ``modname``.
+
+ If ``consider_entry_points`` is True, entry point names are also
+ considered to find a plugin.
+ """
+ # Most often modname refers to builtin modules, e.g. "pytester",
+ # "terminal" or "capture". Those plugins are registered under their
+ # basename for historic purposes but must be imported with the
+ # _pytest prefix.
+ assert isinstance(modname, str), (
+ f"module name as text required, got {modname!r}"
+ )
+ if self.is_blocked(modname) or self.get_plugin(modname) is not None:
+ return
+
+ importspec = "_pytest." + modname if modname in builtin_plugins else modname
+ self.rewrite_hook.mark_rewrite(importspec)
+
+ if consider_entry_points:
+ loaded = self.load_setuptools_entrypoints("pytest11", name=modname)
+ if loaded:
+ return
+
+ try:
+ __import__(importspec)
+ except ImportError as e:
+ raise ImportError(
+ f'Error importing plugin "{modname}": {e.args[0]}'
+ ).with_traceback(e.__traceback__) from e
+
+ except Skipped as e:
+ self.skipped_plugins.append((modname, e.msg or ""))
+ else:
+ mod = sys.modules[importspec]
+ self.register(mod, modname)
+
+
+def _get_plugin_specs_as_list(
+ specs: None | types.ModuleType | str | Sequence[str],
+) -> list[str]:
+ """Parse a plugins specification into a list of plugin names."""
+ # None means empty.
+ if specs is None:
+ return []
+ # Workaround for #3899 - a submodule which happens to be called "pytest_plugins".
+ if isinstance(specs, types.ModuleType):
+ return []
+ # Comma-separated list.
+ if isinstance(specs, str):
+ return specs.split(",") if specs else []
+ # Direct specification.
+ if isinstance(specs, collections.abc.Sequence):
+ return list(specs)
+ raise UsageError(
+ f"Plugins may be specified as a sequence or a ','-separated string of plugin names. Got: {specs!r}"
+ )
+
+
+class Notset:
+ def __repr__(self):
+ return "<NOTSET>"
+
+
+notset = Notset()
+
+
+def _iter_rewritable_modules(package_files: Iterable[str]) -> Iterator[str]:
+ """Given an iterable of file names in a source distribution, return the "names" that should
+ be marked for assertion rewrite.
+
+ For example the package "pytest_mock/__init__.py" should be added as "pytest_mock" in
+ the assertion rewrite mechanism.
+
+ This function has to deal with dist-info based distributions and egg based distributions
+ (which are still very much in use for "editable" installs).
+
+ Here are the file names as seen in a dist-info based distribution:
+
+ pytest_mock/__init__.py
+ pytest_mock/_version.py
+ pytest_mock/plugin.py
+ pytest_mock.egg-info/PKG-INFO
+
+ Here are the file names as seen in an egg based distribution:
+
+ src/pytest_mock/__init__.py
+ src/pytest_mock/_version.py
+ src/pytest_mock/plugin.py
+ src/pytest_mock.egg-info/PKG-INFO
+ LICENSE
+ setup.py
+
+ We have to take into account those two distribution flavors in order to determine which
+ names should be considered for assertion rewriting.
+
+ More information:
+ https://github.com/pytest-dev/pytest-mock/issues/167
+ """
+ package_files = list(package_files)
+ seen_some = False
+ for fn in package_files:
+ is_simple_module = "/" not in fn and fn.endswith(".py")
+ is_package = fn.count("/") == 1 and fn.endswith("__init__.py")
+ if is_simple_module:
+ module_name, _ = os.path.splitext(fn)
+ # we ignore "setup.py" at the root of the distribution
+ # as well as editable installation finder modules made by setuptools
+ if module_name != "setup" and not module_name.startswith("__editable__"):
+ seen_some = True
+ yield module_name
+ elif is_package:
+ package_name = os.path.dirname(fn)
+ seen_some = True
+ yield package_name
+
+ if not seen_some:
+ # At this point we did not find any packages or modules suitable for assertion
+ # rewriting, so we try again by stripping the first path component (to account for
+ # "src" based source trees for example).
+ # This approach lets us have the common case continue to be fast, as egg-distributions
+ # are rarer.
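+ # Illustrative sketch (hypothetical input): for the egg/src layout
+ # ["src/pytest_mock/__init__.py", "setup.py"] the first pass yields
+ # nothing, so the leading path component is stripped and the recursive
+ # call below yields "pytest_mock".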
+ new_package_files = [] + for fn in package_files: + parts = fn.split("/") + new_fn = "/".join(parts[1:]) + if new_fn: + new_package_files.append(new_fn) + if new_package_files: + yield from _iter_rewritable_modules(new_package_files) + + +@final +class Config: + """Access to configuration values, pluginmanager and plugin hooks. + + :param PytestPluginManager pluginmanager: + A pytest PluginManager. + + :param InvocationParams invocation_params: + Object containing parameters regarding the :func:`pytest.main` + invocation. + """ + + @final + @dataclasses.dataclass(frozen=True) + class InvocationParams: + """Holds parameters passed during :func:`pytest.main`. + + The object attributes are read-only. + + .. versionadded:: 5.1 + + .. note:: + + Note that the environment variable ``PYTEST_ADDOPTS`` and the ``addopts`` + ini option are handled by pytest, not being included in the ``args`` attribute. + + Plugins accessing ``InvocationParams`` must be aware of that. + """ + + args: tuple[str, ...] + """The command-line arguments as passed to :func:`pytest.main`.""" + plugins: Sequence[str | _PluggyPlugin] | None + """Extra plugins, might be `None`.""" + dir: pathlib.Path + """The directory from which :func:`pytest.main` was invoked. :type: pathlib.Path""" + + def __init__( + self, + *, + args: Iterable[str], + plugins: Sequence[str | _PluggyPlugin] | None, + dir: pathlib.Path, + ) -> None: + object.__setattr__(self, "args", tuple(args)) + object.__setattr__(self, "plugins", plugins) + object.__setattr__(self, "dir", dir) + + class ArgsSource(enum.Enum): + """Indicates the source of the test arguments. + + .. versionadded:: 7.2 + """ + + #: Command line arguments. + ARGS = enum.auto() + #: Invocation directory. + INVOCATION_DIR = enum.auto() + INCOVATION_DIR = INVOCATION_DIR # backwards compatibility alias + #: 'testpaths' configuration value. + TESTPATHS = enum.auto() + + # Set by cacheprovider plugin. + cache: Cache + + def __init__( + self, + pluginmanager: PytestPluginManager, + *, + invocation_params: InvocationParams | None = None, + ) -> None: + from .argparsing import FILE_OR_DIR + from .argparsing import Parser + + if invocation_params is None: + invocation_params = self.InvocationParams( + args=(), plugins=None, dir=pathlib.Path.cwd() + ) + + self.option = argparse.Namespace() + """Access to command line option as attributes. + + :type: argparse.Namespace + """ + + self.invocation_params = invocation_params + """The parameters with which pytest was invoked. + + :type: InvocationParams + """ + + _a = FILE_OR_DIR + self._parser = Parser( + usage=f"%(prog)s [options] [{_a}] [{_a}] [...]", + processopt=self._processopt, + _ispytest=True, + ) + self.pluginmanager = pluginmanager + """The plugin manager handles plugin registration and hook invocation. + + :type: PytestPluginManager + """ + + self.stash = Stash() + """A place where plugins can store information on the config for their + own use. + + :type: Stash + """ + # Deprecated alias. Was never public. Can be removed in a few releases. 
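+ # Illustrative sketch (hypothetical plugin code, not from this file):
+ # the stash declared above is keyed with typed StashKey objects, e.g.:
+ #
+ #     import pytest
+ #     report_key: pytest.StashKey[str] = pytest.StashKey()
+ #
+ #     def pytest_configure(config):
+ #         config.stash[report_key] = "configured"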
+ self._store = self.stash + + self.trace = self.pluginmanager.trace.root.get("config") + self.hook: pluggy.HookRelay = PathAwareHookProxy(self.pluginmanager.hook) # type: ignore[assignment] + self._inicache: dict[str, Any] = {} + self._override_ini: Sequence[str] = () + self._opt2dest: dict[str, str] = {} + self._cleanup_stack = contextlib.ExitStack() + self.pluginmanager.register(self, "pytestconfig") + self._configured = False + self.hook.pytest_addoption.call_historic( + kwargs=dict(parser=self._parser, pluginmanager=self.pluginmanager) + ) + self.args_source = Config.ArgsSource.ARGS + self.args: list[str] = [] + + @property + def rootpath(self) -> pathlib.Path: + """The path to the :ref:`rootdir `. + + :type: pathlib.Path + + .. versionadded:: 6.1 + """ + return self._rootpath + + @property + def inipath(self) -> pathlib.Path | None: + """The path to the :ref:`configfile `. + + .. versionadded:: 6.1 + """ + return self._inipath + + def add_cleanup(self, func: Callable[[], None]) -> None: + """Add a function to be called when the config object gets out of + use (usually coinciding with pytest_unconfigure). + """ + self._cleanup_stack.callback(func) + + def _do_configure(self) -> None: + assert not self._configured + self._configured = True + self.hook.pytest_configure.call_historic(kwargs=dict(config=self)) + + def _ensure_unconfigure(self) -> None: + try: + if self._configured: + self._configured = False + try: + self.hook.pytest_unconfigure(config=self) + finally: + self.hook.pytest_configure._call_history = [] + finally: + try: + self._cleanup_stack.close() + finally: + self._cleanup_stack = contextlib.ExitStack() + + def get_terminal_writer(self) -> TerminalWriter: + terminalreporter: TerminalReporter | None = self.pluginmanager.get_plugin( + "terminalreporter" + ) + assert terminalreporter is not None + return terminalreporter._tw + + def pytest_cmdline_parse( + self, pluginmanager: PytestPluginManager, args: list[str] + ) -> Config: + try: + self.parse(args) + except UsageError: + # Handle --version and --help here in a minimal fashion. + # This gets done via helpconfig normally, but its + # pytest_cmdline_main is not called in case of errors. + if getattr(self.option, "version", False) or "--version" in args: + from _pytest.helpconfig import showversion + + showversion(self) + elif ( + getattr(self.option, "help", False) or "--help" in args or "-h" in args + ): + self._parser._getparser().print_help() + sys.stdout.write( + "\nNOTE: displaying only minimal help due to UsageError.\n\n" + ) + + raise + + return self + + def notify_exception( + self, + excinfo: ExceptionInfo[BaseException], + option: argparse.Namespace | None = None, + ) -> None: + if option and getattr(option, "fulltrace", False): + style: TracebackStyle = "long" + else: + style = "native" + excrepr = excinfo.getrepr( + funcargs=True, showlocals=getattr(option, "showlocals", False), style=style + ) + res = self.hook.pytest_internalerror(excrepr=excrepr, excinfo=excinfo) + if not any(res): + for line in str(excrepr).split("\n"): + sys.stderr.write(f"INTERNALERROR> {line}\n") + sys.stderr.flush() + + def cwd_relative_nodeid(self, nodeid: str) -> str: + # nodeid's are relative to the rootpath, compute relative to cwd. 
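+ # Illustrative sketch (hypothetical values): with rootpath=/repo and an
+ # invocation dir of /repo/tests, the nodeid "tests/test_a.py::test_x"
+ # is rebased to "test_a.py::test_x" so reported ids stay usable from
+ # the current working directory.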
+ if self.invocation_params.dir != self.rootpath: + base_path_part, *nodeid_part = nodeid.split("::") + # Only process path part + fullpath = self.rootpath / base_path_part + relative_path = bestrelpath(self.invocation_params.dir, fullpath) + + nodeid = "::".join([relative_path, *nodeid_part]) + return nodeid + + @classmethod + def fromdictargs(cls, option_dict, args) -> Config: + """Constructor usable for subprocesses.""" + config = get_config(args) + config.option.__dict__.update(option_dict) + config.parse(args, addopts=False) + for x in config.option.plugins: + config.pluginmanager.consider_pluginarg(x) + return config + + def _processopt(self, opt: Argument) -> None: + for name in opt._short_opts + opt._long_opts: + self._opt2dest[name] = opt.dest + + if hasattr(opt, "default"): + if not hasattr(self.option, opt.dest): + setattr(self.option, opt.dest, opt.default) + + @hookimpl(trylast=True) + def pytest_load_initial_conftests(self, early_config: Config) -> None: + # We haven't fully parsed the command line arguments yet, so + # early_config.args it not set yet. But we need it for + # discovering the initial conftests. So "pre-run" the logic here. + # It will be done for real in `parse()`. + args, args_source = early_config._decide_args( + args=early_config.known_args_namespace.file_or_dir, + pyargs=early_config.known_args_namespace.pyargs, + testpaths=early_config.getini("testpaths"), + invocation_dir=early_config.invocation_params.dir, + rootpath=early_config.rootpath, + warn=False, + ) + self.pluginmanager._set_initial_conftests( + args=args, + pyargs=early_config.known_args_namespace.pyargs, + noconftest=early_config.known_args_namespace.noconftest, + rootpath=early_config.rootpath, + confcutdir=early_config.known_args_namespace.confcutdir, + invocation_dir=early_config.invocation_params.dir, + importmode=early_config.known_args_namespace.importmode, + consider_namespace_packages=early_config.getini( + "consider_namespace_packages" + ), + ) + + def _initini(self, args: Sequence[str]) -> None: + ns, unknown_args = self._parser.parse_known_and_unknown_args( + args, namespace=copy.copy(self.option) + ) + rootpath, inipath, inicfg = determine_setup( + inifile=ns.inifilename, + args=ns.file_or_dir + unknown_args, + rootdir_cmd_arg=ns.rootdir or None, + invocation_dir=self.invocation_params.dir, + ) + self._rootpath = rootpath + self._inipath = inipath + self.inicfg = inicfg + self._parser.extra_info["rootdir"] = str(self.rootpath) + self._parser.extra_info["inifile"] = str(self.inipath) + self._parser.addini("addopts", "Extra command line options", "args") + self._parser.addini("minversion", "Minimally required pytest version") + self._parser.addini( + "pythonpath", type="paths", help="Add paths to sys.path", default=[] + ) + self._parser.addini( + "required_plugins", + "Plugins that must be present for pytest to run", + type="args", + default=[], + ) + self._override_ini = ns.override_ini or () + + def _consider_importhook(self, args: Sequence[str]) -> None: + """Install the PEP 302 import hook if using assertion rewriting. + + Needs to parse the --assert= option from the commandline + and find all the installed plugins to mark them for rewriting + by the importhook. 
+ """ + ns, unknown_args = self._parser.parse_known_and_unknown_args(args) + mode = getattr(ns, "assertmode", "plain") + + disable_autoload = getattr(ns, "disable_plugin_autoload", False) or bool( + os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD") + ) + if mode == "rewrite": + import _pytest.assertion + + try: + hook = _pytest.assertion.install_importhook(self) + except SystemError: + mode = "plain" + else: + self._mark_plugins_for_rewrite(hook, disable_autoload) + self._warn_about_missing_assertion(mode) + + def _mark_plugins_for_rewrite( + self, hook: AssertionRewritingHook, disable_autoload: bool + ) -> None: + """Given an importhook, mark for rewrite any top-level + modules or packages in the distribution package for + all pytest plugins.""" + self.pluginmanager.rewrite_hook = hook + + if disable_autoload: + # We don't autoload from distribution package entry points, + # no need to continue. + return + + package_files = ( + str(file) + for dist in importlib.metadata.distributions() + if any(ep.group == "pytest11" for ep in dist.entry_points) + for file in dist.files or [] + ) + + for name in _iter_rewritable_modules(package_files): + hook.mark_rewrite(name) + + def _configure_python_path(self) -> None: + # `pythonpath = a b` will set `sys.path` to `[a, b, x, y, z, ...]` + for path in reversed(self.getini("pythonpath")): + sys.path.insert(0, str(path)) + self.add_cleanup(self._unconfigure_python_path) + + def _unconfigure_python_path(self) -> None: + for path in self.getini("pythonpath"): + path_str = str(path) + if path_str in sys.path: + sys.path.remove(path_str) + + def _validate_args(self, args: list[str], via: str) -> list[str]: + """Validate known args.""" + self._parser._config_source_hint = via # type: ignore + try: + self._parser.parse_known_and_unknown_args( + args, namespace=copy.copy(self.option) + ) + finally: + del self._parser._config_source_hint # type: ignore + + return args + + def _decide_args( + self, + *, + args: list[str], + pyargs: bool, + testpaths: list[str], + invocation_dir: pathlib.Path, + rootpath: pathlib.Path, + warn: bool, + ) -> tuple[list[str], ArgsSource]: + """Decide the args (initial paths/nodeids) to use given the relevant inputs. + + :param warn: Whether can issue warnings. + + :returns: The args and the args source. Guaranteed to be non-empty. + """ + if args: + source = Config.ArgsSource.ARGS + result = args + else: + if invocation_dir == rootpath: + source = Config.ArgsSource.TESTPATHS + if pyargs: + result = testpaths + else: + result = [] + for path in testpaths: + result.extend(sorted(glob.iglob(path, recursive=True))) + if testpaths and not result: + if warn: + warning_text = ( + "No files were found in testpaths; " + "consider removing or adjusting your testpaths configuration. " + "Searching recursively from the current directory instead." 
+ ) + self.issue_config_time_warning( + PytestConfigWarning(warning_text), stacklevel=3 + ) + else: + result = [] + if not result: + source = Config.ArgsSource.INVOCATION_DIR + result = [str(invocation_dir)] + return result, source + + def _preparse(self, args: list[str], addopts: bool = True) -> None: + if addopts: + env_addopts = os.environ.get("PYTEST_ADDOPTS", "") + if len(env_addopts): + args[:] = ( + self._validate_args(shlex.split(env_addopts), "via PYTEST_ADDOPTS") + + args + ) + self._initini(args) + if addopts: + args[:] = ( + self._validate_args(self.getini("addopts"), "via addopts config") + args + ) + + self.known_args_namespace = self._parser.parse_known_args( + args, namespace=copy.copy(self.option) + ) + self._checkversion() + self._consider_importhook(args) + self._configure_python_path() + self.pluginmanager.consider_preparse(args, exclude_only=False) + if ( + not os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD") + and not self.known_args_namespace.disable_plugin_autoload + ): + # Autoloading from distribution package entry point has + # not been disabled. + self.pluginmanager.load_setuptools_entrypoints("pytest11") + # Otherwise only plugins explicitly specified in PYTEST_PLUGINS + # are going to be loaded. + self.pluginmanager.consider_env() + + self.known_args_namespace = self._parser.parse_known_args( + args, namespace=copy.copy(self.known_args_namespace) + ) + + self._validate_plugins() + self._warn_about_skipped_plugins() + + if self.known_args_namespace.confcutdir is None: + if self.inipath is not None: + confcutdir = str(self.inipath.parent) + else: + confcutdir = str(self.rootpath) + self.known_args_namespace.confcutdir = confcutdir + try: + self.hook.pytest_load_initial_conftests( + early_config=self, args=args, parser=self._parser + ) + except ConftestImportFailure as e: + if self.known_args_namespace.help or self.known_args_namespace.version: + # we don't want to prevent --help/--version to work + # so just let it pass and print a warning at the end + self.issue_config_time_warning( + PytestConfigWarning(f"could not load initial conftests: {e.path}"), + stacklevel=2, + ) + else: + raise + + @hookimpl(wrapper=True) + def pytest_collection(self) -> Generator[None, object, object]: + # Validate invalid ini keys after collection is done so we take in account + # options added by late-loading conftest files. + try: + return (yield) + finally: + self._validate_config_options() + + def _checkversion(self) -> None: + import pytest + + minver = self.inicfg.get("minversion", None) + if minver: + # Imported lazily to improve start-up time. + from packaging.version import Version + + if not isinstance(minver, str): + raise pytest.UsageError( + f"{self.inipath}: 'minversion' must be a single value" + ) + + if Version(minver) > Version(pytest.__version__): + raise pytest.UsageError( + f"{self.inipath}: 'minversion' requires pytest-{minver}, actual pytest-{pytest.__version__}'" + ) + + def _validate_config_options(self) -> None: + for key in sorted(self._get_unknown_ini_keys()): + self._warn_or_fail_if_strict(f"Unknown config option: {key}\n") + + def _validate_plugins(self) -> None: + required_plugins = sorted(self.getini("required_plugins")) + if not required_plugins: + return + + # Imported lazily to improve start-up time. 
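+ # Illustrative sketch (hypothetical ini content): entries such as
+ #
+ #     [pytest]
+ #     required_plugins = pytest-xdist>=2.1.0 pytest-cov
+ #
+ # are parsed as packaging Requirements below and matched against the
+ # installed plugin distributions; any miss raises UsageError.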
+ from packaging.requirements import InvalidRequirement + from packaging.requirements import Requirement + from packaging.version import Version + + plugin_info = self.pluginmanager.list_plugin_distinfo() + plugin_dist_info = {dist.project_name: dist.version for _, dist in plugin_info} + + missing_plugins = [] + for required_plugin in required_plugins: + try: + req = Requirement(required_plugin) + except InvalidRequirement: + missing_plugins.append(required_plugin) + continue + + if req.name not in plugin_dist_info: + missing_plugins.append(required_plugin) + elif not req.specifier.contains( + Version(plugin_dist_info[req.name]), prereleases=True + ): + missing_plugins.append(required_plugin) + + if missing_plugins: + raise UsageError( + "Missing required plugins: {}".format(", ".join(missing_plugins)), + ) + + def _warn_or_fail_if_strict(self, message: str) -> None: + if self.known_args_namespace.strict_config: + raise UsageError(message) + + self.issue_config_time_warning(PytestConfigWarning(message), stacklevel=3) + + def _get_unknown_ini_keys(self) -> list[str]: + parser_inicfg = self._parser._inidict + return [name for name in self.inicfg if name not in parser_inicfg] + + def parse(self, args: list[str], addopts: bool = True) -> None: + # Parse given cmdline arguments into this config object. + assert self.args == [], ( + "can only parse cmdline args at most once per Config object" + ) + self.hook.pytest_addhooks.call_historic( + kwargs=dict(pluginmanager=self.pluginmanager) + ) + self._preparse(args, addopts=addopts) + self._parser.after_preparse = True # type: ignore + try: + args = self._parser.parse_setoption( + args, self.option, namespace=self.option + ) + self.args, self.args_source = self._decide_args( + args=args, + pyargs=self.known_args_namespace.pyargs, + testpaths=self.getini("testpaths"), + invocation_dir=self.invocation_params.dir, + rootpath=self.rootpath, + warn=True, + ) + except PrintHelp: + pass + + def issue_config_time_warning(self, warning: Warning, stacklevel: int) -> None: + """Issue and handle a warning during the "configure" stage. + + During ``pytest_configure`` we can't capture warnings using the ``catch_warnings_for_item`` + function because it is not possible to have hook wrappers around ``pytest_configure``. + + This function is mainly intended for plugins that need to issue warnings during + ``pytest_configure`` (or similar stages). + + :param warning: The warning instance. + :param stacklevel: stacklevel forwarded to warnings.warn. + """ + if self.pluginmanager.is_blocked("warnings"): + return + + cmdline_filters = self.known_args_namespace.pythonwarnings or [] + config_filters = self.getini("filterwarnings") + + with warnings.catch_warnings(record=True) as records: + warnings.simplefilter("always", type(warning)) + apply_warning_filters(config_filters, cmdline_filters) + warnings.warn(warning, stacklevel=stacklevel) + + if records: + frame = sys._getframe(stacklevel - 1) + location = frame.f_code.co_filename, frame.f_lineno, frame.f_code.co_name + self.hook.pytest_warning_recorded.call_historic( + kwargs=dict( + warning_message=records[0], + when="config", + nodeid="", + location=location, + ) + ) + + def addinivalue_line(self, name: str, line: str) -> None: + """Add a line to an ini-file option. 
The option must have been + declared but might not yet be set in which case the line becomes + the first line in its value.""" + x = self.getini(name) + assert isinstance(x, list) + x.append(line) # modifies the cached list inline + + def getini(self, name: str) -> Any: + """Return configuration value from an :ref:`ini file `. + + If a configuration value is not defined in an + :ref:`ini file `, then the ``default`` value provided while + registering the configuration through + :func:`parser.addini ` will be returned. + Please note that you can even provide ``None`` as a valid + default value. + + If ``default`` is not provided while registering using + :func:`parser.addini `, then a default value + based on the ``type`` parameter passed to + :func:`parser.addini ` will be returned. + The default values based on ``type`` are: + ``paths``, ``pathlist``, ``args`` and ``linelist`` : empty list ``[]`` + ``bool`` : ``False`` + ``string`` : empty string ``""`` + ``int`` : ``0`` + ``float`` : ``0.0`` + + If neither the ``default`` nor the ``type`` parameter is passed + while registering the configuration through + :func:`parser.addini `, then the configuration + is treated as a string and a default empty string '' is returned. + + If the specified name hasn't been registered through a prior + :func:`parser.addini ` call (usually from a + plugin), a ValueError is raised. + """ + try: + return self._inicache[name] + except KeyError: + self._inicache[name] = val = self._getini(name) + return val + + # Meant for easy monkeypatching by legacypath plugin. + # Can be inlined back (with no cover removed) once legacypath is gone. + def _getini_unknown_type(self, name: str, type: str, value: object): + msg = ( + f"Option {name} has unknown configuration type {type} with value {value!r}" + ) + raise ValueError(msg) # pragma: no cover + + def _getini(self, name: str): + try: + description, type, default = self._parser._inidict[name] + except KeyError as e: + raise ValueError(f"unknown configuration value: {name!r}") from e + override_value = self._get_override_ini_value(name) + if override_value is None: + try: + value = self.inicfg[name] + except KeyError: + return default + else: + value = override_value + # Coerce the values based on types. + # + # Note: some coercions are only required if we are reading from .ini files, because + # the file format doesn't contain type information, but when reading from toml we will + # get either str or list of str values (see _parse_ini_config_from_pyproject_toml). + # For example: + # + # ini: + # a_line_list = "tests acceptance" + # in this case, we need to split the string to obtain a list of strings. + # + # toml: + # a_line_list = ["tests", "acceptance"] + # in this case, we already have a list ready to use. 
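+ # Illustrative sketch (hypothetical values) for the "paths" branch
+ # below: with inipath=/repo/pytest.ini and the value "a b", the result
+ # is [Path("/repo/a"), Path("/repo/b")]; without an ini-file, paths are
+ # resolved against the invocation directory instead.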
+ # + if type == "paths": + dp = ( + self.inipath.parent + if self.inipath is not None + else self.invocation_params.dir + ) + input_values = shlex.split(value) if isinstance(value, str) else value + return [dp / x for x in input_values] + elif type == "args": + return shlex.split(value) if isinstance(value, str) else value + elif type == "linelist": + if isinstance(value, str): + return [t for t in map(lambda x: x.strip(), value.split("\n")) if t] + else: + return value + elif type == "bool": + return _strtobool(str(value).strip()) + elif type == "string": + return value + elif type == "int": + if not isinstance(value, str): + raise TypeError( + f"Expected an int string for option {name} of type integer, but got: {value!r}" + ) from None + return int(value) + elif type == "float": + if not isinstance(value, str): + raise TypeError( + f"Expected a float string for option {name} of type float, but got: {value!r}" + ) from None + return float(value) + elif type is None: + return value + else: + return self._getini_unknown_type(name, type, value) + + def _getconftest_pathlist( + self, name: str, path: pathlib.Path + ) -> list[pathlib.Path] | None: + try: + mod, relroots = self.pluginmanager._rget_with_confmod(name, path) + except KeyError: + return None + assert mod.__file__ is not None + modpath = pathlib.Path(mod.__file__).parent + values: list[pathlib.Path] = [] + for relroot in relroots: + if isinstance(relroot, os.PathLike): + relroot = pathlib.Path(relroot) + else: + relroot = relroot.replace("/", os.sep) + relroot = absolutepath(modpath / relroot) + values.append(relroot) + return values + + def _get_override_ini_value(self, name: str) -> str | None: + value = None + # override_ini is a list of "ini=value" options. + # Always use the last item if multiple values are set for same ini-name, + # e.g. -o foo=bar1 -o foo=bar2 will set foo to bar2. + for ini_config in self._override_ini: + try: + key, user_ini_value = ini_config.split("=", 1) + except ValueError as e: + raise UsageError( + f"-o/--override-ini expects option=value style (got: {ini_config!r})." + ) from e + else: + if key == name: + value = user_ini_value + return value + + def getoption(self, name: str, default: Any = notset, skip: bool = False): + """Return command line option value. + + :param name: Name of the option. You may also specify + the literal ``--OPT`` option instead of the "dest" option name. + :param default: Fallback value if no option of that name is **declared** via :hook:`pytest_addoption`. + Note this parameter will be ignored when the option is **declared** even if the option's value is ``None``. + :param skip: If ``True``, raise :func:`pytest.skip` if option is undeclared or has a ``None`` value. + Note that even if ``True``, if a default was specified it will be returned instead of a skip. + """ + name = self._opt2dest.get(name, name) + try: + val = getattr(self.option, name) + if val is None and skip: + raise AttributeError(name) + return val + except AttributeError as e: + if default is not notset: + return default + if skip: + import pytest + + pytest.skip(f"no {name!r} option found") + raise ValueError(f"no option named {name!r}") from e + + def getvalue(self, name: str, path=None): + """Deprecated, use getoption() instead.""" + return self.getoption(name) + + def getvalueorskip(self, name: str, path=None): + """Deprecated, use getoption(skip=True) instead.""" + return self.getoption(name, skip=True) + + #: Verbosity type for failed assertions (see :confval:`verbosity_assertions`). 
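+ # Illustrative sketch (hypothetical usage) of getoption() above:
+ #
+ #     config.getoption("--verbose")              # literal --OPT maps to its dest
+ #     config.getoption("capture", default="no")  # default used only if undeclared
+ #     config.getoption("numprocesses", skip=True)  # hypothetical xdist dest; skips if absent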
+ VERBOSITY_ASSERTIONS: Final = "assertions" + #: Verbosity type for test case execution (see :confval:`verbosity_test_cases`). + VERBOSITY_TEST_CASES: Final = "test_cases" + _VERBOSITY_INI_DEFAULT: Final = "auto" + + def get_verbosity(self, verbosity_type: str | None = None) -> int: + r"""Retrieve the verbosity level for a fine-grained verbosity type. + + :param verbosity_type: Verbosity type to get level for. If a level is + configured for the given type, that value will be returned. If the + given type is not a known verbosity type, the global verbosity + level will be returned. If the given type is None (default), the + global verbosity level will be returned. + + To configure a level for a fine-grained verbosity type, the + configuration file should have a setting for the configuration name + and a numeric value for the verbosity level. A special value of "auto" + can be used to explicitly use the global verbosity level. + + Example: + + .. code-block:: ini + + # content of pytest.ini + [pytest] + verbosity_assertions = 2 + + .. code-block:: console + + pytest -v + + .. code-block:: python + + print(config.get_verbosity()) # 1 + print(config.get_verbosity(Config.VERBOSITY_ASSERTIONS)) # 2 + """ + global_level = self.getoption("verbose", default=0) + assert isinstance(global_level, int) + if verbosity_type is None: + return global_level + + ini_name = Config._verbosity_ini_name(verbosity_type) + if ini_name not in self._parser._inidict: + return global_level + + level = self.getini(ini_name) + if level == Config._VERBOSITY_INI_DEFAULT: + return global_level + + return int(level) + + @staticmethod + def _verbosity_ini_name(verbosity_type: str) -> str: + return f"verbosity_{verbosity_type}" + + @staticmethod + def _add_verbosity_ini(parser: Parser, verbosity_type: str, help: str) -> None: + """Add a output verbosity configuration option for the given output type. + + :param parser: Parser for command line arguments and ini-file values. + :param verbosity_type: Fine-grained verbosity category. + :param help: Description of the output this type controls. + + The value should be retrieved via a call to + :py:func:`config.get_verbosity(type) `. + """ + parser.addini( + Config._verbosity_ini_name(verbosity_type), + help=help, + type="string", + default=Config._VERBOSITY_INI_DEFAULT, + ) + + def _warn_about_missing_assertion(self, mode: str) -> None: + if not _assertion_supported(): + if mode == "plain": + warning_text = ( + "ASSERTIONS ARE NOT EXECUTED" + " and FAILING TESTS WILL PASS. Are you" + " using python -O?" + ) + else: + warning_text = ( + "assertions not in test modules or" + " plugins will be ignored" + " because assert statements are not executed " + "by the underlying Python interpreter " + "(are you using python -O?)\n" + ) + self.issue_config_time_warning( + PytestConfigWarning(warning_text), + stacklevel=3, + ) + + def _warn_about_skipped_plugins(self) -> None: + for module_name, msg in self.pluginmanager.skipped_plugins: + self.issue_config_time_warning( + PytestConfigWarning(f"skipped plugin {module_name!r}: {msg}"), + stacklevel=2, + ) + + +def _assertion_supported() -> bool: + try: + assert False + except AssertionError: + return True + else: + return False # type: ignore[unreachable] + + +def create_terminal_writer( + config: Config, file: TextIO | None = None +) -> TerminalWriter: + """Create a TerminalWriter instance configured according to the options + in the config object. 
+ + Every code which requires a TerminalWriter object and has access to a + config object should use this function. + """ + tw = TerminalWriter(file=file) + + if config.option.color == "yes": + tw.hasmarkup = True + elif config.option.color == "no": + tw.hasmarkup = False + + if config.option.code_highlight == "yes": + tw.code_highlight = True + elif config.option.code_highlight == "no": + tw.code_highlight = False + + return tw + + +def _strtobool(val: str) -> bool: + """Convert a string representation of truth to True or False. + + True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values + are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if + 'val' is anything else. + + .. note:: Copied from distutils.util. + """ + val = val.lower() + if val in ("y", "yes", "t", "true", "on", "1"): + return True + elif val in ("n", "no", "f", "false", "off", "0"): + return False + else: + raise ValueError(f"invalid truth value {val!r}") + + +@lru_cache(maxsize=50) +def parse_warning_filter( + arg: str, *, escape: bool +) -> tuple[warnings._ActionKind, str, type[Warning], str, int]: + """Parse a warnings filter string. + + This is copied from warnings._setoption with the following changes: + + * Does not apply the filter. + * Escaping is optional. + * Raises UsageError so we get nice error messages on failure. + """ + __tracebackhide__ = True + error_template = dedent( + f"""\ + while parsing the following warning configuration: + + {arg} + + This error occurred: + + {{error}} + """ + ) + + parts = arg.split(":") + if len(parts) > 5: + doc_url = ( + "https://docs.python.org/3/library/warnings.html#describing-warning-filters" + ) + error = dedent( + f"""\ + Too many fields ({len(parts)}), expected at most 5 separated by colons: + + action:message:category:module:line + + For more information please consult: {doc_url} + """ + ) + raise UsageError(error_template.format(error=error)) + + while len(parts) < 5: + parts.append("") + action_, message, category_, module, lineno_ = (s.strip() for s in parts) + try: + action: warnings._ActionKind = warnings._getaction(action_) # type: ignore[attr-defined] + except warnings._OptionError as e: + raise UsageError(error_template.format(error=str(e))) from None + try: + category: type[Warning] = _resolve_warning_category(category_) + except Exception: + exc_info = ExceptionInfo.from_current() + exception_text = exc_info.getrepr(style="native") + raise UsageError(error_template.format(error=exception_text)) from None + if message and escape: + message = re.escape(message) + if module and escape: + module = re.escape(module) + r"\Z" + if lineno_: + try: + lineno = int(lineno_) + if lineno < 0: + raise ValueError("number is negative") + except ValueError as e: + raise UsageError( + error_template.format(error=f"invalid lineno {lineno_!r}: {e}") + ) from None + else: + lineno = 0 + try: + re.compile(message) + re.compile(module) + except re.error as e: + raise UsageError( + error_template.format(error=f"Invalid regex {e.pattern!r}: {e}") + ) from None + return action, message, category, module, lineno + + +def _resolve_warning_category(category: str) -> type[Warning]: + """ + Copied from warnings._getcategory, but changed so it lets exceptions (specially ImportErrors) + propagate so we can get access to their tracebacks (#9218). + """ + __tracebackhide__ = True + if not category: + return Warning + + if "." 
not in category: + import builtins as m + + klass = category + else: + module, _, klass = category.rpartition(".") + m = __import__(module, None, None, [klass]) + cat = getattr(m, klass) + if not issubclass(cat, Warning): + raise UsageError(f"{cat} is not a Warning subclass") + return cast(type[Warning], cat) + + +def apply_warning_filters( + config_filters: Iterable[str], cmdline_filters: Iterable[str] +) -> None: + """Applies pytest-configured filters to the warnings module""" + # Filters should have this precedence: cmdline options, config. + # Filters should be applied in the inverse order of precedence. + for arg in config_filters: + warnings.filterwarnings(*parse_warning_filter(arg, escape=False)) + + for arg in cmdline_filters: + warnings.filterwarnings(*parse_warning_filter(arg, escape=True)) diff --git a/.venv/lib/python3.11/site-packages/_pytest/config/argparsing.py b/.venv/lib/python3.11/site-packages/_pytest/config/argparsing.py new file mode 100644 index 0000000..8d4ed82 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/config/argparsing.py @@ -0,0 +1,535 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +import argparse +from collections.abc import Callable +from collections.abc import Mapping +from collections.abc import Sequence +import os +from typing import Any +from typing import cast +from typing import final +from typing import Literal +from typing import NoReturn + +import _pytest._io +from _pytest.config.exceptions import UsageError +from _pytest.deprecated import check_ispytest + + +FILE_OR_DIR = "file_or_dir" + + +class NotSet: + def __repr__(self) -> str: + return "" + + +NOT_SET = NotSet() + + +@final +class Parser: + """Parser for command line arguments and ini-file values. + + :ivar extra_info: Dict of generic param -> value to display in case + there's an error processing the command line arguments. + """ + + prog: str | None = None + + def __init__( + self, + usage: str | None = None, + processopt: Callable[[Argument], None] | None = None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + self._anonymous = OptionGroup("Custom options", parser=self, _ispytest=True) + self._groups: list[OptionGroup] = [] + self._processopt = processopt + self._usage = usage + self._inidict: dict[str, tuple[str, str | None, Any]] = {} + self._ininames: list[str] = [] + self.extra_info: dict[str, Any] = {} + + def processoption(self, option: Argument) -> None: + if self._processopt: + if option.dest: + self._processopt(option) + + def getgroup( + self, name: str, description: str = "", after: str | None = None + ) -> OptionGroup: + """Get (or create) a named option Group. + + :param name: Name of the option group. + :param description: Long description for --help output. + :param after: Name of another group, used for ordering --help output. + :returns: The option group. + + The returned group object has an ``addoption`` method with the same + signature as :func:`parser.addoption ` but + will be shown in the respective group in the output of + ``pytest --help``. + """ + for group in self._groups: + if group.name == name: + return group + group = OptionGroup(name, description, parser=self, _ispytest=True) + i = 0 + for i, grp in enumerate(self._groups): + if grp.name == after: + break + self._groups.insert(i + 1, group) + return group + + def addoption(self, *opts: str, **attrs: Any) -> None: + """Register a command line option. + + :param opts: + Option names, can be short or long options. 
+ :param attrs: + Same attributes as the argparse library's :meth:`add_argument() + ` function accepts. + + After command line parsing, options are available on the pytest config + object via ``config.option.NAME`` where ``NAME`` is usually set + by passing a ``dest`` attribute, for example + ``addoption("--long", dest="NAME", ...)``. + """ + self._anonymous.addoption(*opts, **attrs) + + def parse( + self, + args: Sequence[str | os.PathLike[str]], + namespace: argparse.Namespace | None = None, + ) -> argparse.Namespace: + from _pytest._argcomplete import try_argcomplete + + self.optparser = self._getparser() + try_argcomplete(self.optparser) + strargs = [os.fspath(x) for x in args] + return self.optparser.parse_args(strargs, namespace=namespace) + + def _getparser(self) -> MyOptionParser: + from _pytest._argcomplete import filescompleter + + optparser = MyOptionParser(self, self.extra_info, prog=self.prog) + groups = [*self._groups, self._anonymous] + for group in groups: + if group.options: + desc = group.description or group.name + arggroup = optparser.add_argument_group(desc) + for option in group.options: + n = option.names() + a = option.attrs() + arggroup.add_argument(*n, **a) + file_or_dir_arg = optparser.add_argument(FILE_OR_DIR, nargs="*") + # bash like autocompletion for dirs (appending '/') + # Type ignored because typeshed doesn't know about argcomplete. + file_or_dir_arg.completer = filescompleter # type: ignore + return optparser + + def parse_setoption( + self, + args: Sequence[str | os.PathLike[str]], + option: argparse.Namespace, + namespace: argparse.Namespace | None = None, + ) -> list[str]: + parsedoption = self.parse(args, namespace=namespace) + for name, value in parsedoption.__dict__.items(): + setattr(option, name, value) + return cast(list[str], getattr(parsedoption, FILE_OR_DIR)) + + def parse_known_args( + self, + args: Sequence[str | os.PathLike[str]], + namespace: argparse.Namespace | None = None, + ) -> argparse.Namespace: + """Parse the known arguments at this point. + + :returns: An argparse namespace object. + """ + return self.parse_known_and_unknown_args(args, namespace=namespace)[0] + + def parse_known_and_unknown_args( + self, + args: Sequence[str | os.PathLike[str]], + namespace: argparse.Namespace | None = None, + ) -> tuple[argparse.Namespace, list[str]]: + """Parse the known arguments at this point, and also return the + remaining unknown arguments. + + :returns: + A tuple containing an argparse namespace object for the known + arguments, and a list of the unknown arguments. + """ + optparser = self._getparser() + strargs = [os.fspath(x) for x in args] + return optparser.parse_known_args(strargs, namespace=namespace) + + def addini( + self, + name: str, + help: str, + type: Literal[ + "string", "paths", "pathlist", "args", "linelist", "bool", "int", "float" + ] + | None = None, + default: Any = NOT_SET, + ) -> None: + """Register an ini-file option. + + :param name: + Name of the ini-variable. + :param type: + Type of the variable. Can be: + + * ``string``: a string + * ``bool``: a boolean + * ``args``: a list of strings, separated as in a shell + * ``linelist``: a list of strings, separated by line breaks + * ``paths``: a list of :class:`pathlib.Path`, separated as in a shell + * ``pathlist``: a list of ``py.path``, separated as in a shell + * ``int``: an integer + * ``float``: a floating-point number + + .. versionadded:: 8.4 + + The ``float`` and ``int`` types. 
+ + For ``paths`` and ``pathlist`` types, they are considered relative to the ini-file. + In case the execution is happening without an ini-file defined, + they will be considered relative to the current working directory (for example with ``--override-ini``). + + .. versionadded:: 7.0 + The ``paths`` variable type. + + .. versionadded:: 8.1 + Use the current working directory to resolve ``paths`` and ``pathlist`` in the absence of an ini-file. + + Defaults to ``string`` if ``None`` or not passed. + :param default: + Default value if no ini-file option exists but is queried. + + The value of ini-variables can be retrieved via a call to + :py:func:`config.getini(name) `. + """ + assert type in ( + None, + "string", + "paths", + "pathlist", + "args", + "linelist", + "bool", + "int", + "float", + ) + if default is NOT_SET: + default = get_ini_default_for_type(type) + + self._inidict[name] = (help, type, default) + self._ininames.append(name) + + +def get_ini_default_for_type( + type: Literal[ + "string", "paths", "pathlist", "args", "linelist", "bool", "int", "float" + ] + | None, +) -> Any: + """ + Used by addini to get the default value for a given ini-option type, when + default is not supplied. + """ + if type is None: + return "" + elif type in ("paths", "pathlist", "args", "linelist"): + return [] + elif type == "bool": + return False + elif type == "int": + return 0 + elif type == "float": + return 0.0 + else: + return "" + + +class ArgumentError(Exception): + """Raised if an Argument instance is created with invalid or + inconsistent arguments.""" + + def __init__(self, msg: str, option: Argument | str) -> None: + self.msg = msg + self.option_id = str(option) + + def __str__(self) -> str: + if self.option_id: + return f"option {self.option_id}: {self.msg}" + else: + return self.msg + + +class Argument: + """Class that mimics the necessary behaviour of optparse.Option. + + It's currently a least effort implementation and ignoring choices + and integer prefixes. + + https://docs.python.org/3/library/optparse.html#optparse-standard-option-types + """ + + def __init__(self, *names: str, **attrs: Any) -> None: + """Store params in private vars for use in add_argument.""" + self._attrs = attrs + self._short_opts: list[str] = [] + self._long_opts: list[str] = [] + try: + self.type = attrs["type"] + except KeyError: + pass + try: + # Attribute existence is tested in Config._processopt. + self.default = attrs["default"] + except KeyError: + pass + self._set_opt_strings(names) + dest: str | None = attrs.get("dest") + if dest: + self.dest = dest + elif self._long_opts: + self.dest = self._long_opts[0][2:].replace("-", "_") + else: + try: + self.dest = self._short_opts[0][1:] + except IndexError as e: + self.dest = "???" # Needed for the error repr. + raise ArgumentError("need a long or short option", self) from e + + def names(self) -> list[str]: + return self._short_opts + self._long_opts + + def attrs(self) -> Mapping[str, Any]: + # Update any attributes set by processopt. + attrs = "default dest help".split() + attrs.append(self.dest) + for attr in attrs: + try: + self._attrs[attr] = getattr(self, attr) + except AttributeError: + pass + return self._attrs + + def _set_opt_strings(self, opts: Sequence[str]) -> None: + """Directly from optparse. + + Might not be necessary as this is passed to argparse later on. 
+ """ + for opt in opts: + if len(opt) < 2: + raise ArgumentError( + f"invalid option string {opt!r}: " + "must be at least two characters long", + self, + ) + elif len(opt) == 2: + if not (opt[0] == "-" and opt[1] != "-"): + raise ArgumentError( + f"invalid short option string {opt!r}: " + "must be of the form -x, (x any non-dash char)", + self, + ) + self._short_opts.append(opt) + else: + if not (opt[0:2] == "--" and opt[2] != "-"): + raise ArgumentError( + f"invalid long option string {opt!r}: " + "must start with --, followed by non-dash", + self, + ) + self._long_opts.append(opt) + + def __repr__(self) -> str: + args: list[str] = [] + if self._short_opts: + args += ["_short_opts: " + repr(self._short_opts)] + if self._long_opts: + args += ["_long_opts: " + repr(self._long_opts)] + args += ["dest: " + repr(self.dest)] + if hasattr(self, "type"): + args += ["type: " + repr(self.type)] + if hasattr(self, "default"): + args += ["default: " + repr(self.default)] + return "Argument({})".format(", ".join(args)) + + +class OptionGroup: + """A group of options shown in its own section.""" + + def __init__( + self, + name: str, + description: str = "", + parser: Parser | None = None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + self.name = name + self.description = description + self.options: list[Argument] = [] + self.parser = parser + + def addoption(self, *opts: str, **attrs: Any) -> None: + """Add an option to this group. + + If a shortened version of a long option is specified, it will + be suppressed in the help. ``addoption('--twowords', '--two-words')`` + results in help showing ``--two-words`` only, but ``--twowords`` gets + accepted **and** the automatic destination is in ``args.twowords``. + + :param opts: + Option names, can be short or long options. + :param attrs: + Same attributes as the argparse library's :meth:`add_argument() + ` function accepts. + """ + conflict = set(opts).intersection( + name for opt in self.options for name in opt.names() + ) + if conflict: + raise ValueError(f"option names {conflict} already added") + option = Argument(*opts, **attrs) + self._addoption_instance(option, shortupper=False) + + def _addoption(self, *opts: str, **attrs: Any) -> None: + option = Argument(*opts, **attrs) + self._addoption_instance(option, shortupper=True) + + def _addoption_instance(self, option: Argument, shortupper: bool = False) -> None: + if not shortupper: + for opt in option._short_opts: + if opt[0] == "-" and opt[1].islower(): + raise ValueError("lowercase shortoptions reserved") + if self.parser: + self.parser.processoption(option) + self.options.append(option) + + +class MyOptionParser(argparse.ArgumentParser): + def __init__( + self, + parser: Parser, + extra_info: dict[str, Any] | None = None, + prog: str | None = None, + ) -> None: + self._parser = parser + super().__init__( + prog=prog, + usage=parser._usage, + add_help=False, + formatter_class=DropShorterLongHelpFormatter, + allow_abbrev=False, + fromfile_prefix_chars="@", + ) + # extra_info is a dict of (param -> value) to display if there's + # an usage error to provide more contextual information to the user. 
+ self.extra_info = extra_info if extra_info else {} + + def error(self, message: str) -> NoReturn: + """Transform argparse error message into UsageError.""" + msg = f"{self.prog}: error: {message}" + + if hasattr(self._parser, "_config_source_hint"): + msg = f"{msg} ({self._parser._config_source_hint})" + + raise UsageError(self.format_usage() + msg) + + # Type ignored because typeshed has a very complex type in the superclass. + def parse_args( # type: ignore + self, + args: Sequence[str] | None = None, + namespace: argparse.Namespace | None = None, + ) -> argparse.Namespace: + """Allow splitting of positional arguments.""" + parsed, unrecognized = self.parse_known_args(args, namespace) + if unrecognized: + for arg in unrecognized: + if arg and arg[0] == "-": + lines = [ + "unrecognized arguments: {}".format(" ".join(unrecognized)) + ] + for k, v in sorted(self.extra_info.items()): + lines.append(f" {k}: {v}") + self.error("\n".join(lines)) + getattr(parsed, FILE_OR_DIR).extend(unrecognized) + return parsed + + +class DropShorterLongHelpFormatter(argparse.HelpFormatter): + """Shorten help for long options that differ only in extra hyphens. + + - Collapse **long** options that are the same except for extra hyphens. + - Shortcut if there are only two options and one of them is a short one. + - Cache result on the action object as this is called at least 2 times. + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + # Use more accurate terminal width. + if "width" not in kwargs: + kwargs["width"] = _pytest._io.get_terminal_width() + super().__init__(*args, **kwargs) + + def _format_action_invocation(self, action: argparse.Action) -> str: + orgstr = super()._format_action_invocation(action) + if orgstr and orgstr[0] != "-": # only optional arguments + return orgstr + res: str | None = getattr(action, "_formatted_action_invocation", None) + if res: + return res + options = orgstr.split(", ") + if len(options) == 2 and (len(options[0]) == 2 or len(options[1]) == 2): + # a shortcut for '-h, --help' or '--abc', '-a' + action._formatted_action_invocation = orgstr # type: ignore + return orgstr + return_list = [] + short_long: dict[str, str] = {} + for option in options: + if len(option) == 2 or option[2] == " ": + continue + if not option.startswith("--"): + raise ArgumentError( + f'long optional argument without "--": [{option}]', option + ) + xxoption = option[2:] + shortened = xxoption.replace("-", "") + if shortened not in short_long or len(short_long[shortened]) < len( + xxoption + ): + short_long[shortened] = xxoption + # now short_long has been filled out to the longest with dashes + # **and** we keep the right option ordering from add_argument + for option in options: + if len(option) == 2 or option[2] == " ": + return_list.append(option) + if option[2:] == short_long.get(option.replace("-", "")): + return_list.append(option.replace(" ", "=", 1)) + formatted_action_invocation = ", ".join(return_list) + action._formatted_action_invocation = formatted_action_invocation # type: ignore + return formatted_action_invocation + + def _split_lines(self, text, width): + """Wrap lines after splitting on original newlines. + + This allows to have explicit line breaks in the help text. 
+ """ + import textwrap + + lines = [] + for line in text.splitlines(): + lines.extend(textwrap.wrap(line.strip(), width)) + return lines diff --git a/.venv/lib/python3.11/site-packages/_pytest/config/compat.py b/.venv/lib/python3.11/site-packages/_pytest/config/compat.py new file mode 100644 index 0000000..21eab4c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/config/compat.py @@ -0,0 +1,85 @@ +from __future__ import annotations + +from collections.abc import Mapping +import functools +from pathlib import Path +from typing import Any +import warnings + +import pluggy + +from ..compat import LEGACY_PATH +from ..compat import legacy_path +from ..deprecated import HOOK_LEGACY_PATH_ARG + + +# hookname: (Path, LEGACY_PATH) +imply_paths_hooks: Mapping[str, tuple[str, str]] = { + "pytest_ignore_collect": ("collection_path", "path"), + "pytest_collect_file": ("file_path", "path"), + "pytest_pycollect_makemodule": ("module_path", "path"), + "pytest_report_header": ("start_path", "startdir"), + "pytest_report_collectionfinish": ("start_path", "startdir"), +} + + +def _check_path(path: Path, fspath: LEGACY_PATH) -> None: + if Path(fspath) != path: + raise ValueError( + f"Path({fspath!r}) != {path!r}\n" + "if both path and fspath are given they need to be equal" + ) + + +class PathAwareHookProxy: + """ + this helper wraps around hook callers + until pluggy supports fixingcalls, this one will do + + it currently doesn't return full hook caller proxies for fixed hooks, + this may have to be changed later depending on bugs + """ + + def __init__(self, hook_relay: pluggy.HookRelay) -> None: + self._hook_relay = hook_relay + + def __dir__(self) -> list[str]: + return dir(self._hook_relay) + + def __getattr__(self, key: str) -> pluggy.HookCaller: + hook: pluggy.HookCaller = getattr(self._hook_relay, key) + if key not in imply_paths_hooks: + self.__dict__[key] = hook + return hook + else: + path_var, fspath_var = imply_paths_hooks[key] + + @functools.wraps(hook) + def fixed_hook(**kw: Any) -> Any: + path_value: Path | None = kw.pop(path_var, None) + fspath_value: LEGACY_PATH | None = kw.pop(fspath_var, None) + if fspath_value is not None: + warnings.warn( + HOOK_LEGACY_PATH_ARG.format( + pylib_path_arg=fspath_var, pathlib_path_arg=path_var + ), + stacklevel=2, + ) + if path_value is not None: + if fspath_value is not None: + _check_path(path_value, fspath_value) + else: + fspath_value = legacy_path(path_value) + else: + assert fspath_value is not None + path_value = Path(fspath_value) + + kw[path_var] = path_value + kw[fspath_var] = fspath_value + return hook(**kw) + + fixed_hook.name = hook.name # type: ignore[attr-defined] + fixed_hook.spec = hook.spec # type: ignore[attr-defined] + fixed_hook.__name__ = key + self.__dict__[key] = fixed_hook + return fixed_hook # type: ignore[return-value] diff --git a/.venv/lib/python3.11/site-packages/_pytest/config/exceptions.py b/.venv/lib/python3.11/site-packages/_pytest/config/exceptions.py new file mode 100644 index 0000000..90108ec --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/config/exceptions.py @@ -0,0 +1,13 @@ +from __future__ import annotations + +from typing import final + + +@final +class UsageError(Exception): + """Error in pytest usage or invocation.""" + + +class PrintHelp(Exception): + """Raised when pytest should print its help to skip the rest of the + argument parsing and validation.""" diff --git a/.venv/lib/python3.11/site-packages/_pytest/config/findpaths.py 
b/.venv/lib/python3.11/site-packages/_pytest/config/findpaths.py new file mode 100644 index 0000000..15bfbb0 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/config/findpaths.py @@ -0,0 +1,239 @@ +from __future__ import annotations + +from collections.abc import Iterable +from collections.abc import Sequence +import os +from pathlib import Path +import sys +from typing import TYPE_CHECKING + +import iniconfig + +from .exceptions import UsageError +from _pytest.outcomes import fail +from _pytest.pathlib import absolutepath +from _pytest.pathlib import commonpath +from _pytest.pathlib import safe_exists + + +if TYPE_CHECKING: + from typing import Union + + from typing_extensions import TypeAlias + + # Even though TOML supports richer data types, all values are converted to str/list[str] during + # parsing to maintain compatibility with the rest of the configuration system. + ConfigDict: TypeAlias = dict[str, Union[str, list[str]]] + + +def _parse_ini_config(path: Path) -> iniconfig.IniConfig: + """Parse the given generic '.ini' file using legacy IniConfig parser, returning + the parsed object. + + Raise UsageError if the file cannot be parsed. + """ + try: + return iniconfig.IniConfig(str(path)) + except iniconfig.ParseError as exc: + raise UsageError(str(exc)) from exc + + +def load_config_dict_from_file( + filepath: Path, +) -> ConfigDict | None: + """Load pytest configuration from the given file path, if supported. + + Return None if the file does not contain valid pytest configuration. + """ + # Configuration from ini files are obtained from the [pytest] section, if present. + if filepath.suffix == ".ini": + iniconfig = _parse_ini_config(filepath) + + if "pytest" in iniconfig: + return dict(iniconfig["pytest"].items()) + else: + # "pytest.ini" files are always the source of configuration, even if empty. + if filepath.name == "pytest.ini": + return {} + + # '.cfg' files are considered if they contain a "[tool:pytest]" section. + elif filepath.suffix == ".cfg": + iniconfig = _parse_ini_config(filepath) + + if "tool:pytest" in iniconfig.sections: + return dict(iniconfig["tool:pytest"].items()) + elif "pytest" in iniconfig.sections: + # If a setup.cfg contains a "[pytest]" section, we raise a failure to indicate users that + # plain "[pytest]" sections in setup.cfg files is no longer supported (#3086). + fail(CFG_PYTEST_SECTION.format(filename="setup.cfg"), pytrace=False) + + # '.toml' files are considered if they contain a [tool.pytest.ini_options] table. + elif filepath.suffix == ".toml": + if sys.version_info >= (3, 11): + import tomllib + else: + import tomli as tomllib + + toml_text = filepath.read_text(encoding="utf-8") + try: + config = tomllib.loads(toml_text) + except tomllib.TOMLDecodeError as exc: + raise UsageError(f"{filepath}: {exc}") from exc + + result = config.get("tool", {}).get("pytest", {}).get("ini_options", None) + if result is not None: + # TOML supports richer data types than ini files (strings, arrays, floats, ints, etc), + # however we need to convert all scalar values to str for compatibility with the rest + # of the configuration system, which expects strings only. 
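+            # A minimal sketch of that conversion (hypothetical table, not
+            # from the original comment):
+            #
+            #     [tool.pytest.ini_options]
+            #     minversion = 6.0
+            #     testpaths = ["tests", "integration"]
+            #
+            # make_scalar() below stringifies the float ("6.0") and returns
+            # the list unchanged, so the result is
+            # {"minversion": "6.0", "testpaths": ["tests", "integration"]}.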
+ def make_scalar(v: object) -> str | list[str]: + return v if isinstance(v, list) else str(v) + + return {k: make_scalar(v) for k, v in result.items()} + + return None + + +def locate_config( + invocation_dir: Path, + args: Iterable[Path], +) -> tuple[Path | None, Path | None, ConfigDict]: + """Search in the list of arguments for a valid ini-file for pytest, + and return a tuple of (rootdir, inifile, cfg-dict).""" + config_names = [ + "pytest.ini", + ".pytest.ini", + "pyproject.toml", + "tox.ini", + "setup.cfg", + ] + args = [x for x in args if not str(x).startswith("-")] + if not args: + args = [invocation_dir] + found_pyproject_toml: Path | None = None + for arg in args: + argpath = absolutepath(arg) + for base in (argpath, *argpath.parents): + for config_name in config_names: + p = base / config_name + if p.is_file(): + if p.name == "pyproject.toml" and found_pyproject_toml is None: + found_pyproject_toml = p + ini_config = load_config_dict_from_file(p) + if ini_config is not None: + return base, p, ini_config + if found_pyproject_toml is not None: + return found_pyproject_toml.parent, found_pyproject_toml, {} + return None, None, {} + + +def get_common_ancestor( + invocation_dir: Path, + paths: Iterable[Path], +) -> Path: + common_ancestor: Path | None = None + for path in paths: + if not path.exists(): + continue + if common_ancestor is None: + common_ancestor = path + else: + if common_ancestor in path.parents or path == common_ancestor: + continue + elif path in common_ancestor.parents: + common_ancestor = path + else: + shared = commonpath(path, common_ancestor) + if shared is not None: + common_ancestor = shared + if common_ancestor is None: + common_ancestor = invocation_dir + elif common_ancestor.is_file(): + common_ancestor = common_ancestor.parent + return common_ancestor + + +def get_dirs_from_args(args: Iterable[str]) -> list[Path]: + def is_option(x: str) -> bool: + return x.startswith("-") + + def get_file_part_from_node_id(x: str) -> str: + return x.split("::")[0] + + def get_dir_from_path(path: Path) -> Path: + if path.is_dir(): + return path + return path.parent + + # These look like paths but may not exist + possible_paths = ( + absolutepath(get_file_part_from_node_id(arg)) + for arg in args + if not is_option(arg) + ) + + return [get_dir_from_path(path) for path in possible_paths if safe_exists(path)] + + +CFG_PYTEST_SECTION = "[pytest] section in {filename} files is no longer supported, change to [tool:pytest] instead." + + +def determine_setup( + *, + inifile: str | None, + args: Sequence[str], + rootdir_cmd_arg: str | None, + invocation_dir: Path, +) -> tuple[Path, Path | None, ConfigDict]: + """Determine the rootdir, inifile and ini configuration values from the + command line arguments. + + :param inifile: + The `--inifile` command line argument, if given. + :param args: + The free command line arguments. + :param rootdir_cmd_arg: + The `--rootdir` command line argument, if given. + :param invocation_dir: + The working directory when pytest was invoked. 
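+
+    For example (illustrative): running ``pytest tests/`` in a repository
+    whose root holds a ``pyproject.toml`` typically resolves rootdir to that
+    repository root, with inipath pointing at the ``pyproject.toml``.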
+ """ + rootdir = None + dirs = get_dirs_from_args(args) + if inifile: + inipath_ = absolutepath(inifile) + inipath: Path | None = inipath_ + inicfg = load_config_dict_from_file(inipath_) or {} + if rootdir_cmd_arg is None: + rootdir = inipath_.parent + else: + ancestor = get_common_ancestor(invocation_dir, dirs) + rootdir, inipath, inicfg = locate_config(invocation_dir, [ancestor]) + if rootdir is None and rootdir_cmd_arg is None: + for possible_rootdir in (ancestor, *ancestor.parents): + if (possible_rootdir / "setup.py").is_file(): + rootdir = possible_rootdir + break + else: + if dirs != [ancestor]: + rootdir, inipath, inicfg = locate_config(invocation_dir, dirs) + if rootdir is None: + rootdir = get_common_ancestor( + invocation_dir, [invocation_dir, ancestor] + ) + if is_fs_root(rootdir): + rootdir = ancestor + if rootdir_cmd_arg: + rootdir = absolutepath(os.path.expandvars(rootdir_cmd_arg)) + if not rootdir.is_dir(): + raise UsageError( + f"Directory '{rootdir}' not found. Check your '--rootdir' option." + ) + assert rootdir is not None + return rootdir, inipath, inicfg or {} + + +def is_fs_root(p: Path) -> bool: + r""" + Return True if the given path is pointing to the root of the + file system ("/" on Unix and "C:\\" on Windows for example). + """ + return os.path.splitdrive(str(p))[1] == os.sep diff --git a/.venv/lib/python3.11/site-packages/_pytest/debugging.py b/.venv/lib/python3.11/site-packages/_pytest/debugging.py new file mode 100644 index 0000000..de1b268 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/debugging.py @@ -0,0 +1,407 @@ +# mypy: allow-untyped-defs +# ruff: noqa: T100 +"""Interactive debugging with PDB, the Python Debugger.""" + +from __future__ import annotations + +import argparse +from collections.abc import Callable +from collections.abc import Generator +import functools +import sys +import types +from typing import Any +import unittest + +from _pytest import outcomes +from _pytest._code import ExceptionInfo +from _pytest.capture import CaptureManager +from _pytest.config import Config +from _pytest.config import ConftestImportFailure +from _pytest.config import hookimpl +from _pytest.config import PytestPluginManager +from _pytest.config.argparsing import Parser +from _pytest.config.exceptions import UsageError +from _pytest.nodes import Node +from _pytest.reports import BaseReport +from _pytest.runner import CallInfo + + +def _validate_usepdb_cls(value: str) -> tuple[str, str]: + """Validate syntax of --pdbcls option.""" + try: + modname, classname = value.split(":") + except ValueError as e: + raise argparse.ArgumentTypeError( + f"{value!r} is not in the format 'modname:classname'" + ) from e + return (modname, classname) + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group.addoption( + "--pdb", + dest="usepdb", + action="store_true", + help="Start the interactive Python debugger on errors or KeyboardInterrupt", + ) + group.addoption( + "--pdbcls", + dest="usepdb_cls", + metavar="modulename:classname", + type=_validate_usepdb_cls, + help="Specify a custom interactive Python debugger for use with --pdb." 
+ "For example: --pdbcls=IPython.terminal.debugger:TerminalPdb", + ) + group.addoption( + "--trace", + dest="trace", + action="store_true", + help="Immediately break when running each test", + ) + + +def pytest_configure(config: Config) -> None: + import pdb + + if config.getvalue("trace"): + config.pluginmanager.register(PdbTrace(), "pdbtrace") + if config.getvalue("usepdb"): + config.pluginmanager.register(PdbInvoke(), "pdbinvoke") + + pytestPDB._saved.append( + (pdb.set_trace, pytestPDB._pluginmanager, pytestPDB._config) + ) + pdb.set_trace = pytestPDB.set_trace + pytestPDB._pluginmanager = config.pluginmanager + pytestPDB._config = config + + # NOTE: not using pytest_unconfigure, since it might get called although + # pytest_configure was not (if another plugin raises UsageError). + def fin() -> None: + ( + pdb.set_trace, + pytestPDB._pluginmanager, + pytestPDB._config, + ) = pytestPDB._saved.pop() + + config.add_cleanup(fin) + + +class pytestPDB: + """Pseudo PDB that defers to the real pdb.""" + + _pluginmanager: PytestPluginManager | None = None + _config: Config | None = None + _saved: list[ + tuple[Callable[..., None], PytestPluginManager | None, Config | None] + ] = [] + _recursive_debug = 0 + _wrapped_pdb_cls: tuple[type[Any], type[Any]] | None = None + + @classmethod + def _is_capturing(cls, capman: CaptureManager | None) -> str | bool: + if capman: + return capman.is_capturing() + return False + + @classmethod + def _import_pdb_cls(cls, capman: CaptureManager | None): + if not cls._config: + import pdb + + # Happens when using pytest.set_trace outside of a test. + return pdb.Pdb + + usepdb_cls = cls._config.getvalue("usepdb_cls") + + if cls._wrapped_pdb_cls and cls._wrapped_pdb_cls[0] == usepdb_cls: + return cls._wrapped_pdb_cls[1] + + if usepdb_cls: + modname, classname = usepdb_cls + + try: + __import__(modname) + mod = sys.modules[modname] + + # Handle --pdbcls=pdb:pdb.Pdb (useful e.g. with pdbpp). 
+ parts = classname.split(".") + pdb_cls = getattr(mod, parts[0]) + for part in parts[1:]: + pdb_cls = getattr(pdb_cls, part) + except Exception as exc: + value = ":".join((modname, classname)) + raise UsageError( + f"--pdbcls: could not import {value!r}: {exc}" + ) from exc + else: + import pdb + + pdb_cls = pdb.Pdb + + wrapped_cls = cls._get_pdb_wrapper_class(pdb_cls, capman) + cls._wrapped_pdb_cls = (usepdb_cls, wrapped_cls) + return wrapped_cls + + @classmethod + def _get_pdb_wrapper_class(cls, pdb_cls, capman: CaptureManager | None): + import _pytest.config + + class PytestPdbWrapper(pdb_cls): + _pytest_capman = capman + _continued = False + + def do_debug(self, arg): + cls._recursive_debug += 1 + ret = super().do_debug(arg) + cls._recursive_debug -= 1 + return ret + + if hasattr(pdb_cls, "do_debug"): + do_debug.__doc__ = pdb_cls.do_debug.__doc__ + + def do_continue(self, arg): + ret = super().do_continue(arg) + if cls._recursive_debug == 0: + assert cls._config is not None + tw = _pytest.config.create_terminal_writer(cls._config) + tw.line() + + capman = self._pytest_capman + capturing = pytestPDB._is_capturing(capman) + if capturing: + if capturing == "global": + tw.sep(">", "PDB continue (IO-capturing resumed)") + else: + tw.sep( + ">", + f"PDB continue (IO-capturing resumed for {capturing})", + ) + assert capman is not None + capman.resume() + else: + tw.sep(">", "PDB continue") + assert cls._pluginmanager is not None + cls._pluginmanager.hook.pytest_leave_pdb(config=cls._config, pdb=self) + self._continued = True + return ret + + if hasattr(pdb_cls, "do_continue"): + do_continue.__doc__ = pdb_cls.do_continue.__doc__ + + do_c = do_cont = do_continue + + def do_quit(self, arg): + # Raise Exit outcome when quit command is used in pdb. + # + # This is a bit of a hack - it would be better if BdbQuit + # could be handled, but this would require to wrap the + # whole pytest run, and adjust the report etc. + ret = super().do_quit(arg) + + if cls._recursive_debug == 0: + outcomes.exit("Quitting debugger") + + return ret + + if hasattr(pdb_cls, "do_quit"): + do_quit.__doc__ = pdb_cls.do_quit.__doc__ + + do_q = do_quit + do_exit = do_quit + + def setup(self, f, tb): + """Suspend on setup(). + + Needed after do_continue resumed, and entering another + breakpoint again. + """ + ret = super().setup(f, tb) + if not ret and self._continued: + # pdb.setup() returns True if the command wants to exit + # from the interaction: do not suspend capturing then. + if self._pytest_capman: + self._pytest_capman.suspend_global_capture(in_=True) + return ret + + def get_stack(self, f, t): + stack, i = super().get_stack(f, t) + if f is None: + # Find last non-hidden frame. + i = max(0, len(stack) - 1) + while i and stack[i][0].f_locals.get("__tracebackhide__", False): + i -= 1 + return stack, i + + return PytestPdbWrapper + + @classmethod + def _init_pdb(cls, method, *args, **kwargs): + """Initialize PDB debugging, dropping any IO capturing.""" + import _pytest.config + + if cls._pluginmanager is None: + capman: CaptureManager | None = None + else: + capman = cls._pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend(in_=True) + + if cls._config: + tw = _pytest.config.create_terminal_writer(cls._config) + tw.line() + + if cls._recursive_debug == 0: + # Handle header similar to pdb.set_trace in py37+. 
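+            # Illustrative usage (not from the original source): the stdlib
+            # supports pdb.set_trace(header="checking state"), and because
+            # pytest_configure() rebinds pdb.set_trace to pytestPDB.set_trace,
+            # that same keyword arrives here through **kwargs.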
+ header = kwargs.pop("header", None) + if header is not None: + tw.sep(">", header) + else: + capturing = cls._is_capturing(capman) + if capturing == "global": + tw.sep(">", f"PDB {method} (IO-capturing turned off)") + elif capturing: + tw.sep( + ">", + f"PDB {method} (IO-capturing turned off for {capturing})", + ) + else: + tw.sep(">", f"PDB {method}") + + _pdb = cls._import_pdb_cls(capman)(**kwargs) + + if cls._pluginmanager: + cls._pluginmanager.hook.pytest_enter_pdb(config=cls._config, pdb=_pdb) + return _pdb + + @classmethod + def set_trace(cls, *args, **kwargs) -> None: + """Invoke debugging via ``Pdb.set_trace``, dropping any IO capturing.""" + frame = sys._getframe().f_back + _pdb = cls._init_pdb("set_trace", *args, **kwargs) + _pdb.set_trace(frame) + + +class PdbInvoke: + def pytest_exception_interact( + self, node: Node, call: CallInfo[Any], report: BaseReport + ) -> None: + capman = node.config.pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend_global_capture(in_=True) + out, err = capman.read_global_capture() + sys.stdout.write(out) + sys.stdout.write(err) + assert call.excinfo is not None + + if not isinstance(call.excinfo.value, unittest.SkipTest): + _enter_pdb(node, call.excinfo, report) + + def pytest_internalerror(self, excinfo: ExceptionInfo[BaseException]) -> None: + exc_or_tb = _postmortem_exc_or_tb(excinfo) + post_mortem(exc_or_tb) + + +class PdbTrace: + @hookimpl(wrapper=True) + def pytest_pyfunc_call(self, pyfuncitem) -> Generator[None, object, object]: + wrap_pytest_function_for_tracing(pyfuncitem) + return (yield) + + +def wrap_pytest_function_for_tracing(pyfuncitem) -> None: + """Change the Python function object of the given Function item by a + wrapper which actually enters pdb before calling the python function + itself, effectively leaving the user in the pdb prompt in the first + statement of the function.""" + _pdb = pytestPDB._init_pdb("runcall") + testfunction = pyfuncitem.obj + + # we can't just return `partial(pdb.runcall, testfunction)` because (on + # python < 3.7.4) runcall's first param is `func`, which means we'd get + # an exception if one of the kwargs to testfunction was called `func`. + @functools.wraps(testfunction) + def wrapper(*args, **kwargs) -> None: + func = functools.partial(testfunction, *args, **kwargs) + _pdb.runcall(func) + + pyfuncitem.obj = wrapper + + +def maybe_wrap_pytest_function_for_tracing(pyfuncitem) -> None: + """Wrap the given pytestfunct item for tracing support if --trace was given in + the command line.""" + if pyfuncitem.config.getvalue("trace"): + wrap_pytest_function_for_tracing(pyfuncitem) + + +def _enter_pdb( + node: Node, excinfo: ExceptionInfo[BaseException], rep: BaseReport +) -> BaseReport: + # XXX we reuse the TerminalReporter's terminalwriter + # because this seems to avoid some encoding related troubles + # for not completely clear reasons. 
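+    # The capture sections echoed below honor the --show-capture option,
+    # which is one of "no", "stdout", "stderr", "log" or "all".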
+ tw = node.config.pluginmanager.getplugin("terminalreporter")._tw + tw.line() + + showcapture = node.config.option.showcapture + + for sectionname, content in ( + ("stdout", rep.capstdout), + ("stderr", rep.capstderr), + ("log", rep.caplog), + ): + if showcapture in (sectionname, "all") and content: + tw.sep(">", "captured " + sectionname) + if content[-1:] == "\n": + content = content[:-1] + tw.line(content) + + tw.sep(">", "traceback") + rep.toterminal(tw) + tw.sep(">", "entering PDB") + tb_or_exc = _postmortem_exc_or_tb(excinfo) + rep._pdbshown = True # type: ignore[attr-defined] + post_mortem(tb_or_exc) + return rep + + +def _postmortem_exc_or_tb( + excinfo: ExceptionInfo[BaseException], +) -> types.TracebackType | BaseException: + from doctest import UnexpectedException + + get_exc = sys.version_info >= (3, 13) + if isinstance(excinfo.value, UnexpectedException): + # A doctest.UnexpectedException is not useful for post_mortem. + # Use the underlying exception instead: + underlying_exc = excinfo.value + if get_exc: + return underlying_exc.exc_info[1] + + return underlying_exc.exc_info[2] + elif isinstance(excinfo.value, ConftestImportFailure): + # A config.ConftestImportFailure is not useful for post_mortem. + # Use the underlying exception instead: + cause = excinfo.value.cause + if get_exc: + return cause + + assert cause.__traceback__ is not None + return cause.__traceback__ + else: + assert excinfo._excinfo is not None + if get_exc: + return excinfo._excinfo[1] + + return excinfo._excinfo[2] + + +def post_mortem(tb_or_exc: types.TracebackType | BaseException) -> None: + p = pytestPDB._init_pdb("post_mortem") + p.reset() + p.interaction(None, tb_or_exc) + if p.quitting: + outcomes.exit("Quitting debugger") diff --git a/.venv/lib/python3.11/site-packages/_pytest/deprecated.py b/.venv/lib/python3.11/site-packages/_pytest/deprecated.py new file mode 100644 index 0000000..a605c24 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/deprecated.py @@ -0,0 +1,91 @@ +"""Deprecation messages and bits of code used elsewhere in the codebase that +is planned to be removed in the next pytest release. + +Keeping it in a central location makes it easy to track what is deprecated and should +be removed when the time comes. + +All constants defined in this module should be either instances of +:class:`PytestWarning`, or :class:`UnformattedWarning` +in case of warnings which need to format their messages. +""" + +from __future__ import annotations + +from warnings import warn + +from _pytest.warning_types import PytestDeprecationWarning +from _pytest.warning_types import PytestRemovedIn9Warning +from _pytest.warning_types import UnformattedWarning + + +# set of plugins which have been integrated into the core; we use this list to ignore +# them during registration to avoid conflicts +DEPRECATED_EXTERNAL_PLUGINS = { + "pytest_catchlog", + "pytest_capturelog", + "pytest_faulthandler", +} + + +# This can be* removed pytest 8, but it's harmless and common, so no rush to remove. +# * If you're in the future: "could have been". +YIELD_FIXTURE = PytestDeprecationWarning( + "@pytest.yield_fixture is deprecated.\n" + "Use @pytest.fixture instead; they are the same." +) + +# This deprecation is never really meant to be removed. 
+PRIVATE = PytestDeprecationWarning("A private pytest class or function was used.") + + +HOOK_LEGACY_PATH_ARG = UnformattedWarning( + PytestRemovedIn9Warning, + "The ({pylib_path_arg}: py.path.local) argument is deprecated, please use ({pathlib_path_arg}: pathlib.Path)\n" + "see https://docs.pytest.org/en/latest/deprecations.html" + "#py-path-local-arguments-for-hooks-replaced-with-pathlib-path", +) + +NODE_CTOR_FSPATH_ARG = UnformattedWarning( + PytestRemovedIn9Warning, + "The (fspath: py.path.local) argument to {node_type_name} is deprecated. " + "Please use the (path: pathlib.Path) argument instead.\n" + "See https://docs.pytest.org/en/latest/deprecations.html" + "#fspath-argument-for-node-constructors-replaced-with-pathlib-path", +) + +HOOK_LEGACY_MARKING = UnformattedWarning( + PytestDeprecationWarning, + "The hook{type} {fullname} uses old-style configuration options (marks or attributes).\n" + "Please use the pytest.hook{type}({hook_opts}) decorator instead\n" + " to configure the hooks.\n" + " See https://docs.pytest.org/en/latest/deprecations.html" + "#configuring-hook-specs-impls-using-markers", +) + +MARKED_FIXTURE = PytestRemovedIn9Warning( + "Marks applied to fixtures have no effect\n" + "See docs: https://docs.pytest.org/en/stable/deprecations.html#applying-a-mark-to-a-fixture-function" +) + +# You want to make some `__init__` or function "private". +# +# def my_private_function(some, args): +# ... +# +# Do this: +# +# def my_private_function(some, args, *, _ispytest: bool = False): +# check_ispytest(_ispytest) +# ... +# +# Change all internal/allowed calls to +# +# my_private_function(some, args, _ispytest=True) +# +# All other calls will get the default _ispytest=False and trigger +# the warning (possibly error in the future). + + +def check_ispytest(ispytest: bool) -> None: + if not ispytest: + warn(PRIVATE, stacklevel=3) diff --git a/.venv/lib/python3.11/site-packages/_pytest/doctest.py b/.venv/lib/python3.11/site-packages/_pytest/doctest.py new file mode 100644 index 0000000..0dbef60 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/doctest.py @@ -0,0 +1,754 @@ +# mypy: allow-untyped-defs +"""Discover and run doctests in modules and test files.""" + +from __future__ import annotations + +import bdb +from collections.abc import Callable +from collections.abc import Generator +from collections.abc import Iterable +from collections.abc import Sequence +from contextlib import contextmanager +import functools +import inspect +import os +from pathlib import Path +import platform +import re +import sys +import traceback +import types +from typing import Any +from typing import TYPE_CHECKING +import warnings + +from _pytest import outcomes +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import ReprFileLocation +from _pytest._code.code import TerminalRepr +from _pytest._io import TerminalWriter +from _pytest.compat import safe_getattr +from _pytest.config import Config +from _pytest.config.argparsing import Parser +from _pytest.fixtures import fixture +from _pytest.fixtures import TopRequest +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.outcomes import OutcomeException +from _pytest.outcomes import skip +from _pytest.pathlib import fnmatch_ex +from _pytest.python import Module +from _pytest.python_api import approx +from _pytest.warning_types import PytestWarning + + +if TYPE_CHECKING: + import doctest + + from typing_extensions import Self + +DOCTEST_REPORT_CHOICE_NONE = "none" +DOCTEST_REPORT_CHOICE_CDIFF 
= "cdiff" +DOCTEST_REPORT_CHOICE_NDIFF = "ndiff" +DOCTEST_REPORT_CHOICE_UDIFF = "udiff" +DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = "only_first_failure" + +DOCTEST_REPORT_CHOICES = ( + DOCTEST_REPORT_CHOICE_NONE, + DOCTEST_REPORT_CHOICE_CDIFF, + DOCTEST_REPORT_CHOICE_NDIFF, + DOCTEST_REPORT_CHOICE_UDIFF, + DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE, +) + +# Lazy definition of runner class +RUNNER_CLASS = None +# Lazy definition of output checker class +CHECKER_CLASS: type[doctest.OutputChecker] | None = None + + +def pytest_addoption(parser: Parser) -> None: + parser.addini( + "doctest_optionflags", + "Option flags for doctests", + type="args", + default=["ELLIPSIS"], + ) + parser.addini( + "doctest_encoding", "Encoding used for doctest files", default="utf-8" + ) + group = parser.getgroup("collect") + group.addoption( + "--doctest-modules", + action="store_true", + default=False, + help="Run doctests in all .py modules", + dest="doctestmodules", + ) + group.addoption( + "--doctest-report", + type=str.lower, + default="udiff", + help="Choose another output format for diffs on doctest failure", + choices=DOCTEST_REPORT_CHOICES, + dest="doctestreport", + ) + group.addoption( + "--doctest-glob", + action="append", + default=[], + metavar="pat", + help="Doctests file matching pattern, default: test*.txt", + dest="doctestglob", + ) + group.addoption( + "--doctest-ignore-import-errors", + action="store_true", + default=False, + help="Ignore doctest collection errors", + dest="doctest_ignore_import_errors", + ) + group.addoption( + "--doctest-continue-on-failure", + action="store_true", + default=False, + help="For a given doctest, continue to run after the first failure", + dest="doctest_continue_on_failure", + ) + + +def pytest_unconfigure() -> None: + global RUNNER_CLASS + + RUNNER_CLASS = None + + +def pytest_collect_file( + file_path: Path, + parent: Collector, +) -> DoctestModule | DoctestTextfile | None: + config = parent.config + if file_path.suffix == ".py": + if config.option.doctestmodules and not any( + (_is_setup_py(file_path), _is_main_py(file_path)) + ): + return DoctestModule.from_parent(parent, path=file_path) + elif _is_doctest(config, file_path, parent): + return DoctestTextfile.from_parent(parent, path=file_path) + return None + + +def _is_setup_py(path: Path) -> bool: + if path.name != "setup.py": + return False + contents = path.read_bytes() + return b"setuptools" in contents or b"distutils" in contents + + +def _is_doctest(config: Config, path: Path, parent: Collector) -> bool: + if path.suffix in (".txt", ".rst") and parent.session.isinitpath(path): + return True + globs = config.getoption("doctestglob") or ["test*.txt"] + return any(fnmatch_ex(glob, path) for glob in globs) + + +def _is_main_py(path: Path) -> bool: + return path.name == "__main__.py" + + +class ReprFailDoctest(TerminalRepr): + def __init__( + self, reprlocation_lines: Sequence[tuple[ReprFileLocation, Sequence[str]]] + ) -> None: + self.reprlocation_lines = reprlocation_lines + + def toterminal(self, tw: TerminalWriter) -> None: + for reprlocation, lines in self.reprlocation_lines: + for line in lines: + tw.line(line) + reprlocation.toterminal(tw) + + +class MultipleDoctestFailures(Exception): + def __init__(self, failures: Sequence[doctest.DocTestFailure]) -> None: + super().__init__() + self.failures = failures + + +def _init_runner_class() -> type[doctest.DocTestRunner]: + import doctest + + class PytestDoctestRunner(doctest.DebugRunner): + """Runner to collect failures. 
+ + Note that the out variable in this case is a list instead of a + stdout-like object. + """ + + def __init__( + self, + checker: doctest.OutputChecker | None = None, + verbose: bool | None = None, + optionflags: int = 0, + continue_on_failure: bool = True, + ) -> None: + super().__init__(checker=checker, verbose=verbose, optionflags=optionflags) + self.continue_on_failure = continue_on_failure + + def report_failure( + self, + out, + test: doctest.DocTest, + example: doctest.Example, + got: str, + ) -> None: + failure = doctest.DocTestFailure(test, example, got) + if self.continue_on_failure: + out.append(failure) + else: + raise failure + + def report_unexpected_exception( + self, + out, + test: doctest.DocTest, + example: doctest.Example, + exc_info: tuple[type[BaseException], BaseException, types.TracebackType], + ) -> None: + if isinstance(exc_info[1], OutcomeException): + raise exc_info[1] + if isinstance(exc_info[1], bdb.BdbQuit): + outcomes.exit("Quitting debugger") + failure = doctest.UnexpectedException(test, example, exc_info) + if self.continue_on_failure: + out.append(failure) + else: + raise failure + + return PytestDoctestRunner + + +def _get_runner( + checker: doctest.OutputChecker | None = None, + verbose: bool | None = None, + optionflags: int = 0, + continue_on_failure: bool = True, +) -> doctest.DocTestRunner: + # We need this in order to do a lazy import on doctest + global RUNNER_CLASS + if RUNNER_CLASS is None: + RUNNER_CLASS = _init_runner_class() + # Type ignored because the continue_on_failure argument is only defined on + # PytestDoctestRunner, which is lazily defined so can't be used as a type. + return RUNNER_CLASS( # type: ignore + checker=checker, + verbose=verbose, + optionflags=optionflags, + continue_on_failure=continue_on_failure, + ) + + +class DoctestItem(Item): + def __init__( + self, + name: str, + parent: DoctestTextfile | DoctestModule, + runner: doctest.DocTestRunner, + dtest: doctest.DocTest, + ) -> None: + super().__init__(name, parent) + self.runner = runner + self.dtest = dtest + + # Stuff needed for fixture support. + self.obj = None + fm = self.session._fixturemanager + fixtureinfo = fm.getfixtureinfo(node=self, func=None, cls=None) + self._fixtureinfo = fixtureinfo + self.fixturenames = fixtureinfo.names_closure + self._initrequest() + + @classmethod + def from_parent( # type: ignore[override] + cls, + parent: DoctestTextfile | DoctestModule, + *, + name: str, + runner: doctest.DocTestRunner, + dtest: doctest.DocTest, + ) -> Self: + # incompatible signature due to imposed limits on subclass + """The public named constructor.""" + return super().from_parent(name=name, parent=parent, runner=runner, dtest=dtest) + + def _initrequest(self) -> None: + self.funcargs: dict[str, object] = {} + self._request = TopRequest(self, _ispytest=True) # type: ignore[arg-type] + + def setup(self) -> None: + self._request._fillfixtures() + globs = dict(getfixture=self._request.getfixturevalue) + for name, value in self._request.getfixturevalue("doctest_namespace").items(): + globs[name] = value + self.dtest.globs.update(globs) + + def runtest(self) -> None: + _check_all_skipped(self.dtest) + self._disable_output_capturing_for_darwin() + failures: list[doctest.DocTestFailure] = [] + # Type ignored because we change the type of `out` from what + # doctest expects. 
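+        # Sketch of the trick noted above: doctest's runner writes failure
+        # reports via its `out` callable; PytestDoctestRunner.report_failure()
+        # instead does out.append(failure), so passing this list collects
+        # DocTestFailure objects rather than printing text.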
+ self.runner.run(self.dtest, out=failures) # type: ignore[arg-type] + if failures: + raise MultipleDoctestFailures(failures) + + def _disable_output_capturing_for_darwin(self) -> None: + """Disable output capturing. Otherwise, stdout is lost to doctest (#985).""" + if platform.system() != "Darwin": + return + capman = self.config.pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend_global_capture(in_=True) + out, err = capman.read_global_capture() + sys.stdout.write(out) + sys.stderr.write(err) + + # TODO: Type ignored -- breaks Liskov Substitution. + def repr_failure( # type: ignore[override] + self, + excinfo: ExceptionInfo[BaseException], + ) -> str | TerminalRepr: + import doctest + + failures: ( + Sequence[doctest.DocTestFailure | doctest.UnexpectedException] | None + ) = None + if isinstance( + excinfo.value, (doctest.DocTestFailure, doctest.UnexpectedException) + ): + failures = [excinfo.value] + elif isinstance(excinfo.value, MultipleDoctestFailures): + failures = excinfo.value.failures + + if failures is None: + return super().repr_failure(excinfo) + + reprlocation_lines = [] + for failure in failures: + example = failure.example + test = failure.test + filename = test.filename + if test.lineno is None: + lineno = None + else: + lineno = test.lineno + example.lineno + 1 + message = type(failure).__name__ + # TODO: ReprFileLocation doesn't expect a None lineno. + reprlocation = ReprFileLocation(filename, lineno, message) # type: ignore[arg-type] + checker = _get_checker() + report_choice = _get_report_choice(self.config.getoption("doctestreport")) + if lineno is not None: + assert failure.test.docstring is not None + lines = failure.test.docstring.splitlines(False) + # add line numbers to the left of the error message + assert test.lineno is not None + lines = [ + f"{i + test.lineno + 1:03d} {x}" for (i, x) in enumerate(lines) + ] + # trim docstring error lines to 10 + lines = lines[max(example.lineno - 9, 0) : example.lineno + 1] + else: + lines = [ + "EXAMPLE LOCATION UNKNOWN, not showing all tests of that example" + ] + indent = ">>>" + for line in example.source.splitlines(): + lines.append(f"??? {indent} {line}") + indent = "..." 
+ if isinstance(failure, doctest.DocTestFailure): + lines += checker.output_difference( + example, failure.got, report_choice + ).split("\n") + else: + inner_excinfo = ExceptionInfo.from_exc_info(failure.exc_info) + lines += [f"UNEXPECTED EXCEPTION: {inner_excinfo.value!r}"] + lines += [ + x.strip("\n") for x in traceback.format_exception(*failure.exc_info) + ] + reprlocation_lines.append((reprlocation, lines)) + return ReprFailDoctest(reprlocation_lines) + + def reportinfo(self) -> tuple[os.PathLike[str] | str, int | None, str]: + return self.path, self.dtest.lineno, f"[doctest] {self.name}" + + +def _get_flag_lookup() -> dict[str, int]: + import doctest + + return dict( + DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1, + DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE, + NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE, + ELLIPSIS=doctest.ELLIPSIS, + IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL, + COMPARISON_FLAGS=doctest.COMPARISON_FLAGS, + ALLOW_UNICODE=_get_allow_unicode_flag(), + ALLOW_BYTES=_get_allow_bytes_flag(), + NUMBER=_get_number_flag(), + ) + + +def get_optionflags(config: Config) -> int: + optionflags_str = config.getini("doctest_optionflags") + flag_lookup_table = _get_flag_lookup() + flag_acc = 0 + for flag in optionflags_str: + flag_acc |= flag_lookup_table[flag] + return flag_acc + + +def _get_continue_on_failure(config: Config) -> bool: + continue_on_failure: bool = config.getvalue("doctest_continue_on_failure") + if continue_on_failure: + # We need to turn off this if we use pdb since we should stop at + # the first failure. + if config.getvalue("usepdb"): + continue_on_failure = False + return continue_on_failure + + +class DoctestTextfile(Module): + obj = None + + def collect(self) -> Iterable[DoctestItem]: + import doctest + + # Inspired by doctest.testfile; ideally we would use it directly, + # but it doesn't support passing a custom checker. 
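+        # Roughly what the stdlib one-shot API would look like here, minus
+        # the custom checker/runner (illustrative, not actually called):
+        #
+        #     doctest.testfile(str(self.path), module_relative=False,
+        #                      optionflags=optionflags, encoding=encoding)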
+ encoding = self.config.getini("doctest_encoding") + text = self.path.read_text(encoding) + filename = str(self.path) + name = self.path.name + globs = {"__name__": "__main__"} + + optionflags = get_optionflags(self.config) + + runner = _get_runner( + verbose=False, + optionflags=optionflags, + checker=_get_checker(), + continue_on_failure=_get_continue_on_failure(self.config), + ) + + parser = doctest.DocTestParser() + test = parser.get_doctest(text, globs, name, filename, 0) + if test.examples: + yield DoctestItem.from_parent( + self, name=test.name, runner=runner, dtest=test + ) + + +def _check_all_skipped(test: doctest.DocTest) -> None: + """Raise pytest.skip() if all examples in the given DocTest have the SKIP + option set.""" + import doctest + + all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples) + if all_skipped: + skip("all tests skipped by +SKIP option") + + +def _is_mocked(obj: object) -> bool: + """Return if an object is possibly a mock object by checking the + existence of a highly improbable attribute.""" + return ( + safe_getattr(obj, "pytest_mock_example_attribute_that_shouldnt_exist", None) + is not None + ) + + +@contextmanager +def _patch_unwrap_mock_aware() -> Generator[None]: + """Context manager which replaces ``inspect.unwrap`` with a version + that's aware of mock objects and doesn't recurse into them.""" + real_unwrap = inspect.unwrap + + def _mock_aware_unwrap( + func: Callable[..., Any], *, stop: Callable[[Any], Any] | None = None + ) -> Any: + try: + if stop is None or stop is _is_mocked: + return real_unwrap(func, stop=_is_mocked) + _stop = stop + return real_unwrap(func, stop=lambda obj: _is_mocked(obj) or _stop(func)) + except Exception as e: + warnings.warn( + f"Got {e!r} when unwrapping {func!r}. This is usually caused " + "by a violation of Python's object protocol; see e.g. " + "https://github.com/pytest-dev/pytest/issues/5080", + PytestWarning, + ) + raise + + inspect.unwrap = _mock_aware_unwrap + try: + yield + finally: + inspect.unwrap = real_unwrap + + +class DoctestModule(Module): + def collect(self) -> Iterable[DoctestItem]: + import doctest + + class MockAwareDocTestFinder(doctest.DocTestFinder): + py_ver_info_minor = sys.version_info[:2] + is_find_lineno_broken = ( + py_ver_info_minor < (3, 11) + or (py_ver_info_minor == (3, 11) and sys.version_info.micro < 9) + or (py_ver_info_minor == (3, 12) and sys.version_info.micro < 3) + ) + if is_find_lineno_broken: + + def _find_lineno(self, obj, source_lines): + """On older Pythons, doctest code does not take into account + `@property`. https://github.com/python/cpython/issues/61648 + + Moreover, wrapped Doctests need to be unwrapped so the correct + line number is returned. #8796 + """ + if isinstance(obj, property): + obj = getattr(obj, "fget", obj) + + if hasattr(obj, "__wrapped__"): + # Get the main obj in case of it being wrapped + obj = inspect.unwrap(obj) + + # Type ignored because this is a private function. + return super()._find_lineno( # type:ignore[misc] + obj, + source_lines, + ) + + if sys.version_info < (3, 10): + + def _find( + self, tests, obj, name, module, source_lines, globs, seen + ) -> None: + """Override _find to work around issue in stdlib. + + https://github.com/pytest-dev/pytest/issues/3456 + https://github.com/python/cpython/issues/69718 + """ + if _is_mocked(obj): + return # pragma: no cover + with _patch_unwrap_mock_aware(): + # Type ignored because this is a private function. 
+                        super()._find(  # type:ignore[misc]
+                            tests, obj, name, module, source_lines, globs, seen
+                        )
+
+            if sys.version_info < (3, 13):
+
+                def _from_module(self, module, object):
+                    """`cached_property` objects are never considered a part
+                    of the 'current module'. As such they are skipped by doctest.
+                    Here we override `_from_module` to check the underlying
+                    function instead. https://github.com/python/cpython/issues/107995
+                    """
+                    if isinstance(object, functools.cached_property):
+                        object = object.func
+
+                    # Type ignored because this is a private function.
+                    return super()._from_module(module, object)  # type: ignore[misc]
+
+        try:
+            module = self.obj
+        except Collector.CollectError:
+            if self.config.getvalue("doctest_ignore_import_errors"):
+                skip(f"unable to import module {self.path!r}")
+            else:
+                raise
+
+        # While doctests currently don't support fixtures directly, we still
+        # need to pick up autouse fixtures.
+        self.session._fixturemanager.parsefactories(self)
+
+        # Uses internal doctest module parsing mechanism.
+        finder = MockAwareDocTestFinder()
+        optionflags = get_optionflags(self.config)
+        runner = _get_runner(
+            verbose=False,
+            optionflags=optionflags,
+            checker=_get_checker(),
+            continue_on_failure=_get_continue_on_failure(self.config),
+        )
+
+        for test in finder.find(module, module.__name__):
+            if test.examples:  # skip empty doctests
+                yield DoctestItem.from_parent(
+                    self, name=test.name, runner=runner, dtest=test
+                )
+
+
+def _init_checker_class() -> type[doctest.OutputChecker]:
+    import doctest
+
+    class LiteralsOutputChecker(doctest.OutputChecker):
+        # Based on doctest_nose_plugin.py from the nltk project
+        # (https://github.com/nltk/nltk) and on the "numtest" doctest extension
+        # by Sebastien Boisgerault (https://github.com/boisgera/numtest).
+
+        _unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE)
+        _bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE)
+        _number_re = re.compile(
+            r"""
+            (?P<number>
+              (?P<mantissa>
+                (?P<integer1> [+-]?\d*)\.(?P<fraction>\d+)
+                |
+                (?P<integer2> [+-]?\d+)\.
+              )
+              (?:
+                [Ee]
+                (?P<exponent1> [+-]?\d+)
+              )?
+              |
+              (?P<integer3> [+-]?\d+)
+              (?:
+                [Ee]
+                (?P<exponent2> [+-]?\d+)
+              )
+            )
+            """,
+            re.VERBOSE,
+        )
+
+        def check_output(self, want: str, got: str, optionflags: int) -> bool:
+            if super().check_output(want, got, optionflags):
+                return True
+
+            allow_unicode = optionflags & _get_allow_unicode_flag()
+            allow_bytes = optionflags & _get_allow_bytes_flag()
+            allow_number = optionflags & _get_number_flag()
+
+            if not allow_unicode and not allow_bytes and not allow_number:
+                return False
+
+            def remove_prefixes(regex: re.Pattern[str], txt: str) -> str:
+                return re.sub(regex, r"\1\2", txt)
+
+            if allow_unicode:
+                want = remove_prefixes(self._unicode_literal_re, want)
+                got = remove_prefixes(self._unicode_literal_re, got)
+
+            if allow_bytes:
+                want = remove_prefixes(self._bytes_literal_re, want)
+                got = remove_prefixes(self._bytes_literal_re, got)
+
+            if allow_number:
+                got = self._remove_unwanted_precision(want, got)
+
+            return super().check_output(want, got, optionflags)
+
+        def _remove_unwanted_precision(self, want: str, got: str) -> str:
+            wants = list(self._number_re.finditer(want))
+            gots = list(self._number_re.finditer(got))
+            if len(wants) != len(gots):
+                return got
+            offset = 0
+            for w, g in zip(wants, gots):
+                fraction: str | None = w.group("fraction")
+                exponent: str | None = w.group("exponent1")
+                if exponent is None:
+                    exponent = w.group("exponent2")
+                precision = 0 if fraction is None else len(fraction)
+                if exponent is not None:
+                    precision -= int(exponent)
+                if float(w.group()) == approx(float(g.group()), abs=10**-precision):
+                    # They're close enough. Replace the text we actually
+                    # got with the text we want, so that it will match when we
+                    # check the string literally.
+                    got = (
+                        got[: g.start() + offset] + w.group() + got[g.end() + offset :]
+                    )
+                    offset += w.end() - w.start() - (g.end() - g.start())
+            return got
+
+    return LiteralsOutputChecker
+
+
+def _get_checker() -> doctest.OutputChecker:
+    """Return a doctest.OutputChecker subclass that supports some
+    additional options:
+
+    * ALLOW_UNICODE and ALLOW_BYTES options to ignore u'' and b''
+      prefixes (respectively) in string literals. Useful when the same
+      doctest should run in Python 2 and Python 3.
+
+    * NUMBER to ignore floating-point differences smaller than the
+      precision of the literal number in the doctest.
+
+    An inner class is used to avoid importing "doctest" at the module
+    level.
+    """
+    global CHECKER_CLASS
+    if CHECKER_CLASS is None:
+        CHECKER_CLASS = _init_checker_class()
+    return CHECKER_CLASS()
+
+
+def _get_allow_unicode_flag() -> int:
+    """Register and return the ALLOW_UNICODE flag."""
+    import doctest
+
+    return doctest.register_optionflag("ALLOW_UNICODE")
+
+
+def _get_allow_bytes_flag() -> int:
+    """Register and return the ALLOW_BYTES flag."""
+    import doctest
+
+    return doctest.register_optionflag("ALLOW_BYTES")
+
+
+def _get_number_flag() -> int:
+    """Register and return the NUMBER flag."""
+    import doctest
+
+    return doctest.register_optionflag("NUMBER")
+
+
+def _get_report_choice(key: str) -> int:
+    """Return the actual `doctest` module flag value.
+
+    We want to do it as late as possible to avoid importing `doctest` and all
+    its dependencies when parsing options, as it adds overhead and breaks tests.
+ """ + import doctest + + return { + DOCTEST_REPORT_CHOICE_UDIFF: doctest.REPORT_UDIFF, + DOCTEST_REPORT_CHOICE_CDIFF: doctest.REPORT_CDIFF, + DOCTEST_REPORT_CHOICE_NDIFF: doctest.REPORT_NDIFF, + DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE: doctest.REPORT_ONLY_FIRST_FAILURE, + DOCTEST_REPORT_CHOICE_NONE: 0, + }[key] + + +@fixture(scope="session") +def doctest_namespace() -> dict[str, Any]: + """Fixture that returns a :py:class:`dict` that will be injected into the + namespace of doctests. + + Usually this fixture is used in conjunction with another ``autouse`` fixture: + + .. code-block:: python + + @pytest.fixture(autouse=True) + def add_np(doctest_namespace): + doctest_namespace["np"] = numpy + + For more details: :ref:`doctest_namespace`. + """ + return dict() diff --git a/.venv/lib/python3.11/site-packages/_pytest/faulthandler.py b/.venv/lib/python3.11/site-packages/_pytest/faulthandler.py new file mode 100644 index 0000000..79efc1d --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/faulthandler.py @@ -0,0 +1,105 @@ +from __future__ import annotations + +from collections.abc import Generator +import os +import sys + +from _pytest.config import Config +from _pytest.config.argparsing import Parser +from _pytest.nodes import Item +from _pytest.stash import StashKey +import pytest + + +fault_handler_original_stderr_fd_key = StashKey[int]() +fault_handler_stderr_fd_key = StashKey[int]() + + +def pytest_addoption(parser: Parser) -> None: + help = ( + "Dump the traceback of all threads if a test takes " + "more than TIMEOUT seconds to finish" + ) + parser.addini("faulthandler_timeout", help, default=0.0) + + +def pytest_configure(config: Config) -> None: + import faulthandler + + # at teardown we want to restore the original faulthandler fileno + # but faulthandler has no api to return the original fileno + # so here we stash the stderr fileno to be used at teardown + # sys.stderr and sys.__stderr__ may be closed or patched during the session + # so we can't rely on their values being good at that point (#11572). + stderr_fileno = get_stderr_fileno() + if faulthandler.is_enabled(): + config.stash[fault_handler_original_stderr_fd_key] = stderr_fileno + config.stash[fault_handler_stderr_fd_key] = os.dup(stderr_fileno) + faulthandler.enable(file=config.stash[fault_handler_stderr_fd_key]) + + +def pytest_unconfigure(config: Config) -> None: + import faulthandler + + faulthandler.disable() + # Close the dup file installed during pytest_configure. + if fault_handler_stderr_fd_key in config.stash: + os.close(config.stash[fault_handler_stderr_fd_key]) + del config.stash[fault_handler_stderr_fd_key] + # Re-enable the faulthandler if it was originally enabled. + if fault_handler_original_stderr_fd_key in config.stash: + faulthandler.enable(config.stash[fault_handler_original_stderr_fd_key]) + del config.stash[fault_handler_original_stderr_fd_key] + + +def get_stderr_fileno() -> int: + try: + fileno = sys.stderr.fileno() + # The Twisted Logger will return an invalid file descriptor since it is not backed + # by an FD. So, let's also forward this to the same code path as with pytest-xdist. + if fileno == -1: + raise AttributeError() + return fileno + except (AttributeError, ValueError): + # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file. + # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors + # This is potentially dangerous, but the best we can do. 
+ assert sys.__stderr__ is not None + return sys.__stderr__.fileno() + + +def get_timeout_config_value(config: Config) -> float: + return float(config.getini("faulthandler_timeout") or 0.0) + + +@pytest.hookimpl(wrapper=True, trylast=True) +def pytest_runtest_protocol(item: Item) -> Generator[None, object, object]: + timeout = get_timeout_config_value(item.config) + if timeout > 0: + import faulthandler + + stderr = item.config.stash[fault_handler_stderr_fd_key] + faulthandler.dump_traceback_later(timeout, file=stderr) + try: + return (yield) + finally: + faulthandler.cancel_dump_traceback_later() + else: + return (yield) + + +@pytest.hookimpl(tryfirst=True) +def pytest_enter_pdb() -> None: + """Cancel any traceback dumping due to timeout before entering pdb.""" + import faulthandler + + faulthandler.cancel_dump_traceback_later() + + +@pytest.hookimpl(tryfirst=True) +def pytest_exception_interact() -> None: + """Cancel any traceback dumping due to an interactive exception being + raised.""" + import faulthandler + + faulthandler.cancel_dump_traceback_later() diff --git a/.venv/lib/python3.11/site-packages/_pytest/fixtures.py b/.venv/lib/python3.11/site-packages/_pytest/fixtures.py new file mode 100644 index 0000000..421237e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/fixtures.py @@ -0,0 +1,2017 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +import abc +from collections import defaultdict +from collections import deque +from collections import OrderedDict +from collections.abc import Callable +from collections.abc import Generator +from collections.abc import Iterable +from collections.abc import Iterator +from collections.abc import Mapping +from collections.abc import MutableMapping +from collections.abc import Sequence +from collections.abc import Set as AbstractSet +import dataclasses +import functools +import inspect +import os +from pathlib import Path +import sys +import types +from typing import Any +from typing import cast +from typing import Final +from typing import final +from typing import Generic +from typing import NoReturn +from typing import Optional +from typing import overload +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union +import warnings + +import _pytest +from _pytest import nodes +from _pytest._code import getfslineno +from _pytest._code import Source +from _pytest._code.code import FormattedExcinfo +from _pytest._code.code import TerminalRepr +from _pytest._io import TerminalWriter +from _pytest.compat import assert_never +from _pytest.compat import get_real_func +from _pytest.compat import getfuncargnames +from _pytest.compat import getimfunc +from _pytest.compat import getlocation +from _pytest.compat import NOTSET +from _pytest.compat import NotSetType +from _pytest.compat import safe_getattr +from _pytest.compat import safe_isclass +from _pytest.compat import signature +from _pytest.config import _PluggyPlugin +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.deprecated import MARKED_FIXTURE +from _pytest.deprecated import YIELD_FIXTURE +from _pytest.main import Session +from _pytest.mark import Mark +from _pytest.mark import ParameterSet +from _pytest.mark.structures import MarkDecorator +from _pytest.outcomes import fail +from _pytest.outcomes import skip +from _pytest.outcomes import TEST_OUTCOME +from _pytest.pathlib import absolutepath +from 
_pytest.pathlib import bestrelpath +from _pytest.scope import _ScopeName +from _pytest.scope import HIGH_SCOPES +from _pytest.scope import Scope +from _pytest.warning_types import PytestRemovedIn9Warning +from _pytest.warning_types import PytestWarning + + +if sys.version_info < (3, 11): + from exceptiongroup import BaseExceptionGroup + + +if TYPE_CHECKING: + from _pytest.python import CallSpec2 + from _pytest.python import Function + from _pytest.python import Metafunc + + +# The value of the fixture -- return/yield of the fixture function (type variable). +FixtureValue = TypeVar("FixtureValue") +# The type of the fixture function (type variable). +FixtureFunction = TypeVar("FixtureFunction", bound=Callable[..., object]) +# The type of a fixture function (type alias generic in fixture value). +_FixtureFunc = Union[ + Callable[..., FixtureValue], Callable[..., Generator[FixtureValue]] +] +# The type of FixtureDef.cached_result (type alias generic in fixture value). +_FixtureCachedResult = Union[ + tuple[ + # The result. + FixtureValue, + # Cache key. + object, + None, + ], + tuple[ + None, + # Cache key. + object, + # The exception and the original traceback. + tuple[BaseException, Optional[types.TracebackType]], + ], +] + + +@dataclasses.dataclass(frozen=True) +class PseudoFixtureDef(Generic[FixtureValue]): + cached_result: _FixtureCachedResult[FixtureValue] + _scope: Scope + + +def pytest_sessionstart(session: Session) -> None: + session._fixturemanager = FixtureManager(session) + + +def get_scope_package( + node: nodes.Item, + fixturedef: FixtureDef[object], +) -> nodes.Node | None: + from _pytest.python import Package + + for parent in node.iter_parents(): + if isinstance(parent, Package) and parent.nodeid == fixturedef.baseid: + return parent + return node.session + + +def get_scope_node(node: nodes.Node, scope: Scope) -> nodes.Node | None: + """Get the closest parent node (including self) which matches the given + scope. + + If there is no parent node for the scope (e.g. asking for class scope on a + Module, or on a Function when not defined in a class), returns None. + """ + import _pytest.python + + if scope is Scope.Function: + # Type ignored because this is actually safe, see: + # https://github.com/python/mypy/issues/4717 + return node.getparent(nodes.Item) # type: ignore[type-abstract] + elif scope is Scope.Class: + return node.getparent(_pytest.python.Class) + elif scope is Scope.Module: + return node.getparent(_pytest.python.Module) + elif scope is Scope.Package: + return node.getparent(_pytest.python.Package) + elif scope is Scope.Session: + return node.getparent(_pytest.main.Session) + else: + assert_never(scope) + + +# TODO: Try to use FixtureFunctionDefinition instead of the marker +def getfixturemarker(obj: object) -> FixtureFunctionMarker | None: + """Return fixturemarker or None if it doesn't exist""" + if isinstance(obj, FixtureFunctionDefinition): + return obj._fixture_function_marker + return None + + +# Algorithm for sorting on a per-parametrized resource setup basis. +# It is called for Session scope first and performs sorting +# down to the lower scopes such as to minimize number of "high scope" +# setups and teardowns. + + +@dataclasses.dataclass(frozen=True) +class ParamArgKey: + """A key for a high-scoped parameter used by an item. + + For use as a hashable key in `reorder_items`. The combination of fields + is meant to uniquely identify a particular "instance" of a param, + potentially shared by multiple items in a scope. + """ + + #: The param name. 
+ argname: str + param_index: int + #: For scopes Package, Module, Class, the path to the file (directory in + #: Package's case) of the package/module/class where the item is defined. + scoped_item_path: Path | None + #: For Class scope, the class where the item is defined. + item_cls: type | None + + +_V = TypeVar("_V") +OrderedSet = dict[_V, None] + + +def get_param_argkeys(item: nodes.Item, scope: Scope) -> Iterator[ParamArgKey]: + """Return all ParamArgKeys for item matching the specified high scope.""" + assert scope is not Scope.Function + + try: + callspec: CallSpec2 = item.callspec # type: ignore[attr-defined] + except AttributeError: + return + + item_cls = None + if scope is Scope.Session: + scoped_item_path = None + elif scope is Scope.Package: + # Package key = module's directory. + scoped_item_path = item.path.parent + elif scope is Scope.Module: + scoped_item_path = item.path + elif scope is Scope.Class: + scoped_item_path = item.path + item_cls = item.cls # type: ignore[attr-defined] + else: + assert_never(scope) + + for argname in callspec.indices: + if callspec._arg2scope[argname] != scope: + continue + param_index = callspec.indices[argname] + yield ParamArgKey(argname, param_index, scoped_item_path, item_cls) + + +def reorder_items(items: Sequence[nodes.Item]) -> list[nodes.Item]: + argkeys_by_item: dict[Scope, dict[nodes.Item, OrderedSet[ParamArgKey]]] = {} + items_by_argkey: dict[Scope, dict[ParamArgKey, OrderedDict[nodes.Item, None]]] = {} + for scope in HIGH_SCOPES: + scoped_argkeys_by_item = argkeys_by_item[scope] = {} + scoped_items_by_argkey = items_by_argkey[scope] = defaultdict(OrderedDict) + for item in items: + argkeys = dict.fromkeys(get_param_argkeys(item, scope)) + if argkeys: + scoped_argkeys_by_item[item] = argkeys + for argkey in argkeys: + scoped_items_by_argkey[argkey][item] = None + + items_set = dict.fromkeys(items) + return list( + reorder_items_atscope( + items_set, argkeys_by_item, items_by_argkey, Scope.Session + ) + ) + + +def reorder_items_atscope( + items: OrderedSet[nodes.Item], + argkeys_by_item: Mapping[Scope, Mapping[nodes.Item, OrderedSet[ParamArgKey]]], + items_by_argkey: Mapping[ + Scope, Mapping[ParamArgKey, OrderedDict[nodes.Item, None]] + ], + scope: Scope, +) -> OrderedSet[nodes.Item]: + if scope is Scope.Function or len(items) < 3: + return items + + scoped_items_by_argkey = items_by_argkey[scope] + scoped_argkeys_by_item = argkeys_by_item[scope] + + ignore: set[ParamArgKey] = set() + items_deque = deque(items) + items_done: OrderedSet[nodes.Item] = {} + while items_deque: + no_argkey_items: OrderedSet[nodes.Item] = {} + slicing_argkey = None + while items_deque: + item = items_deque.popleft() + if item in items_done or item in no_argkey_items: + continue + argkeys = dict.fromkeys( + k for k in scoped_argkeys_by_item.get(item, ()) if k not in ignore + ) + if not argkeys: + no_argkey_items[item] = None + else: + slicing_argkey, _ = argkeys.popitem() + # We don't have to remove relevant items from later in the + # deque because they'll just be ignored. + matching_items = [ + i for i in scoped_items_by_argkey[slicing_argkey] if i in items + ] + for i in reversed(matching_items): + items_deque.appendleft(i) + # Fix items_by_argkey order. 
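+                    # [Editor's aside, not pytest source] The effect of this
+                    # reordering is easiest to see with a session-scoped
+                    # parametrized fixture; a minimal sketch (names are
+                    # illustrative):
+                    #
+                    #   @pytest.fixture(scope="session", params=["a", "b"])
+                    #   def backend(request):
+                    #       return request.param
+                    #
+                    # Two tests using `backend` collect into four items, which
+                    # this algorithm groups roughly as test_x[a], test_y[a],
+                    # test_x[b], test_y[b], so each session-scoped param value
+                    # is set up and torn down once rather than interleaved.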
+ for other_scope in HIGH_SCOPES: + other_scoped_items_by_argkey = items_by_argkey[other_scope] + for argkey in argkeys_by_item[other_scope].get(i, ()): + argkey_dict = other_scoped_items_by_argkey[argkey] + if not hasattr(sys, "pypy_version_info"): + argkey_dict[i] = None + argkey_dict.move_to_end(i, last=False) + else: + # Work around a bug in PyPy: + # https://github.com/pypy/pypy/issues/5257 + # https://github.com/pytest-dev/pytest/issues/13312 + bkp = argkey_dict.copy() + argkey_dict.clear() + argkey_dict[i] = None + argkey_dict.update(bkp) + break + if no_argkey_items: + reordered_no_argkey_items = reorder_items_atscope( + no_argkey_items, argkeys_by_item, items_by_argkey, scope.next_lower() + ) + items_done.update(reordered_no_argkey_items) + if slicing_argkey is not None: + ignore.add(slicing_argkey) + return items_done + + +@dataclasses.dataclass(frozen=True) +class FuncFixtureInfo: + """Fixture-related information for a fixture-requesting item (e.g. test + function). + + This is used to examine the fixtures which an item requests statically + (known during collection). This includes autouse fixtures, fixtures + requested by the `usefixtures` marker, fixtures requested in the function + parameters, and the transitive closure of these. + + An item may also request fixtures dynamically (using `request.getfixturevalue`); + these are not reflected here. + """ + + __slots__ = ("argnames", "initialnames", "name2fixturedefs", "names_closure") + + # Fixture names that the item requests directly by function parameters. + argnames: tuple[str, ...] + # Fixture names that the item immediately requires. These include + # argnames + fixture names specified via usefixtures and via autouse=True in + # fixture definitions. + initialnames: tuple[str, ...] + # The transitive closure of the fixture names that the item requires. + # Note: can't include dynamic dependencies (`request.getfixturevalue` calls). + names_closure: list[str] + # A map from a fixture name in the transitive closure to the FixtureDefs + # matching the name which are applicable to this function. + # There may be multiple overriding fixtures with the same name. The + # sequence is ordered from furthest to closes to the function. + name2fixturedefs: dict[str, Sequence[FixtureDef[Any]]] + + def prune_dependency_tree(self) -> None: + """Recompute names_closure from initialnames and name2fixturedefs. + + Can only reduce names_closure, which means that the new closure will + always be a subset of the old one. The order is preserved. + + This method is needed because direct parametrization may shadow some + of the fixtures that were included in the originally built dependency + tree. In this way the dependency tree can get pruned, and the closure + of argnames may get reduced. + """ + closure: set[str] = set() + working_set = set(self.initialnames) + while working_set: + argname = working_set.pop() + # Argname may be something not included in the original names_closure, + # in which case we ignore it. This currently happens with pseudo + # FixtureDefs which wrap 'get_direct_param_fixture_func(request)'. + # So they introduce the new dependency 'request' which might have + # been missing in the original tree (closure). 
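+            # [Editor's aside, not pytest source] A hedged example of the
+            # pruning described above (fixture names are illustrative): a test
+            # requesting `db`, where the `db` fixture requests `engine`, has
+            # the closure ["db", "engine"]; directly parametrizing the test
+            # with `@pytest.mark.parametrize("db", [...])` shadows the fixture,
+            # so the recomputation here can drop "engine" from names_closure.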
+ if argname not in closure and argname in self.names_closure: + closure.add(argname) + if argname in self.name2fixturedefs: + working_set.update(self.name2fixturedefs[argname][-1].argnames) + + self.names_closure[:] = sorted(closure, key=self.names_closure.index) + + +class FixtureRequest(abc.ABC): + """The type of the ``request`` fixture. + + A request object gives access to the requesting test context and has a + ``param`` attribute in case the fixture is parametrized. + """ + + def __init__( + self, + pyfuncitem: Function, + fixturename: str | None, + arg2fixturedefs: dict[str, Sequence[FixtureDef[Any]]], + fixture_defs: dict[str, FixtureDef[Any]], + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + #: Fixture for which this request is being performed. + self.fixturename: Final = fixturename + self._pyfuncitem: Final = pyfuncitem + # The FixtureDefs for each fixture name requested by this item. + # Starts from the statically-known fixturedefs resolved during + # collection. Dynamically requested fixtures (using + # `request.getfixturevalue("foo")`) are added dynamically. + self._arg2fixturedefs: Final = arg2fixturedefs + # The evaluated argnames so far, mapping to the FixtureDef they resolved + # to. + self._fixture_defs: Final = fixture_defs + # Notes on the type of `param`: + # -`request.param` is only defined in parametrized fixtures, and will raise + # AttributeError otherwise. Python typing has no notion of "undefined", so + # this cannot be reflected in the type. + # - Technically `param` is only (possibly) defined on SubRequest, not + # FixtureRequest, but the typing of that is still in flux so this cheats. + # - In the future we might consider using a generic for the param type, but + # for now just using Any. + self.param: Any + + @property + def _fixturemanager(self) -> FixtureManager: + return self._pyfuncitem.session._fixturemanager + + @property + @abc.abstractmethod + def _scope(self) -> Scope: + raise NotImplementedError() + + @property + def scope(self) -> _ScopeName: + """Scope string, one of "function", "class", "module", "package", "session".""" + return self._scope.value + + @abc.abstractmethod + def _check_scope( + self, + requested_fixturedef: FixtureDef[object] | PseudoFixtureDef[object], + requested_scope: Scope, + ) -> None: + raise NotImplementedError() + + @property + def fixturenames(self) -> list[str]: + """Names of all active fixtures in this request.""" + result = list(self._pyfuncitem.fixturenames) + result.extend(set(self._fixture_defs).difference(result)) + return result + + @property + @abc.abstractmethod + def node(self): + """Underlying collection node (depends on current request scope).""" + raise NotImplementedError() + + @property + def config(self) -> Config: + """The pytest config object associated with this request.""" + return self._pyfuncitem.config + + @property + def function(self): + """Test function object if the request has a per-function scope.""" + if self.scope != "function": + raise AttributeError( + f"function not available in {self.scope}-scoped context" + ) + return self._pyfuncitem.obj + + @property + def cls(self): + """Class (can be None) where the test function was collected.""" + if self.scope not in ("class", "function"): + raise AttributeError(f"cls not available in {self.scope}-scoped context") + clscol = self._pyfuncitem.getparent(_pytest.python.Class) + if clscol: + return clscol.obj + + @property + def instance(self): + """Instance (can be None) on which test function was collected.""" + if 
self.scope != "function": + return None + return getattr(self._pyfuncitem, "instance", None) + + @property + def module(self): + """Python module object where the test function was collected.""" + if self.scope not in ("function", "class", "module"): + raise AttributeError(f"module not available in {self.scope}-scoped context") + mod = self._pyfuncitem.getparent(_pytest.python.Module) + assert mod is not None + return mod.obj + + @property + def path(self) -> Path: + """Path where the test function was collected.""" + if self.scope not in ("function", "class", "module", "package"): + raise AttributeError(f"path not available in {self.scope}-scoped context") + return self._pyfuncitem.path + + @property + def keywords(self) -> MutableMapping[str, Any]: + """Keywords/markers dictionary for the underlying node.""" + node: nodes.Node = self.node + return node.keywords + + @property + def session(self) -> Session: + """Pytest session object.""" + return self._pyfuncitem.session + + @abc.abstractmethod + def addfinalizer(self, finalizer: Callable[[], object]) -> None: + """Add finalizer/teardown function to be called without arguments after + the last test within the requesting test context finished execution.""" + raise NotImplementedError() + + def applymarker(self, marker: str | MarkDecorator) -> None: + """Apply a marker to a single test function invocation. + + This method is useful if you don't want to have a keyword/marker + on all function invocations. + + :param marker: + An object created by a call to ``pytest.mark.NAME(...)``. + """ + self.node.add_marker(marker) + + def raiseerror(self, msg: str | None) -> NoReturn: + """Raise a FixtureLookupError exception. + + :param msg: + An optional custom error message. + """ + raise FixtureLookupError(None, self, msg) + + def getfixturevalue(self, argname: str) -> Any: + """Dynamically run a named fixture function. + + Declaring fixtures via function argument is recommended where possible. + But if you can only decide whether to use another fixture at test + setup time, you may use this function to retrieve it inside a fixture + or test function body. + + This method can be used during the test setup phase or the test run + phase, but during the test teardown phase a fixture's value may not + be available. + + :param argname: + The fixture name. + :raises pytest.FixtureLookupError: + If the given fixture could not be found. + """ + # Note that in addition to the use case described in the docstring, + # getfixturevalue() is also called by pytest itself during item and fixture + # setup to evaluate the fixtures that are requested statically + # (using function parameters, autouse, etc). + + fixturedef = self._get_active_fixturedef(argname) + assert fixturedef.cached_result is not None, ( + f'The fixture value for "{argname}" is not available. ' + "This can happen when the fixture has already been torn down." + ) + return fixturedef.cached_result[0] + + def _iter_chain(self) -> Iterator[SubRequest]: + """Yield all SubRequests in the chain, from self up. + + Note: does *not* yield the TopRequest. + """ + current = self + while isinstance(current, SubRequest): + yield current + current = current._parent_request + + def _get_active_fixturedef( + self, argname: str + ) -> FixtureDef[object] | PseudoFixtureDef[object]: + if argname == "request": + cached_result = (self, [0], None) + return PseudoFixtureDef(cached_result, Scope.Function) + + # If we already finished computing a fixture by this name in this item, + # return it. 
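+        # [Editor's aside, not pytest source] This is also the path taken by
+        # request.getfixturevalue(); a minimal usage sketch (fixture names are
+        # illustrative):
+        #
+        #   @pytest.fixture
+        #   def storage(request):
+        #       # resolve another fixture dynamically at setup time
+        #       return request.getfixturevalue("tmp_path")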
+ fixturedef = self._fixture_defs.get(argname) + if fixturedef is not None: + self._check_scope(fixturedef, fixturedef._scope) + return fixturedef + + # Find the appropriate fixturedef. + fixturedefs = self._arg2fixturedefs.get(argname, None) + if fixturedefs is None: + # We arrive here because of a dynamic call to + # getfixturevalue(argname) which was naturally + # not known at parsing/collection time. + fixturedefs = self._fixturemanager.getfixturedefs(argname, self._pyfuncitem) + if fixturedefs is not None: + self._arg2fixturedefs[argname] = fixturedefs + # No fixtures defined with this name. + if fixturedefs is None: + raise FixtureLookupError(argname, self) + # The are no fixtures with this name applicable for the function. + if not fixturedefs: + raise FixtureLookupError(argname, self) + + # A fixture may override another fixture with the same name, e.g. a + # fixture in a module can override a fixture in a conftest, a fixture in + # a class can override a fixture in the module, and so on. + # An overriding fixture can request its own name (possibly indirectly); + # in this case it gets the value of the fixture it overrides, one level + # up. + # Check how many `argname`s deep we are, and take the next one. + # `fixturedefs` is sorted from furthest to closest, so use negative + # indexing to go in reverse. + index = -1 + for request in self._iter_chain(): + if request.fixturename == argname: + index -= 1 + # If already consumed all of the available levels, fail. + if -index > len(fixturedefs): + raise FixtureLookupError(argname, self) + fixturedef = fixturedefs[index] + + # Prepare a SubRequest object for calling the fixture. + try: + callspec = self._pyfuncitem.callspec + except AttributeError: + callspec = None + if callspec is not None and argname in callspec.params: + param = callspec.params[argname] + param_index = callspec.indices[argname] + # The parametrize invocation scope overrides the fixture's scope. + scope = callspec._arg2scope[argname] + else: + param = NOTSET + param_index = 0 + scope = fixturedef._scope + self._check_fixturedef_without_param(fixturedef) + # The parametrize invocation scope only controls caching behavior while + # allowing wider-scoped fixtures to keep depending on the parametrized + # fixture. Scope control is enforced for parametrized fixtures + # by recreating the whole fixture tree on parameter change. + # Hence `fixturedef._scope`, not `scope`. 
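+        # [Editor's aside, not pytest source] Sketch of the situation the
+        # comment above describes (names illustrative):
+        #
+        #   @pytest.fixture(scope="session")
+        #   def cfg(request):
+        #       return request.param
+        #
+        #   @pytest.mark.parametrize("cfg", [1, 2], indirect=True, scope="function")
+        #   def test_cfg(cfg): ...
+        #
+        # The invocation scope ("function") only controls caching; the check
+        # below still uses the fixture's declared scope ("session").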
+ self._check_scope(fixturedef, fixturedef._scope) + subrequest = SubRequest( + self, scope, param, param_index, fixturedef, _ispytest=True + ) + + # Make sure the fixture value is cached, running it if it isn't + fixturedef.execute(request=subrequest) + + self._fixture_defs[argname] = fixturedef + return fixturedef + + def _check_fixturedef_without_param(self, fixturedef: FixtureDef[object]) -> None: + """Check that this request is allowed to execute this fixturedef without + a param.""" + funcitem = self._pyfuncitem + has_params = fixturedef.params is not None + fixtures_not_supported = getattr(funcitem, "nofuncargs", False) + if has_params and fixtures_not_supported: + msg = ( + f"{funcitem.name} does not support fixtures, maybe unittest.TestCase subclass?\n" + f"Node id: {funcitem.nodeid}\n" + f"Function type: {type(funcitem).__name__}" + ) + fail(msg, pytrace=False) + if has_params: + frame = inspect.stack()[3] + frameinfo = inspect.getframeinfo(frame[0]) + source_path = absolutepath(frameinfo.filename) + source_lineno = frameinfo.lineno + try: + source_path_str = str(source_path.relative_to(funcitem.config.rootpath)) + except ValueError: + source_path_str = str(source_path) + location = getlocation(fixturedef.func, funcitem.config.rootpath) + msg = ( + "The requested fixture has no parameter defined for test:\n" + f" {funcitem.nodeid}\n\n" + f"Requested fixture '{fixturedef.argname}' defined in:\n" + f"{location}\n\n" + f"Requested here:\n" + f"{source_path_str}:{source_lineno}" + ) + fail(msg, pytrace=False) + + def _get_fixturestack(self) -> list[FixtureDef[Any]]: + values = [request._fixturedef for request in self._iter_chain()] + values.reverse() + return values + + +@final +class TopRequest(FixtureRequest): + """The type of the ``request`` fixture in a test function.""" + + def __init__(self, pyfuncitem: Function, *, _ispytest: bool = False) -> None: + super().__init__( + fixturename=None, + pyfuncitem=pyfuncitem, + arg2fixturedefs=pyfuncitem._fixtureinfo.name2fixturedefs.copy(), + fixture_defs={}, + _ispytest=_ispytest, + ) + + @property + def _scope(self) -> Scope: + return Scope.Function + + def _check_scope( + self, + requested_fixturedef: FixtureDef[object] | PseudoFixtureDef[object], + requested_scope: Scope, + ) -> None: + # TopRequest always has function scope so always valid. 
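+        # [Editor's aside, not pytest source] SubRequest._check_scope below
+        # does enforce this; an illustrative failure:
+        #
+        #   @pytest.fixture(scope="session")
+        #   def sess(tmp_path):  # tmp_path is function-scoped
+        #       ...
+        #
+        # fails with "ScopeMismatch: You tried to access the function scoped
+        # fixture tmp_path with a session scoped request object."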
+        pass
+
+    @property
+    def node(self):
+        return self._pyfuncitem
+
+    def __repr__(self) -> str:
+        return f"<FixtureRequest for {self.node!r}>"
+
+    def _fillfixtures(self) -> None:
+        item = self._pyfuncitem
+        for argname in item.fixturenames:
+            if argname not in item.funcargs:
+                item.funcargs[argname] = self.getfixturevalue(argname)
+
+    def addfinalizer(self, finalizer: Callable[[], object]) -> None:
+        self.node.addfinalizer(finalizer)
+
+
+@final
+class SubRequest(FixtureRequest):
+    """The type of the ``request`` fixture in a fixture function requested
+    (transitively) by a test function."""
+
+    def __init__(
+        self,
+        request: FixtureRequest,
+        scope: Scope,
+        param: Any,
+        param_index: int,
+        fixturedef: FixtureDef[object],
+        *,
+        _ispytest: bool = False,
+    ) -> None:
+        super().__init__(
+            pyfuncitem=request._pyfuncitem,
+            fixturename=fixturedef.argname,
+            fixture_defs=request._fixture_defs,
+            arg2fixturedefs=request._arg2fixturedefs,
+            _ispytest=_ispytest,
+        )
+        self._parent_request: Final[FixtureRequest] = request
+        self._scope_field: Final = scope
+        self._fixturedef: Final[FixtureDef[object]] = fixturedef
+        if param is not NOTSET:
+            self.param = param
+        self.param_index: Final = param_index
+
+    def __repr__(self) -> str:
+        return f"<SubRequest {self.fixturename!r} for {self._pyfuncitem!r}>"
+
+    @property
+    def _scope(self) -> Scope:
+        return self._scope_field
+
+    @property
+    def node(self):
+        scope = self._scope
+        if scope is Scope.Function:
+            # This might also be a non-function Item despite its attribute name.
+            node: nodes.Node | None = self._pyfuncitem
+        elif scope is Scope.Package:
+            node = get_scope_package(self._pyfuncitem, self._fixturedef)
+        else:
+            node = get_scope_node(self._pyfuncitem, scope)
+            if node is None and scope is Scope.Class:
+                # Fallback to function item itself.
+                node = self._pyfuncitem
+        assert node, (
+            f'Could not obtain a node for scope "{scope}" for function {self._pyfuncitem!r}'
+        )
+        return node
+
+    def _check_scope(
+        self,
+        requested_fixturedef: FixtureDef[object] | PseudoFixtureDef[object],
+        requested_scope: Scope,
+    ) -> None:
+        if isinstance(requested_fixturedef, PseudoFixtureDef):
+            return
+        if self._scope > requested_scope:
+            # Try to report something helpful.
+            argname = requested_fixturedef.argname
+            fixture_stack = "\n".join(
+                self._format_fixturedef_line(fixturedef)
+                for fixturedef in self._get_fixturestack()
+            )
+            requested_fixture = self._format_fixturedef_line(requested_fixturedef)
+            fail(
+                f"ScopeMismatch: You tried to access the {requested_scope.value} scoped "
+                f"fixture {argname} with a {self._scope.value} scoped request object.
" + f"Requesting fixture stack:\n{fixture_stack}\n" + f"Requested fixture:\n{requested_fixture}", + pytrace=False, + ) + + def _format_fixturedef_line(self, fixturedef: FixtureDef[object]) -> str: + factory = fixturedef.func + path, lineno = getfslineno(factory) + if isinstance(path, Path): + path = bestrelpath(self._pyfuncitem.session.path, path) + sig = signature(factory) + return f"{path}:{lineno + 1}: def {factory.__name__}{sig}" + + def addfinalizer(self, finalizer: Callable[[], object]) -> None: + self._fixturedef.addfinalizer(finalizer) + + +@final +class FixtureLookupError(LookupError): + """Could not return a requested fixture (missing or invalid).""" + + def __init__( + self, argname: str | None, request: FixtureRequest, msg: str | None = None + ) -> None: + self.argname = argname + self.request = request + self.fixturestack = request._get_fixturestack() + self.msg = msg + + def formatrepr(self) -> FixtureLookupErrorRepr: + tblines: list[str] = [] + addline = tblines.append + stack = [self.request._pyfuncitem.obj] + stack.extend(map(lambda x: x.func, self.fixturestack)) + msg = self.msg + # This function currently makes an assumption that a non-None msg means we + # have a non-empty `self.fixturestack`. This is currently true, but if + # somebody at some point want to extend the use of FixtureLookupError to + # new cases it might break. + # Add the assert to make it clearer to developer that this will fail, otherwise + # it crashes because `fspath` does not get set due to `stack` being empty. + assert self.msg is None or self.fixturestack, ( + "formatrepr assumptions broken, rewrite it to handle it" + ) + if msg is not None: + # The last fixture raise an error, let's present + # it at the requesting side. + stack = stack[:-1] + for function in stack: + fspath, lineno = getfslineno(function) + try: + lines, _ = inspect.getsourcelines(get_real_func(function)) + except (OSError, IndexError, TypeError): + error_msg = "file %s, line %s: source code not available" + addline(error_msg % (fspath, lineno + 1)) + else: + addline(f"file {fspath}, line {lineno + 1}") + for i, line in enumerate(lines): + line = line.rstrip() + addline(" " + line) + if line.lstrip().startswith("def"): + break + + if msg is None: + fm = self.request._fixturemanager + available = set() + parent = self.request._pyfuncitem.parent + assert parent is not None + for name, fixturedefs in fm._arg2fixturedefs.items(): + faclist = list(fm._matchfactories(fixturedefs, parent)) + if faclist: + available.add(name) + if self.argname in available: + msg = ( + f" recursive dependency involving fixture '{self.argname}' detected" + ) + else: + msg = f"fixture '{self.argname}' not found" + msg += "\n available fixtures: {}".format(", ".join(sorted(available))) + msg += "\n use 'pytest --fixtures [testpath]' for help on them." 
+ + return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname) + + +class FixtureLookupErrorRepr(TerminalRepr): + def __init__( + self, + filename: str | os.PathLike[str], + firstlineno: int, + tblines: Sequence[str], + errorstring: str, + argname: str | None, + ) -> None: + self.tblines = tblines + self.errorstring = errorstring + self.filename = filename + self.firstlineno = firstlineno + self.argname = argname + + def toterminal(self, tw: TerminalWriter) -> None: + # tw.line("FixtureLookupError: %s" %(self.argname), red=True) + for tbline in self.tblines: + tw.line(tbline.rstrip()) + lines = self.errorstring.split("\n") + if lines: + tw.line( + f"{FormattedExcinfo.fail_marker} {lines[0].strip()}", + red=True, + ) + for line in lines[1:]: + tw.line( + f"{FormattedExcinfo.flow_marker} {line.strip()}", + red=True, + ) + tw.line() + tw.line(f"{os.fspath(self.filename)}:{self.firstlineno + 1}") + + +def call_fixture_func( + fixturefunc: _FixtureFunc[FixtureValue], request: FixtureRequest, kwargs +) -> FixtureValue: + if inspect.isgeneratorfunction(fixturefunc): + fixturefunc = cast(Callable[..., Generator[FixtureValue]], fixturefunc) + generator = fixturefunc(**kwargs) + try: + fixture_result = next(generator) + except StopIteration: + raise ValueError(f"{request.fixturename} did not yield a value") from None + finalizer = functools.partial(_teardown_yield_fixture, fixturefunc, generator) + request.addfinalizer(finalizer) + else: + fixturefunc = cast(Callable[..., FixtureValue], fixturefunc) + fixture_result = fixturefunc(**kwargs) + return fixture_result + + +def _teardown_yield_fixture(fixturefunc, it) -> None: + """Execute the teardown of a fixture function by advancing the iterator + after the yield and ensure the iteration ends (if not it means there is + more than one yield in the function).""" + try: + next(it) + except StopIteration: + pass + else: + fs, lineno = getfslineno(fixturefunc) + fail( + f"fixture function has more than one 'yield':\n\n" + f"{Source(fixturefunc).indent()}\n" + f"{fs}:{lineno + 1}", + pytrace=False, + ) + + +def _eval_scope_callable( + scope_callable: Callable[[str, Config], _ScopeName], + fixture_name: str, + config: Config, +) -> _ScopeName: + try: + # Type ignored because there is no typing mechanism to specify + # keyword arguments, currently. + result = scope_callable(fixture_name=fixture_name, config=config) # type: ignore[call-arg] + except Exception as e: + raise TypeError( + f"Error evaluating {scope_callable} while defining fixture '{fixture_name}'.\n" + "Expected a function with the signature (*, fixture_name, config)" + ) from e + if not isinstance(result, str): + fail( + f"Expected {scope_callable} to return a 'str' while defining fixture '{fixture_name}', but it returned:\n" + f"{result!r}", + pytrace=False, + ) + return result + + +@final +class FixtureDef(Generic[FixtureValue]): + """A container for a fixture definition. + + Note: At this time, only explicitly documented fields and methods are + considered public stable API. + """ + + def __init__( + self, + config: Config, + baseid: str | None, + argname: str, + func: _FixtureFunc[FixtureValue], + scope: Scope | _ScopeName | Callable[[str, Config], _ScopeName] | None, + params: Sequence[object] | None, + ids: tuple[object | None, ...] 
| Callable[[Any], object | None] | None = None, + *, + _ispytest: bool = False, + # only used in a deprecationwarning msg, can be removed in pytest9 + _autouse: bool = False, + ) -> None: + check_ispytest(_ispytest) + # The "base" node ID for the fixture. + # + # This is a node ID prefix. A fixture is only available to a node (e.g. + # a `Function` item) if the fixture's baseid is a nodeid of a parent of + # node. + # + # For a fixture found in a Collector's object (e.g. a `Module`s module, + # a `Class`'s class), the baseid is the Collector's nodeid. + # + # For a fixture found in a conftest plugin, the baseid is the conftest's + # directory path relative to the rootdir. + # + # For other plugins, the baseid is the empty string (always matches). + self.baseid: Final = baseid or "" + # Whether the fixture was found from a node or a conftest in the + # collection tree. Will be false for fixtures defined in non-conftest + # plugins. + self.has_location: Final = baseid is not None + # The fixture factory function. + self.func: Final = func + # The name by which the fixture may be requested. + self.argname: Final = argname + if scope is None: + scope = Scope.Function + elif callable(scope): + scope = _eval_scope_callable(scope, argname, config) + if isinstance(scope, str): + scope = Scope.from_user( + scope, descr=f"Fixture '{func.__name__}'", where=baseid + ) + self._scope: Final = scope + # If the fixture is directly parametrized, the parameter values. + self.params: Final = params + # If the fixture is directly parametrized, a tuple of explicit IDs to + # assign to the parameter values, or a callable to generate an ID given + # a parameter value. + self.ids: Final = ids + # The names requested by the fixtures. + self.argnames: Final = getfuncargnames(func, name=argname) + # If the fixture was executed, the current value of the fixture. + # Can change if the fixture is executed with different parameters. + self.cached_result: _FixtureCachedResult[FixtureValue] | None = None + self._finalizers: Final[list[Callable[[], object]]] = [] + + # only used to emit a deprecationwarning, can be removed in pytest9 + self._autouse = _autouse + + @property + def scope(self) -> _ScopeName: + """Scope string, one of "function", "class", "module", "package", "session".""" + return self._scope.value + + def addfinalizer(self, finalizer: Callable[[], object]) -> None: + self._finalizers.append(finalizer) + + def finish(self, request: SubRequest) -> None: + exceptions: list[BaseException] = [] + while self._finalizers: + fin = self._finalizers.pop() + try: + fin() + except BaseException as e: + exceptions.append(e) + node = request.node + node.ihook.pytest_fixture_post_finalizer(fixturedef=self, request=request) + # Even if finalization fails, we invalidate the cached fixture + # value and remove all finalizers because they may be bound methods + # which will keep instances alive. + self.cached_result = None + self._finalizers.clear() + if len(exceptions) == 1: + raise exceptions[0] + elif len(exceptions) > 1: + msg = f'errors while tearing down fixture "{self.argname}" of {node}' + raise BaseExceptionGroup(msg, exceptions[::-1]) + + def execute(self, request: SubRequest) -> FixtureValue: + """Return the value of this fixture, executing it if not cached.""" + # Ensure that the dependent fixtures requested by this fixture are loaded. + # This needs to be done before checking if we have a cached value, since + # if a dependent fixture has their cache invalidated, e.g. 
due to + # parametrization, they finalize themselves and fixtures depending on it + # (which will likely include this fixture) setting `self.cached_result = None`. + # See #4871 + requested_fixtures_that_should_finalize_us = [] + for argname in self.argnames: + fixturedef = request._get_active_fixturedef(argname) + # Saves requested fixtures in a list so we later can add our finalizer + # to them, ensuring that if a requested fixture gets torn down we get torn + # down first. This is generally handled by SetupState, but still currently + # needed when this fixture is not parametrized but depends on a parametrized + # fixture. + if not isinstance(fixturedef, PseudoFixtureDef): + requested_fixtures_that_should_finalize_us.append(fixturedef) + + # Check for (and return) cached value/exception. + if self.cached_result is not None: + request_cache_key = self.cache_key(request) + cache_key = self.cached_result[1] + try: + # Attempt to make a normal == check: this might fail for objects + # which do not implement the standard comparison (like numpy arrays -- #6497). + cache_hit = bool(request_cache_key == cache_key) + except (ValueError, RuntimeError): + # If the comparison raises, use 'is' as fallback. + cache_hit = request_cache_key is cache_key + + if cache_hit: + if self.cached_result[2] is not None: + exc, exc_tb = self.cached_result[2] + raise exc.with_traceback(exc_tb) + else: + result = self.cached_result[0] + return result + # We have a previous but differently parametrized fixture instance + # so we need to tear it down before creating a new one. + self.finish(request) + assert self.cached_result is None + + # Add finalizer to requested fixtures we saved previously. + # We make sure to do this after checking for cached value to avoid + # adding our finalizer multiple times. (#12135) + finalizer = functools.partial(self.finish, request=request) + for parent_fixture in requested_fixtures_that_should_finalize_us: + parent_fixture.addfinalizer(finalizer) + + ihook = request.node.ihook + try: + # Setup the fixture, run the code in it, and cache the value + # in self.cached_result + result = ihook.pytest_fixture_setup(fixturedef=self, request=request) + finally: + # schedule our finalizer, even if the setup failed + request.node.addfinalizer(finalizer) + + return result + + def cache_key(self, request: SubRequest) -> object: + return getattr(request, "param", None) + + def __repr__(self) -> str: + return f"" + + +def resolve_fixture_function( + fixturedef: FixtureDef[FixtureValue], request: FixtureRequest +) -> _FixtureFunc[FixtureValue]: + """Get the actual callable that can be called to obtain the fixture + value.""" + fixturefunc = fixturedef.func + # The fixture function needs to be bound to the actual + # request.instance so that code working with "fixturedef" behaves + # as expected. + instance = request.instance + if instance is not None: + # Handle the case where fixture is defined not in a test class, but some other class + # (for example a plugin class with a fixture), see #2270. 
+ if hasattr(fixturefunc, "__self__") and not isinstance( + instance, + fixturefunc.__self__.__class__, + ): + return fixturefunc + fixturefunc = getimfunc(fixturedef.func) + if fixturefunc != fixturedef.func: + fixturefunc = fixturefunc.__get__(instance) + return fixturefunc + + +def pytest_fixture_setup( + fixturedef: FixtureDef[FixtureValue], request: SubRequest +) -> FixtureValue: + """Execution of fixture setup.""" + kwargs = {} + for argname in fixturedef.argnames: + kwargs[argname] = request.getfixturevalue(argname) + + fixturefunc = resolve_fixture_function(fixturedef, request) + my_cache_key = fixturedef.cache_key(request) + + if inspect.isasyncgenfunction(fixturefunc) or inspect.iscoroutinefunction( + fixturefunc + ): + auto_str = " with autouse=True" if fixturedef._autouse else "" + + warnings.warn( + PytestRemovedIn9Warning( + f"{request.node.name!r} requested an async fixture " + f"{request.fixturename!r}{auto_str}, with no plugin or hook that " + "handled it. This is usually an error, as pytest does not natively " + "support it. " + "This will turn into an error in pytest 9.\n" + "See: https://docs.pytest.org/en/stable/deprecations.html#sync-test-depending-on-async-fixture" + ), + # no stacklevel will point at users code, so we just point here + stacklevel=1, + ) + + try: + result = call_fixture_func(fixturefunc, request, kwargs) + except TEST_OUTCOME as e: + if isinstance(e, skip.Exception): + # The test requested a fixture which caused a skip. + # Don't show the fixture as the skip location, as then the user + # wouldn't know which test skipped. + e._use_item_location = True + fixturedef.cached_result = (None, my_cache_key, (e, e.__traceback__)) + raise + fixturedef.cached_result = (result, my_cache_key, None) + return result + + +@final +@dataclasses.dataclass(frozen=True) +class FixtureFunctionMarker: + scope: _ScopeName | Callable[[str, Config], _ScopeName] + params: tuple[object, ...] | None + autouse: bool = False + ids: tuple[object | None, ...] | Callable[[Any], object | None] | None = None + name: str | None = None + + _ispytest: dataclasses.InitVar[bool] = False + + def __post_init__(self, _ispytest: bool) -> None: + check_ispytest(_ispytest) + + def __call__(self, function: FixtureFunction) -> FixtureFunctionDefinition: + if inspect.isclass(function): + raise ValueError("class fixtures not supported (maybe in the future)") + + if isinstance(function, FixtureFunctionDefinition): + raise ValueError( + f"@pytest.fixture is being applied more than once to the same function {function.__name__!r}" + ) + + if hasattr(function, "pytestmark"): + warnings.warn(MARKED_FIXTURE, stacklevel=2) + + fixture_definition = FixtureFunctionDefinition( + function=function, fixture_function_marker=self, _ispytest=True + ) + + name = self.name or function.__name__ + if name == "request": + location = getlocation(function) + fail( + f"'request' is a reserved word for fixtures, use another name:\n {location}", + pytrace=False, + ) + + return fixture_definition + + +# TODO: paramspec/return type annotation tracking and storing +class FixtureFunctionDefinition: + def __init__( + self, + *, + function: Callable[..., Any], + fixture_function_marker: FixtureFunctionMarker, + instance: object | None = None, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + self.name = fixture_function_marker.name or function.__name__ + # In order to show the function that this fixture contains in messages. + # Set the __name__ to be same as the function __name__ or the given fixture name. 
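+        # [Editor's aside, not pytest source] After decoration the module
+        # attribute is a FixtureFunctionDefinition, not a plain function:
+        #
+        #   @pytest.fixture
+        #   def one():
+        #       return 1
+        #
+        # `one` can no longer be called directly (see __call__ below); pytest
+        # recognizes it during collection via parsefactories().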
+        self.__name__ = self.name
+        self._fixture_function_marker = fixture_function_marker
+        if instance is not None:
+            self._fixture_function = cast(
+                Callable[..., Any], function.__get__(instance)
+            )
+        else:
+            self._fixture_function = function
+        functools.update_wrapper(self, function)
+
+    def __repr__(self) -> str:
+        return f"<pytest_fixture({self._fixture_function})>"
+
+    def __get__(self, instance, owner=None):
+        """Behave like a method if the function it was applied to was a method."""
+        return FixtureFunctionDefinition(
+            function=self._fixture_function,
+            fixture_function_marker=self._fixture_function_marker,
+            instance=instance,
+            _ispytest=True,
+        )
+
+    def __call__(self, *args: Any, **kwds: Any) -> Any:
+        message = (
+            f'Fixture "{self.name}" called directly. Fixtures are not meant to be called directly,\n'
+            "but are created automatically when test functions request them as parameters.\n"
+            "See https://docs.pytest.org/en/stable/explanation/fixtures.html for more information about fixtures, and\n"
+            "https://docs.pytest.org/en/stable/deprecations.html#calling-fixtures-directly"
+        )
+        fail(message, pytrace=False)
+
+    def _get_wrapped_function(self) -> Callable[..., Any]:
+        return self._fixture_function
+
+
+@overload
+def fixture(
+    fixture_function: Callable[..., object],
+    *,
+    scope: _ScopeName | Callable[[str, Config], _ScopeName] = ...,
+    params: Iterable[object] | None = ...,
+    autouse: bool = ...,
+    ids: Sequence[object | None] | Callable[[Any], object | None] | None = ...,
+    name: str | None = ...,
+) -> FixtureFunctionDefinition: ...
+
+
+@overload
+def fixture(
+    fixture_function: None = ...,
+    *,
+    scope: _ScopeName | Callable[[str, Config], _ScopeName] = ...,
+    params: Iterable[object] | None = ...,
+    autouse: bool = ...,
+    ids: Sequence[object | None] | Callable[[Any], object | None] | None = ...,
+    name: str | None = None,
+) -> FixtureFunctionMarker: ...
+
+
+def fixture(
+    fixture_function: FixtureFunction | None = None,
+    *,
+    scope: _ScopeName | Callable[[str, Config], _ScopeName] = "function",
+    params: Iterable[object] | None = None,
+    autouse: bool = False,
+    ids: Sequence[object | None] | Callable[[Any], object | None] | None = None,
+    name: str | None = None,
+) -> FixtureFunctionMarker | FixtureFunctionDefinition:
+    """Decorator to mark a fixture factory function.
+
+    This decorator can be used, with or without parameters, to define a
+    fixture function.
+
+    The name of the fixture function can later be referenced to cause its
+    invocation ahead of running tests: test modules or classes can use the
+    ``pytest.mark.usefixtures(fixturename)`` marker.
+
+    Test functions can directly use fixture names as input arguments in which
+    case the fixture instance returned from the fixture function will be
+    injected.
+
+    Fixtures can provide their values to test functions using ``return`` or
+    ``yield`` statements. When using ``yield`` the code block after the
+    ``yield`` statement is executed as teardown code regardless of the test
+    outcome, and must yield exactly once.
+
+    :param scope:
+        The scope for which this fixture is shared; one of ``"function"``
+        (default), ``"class"``, ``"module"``, ``"package"`` or ``"session"``.
+
+        This parameter may also be a callable which receives ``(fixture_name, config)``
+        as parameters, and must return a ``str`` with one of the values mentioned above.
+
+        See :ref:`dynamic scope` in the docs for more information.
+
+    :param params:
+        An optional list of parameters which will cause multiple invocations
+        of the fixture function and all of the tests using it. The current
+        parameter is available in ``request.param``.
+
+    :param autouse:
+        If True, the fixture func is activated for all tests that can see it.
+        If False (the default), an explicit reference is needed to activate
+        the fixture.
+
+    :param ids:
+        Sequence of ids each corresponding to the params so that they are
+        part of the test id. If no ids are provided they will be generated
+        automatically from the params.
+
+    :param name:
+        The name of the fixture. This defaults to the name of the decorated
+        function. If a fixture is used in the same module in which it is
+        defined, the function name of the fixture will be shadowed by the
+        function arg that requests the fixture; one way to resolve this is to
+        name the decorated function ``fixture_<fixturename>`` and then use
+        ``@pytest.fixture(name='<fixturename>')``.
+    """
+    fixture_marker = FixtureFunctionMarker(
+        scope=scope,
+        params=tuple(params) if params is not None else None,
+        autouse=autouse,
+        ids=None if ids is None else ids if callable(ids) else tuple(ids),
+        name=name,
+        _ispytest=True,
+    )
+
+    # Direct decoration.
+    if fixture_function:
+        return fixture_marker(fixture_function)
+
+    return fixture_marker
+
+
+def yield_fixture(
+    fixture_function=None,
+    *args,
+    scope="function",
+    params=None,
+    autouse=False,
+    ids=None,
+    name=None,
+):
+    """(Return a) decorator to mark a yield-fixture factory function.
+
+    .. deprecated:: 3.0
+        Use :py:func:`pytest.fixture` directly instead.
+    """
+    warnings.warn(YIELD_FIXTURE, stacklevel=2)
+    return fixture(
+        fixture_function,
+        *args,
+        scope=scope,
+        params=params,
+        autouse=autouse,
+        ids=ids,
+        name=name,
+    )
+
+
+@fixture(scope="session")
+def pytestconfig(request: FixtureRequest) -> Config:
+    """Session-scoped fixture that returns the session's :class:`pytest.Config`
+    object.
+
+    Example::
+
+        def test_foo(pytestconfig):
+            if pytestconfig.get_verbosity() > 0:
+                ...
+
+    """
+    return request.config
+
+
+def pytest_addoption(parser: Parser) -> None:
+    parser.addini(
+        "usefixtures",
+        type="args",
+        default=[],
+        help="List of default fixtures to be used with this project",
+    )
+    group = parser.getgroup("general")
+    group.addoption(
+        "--fixtures",
+        "--funcargs",
+        action="store_true",
+        dest="showfixtures",
+        default=False,
+        help="Show available fixtures, sorted by plugin appearance "
+        "(fixtures with leading '_' are only shown with '-v')",
+    )
+    group.addoption(
+        "--fixtures-per-test",
+        action="store_true",
+        dest="show_fixtures_per_test",
+        default=False,
+        help="Show fixtures per test",
+    )
+
+
+def pytest_cmdline_main(config: Config) -> int | ExitCode | None:
+    if config.option.showfixtures:
+        showfixtures(config)
+        return 0
+    if config.option.show_fixtures_per_test:
+        show_fixtures_per_test(config)
+        return 0
+    return None
+
+
+def _get_direct_parametrize_args(node: nodes.Node) -> set[str]:
+    """Return all direct parametrization arguments of a node, so we don't
+    mistake them for fixtures.
+
+    Check https://github.com/pytest-dev/pytest/issues/5036.
+
+    These things are done later as well when dealing with parametrization
+    so this could be improved.
+ """ + parametrize_argnames: set[str] = set() + for marker in node.iter_markers(name="parametrize"): + if not marker.kwargs.get("indirect", False): + p_argnames, _ = ParameterSet._parse_parametrize_args( + *marker.args, **marker.kwargs + ) + parametrize_argnames.update(p_argnames) + return parametrize_argnames + + +def deduplicate_names(*seqs: Iterable[str]) -> tuple[str, ...]: + """De-duplicate the sequence of names while keeping the original order.""" + # Ideally we would use a set, but it does not preserve insertion order. + return tuple(dict.fromkeys(name for seq in seqs for name in seq)) + + +class FixtureManager: + """pytest fixture definitions and information is stored and managed + from this class. + + During collection fm.parsefactories() is called multiple times to parse + fixture function definitions into FixtureDef objects and internal + data structures. + + During collection of test functions, metafunc-mechanics instantiate + a FuncFixtureInfo object which is cached per node/func-name. + This FuncFixtureInfo object is later retrieved by Function nodes + which themselves offer a fixturenames attribute. + + The FuncFixtureInfo object holds information about fixtures and FixtureDefs + relevant for a particular function. An initial list of fixtures is + assembled like this: + + - ini-defined usefixtures + - autouse-marked fixtures along the collection chain up from the function + - usefixtures markers at module/class/function level + - test function funcargs + + Subsequently the funcfixtureinfo.fixturenames attribute is computed + as the closure of the fixtures needed to setup the initial fixtures, + i.e. fixtures needed by fixture functions themselves are appended + to the fixturenames list. + + Upon the test-setup phases all fixturenames are instantiated, retrieved + by a lookup of their FuncFixtureInfo. + """ + + def __init__(self, session: Session) -> None: + self.session = session + self.config: Config = session.config + # Maps a fixture name (argname) to all of the FixtureDefs in the test + # suite/plugins defined with this name. Populated by parsefactories(). + # TODO: The order of the FixtureDefs list of each arg is significant, + # explain. + self._arg2fixturedefs: Final[dict[str, list[FixtureDef[Any]]]] = {} + self._holderobjseen: Final[set[object]] = set() + # A mapping from a nodeid to a list of autouse fixtures it defines. + self._nodeid_autousenames: Final[dict[str, list[str]]] = { + "": self.config.getini("usefixtures"), + } + session.config.pluginmanager.register(self, "funcmanage") + + def getfixtureinfo( + self, + node: nodes.Item, + func: Callable[..., object] | None, + cls: type | None, + ) -> FuncFixtureInfo: + """Calculate the :class:`FuncFixtureInfo` for an item. + + If ``func`` is None, or if the item sets an attribute + ``nofuncargs = True``, then ``func`` is not examined at all. + + :param node: + The item requesting the fixtures. + :param func: + The item's function. + :param cls: + If the function is a method, the method's class. 
+ """ + if func is not None and not getattr(node, "nofuncargs", False): + argnames = getfuncargnames(func, name=node.name, cls=cls) + else: + argnames = () + usefixturesnames = self._getusefixturesnames(node) + autousenames = self._getautousenames(node) + initialnames = deduplicate_names(autousenames, usefixturesnames, argnames) + + direct_parametrize_args = _get_direct_parametrize_args(node) + + names_closure, arg2fixturedefs = self.getfixtureclosure( + parentnode=node, + initialnames=initialnames, + ignore_args=direct_parametrize_args, + ) + + return FuncFixtureInfo(argnames, initialnames, names_closure, arg2fixturedefs) + + def pytest_plugin_registered(self, plugin: _PluggyPlugin, plugin_name: str) -> None: + # Fixtures defined in conftest plugins are only visible to within the + # conftest's directory. This is unlike fixtures in non-conftest plugins + # which have global visibility. So for conftests, construct the base + # nodeid from the plugin name (which is the conftest path). + if plugin_name and plugin_name.endswith("conftest.py"): + # Note: we explicitly do *not* use `plugin.__file__` here -- The + # difference is that plugin_name has the correct capitalization on + # case-insensitive systems (Windows) and other normalization issues + # (issue #11816). + conftestpath = absolutepath(plugin_name) + try: + nodeid = str(conftestpath.parent.relative_to(self.config.rootpath)) + except ValueError: + nodeid = "" + if nodeid == ".": + nodeid = "" + if os.sep != nodes.SEP: + nodeid = nodeid.replace(os.sep, nodes.SEP) + else: + nodeid = None + + self.parsefactories(plugin, nodeid) + + def _getautousenames(self, node: nodes.Node) -> Iterator[str]: + """Return the names of autouse fixtures applicable to node.""" + for parentnode in node.listchain(): + basenames = self._nodeid_autousenames.get(parentnode.nodeid) + if basenames: + yield from basenames + + def _getusefixturesnames(self, node: nodes.Item) -> Iterator[str]: + """Return the names of usefixtures fixtures applicable to node.""" + for marker_node, mark in node.iter_markers_with_node(name="usefixtures"): + if not mark.args: + marker_node.warn( + PytestWarning( + f"usefixtures() in {node.nodeid} without arguments has no effect" + ) + ) + yield from mark.args + + def getfixtureclosure( + self, + parentnode: nodes.Node, + initialnames: tuple[str, ...], + ignore_args: AbstractSet[str], + ) -> tuple[list[str], dict[str, Sequence[FixtureDef[Any]]]]: + # Collect the closure of all fixtures, starting with the given + # fixturenames as the initial set. As we have to visit all + # factory definitions anyway, we also return an arg2fixturedefs + # mapping so that the caller can reuse it and does not have + # to re-discover fixturedefs again for each fixturename + # (discovering matching fixtures for a given name/node is expensive). 
+ + fixturenames_closure = list(initialnames) + + arg2fixturedefs: dict[str, Sequence[FixtureDef[Any]]] = {} + lastlen = -1 + while lastlen != len(fixturenames_closure): + lastlen = len(fixturenames_closure) + for argname in fixturenames_closure: + if argname in ignore_args: + continue + if argname in arg2fixturedefs: + continue + fixturedefs = self.getfixturedefs(argname, parentnode) + if fixturedefs: + arg2fixturedefs[argname] = fixturedefs + for arg in fixturedefs[-1].argnames: + if arg not in fixturenames_closure: + fixturenames_closure.append(arg) + + def sort_by_scope(arg_name: str) -> Scope: + try: + fixturedefs = arg2fixturedefs[arg_name] + except KeyError: + return Scope.Function + else: + return fixturedefs[-1]._scope + + fixturenames_closure.sort(key=sort_by_scope, reverse=True) + return fixturenames_closure, arg2fixturedefs + + def pytest_generate_tests(self, metafunc: Metafunc) -> None: + """Generate new tests based on parametrized fixtures used by the given metafunc""" + + def get_parametrize_mark_argnames(mark: Mark) -> Sequence[str]: + args, _ = ParameterSet._parse_parametrize_args(*mark.args, **mark.kwargs) + return args + + for argname in metafunc.fixturenames: + # Get the FixtureDefs for the argname. + fixture_defs = metafunc._arg2fixturedefs.get(argname) + if not fixture_defs: + # Will raise FixtureLookupError at setup time if not parametrized somewhere + # else (e.g @pytest.mark.parametrize) + continue + + # If the test itself parametrizes using this argname, give it + # precedence. + if any( + argname in get_parametrize_mark_argnames(mark) + for mark in metafunc.definition.iter_markers("parametrize") + ): + continue + + # In the common case we only look at the fixture def with the + # closest scope (last in the list). But if the fixture overrides + # another fixture, while requesting the super fixture, keep going + # in case the super fixture is parametrized (#1953). + for fixturedef in reversed(fixture_defs): + # Fixture is parametrized, apply it and stop. + if fixturedef.params is not None: + metafunc.parametrize( + argname, + fixturedef.params, + indirect=True, + scope=fixturedef.scope, + ids=fixturedef.ids, + ) + break + + # Not requesting the overridden super fixture, stop. + if argname not in fixturedef.argnames: + break + + # Try next super fixture, if any. + + def pytest_collection_modifyitems(self, items: list[nodes.Item]) -> None: + # Separate parametrized setups. + items[:] = reorder_items(items) + + def _register_fixture( + self, + *, + name: str, + func: _FixtureFunc[object], + nodeid: str | None, + scope: Scope | _ScopeName | Callable[[str, Config], _ScopeName] = "function", + params: Sequence[object] | None = None, + ids: tuple[object | None, ...] | Callable[[Any], object | None] | None = None, + autouse: bool = False, + ) -> None: + """Register a fixture + + :param name: + The fixture's name. + :param func: + The fixture's implementation function. + :param nodeid: + The visibility of the fixture. The fixture will be available to the + node with this nodeid and its children in the collection tree. + None means that the fixture is visible to the entire collection tree, + e.g. a fixture defined for general use in a plugin. + :param scope: + The fixture's scope. + :param params: + The fixture's parametrization params. + :param ids: + The fixture's IDs. + :param autouse: + Whether this is an autouse fixture. 
+ """ + fixture_def = FixtureDef( + config=self.config, + baseid=nodeid, + argname=name, + func=func, + scope=scope, + params=params, + ids=ids, + _ispytest=True, + _autouse=autouse, + ) + + faclist = self._arg2fixturedefs.setdefault(name, []) + if fixture_def.has_location: + faclist.append(fixture_def) + else: + # fixturedefs with no location are at the front + # so this inserts the current fixturedef after the + # existing fixturedefs from external plugins but + # before the fixturedefs provided in conftests. + i = len([f for f in faclist if not f.has_location]) + faclist.insert(i, fixture_def) + if autouse: + self._nodeid_autousenames.setdefault(nodeid or "", []).append(name) + + @overload + def parsefactories( + self, + node_or_obj: nodes.Node, + ) -> None: + raise NotImplementedError() + + @overload + def parsefactories( + self, + node_or_obj: object, + nodeid: str | None, + ) -> None: + raise NotImplementedError() + + def parsefactories( + self, + node_or_obj: nodes.Node | object, + nodeid: str | NotSetType | None = NOTSET, + ) -> None: + """Collect fixtures from a collection node or object. + + Found fixtures are parsed into `FixtureDef`s and saved. + + If `node_or_object` is a collection node (with an underlying Python + object), the node's object is traversed and the node's nodeid is used to + determine the fixtures' visibility. `nodeid` must not be specified in + this case. + + If `node_or_object` is an object (e.g. a plugin), the object is + traversed and the given `nodeid` is used to determine the fixtures' + visibility. `nodeid` must be specified in this case; None and "" mean + total visibility. + """ + if nodeid is not NOTSET: + holderobj = node_or_obj + else: + assert isinstance(node_or_obj, nodes.Node) + holderobj = cast(object, node_or_obj.obj) # type: ignore[attr-defined] + assert isinstance(node_or_obj.nodeid, str) + nodeid = node_or_obj.nodeid + if holderobj in self._holderobjseen: + return + + # Avoid accessing `@property` (and other descriptors) when iterating fixtures. + if not safe_isclass(holderobj) and not isinstance(holderobj, types.ModuleType): + holderobj_tp: object = type(holderobj) + else: + holderobj_tp = holderobj + + self._holderobjseen.add(holderobj) + for name in dir(holderobj): + # The attribute can be an arbitrary descriptor, so the attribute + # access below can raise. safe_getattr() ignores such exceptions. + obj_ub = safe_getattr(holderobj_tp, name, None) + if type(obj_ub) is FixtureFunctionDefinition: + marker = obj_ub._fixture_function_marker + if marker.name: + fixture_name = marker.name + else: + fixture_name = name + + # OK we know it is a fixture -- now safe to look up on the _instance_. + try: + obj = getattr(holderobj, name) + # if the fixture is named in the decorator we cannot find it in the module + except AttributeError: + obj = obj_ub + + func = obj._get_wrapped_function() + + self._register_fixture( + name=fixture_name, + nodeid=nodeid, + func=func, + scope=marker.scope, + params=marker.params, + ids=marker.ids, + autouse=marker.autouse, + ) + + def getfixturedefs( + self, argname: str, node: nodes.Node + ) -> Sequence[FixtureDef[Any]] | None: + """Get FixtureDefs for a fixture name which are applicable + to a given node. + + Returns None if there are no fixtures at all defined with the given + name. (This is different from the case in which there are fixtures + with the given name, but none applicable to the node. In this case, + an empty result is returned). + + :param argname: Name of the fixture to search for. 
+ :param node: The requesting Node. + """ + try: + fixturedefs = self._arg2fixturedefs[argname] + except KeyError: + return None + return tuple(self._matchfactories(fixturedefs, node)) + + def _matchfactories( + self, fixturedefs: Iterable[FixtureDef[Any]], node: nodes.Node + ) -> Iterator[FixtureDef[Any]]: + parentnodeids = {n.nodeid for n in node.iter_parents()} + for fixturedef in fixturedefs: + if fixturedef.baseid in parentnodeids: + yield fixturedef + + +def show_fixtures_per_test(config: Config) -> int | ExitCode: + from _pytest.main import wrap_session + + return wrap_session(config, _show_fixtures_per_test) + + +_PYTEST_DIR = Path(_pytest.__file__).parent + + +def _pretty_fixture_path(invocation_dir: Path, func) -> str: + loc = Path(getlocation(func, invocation_dir)) + prefix = Path("...", "_pytest") + try: + return str(prefix / loc.relative_to(_PYTEST_DIR)) + except ValueError: + return bestrelpath(invocation_dir, loc) + + +def _show_fixtures_per_test(config: Config, session: Session) -> None: + import _pytest.config + + session.perform_collect() + invocation_dir = config.invocation_params.dir + tw = _pytest.config.create_terminal_writer(config) + verbose = config.get_verbosity() + + def get_best_relpath(func) -> str: + loc = getlocation(func, invocation_dir) + return bestrelpath(invocation_dir, Path(loc)) + + def write_fixture(fixture_def: FixtureDef[object]) -> None: + argname = fixture_def.argname + if verbose <= 0 and argname.startswith("_"): + return + prettypath = _pretty_fixture_path(invocation_dir, fixture_def.func) + tw.write(f"{argname}", green=True) + tw.write(f" -- {prettypath}", yellow=True) + tw.write("\n") + fixture_doc = inspect.getdoc(fixture_def.func) + if fixture_doc: + write_docstring( + tw, + fixture_doc.split("\n\n", maxsplit=1)[0] + if verbose <= 0 + else fixture_doc, + ) + else: + tw.line(" no docstring available", red=True) + + def write_item(item: nodes.Item) -> None: + # Not all items have _fixtureinfo attribute. + info: FuncFixtureInfo | None = getattr(item, "_fixtureinfo", None) + if info is None or not info.name2fixturedefs: + # This test item does not use any fixtures. + return + tw.line() + tw.sep("-", f"fixtures used by {item.name}") + # TODO: Fix this type ignore. + tw.sep("-", f"({get_best_relpath(item.function)})") # type: ignore[attr-defined] + # dict key not used in loop but needed for sorting. + for _, fixturedefs in sorted(info.name2fixturedefs.items()): + assert fixturedefs is not None + if not fixturedefs: + continue + # Last item is expected to be the one used by the test item. 
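+        # [Editor's aside, not pytest source] This implements
+        # `pytest --fixtures-per-test`; output shape (illustrative):
+        #
+        #   -------- fixtures used by test_copy --------
+        #   tmp_path -- .../_pytest/tmpdir.py
+        #       Return a temporary directory path object ...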
+ write_fixture(fixturedefs[-1]) + + for session_item in session.items: + write_item(session_item) + + +def showfixtures(config: Config) -> int | ExitCode: + from _pytest.main import wrap_session + + return wrap_session(config, _showfixtures_main) + + +def _showfixtures_main(config: Config, session: Session) -> None: + import _pytest.config + + session.perform_collect() + invocation_dir = config.invocation_params.dir + tw = _pytest.config.create_terminal_writer(config) + verbose = config.get_verbosity() + + fm = session._fixturemanager + + available = [] + seen: set[tuple[str, str]] = set() + + for argname, fixturedefs in fm._arg2fixturedefs.items(): + assert fixturedefs is not None + if not fixturedefs: + continue + for fixturedef in fixturedefs: + loc = getlocation(fixturedef.func, invocation_dir) + if (fixturedef.argname, loc) in seen: + continue + seen.add((fixturedef.argname, loc)) + available.append( + ( + len(fixturedef.baseid), + fixturedef.func.__module__, + _pretty_fixture_path(invocation_dir, fixturedef.func), + fixturedef.argname, + fixturedef, + ) + ) + + available.sort() + currentmodule = None + for baseid, module, prettypath, argname, fixturedef in available: + if currentmodule != module: + if not module.startswith("_pytest."): + tw.line() + tw.sep("-", f"fixtures defined from {module}") + currentmodule = module + if verbose <= 0 and argname.startswith("_"): + continue + tw.write(f"{argname}", green=True) + if fixturedef.scope != "function": + tw.write(f" [{fixturedef.scope} scope]", cyan=True) + tw.write(f" -- {prettypath}", yellow=True) + tw.write("\n") + doc = inspect.getdoc(fixturedef.func) + if doc: + write_docstring( + tw, doc.split("\n\n", maxsplit=1)[0] if verbose <= 0 else doc + ) + else: + tw.line(" no docstring available", red=True) + tw.line() + + +def write_docstring(tw: TerminalWriter, doc: str, indent: str = " ") -> None: + for line in doc.split("\n"): + tw.line(indent + line) diff --git a/.venv/lib/python3.11/site-packages/_pytest/freeze_support.py b/.venv/lib/python3.11/site-packages/_pytest/freeze_support.py new file mode 100644 index 0000000..959ff07 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/freeze_support.py @@ -0,0 +1,45 @@ +"""Provides a function to report all internal modules for using freezing +tools.""" + +from __future__ import annotations + +from collections.abc import Iterator +import types + + +def freeze_includes() -> list[str]: + """Return a list of module names used by pytest that should be + included by cx_freeze.""" + import _pytest + + result = list(_iter_all_modules(_pytest)) + return result + + +def _iter_all_modules( + package: str | types.ModuleType, + prefix: str = "", +) -> Iterator[str]: + """Iterate over the names of all modules that can be found in the given + package, recursively. + + >>> import _pytest + >>> list(_iter_all_modules(_pytest)) + ['_pytest._argcomplete', '_pytest._code.code', ...] + """ + import os + import pkgutil + + if isinstance(package, str): + path = package + else: + # Type ignored because typeshed doesn't define ModuleType.__path__ + # (only defined on packages). + package_path = package.__path__ + path, prefix = package_path[0], package.__name__ + "." 
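+    # Walk the resolved path with pkgutil, recursing into subpackages so that
+    # nested modules are yielded with their fully dotted prefix.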
+ for _, name, is_package in pkgutil.iter_modules([path]): + if is_package: + for m in _iter_all_modules(os.path.join(path, name), prefix=name + "."): + yield prefix + m + else: + yield prefix + name diff --git a/.venv/lib/python3.11/site-packages/_pytest/helpconfig.py b/.venv/lib/python3.11/site-packages/_pytest/helpconfig.py new file mode 100644 index 0000000..b5ac0e6 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/helpconfig.py @@ -0,0 +1,283 @@ +# mypy: allow-untyped-defs +"""Version info, help messages, tracing configuration.""" + +from __future__ import annotations + +from argparse import Action +from collections.abc import Generator +import os +import sys + +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import PrintHelp +from _pytest.config.argparsing import Parser +from _pytest.terminal import TerminalReporter +import pytest + + +class HelpAction(Action): + """An argparse Action that will raise an exception in order to skip the + rest of the argument parsing when --help is passed. + + This prevents argparse from quitting due to missing required arguments + when any are defined, for example by ``pytest_addoption``. + This is similar to the way that the builtin argparse --help option is + implemented by raising SystemExit. + """ + + def __init__(self, option_strings, dest=None, default=False, help=None): + super().__init__( + option_strings=option_strings, + dest=dest, + const=True, + default=default, + nargs=0, + help=help, + ) + + def __call__(self, parser, namespace, values, option_string=None): + setattr(namespace, self.dest, self.const) + + # We should only skip the rest of the parsing after preparse is done. + if getattr(parser._parser, "after_preparse", False): + raise PrintHelp + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("debugconfig") + group.addoption( + "--version", + "-V", + action="count", + default=0, + dest="version", + help="Display pytest version and information about plugins. " + "When given twice, also display information about plugins.", + ) + group._addoption( # private to use reserved lower-case short option + "-h", + "--help", + action=HelpAction, + dest="help", + help="Show help message and configuration info", + ) + group._addoption( # private to use reserved lower-case short option + "-p", + action="append", + dest="plugins", + default=[], + metavar="name", + help="Early-load given plugin module name or entry point (multi-allowed). " + "To avoid loading of plugins, use the `no:` prefix, e.g. " + "`no:doctest`. See also --disable-plugin-autoload.", + ) + group.addoption( + "--disable-plugin-autoload", + action="store_true", + default=False, + help="Disable plugin auto-loading through entry point packaging metadata. " + "Only plugins explicitly specified in -p or env var PYTEST_PLUGINS will be loaded.", + ) + group.addoption( + "--traceconfig", + "--trace-config", + action="store_true", + default=False, + help="Trace considerations of conftest.py files", + ) + group.addoption( + "--debug", + action="store", + nargs="?", + const="pytestdebug.log", + dest="debug", + metavar="DEBUG_FILE_NAME", + help="Store internal tracing debug information in this log file. " + "This file is opened with 'w' and truncated as a result, care advised. " + "Default: pytestdebug.log.", + ) + group._addoption( # private to use reserved lower-case short option + "-o", + "--override-ini", + dest="override_ini", + action="append", + help='Override ini option with "option=value" style, ' + "e.g. 
`-o xfail_strict=True -o cache_dir=cache`.",
+    )
+
+
+@pytest.hookimpl(wrapper=True)
+def pytest_cmdline_parse() -> Generator[None, Config, Config]:
+    config = yield
+
+    if config.option.debug:
+        # --debug | --debug <file> was provided.
+        path = config.option.debug
+        debugfile = open(path, "w", encoding="utf-8")
+        debugfile.write(
+            "versions pytest-{}, "
+            "python-{}\ninvocation_dir={}\ncwd={}\nargs={}\n\n".format(
+                pytest.__version__,
+                ".".join(map(str, sys.version_info)),
+                config.invocation_params.dir,
+                os.getcwd(),
+                config.invocation_params.args,
+            )
+        )
+        config.trace.root.setwriter(debugfile.write)
+        undo_tracing = config.pluginmanager.enable_tracing()
+        sys.stderr.write(f"writing pytest debug information to {path}\n")
+
+        def unset_tracing() -> None:
+            debugfile.close()
+            sys.stderr.write(f"wrote pytest debug information to {debugfile.name}\n")
+            config.trace.root.setwriter(None)
+            undo_tracing()
+
+        config.add_cleanup(unset_tracing)
+
+    return config
+
+
+def showversion(config: Config) -> None:
+    if config.option.version > 1:
+        sys.stdout.write(
+            f"This is pytest version {pytest.__version__}, imported from {pytest.__file__}\n"
+        )
+        plugininfo = getpluginversioninfo(config)
+        if plugininfo:
+            for line in plugininfo:
+                sys.stdout.write(line + "\n")
+    else:
+        sys.stdout.write(f"pytest {pytest.__version__}\n")
+
+
+def pytest_cmdline_main(config: Config) -> int | ExitCode | None:
+    if config.option.version > 0:
+        showversion(config)
+        return 0
+    elif config.option.help:
+        config._do_configure()
+        showhelp(config)
+        config._ensure_unconfigure()
+        return 0
+    return None
+
+
+def showhelp(config: Config) -> None:
+    import textwrap
+
+    reporter: TerminalReporter | None = config.pluginmanager.get_plugin(
+        "terminalreporter"
+    )
+    assert reporter is not None
+    tw = reporter._tw
+    tw.write(config._parser.optparser.format_help())
+    tw.line()
+    tw.line(
+        "[pytest] ini-options in the first "
+        "pytest.ini|tox.ini|setup.cfg|pyproject.toml file found:"
+    )
+    tw.line()
+
+    columns = tw.fullwidth  # costly call
+    indent_len = 24  # based on argparse's max_help_position=24
+    indent = " " * indent_len
+    for name in config._parser._ininames:
+        help, type, default = config._parser._inidict[name]
+        if type is None:
+            type = "string"
+        if help is None:
+            raise TypeError(f"help argument cannot be None for {name}")
+        spec = f"{name} ({type}):"
+        tw.write(f"  {spec}")
+        spec_len = len(spec)
+        if spec_len > (indent_len - 3):
+            # Display help starting at a new line.
+            tw.line()
+            helplines = textwrap.wrap(
+                help,
+                columns,
+                initial_indent=indent,
+                subsequent_indent=indent,
+                break_on_hyphens=False,
+            )
+
+            for line in helplines:
+                tw.line(line)
+        else:
+            # Display help starting after the spec, following lines indented.
+ tw.write(" " * (indent_len - spec_len - 2)) + wrapped = textwrap.wrap(help, columns - indent_len, break_on_hyphens=False) + + if wrapped: + tw.line(wrapped[0]) + for line in wrapped[1:]: + tw.line(indent + line) + + tw.line() + tw.line("Environment variables:") + vars = [ + ( + "CI", + "When set (regardless of value), pytest knows it is running in a " + "CI process and does not truncate summary info", + ), + ("BUILD_NUMBER", "Equivalent to CI"), + ("PYTEST_ADDOPTS", "Extra command line options"), + ("PYTEST_PLUGINS", "Comma-separated plugins to load during startup"), + ("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "Set to disable plugin auto-loading"), + ("PYTEST_DEBUG", "Set to enable debug tracing of pytest's internals"), + ] + for name, help in vars: + tw.line(f" {name:<24} {help}") + tw.line() + tw.line() + + tw.line("to see available markers type: pytest --markers") + tw.line("to see available fixtures type: pytest --fixtures") + tw.line( + "(shown according to specified file_or_dir or current dir " + "if not specified; fixtures with leading '_' are only shown " + "with the '-v' option" + ) + + for warningreport in reporter.stats.get("warnings", []): + tw.line("warning : " + warningreport.message, red=True) + + +conftest_options = [("pytest_plugins", "list of plugin names to load")] + + +def getpluginversioninfo(config: Config) -> list[str]: + lines = [] + plugininfo = config.pluginmanager.list_plugin_distinfo() + if plugininfo: + lines.append("registered third-party plugins:") + for plugin, dist in plugininfo: + loc = getattr(plugin, "__file__", repr(plugin)) + content = f"{dist.project_name}-{dist.version} at {loc}" + lines.append(" " + content) + return lines + + +def pytest_report_header(config: Config) -> list[str]: + lines = [] + if config.option.debug or config.option.traceconfig: + lines.append(f"using: pytest-{pytest.__version__}") + + verinfo = getpluginversioninfo(config) + if verinfo: + lines.extend(verinfo) + + if config.option.traceconfig: + lines.append("active plugins:") + items = config.pluginmanager.list_name_plugin() + for name, plugin in items: + if hasattr(plugin, "__file__"): + r = plugin.__file__ + else: + r = repr(plugin) + lines.append(f" {name:<20}: {r}") + return lines diff --git a/.venv/lib/python3.11/site-packages/_pytest/hookspec.py b/.venv/lib/python3.11/site-packages/_pytest/hookspec.py new file mode 100644 index 0000000..12653ea --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/hookspec.py @@ -0,0 +1,1333 @@ +# mypy: allow-untyped-defs +# ruff: noqa: T100 +"""Hook specifications for pytest plugins which are invoked by pytest itself +and by builtin plugins.""" + +from __future__ import annotations + +from collections.abc import Mapping +from collections.abc import Sequence +from pathlib import Path +from typing import Any +from typing import TYPE_CHECKING + +from pluggy import HookspecMarker + +from .deprecated import HOOK_LEGACY_PATH_ARG + + +if TYPE_CHECKING: + import pdb + from typing import Literal + import warnings + + from _pytest._code.code import ExceptionInfo + from _pytest._code.code import ExceptionRepr + from _pytest.compat import LEGACY_PATH + from _pytest.config import _PluggyPlugin + from _pytest.config import Config + from _pytest.config import ExitCode + from _pytest.config import PytestPluginManager + from _pytest.config.argparsing import Parser + from _pytest.fixtures import FixtureDef + from _pytest.fixtures import SubRequest + from _pytest.main import Session + from _pytest.nodes import Collector + from _pytest.nodes import Item 
+ from _pytest.outcomes import Exit + from _pytest.python import Class + from _pytest.python import Function + from _pytest.python import Metafunc + from _pytest.python import Module + from _pytest.reports import CollectReport + from _pytest.reports import TestReport + from _pytest.runner import CallInfo + from _pytest.terminal import TerminalReporter + from _pytest.terminal import TestShortLogReport + + +hookspec = HookspecMarker("pytest") + +# ------------------------------------------------------------------------- +# Initialization hooks called for every plugin +# ------------------------------------------------------------------------- + + +@hookspec(historic=True) +def pytest_addhooks(pluginmanager: PytestPluginManager) -> None: + """Called at plugin registration time to allow adding new hooks via a call to + :func:`pluginmanager.add_hookspecs(module_or_class, prefix) `. + + :param pluginmanager: The pytest plugin manager. + + .. note:: + This hook is incompatible with hook wrappers. + + Use in conftest plugins + ======================= + + If a conftest plugin implements this hook, it will be called immediately + when the conftest is registered. + """ + + +@hookspec(historic=True) +def pytest_plugin_registered( + plugin: _PluggyPlugin, + plugin_name: str, + manager: PytestPluginManager, +) -> None: + """A new pytest plugin got registered. + + :param plugin: The plugin module or instance. + :param plugin_name: The name by which the plugin is registered. + :param manager: The pytest plugin manager. + + .. note:: + This hook is incompatible with hook wrappers. + + Use in conftest plugins + ======================= + + If a conftest plugin implements this hook, it will be called immediately + when the conftest is registered, once for each plugin registered thus far + (including itself!), and for all plugins thereafter when they are + registered. + """ + + +@hookspec(historic=True) +def pytest_addoption(parser: Parser, pluginmanager: PytestPluginManager) -> None: + """Register argparse-style options and ini-style config values, + called once at the beginning of a test run. + + :param parser: + To add command line options, call + :py:func:`parser.addoption(...) `. + To add ini-file values call :py:func:`parser.addini(...) + `. + + :param pluginmanager: + The pytest plugin manager, which can be used to install :py:func:`~pytest.hookspec`'s + or :py:func:`~pytest.hookimpl`'s and allow one plugin to call another plugin's hooks + to change how command line options are added. + + Options can later be accessed through the + :py:class:`config ` object, respectively: + + - :py:func:`config.getoption(name) ` to + retrieve the value of a command line option. + + - :py:func:`config.getini(name) ` to retrieve + a value read from an ini-style file. + + The config object is passed around on many internal objects via the ``.config`` + attribute or can be retrieved as the ``pytestconfig`` fixture. + + .. note:: + This hook is incompatible with hook wrappers. + + Use in conftest plugins + ======================= + + If a conftest plugin implements this hook, it will be called immediately + when the conftest is registered. + + This hook is only called for :ref:`initial conftests `. + """ + + +@hookspec(historic=True) +def pytest_configure(config: Config) -> None: + """Allow plugins and conftest files to perform initial configuration. + + .. note:: + This hook is incompatible with hook wrappers. + + :param config: The pytest config object. 
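+
+    A minimal sketch (illustrative; the ``slow`` marker name is just an
+    example) of registering a custom marker so that ``--strict-markers``
+    accepts it:
+
+    .. code-block:: python
+
+        def pytest_configure(config):
+            config.addinivalue_line("markers", "slow: mark a test as slow to run")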
+ + Use in conftest plugins + ======================= + + This hook is called for every :ref:`initial conftest ` file + after command line options have been parsed. After that, the hook is called + for other conftest files as they are registered. + """ + + +# ------------------------------------------------------------------------- +# Bootstrapping hooks called for plugins registered early enough: +# internal and 3rd party plugins. +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_cmdline_parse( + pluginmanager: PytestPluginManager, args: list[str] +) -> Config | None: + """Return an initialized :class:`~pytest.Config`, parsing the specified args. + + Stops at first non-None result, see :ref:`firstresult`. + + .. note:: + This hook is only called for plugin classes passed to the + ``plugins`` arg when using `pytest.main`_ to perform an in-process + test run. + + :param pluginmanager: The pytest plugin manager. + :param args: List of arguments passed on the command line. + :returns: A pytest config object. + + Use in conftest plugins + ======================= + + This hook is not called for conftest files. + """ + + +def pytest_load_initial_conftests( + early_config: Config, parser: Parser, args: list[str] +) -> None: + """Called to implement the loading of :ref:`initial conftest files + ` ahead of command line option parsing. + + :param early_config: The pytest config object. + :param args: Arguments passed on the command line. + :param parser: To add command line options. + + Use in conftest plugins + ======================= + + This hook is not called for conftest files. + """ + + +@hookspec(firstresult=True) +def pytest_cmdline_main(config: Config) -> ExitCode | int | None: + """Called for performing the main command line action. + + The default implementation will invoke the configure hooks and + :hook:`pytest_runtestloop`. + + Stops at first non-None result, see :ref:`firstresult`. + + :param config: The pytest config object. + :returns: The exit code. + + Use in conftest plugins + ======================= + + This hook is only called for :ref:`initial conftests `. + """ + + +# ------------------------------------------------------------------------- +# collection hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_collection(session: Session) -> object | None: + """Perform the collection phase for the given session. + + Stops at first non-None result, see :ref:`firstresult`. + The return value is not used, but only stops further processing. + + The default collection phase is this (see individual hooks for full details): + + 1. Starting from ``session`` as the initial collector: + + 1. ``pytest_collectstart(collector)`` + 2. ``report = pytest_make_collect_report(collector)`` + 3. ``pytest_exception_interact(collector, call, report)`` if an interactive exception occurred + 4. For each collected node: + + 1. If an item, ``pytest_itemcollected(item)`` + 2. If a collector, recurse into it. + + 5. ``pytest_collectreport(report)`` + + 2. ``pytest_collection_modifyitems(session, config, items)`` + + 1. ``pytest_deselected(items)`` for any deselected items (may be called multiple times) + + 3. ``pytest_collection_finish(session)`` + 4. Set ``session.items`` to the list of collected items + 5. 
Set ``session.testscollected`` to the number of collected items + + You can implement this hook to only perform some action before collection, + for example the terminal plugin uses it to start displaying the collection + counter (and returns `None`). + + :param session: The pytest session object. + + Use in conftest plugins + ======================= + + This hook is only called for :ref:`initial conftests `. + """ + + +def pytest_collection_modifyitems( + session: Session, config: Config, items: list[Item] +) -> None: + """Called after collection has been performed. May filter or re-order + the items in-place. + + When items are deselected (filtered out from ``items``), + the hook :hook:`pytest_deselected` must be called explicitly + with the deselected items to properly notify other plugins, + e.g. with ``config.hook.pytest_deselected(items=deselected_items)``. + + :param session: The pytest session object. + :param config: The pytest config object. + :param items: List of item objects. + + Use in conftest plugins + ======================= + + Any conftest plugin can implement this hook. + """ + + +def pytest_collection_finish(session: Session) -> None: + """Called after collection has been performed and modified. + + :param session: The pytest session object. + + Use in conftest plugins + ======================= + + Any conftest plugin can implement this hook. + """ + + +@hookspec( + firstresult=True, + warn_on_impl_args={ + "path": HOOK_LEGACY_PATH_ARG.format( + pylib_path_arg="path", pathlib_path_arg="collection_path" + ), + }, +) +def pytest_ignore_collect( + collection_path: Path, path: LEGACY_PATH, config: Config +) -> bool | None: + """Return ``True`` to ignore this path for collection. + + Return ``None`` to let other plugins ignore the path for collection. + + Returning ``False`` will forcefully *not* ignore this path for collection, + without giving a chance for other plugins to ignore this path. + + This hook is consulted for all files and directories prior to calling + more specific hooks. + + Stops at first non-None result, see :ref:`firstresult`. + + :param collection_path: The path to analyze. + :type collection_path: pathlib.Path + :param path: The path to analyze (deprecated). + :param config: The pytest config object. + + .. versionchanged:: 7.0.0 + The ``collection_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``path`` parameter. The ``path`` parameter + has been deprecated. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given collection path, only + conftest files in parent directories of the collection path are consulted + (if the path is a directory, its own conftest file is *not* consulted - a + directory cannot ignore itself!). + """ + + +@hookspec(firstresult=True) +def pytest_collect_directory(path: Path, parent: Collector) -> Collector | None: + """Create a :class:`~pytest.Collector` for the given directory, or None if + not relevant. + + .. versionadded:: 8.0 + + For best results, the returned collector should be a subclass of + :class:`~pytest.Directory`, but this is not required. + + The new node needs to have the specified ``parent`` as a parent. + + Stops at first non-None result, see :ref:`firstresult`. + + :param path: The path to analyze. + :type path: pathlib.Path + + See :ref:`custom directory collectors` for a simple example of use of this + hook. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. 
For a given collection path, only + conftest files in parent directories of the collection path are consulted + (if the path is a directory, its own conftest file is *not* consulted - a + directory cannot collect itself!). + """ + + +@hookspec( + warn_on_impl_args={ + "path": HOOK_LEGACY_PATH_ARG.format( + pylib_path_arg="path", pathlib_path_arg="file_path" + ), + }, +) +def pytest_collect_file( + file_path: Path, path: LEGACY_PATH, parent: Collector +) -> Collector | None: + """Create a :class:`~pytest.Collector` for the given path, or None if not relevant. + + For best results, the returned collector should be a subclass of + :class:`~pytest.File`, but this is not required. + + The new node needs to have the specified ``parent`` as a parent. + + :param file_path: The path to analyze. + :type file_path: pathlib.Path + :param path: The path to collect (deprecated). + + .. versionchanged:: 7.0.0 + The ``file_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``path`` parameter. The ``path`` parameter + has been deprecated. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given file path, only + conftest files in parent directories of the file path are consulted. + """ + + +# logging hooks for collection + + +def pytest_collectstart(collector: Collector) -> None: + """Collector starts collecting. + + :param collector: + The collector. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given collector, only + conftest files in the collector's directory and its parent directories are + consulted. + """ + + +def pytest_itemcollected(item: Item) -> None: + """We just collected a test item. + + :param item: + The item. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +def pytest_collectreport(report: CollectReport) -> None: + """Collector finished collecting. + + :param report: + The collect report. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given collector, only + conftest files in the collector's directory and its parent directories are + consulted. + """ + + +def pytest_deselected(items: Sequence[Item]) -> None: + """Called for deselected test items, e.g. by keyword. + + Note that this hook has two integration aspects for plugins: + + - it can be *implemented* to be notified of deselected items + - it must be *called* from :hook:`pytest_collection_modifyitems` + implementations when items are deselected (to properly notify other plugins). + + May be called multiple times. + + :param items: + The items. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. + """ + + +@hookspec(firstresult=True) +def pytest_make_collect_report(collector: Collector) -> CollectReport | None: + """Perform :func:`collector.collect() ` and return + a :class:`~pytest.CollectReport`. + + Stops at first non-None result, see :ref:`firstresult`. + + :param collector: + The collector. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given collector, only + conftest files in the collector's directory and its parent directories are + consulted. 
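+
+    A minimal sketch (illustrative, not pytest's own implementation) of a
+    wrapper that observes the generated report without replacing it:
+
+    .. code-block:: python
+
+        import pytest
+
+        @pytest.hookimpl(wrapper=True)
+        def pytest_make_collect_report(collector):
+            report = yield
+            # Inspect or annotate the CollectReport here.
+            return report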
+ """ + + +# ------------------------------------------------------------------------- +# Python test function related hooks +# ------------------------------------------------------------------------- + + +@hookspec( + firstresult=True, + warn_on_impl_args={ + "path": HOOK_LEGACY_PATH_ARG.format( + pylib_path_arg="path", pathlib_path_arg="module_path" + ), + }, +) +def pytest_pycollect_makemodule( + module_path: Path, path: LEGACY_PATH, parent +) -> Module | None: + """Return a :class:`pytest.Module` collector or None for the given path. + + This hook will be called for each matching test module path. + The :hook:`pytest_collect_file` hook needs to be used if you want to + create test modules for files that do not match as a test module. + + Stops at first non-None result, see :ref:`firstresult`. + + :param module_path: The path of the module to collect. + :type module_path: pathlib.Path + :param path: The path of the module to collect (deprecated). + + .. versionchanged:: 7.0.0 + The ``module_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``path`` parameter. + + The ``path`` parameter has been deprecated in favor of ``fspath``. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given parent collector, + only conftest files in the collector's directory and its parent directories + are consulted. + """ + + +@hookspec(firstresult=True) +def pytest_pycollect_makeitem( + collector: Module | Class, name: str, obj: object +) -> None | Item | Collector | list[Item | Collector]: + """Return a custom item/collector for a Python object in a module, or None. + + Stops at first non-None result, see :ref:`firstresult`. + + :param collector: + The module/class collector. + :param name: + The name of the object in the module/class. + :param obj: + The object. + :returns: + The created items/collectors. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given collector, only + conftest files in the collector's directory and its parent directories + are consulted. + """ + + +@hookspec(firstresult=True) +def pytest_pyfunc_call(pyfuncitem: Function) -> object | None: + """Call underlying test function. + + Stops at first non-None result, see :ref:`firstresult`. + + :param pyfuncitem: + The function item. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only + conftest files in the item's directory and its parent directories + are consulted. + """ + + +def pytest_generate_tests(metafunc: Metafunc) -> None: + """Generate (multiple) parametrized calls to a test function. + + :param metafunc: + The :class:`~pytest.Metafunc` helper for the test function. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given function definition, + only conftest files in the functions's directory and its parent directories + are consulted. + """ + + +@hookspec(firstresult=True) +def pytest_make_parametrize_id(config: Config, val: object, argname: str) -> str | None: + """Return a user-friendly string representation of the given ``val`` + that will be used by @pytest.mark.parametrize calls, or None if the hook + doesn't know about ``val``. + + The parameter name is available as ``argname``, if required. + + Stops at first non-None result, see :ref:`firstresult`. + + :param config: The pytest config object. + :param val: The parametrized value. 
+ :param argname: The automatic parameter name produced by pytest. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. + """ + + +# ------------------------------------------------------------------------- +# runtest related hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_runtestloop(session: Session) -> object | None: + """Perform the main runtest loop (after collection finished). + + The default hook implementation performs the runtest protocol for all items + collected in the session (``session.items``), unless the collection failed + or the ``collectonly`` pytest option is set. + + If at any point :py:func:`pytest.exit` is called, the loop is + terminated immediately. + + If at any point ``session.shouldfail`` or ``session.shouldstop`` are set, the + loop is terminated after the runtest protocol for the current item is finished. + + :param session: The pytest session object. + + Stops at first non-None result, see :ref:`firstresult`. + The return value is not used, but only stops further processing. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. + """ + + +@hookspec(firstresult=True) +def pytest_runtest_protocol(item: Item, nextitem: Item | None) -> object | None: + """Perform the runtest protocol for a single test item. + + The default runtest protocol is this (see individual hooks for full details): + + - ``pytest_runtest_logstart(nodeid, location)`` + + - Setup phase: + - ``call = pytest_runtest_setup(item)`` (wrapped in ``CallInfo(when="setup")``) + - ``report = pytest_runtest_makereport(item, call)`` + - ``pytest_runtest_logreport(report)`` + - ``pytest_exception_interact(call, report)`` if an interactive exception occurred + + - Call phase, if the setup passed and the ``setuponly`` pytest option is not set: + - ``call = pytest_runtest_call(item)`` (wrapped in ``CallInfo(when="call")``) + - ``report = pytest_runtest_makereport(item, call)`` + - ``pytest_runtest_logreport(report)`` + - ``pytest_exception_interact(call, report)`` if an interactive exception occurred + + - Teardown phase: + - ``call = pytest_runtest_teardown(item, nextitem)`` (wrapped in ``CallInfo(when="teardown")``) + - ``report = pytest_runtest_makereport(item, call)`` + - ``pytest_runtest_logreport(report)`` + - ``pytest_exception_interact(call, report)`` if an interactive exception occurred + + - ``pytest_runtest_logfinish(nodeid, location)`` + + :param item: Test item for which the runtest protocol is performed. + :param nextitem: The scheduled-to-be-next test item (or None if this is the end my friend). + + Stops at first non-None result, see :ref:`firstresult`. + The return value is not used, but only stops further processing. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. + """ + + +def pytest_runtest_logstart(nodeid: str, location: tuple[str, int | None, str]) -> None: + """Called at the start of running the runtest protocol for a single item. + + See :hook:`pytest_runtest_protocol` for a description of the runtest protocol. + + :param nodeid: Full node ID of the item. + :param location: A tuple of ``(filename, lineno, testname)`` + where ``filename`` is a file path relative to ``config.rootpath`` + and ``lineno`` is 0-based. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. 
For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +def pytest_runtest_logfinish( + nodeid: str, location: tuple[str, int | None, str] +) -> None: + """Called at the end of running the runtest protocol for a single item. + + See :hook:`pytest_runtest_protocol` for a description of the runtest protocol. + + :param nodeid: Full node ID of the item. + :param location: A tuple of ``(filename, lineno, testname)`` + where ``filename`` is a file path relative to ``config.rootpath`` + and ``lineno`` is 0-based. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +def pytest_runtest_setup(item: Item) -> None: + """Called to perform the setup phase for a test item. + + The default implementation runs ``setup()`` on ``item`` and all of its + parents (which haven't been setup yet). This includes obtaining the + values of fixtures required by the item (which haven't been obtained + yet). + + :param item: + The item. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +def pytest_runtest_call(item: Item) -> None: + """Called to run the test for test item (the call phase). + + The default implementation calls ``item.runtest()``. + + :param item: + The item. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +def pytest_runtest_teardown(item: Item, nextitem: Item | None) -> None: + """Called to perform the teardown phase for a test item. + + The default implementation runs the finalizers and calls ``teardown()`` + on ``item`` and all of its parents (which need to be torn down). This + includes running the teardown phase of fixtures required by the item (if + they go out of scope). + + :param item: + The item. + :param nextitem: + The scheduled-to-be-next test item (None if no further test item is + scheduled). This argument is used to perform exact teardowns, i.e. + calling just enough finalizers so that nextitem only needs to call + setup functions. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +@hookspec(firstresult=True) +def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> TestReport | None: + """Called to create a :class:`~pytest.TestReport` for each of + the setup, call and teardown runtest phases of a test item. + + See :hook:`pytest_runtest_protocol` for a description of the runtest protocol. + + :param item: The item. + :param call: The :class:`~pytest.CallInfo` for the phase. + + Stops at first non-None result, see :ref:`firstresult`. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +def pytest_runtest_logreport(report: TestReport) -> None: + """Process the :class:`~pytest.TestReport` produced for each + of the setup, call and teardown runtest phases of an item. 
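+
+    A minimal sketch (illustrative): collect the node IDs of failing test
+    calls as their reports arrive. ``failed_nodeids`` is a name chosen for
+    this example, not part of pytest.
+
+    .. code-block:: python
+
+        failed_nodeids = []
+
+        def pytest_runtest_logreport(report):
+            if report.when == "call" and report.failed:
+                failed_nodeids.append(report.nodeid)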
+ + See :hook:`pytest_runtest_protocol` for a description of the runtest protocol. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +@hookspec(firstresult=True) +def pytest_report_to_serializable( + config: Config, + report: CollectReport | TestReport, +) -> dict[str, Any] | None: + """Serialize the given report object into a data structure suitable for + sending over the wire, e.g. converted to JSON. + + :param config: The pytest config object. + :param report: The report. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. The exact details may depend + on the plugin which calls the hook. + """ + + +@hookspec(firstresult=True) +def pytest_report_from_serializable( + config: Config, + data: dict[str, Any], +) -> CollectReport | TestReport | None: + """Restore a report object previously serialized with + :hook:`pytest_report_to_serializable`. + + :param config: The pytest config object. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. The exact details may depend + on the plugin which calls the hook. + """ + + +# ------------------------------------------------------------------------- +# Fixture related hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_fixture_setup( + fixturedef: FixtureDef[Any], request: SubRequest +) -> object | None: + """Perform fixture setup execution. + + :param fixturedef: + The fixture definition object. + :param request: + The fixture request object. + :returns: + The return value of the call to the fixture function. + + Stops at first non-None result, see :ref:`firstresult`. + + .. note:: + If the fixture function returns None, other implementations of + this hook function will continue to be called, according to the + behavior of the :ref:`firstresult` option. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given fixture, only + conftest files in the fixture scope's directory and its parent directories + are consulted. + """ + + +def pytest_fixture_post_finalizer( + fixturedef: FixtureDef[Any], request: SubRequest +) -> None: + """Called after fixture teardown, but before the cache is cleared, so + the fixture result ``fixturedef.cached_result`` is still available (not + ``None``). + + :param fixturedef: + The fixture definition object. + :param request: + The fixture request object. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given fixture, only + conftest files in the fixture scope's directory and its parent directories + are consulted. + """ + + +# ------------------------------------------------------------------------- +# test session related hooks +# ------------------------------------------------------------------------- + + +def pytest_sessionstart(session: Session) -> None: + """Called after the ``Session`` object has been created and before performing collection + and entering the run test loop. + + :param session: The pytest session object. + + Use in conftest plugins + ======================= + + This hook is only called for :ref:`initial conftests `. 
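+
+    A minimal sketch (illustrative): record when the run started so that a
+    later hook can report the total duration. ``run_info`` is a module-level
+    name chosen for this example, not part of pytest.
+
+    .. code-block:: python
+
+        import time
+
+        run_info = {}
+
+        def pytest_sessionstart(session):
+            run_info["start"] = time.time()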
+ """ + + +def pytest_sessionfinish( + session: Session, + exitstatus: int | ExitCode, +) -> None: + """Called after whole test run finished, right before returning the exit status to the system. + + :param session: The pytest session object. + :param exitstatus: The status which pytest will return to the system. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. + """ + + +def pytest_unconfigure(config: Config) -> None: + """Called before test process is exited. + + :param config: The pytest config object. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. + """ + + +# ------------------------------------------------------------------------- +# hooks for customizing the assert methods +# ------------------------------------------------------------------------- + + +def pytest_assertrepr_compare( + config: Config, op: str, left: object, right: object +) -> list[str] | None: + """Return explanation for comparisons in failing assert expressions. + + Return None for no custom explanation, otherwise return a list + of strings. The strings will be joined by newlines but any newlines + *in* a string will be escaped. Note that all but the first line will + be indented slightly, the intention is for the first line to be a summary. + + :param config: The pytest config object. + :param op: The operator, e.g. `"=="`, `"!="`, `"not in"`. + :param left: The left operand. + :param right: The right operand. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +def pytest_assertion_pass(item: Item, lineno: int, orig: str, expl: str) -> None: + """Called whenever an assertion passes. + + .. versionadded:: 5.0 + + Use this hook to do some processing after a passing assertion. + The original assertion information is available in the `orig` string + and the pytest introspected assertion information is available in the + `expl` string. + + This hook must be explicitly enabled by the ``enable_assertion_pass_hook`` + ini-file option: + + .. code-block:: ini + + [pytest] + enable_assertion_pass_hook=true + + You need to **clean the .pyc** files in your project directory and interpreter libraries + when enabling this option, as assertions will require to be re-written. + + :param item: pytest item object of current test. + :param lineno: Line number of the assert statement. + :param orig: String with the original assertion. + :param expl: String with the assert explanation. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +# ------------------------------------------------------------------------- +# Hooks for influencing reporting (invoked from _pytest_terminal). +# ------------------------------------------------------------------------- + + +@hookspec( + warn_on_impl_args={ + "startdir": HOOK_LEGACY_PATH_ARG.format( + pylib_path_arg="startdir", pathlib_path_arg="start_path" + ), + }, +) +def pytest_report_header( # type:ignore[empty-body] + config: Config, start_path: Path, startdir: LEGACY_PATH +) -> str | list[str]: + """Return a string or list of strings to be displayed as header info for terminal reporting. + + :param config: The pytest config object. 
+ :param start_path: The starting dir. + :type start_path: pathlib.Path + :param startdir: The starting dir (deprecated). + + .. note:: + + Lines returned by a plugin are displayed before those of plugins which + ran before it. + If you want to have your line(s) displayed first, use + :ref:`trylast=True `. + + .. versionchanged:: 7.0.0 + The ``start_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``startdir`` parameter. The ``startdir`` parameter + has been deprecated. + + Use in conftest plugins + ======================= + + This hook is only called for :ref:`initial conftests `. + """ + + +@hookspec( + warn_on_impl_args={ + "startdir": HOOK_LEGACY_PATH_ARG.format( + pylib_path_arg="startdir", pathlib_path_arg="start_path" + ), + }, +) +def pytest_report_collectionfinish( # type:ignore[empty-body] + config: Config, + start_path: Path, + startdir: LEGACY_PATH, + items: Sequence[Item], +) -> str | list[str]: + """Return a string or list of strings to be displayed after collection + has finished successfully. + + These strings will be displayed after the standard "collected X items" message. + + .. versionadded:: 3.2 + + :param config: The pytest config object. + :param start_path: The starting dir. + :type start_path: pathlib.Path + :param startdir: The starting dir (deprecated). + :param items: List of pytest items that are going to be executed; this list should not be modified. + + .. note:: + + Lines returned by a plugin are displayed before those of plugins which + ran before it. + If you want to have your line(s) displayed first, use + :ref:`trylast=True `. + + .. versionchanged:: 7.0.0 + The ``start_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``startdir`` parameter. The ``startdir`` parameter + has been deprecated. + + Use in conftest plugins + ======================= + + Any conftest plugin can implement this hook. + """ + + +@hookspec(firstresult=True) +def pytest_report_teststatus( # type:ignore[empty-body] + report: CollectReport | TestReport, config: Config +) -> TestShortLogReport | tuple[str, str, str | tuple[str, Mapping[str, bool]]]: + """Return result-category, shortletter and verbose word for status + reporting. + + The result-category is a category in which to count the result, for + example "passed", "skipped", "error" or the empty string. + + The shortletter is shown as testing progresses, for example ".", "s", + "E" or the empty string. + + The verbose word is shown as testing progresses in verbose mode, for + example "PASSED", "SKIPPED", "ERROR" or the empty string. + + pytest may style these implicitly according to the report outcome. + To provide explicit styling, return a tuple for the verbose word, + for example ``"rerun", "R", ("RERUN", {"yellow": True})``. + + :param report: The report object whose status is to be returned. + :param config: The pytest config object. + :returns: The test status. + + Stops at first non-None result, see :ref:`firstresult`. + + Use in conftest plugins + ======================= + + Any conftest plugin can implement this hook. + """ + + +def pytest_terminal_summary( + terminalreporter: TerminalReporter, + exitstatus: ExitCode, + config: Config, +) -> None: + """Add a section to terminal summary reporting. + + :param terminalreporter: The internal terminal reporter object. + :param exitstatus: The exit status that will be reported back to the OS. + :param config: The pytest config object. + + .. versionadded:: 4.2 + The ``config`` parameter. 
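+
+    A minimal sketch (illustrative): append a custom section to the terminal
+    summary.
+
+    .. code-block:: python
+
+        def pytest_terminal_summary(terminalreporter, exitstatus, config):
+            terminalreporter.section("my plugin summary")
+            terminalreporter.line(f"finished with exit status {exitstatus!r}")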
+
+    Use in conftest plugins
+    =======================
+
+    Any conftest plugin can implement this hook.
+    """
+
+
+@hookspec(historic=True)
+def pytest_warning_recorded(
+    warning_message: warnings.WarningMessage,
+    when: Literal["config", "collect", "runtest"],
+    nodeid: str,
+    location: tuple[str, int, str] | None,
+) -> None:
+    """Process a warning captured by the internal pytest warnings plugin.
+
+    :param warning_message:
+        The captured warning. This is the same object produced by :class:`warnings.catch_warnings`,
+        and contains the same attributes as the parameters of :py:func:`warnings.showwarning`.
+
+    :param when:
+        Indicates when the warning was captured. Possible values:
+
+        * ``"config"``: during pytest configuration/initialization stage.
+        * ``"collect"``: during test collection.
+        * ``"runtest"``: during test execution.
+
+    :param nodeid:
+        Full id of the item. Empty string for warnings that are not specific to
+        a particular node.
+
+    :param location:
+        When available, holds information about the execution context of the captured
+        warning (filename, linenumber, function). ``function`` evaluates to ``<module>``
+        when the execution context is at the module level.
+
+    .. versionadded:: 6.0
+
+    Use in conftest plugins
+    =======================
+
+    Any conftest file can implement this hook. If the warning is specific to a
+    particular node, only conftest files in parent directories of the node are
+    consulted.
+    """
+
+
+# -------------------------------------------------------------------------
+# Hooks for influencing skipping
+# -------------------------------------------------------------------------
+
+
+def pytest_markeval_namespace(  # type:ignore[empty-body]
+    config: Config,
+) -> dict[str, Any]:
+    """Called when constructing the globals dictionary used for
+    evaluating string conditions in xfail/skipif markers.
+
+    This is useful when the condition for a marker requires
+    objects that are expensive or impossible to obtain during
+    collection time, which is required by normal boolean
+    conditions.
+
+    .. versionadded:: 6.2
+
+    :param config: The pytest config object.
+    :returns: A dictionary of additional globals to add.
+
+    Use in conftest plugins
+    =======================
+
+    Any conftest file can implement this hook. For a given item, only conftest
+    files in parent directories of the item are consulted.
+    """
+
+
+# -------------------------------------------------------------------------
+# error handling and internal debugging hooks
+# -------------------------------------------------------------------------
+
+
+def pytest_internalerror(
+    excrepr: ExceptionRepr,
+    excinfo: ExceptionInfo[BaseException],
+) -> bool | None:
+    """Called for internal errors.
+
+    Return True to suppress the fallback handling of printing an
+    INTERNALERROR message directly to sys.stderr.
+
+    :param excrepr: The exception repr object.
+    :param excinfo: The exception info.
+
+    Use in conftest plugins
+    =======================
+
+    Any conftest plugin can implement this hook.
+    """
+
+
+def pytest_keyboard_interrupt(
+    excinfo: ExceptionInfo[KeyboardInterrupt | Exit],
+) -> None:
+    """Called for keyboard interrupt.
+
+    :param excinfo: The exception info.
+
+    Use in conftest plugins
+    =======================
+
+    Any conftest plugin can implement this hook.
+    """
+
+
+def pytest_exception_interact(
+    node: Item | Collector,
+    call: CallInfo[Any],
+    report: CollectReport | TestReport,
+) -> None:
+    """Called when an exception was raised which can potentially be
+    interactively handled.
+
+    May be called during collection (see :hook:`pytest_make_collect_report`),
+    in which case ``report`` is a :class:`~pytest.CollectReport`.
+
+    May be called during runtest of an item (see :hook:`pytest_runtest_protocol`),
+    in which case ``report`` is a :class:`~pytest.TestReport`.
+
+    This hook is not called if the exception that was raised is an internal
+    exception like ``skip.Exception``.
+
+    :param node:
+        The item or collector.
+    :param call:
+        The call information. Contains the exception.
+    :param report:
+        The collection or test report.
+
+    Use in conftest plugins
+    =======================
+
+    Any conftest file can implement this hook. For a given node, only conftest
+    files in parent directories of the node are consulted.
+    """
+
+
+def pytest_enter_pdb(config: Config, pdb: pdb.Pdb) -> None:
+    """Called upon pdb.set_trace().
+
+    Can be used by plugins to take special action just before the python
+    debugger enters interactive mode.
+
+    :param config: The pytest config object.
+    :param pdb: The Pdb instance.
+
+    Use in conftest plugins
+    =======================
+
+    Any conftest plugin can implement this hook.
+    """
+
+
+def pytest_leave_pdb(config: Config, pdb: pdb.Pdb) -> None:
+    """Called when leaving pdb (e.g. with continue after pdb.set_trace()).
+
+    Can be used by plugins to take special action just after the python
+    debugger leaves interactive mode.
+
+    :param config: The pytest config object.
+    :param pdb: The Pdb instance.
+
+    Use in conftest plugins
+    =======================
+
+    Any conftest plugin can implement this hook.
+    """
diff --git a/.venv/lib/python3.11/site-packages/_pytest/junitxml.py b/.venv/lib/python3.11/site-packages/_pytest/junitxml.py
new file mode 100644
index 0000000..dc35e3a
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/_pytest/junitxml.py
@@ -0,0 +1,692 @@
+# mypy: allow-untyped-defs
+"""Report test results in JUnit-XML format, for use with Jenkins and build
+integration servers.
+
+Based on initial code from Ross Lawley.
+
+Output conforms to
+https://github.com/jenkinsci/xunit-plugin/blob/master/src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd
+"""
+
+from __future__ import annotations
+
+from collections.abc import Callable
+import functools
+import os
+import platform
+import re
+import xml.etree.ElementTree as ET
+
+from _pytest import nodes
+from _pytest import timing
+from _pytest._code.code import ExceptionRepr
+from _pytest._code.code import ReprFileLocation
+from _pytest.config import Config
+from _pytest.config import filename_arg
+from _pytest.config.argparsing import Parser
+from _pytest.fixtures import FixtureRequest
+from _pytest.reports import TestReport
+from _pytest.stash import StashKey
+from _pytest.terminal import TerminalReporter
+import pytest
+
+
+xml_key = StashKey["LogXML"]()
+
+
+def bin_xml_escape(arg: object) -> str:
+    r"""Visually escape invalid XML characters.
+
+    For example, transforms
+        'hello\aworld\b'
+    into
+        'hello#x07world#x08'
+    Note that the #xABs are *not* XML escapes - missing the ampersand &#xab;.
+    The idea is to escape visually for the user rather than for XML itself.
+    """
+
+    def repl(matchobj: re.Match[str]) -> str:
+        i = ord(matchobj.group())
+        if i <= 0xFF:
+            return f"#x{i:02X}"
+        else:
+            return f"#x{i:04X}"
+
+    # The spec range of valid chars is:
+    # Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
+    # For an unknown(?) reason, we disallow #x7F (DEL) as well.
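+    # The negated character class below therefore keeps TAB, LF, CR and the
+    # ranges listed above; every other character is rewritten by ``repl``
+    # into a visible ``#xNN`` marker.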
+ illegal_xml_re = ( + "[^\u0009\u000a\u000d\u0020-\u007e\u0080-\ud7ff\ue000-\ufffd\u10000-\u10ffff]" + ) + return re.sub(illegal_xml_re, repl, str(arg)) + + +def merge_family(left, right) -> None: + result = {} + for kl, vl in left.items(): + for kr, vr in right.items(): + if not isinstance(vl, list): + raise TypeError(type(vl)) + result[kl] = vl + vr + left.update(result) + + +families = { # pylint: disable=dict-init-mutate + "_base": {"testcase": ["classname", "name"]}, + "_base_legacy": {"testcase": ["file", "line", "url"]}, +} +# xUnit 1.x inherits legacy attributes. +families["xunit1"] = families["_base"].copy() +merge_family(families["xunit1"], families["_base_legacy"]) + +# xUnit 2.x uses strict base attributes. +families["xunit2"] = families["_base"] + + +class _NodeReporter: + def __init__(self, nodeid: str | TestReport, xml: LogXML) -> None: + self.id = nodeid + self.xml = xml + self.add_stats = self.xml.add_stats + self.family = self.xml.family + self.duration = 0.0 + self.properties: list[tuple[str, str]] = [] + self.nodes: list[ET.Element] = [] + self.attrs: dict[str, str] = {} + + def append(self, node: ET.Element) -> None: + self.xml.add_stats(node.tag) + self.nodes.append(node) + + def add_property(self, name: str, value: object) -> None: + self.properties.append((str(name), bin_xml_escape(value))) + + def add_attribute(self, name: str, value: object) -> None: + self.attrs[str(name)] = bin_xml_escape(value) + + def make_properties_node(self) -> ET.Element | None: + """Return a Junit node containing custom properties, if any.""" + if self.properties: + properties = ET.Element("properties") + for name, value in self.properties: + properties.append(ET.Element("property", name=name, value=value)) + return properties + return None + + def record_testreport(self, testreport: TestReport) -> None: + names = mangle_test_address(testreport.nodeid) + existing_attrs = self.attrs + classnames = names[:-1] + if self.xml.prefix: + classnames.insert(0, self.xml.prefix) + attrs: dict[str, str] = { + "classname": ".".join(classnames), + "name": bin_xml_escape(names[-1]), + "file": testreport.location[0], + } + if testreport.location[1] is not None: + attrs["line"] = str(testreport.location[1]) + if hasattr(testreport, "url"): + attrs["url"] = testreport.url + self.attrs = attrs + self.attrs.update(existing_attrs) # Restore any user-defined attributes. + + # Preserve legacy testcase behavior. + if self.family == "xunit1": + return + + # Filter out attributes not permitted by this test family. + # Including custom attributes because they are not valid here. 
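+        # ``families[self.family]["testcase"]`` is the whitelist of attribute
+        # names this xunit family allows on a <testcase> element.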
+ temp_attrs = {} + for key in self.attrs: + if key in families[self.family]["testcase"]: + temp_attrs[key] = self.attrs[key] + self.attrs = temp_attrs + + def to_xml(self) -> ET.Element: + testcase = ET.Element("testcase", self.attrs, time=f"{self.duration:.3f}") + properties = self.make_properties_node() + if properties is not None: + testcase.append(properties) + testcase.extend(self.nodes) + return testcase + + def _add_simple(self, tag: str, message: str, data: str | None = None) -> None: + node = ET.Element(tag, message=message) + node.text = bin_xml_escape(data) + self.append(node) + + def write_captured_output(self, report: TestReport) -> None: + if not self.xml.log_passing_tests and report.passed: + return + + content_out = report.capstdout + content_log = report.caplog + content_err = report.capstderr + if self.xml.logging == "no": + return + content_all = "" + if self.xml.logging in ["log", "all"]: + content_all = self._prepare_content(content_log, " Captured Log ") + if self.xml.logging in ["system-out", "out-err", "all"]: + content_all += self._prepare_content(content_out, " Captured Out ") + self._write_content(report, content_all, "system-out") + content_all = "" + if self.xml.logging in ["system-err", "out-err", "all"]: + content_all += self._prepare_content(content_err, " Captured Err ") + self._write_content(report, content_all, "system-err") + content_all = "" + if content_all: + self._write_content(report, content_all, "system-out") + + def _prepare_content(self, content: str, header: str) -> str: + return "\n".join([header.center(80, "-"), content, ""]) + + def _write_content(self, report: TestReport, content: str, jheader: str) -> None: + tag = ET.Element(jheader) + tag.text = bin_xml_escape(content) + self.append(tag) + + def append_pass(self, report: TestReport) -> None: + self.add_stats("passed") + + def append_failure(self, report: TestReport) -> None: + # msg = str(report.longrepr.reprtraceback.extraline) + if hasattr(report, "wasxfail"): + self._add_simple("skipped", "xfail-marked test passes unexpectedly") + else: + assert report.longrepr is not None + reprcrash: ReprFileLocation | None = getattr( + report.longrepr, "reprcrash", None + ) + if reprcrash is not None: + message = reprcrash.message + else: + message = str(report.longrepr) + message = bin_xml_escape(message) + self._add_simple("failure", message, str(report.longrepr)) + + def append_collect_error(self, report: TestReport) -> None: + # msg = str(report.longrepr.reprtraceback.extraline) + assert report.longrepr is not None + self._add_simple("error", "collection failure", str(report.longrepr)) + + def append_collect_skipped(self, report: TestReport) -> None: + self._add_simple("skipped", "collection skipped", str(report.longrepr)) + + def append_error(self, report: TestReport) -> None: + assert report.longrepr is not None + reprcrash: ReprFileLocation | None = getattr(report.longrepr, "reprcrash", None) + if reprcrash is not None: + reason = reprcrash.message + else: + reason = str(report.longrepr) + + if report.when == "teardown": + msg = f'failed on teardown with "{reason}"' + else: + msg = f'failed on setup with "{reason}"' + self._add_simple("error", bin_xml_escape(msg), str(report.longrepr)) + + def append_skipped(self, report: TestReport) -> None: + if hasattr(report, "wasxfail"): + xfailreason = report.wasxfail + if xfailreason.startswith("reason: "): + xfailreason = xfailreason[8:] + xfailreason = bin_xml_escape(xfailreason) + skipped = ET.Element("skipped", type="pytest.xfail", 
                message=xfailreason)
+            self.append(skipped)
+        else:
+            assert isinstance(report.longrepr, tuple)
+            filename, lineno, skipreason = report.longrepr
+            if skipreason.startswith("Skipped: "):
+                skipreason = skipreason[9:]
+            details = f"{filename}:{lineno}: {skipreason}"
+
+            skipped = ET.Element(
+                "skipped", type="pytest.skip", message=bin_xml_escape(skipreason)
+            )
+            skipped.text = bin_xml_escape(details)
+            self.append(skipped)
+        self.write_captured_output(report)
+
+    def finalize(self) -> None:
+        data = self.to_xml()
+        self.__dict__.clear()
+        # Type ignored because mypy doesn't like overriding a method.
+        # Also the return value doesn't match...
+        self.to_xml = lambda: data  # type: ignore[method-assign]
+
+
+def _warn_incompatibility_with_xunit2(
+    request: FixtureRequest, fixture_name: str
+) -> None:
+    """Emit a PytestWarning about the given fixture being incompatible with newer xunit revisions."""
+    from _pytest.warning_types import PytestWarning
+
+    xml = request.config.stash.get(xml_key, None)
+    if xml is not None and xml.family not in ("xunit1", "legacy"):
+        request.node.warn(
+            PytestWarning(
+                f"{fixture_name} is incompatible with junit_family '{xml.family}' (use 'legacy' or 'xunit1')"
+            )
+        )
+
+
+@pytest.fixture
+def record_property(request: FixtureRequest) -> Callable[[str, object], None]:
+    """Add extra properties to the calling test.
+
+    User properties become part of the test report and are available to the
+    configured reporters, like JUnit XML.
+
+    The fixture is callable with ``name, value``. The value is automatically
+    XML-encoded.
+
+    Example::
+
+        def test_function(record_property):
+            record_property("example_key", 1)
+    """
+    _warn_incompatibility_with_xunit2(request, "record_property")
+
+    def append_property(name: str, value: object) -> None:
+        request.node.user_properties.append((name, value))
+
+    return append_property
+
+
+@pytest.fixture
+def record_xml_attribute(request: FixtureRequest) -> Callable[[str, object], None]:
+    """Add extra xml attributes to the tag for the calling test.
+
+    The fixture is callable with ``name, value``. The value is
+    automatically XML-encoded.
+    """
+    from _pytest.warning_types import PytestExperimentalApiWarning
+
+    request.node.warn(
+        PytestExperimentalApiWarning("record_xml_attribute is an experimental feature")
+    )
+
+    _warn_incompatibility_with_xunit2(request, "record_xml_attribute")
+
+    # Declare noop
+    def add_attr_noop(name: str, value: object) -> None:
+        pass
+
+    attr_func = add_attr_noop
+
+    xml = request.config.stash.get(xml_key, None)
+    if xml is not None:
+        node_reporter = xml.node_reporter(request.node.nodeid)
+        attr_func = node_reporter.add_attribute
+
+    return attr_func
+
+
+def _check_record_param_type(param: str, v: str) -> None:
+    """Used by record_testsuite_property to check that the given parameter name is of the proper
+    type."""
+    __tracebackhide__ = True
+    if not isinstance(v, str):
+        msg = "{param} parameter needs to be a string, but {g} given"  # type: ignore[unreachable]
+        raise TypeError(msg.format(param=param, g=type(v).__name__))
+
+
+@pytest.fixture(scope="session")
+def record_testsuite_property(request: FixtureRequest) -> Callable[[str, object], None]:
+    """Record a new ``<property>`` tag as child of the root ``<testsuite>``.
+
+    This is suitable to writing global information regarding the entire test
+    suite, and is compatible with ``xunit2`` JUnit family.
+
+    This is a ``session``-scoped fixture which is called with ``(name, value)``. Example:
+
+    .. code-block:: python
+
+        def test_foo(record_testsuite_property):
+            record_testsuite_property("ARCH", "PPC")
+            record_testsuite_property("STORAGE_TYPE", "CEPH")
+
+    :param name:
+        The property name.
+    :param value:
+        The property value. Will be converted to a string.
+
+    .. warning::
+
+        Currently this fixture **does not work** with the
+        `pytest-xdist <https://github.com/pytest-dev/pytest-xdist>`__ plugin. See
+        :issue:`7767` for details.
+    """
+    __tracebackhide__ = True
+
+    def record_func(name: str, value: object) -> None:
+        """No-op function in case --junit-xml was not passed in the command-line."""
+        __tracebackhide__ = True
+        _check_record_param_type("name", name)
+
+    xml = request.config.stash.get(xml_key, None)
+    if xml is not None:
+        record_func = xml.add_global_property
+    return record_func
+
+
+def pytest_addoption(parser: Parser) -> None:
+    group = parser.getgroup("terminal reporting")
+    group.addoption(
+        "--junitxml",
+        "--junit-xml",
+        action="store",
+        dest="xmlpath",
+        metavar="path",
+        type=functools.partial(filename_arg, optname="--junitxml"),
+        default=None,
+        help="Create junit-xml style report file at given path",
+    )
+    group.addoption(
+        "--junitprefix",
+        "--junit-prefix",
+        action="store",
+        metavar="str",
+        default=None,
+        help="Prepend prefix to classnames in junit-xml output",
+    )
+    parser.addini(
+        "junit_suite_name", "Test suite name for JUnit report", default="pytest"
+    )
+    parser.addini(
+        "junit_logging",
+        "Write captured log messages to JUnit report: "
+        "one of no|log|system-out|system-err|out-err|all",
+        default="no",
+    )
+    parser.addini(
+        "junit_log_passing_tests",
+        "Capture log information for passing tests to JUnit report: ",
+        type="bool",
+        default=True,
+    )
+    parser.addini(
+        "junit_duration_report",
+        "Duration time to report: one of total|call",
+        default="total",
+    )  # choices=['total', 'call'])
+    parser.addini(
+        "junit_family",
+        "Emit XML for schema: one of legacy|xunit1|xunit2",
+        default="xunit2",
+    )
+
+
+def pytest_configure(config: Config) -> None:
+    xmlpath = config.option.xmlpath
+    # Prevent opening xmllog on worker nodes (xdist).
+    if xmlpath and not hasattr(config, "workerinput"):
+        junit_family = config.getini("junit_family")
+        config.stash[xml_key] = LogXML(
+            xmlpath,
+            config.option.junitprefix,
+            config.getini("junit_suite_name"),
+            config.getini("junit_logging"),
+            config.getini("junit_duration_report"),
+            junit_family,
+            config.getini("junit_log_passing_tests"),
+        )
+        config.pluginmanager.register(config.stash[xml_key])
+
+
+def pytest_unconfigure(config: Config) -> None:
+    xml = config.stash.get(xml_key, None)
+    if xml:
+        del config.stash[xml_key]
+        config.pluginmanager.unregister(xml)
+
+
+def mangle_test_address(address: str) -> list[str]:
+    path, possible_open_bracket, params = address.partition("[")
+    names = path.split("::")
+    # Convert file path to dotted path.
+    names[0] = names[0].replace(nodes.SEP, ".")
+    names[0] = re.sub(r"\.py$", "", names[0])
+    # Put any params back.
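+    # Editorial example, not part of the upstream file: for the nodeid
+    # "tests/test_x.py::TestC::test_m[1]" this function returns
+    # ["tests.test_x", "TestC", "test_m[1]"]; the classname then becomes
+    # "tests.test_x.TestC" and the testcase name "test_m[1]".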
+ names[-1] += possible_open_bracket + params + return names + + +class LogXML: + def __init__( + self, + logfile, + prefix: str | None, + suite_name: str = "pytest", + logging: str = "no", + report_duration: str = "total", + family="xunit1", + log_passing_tests: bool = True, + ) -> None: + logfile = os.path.expanduser(os.path.expandvars(logfile)) + self.logfile = os.path.normpath(os.path.abspath(logfile)) + self.prefix = prefix + self.suite_name = suite_name + self.logging = logging + self.log_passing_tests = log_passing_tests + self.report_duration = report_duration + self.family = family + self.stats: dict[str, int] = dict.fromkeys( + ["error", "passed", "failure", "skipped"], 0 + ) + self.node_reporters: dict[tuple[str | TestReport, object], _NodeReporter] = {} + self.node_reporters_ordered: list[_NodeReporter] = [] + self.global_properties: list[tuple[str, str]] = [] + + # List of reports that failed on call but teardown is pending. + self.open_reports: list[TestReport] = [] + self.cnt_double_fail_tests = 0 + + # Replaces convenience family with real family. + if self.family == "legacy": + self.family = "xunit1" + + def finalize(self, report: TestReport) -> None: + nodeid = getattr(report, "nodeid", report) + # Local hack to handle xdist report order. + workernode = getattr(report, "node", None) + reporter = self.node_reporters.pop((nodeid, workernode)) + + for propname, propvalue in report.user_properties: + reporter.add_property(propname, str(propvalue)) + + if reporter is not None: + reporter.finalize() + + def node_reporter(self, report: TestReport | str) -> _NodeReporter: + nodeid: str | TestReport = getattr(report, "nodeid", report) + # Local hack to handle xdist report order. + workernode = getattr(report, "node", None) + + key = nodeid, workernode + + if key in self.node_reporters: + # TODO: breaks for --dist=each + return self.node_reporters[key] + + reporter = _NodeReporter(nodeid, self) + + self.node_reporters[key] = reporter + self.node_reporters_ordered.append(reporter) + + return reporter + + def add_stats(self, key: str) -> None: + if key in self.stats: + self.stats[key] += 1 + + def _opentestcase(self, report: TestReport) -> _NodeReporter: + reporter = self.node_reporter(report) + reporter.record_testreport(report) + return reporter + + def pytest_runtest_logreport(self, report: TestReport) -> None: + """Handle a setup/call/teardown report, generating the appropriate + XML tags as necessary. + + Note: due to plugins like xdist, this hook may be called in interlaced + order with reports from other nodes. For example: + + Usual call order: + -> setup node1 + -> call node1 + -> teardown node1 + -> setup node2 + -> call node2 + -> teardown node2 + + Possible call order in xdist: + -> setup node1 + -> call node1 + -> setup node2 + -> call node2 + -> teardown node2 + -> teardown node1 + """ + close_report = None + if report.passed: + if report.when == "call": # ignore setup/teardown + reporter = self._opentestcase(report) + reporter.append_pass(report) + elif report.failed: + if report.when == "teardown": + # The following vars are needed when xdist plugin is used. 
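+                # Editorial note, not part of the upstream file: "worker_id"
+                # and "item_index" are attributes that pytest-xdist attaches
+                # to its reports; getattr() with a None default keeps this
+                # code working when xdist is not installed.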
+                report_wid = getattr(report, "worker_id", None)
+                report_ii = getattr(report, "item_index", None)
+                close_report = next(
+                    (
+                        rep
+                        for rep in self.open_reports
+                        if (
+                            rep.nodeid == report.nodeid
+                            and getattr(rep, "item_index", None) == report_ii
+                            and getattr(rep, "worker_id", None) == report_wid
+                        )
+                    ),
+                    None,
+                )
+                if close_report:
+                    # We need to open new testcase in case we have failure in
+                    # call and error in teardown in order to follow junit
+                    # schema.
+                    self.finalize(close_report)
+                    self.cnt_double_fail_tests += 1
+            reporter = self._opentestcase(report)
+            if report.when == "call":
+                reporter.append_failure(report)
+                self.open_reports.append(report)
+                if not self.log_passing_tests:
+                    reporter.write_captured_output(report)
+            else:
+                reporter.append_error(report)
+        elif report.skipped:
+            reporter = self._opentestcase(report)
+            reporter.append_skipped(report)
+        self.update_testcase_duration(report)
+        if report.when == "teardown":
+            reporter = self._opentestcase(report)
+            reporter.write_captured_output(report)
+
+            self.finalize(report)
+            report_wid = getattr(report, "worker_id", None)
+            report_ii = getattr(report, "item_index", None)
+            close_report = next(
+                (
+                    rep
+                    for rep in self.open_reports
+                    if (
+                        rep.nodeid == report.nodeid
+                        and getattr(rep, "item_index", None) == report_ii
+                        and getattr(rep, "worker_id", None) == report_wid
+                    )
+                ),
+                None,
+            )
+            if close_report:
+                self.open_reports.remove(close_report)
+
+    def update_testcase_duration(self, report: TestReport) -> None:
+        """Accumulate total duration for nodeid from given report and update
+        the Junit.testcase with the new total if already created."""
+        if self.report_duration in {"total", report.when}:
+            reporter = self.node_reporter(report)
+            reporter.duration += getattr(report, "duration", 0.0)
+
+    def pytest_collectreport(self, report: TestReport) -> None:
+        if not report.passed:
+            reporter = self._opentestcase(report)
+            if report.failed:
+                reporter.append_collect_error(report)
+            else:
+                reporter.append_collect_skipped(report)
+
+    def pytest_internalerror(self, excrepr: ExceptionRepr) -> None:
+        reporter = self.node_reporter("internal")
+        reporter.attrs.update(classname="pytest", name="internal")
+        reporter._add_simple("error", "internal error", str(excrepr))
+
+    def pytest_sessionstart(self) -> None:
+        self.suite_start = timing.Instant()
+
+    def pytest_sessionfinish(self) -> None:
+        dirname = os.path.dirname(os.path.abspath(self.logfile))
+        # exist_ok avoids filesystem race conditions between checking path existence and requesting creation
+        os.makedirs(dirname, exist_ok=True)
+
+        with open(self.logfile, "w", encoding="utf-8") as logfile:
+            duration = self.suite_start.elapsed()
+
+            numtests = (
+                self.stats["passed"]
+                + self.stats["failure"]
+                + self.stats["skipped"]
+                + self.stats["error"]
+                - self.cnt_double_fail_tests
+            )
+            logfile.write('<?xml version="1.0" encoding="utf-8"?>')
+
+            suite_node = ET.Element(
+                "testsuite",
+                name=self.suite_name,
+                errors=str(self.stats["error"]),
+                failures=str(self.stats["failure"]),
+                skipped=str(self.stats["skipped"]),
+                tests=str(numtests),
+                time=f"{duration.seconds:.3f}",
+                timestamp=self.suite_start.as_utc().astimezone().isoformat(),
+                hostname=platform.node(),
+            )
+            global_properties = self._get_global_properties_node()
+            if global_properties is not None:
+                suite_node.append(global_properties)
+            for node_reporter in self.node_reporters_ordered:
+                suite_node.append(node_reporter.to_xml())
+            testsuites = ET.Element("testsuites")
+            testsuites.set("name", "pytest tests")
+            testsuites.append(suite_node)
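+            # Editorial sketch, not part of the upstream file: the document
+            # written below has roughly this shape:
+            #
+            #     <?xml version="1.0" encoding="utf-8"?>
+            #     <testsuites name="pytest tests">
+            #       <testsuite name="pytest" errors="0" failures="0" ...>
+            #         <testcase classname="tests.test_x" name="test_m" time="0.001"/>
+            #       </testsuite>
+            #     </testsuites>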
logfile.write(ET.tostring(testsuites, encoding="unicode")) + + def pytest_terminal_summary(self, terminalreporter: TerminalReporter) -> None: + terminalreporter.write_sep("-", f"generated xml file: {self.logfile}") + + def add_global_property(self, name: str, value: object) -> None: + __tracebackhide__ = True + _check_record_param_type("name", name) + self.global_properties.append((name, bin_xml_escape(value))) + + def _get_global_properties_node(self) -> ET.Element | None: + """Return a Junit node containing custom properties, if any.""" + if self.global_properties: + properties = ET.Element("properties") + for name, value in self.global_properties: + properties.append(ET.Element("property", name=name, value=value)) + return properties + return None diff --git a/.venv/lib/python3.11/site-packages/_pytest/legacypath.py b/.venv/lib/python3.11/site-packages/_pytest/legacypath.py new file mode 100644 index 0000000..59e8ef6 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/legacypath.py @@ -0,0 +1,468 @@ +# mypy: allow-untyped-defs +"""Add backward compatibility support for the legacy py path type.""" + +from __future__ import annotations + +import dataclasses +from pathlib import Path +import shlex +import subprocess +from typing import Final +from typing import final +from typing import TYPE_CHECKING + +from iniconfig import SectionWrapper + +from _pytest.cacheprovider import Cache +from _pytest.compat import LEGACY_PATH +from _pytest.compat import legacy_path +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config import PytestPluginManager +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.main import Session +from _pytest.monkeypatch import MonkeyPatch +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.nodes import Node +from _pytest.pytester import HookRecorder +from _pytest.pytester import Pytester +from _pytest.pytester import RunResult +from _pytest.terminal import TerminalReporter +from _pytest.tmpdir import TempPathFactory + + +if TYPE_CHECKING: + import pexpect + + +@final +class Testdir: + """ + Similar to :class:`Pytester`, but this class works with legacy legacy_path objects instead. + + All methods just forward to an internal :class:`Pytester` instance, converting results + to `legacy_path` objects as necessary. 
+ """ + + __test__ = False + + CLOSE_STDIN: Final = Pytester.CLOSE_STDIN + TimeoutExpired: Final = Pytester.TimeoutExpired + + def __init__(self, pytester: Pytester, *, _ispytest: bool = False) -> None: + check_ispytest(_ispytest) + self._pytester = pytester + + @property + def tmpdir(self) -> LEGACY_PATH: + """Temporary directory where tests are executed.""" + return legacy_path(self._pytester.path) + + @property + def test_tmproot(self) -> LEGACY_PATH: + return legacy_path(self._pytester._test_tmproot) + + @property + def request(self): + return self._pytester._request + + @property + def plugins(self): + return self._pytester.plugins + + @plugins.setter + def plugins(self, plugins): + self._pytester.plugins = plugins + + @property + def monkeypatch(self) -> MonkeyPatch: + return self._pytester._monkeypatch + + def make_hook_recorder(self, pluginmanager) -> HookRecorder: + """See :meth:`Pytester.make_hook_recorder`.""" + return self._pytester.make_hook_recorder(pluginmanager) + + def chdir(self) -> None: + """See :meth:`Pytester.chdir`.""" + return self._pytester.chdir() + + def finalize(self) -> None: + return self._pytester._finalize() + + def makefile(self, ext, *args, **kwargs) -> LEGACY_PATH: + """See :meth:`Pytester.makefile`.""" + if ext and not ext.startswith("."): + # pytester.makefile is going to throw a ValueError in a way that + # testdir.makefile did not, because + # pathlib.Path is stricter suffixes than py.path + # This ext arguments is likely user error, but since testdir has + # allowed this, we will prepend "." as a workaround to avoid breaking + # testdir usage that worked before + ext = "." + ext + return legacy_path(self._pytester.makefile(ext, *args, **kwargs)) + + def makeconftest(self, source) -> LEGACY_PATH: + """See :meth:`Pytester.makeconftest`.""" + return legacy_path(self._pytester.makeconftest(source)) + + def makeini(self, source) -> LEGACY_PATH: + """See :meth:`Pytester.makeini`.""" + return legacy_path(self._pytester.makeini(source)) + + def getinicfg(self, source: str) -> SectionWrapper: + """See :meth:`Pytester.getinicfg`.""" + return self._pytester.getinicfg(source) + + def makepyprojecttoml(self, source) -> LEGACY_PATH: + """See :meth:`Pytester.makepyprojecttoml`.""" + return legacy_path(self._pytester.makepyprojecttoml(source)) + + def makepyfile(self, *args, **kwargs) -> LEGACY_PATH: + """See :meth:`Pytester.makepyfile`.""" + return legacy_path(self._pytester.makepyfile(*args, **kwargs)) + + def maketxtfile(self, *args, **kwargs) -> LEGACY_PATH: + """See :meth:`Pytester.maketxtfile`.""" + return legacy_path(self._pytester.maketxtfile(*args, **kwargs)) + + def syspathinsert(self, path=None) -> None: + """See :meth:`Pytester.syspathinsert`.""" + return self._pytester.syspathinsert(path) + + def mkdir(self, name) -> LEGACY_PATH: + """See :meth:`Pytester.mkdir`.""" + return legacy_path(self._pytester.mkdir(name)) + + def mkpydir(self, name) -> LEGACY_PATH: + """See :meth:`Pytester.mkpydir`.""" + return legacy_path(self._pytester.mkpydir(name)) + + def copy_example(self, name=None) -> LEGACY_PATH: + """See :meth:`Pytester.copy_example`.""" + return legacy_path(self._pytester.copy_example(name)) + + def getnode(self, config: Config, arg) -> Item | Collector | None: + """See :meth:`Pytester.getnode`.""" + return self._pytester.getnode(config, arg) + + def getpathnode(self, path): + """See :meth:`Pytester.getpathnode`.""" + return self._pytester.getpathnode(path) + + def genitems(self, colitems: list[Item | Collector]) -> list[Item]: + """See 
:meth:`Pytester.genitems`."""
+        return self._pytester.genitems(colitems)
+
+    def runitem(self, source):
+        """See :meth:`Pytester.runitem`."""
+        return self._pytester.runitem(source)
+
+    def inline_runsource(self, source, *cmdlineargs):
+        """See :meth:`Pytester.inline_runsource`."""
+        return self._pytester.inline_runsource(source, *cmdlineargs)
+
+    def inline_genitems(self, *args):
+        """See :meth:`Pytester.inline_genitems`."""
+        return self._pytester.inline_genitems(*args)
+
+    def inline_run(self, *args, plugins=(), no_reraise_ctrlc: bool = False):
+        """See :meth:`Pytester.inline_run`."""
+        return self._pytester.inline_run(
+            *args, plugins=plugins, no_reraise_ctrlc=no_reraise_ctrlc
+        )
+
+    def runpytest_inprocess(self, *args, **kwargs) -> RunResult:
+        """See :meth:`Pytester.runpytest_inprocess`."""
+        return self._pytester.runpytest_inprocess(*args, **kwargs)
+
+    def runpytest(self, *args, **kwargs) -> RunResult:
+        """See :meth:`Pytester.runpytest`."""
+        return self._pytester.runpytest(*args, **kwargs)
+
+    def parseconfig(self, *args) -> Config:
+        """See :meth:`Pytester.parseconfig`."""
+        return self._pytester.parseconfig(*args)
+
+    def parseconfigure(self, *args) -> Config:
+        """See :meth:`Pytester.parseconfigure`."""
+        return self._pytester.parseconfigure(*args)
+
+    def getitem(self, source, funcname="test_func"):
+        """See :meth:`Pytester.getitem`."""
+        return self._pytester.getitem(source, funcname)
+
+    def getitems(self, source):
+        """See :meth:`Pytester.getitems`."""
+        return self._pytester.getitems(source)
+
+    def getmodulecol(self, source, configargs=(), withinit=False):
+        """See :meth:`Pytester.getmodulecol`."""
+        return self._pytester.getmodulecol(
+            source, configargs=configargs, withinit=withinit
+        )
+
+    def collect_by_name(self, modcol: Collector, name: str) -> Item | Collector | None:
+        """See :meth:`Pytester.collect_by_name`."""
+        return self._pytester.collect_by_name(modcol, name)
+
+    def popen(
+        self,
+        cmdargs,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        stdin=CLOSE_STDIN,
+        **kw,
+    ):
+        """See :meth:`Pytester.popen`."""
+        return self._pytester.popen(cmdargs, stdout, stderr, stdin, **kw)
+
+    def run(self, *cmdargs, timeout=None, stdin=CLOSE_STDIN) -> RunResult:
+        """See :meth:`Pytester.run`."""
+        return self._pytester.run(*cmdargs, timeout=timeout, stdin=stdin)
+
+    def runpython(self, script) -> RunResult:
+        """See :meth:`Pytester.runpython`."""
+        return self._pytester.runpython(script)
+
+    def runpython_c(self, command):
+        """See :meth:`Pytester.runpython_c`."""
+        return self._pytester.runpython_c(command)
+
+    def runpytest_subprocess(self, *args, timeout=None) -> RunResult:
+        """See :meth:`Pytester.runpytest_subprocess`."""
+        return self._pytester.runpytest_subprocess(*args, timeout=timeout)
+
+    def spawn_pytest(self, string: str, expect_timeout: float = 10.0) -> pexpect.spawn:
+        """See :meth:`Pytester.spawn_pytest`."""
+        return self._pytester.spawn_pytest(string, expect_timeout=expect_timeout)
+
+    def spawn(self, cmd: str, expect_timeout: float = 10.0) -> pexpect.spawn:
+        """See :meth:`Pytester.spawn`."""
+        return self._pytester.spawn(cmd, expect_timeout=expect_timeout)
+
+    def __repr__(self) -> str:
+        return f"<Testdir {self.tmpdir!r}>"
+
+    def __str__(self) -> str:
+        return str(self.tmpdir)
+
+
+class LegacyTestdirPlugin:
+    @staticmethod
+    @fixture
+    def testdir(pytester: Pytester) -> Testdir:
+        """
+        Identical to :fixture:`pytester`, and provides an instance whose methods return
+        legacy ``LEGACY_PATH`` objects instead when applicable.
+
+        New code should avoid using :fixture:`testdir` in favor of :fixture:`pytester`.
+        """
+        return Testdir(pytester, _ispytest=True)
+
+
+@final
+@dataclasses.dataclass
+class TempdirFactory:
+    """Backward compatibility wrapper that implements ``py.path.local``
+    for :class:`TempPathFactory`.
+
+    .. note::
+        These days, it is preferred to use ``tmp_path_factory``.
+
+        :ref:`About the tmpdir and tmpdir_factory fixtures<tmpdir and tmpdir_factory>`.
+
+    """
+
+    _tmppath_factory: TempPathFactory
+
+    def __init__(
+        self, tmppath_factory: TempPathFactory, *, _ispytest: bool = False
+    ) -> None:
+        check_ispytest(_ispytest)
+        self._tmppath_factory = tmppath_factory
+
+    def mktemp(self, basename: str, numbered: bool = True) -> LEGACY_PATH:
+        """Same as :meth:`TempPathFactory.mktemp`, but returns a ``py.path.local`` object."""
+        return legacy_path(self._tmppath_factory.mktemp(basename, numbered).resolve())
+
+    def getbasetemp(self) -> LEGACY_PATH:
+        """Same as :meth:`TempPathFactory.getbasetemp`, but returns a ``py.path.local`` object."""
+        return legacy_path(self._tmppath_factory.getbasetemp().resolve())
+
+
+class LegacyTmpdirPlugin:
+    @staticmethod
+    @fixture(scope="session")
+    def tmpdir_factory(request: FixtureRequest) -> TempdirFactory:
+        """Return a :class:`pytest.TempdirFactory` instance for the test session."""
+        # Set dynamically by pytest_configure().
+        return request.config._tmpdirhandler  # type: ignore
+
+    @staticmethod
+    @fixture
+    def tmpdir(tmp_path: Path) -> LEGACY_PATH:
+        """Return a temporary directory (as `legacy_path`_ object)
+        which is unique to each test function invocation.
+        The temporary directory is created as a subdirectory
+        of the base temporary directory, with configurable retention,
+        as discussed in :ref:`temporary directory location and retention`.
+
+        .. note::
+            These days, it is preferred to use ``tmp_path``.
+
+            :ref:`About the tmpdir and tmpdir_factory fixtures<tmpdir and tmpdir_factory>`.
+
+        .. _legacy_path: https://py.readthedocs.io/en/latest/path.html
+        """
+        return legacy_path(tmp_path)
+
+
+def Cache_makedir(self: Cache, name: str) -> LEGACY_PATH:
+    """Return a directory path object with the given name.
+
+    Same as :func:`mkdir`, but returns a legacy py path instance.
+    """
+    return legacy_path(self.mkdir(name))
+
+
+def FixtureRequest_fspath(self: FixtureRequest) -> LEGACY_PATH:
+    """(deprecated) The file system path of the test module which collected this test."""
+    return legacy_path(self.path)
+
+
+def TerminalReporter_startdir(self: TerminalReporter) -> LEGACY_PATH:
+    """The directory from which pytest was invoked.
+
+    Prefer to use ``startpath`` which is a :class:`pathlib.Path`.
+
+    :type: LEGACY_PATH
+    """
+    return legacy_path(self.startpath)
+
+
+def Config_invocation_dir(self: Config) -> LEGACY_PATH:
+    """The directory from which pytest was invoked.
+
+    Prefer to use :attr:`invocation_params.dir <pytest.Config.InvocationParams.dir>`,
+    which is a :class:`pathlib.Path`.
+
+    :type: LEGACY_PATH
+    """
+    return legacy_path(str(self.invocation_params.dir))
+
+
+def Config_rootdir(self: Config) -> LEGACY_PATH:
+    """The path to the :ref:`rootdir <rootdir>`.
+
+    Prefer to use :attr:`rootpath`, which is a :class:`pathlib.Path`.
+
+    :type: LEGACY_PATH
+    """
+    return legacy_path(str(self.rootpath))
+
+
+def Config_inifile(self: Config) -> LEGACY_PATH | None:
+    """The path to the :ref:`configfile <configfiles>`.
+
+    Prefer to use :attr:`inipath`, which is a :class:`pathlib.Path`.
+ + :type: Optional[LEGACY_PATH] + """ + return legacy_path(str(self.inipath)) if self.inipath else None + + +def Session_startdir(self: Session) -> LEGACY_PATH: + """The path from which pytest was invoked. + + Prefer to use ``startpath`` which is a :class:`pathlib.Path`. + + :type: LEGACY_PATH + """ + return legacy_path(self.startpath) + + +def Config__getini_unknown_type(self, name: str, type: str, value: str | list[str]): + if type == "pathlist": + # TODO: This assert is probably not valid in all cases. + assert self.inipath is not None + dp = self.inipath.parent + input_values = shlex.split(value) if isinstance(value, str) else value + return [legacy_path(str(dp / x)) for x in input_values] + else: + raise ValueError(f"unknown configuration type: {type}", value) + + +def Node_fspath(self: Node) -> LEGACY_PATH: + """(deprecated) returns a legacy_path copy of self.path""" + return legacy_path(self.path) + + +def Node_fspath_set(self: Node, value: LEGACY_PATH) -> None: + self.path = Path(value) + + +@hookimpl(tryfirst=True) +def pytest_load_initial_conftests(early_config: Config) -> None: + """Monkeypatch legacy path attributes in several classes, as early as possible.""" + mp = MonkeyPatch() + early_config.add_cleanup(mp.undo) + + # Add Cache.makedir(). + mp.setattr(Cache, "makedir", Cache_makedir, raising=False) + + # Add FixtureRequest.fspath property. + mp.setattr(FixtureRequest, "fspath", property(FixtureRequest_fspath), raising=False) + + # Add TerminalReporter.startdir property. + mp.setattr( + TerminalReporter, "startdir", property(TerminalReporter_startdir), raising=False + ) + + # Add Config.{invocation_dir,rootdir,inifile} properties. + mp.setattr(Config, "invocation_dir", property(Config_invocation_dir), raising=False) + mp.setattr(Config, "rootdir", property(Config_rootdir), raising=False) + mp.setattr(Config, "inifile", property(Config_inifile), raising=False) + + # Add Session.startdir property. + mp.setattr(Session, "startdir", property(Session_startdir), raising=False) + + # Add pathlist configuration type. + mp.setattr(Config, "_getini_unknown_type", Config__getini_unknown_type) + + # Add Node.fspath property. + mp.setattr(Node, "fspath", property(Node_fspath, Node_fspath_set), raising=False) + + +@hookimpl +def pytest_configure(config: Config) -> None: + """Installs the LegacyTmpdirPlugin if the ``tmpdir`` plugin is also installed.""" + if config.pluginmanager.has_plugin("tmpdir"): + mp = MonkeyPatch() + config.add_cleanup(mp.undo) + # Create TmpdirFactory and attach it to the config object. + # + # This is to comply with existing plugins which expect the handler to be + # available at pytest_configure time, but ideally should be moved entirely + # to the tmpdir_factory session fixture. + try: + tmp_path_factory = config._tmp_path_factory # type: ignore[attr-defined] + except AttributeError: + # tmpdir plugin is blocked. + pass + else: + _tmpdirhandler = TempdirFactory(tmp_path_factory, _ispytest=True) + mp.setattr(config, "_tmpdirhandler", _tmpdirhandler, raising=False) + + config.pluginmanager.register(LegacyTmpdirPlugin, "legacypath-tmpdir") + + +@hookimpl +def pytest_plugin_registered(plugin: object, manager: PytestPluginManager) -> None: + # pytester is not loaded by default and is commonly loaded from a conftest, + # so checking for it in `pytest_configure` is not enough. 
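+    # Editorial note, not part of the upstream file: pytester may be loaded at
+    # any point, e.g. via `-p pytester` on the command line or through
+    # `pytest_plugins = ["pytester"]` in a conftest, hence this
+    # per-registration check.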
+ is_pytester = plugin is manager.get_plugin("pytester") + if is_pytester and not manager.is_registered(LegacyTestdirPlugin): + manager.register(LegacyTestdirPlugin, "legacypath-pytester") diff --git a/.venv/lib/python3.11/site-packages/_pytest/logging.py b/.venv/lib/python3.11/site-packages/_pytest/logging.py new file mode 100644 index 0000000..e4fed57 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/logging.py @@ -0,0 +1,960 @@ +# mypy: allow-untyped-defs +"""Access and control log capturing.""" + +from __future__ import annotations + +from collections.abc import Generator +from collections.abc import Mapping +from collections.abc import Set as AbstractSet +from contextlib import contextmanager +from contextlib import nullcontext +from datetime import datetime +from datetime import timedelta +from datetime import timezone +import io +from io import StringIO +import logging +from logging import LogRecord +import os +from pathlib import Path +import re +from types import TracebackType +from typing import final +from typing import Generic +from typing import Literal +from typing import TYPE_CHECKING +from typing import TypeVar + +from _pytest import nodes +from _pytest._io import TerminalWriter +from _pytest.capture import CaptureManager +from _pytest.config import _strtobool +from _pytest.config import Config +from _pytest.config import create_terminal_writer +from _pytest.config import hookimpl +from _pytest.config import UsageError +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.main import Session +from _pytest.stash import StashKey +from _pytest.terminal import TerminalReporter + + +if TYPE_CHECKING: + logging_StreamHandler = logging.StreamHandler[StringIO] +else: + logging_StreamHandler = logging.StreamHandler + +DEFAULT_LOG_FORMAT = "%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s" +DEFAULT_LOG_DATE_FORMAT = "%H:%M:%S" +_ANSI_ESCAPE_SEQ = re.compile(r"\x1b\[[\d;]+m") +caplog_handler_key = StashKey["LogCaptureHandler"]() +caplog_records_key = StashKey[dict[str, list[logging.LogRecord]]]() + + +def _remove_ansi_escape_sequences(text: str) -> str: + return _ANSI_ESCAPE_SEQ.sub("", text) + + +class DatetimeFormatter(logging.Formatter): + """A logging formatter which formats record with + :func:`datetime.datetime.strftime` formatter instead of + :func:`time.strftime` in case of microseconds in format string. + """ + + def formatTime(self, record: LogRecord, datefmt: str | None = None) -> str: + if datefmt and "%f" in datefmt: + ct = self.converter(record.created) + tz = timezone(timedelta(seconds=ct.tm_gmtoff), ct.tm_zone) + # Construct `datetime.datetime` object from `struct_time` + # and msecs information from `record` + # Using int() instead of round() to avoid it exceeding 1_000_000 and causing a ValueError (#11861). 
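+            # Editorial example, not part of the upstream file: with
+            # record.msecs = 999.9997, round() would yield
+            # microsecond=1_000_000, which datetime() rejects, while int()
+            # truncates to the valid 999_999.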
+ dt = datetime(*ct[0:6], microsecond=int(record.msecs * 1000), tzinfo=tz) + return dt.strftime(datefmt) + # Use `logging.Formatter` for non-microsecond formats + return super().formatTime(record, datefmt) + + +class ColoredLevelFormatter(DatetimeFormatter): + """A logging formatter which colorizes the %(levelname)..s part of the + log format passed to __init__.""" + + LOGLEVEL_COLOROPTS: Mapping[int, AbstractSet[str]] = { + logging.CRITICAL: {"red"}, + logging.ERROR: {"red", "bold"}, + logging.WARNING: {"yellow"}, + logging.WARN: {"yellow"}, + logging.INFO: {"green"}, + logging.DEBUG: {"purple"}, + logging.NOTSET: set(), + } + LEVELNAME_FMT_REGEX = re.compile(r"%\(levelname\)([+-.]?\d*(?:\.\d+)?s)") + + def __init__(self, terminalwriter: TerminalWriter, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + self._terminalwriter = terminalwriter + self._original_fmt = self._style._fmt + self._level_to_fmt_mapping: dict[int, str] = {} + + for level, color_opts in self.LOGLEVEL_COLOROPTS.items(): + self.add_color_level(level, *color_opts) + + def add_color_level(self, level: int, *color_opts: str) -> None: + """Add or update color opts for a log level. + + :param level: + Log level to apply a style to, e.g. ``logging.INFO``. + :param color_opts: + ANSI escape sequence color options. Capitalized colors indicates + background color, i.e. ``'green', 'Yellow', 'bold'`` will give bold + green text on yellow background. + + .. warning:: + This is an experimental API. + """ + assert self._fmt is not None + levelname_fmt_match = self.LEVELNAME_FMT_REGEX.search(self._fmt) + if not levelname_fmt_match: + return + levelname_fmt = levelname_fmt_match.group() + + formatted_levelname = levelname_fmt % {"levelname": logging.getLevelName(level)} + + # add ANSI escape sequences around the formatted levelname + color_kwargs = {name: True for name in color_opts} + colorized_formatted_levelname = self._terminalwriter.markup( + formatted_levelname, **color_kwargs + ) + self._level_to_fmt_mapping[level] = self.LEVELNAME_FMT_REGEX.sub( + colorized_formatted_levelname, self._fmt + ) + + def format(self, record: logging.LogRecord) -> str: + fmt = self._level_to_fmt_mapping.get(record.levelno, self._original_fmt) + self._style._fmt = fmt + return super().format(record) + + +class PercentStyleMultiline(logging.PercentStyle): + """A logging style with special support for multiline messages. + + If the message of a record consists of multiple lines, this style + formats the message as if each line were logged separately. + """ + + def __init__(self, fmt: str, auto_indent: int | str | bool | None) -> None: + super().__init__(fmt) + self._auto_indent = self._get_auto_indent(auto_indent) + + @staticmethod + def _get_auto_indent(auto_indent_option: int | str | bool | None) -> int: + """Determine the current auto indentation setting. + + Specify auto indent behavior (on/off/fixed) by passing in + extra={"auto_indent": [value]} to the call to logging.log() or + using a --log-auto-indent [value] command line or the + log_auto_indent [value] config option. + + Default behavior is auto-indent off. + + Using the string "True" or "on" or the boolean True as the value + turns auto indent on, using the string "False" or "off" or the + boolean False or the int 0 turns it off, and specifying a + positive integer fixes the indentation position to the value + specified. + + Any other values for the option are invalid, and will silently be + converted to the default. 
+ + :param None|bool|int|str auto_indent_option: + User specified option for indentation from command line, config + or extra kwarg. Accepts int, bool or str. str option accepts the + same range of values as boolean config options, as well as + positive integers represented in str form. + + :returns: + Indentation value, which can be + -1 (automatically determine indentation) or + 0 (auto-indent turned off) or + >0 (explicitly set indentation position). + """ + if auto_indent_option is None: + return 0 + elif isinstance(auto_indent_option, bool): + if auto_indent_option: + return -1 + else: + return 0 + elif isinstance(auto_indent_option, int): + return int(auto_indent_option) + elif isinstance(auto_indent_option, str): + try: + return int(auto_indent_option) + except ValueError: + pass + try: + if _strtobool(auto_indent_option): + return -1 + except ValueError: + return 0 + + return 0 + + def format(self, record: logging.LogRecord) -> str: + if "\n" in record.message: + if hasattr(record, "auto_indent"): + # Passed in from the "extra={}" kwarg on the call to logging.log(). + auto_indent = self._get_auto_indent(record.auto_indent) + else: + auto_indent = self._auto_indent + + if auto_indent: + lines = record.message.splitlines() + formatted = self._fmt % {**record.__dict__, "message": lines[0]} + + if auto_indent < 0: + indentation = _remove_ansi_escape_sequences(formatted).find( + lines[0] + ) + else: + # Optimizes logging by allowing a fixed indentation. + indentation = auto_indent + lines[0] = formatted + return ("\n" + " " * indentation).join(lines) + return self._fmt % record.__dict__ + + +def get_option_ini(config: Config, *names: str): + for name in names: + ret = config.getoption(name) # 'default' arg won't work as expected + if ret is None: + ret = config.getini(name) + if ret: + return ret + + +def pytest_addoption(parser: Parser) -> None: + """Add options to control log capturing.""" + group = parser.getgroup("logging") + + def add_option_ini(option, dest, default=None, type=None, **kwargs): + parser.addini( + dest, default=default, type=type, help="Default value for " + option + ) + group.addoption(option, dest=dest, **kwargs) + + add_option_ini( + "--log-level", + dest="log_level", + default=None, + metavar="LEVEL", + help=( + "Level of messages to catch/display." + " Not set by default, so it depends on the root/parent log handler's" + ' effective level, where it is "WARNING" by default.' 
+ ), + ) + add_option_ini( + "--log-format", + dest="log_format", + default=DEFAULT_LOG_FORMAT, + help="Log format used by the logging module", + ) + add_option_ini( + "--log-date-format", + dest="log_date_format", + default=DEFAULT_LOG_DATE_FORMAT, + help="Log date format used by the logging module", + ) + parser.addini( + "log_cli", + default=False, + type="bool", + help='Enable log display during test run (also known as "live logging")', + ) + add_option_ini( + "--log-cli-level", dest="log_cli_level", default=None, help="CLI logging level" + ) + add_option_ini( + "--log-cli-format", + dest="log_cli_format", + default=None, + help="Log format used by the logging module", + ) + add_option_ini( + "--log-cli-date-format", + dest="log_cli_date_format", + default=None, + help="Log date format used by the logging module", + ) + add_option_ini( + "--log-file", + dest="log_file", + default=None, + help="Path to a file when logging will be written to", + ) + add_option_ini( + "--log-file-mode", + dest="log_file_mode", + default="w", + choices=["w", "a"], + help="Log file open mode", + ) + add_option_ini( + "--log-file-level", + dest="log_file_level", + default=None, + help="Log file logging level", + ) + add_option_ini( + "--log-file-format", + dest="log_file_format", + default=None, + help="Log format used by the logging module", + ) + add_option_ini( + "--log-file-date-format", + dest="log_file_date_format", + default=None, + help="Log date format used by the logging module", + ) + add_option_ini( + "--log-auto-indent", + dest="log_auto_indent", + default=None, + help="Auto-indent multiline messages passed to the logging module. Accepts true|on, false|off or an integer.", + ) + group.addoption( + "--log-disable", + action="append", + default=[], + dest="logger_disable", + help="Disable a logger by name. Can be passed multiple times.", + ) + + +_HandlerType = TypeVar("_HandlerType", bound=logging.Handler) + + +# Not using @contextmanager for performance reasons. 
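+# Editorial sketch, not part of the upstream file: typical internal usage of
+# the context manager below is
+#
+#     with catching_logs(LogCaptureHandler(), level=logging.INFO) as handler:
+#         logging.getLogger().warning("captured")
+#         records = handler.records
+#
+# i.e. the handler is attached to the root logger (and the root level lowered,
+# if needed) only for the duration of the block.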
+class catching_logs(Generic[_HandlerType]): + """Context manager that prepares the whole logging machinery properly.""" + + __slots__ = ("handler", "level", "orig_level") + + def __init__(self, handler: _HandlerType, level: int | None = None) -> None: + self.handler = handler + self.level = level + + def __enter__(self) -> _HandlerType: + root_logger = logging.getLogger() + if self.level is not None: + self.handler.setLevel(self.level) + root_logger.addHandler(self.handler) + if self.level is not None: + self.orig_level = root_logger.level + root_logger.setLevel(min(self.orig_level, self.level)) + return self.handler + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + root_logger = logging.getLogger() + if self.level is not None: + root_logger.setLevel(self.orig_level) + root_logger.removeHandler(self.handler) + + +class LogCaptureHandler(logging_StreamHandler): + """A logging handler that stores log records and the log text.""" + + def __init__(self) -> None: + """Create a new log handler.""" + super().__init__(StringIO()) + self.records: list[logging.LogRecord] = [] + + def emit(self, record: logging.LogRecord) -> None: + """Keep the log records in a list in addition to the log text.""" + self.records.append(record) + super().emit(record) + + def reset(self) -> None: + self.records = [] + self.stream = StringIO() + + def clear(self) -> None: + self.records.clear() + self.stream = StringIO() + + def handleError(self, record: logging.LogRecord) -> None: + if logging.raiseExceptions: + # Fail the test if the log message is bad (emit failed). + # The default behavior of logging is to print "Logging error" + # to stderr with the call stack and some extra details. + # pytest wants to make such mistakes visible during testing. + raise # noqa: PLE0704 + + +@final +class LogCaptureFixture: + """Provides access and control of log capturing.""" + + def __init__(self, item: nodes.Node, *, _ispytest: bool = False) -> None: + check_ispytest(_ispytest) + self._item = item + self._initial_handler_level: int | None = None + # Dict of log name -> log level. + self._initial_logger_levels: dict[str | None, int] = {} + self._initial_disabled_logging_level: int | None = None + + def _finalize(self) -> None: + """Finalize the fixture. + + This restores the log levels and the disabled logging levels changed by :meth:`set_level`. + """ + # Restore log levels. + if self._initial_handler_level is not None: + self.handler.setLevel(self._initial_handler_level) + for logger_name, level in self._initial_logger_levels.items(): + logger = logging.getLogger(logger_name) + logger.setLevel(level) + # Disable logging at the original disabled logging level. + if self._initial_disabled_logging_level is not None: + logging.disable(self._initial_disabled_logging_level) + self._initial_disabled_logging_level = None + + @property + def handler(self) -> LogCaptureHandler: + """Get the logging handler used by the fixture.""" + return self._item.stash[caplog_handler_key] + + def get_records( + self, when: Literal["setup", "call", "teardown"] + ) -> list[logging.LogRecord]: + """Get the logging records for one of the possible test phases. + + :param when: + Which test phase to obtain the records from. + Valid values are: "setup", "call" and "teardown". + + :returns: The list of captured records at the given stage. + + .. 
versionadded:: 3.4 + """ + return self._item.stash[caplog_records_key].get(when, []) + + @property + def text(self) -> str: + """The formatted log text.""" + return _remove_ansi_escape_sequences(self.handler.stream.getvalue()) + + @property + def records(self) -> list[logging.LogRecord]: + """The list of log records.""" + return self.handler.records + + @property + def record_tuples(self) -> list[tuple[str, int, str]]: + """A list of a stripped down version of log records intended + for use in assertion comparison. + + The format of the tuple is: + + (logger_name, log_level, message) + """ + return [(r.name, r.levelno, r.getMessage()) for r in self.records] + + @property + def messages(self) -> list[str]: + """A list of format-interpolated log messages. + + Unlike 'records', which contains the format string and parameters for + interpolation, log messages in this list are all interpolated. + + Unlike 'text', which contains the output from the handler, log + messages in this list are unadorned with levels, timestamps, etc, + making exact comparisons more reliable. + + Note that traceback or stack info (from :func:`logging.exception` or + the `exc_info` or `stack_info` arguments to the logging functions) is + not included, as this is added by the formatter in the handler. + + .. versionadded:: 3.7 + """ + return [r.getMessage() for r in self.records] + + def clear(self) -> None: + """Reset the list of log records and the captured log text.""" + self.handler.clear() + + def _force_enable_logging( + self, level: int | str, logger_obj: logging.Logger + ) -> int: + """Enable the desired logging level if the global level was disabled via ``logging.disabled``. + + Only enables logging levels greater than or equal to the requested ``level``. + + Does nothing if the desired ``level`` wasn't disabled. + + :param level: + The logger level caplog should capture. + All logging is enabled if a non-standard logging level string is supplied. + Valid level strings are in :data:`logging._nameToLevel`. + :param logger_obj: The logger object to check. + + :return: The original disabled logging level. + """ + original_disable_level: int = logger_obj.manager.disable + + if isinstance(level, str): + # Try to translate the level string to an int for `logging.disable()` + level = logging.getLevelName(level) + + if not isinstance(level, int): + # The level provided was not valid, so just un-disable all logging. + logging.disable(logging.NOTSET) + elif not logger_obj.isEnabledFor(level): + # Each level is `10` away from other levels. + # https://docs.python.org/3/library/logging.html#logging-levels + disable_level = max(level - 10, logging.NOTSET) + logging.disable(disable_level) + + return original_disable_level + + def set_level(self, level: int | str, logger: str | None = None) -> None: + """Set the threshold level of a logger for the duration of a test. + + Logging messages which are less severe than this level will not be captured. + + .. versionchanged:: 3.4 + The levels of the loggers changed by this function will be + restored to their initial values at the end of the test. + + Will enable the requested logging level if it was disabled via :func:`logging.disable`. + + :param level: The level. + :param logger: The logger to update. If not given, the root logger. + """ + logger_obj = logging.getLogger(logger) + # Save the original log-level to restore it during teardown. 
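+        # Editorial note, not part of the upstream file: setdefault() means
+        # repeated set_level() calls within one test remember only the level
+        # seen first, i.e. the pre-test value that _finalize() later restores.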
+ self._initial_logger_levels.setdefault(logger, logger_obj.level) + logger_obj.setLevel(level) + if self._initial_handler_level is None: + self._initial_handler_level = self.handler.level + self.handler.setLevel(level) + initial_disabled_logging_level = self._force_enable_logging(level, logger_obj) + if self._initial_disabled_logging_level is None: + self._initial_disabled_logging_level = initial_disabled_logging_level + + @contextmanager + def at_level(self, level: int | str, logger: str | None = None) -> Generator[None]: + """Context manager that sets the level for capturing of logs. After + the end of the 'with' statement the level is restored to its original + value. + + Will enable the requested logging level if it was disabled via :func:`logging.disable`. + + :param level: The level. + :param logger: The logger to update. If not given, the root logger. + """ + logger_obj = logging.getLogger(logger) + orig_level = logger_obj.level + logger_obj.setLevel(level) + handler_orig_level = self.handler.level + self.handler.setLevel(level) + original_disable_level = self._force_enable_logging(level, logger_obj) + try: + yield + finally: + logger_obj.setLevel(orig_level) + self.handler.setLevel(handler_orig_level) + logging.disable(original_disable_level) + + @contextmanager + def filtering(self, filter_: logging.Filter) -> Generator[None]: + """Context manager that temporarily adds the given filter to the caplog's + :meth:`handler` for the 'with' statement block, and removes that filter at the + end of the block. + + :param filter_: A custom :class:`logging.Filter` object. + + .. versionadded:: 7.5 + """ + self.handler.addFilter(filter_) + try: + yield + finally: + self.handler.removeFilter(filter_) + + +@fixture +def caplog(request: FixtureRequest) -> Generator[LogCaptureFixture]: + """Access and control log capturing. + + Captured logs are available through the following properties/methods:: + + * caplog.messages -> list of format-interpolated log messages + * caplog.text -> string containing formatted log output + * caplog.records -> list of logging.LogRecord instances + * caplog.record_tuples -> list of (logger_name, level, message) tuples + * caplog.clear() -> clear captured records and formatted log output string + """ + result = LogCaptureFixture(request.node, _ispytest=True) + yield result + result._finalize() + + +def get_log_level_for_setting(config: Config, *setting_names: str) -> int | None: + for setting_name in setting_names: + log_level = config.getoption(setting_name) + if log_level is None: + log_level = config.getini(setting_name) + if log_level: + break + else: + return None + + if isinstance(log_level, str): + log_level = log_level.upper() + try: + return int(getattr(logging, log_level, log_level)) + except ValueError as e: + # Python logging does not recognise this as a logging level + raise UsageError( + f"'{log_level}' is not recognized as a logging level name for " + f"'{setting_name}'. Please consider passing the " + "logging level num instead." + ) from e + + +# run after terminalreporter/capturemanager are configured +@hookimpl(trylast=True) +def pytest_configure(config: Config) -> None: + config.pluginmanager.register(LoggingPlugin(config), "logging-plugin") + + +class LoggingPlugin: + """Attaches to the logging module and captures log messages for each test.""" + + def __init__(self, config: Config) -> None: + """Create a new plugin to capture log messages. 
+ + The formatter can be safely shared across all handlers so + create a single one for the entire test session here. + """ + self._config = config + + # Report logging. + self.formatter = self._create_formatter( + get_option_ini(config, "log_format"), + get_option_ini(config, "log_date_format"), + get_option_ini(config, "log_auto_indent"), + ) + self.log_level = get_log_level_for_setting(config, "log_level") + self.caplog_handler = LogCaptureHandler() + self.caplog_handler.setFormatter(self.formatter) + self.report_handler = LogCaptureHandler() + self.report_handler.setFormatter(self.formatter) + + # File logging. + self.log_file_level = get_log_level_for_setting( + config, "log_file_level", "log_level" + ) + log_file = get_option_ini(config, "log_file") or os.devnull + if log_file != os.devnull: + directory = os.path.dirname(os.path.abspath(log_file)) + if not os.path.isdir(directory): + os.makedirs(directory) + + self.log_file_mode = get_option_ini(config, "log_file_mode") or "w" + self.log_file_handler = _FileHandler( + log_file, mode=self.log_file_mode, encoding="UTF-8" + ) + log_file_format = get_option_ini(config, "log_file_format", "log_format") + log_file_date_format = get_option_ini( + config, "log_file_date_format", "log_date_format" + ) + + log_file_formatter = DatetimeFormatter( + log_file_format, datefmt=log_file_date_format + ) + self.log_file_handler.setFormatter(log_file_formatter) + + # CLI/live logging. + self.log_cli_level = get_log_level_for_setting( + config, "log_cli_level", "log_level" + ) + if self._log_cli_enabled(): + terminal_reporter = config.pluginmanager.get_plugin("terminalreporter") + # Guaranteed by `_log_cli_enabled()`. + assert terminal_reporter is not None + capture_manager = config.pluginmanager.get_plugin("capturemanager") + # if capturemanager plugin is disabled, live logging still works. + self.log_cli_handler: ( + _LiveLoggingStreamHandler | _LiveLoggingNullHandler + ) = _LiveLoggingStreamHandler(terminal_reporter, capture_manager) + else: + self.log_cli_handler = _LiveLoggingNullHandler() + log_cli_formatter = self._create_formatter( + get_option_ini(config, "log_cli_format", "log_format"), + get_option_ini(config, "log_cli_date_format", "log_date_format"), + get_option_ini(config, "log_auto_indent"), + ) + self.log_cli_handler.setFormatter(log_cli_formatter) + self._disable_loggers(loggers_to_disable=config.option.logger_disable) + + def _disable_loggers(self, loggers_to_disable: list[str]) -> None: + if not loggers_to_disable: + return + + for name in loggers_to_disable: + logger = logging.getLogger(name) + logger.disabled = True + + def _create_formatter(self, log_format, log_date_format, auto_indent): + # Color option doesn't exist if terminal plugin is disabled. + color = getattr(self._config.option, "color", "no") + if color != "no" and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search( + log_format + ): + formatter: logging.Formatter = ColoredLevelFormatter( + create_terminal_writer(self._config), log_format, log_date_format + ) + else: + formatter = DatetimeFormatter(log_format, log_date_format) + + formatter._style = PercentStyleMultiline( + formatter._style._fmt, auto_indent=auto_indent + ) + + return formatter + + def set_log_path(self, fname: str) -> None: + """Set the filename parameter for Logging.FileHandler(). + + Creates parent directory if it does not exist. + + .. warning:: + This is an experimental API. 
+ """ + fpath = Path(fname) + + if not fpath.is_absolute(): + fpath = self._config.rootpath / fpath + + if not fpath.parent.exists(): + fpath.parent.mkdir(exist_ok=True, parents=True) + + # https://github.com/python/mypy/issues/11193 + stream: io.TextIOWrapper = fpath.open(mode=self.log_file_mode, encoding="UTF-8") # type: ignore[assignment] + old_stream = self.log_file_handler.setStream(stream) + if old_stream: + old_stream.close() + + def _log_cli_enabled(self) -> bool: + """Return whether live logging is enabled.""" + enabled = self._config.getoption( + "--log-cli-level" + ) is not None or self._config.getini("log_cli") + if not enabled: + return False + + terminal_reporter = self._config.pluginmanager.get_plugin("terminalreporter") + if terminal_reporter is None: + # terminal reporter is disabled e.g. by pytest-xdist. + return False + + return True + + @hookimpl(wrapper=True, tryfirst=True) + def pytest_sessionstart(self) -> Generator[None]: + self.log_cli_handler.set_when("sessionstart") + + with catching_logs(self.log_cli_handler, level=self.log_cli_level): + with catching_logs(self.log_file_handler, level=self.log_file_level): + return (yield) + + @hookimpl(wrapper=True, tryfirst=True) + def pytest_collection(self) -> Generator[None]: + self.log_cli_handler.set_when("collection") + + with catching_logs(self.log_cli_handler, level=self.log_cli_level): + with catching_logs(self.log_file_handler, level=self.log_file_level): + return (yield) + + @hookimpl(wrapper=True) + def pytest_runtestloop(self, session: Session) -> Generator[None, object, object]: + if session.config.option.collectonly: + return (yield) + + if self._log_cli_enabled() and self._config.get_verbosity() < 1: + # The verbose flag is needed to avoid messy test progress output. + self._config.option.verbose = 1 + + with catching_logs(self.log_cli_handler, level=self.log_cli_level): + with catching_logs(self.log_file_handler, level=self.log_file_level): + return (yield) # Run all the tests. 
+ + @hookimpl + def pytest_runtest_logstart(self) -> None: + self.log_cli_handler.reset() + self.log_cli_handler.set_when("start") + + @hookimpl + def pytest_runtest_logreport(self) -> None: + self.log_cli_handler.set_when("logreport") + + @contextmanager + def _runtest_for(self, item: nodes.Item, when: str) -> Generator[None]: + """Implement the internals of the pytest_runtest_xxx() hooks.""" + with ( + catching_logs( + self.caplog_handler, + level=self.log_level, + ) as caplog_handler, + catching_logs( + self.report_handler, + level=self.log_level, + ) as report_handler, + ): + caplog_handler.reset() + report_handler.reset() + item.stash[caplog_records_key][when] = caplog_handler.records + item.stash[caplog_handler_key] = caplog_handler + + try: + yield + finally: + log = report_handler.stream.getvalue().strip() + item.add_report_section(when, "log", log) + + @hookimpl(wrapper=True) + def pytest_runtest_setup(self, item: nodes.Item) -> Generator[None]: + self.log_cli_handler.set_when("setup") + + empty: dict[str, list[logging.LogRecord]] = {} + item.stash[caplog_records_key] = empty + with self._runtest_for(item, "setup"): + yield + + @hookimpl(wrapper=True) + def pytest_runtest_call(self, item: nodes.Item) -> Generator[None]: + self.log_cli_handler.set_when("call") + + with self._runtest_for(item, "call"): + yield + + @hookimpl(wrapper=True) + def pytest_runtest_teardown(self, item: nodes.Item) -> Generator[None]: + self.log_cli_handler.set_when("teardown") + + try: + with self._runtest_for(item, "teardown"): + yield + finally: + del item.stash[caplog_records_key] + del item.stash[caplog_handler_key] + + @hookimpl + def pytest_runtest_logfinish(self) -> None: + self.log_cli_handler.set_when("finish") + + @hookimpl(wrapper=True, tryfirst=True) + def pytest_sessionfinish(self) -> Generator[None]: + self.log_cli_handler.set_when("sessionfinish") + + with catching_logs(self.log_cli_handler, level=self.log_cli_level): + with catching_logs(self.log_file_handler, level=self.log_file_level): + return (yield) + + @hookimpl + def pytest_unconfigure(self) -> None: + # Close the FileHandler explicitly. + # (logging.shutdown might have lost the weakref?!) + self.log_file_handler.close() + + +class _FileHandler(logging.FileHandler): + """A logging FileHandler with pytest tweaks.""" + + def handleError(self, record: logging.LogRecord) -> None: + # Handled by LogCaptureHandler. + pass + + +class _LiveLoggingStreamHandler(logging_StreamHandler): + """A logging StreamHandler used by the live logging feature: it will + write a newline before the first log message in each test. + + During live logging we must also explicitly disable stdout/stderr + capturing otherwise it will get captured and won't appear in the + terminal. + """ + + # Officially stream needs to be a IO[str], but TerminalReporter + # isn't. So force it. 
+ stream: TerminalReporter = None # type: ignore + + def __init__( + self, + terminal_reporter: TerminalReporter, + capture_manager: CaptureManager | None, + ) -> None: + super().__init__(stream=terminal_reporter) # type: ignore[arg-type] + self.capture_manager = capture_manager + self.reset() + self.set_when(None) + self._test_outcome_written = False + + def reset(self) -> None: + """Reset the handler; should be called before the start of each test.""" + self._first_record_emitted = False + + def set_when(self, when: str | None) -> None: + """Prepare for the given test phase (setup/call/teardown).""" + self._when = when + self._section_name_shown = False + if when == "start": + self._test_outcome_written = False + + def emit(self, record: logging.LogRecord) -> None: + ctx_manager = ( + self.capture_manager.global_and_fixture_disabled() + if self.capture_manager + else nullcontext() + ) + with ctx_manager: + if not self._first_record_emitted: + self.stream.write("\n") + self._first_record_emitted = True + elif self._when in ("teardown", "finish"): + if not self._test_outcome_written: + self._test_outcome_written = True + self.stream.write("\n") + if not self._section_name_shown and self._when: + self.stream.section("live log " + self._when, sep="-", bold=True) + self._section_name_shown = True + super().emit(record) + + def handleError(self, record: logging.LogRecord) -> None: + # Handled by LogCaptureHandler. + pass + + +class _LiveLoggingNullHandler(logging.NullHandler): + """A logging handler used when live logging is disabled.""" + + def reset(self) -> None: + pass + + def set_when(self, when: str) -> None: + pass + + def handleError(self, record: logging.LogRecord) -> None: + # Handled by LogCaptureHandler. + pass diff --git a/.venv/lib/python3.11/site-packages/_pytest/main.py b/.venv/lib/python3.11/site-packages/_pytest/main.py new file mode 100644 index 0000000..dac084b --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/main.py @@ -0,0 +1,1076 @@ +"""Core implementation of the testing process: init, session, runtest loop.""" + +from __future__ import annotations + +import argparse +from collections.abc import Callable +from collections.abc import Iterable +from collections.abc import Iterator +from collections.abc import Sequence +from collections.abc import Set as AbstractSet +import dataclasses +import fnmatch +import functools +import importlib +import importlib.util +import os +from pathlib import Path +import sys +from typing import final +from typing import Literal +from typing import overload +from typing import TYPE_CHECKING +import warnings + +import pluggy + +from _pytest import nodes +import _pytest._code +from _pytest.config import Config +from _pytest.config import directory_arg +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config import PytestPluginManager +from _pytest.config import UsageError +from _pytest.config.argparsing import Parser +from _pytest.config.compat import PathAwareHookProxy +from _pytest.outcomes import exit +from _pytest.pathlib import absolutepath +from _pytest.pathlib import bestrelpath +from _pytest.pathlib import fnmatch_ex +from _pytest.pathlib import safe_exists +from _pytest.pathlib import scandir +from _pytest.reports import CollectReport +from _pytest.reports import TestReport +from _pytest.runner import collect_one_node +from _pytest.runner import SetupState +from _pytest.warning_types import PytestWarning + + +if TYPE_CHECKING: + from typing_extensions import Self + + from 
_pytest.fixtures import FixtureManager + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general", "Running and selection options") + group._addoption( # private to use reserved lower-case short option + "-x", + "--exitfirst", + action="store_const", + dest="maxfail", + const=1, + help="Exit instantly on first error or failed test", + ) + group.addoption( + "--maxfail", + metavar="num", + action="store", + type=int, + dest="maxfail", + default=0, + help="Exit after first num failures or errors", + ) + group.addoption( + "--strict-config", + action="store_true", + help="Any warnings encountered while parsing the `pytest` section of the " + "configuration file raise errors", + ) + group.addoption( + "--strict-markers", + action="store_true", + help="Markers not registered in the `markers` section of the configuration " + "file raise errors", + ) + group.addoption( + "--strict", + action="store_true", + help="(Deprecated) alias to --strict-markers", + ) + + group = parser.getgroup("pytest-warnings") + group.addoption( + "-W", + "--pythonwarnings", + action="append", + help="Set which warnings to report, see -W option of Python itself", + ) + parser.addini( + "filterwarnings", + type="linelist", + help="Each line specifies a pattern for " + "warnings.filterwarnings. " + "Processed after -W/--pythonwarnings.", + ) + + group = parser.getgroup("collect", "collection") + group.addoption( + "--collectonly", + "--collect-only", + "--co", + action="store_true", + help="Only collect tests, don't execute them", + ) + group.addoption( + "--pyargs", + action="store_true", + help="Try to interpret all arguments as Python packages", + ) + group.addoption( + "--ignore", + action="append", + metavar="path", + help="Ignore path during collection (multi-allowed)", + ) + group.addoption( + "--ignore-glob", + action="append", + metavar="path", + help="Ignore path pattern during collection (multi-allowed)", + ) + group.addoption( + "--deselect", + action="append", + metavar="nodeid_prefix", + help="Deselect item (via node id prefix) during collection (multi-allowed)", + ) + group.addoption( + "--confcutdir", + dest="confcutdir", + default=None, + metavar="dir", + type=functools.partial(directory_arg, optname="--confcutdir"), + help="Only load conftest.py's relative to specified dir", + ) + group.addoption( + "--noconftest", + action="store_true", + dest="noconftest", + default=False, + help="Don't load any conftest.py files", + ) + group.addoption( + "--keepduplicates", + "--keep-duplicates", + action="store_true", + dest="keepduplicates", + default=False, + help="Keep duplicate tests", + ) + group.addoption( + "--collect-in-virtualenv", + action="store_true", + dest="collect_in_virtualenv", + default=False, + help="Don't ignore tests in a local virtualenv directory", + ) + group.addoption( + "--continue-on-collection-errors", + action="store_true", + default=False, + dest="continue_on_collection_errors", + help="Force test execution even if collection errors occur", + ) + group.addoption( + "--import-mode", + default="prepend", + choices=["prepend", "append", "importlib"], + dest="importmode", + help="Prepend/append to sys.path when importing test modules and conftest " + "files. 
Default: prepend.", + ) + parser.addini( + "norecursedirs", + "Directory patterns to avoid for recursion", + type="args", + default=[ + "*.egg", + ".*", + "_darcs", + "build", + "CVS", + "dist", + "node_modules", + "venv", + "{arch}", + ], + ) + parser.addini( + "testpaths", + "Directories to search for tests when no files or directories are given on the " + "command line", + type="args", + default=[], + ) + parser.addini( + "collect_imported_tests", + "Whether to collect tests in imported modules outside `testpaths`", + type="bool", + default=True, + ) + parser.addini( + "consider_namespace_packages", + type="bool", + default=False, + help="Consider namespace packages when resolving module names during import", + ) + + group = parser.getgroup("debugconfig", "test session debugging and configuration") + group._addoption( # private to use reserved lower-case short option + "-c", + "--config-file", + metavar="FILE", + type=str, + dest="inifilename", + help="Load configuration from `FILE` instead of trying to locate one of the " + "implicit configuration files.", + ) + group.addoption( + "--rootdir", + action="store", + dest="rootdir", + help="Define root directory for tests. Can be relative path: 'root_dir', './root_dir', " + "'root_dir/another_dir/'; absolute path: '/home/user/root_dir'; path with variables: " + "'$HOME/root_dir'.", + ) + group.addoption( + "--basetemp", + dest="basetemp", + default=None, + type=validate_basetemp, + metavar="dir", + help=( + "Base temporary directory for this test run. " + "(Warning: this directory is removed if it exists.)" + ), + ) + + +def validate_basetemp(path: str) -> str: + # GH 7119 + msg = "basetemp must not be empty, the current working directory or any parent directory of it" + + # empty path + if not path: + raise argparse.ArgumentTypeError(msg) + + def is_ancestor(base: Path, query: Path) -> bool: + """Return whether query is an ancestor of base.""" + if base == query: + return True + return query in base.parents + + # check if path is an ancestor of cwd + if is_ancestor(Path.cwd(), Path(path).absolute()): + raise argparse.ArgumentTypeError(msg) + + # check symlinks for ancestors + if is_ancestor(Path.cwd().resolve(), Path(path).resolve()): + raise argparse.ArgumentTypeError(msg) + + return path + + +def wrap_session( + config: Config, doit: Callable[[Config, Session], int | ExitCode | None] +) -> int | ExitCode: + """Skeleton command line program.""" + session = Session.from_config(config) + session.exitstatus = ExitCode.OK + initstate = 0 + try: + try: + config._do_configure() + initstate = 1 + config.hook.pytest_sessionstart(session=session) + initstate = 2 + session.exitstatus = doit(config, session) or 0 + except UsageError: + session.exitstatus = ExitCode.USAGE_ERROR + raise + except Failed: + session.exitstatus = ExitCode.TESTS_FAILED + except (KeyboardInterrupt, exit.Exception): + excinfo = _pytest._code.ExceptionInfo.from_current() + exitstatus: int | ExitCode = ExitCode.INTERRUPTED + if isinstance(excinfo.value, exit.Exception): + if excinfo.value.returncode is not None: + exitstatus = excinfo.value.returncode + if initstate < 2: + sys.stderr.write(f"{excinfo.typename}: {excinfo.value.msg}\n") + config.hook.pytest_keyboard_interrupt(excinfo=excinfo) + session.exitstatus = exitstatus + except BaseException: + session.exitstatus = ExitCode.INTERNAL_ERROR + excinfo = _pytest._code.ExceptionInfo.from_current() + try: + config.notify_exception(excinfo, config.option) + except exit.Exception as exc: + if exc.returncode is not None: + 
                    session.exitstatus = exc.returncode
+                    sys.stderr.write(f"{type(exc).__name__}: {exc}\n")
+            else:
+                if isinstance(excinfo.value, SystemExit):
+                    sys.stderr.write("mainloop: caught unexpected SystemExit!\n")
+
+    finally:
+        # Explicitly break reference cycle.
+        excinfo = None  # type: ignore
+        os.chdir(session.startpath)
+        if initstate >= 2:
+            try:
+                config.hook.pytest_sessionfinish(
+                    session=session, exitstatus=session.exitstatus
+                )
+            except exit.Exception as exc:
+                if exc.returncode is not None:
+                    session.exitstatus = exc.returncode
+                    sys.stderr.write(f"{type(exc).__name__}: {exc}\n")
+        config._ensure_unconfigure()
+    return session.exitstatus
+
+
+def pytest_cmdline_main(config: Config) -> int | ExitCode:
+    return wrap_session(config, _main)
+
+
+def _main(config: Config, session: Session) -> int | ExitCode | None:
+    """Default command line protocol for initialization, session,
+    running tests and reporting."""
+    config.hook.pytest_collection(session=session)
+    config.hook.pytest_runtestloop(session=session)
+
+    if session.testsfailed:
+        return ExitCode.TESTS_FAILED
+    elif session.testscollected == 0:
+        return ExitCode.NO_TESTS_COLLECTED
+    return None
+
+
+def pytest_collection(session: Session) -> None:
+    session.perform_collect()
+
+
+def pytest_runtestloop(session: Session) -> bool:
+    if session.testsfailed and not session.config.option.continue_on_collection_errors:
+        raise session.Interrupted(
+            f"{session.testsfailed} error{'s' if session.testsfailed != 1 else ''} during collection"
+        )
+
+    if session.config.option.collectonly:
+        return True
+
+    for i, item in enumerate(session.items):
+        nextitem = session.items[i + 1] if i + 1 < len(session.items) else None
+        item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
+        if session.shouldfail:
+            raise session.Failed(session.shouldfail)
+        if session.shouldstop:
+            raise session.Interrupted(session.shouldstop)
+    return True
+
+
+def _in_venv(path: Path) -> bool:
+    """Attempt to detect if ``path`` is the root of a Virtual Environment by
+    checking for the existence of the pyvenv.cfg file.
+
+    [https://peps.python.org/pep-0405/]
+
+    For regression protection we also check for conda environments that do not include pyvenv.cfg yet --
+    https://github.com/conda/conda/issues/13337 is the conda issue tracking adding pyvenv.cfg.
+
+    Checking for the `conda-meta/history` file per https://github.com/pytest-dev/pytest/issues/12652#issuecomment-2246336902.
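+
+    A sketch of the intended behaviour (paths are illustrative)::
+
+        _in_venv(Path("/repo/.venv"))      # True if .venv/pyvenv.cfg exists
+        _in_venv(Path("/opt/miniconda3"))  # True if conda-meta/history exists
+        _in_venv(Path("/repo/src"))        # False for an ordinary directory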
+ + """ + try: + return ( + path.joinpath("pyvenv.cfg").is_file() + or path.joinpath("conda-meta", "history").is_file() + ) + except OSError: + return False + + +def pytest_ignore_collect(collection_path: Path, config: Config) -> bool | None: + if collection_path.name == "__pycache__": + return True + + ignore_paths = config._getconftest_pathlist( + "collect_ignore", path=collection_path.parent + ) + ignore_paths = ignore_paths or [] + excludeopt = config.getoption("ignore") + if excludeopt: + ignore_paths.extend(absolutepath(x) for x in excludeopt) + + if collection_path in ignore_paths: + return True + + ignore_globs = config._getconftest_pathlist( + "collect_ignore_glob", path=collection_path.parent + ) + ignore_globs = ignore_globs or [] + excludeglobopt = config.getoption("ignore_glob") + if excludeglobopt: + ignore_globs.extend(absolutepath(x) for x in excludeglobopt) + + if any(fnmatch.fnmatch(str(collection_path), str(glob)) for glob in ignore_globs): + return True + + allow_in_venv = config.getoption("collect_in_virtualenv") + if not allow_in_venv and _in_venv(collection_path): + return True + + if collection_path.is_dir(): + norecursepatterns = config.getini("norecursedirs") + if any(fnmatch_ex(pat, collection_path) for pat in norecursepatterns): + return True + + return None + + +def pytest_collect_directory( + path: Path, parent: nodes.Collector +) -> nodes.Collector | None: + return Dir.from_parent(parent, path=path) + + +def pytest_collection_modifyitems(items: list[nodes.Item], config: Config) -> None: + deselect_prefixes = tuple(config.getoption("deselect") or []) + if not deselect_prefixes: + return + + remaining = [] + deselected = [] + for colitem in items: + if colitem.nodeid.startswith(deselect_prefixes): + deselected.append(colitem) + else: + remaining.append(colitem) + + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = remaining + + +class FSHookProxy: + def __init__( + self, + pm: PytestPluginManager, + remove_mods: AbstractSet[object], + ) -> None: + self.pm = pm + self.remove_mods = remove_mods + + def __getattr__(self, name: str) -> pluggy.HookCaller: + x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods) + self.__dict__[name] = x + return x + + +class Interrupted(KeyboardInterrupt): + """Signals that the test run was interrupted.""" + + __module__ = "builtins" # For py3. + + +class Failed(Exception): + """Signals a stop as failed test run.""" + + +@dataclasses.dataclass +class _bestrelpath_cache(dict[Path, str]): + __slots__ = ("path",) + + path: Path + + def __missing__(self, path: Path) -> str: + r = bestrelpath(self.path, path) + self[path] = r + return r + + +@final +class Dir(nodes.Directory): + """Collector of files in a file system directory. + + .. versionadded:: 8.0 + + .. note:: + + Python directories with an `__init__.py` file are instead collected by + :class:`~pytest.Package` by default. Both are :class:`~pytest.Directory` + collectors. + """ + + @classmethod + def from_parent( # type: ignore[override] + cls, + parent: nodes.Collector, + *, + path: Path, + ) -> Self: + """The public constructor. + + :param parent: The parent collector of this Dir. + :param path: The directory's path. 
+ :type path: pathlib.Path + """ + return super().from_parent(parent=parent, path=path) + + def collect(self) -> Iterable[nodes.Item | nodes.Collector]: + config = self.config + col: nodes.Collector | None + cols: Sequence[nodes.Collector] + ihook = self.ihook + for direntry in scandir(self.path): + if direntry.is_dir(): + path = Path(direntry.path) + if not self.session.isinitpath(path, with_parents=True): + if ihook.pytest_ignore_collect(collection_path=path, config=config): + continue + col = ihook.pytest_collect_directory(path=path, parent=self) + if col is not None: + yield col + + elif direntry.is_file(): + path = Path(direntry.path) + if not self.session.isinitpath(path): + if ihook.pytest_ignore_collect(collection_path=path, config=config): + continue + cols = ihook.pytest_collect_file(file_path=path, parent=self) + yield from cols + + +@final +class Session(nodes.Collector): + """The root of the collection tree. + + ``Session`` collects the initial paths given as arguments to pytest. + """ + + Interrupted = Interrupted + Failed = Failed + # Set on the session by runner.pytest_sessionstart. + _setupstate: SetupState + # Set on the session by fixtures.pytest_sessionstart. + _fixturemanager: FixtureManager + exitstatus: int | ExitCode + + def __init__(self, config: Config) -> None: + super().__init__( + name="", + path=config.rootpath, + fspath=None, + parent=None, + config=config, + session=self, + nodeid="", + ) + self.testsfailed = 0 + self.testscollected = 0 + self._shouldstop: bool | str = False + self._shouldfail: bool | str = False + self.trace = config.trace.root.get("collection") + self._initialpaths: frozenset[Path] = frozenset() + self._initialpaths_with_parents: frozenset[Path] = frozenset() + self._notfound: list[tuple[str, Sequence[nodes.Collector]]] = [] + self._initial_parts: list[CollectionArgument] = [] + self._collection_cache: dict[nodes.Collector, CollectReport] = {} + self.items: list[nodes.Item] = [] + + self._bestrelpathcache: dict[Path, str] = _bestrelpath_cache(config.rootpath) + + self.config.pluginmanager.register(self, name="session") + + @classmethod + def from_config(cls, config: Config) -> Session: + session: Session = cls._create(config=config) + return session + + def __repr__(self) -> str: + return ( + f"<{self.__class__.__name__} {self.name} " + f"exitstatus=%r " + f"testsfailed={self.testsfailed} " + f"testscollected={self.testscollected}>" + ) % getattr(self, "exitstatus", "") + + @property + def shouldstop(self) -> bool | str: + return self._shouldstop + + @shouldstop.setter + def shouldstop(self, value: bool | str) -> None: + # The runner checks shouldfail and assumes that if it is set we are + # definitely stopping, so prevent unsetting it. + if value is False and self._shouldstop: + warnings.warn( + PytestWarning( + "session.shouldstop cannot be unset after it has been set; ignoring." + ), + stacklevel=2, + ) + return + self._shouldstop = value + + @property + def shouldfail(self) -> bool | str: + return self._shouldfail + + @shouldfail.setter + def shouldfail(self, value: bool | str) -> None: + # The runner checks shouldfail and assumes that if it is set we are + # definitely stopping, so prevent unsetting it. + if value is False and self._shouldfail: + warnings.warn( + PytestWarning( + "session.shouldfail cannot be unset after it has been set; ignoring." + ), + stacklevel=2, + ) + return + self._shouldfail = value + + @property + def startpath(self) -> Path: + """The path from which pytest was invoked. + + .. 
versionadded:: 7.0.0 + """ + return self.config.invocation_params.dir + + def _node_location_to_relpath(self, node_path: Path) -> str: + # bestrelpath is a quite slow function. + return self._bestrelpathcache[node_path] + + @hookimpl(tryfirst=True) + def pytest_collectstart(self) -> None: + if self.shouldfail: + raise self.Failed(self.shouldfail) + if self.shouldstop: + raise self.Interrupted(self.shouldstop) + + @hookimpl(tryfirst=True) + def pytest_runtest_logreport(self, report: TestReport | CollectReport) -> None: + if report.failed and not hasattr(report, "wasxfail"): + self.testsfailed += 1 + maxfail = self.config.getvalue("maxfail") + if maxfail and self.testsfailed >= maxfail: + self.shouldfail = f"stopping after {self.testsfailed} failures" + + pytest_collectreport = pytest_runtest_logreport + + def isinitpath( + self, + path: str | os.PathLike[str], + *, + with_parents: bool = False, + ) -> bool: + """Is path an initial path? + + An initial path is a path explicitly given to pytest on the command + line. + + :param with_parents: + If set, also return True if the path is a parent of an initial path. + + .. versionchanged:: 8.0 + Added the ``with_parents`` parameter. + """ + # Optimization: Path(Path(...)) is much slower than isinstance. + path_ = path if isinstance(path, Path) else Path(path) + if with_parents: + return path_ in self._initialpaths_with_parents + else: + return path_ in self._initialpaths + + def gethookproxy(self, fspath: os.PathLike[str]) -> pluggy.HookRelay: + # Optimization: Path(Path(...)) is much slower than isinstance. + path = fspath if isinstance(fspath, Path) else Path(fspath) + pm = self.config.pluginmanager + # Check if we have the common case of running + # hooks with all conftest.py files. + my_conftestmodules = pm._getconftestmodules(path) + remove_mods = pm._conftest_plugins.difference(my_conftestmodules) + proxy: pluggy.HookRelay + if remove_mods: + # One or more conftests are not in use at this path. + proxy = PathAwareHookProxy(FSHookProxy(pm, remove_mods)) # type: ignore[arg-type,assignment] + else: + # All plugins are active for this fspath. + proxy = self.config.hook + return proxy + + def _collect_path( + self, + path: Path, + path_cache: dict[Path, Sequence[nodes.Collector]], + ) -> Sequence[nodes.Collector]: + """Create a Collector for the given path. + + `path_cache` makes it so the same Collectors are returned for the same + path. + """ + if path in path_cache: + return path_cache[path] + + if path.is_dir(): + ihook = self.gethookproxy(path.parent) + col: nodes.Collector | None = ihook.pytest_collect_directory( + path=path, parent=self + ) + cols: Sequence[nodes.Collector] = (col,) if col is not None else () + + elif path.is_file(): + ihook = self.gethookproxy(path) + cols = ihook.pytest_collect_file(file_path=path, parent=self) + + else: + # Broken symlink or invalid/missing file. + cols = () + + path_cache[path] = cols + return cols + + @overload + def perform_collect( + self, args: Sequence[str] | None = ..., genitems: Literal[True] = ... + ) -> Sequence[nodes.Item]: ... + + @overload + def perform_collect( + self, args: Sequence[str] | None = ..., genitems: bool = ... + ) -> Sequence[nodes.Item | nodes.Collector]: ... + + def perform_collect( + self, args: Sequence[str] | None = None, genitems: bool = True + ) -> Sequence[nodes.Item | nodes.Collector]: + """Perform the collection phase for this session. 
+ + This is called by the default :hook:`pytest_collection` hook + implementation; see the documentation of this hook for more details. + For testing purposes, it may also be called directly on a fresh + ``Session``. + + This function normally recursively expands any collectors collected + from the session to their items, and only items are returned. For + testing purposes, this may be suppressed by passing ``genitems=False``, + in which case the return value contains these collectors unexpanded, + and ``session.items`` is empty. + """ + if args is None: + args = self.config.args + + self.trace("perform_collect", self, args) + self.trace.root.indent += 1 + + hook = self.config.hook + + self._notfound = [] + self._initial_parts = [] + self._collection_cache = {} + self.items = [] + items: Sequence[nodes.Item | nodes.Collector] = self.items + try: + initialpaths: list[Path] = [] + initialpaths_with_parents: list[Path] = [] + for arg in args: + collection_argument = resolve_collection_argument( + self.config.invocation_params.dir, + arg, + as_pypath=self.config.option.pyargs, + ) + self._initial_parts.append(collection_argument) + initialpaths.append(collection_argument.path) + initialpaths_with_parents.append(collection_argument.path) + initialpaths_with_parents.extend(collection_argument.path.parents) + self._initialpaths = frozenset(initialpaths) + self._initialpaths_with_parents = frozenset(initialpaths_with_parents) + + rep = collect_one_node(self) + self.ihook.pytest_collectreport(report=rep) + self.trace.root.indent -= 1 + if self._notfound: + errors = [] + for arg, collectors in self._notfound: + if collectors: + errors.append( + f"not found: {arg}\n(no match in any of {collectors!r})" + ) + else: + errors.append(f"found no collectors for {arg}") + + raise UsageError(*errors) + + if not genitems: + items = rep.result + else: + if rep.passed: + for node in rep.result: + self.items.extend(self.genitems(node)) + + self.config.pluginmanager.check_pending() + hook.pytest_collection_modifyitems( + session=self, config=self.config, items=items + ) + finally: + self._notfound = [] + self._initial_parts = [] + self._collection_cache = {} + hook.pytest_collection_finish(session=self) + + if genitems: + self.testscollected = len(items) + + return items + + def _collect_one_node( + self, + node: nodes.Collector, + handle_dupes: bool = True, + ) -> tuple[CollectReport, bool]: + if node in self._collection_cache and handle_dupes: + rep = self._collection_cache[node] + return rep, True + else: + rep = collect_one_node(node) + self._collection_cache[node] = rep + return rep, False + + def collect(self) -> Iterator[nodes.Item | nodes.Collector]: + # This is a cache for the root directories of the initial paths. + # We can't use collection_cache for Session because of its special + # role as the bootstrapping collector. + path_cache: dict[Path, Sequence[nodes.Collector]] = {} + + pm = self.config.pluginmanager + + for collection_argument in self._initial_parts: + self.trace("processing argument", collection_argument) + self.trace.root.indent += 1 + + argpath = collection_argument.path + names = collection_argument.parts + module_name = collection_argument.module_name + + # resolve_collection_argument() ensures this. + if argpath.is_dir(): + assert not names, f"invalid arg {(argpath, names)!r}" + + paths = [argpath] + # Add relevant parents of the path, from the root, e.g. + # /a/b/c.py -> [/, /a, /a/b, /a/b/c.py] + if module_name is None: + # Paths outside of the confcutdir should not be considered. 
+ for path in argpath.parents: + if not pm._is_in_confcutdir(path): + break + paths.insert(0, path) + else: + # For --pyargs arguments, only consider paths matching the module + # name. Paths beyond the package hierarchy are not included. + module_name_parts = module_name.split(".") + for i, path in enumerate(argpath.parents, 2): + if i > len(module_name_parts) or path.stem != module_name_parts[-i]: + break + paths.insert(0, path) + + # Start going over the parts from the root, collecting each level + # and discarding all nodes which don't match the level's part. + any_matched_in_initial_part = False + notfound_collectors = [] + work: list[tuple[nodes.Collector | nodes.Item, list[Path | str]]] = [ + (self, [*paths, *names]) + ] + while work: + matchnode, matchparts = work.pop() + + # Pop'd all of the parts, this is a match. + if not matchparts: + yield matchnode + any_matched_in_initial_part = True + continue + + # Should have been matched by now, discard. + if not isinstance(matchnode, nodes.Collector): + continue + + # Collect this level of matching. + # Collecting Session (self) is done directly to avoid endless + # recursion to this function. + subnodes: Sequence[nodes.Collector | nodes.Item] + if isinstance(matchnode, Session): + assert isinstance(matchparts[0], Path) + subnodes = matchnode._collect_path(matchparts[0], path_cache) + else: + # For backward compat, files given directly multiple + # times on the command line should not be deduplicated. + handle_dupes = not ( + len(matchparts) == 1 + and isinstance(matchparts[0], Path) + and matchparts[0].is_file() + ) + rep, duplicate = self._collect_one_node(matchnode, handle_dupes) + if not duplicate and not rep.passed: + # Report collection failures here to avoid failing to + # run some test specified in the command line because + # the module could not be imported (#134). + matchnode.ihook.pytest_collectreport(report=rep) + if not rep.passed: + continue + subnodes = rep.result + + # Prune this level. + any_matched_in_collector = False + for node in reversed(subnodes): + # Path part e.g. `/a/b/` in `/a/b/test_file.py::TestIt::test_it`. + if isinstance(matchparts[0], Path): + is_match = node.path == matchparts[0] + if sys.platform == "win32" and not is_match: + # In case the file paths do not match, fallback to samefile() to + # account for short-paths on Windows (#11895). + same_file = os.path.samefile(node.path, matchparts[0]) + # We don't want to match links to the current node, + # otherwise we would match the same file more than once (#12039). + is_match = same_file and ( + os.path.islink(node.path) + == os.path.islink(matchparts[0]) + ) + + # Name part e.g. `TestIt` in `/a/b/test_file.py::TestIt::test_it`. + else: + # TODO: Remove parametrized workaround once collection structure contains + # parametrization. 
                        is_match = (
+                            node.name == matchparts[0]
+                            or node.name.split("[")[0] == matchparts[0]
+                        )
+                    if is_match:
+                        work.append((node, matchparts[1:]))
+                        any_matched_in_collector = True
+
+                if not any_matched_in_collector:
+                    notfound_collectors.append(matchnode)
+
+            if not any_matched_in_initial_part:
+                report_arg = "::".join((str(argpath), *names))
+                self._notfound.append((report_arg, notfound_collectors))
+
+            self.trace.root.indent -= 1
+
+    def genitems(self, node: nodes.Item | nodes.Collector) -> Iterator[nodes.Item]:
+        self.trace("genitems", node)
+        if isinstance(node, nodes.Item):
+            node.ihook.pytest_itemcollected(item=node)
+            yield node
+        else:
+            assert isinstance(node, nodes.Collector)
+            keepduplicates = self.config.getoption("keepduplicates")
+            # For backward compat, dedup only applies to files.
+            handle_dupes = not (keepduplicates and isinstance(node, nodes.File))
+            rep, duplicate = self._collect_one_node(node, handle_dupes)
+            if duplicate and not keepduplicates:
+                return
+            if rep.passed:
+                for subnode in rep.result:
+                    yield from self.genitems(subnode)
+            if not duplicate:
+                node.ihook.pytest_collectreport(report=rep)
+
+
+def search_pypath(module_name: str) -> str | None:
+    """Search sys.path for the given dotted module name, and return its file
+    system path if found."""
+    try:
+        spec = importlib.util.find_spec(module_name)
+    # AttributeError: looks like package module, but actually filename
+    # ImportError: module does not exist
+    # ValueError: not a module name
+    except (AttributeError, ImportError, ValueError):
+        return None
+    if spec is None or spec.origin is None or spec.origin == "namespace":
+        return None
+    elif spec.submodule_search_locations:
+        return os.path.dirname(spec.origin)
+    else:
+        return spec.origin
+
+
+@dataclasses.dataclass(frozen=True)
+class CollectionArgument:
+    """A resolved collection argument."""
+
+    path: Path
+    parts: Sequence[str]
+    module_name: str | None
+
+
+def resolve_collection_argument(
+    invocation_path: Path, arg: str, *, as_pypath: bool = False
+) -> CollectionArgument:
+    """Parse path arguments optionally containing selection parts.
+
+    Command-line arguments can point to files and/or directories, and optionally contain
+    parts for specific test selection, for example:
+
+        "pkg/tests/test_foo.py::TestClass::test_foo"
+
+    This function ensures the path exists, and returns a resolved `CollectionArgument`:
+
+        CollectionArgument(
+            path=Path("/full/path/to/pkg/tests/test_foo.py"),
+            parts=["TestClass", "test_foo"],
+            module_name=None,
+        )
+
+    When as_pypath is True, expects that the command-line argument actually contains
+    module paths instead of file-system paths:
+
+        "pkg.tests.test_foo::TestClass::test_foo"
+
+    In which case we search sys.path for a matching module, and then return the *path* to the
+    found module, which may look like this:
+
+        CollectionArgument(
+            path=Path("/home/u/myvenv/lib/site-packages/pkg/tests/test_foo.py"),
+            parts=["TestClass", "test_foo"],
+            module_name="pkg.tests.test_foo",
+        )
+
+    If the path doesn't exist, raise UsageError.
+    If the path is a directory and selection parts are present, raise UsageError.
+ """ + base, squacket, rest = str(arg).partition("[") + strpath, *parts = base.split("::") + if parts: + parts[-1] = f"{parts[-1]}{squacket}{rest}" + module_name = None + if as_pypath: + pyarg_strpath = search_pypath(strpath) + if pyarg_strpath is not None: + module_name = strpath + strpath = pyarg_strpath + fspath = invocation_path / strpath + fspath = absolutepath(fspath) + if not safe_exists(fspath): + msg = ( + "module or package not found: {arg} (missing __init__.py?)" + if as_pypath + else "file or directory not found: {arg}" + ) + raise UsageError(msg.format(arg=arg)) + if parts and fspath.is_dir(): + msg = ( + "package argument cannot contain :: selection parts: {arg}" + if as_pypath + else "directory argument cannot contain :: selection parts: {arg}" + ) + raise UsageError(msg.format(arg=arg)) + return CollectionArgument( + path=fspath, + parts=parts, + module_name=module_name, + ) diff --git a/.venv/lib/python3.11/site-packages/_pytest/mark/__init__.py b/.venv/lib/python3.11/site-packages/_pytest/mark/__init__.py new file mode 100644 index 0000000..068c741 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/mark/__init__.py @@ -0,0 +1,301 @@ +"""Generic mechanism for marking and selecting python functions.""" + +from __future__ import annotations + +import collections +from collections.abc import Collection +from collections.abc import Iterable +from collections.abc import Set as AbstractSet +import dataclasses +from typing import Optional +from typing import TYPE_CHECKING + +from .expression import Expression +from .expression import ParseError +from .structures import _HiddenParam +from .structures import EMPTY_PARAMETERSET_OPTION +from .structures import get_empty_parameterset_mark +from .structures import HIDDEN_PARAM +from .structures import Mark +from .structures import MARK_GEN +from .structures import MarkDecorator +from .structures import MarkGenerator +from .structures import ParameterSet +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config import UsageError +from _pytest.config.argparsing import NOT_SET +from _pytest.config.argparsing import Parser +from _pytest.stash import StashKey + + +if TYPE_CHECKING: + from _pytest.nodes import Item + + +__all__ = [ + "HIDDEN_PARAM", + "MARK_GEN", + "Mark", + "MarkDecorator", + "MarkGenerator", + "ParameterSet", + "get_empty_parameterset_mark", +] + + +old_mark_config_key = StashKey[Optional[Config]]() + + +def param( + *values: object, + marks: MarkDecorator | Collection[MarkDecorator | Mark] = (), + id: str | _HiddenParam | None = None, +) -> ParameterSet: + """Specify a parameter in `pytest.mark.parametrize`_ calls or + :ref:`parametrized fixtures `. + + .. code-block:: python + + @pytest.mark.parametrize( + "test_input,expected", + [ + ("3+5", 8), + pytest.param("6*9", 42, marks=pytest.mark.xfail), + ], + ) + def test_eval(test_input, expected): + assert eval(test_input) == expected + + :param values: Variable args of the values of the parameter set, in order. + + :param marks: + A single mark or a list of marks to be applied to this parameter set. + + :ref:`pytest.mark.usefixtures ` cannot be added via this parameter. + + :type id: str | Literal[pytest.HIDDEN_PARAM] | None + :param id: + The id to attribute to this parameter set. + + .. versionadded:: 8.4 + :ref:`hidden-param` means to hide the parameter set + from the test name. Can only be used at most 1 time, as + test names need to be unique. 
+ """ + return ParameterSet.param(*values, marks=marks, id=id) + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group._addoption( # private to use reserved lower-case short option + "-k", + action="store", + dest="keyword", + default="", + metavar="EXPRESSION", + help="Only run tests which match the given substring expression. " + "An expression is a Python evaluable expression " + "where all names are substring-matched against test names " + "and their parent classes. Example: -k 'test_method or test_" + "other' matches all test functions and classes whose name " + "contains 'test_method' or 'test_other', while -k 'not test_method' " + "matches those that don't contain 'test_method' in their names. " + "-k 'not test_method and not test_other' will eliminate the matches. " + "Additionally keywords are matched to classes and functions " + "containing extra names in their 'extra_keyword_matches' set, " + "as well as functions which have names assigned directly to them. " + "The matching is case-insensitive.", + ) + + group._addoption( # private to use reserved lower-case short option + "-m", + action="store", + dest="markexpr", + default="", + metavar="MARKEXPR", + help="Only run tests matching given mark expression. " + "For example: -m 'mark1 and not mark2'.", + ) + + group.addoption( + "--markers", + action="store_true", + help="show markers (builtin, plugin and per-project ones).", + ) + + parser.addini("markers", "Register new markers for test functions", "linelist") + parser.addini(EMPTY_PARAMETERSET_OPTION, "Default marker for empty parametersets") + + +@hookimpl(tryfirst=True) +def pytest_cmdline_main(config: Config) -> int | ExitCode | None: + import _pytest.config + + if config.option.markers: + config._do_configure() + tw = _pytest.config.create_terminal_writer(config) + for line in config.getini("markers"): + parts = line.split(":", 1) + name = parts[0] + rest = parts[1] if len(parts) == 2 else "" + tw.write(f"@pytest.mark.{name}:", bold=True) + tw.line(rest) + tw.line() + config._ensure_unconfigure() + return 0 + + return None + + +@dataclasses.dataclass +class KeywordMatcher: + """A matcher for keywords. + + Given a list of names, matches any substring of one of these names. The + string inclusion check is case-insensitive. + + Will match on the name of colitem, including the names of its parents. + Only matches names of items which are either a :class:`Class` or a + :class:`Function`. + + Additionally, matches on names in the 'extra_keyword_matches' set of + any item, as well as names directly assigned to test functions. + """ + + __slots__ = ("_names",) + + _names: AbstractSet[str] + + @classmethod + def from_item(cls, item: Item) -> KeywordMatcher: + mapped_names = set() + + # Add the names of the current item and any parent items, + # except the Session and root Directory's which are not + # interesting for matching. + import pytest + + for node in item.listchain(): + if isinstance(node, pytest.Session): + continue + if isinstance(node, pytest.Directory) and isinstance( + node.parent, pytest.Session + ): + continue + mapped_names.add(node.name) + + # Add the names added as extra keywords to current or parent items. + mapped_names.update(item.listextrakeywords()) + + # Add the names attached to the current function through direct assignment. + function_obj = getattr(item, "function", None) + if function_obj: + mapped_names.update(function_obj.__dict__) + + # Add the markers to the keywords as we no longer handle them correctly. 
+ mapped_names.update(mark.name for mark in item.iter_markers()) + + return cls(mapped_names) + + def __call__(self, subname: str, /, **kwargs: str | int | bool | None) -> bool: + if kwargs: + raise UsageError("Keyword expressions do not support call parameters.") + subname = subname.lower() + return any(subname in name.lower() for name in self._names) + + +def deselect_by_keyword(items: list[Item], config: Config) -> None: + keywordexpr = config.option.keyword.lstrip() + if not keywordexpr: + return + + expr = _parse_expression(keywordexpr, "Wrong expression passed to '-k'") + + remaining = [] + deselected = [] + for colitem in items: + if not expr.evaluate(KeywordMatcher.from_item(colitem)): + deselected.append(colitem) + else: + remaining.append(colitem) + + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = remaining + + +@dataclasses.dataclass +class MarkMatcher: + """A matcher for markers which are present. + + Tries to match on any marker names, attached to the given colitem. + """ + + __slots__ = ("own_mark_name_mapping",) + + own_mark_name_mapping: dict[str, list[Mark]] + + @classmethod + def from_markers(cls, markers: Iterable[Mark]) -> MarkMatcher: + mark_name_mapping = collections.defaultdict(list) + for mark in markers: + mark_name_mapping[mark.name].append(mark) + return cls(mark_name_mapping) + + def __call__(self, name: str, /, **kwargs: str | int | bool | None) -> bool: + if not (matches := self.own_mark_name_mapping.get(name, [])): + return False + + for mark in matches: # pylint: disable=consider-using-any-or-all + if all(mark.kwargs.get(k, NOT_SET) == v for k, v in kwargs.items()): + return True + return False + + +def deselect_by_mark(items: list[Item], config: Config) -> None: + matchexpr = config.option.markexpr + if not matchexpr: + return + + expr = _parse_expression(matchexpr, "Wrong expression passed to '-m'") + remaining: list[Item] = [] + deselected: list[Item] = [] + for item in items: + if expr.evaluate(MarkMatcher.from_markers(item.iter_markers())): + remaining.append(item) + else: + deselected.append(item) + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = remaining + + +def _parse_expression(expr: str, exc_message: str) -> Expression: + try: + return Expression.compile(expr) + except ParseError as e: + raise UsageError(f"{exc_message}: {expr}: {e}") from None + + +def pytest_collection_modifyitems(items: list[Item], config: Config) -> None: + deselect_by_keyword(items, config) + deselect_by_mark(items, config) + + +def pytest_configure(config: Config) -> None: + config.stash[old_mark_config_key] = MARK_GEN._config + MARK_GEN._config = config + + empty_parameterset = config.getini(EMPTY_PARAMETERSET_OPTION) + + if empty_parameterset not in ("skip", "xfail", "fail_at_collect", None, ""): + raise UsageError( + f"{EMPTY_PARAMETERSET_OPTION!s} must be one of skip, xfail or fail_at_collect" + f" but it is {empty_parameterset!r}" + ) + + +def pytest_unconfigure(config: Config) -> None: + MARK_GEN._config = config.stash.get(old_mark_config_key, None) diff --git a/.venv/lib/python3.11/site-packages/_pytest/mark/expression.py b/.venv/lib/python3.11/site-packages/_pytest/mark/expression.py new file mode 100644 index 0000000..743a46b --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/mark/expression.py @@ -0,0 +1,331 @@ +r"""Evaluate match expressions, as used by `-k` and `-m`. + +The grammar is: + +expression: expr? 
EOF +expr: and_expr ('or' and_expr)* +and_expr: not_expr ('and' not_expr)* +not_expr: 'not' not_expr | '(' expr ')' | ident kwargs? + +ident: (\w|:|\+|-|\.|\[|\]|\\|/)+ +kwargs: ('(' name '=' value ( ', ' name '=' value )* ')') +name: a valid ident, but not a reserved keyword +value: (unescaped) string literal | (-)?[0-9]+ | 'False' | 'True' | 'None' + +The semantics are: + +- Empty expression evaluates to False. +- ident evaluates to True or False according to a provided matcher function. +- or/and/not evaluate according to the usual boolean semantics. +- ident with parentheses and keyword arguments evaluates to True or False according to a provided matcher function. +""" + +from __future__ import annotations + +import ast +from collections.abc import Iterator +from collections.abc import Mapping +from collections.abc import Sequence +import dataclasses +import enum +import keyword +import re +import types +from typing import Literal +from typing import NoReturn +from typing import overload +from typing import Protocol + + +__all__ = [ + "Expression", + "ParseError", +] + + +class TokenType(enum.Enum): + LPAREN = "left parenthesis" + RPAREN = "right parenthesis" + OR = "or" + AND = "and" + NOT = "not" + IDENT = "identifier" + EOF = "end of input" + EQUAL = "=" + STRING = "string literal" + COMMA = "," + + +@dataclasses.dataclass(frozen=True) +class Token: + __slots__ = ("pos", "type", "value") + type: TokenType + value: str + pos: int + + +class ParseError(Exception): + """The expression contains invalid syntax. + + :param column: The column in the line where the error occurred (1-based). + :param message: A description of the error. + """ + + def __init__(self, column: int, message: str) -> None: + self.column = column + self.message = message + + def __str__(self) -> str: + return f"at column {self.column}: {self.message}" + + +class Scanner: + __slots__ = ("current", "tokens") + + def __init__(self, input: str) -> None: + self.tokens = self.lex(input) + self.current = next(self.tokens) + + def lex(self, input: str) -> Iterator[Token]: + pos = 0 + while pos < len(input): + if input[pos] in (" ", "\t"): + pos += 1 + elif input[pos] == "(": + yield Token(TokenType.LPAREN, "(", pos) + pos += 1 + elif input[pos] == ")": + yield Token(TokenType.RPAREN, ")", pos) + pos += 1 + elif input[pos] == "=": + yield Token(TokenType.EQUAL, "=", pos) + pos += 1 + elif input[pos] == ",": + yield Token(TokenType.COMMA, ",", pos) + pos += 1 + elif (quote_char := input[pos]) in ("'", '"'): + end_quote_pos = input.find(quote_char, pos + 1) + if end_quote_pos == -1: + raise ParseError( + pos + 1, + f'closing quote "{quote_char}" is missing', + ) + value = input[pos : end_quote_pos + 1] + if (backslash_pos := input.find("\\")) != -1: + raise ParseError( + backslash_pos + 1, + r'escaping with "\" not supported in marker expression', + ) + yield Token(TokenType.STRING, value, pos) + pos += len(value) + else: + match = re.match(r"(:?\w|:|\+|-|\.|\[|\]|\\|/)+", input[pos:]) + if match: + value = match.group(0) + if value == "or": + yield Token(TokenType.OR, value, pos) + elif value == "and": + yield Token(TokenType.AND, value, pos) + elif value == "not": + yield Token(TokenType.NOT, value, pos) + else: + yield Token(TokenType.IDENT, value, pos) + pos += len(value) + else: + raise ParseError( + pos + 1, + f'unexpected character "{input[pos]}"', + ) + yield Token(TokenType.EOF, "", pos) + + @overload + def accept(self, type: TokenType, *, reject: Literal[True]) -> Token: ... 
+ + @overload + def accept( + self, type: TokenType, *, reject: Literal[False] = False + ) -> Token | None: ... + + def accept(self, type: TokenType, *, reject: bool = False) -> Token | None: + if self.current.type is type: + token = self.current + if token.type is not TokenType.EOF: + self.current = next(self.tokens) + return token + if reject: + self.reject((type,)) + return None + + def reject(self, expected: Sequence[TokenType]) -> NoReturn: + raise ParseError( + self.current.pos + 1, + "expected {}; got {}".format( + " OR ".join(type.value for type in expected), + self.current.type.value, + ), + ) + + +# True, False and None are legal match expression identifiers, +# but illegal as Python identifiers. To fix this, this prefix +# is added to identifiers in the conversion to Python AST. +IDENT_PREFIX = "$" + + +def expression(s: Scanner) -> ast.Expression: + if s.accept(TokenType.EOF): + ret: ast.expr = ast.Constant(False) + else: + ret = expr(s) + s.accept(TokenType.EOF, reject=True) + return ast.fix_missing_locations(ast.Expression(ret)) + + +def expr(s: Scanner) -> ast.expr: + ret = and_expr(s) + while s.accept(TokenType.OR): + rhs = and_expr(s) + ret = ast.BoolOp(ast.Or(), [ret, rhs]) + return ret + + +def and_expr(s: Scanner) -> ast.expr: + ret = not_expr(s) + while s.accept(TokenType.AND): + rhs = not_expr(s) + ret = ast.BoolOp(ast.And(), [ret, rhs]) + return ret + + +def not_expr(s: Scanner) -> ast.expr: + if s.accept(TokenType.NOT): + return ast.UnaryOp(ast.Not(), not_expr(s)) + if s.accept(TokenType.LPAREN): + ret = expr(s) + s.accept(TokenType.RPAREN, reject=True) + return ret + ident = s.accept(TokenType.IDENT) + if ident: + name = ast.Name(IDENT_PREFIX + ident.value, ast.Load()) + if s.accept(TokenType.LPAREN): + ret = ast.Call(func=name, args=[], keywords=all_kwargs(s)) + s.accept(TokenType.RPAREN, reject=True) + else: + ret = name + return ret + + s.reject((TokenType.NOT, TokenType.LPAREN, TokenType.IDENT)) + + +BUILTIN_MATCHERS = {"True": True, "False": False, "None": None} + + +def single_kwarg(s: Scanner) -> ast.keyword: + keyword_name = s.accept(TokenType.IDENT, reject=True) + if not keyword_name.value.isidentifier(): + raise ParseError( + keyword_name.pos + 1, + f"not a valid python identifier {keyword_name.value}", + ) + if keyword.iskeyword(keyword_name.value): + raise ParseError( + keyword_name.pos + 1, + f"unexpected reserved python keyword `{keyword_name.value}`", + ) + s.accept(TokenType.EQUAL, reject=True) + + if value_token := s.accept(TokenType.STRING): + value: str | int | bool | None = value_token.value[1:-1] # strip quotes + else: + value_token = s.accept(TokenType.IDENT, reject=True) + if (number := value_token.value).isdigit() or ( + number.startswith("-") and number[1:].isdigit() + ): + value = int(number) + elif value_token.value in BUILTIN_MATCHERS: + value = BUILTIN_MATCHERS[value_token.value] + else: + raise ParseError( + value_token.pos + 1, + f'unexpected character/s "{value_token.value}"', + ) + + ret = ast.keyword(keyword_name.value, ast.Constant(value)) + return ret + + +def all_kwargs(s: Scanner) -> list[ast.keyword]: + ret = [single_kwarg(s)] + while s.accept(TokenType.COMMA): + ret.append(single_kwarg(s)) + return ret + + +class MatcherCall(Protocol): + def __call__(self, name: str, /, **kwargs: str | int | bool | None) -> bool: ... 
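+
+
+# A minimal usage sketch of the machinery below (the lambda stands in for a
+# real matcher such as KeywordMatcher or MarkMatcher):
+#
+#     expr = Expression.compile("slow and not serial")
+#     expr.evaluate(lambda name, **kwargs: name == "slow")  # -> True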
+
+
+@dataclasses.dataclass
+class MatcherNameAdapter:
+    matcher: MatcherCall
+    name: str
+
+    def __bool__(self) -> bool:
+        return self.matcher(self.name)
+
+    def __call__(self, **kwargs: str | int | bool | None) -> bool:
+        return self.matcher(self.name, **kwargs)
+
+
+class MatcherAdapter(Mapping[str, MatcherNameAdapter]):
+    """Adapts a matcher function to a locals mapping as required by eval()."""
+
+    def __init__(self, matcher: MatcherCall) -> None:
+        self.matcher = matcher
+
+    def __getitem__(self, key: str) -> MatcherNameAdapter:
+        return MatcherNameAdapter(matcher=self.matcher, name=key[len(IDENT_PREFIX) :])
+
+    def __iter__(self) -> Iterator[str]:
+        raise NotImplementedError()
+
+    def __len__(self) -> int:
+        raise NotImplementedError()
+
+
+class Expression:
+    """A compiled match expression as used by -k and -m.
+
+    The expression can be evaluated against different matchers.
+    """
+
+    __slots__ = ("code",)
+
+    def __init__(self, code: types.CodeType) -> None:
+        self.code = code
+
+    @classmethod
+    def compile(cls, input: str) -> Expression:
+        """Compile a match expression.
+
+        :param input: The input expression - one line.
+        """
+        astexpr = expression(Scanner(input))
+        code: types.CodeType = compile(
+            astexpr,
+            filename="<pytest match expression>",
+            mode="eval",
+        )
+        return Expression(code)
+
+    def evaluate(self, matcher: MatcherCall) -> bool:
+        """Evaluate the match expression.
+
+        :param matcher:
+            Given an identifier, should return whether it matches or not.
+            Should be prepared to handle arbitrary strings as input.
+
+        :returns: Whether the expression matches or not.
+        """
+        ret: bool = bool(eval(self.code, {"__builtins__": {}}, MatcherAdapter(matcher)))
+        return ret
diff --git a/.venv/lib/python3.11/site-packages/_pytest/mark/structures.py b/.venv/lib/python3.11/site-packages/_pytest/mark/structures.py
new file mode 100644
index 0000000..f926107
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/_pytest/mark/structures.py
@@ -0,0 +1,662 @@
+# mypy: allow-untyped-defs
+from __future__ import annotations
+
+import collections.abc
+from collections.abc import Callable
+from collections.abc import Collection
+from collections.abc import Iterable
+from collections.abc import Iterator
+from collections.abc import Mapping
+from collections.abc import MutableMapping
+from collections.abc import Sequence
+import dataclasses
+import enum
+import inspect
+from typing import Any
+from typing import final
+from typing import NamedTuple
+from typing import overload
+from typing import TYPE_CHECKING
+from typing import TypeVar
+from typing import Union
+import warnings
+
+from .._code import getfslineno
+from ..compat import NOTSET
+from ..compat import NotSetType
+from _pytest.config import Config
+from _pytest.deprecated import check_ispytest
+from _pytest.deprecated import MARKED_FIXTURE
+from _pytest.outcomes import fail
+from _pytest.raises import AbstractRaises
+from _pytest.scope import _ScopeName
+from _pytest.warning_types import PytestUnknownMarkWarning
+
+
+if TYPE_CHECKING:
+    from ..nodes import Node
+
+
+EMPTY_PARAMETERSET_OPTION = "empty_parameter_set_mark"
+
+
+# Singleton type for HIDDEN_PARAM, as described in:
+# https://www.python.org/dev/peps/pep-0484/#support-for-singleton-types-in-unions
+class _HiddenParam(enum.Enum):
+    token = 0
+
+
+#: Can be used as a parameter set id to hide it from the test name.
diff --git a/.venv/lib/python3.11/site-packages/_pytest/mark/structures.py b/.venv/lib/python3.11/site-packages/_pytest/mark/structures.py
new file mode 100644
index 0000000..f926107
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/_pytest/mark/structures.py
@@ -0,0 +1,662 @@
+# mypy: allow-untyped-defs
+from __future__ import annotations
+
+import collections.abc
+from collections.abc import Callable
+from collections.abc import Collection
+from collections.abc import Iterable
+from collections.abc import Iterator
+from collections.abc import Mapping
+from collections.abc import MutableMapping
+from collections.abc import Sequence
+import dataclasses
+import enum
+import inspect
+from typing import Any
+from typing import final
+from typing import NamedTuple
+from typing import overload
+from typing import TYPE_CHECKING
+from typing import TypeVar
+from typing import Union
+import warnings
+
+from .._code import getfslineno
+from ..compat import NOTSET
+from ..compat import NotSetType
+from _pytest.config import Config
+from _pytest.deprecated import check_ispytest
+from _pytest.deprecated import MARKED_FIXTURE
+from _pytest.outcomes import fail
+from _pytest.raises import AbstractRaises
+from _pytest.scope import _ScopeName
+from _pytest.warning_types import PytestUnknownMarkWarning
+
+
+if TYPE_CHECKING:
+    from ..nodes import Node
+
+
+EMPTY_PARAMETERSET_OPTION = "empty_parameter_set_mark"
+
+
+# Singleton type for HIDDEN_PARAM, as described in:
+# https://www.python.org/dev/peps/pep-0484/#support-for-singleton-types-in-unions
+class _HiddenParam(enum.Enum):
+    token = 0
+
+
+#: Can be used as a parameter set id to hide it from the test name.
+HIDDEN_PARAM = _HiddenParam.token
+
+
+def istestfunc(func) -> bool:
+    return callable(func) and getattr(func, "__name__", "<lambda>") != "<lambda>"
+
+
+def get_empty_parameterset_mark(
+    config: Config, argnames: Sequence[str], func
+) -> MarkDecorator:
+    from ..nodes import Collector
+
+    argslisting = ", ".join(argnames)
+
+    fs, lineno = getfslineno(func)
+    reason = f"got empty parameter set for ({argslisting})"
+    requested_mark = config.getini(EMPTY_PARAMETERSET_OPTION)
+    if requested_mark in ("", None, "skip"):
+        mark = MARK_GEN.skip(reason=reason)
+    elif requested_mark == "xfail":
+        mark = MARK_GEN.xfail(reason=reason, run=False)
+    elif requested_mark == "fail_at_collect":
+        raise Collector.CollectError(
+            f"Empty parameter set in '{func.__name__}' at line {lineno + 1}"
+        )
+    else:
+        raise LookupError(requested_mark)
+    return mark
+
+
+class ParameterSet(NamedTuple):
+    """A set of values for a set of parameters along with associated marks and
+    an optional ID for the set.
+
+    Examples::
+
+        pytest.param(1, 2, 3)
+        # ParameterSet(values=(1, 2, 3), marks=(), id=None)
+
+        pytest.param("hello", id="greeting")
+        # ParameterSet(values=("hello",), marks=(), id="greeting")
+
+        # Parameter set with marks
+        pytest.param(42, marks=pytest.mark.xfail)
+        # ParameterSet(values=(42,), marks=(MarkDecorator(...),), id=None)
+
+        # From parametrize mark (parameter names + list of parameter sets)
+        pytest.mark.parametrize(
+            ("a", "b", "expected"),
+            [
+                (1, 2, 3),
+                pytest.param(40, 2, 42, id="everything"),
+            ],
+        )
+        # ParameterSet(values=(1, 2, 3), marks=(), id=None)
+        # ParameterSet(values=(40, 2, 42), marks=(), id="everything")
+    """
+
+    values: Sequence[object | NotSetType]
+    marks: Collection[MarkDecorator | Mark]
+    id: str | _HiddenParam | None
+
+    @classmethod
+    def param(
+        cls,
+        *values: object,
+        marks: MarkDecorator | Collection[MarkDecorator | Mark] = (),
+        id: str | _HiddenParam | None = None,
+    ) -> ParameterSet:
+        if isinstance(marks, MarkDecorator):
+            marks = (marks,)
+        else:
+            assert isinstance(marks, collections.abc.Collection)
+        if any(i.name == "usefixtures" for i in marks):
+            raise ValueError(
+                "pytest.param cannot add pytest.mark.usefixtures; see "
+                "https://docs.pytest.org/en/stable/reference/reference.html#pytest-param"
+            )
+
+        if id is not None:
+            if not isinstance(id, str) and id is not HIDDEN_PARAM:
+                raise TypeError(
+                    "Expected id to be a string or a `pytest.HIDDEN_PARAM` sentinel, "
+                    f"got {type(id)}: {id!r}",
+                )
+        return cls(values, marks, id)
+
+    @classmethod
+    def extract_from(
+        cls,
+        parameterset: ParameterSet | Sequence[object] | object,
+        force_tuple: bool = False,
+    ) -> ParameterSet:
+        """Extract from an object or objects.
+
+        :param parameterset:
+            A legacy style parameterset that may or may not be a tuple,
+            and may or may not be wrapped into a mess of mark objects.
+
+        :param force_tuple:
+            Enforce tuple wrapping so single argument tuple values
+            don't get decomposed and break tests.
+        """
+        if isinstance(parameterset, cls):
+            return parameterset
+        if force_tuple:
+            return cls.param(parameterset)
+        else:
+            # TODO: Refactor to fix this type-ignore.
Currently the following + # passes type-checking but crashes: + # + # @pytest.mark.parametrize(('x', 'y'), [1, 2]) + # def test_foo(x, y): pass + return cls(parameterset, marks=[], id=None) # type: ignore[arg-type] + + @staticmethod + def _parse_parametrize_args( + argnames: str | Sequence[str], + argvalues: Iterable[ParameterSet | Sequence[object] | object], + *args, + **kwargs, + ) -> tuple[Sequence[str], bool]: + if isinstance(argnames, str): + argnames = [x.strip() for x in argnames.split(",") if x.strip()] + force_tuple = len(argnames) == 1 + else: + force_tuple = False + return argnames, force_tuple + + @staticmethod + def _parse_parametrize_parameters( + argvalues: Iterable[ParameterSet | Sequence[object] | object], + force_tuple: bool, + ) -> list[ParameterSet]: + return [ + ParameterSet.extract_from(x, force_tuple=force_tuple) for x in argvalues + ] + + @classmethod + def _for_parametrize( + cls, + argnames: str | Sequence[str], + argvalues: Iterable[ParameterSet | Sequence[object] | object], + func, + config: Config, + nodeid: str, + ) -> tuple[Sequence[str], list[ParameterSet]]: + argnames, force_tuple = cls._parse_parametrize_args(argnames, argvalues) + parameters = cls._parse_parametrize_parameters(argvalues, force_tuple) + del argvalues + + if parameters: + # Check all parameter sets have the correct number of values. + for param in parameters: + if len(param.values) != len(argnames): + msg = ( + '{nodeid}: in "parametrize" the number of names ({names_len}):\n' + " {names}\n" + "must be equal to the number of values ({values_len}):\n" + " {values}" + ) + fail( + msg.format( + nodeid=nodeid, + values=param.values, + names=argnames, + names_len=len(argnames), + values_len=len(param.values), + ), + pytrace=False, + ) + else: + # Empty parameter set (likely computed at runtime): create a single + # parameter set with NOTSET values, with the "empty parameter set" mark applied to it. + mark = get_empty_parameterset_mark(config, argnames, func) + parameters.append( + ParameterSet( + values=(NOTSET,) * len(argnames), marks=[mark], id="NOTSET" + ) + ) + return argnames, parameters + + +@final +@dataclasses.dataclass(frozen=True) +class Mark: + """A pytest mark.""" + + #: Name of the mark. + name: str + #: Positional arguments of the mark decorator. + args: tuple[Any, ...] + #: Keyword arguments of the mark decorator. + kwargs: Mapping[str, Any] + + #: Source Mark for ids with parametrize Marks. + _param_ids_from: Mark | None = dataclasses.field(default=None, repr=False) + #: Resolved/generated ids with parametrize Marks. + _param_ids_generated: Sequence[str] | None = dataclasses.field( + default=None, repr=False + ) + + def __init__( + self, + name: str, + args: tuple[Any, ...], + kwargs: Mapping[str, Any], + param_ids_from: Mark | None = None, + param_ids_generated: Sequence[str] | None = None, + *, + _ispytest: bool = False, + ) -> None: + """:meta private:""" + check_ispytest(_ispytest) + # Weirdness to bypass frozen=True. + object.__setattr__(self, "name", name) + object.__setattr__(self, "args", args) + object.__setattr__(self, "kwargs", kwargs) + object.__setattr__(self, "_param_ids_from", param_ids_from) + object.__setattr__(self, "_param_ids_generated", param_ids_generated) + + def _has_param_ids(self) -> bool: + return "ids" in self.kwargs or len(self.args) >= 4 + + def combined_with(self, other: Mark) -> Mark: + """Return a new Mark which is a combination of this + Mark and another Mark. + + Combines by appending args and merging kwargs. 
+ + :param Mark other: The mark to combine with. + :rtype: Mark + """ + assert self.name == other.name + + # Remember source of ids with parametrize Marks. + param_ids_from: Mark | None = None + if self.name == "parametrize": + if other._has_param_ids(): + param_ids_from = other + elif self._has_param_ids(): + param_ids_from = self + + return Mark( + self.name, + self.args + other.args, + dict(self.kwargs, **other.kwargs), + param_ids_from=param_ids_from, + _ispytest=True, + ) + + +# A generic parameter designating an object to which a Mark may +# be applied -- a test function (callable) or class. +# Note: a lambda is not allowed, but this can't be represented. +Markable = TypeVar("Markable", bound=Union[Callable[..., object], type]) + + +@dataclasses.dataclass +class MarkDecorator: + """A decorator for applying a mark on test functions and classes. + + ``MarkDecorators`` are created with ``pytest.mark``:: + + mark1 = pytest.mark.NAME # Simple MarkDecorator + mark2 = pytest.mark.NAME(name1=value) # Parametrized MarkDecorator + + and can then be applied as decorators to test functions:: + + @mark2 + def test_function(): + pass + + When a ``MarkDecorator`` is called, it does the following: + + 1. If called with a single class as its only positional argument and no + additional keyword arguments, it attaches the mark to the class so it + gets applied automatically to all test cases found in that class. + + 2. If called with a single function as its only positional argument and + no additional keyword arguments, it attaches the mark to the function, + containing all the arguments already stored internally in the + ``MarkDecorator``. + + 3. When called in any other case, it returns a new ``MarkDecorator`` + instance with the original ``MarkDecorator``'s content updated with + the arguments passed to this call. + + Note: The rules above prevent a ``MarkDecorator`` from storing only a + single function or class reference as its positional argument with no + additional keyword or positional arguments. You can work around this by + using `with_args()`. + """ + + mark: Mark + + def __init__(self, mark: Mark, *, _ispytest: bool = False) -> None: + """:meta private:""" + check_ispytest(_ispytest) + self.mark = mark + + @property + def name(self) -> str: + """Alias for mark.name.""" + return self.mark.name + + @property + def args(self) -> tuple[Any, ...]: + """Alias for mark.args.""" + return self.mark.args + + @property + def kwargs(self) -> Mapping[str, Any]: + """Alias for mark.kwargs.""" + return self.mark.kwargs + + @property + def markname(self) -> str: + """:meta private:""" + return self.name # for backward-compat (2.4.1 had this attr) + + def with_args(self, *args: object, **kwargs: object) -> MarkDecorator: + """Return a MarkDecorator with extra arguments added. + + Unlike calling the MarkDecorator, with_args() can be used even + if the sole argument is a callable/class. + """ + mark = Mark(self.name, args, kwargs, _ispytest=True) + return MarkDecorator(self.mark.combined_with(mark), _ispytest=True) + + # Type ignored because the overloads overlap with an incompatible + # return type. Not much we can do about that. Thankfully mypy picks + # the first match so it works out even if we break the rules. 
+ @overload + def __call__(self, arg: Markable) -> Markable: # type: ignore[overload-overlap] + pass + + @overload + def __call__(self, *args: object, **kwargs: object) -> MarkDecorator: + pass + + def __call__(self, *args: object, **kwargs: object): + """Call the MarkDecorator.""" + if args and not kwargs: + func = args[0] + is_class = inspect.isclass(func) + # For staticmethods/classmethods, the marks are eventually fetched from the + # function object, not the descriptor, so unwrap. + unwrapped_func = func + if isinstance(func, (staticmethod, classmethod)): + unwrapped_func = func.__func__ + if len(args) == 1 and (istestfunc(unwrapped_func) or is_class): + store_mark(unwrapped_func, self.mark, stacklevel=3) + return func + return self.with_args(*args, **kwargs) + + +def get_unpacked_marks( + obj: object | type, + *, + consider_mro: bool = True, +) -> list[Mark]: + """Obtain the unpacked marks that are stored on an object. + + If obj is a class and consider_mro is true, return marks applied to + this class and all of its super-classes in MRO order. If consider_mro + is false, only return marks applied directly to this class. + """ + if isinstance(obj, type): + if not consider_mro: + mark_lists = [obj.__dict__.get("pytestmark", [])] + else: + mark_lists = [ + x.__dict__.get("pytestmark", []) for x in reversed(obj.__mro__) + ] + mark_list = [] + for item in mark_lists: + if isinstance(item, list): + mark_list.extend(item) + else: + mark_list.append(item) + else: + mark_attribute = getattr(obj, "pytestmark", []) + if isinstance(mark_attribute, list): + mark_list = mark_attribute + else: + mark_list = [mark_attribute] + return list(normalize_mark_list(mark_list)) + + +def normalize_mark_list( + mark_list: Iterable[Mark | MarkDecorator], +) -> Iterable[Mark]: + """ + Normalize an iterable of Mark or MarkDecorator objects into a list of marks + by retrieving the `mark` attribute on MarkDecorator instances. + + :param mark_list: marks to normalize + :returns: A new list of the extracted Mark objects + """ + for mark in mark_list: + mark_obj = getattr(mark, "mark", mark) + if not isinstance(mark_obj, Mark): + raise TypeError(f"got {mark_obj!r} instead of Mark") + yield mark_obj + + +def store_mark(obj, mark: Mark, *, stacklevel: int = 2) -> None: + """Store a Mark on an object. + + This is used to implement the Mark declarations/decorators correctly. + """ + assert isinstance(mark, Mark), mark + + from ..fixtures import getfixturemarker + + if getfixturemarker(obj) is not None: + warnings.warn(MARKED_FIXTURE, stacklevel=stacklevel) + + # Always reassign name to avoid updating pytestmark in a reference that + # was only borrowed. + obj.pytestmark = [*get_unpacked_marks(obj, consider_mro=False), mark] + + +# Typing for builtin pytest marks. This is cheating; it gives builtin marks +# special privilege, and breaks modularity. But practicality beats purity... +if TYPE_CHECKING: + + class _SkipMarkDecorator(MarkDecorator): + @overload # type: ignore[override,no-overload-impl] + def __call__(self, arg: Markable) -> Markable: ... + + @overload + def __call__(self, reason: str = ...) -> MarkDecorator: ... + + class _SkipifMarkDecorator(MarkDecorator): + def __call__( # type: ignore[override] + self, + condition: str | bool = ..., + *conditions: str | bool, + reason: str = ..., + ) -> MarkDecorator: ... + + class _XfailMarkDecorator(MarkDecorator): + @overload # type: ignore[override,no-overload-impl] + def __call__(self, arg: Markable) -> Markable: ... 
+ + @overload + def __call__( + self, + condition: str | bool = False, + *conditions: str | bool, + reason: str = ..., + run: bool = ..., + raises: None + | type[BaseException] + | tuple[type[BaseException], ...] + | AbstractRaises[BaseException] = ..., + strict: bool = ..., + ) -> MarkDecorator: ... + + class _ParametrizeMarkDecorator(MarkDecorator): + def __call__( # type: ignore[override] + self, + argnames: str | Sequence[str], + argvalues: Iterable[ParameterSet | Sequence[object] | object], + *, + indirect: bool | Sequence[str] = ..., + ids: Iterable[None | str | float | int | bool] + | Callable[[Any], object | None] + | None = ..., + scope: _ScopeName | None = ..., + ) -> MarkDecorator: ... + + class _UsefixturesMarkDecorator(MarkDecorator): + def __call__(self, *fixtures: str) -> MarkDecorator: # type: ignore[override] + ... + + class _FilterwarningsMarkDecorator(MarkDecorator): + def __call__(self, *filters: str) -> MarkDecorator: # type: ignore[override] + ... + + +@final +class MarkGenerator: + """Factory for :class:`MarkDecorator` objects - exposed as + a ``pytest.mark`` singleton instance. + + Example:: + + import pytest + + + @pytest.mark.slowtest + def test_function(): + pass + + applies a 'slowtest' :class:`Mark` on ``test_function``. + """ + + # See TYPE_CHECKING above. + if TYPE_CHECKING: + skip: _SkipMarkDecorator + skipif: _SkipifMarkDecorator + xfail: _XfailMarkDecorator + parametrize: _ParametrizeMarkDecorator + usefixtures: _UsefixturesMarkDecorator + filterwarnings: _FilterwarningsMarkDecorator + + def __init__(self, *, _ispytest: bool = False) -> None: + check_ispytest(_ispytest) + self._config: Config | None = None + self._markers: set[str] = set() + + def __getattr__(self, name: str) -> MarkDecorator: + """Generate a new :class:`MarkDecorator` with the given name.""" + if name[0] == "_": + raise AttributeError("Marker name must NOT start with underscore") + + if self._config is not None: + # We store a set of markers as a performance optimisation - if a mark + # name is in the set we definitely know it, but a mark may be known and + # not in the set. We therefore start by updating the set! + if name not in self._markers: + for line in self._config.getini("markers"): + # example lines: "skipif(condition): skip the given test if..." + # or "hypothesis: tests which use Hypothesis", so to get the + # marker name we split on both `:` and `(`. + marker = line.split(":")[0].split("(")[0].strip() + self._markers.add(marker) + + # If the name is not in the set of known marks after updating, + # then it really is time to issue a warning or an error. + if name not in self._markers: + if self._config.option.strict_markers or self._config.option.strict: + fail( + f"{name!r} not found in `markers` configuration option", + pytrace=False, + ) + + # Raise a specific error for common misspellings of "parametrize". + if name in ["parameterize", "parametrise", "parameterise"]: + __tracebackhide__ = True + fail(f"Unknown '{name}' mark, did you mean 'parametrize'?") + + warnings.warn( + f"Unknown pytest.mark.{name} - is this a typo? 
You can register "
+                    "custom marks to avoid this warning - for details, see "
+                    "https://docs.pytest.org/en/stable/how-to/mark.html",
+                    PytestUnknownMarkWarning,
+                    2,
+                )
+
+        return MarkDecorator(Mark(name, (), {}, _ispytest=True), _ispytest=True)
+
+
+MARK_GEN = MarkGenerator(_ispytest=True)
+
+
+@final
+class NodeKeywords(MutableMapping[str, Any]):
+    __slots__ = ("_markers", "node", "parent")
+
+    def __init__(self, node: Node) -> None:
+        self.node = node
+        self.parent = node.parent
+        self._markers = {node.name: True}
+
+    def __getitem__(self, key: str) -> Any:
+        try:
+            return self._markers[key]
+        except KeyError:
+            if self.parent is None:
+                raise
+            return self.parent.keywords[key]
+
+    def __setitem__(self, key: str, value: Any) -> None:
+        self._markers[key] = value
+
+    # Note: we could've avoided explicitly implementing some of the methods
+    # below and use the collections.abc fallback, but that would be slow.
+
+    def __contains__(self, key: object) -> bool:
+        return key in self._markers or (
+            self.parent is not None and key in self.parent.keywords
+        )
+
+    def update(  # type: ignore[override]
+        self,
+        other: Mapping[str, Any] | Iterable[tuple[str, Any]] = (),
+        **kwds: Any,
+    ) -> None:
+        self._markers.update(other)
+        self._markers.update(kwds)
+
+    def __delitem__(self, key: str) -> None:
+        raise ValueError("cannot delete key in keywords dict")
+
+    def __iter__(self) -> Iterator[str]:
+        # Doesn't need to be fast.
+        yield from self._markers
+        if self.parent is not None:
+            for keyword in self.parent.keywords:
+                # self._marks and self.parent.keywords can have duplicates.
+                if keyword not in self._markers:
+                    yield keyword
+
+    def __len__(self) -> int:
+        # Doesn't need to be fast.
+        return sum(1 for keyword in self)
+
+    def __repr__(self) -> str:
+        return f"<NodeKeywords for node {self.node}>"
diff --git a/.venv/lib/python3.11/site-packages/_pytest/monkeypatch.py b/.venv/lib/python3.11/site-packages/_pytest/monkeypatch.py
new file mode 100644
index 0000000..1285e57
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/_pytest/monkeypatch.py
@@ -0,0 +1,415 @@
+# mypy: allow-untyped-defs
+"""Monkeypatching and mocking functionality."""
+
+from __future__ import annotations
+
+from collections.abc import Generator
+from collections.abc import Mapping
+from collections.abc import MutableMapping
+from contextlib import contextmanager
+import os
+import re
+import sys
+from typing import Any
+from typing import final
+from typing import overload
+from typing import TypeVar
+import warnings
+
+from _pytest.fixtures import fixture
+from _pytest.warning_types import PytestWarning
+
+
+RE_IMPORT_ERROR_NAME = re.compile(r"^No module named (.*)$")
+
+
+K = TypeVar("K")
+V = TypeVar("V")
+
+
+@fixture
+def monkeypatch() -> Generator[MonkeyPatch]:
+    """A convenient fixture for monkey-patching.
+
+    The fixture provides these methods to modify objects, dictionaries, or
+    :data:`os.environ`:
+
+    * :meth:`monkeypatch.setattr(obj, name, value, raising=True) <pytest.MonkeyPatch.setattr>`
+    * :meth:`monkeypatch.delattr(obj, name, raising=True) <pytest.MonkeyPatch.delattr>`
+    * :meth:`monkeypatch.setitem(mapping, name, value) <pytest.MonkeyPatch.setitem>`
+    * :meth:`monkeypatch.delitem(obj, name, raising=True) <pytest.MonkeyPatch.delitem>`
+    * :meth:`monkeypatch.setenv(name, value, prepend=None) <pytest.MonkeyPatch.setenv>`
+    * :meth:`monkeypatch.delenv(name, raising=True) <pytest.MonkeyPatch.delenv>`
+    * :meth:`monkeypatch.syspath_prepend(path) <pytest.MonkeyPatch.syspath_prepend>`
+    * :meth:`monkeypatch.chdir(path) <pytest.MonkeyPatch.chdir>`
+    * :meth:`monkeypatch.context() <pytest.MonkeyPatch.context>`
+
+    All modifications will be undone after the requesting test function or
+    fixture has finished. The ``raising`` parameter determines if a :class:`KeyError`
+    or :class:`AttributeError` will be raised if the set/deletion operation does not have the
+    specified target.
+
+    To undo modifications done by the fixture in a contained scope,
+    use :meth:`context() <pytest.MonkeyPatch.context>`.
+    """
+    mpatch = MonkeyPatch()
+    yield mpatch
+    mpatch.undo()
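+
+# A minimal usage sketch (comment only; `test_getcwd` is a hypothetical test
+# function) of the fixture above, mirroring the setattr example further down:
+#
+#     def test_getcwd(monkeypatch):
+#         monkeypatch.setattr(os, "getcwd", lambda: "/")
+#         assert os.getcwd() == "/"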
+
+
+def resolve(name: str) -> object:
+    # Simplified from zope.dottedname.
+    parts = name.split(".")
+
+    used = parts.pop(0)
+    found: object = __import__(used)
+    for part in parts:
+        used += "." + part
+        try:
+            found = getattr(found, part)
+        except AttributeError:
+            pass
+        else:
+            continue
+        # We use explicit un-nesting of the handling block in order
+        # to avoid nested exceptions.
+        try:
+            __import__(used)
+        except ImportError as ex:
+            expected = str(ex).split()[-1]
+            if expected == used:
+                raise
+            else:
+                raise ImportError(f"import error in {used}: {ex}") from ex
+        found = annotated_getattr(found, part, used)
+    return found
+
+
+def annotated_getattr(obj: object, name: str, ann: str) -> object:
+    try:
+        obj = getattr(obj, name)
+    except AttributeError as e:
+        raise AttributeError(
+            f"{type(obj).__name__!r} object at {ann} has no attribute {name!r}"
+        ) from e
+    return obj
+
+
+def derive_importpath(import_path: str, raising: bool) -> tuple[str, object]:
+    if not isinstance(import_path, str) or "." not in import_path:
+        raise TypeError(f"must be absolute import path string, not {import_path!r}")
+    module, attr = import_path.rsplit(".", 1)
+    target = resolve(module)
+    if raising:
+        annotated_getattr(target, attr, ann=module)
+    return attr, target
+
+
+class Notset:
+    def __repr__(self) -> str:
+        return "<notset>"
+
+
+notset = Notset()
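+
+# A minimal sketch (comment only) of how derive_importpath above splits a
+# dotted path; "os.path.join" is just an illustrative input:
+#
+#     attr, target = derive_importpath("os.path.join", raising=True)
+#     # attr == "join"; target is the imported os.path module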
+
+
+@final
+class MonkeyPatch:
+    """Helper to conveniently monkeypatch attributes/items/environment
+    variables/syspath.
+
+    Returned by the :fixture:`monkeypatch` fixture.
+
+    .. versionchanged:: 6.2
+        Can now also be used directly as `pytest.MonkeyPatch()`, for when
+        the fixture is not available. In this case, use
+        :meth:`with MonkeyPatch.context() as mp: <context>` or remember to call
+        :meth:`undo` explicitly.
+    """
+
+    def __init__(self) -> None:
+        self._setattr: list[tuple[object, str, object]] = []
+        self._setitem: list[tuple[Mapping[Any, Any], object, object]] = []
+        self._cwd: str | None = None
+        self._savesyspath: list[str] | None = None
+
+    @classmethod
+    @contextmanager
+    def context(cls) -> Generator[MonkeyPatch]:
+        """Context manager that returns a new :class:`MonkeyPatch` object
+        which undoes any patching done inside the ``with`` block upon exit.
+
+        Example:
+
+        .. code-block:: python
+
+            import functools
+
+
+            def test_partial(monkeypatch):
+                with monkeypatch.context() as m:
+                    m.setattr(functools, "partial", 3)
+
+        Useful in situations where it is desired to undo some patches before the test ends,
+        such as mocking ``stdlib`` functions that might break pytest itself if mocked (for examples
+        of this see :issue:`3290`).
+        """
+        m = cls()
+        try:
+            yield m
+        finally:
+            m.undo()
+
+    @overload
+    def setattr(
+        self,
+        target: str,
+        name: object,
+        value: Notset = ...,
+        raising: bool = ...,
+    ) -> None: ...
+
+    @overload
+    def setattr(
+        self,
+        target: object,
+        name: str,
+        value: object,
+        raising: bool = ...,
+    ) -> None: ...
+
+    def setattr(
+        self,
+        target: str | object,
+        name: object | str,
+        value: object = notset,
+        raising: bool = True,
+    ) -> None:
+        """
+        Set attribute value on target, memorizing the old value.
+
+        For example:
+
+        .. code-block:: python
+
+            import os
+
+            monkeypatch.setattr(os, "getcwd", lambda: "/")
+
+        The code above replaces the :func:`os.getcwd` function by a ``lambda`` which
+        always returns ``"/"``.
+
+        For convenience, you can specify a string as ``target`` which
+        will be interpreted as a dotted import path, with the last part
+        being the attribute name:
+
+        .. code-block:: python
+
+            monkeypatch.setattr("os.getcwd", lambda: "/")
+
+        Raises :class:`AttributeError` if the attribute does not exist, unless
+        ``raising`` is set to False.
+
+        **Where to patch**
+
+        ``monkeypatch.setattr`` works by (temporarily) changing the object that a name points to with another one.
+        There can be many names pointing to any individual object, so for patching to work you must ensure
+        that you patch the name used by the system under test.
+
+        See the section :ref:`Where to patch <python:where-to-patch>` in the :mod:`unittest.mock`
+        docs for a complete explanation, which is meant for :func:`unittest.mock.patch` but
+        applies to ``monkeypatch.setattr`` as well.
+        """
+        __tracebackhide__ = True
+        import inspect
+
+        if isinstance(value, Notset):
+            if not isinstance(target, str):
+                raise TypeError(
+                    "use setattr(target, name, value) or "
+                    "setattr(target, value) with target being a dotted "
+                    "import string"
+                )
+            value = name
+            name, target = derive_importpath(target, raising)
+        else:
+            if not isinstance(name, str):
+                raise TypeError(
+                    "use setattr(target, name, value) with name being a string or "
+                    "setattr(target, value) with target being a dotted "
+                    "import string"
+                )
+
+        oldval = getattr(target, name, notset)
+        if raising and oldval is notset:
+            raise AttributeError(f"{target!r} has no attribute {name!r}")
+
+        # avoid class descriptors like staticmethod/classmethod
+        if inspect.isclass(target):
+            oldval = target.__dict__.get(name, notset)
+        self._setattr.append((target, name, oldval))
+        setattr(target, name, value)
+
+    def delattr(
+        self,
+        target: object | str,
+        name: str | Notset = notset,
+        raising: bool = True,
+    ) -> None:
+        """Delete attribute ``name`` from ``target``.
+
+        If no ``name`` is specified and ``target`` is a string
+        it will be interpreted as a dotted import path with the
+        last part being the attribute name.
+
+        Raises AttributeError if the attribute does not exist, unless
+        ``raising`` is set to False.
+        """
+        __tracebackhide__ = True
+        import inspect
+
+        if isinstance(name, Notset):
+            if not isinstance(target, str):
+                raise TypeError(
+                    "use delattr(target, name) or "
+                    "delattr(target) with target being a dotted "
+                    "import string"
+                )
+            name, target = derive_importpath(target, raising)
+
+        if not hasattr(target, name):
+            if raising:
+                raise AttributeError(name)
+        else:
+            oldval = getattr(target, name, notset)
+            # Avoid class descriptors like staticmethod/classmethod.
+            if inspect.isclass(target):
+                oldval = target.__dict__.get(name, notset)
+            self._setattr.append((target, name, oldval))
+            delattr(target, name)
+
+    def setitem(self, dic: Mapping[K, V], name: K, value: V) -> None:
+        """Set dictionary entry ``name`` to value."""
+        self._setitem.append((dic, name, dic.get(name, notset)))
+        # Not all Mapping types support indexing, but MutableMapping doesn't support TypedDict
+        dic[name] = value  # type: ignore[index]
+
+    def delitem(self, dic: Mapping[K, V], name: K, raising: bool = True) -> None:
+        """Delete ``name`` from dict.
+
+        Raises ``KeyError`` if it doesn't exist, unless ``raising`` is set to
+        False.
+ """ + if name not in dic: + if raising: + raise KeyError(name) + else: + self._setitem.append((dic, name, dic.get(name, notset))) + # Not all Mapping types support indexing, but MutableMapping doesn't support TypedDict + del dic[name] # type: ignore[attr-defined] + + def setenv(self, name: str, value: str, prepend: str | None = None) -> None: + """Set environment variable ``name`` to ``value``. + + If ``prepend`` is a character, read the current environment variable + value and prepend the ``value`` adjoined with the ``prepend`` + character. + """ + if not isinstance(value, str): + warnings.warn( # type: ignore[unreachable] + PytestWarning( + f"Value of environment variable {name} type should be str, but got " + f"{value!r} (type: {type(value).__name__}); converted to str implicitly" + ), + stacklevel=2, + ) + value = str(value) + if prepend and name in os.environ: + value = value + prepend + os.environ[name] + self.setitem(os.environ, name, value) + + def delenv(self, name: str, raising: bool = True) -> None: + """Delete ``name`` from the environment. + + Raises ``KeyError`` if it does not exist, unless ``raising`` is set to + False. + """ + environ: MutableMapping[str, str] = os.environ + self.delitem(environ, name, raising=raising) + + def syspath_prepend(self, path) -> None: + """Prepend ``path`` to ``sys.path`` list of import locations.""" + if self._savesyspath is None: + self._savesyspath = sys.path[:] + sys.path.insert(0, str(path)) + + # https://github.com/pypa/setuptools/blob/d8b901bc/docs/pkg_resources.txt#L162-L171 + # this is only needed when pkg_resources was already loaded by the namespace package + if "pkg_resources" in sys.modules: + from pkg_resources import fixup_namespace_packages + + fixup_namespace_packages(str(path)) + + # A call to syspathinsert() usually means that the caller wants to + # import some dynamically created files, thus with python3 we + # invalidate its import caches. + # This is especially important when any namespace package is in use, + # since then the mtime based FileFinder cache (that gets created in + # this case already) gets not invalidated when writing the new files + # quickly afterwards. + from importlib import invalidate_caches + + invalidate_caches() + + def chdir(self, path: str | os.PathLike[str]) -> None: + """Change the current working directory to the specified path. + + :param path: + The path to change into. + """ + if self._cwd is None: + self._cwd = os.getcwd() + os.chdir(path) + + def undo(self) -> None: + """Undo previous changes. + + This call consumes the undo stack. Calling it a second time has no + effect unless you do more monkeypatching after the undo call. + + There is generally no need to call `undo()`, since it is + called automatically during tear-down. + + .. note:: + The same `monkeypatch` fixture is used across a + single test function invocation. If `monkeypatch` is used both by + the test function itself and one of the test fixtures, + calling `undo()` will undo all of the changes made in + both functions. + + Prefer to use :meth:`context() ` instead. + """ + for obj, name, value in reversed(self._setattr): + if value is not notset: + setattr(obj, name, value) + else: + delattr(obj, name) + self._setattr[:] = [] + for dictionary, key, value in reversed(self._setitem): + if value is notset: + try: + # Not all Mapping types support indexing, but MutableMapping doesn't support TypedDict + del dictionary[key] # type: ignore[attr-defined] + except KeyError: + pass # Was already deleted, so we have the desired state. 
+ else: + # Not all Mapping types support indexing, but MutableMapping doesn't support TypedDict + dictionary[key] = value # type: ignore[index] + self._setitem[:] = [] + if self._savesyspath is not None: + sys.path[:] = self._savesyspath + self._savesyspath = None + + if self._cwd is not None: + os.chdir(self._cwd) + self._cwd = None diff --git a/.venv/lib/python3.11/site-packages/_pytest/nodes.py b/.venv/lib/python3.11/site-packages/_pytest/nodes.py new file mode 100644 index 0000000..6690f6a --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/nodes.py @@ -0,0 +1,772 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +import abc +from collections.abc import Callable +from collections.abc import Iterable +from collections.abc import Iterator +from collections.abc import MutableMapping +from functools import cached_property +from functools import lru_cache +import os +import pathlib +from pathlib import Path +from typing import Any +from typing import cast +from typing import NoReturn +from typing import overload +from typing import TYPE_CHECKING +from typing import TypeVar +import warnings + +import pluggy + +import _pytest._code +from _pytest._code import getfslineno +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import TerminalRepr +from _pytest._code.code import Traceback +from _pytest._code.code import TracebackStyle +from _pytest.compat import LEGACY_PATH +from _pytest.compat import signature +from _pytest.config import Config +from _pytest.config import ConftestImportFailure +from _pytest.config.compat import _check_path +from _pytest.deprecated import NODE_CTOR_FSPATH_ARG +from _pytest.mark.structures import Mark +from _pytest.mark.structures import MarkDecorator +from _pytest.mark.structures import NodeKeywords +from _pytest.outcomes import fail +from _pytest.pathlib import absolutepath +from _pytest.stash import Stash +from _pytest.warning_types import PytestWarning + + +if TYPE_CHECKING: + from typing_extensions import Self + + # Imported here due to circular import. + from _pytest.main import Session + + +SEP = "/" + +tracebackcutdir = Path(_pytest.__file__).parent + + +_T = TypeVar("_T") + + +def _imply_path( + node_type: type[Node], + path: Path | None, + fspath: LEGACY_PATH | None, +) -> Path: + if fspath is not None: + warnings.warn( + NODE_CTOR_FSPATH_ARG.format( + node_type_name=node_type.__name__, + ), + stacklevel=6, + ) + if path is not None: + if fspath is not None: + _check_path(path, fspath) + return path + else: + assert fspath is not None + return Path(fspath) + + +_NodeType = TypeVar("_NodeType", bound="Node") + + +class NodeMeta(abc.ABCMeta): + """Metaclass used by :class:`Node` to enforce that direct construction raises + :class:`Failed`. + + This behaviour supports the indirection introduced with :meth:`Node.from_parent`, + the named constructor to be used instead of direct construction. The design + decision to enforce indirection with :class:`NodeMeta` was made as a + temporary aid for refactoring the collection tree, which was diagnosed to + have :class:`Node` objects whose creational patterns were overly entangled. + Once the refactoring is complete, this metaclass can be removed. + + See https://github.com/pytest-dev/pytest/projects/3 for an overview of the + progress on detangling the :class:`Node` classes. 
+    """
+
+    def __call__(cls, *k, **kw) -> NoReturn:
+        msg = (
+            "Direct construction of {name} has been deprecated, please use {name}.from_parent.\n"
+            "See "
+            "https://docs.pytest.org/en/stable/deprecations.html#node-construction-changed-to-node-from-parent"
+            " for more details."
+        ).format(name=f"{cls.__module__}.{cls.__name__}")
+        fail(msg, pytrace=False)
+
+    def _create(cls: type[_T], *k, **kw) -> _T:
+        try:
+            return super().__call__(*k, **kw)  # type: ignore[no-any-return,misc]
+        except TypeError:
+            sig = signature(getattr(cls, "__init__"))
+            known_kw = {k: v for k, v in kw.items() if k in sig.parameters}
+            from .warning_types import PytestDeprecationWarning
+
+            warnings.warn(
+                PytestDeprecationWarning(
+                    f"{cls} is not using a cooperative constructor and only takes {set(known_kw)}.\n"
+                    "See https://docs.pytest.org/en/stable/deprecations.html"
+                    "#constructors-of-custom-pytest-node-subclasses-should-take-kwargs "
+                    "for more details."
+                )
+            )
+
+            return super().__call__(*k, **known_kw)  # type: ignore[no-any-return,misc]
+
+
+class Node(abc.ABC, metaclass=NodeMeta):
+    r"""Base class of :class:`Collector` and :class:`Item`, the components of
+    the test collection tree.
+
+    ``Collector``\'s are the internal nodes of the tree, and ``Item``\'s are the
+    leaf nodes.
+    """
+
+    # Implemented in the legacypath plugin.
+    #: A ``LEGACY_PATH`` copy of the :attr:`path` attribute. Intended for usage
+    #: for methods not migrated to ``pathlib.Path`` yet, such as
+    #: :meth:`Item.reportinfo <pytest.Item.reportinfo>`. Will be deprecated in
+    #: a future release, prefer using :attr:`path` instead.
+    fspath: LEGACY_PATH
+
+    # Use __slots__ to make attribute access faster.
+    # Note that __dict__ is still available.
+    __slots__ = (
+        "__dict__",
+        "_nodeid",
+        "_store",
+        "config",
+        "name",
+        "parent",
+        "path",
+        "session",
+    )
+
+    def __init__(
+        self,
+        name: str,
+        parent: Node | None = None,
+        config: Config | None = None,
+        session: Session | None = None,
+        fspath: LEGACY_PATH | None = None,
+        path: Path | None = None,
+        nodeid: str | None = None,
+    ) -> None:
+        #: A unique name within the scope of the parent node.
+        self.name: str = name
+
+        #: The parent collector node.
+        self.parent = parent
+
+        if config:
+            #: The pytest config object.
+            self.config: Config = config
+        else:
+            if not parent:
+                raise TypeError("config or parent must be provided")
+            self.config = parent.config
+
+        if session:
+            #: The pytest session this node is part of.
+            self.session: Session = session
+        else:
+            if not parent:
+                raise TypeError("session or parent must be provided")
+            self.session = parent.session
+
+        if path is None and fspath is None:
+            path = getattr(parent, "path", None)
+        #: Filesystem path where this node was collected from (can be None).
+        self.path: pathlib.Path = _imply_path(type(self), path, fspath=fspath)
+
+        # The explicit annotation is to avoid publicly exposing NodeKeywords.
+        #: Keywords/markers collected from all scopes.
+        self.keywords: MutableMapping[str, Any] = NodeKeywords(self)
+
+        #: The marker objects belonging to this node.
+        self.own_markers: list[Mark] = []
+
+        #: Allow adding of extra keywords to use for matching.
+        self.extra_keyword_matches: set[str] = set()
+
+        if nodeid is not None:
+            assert "::()" not in nodeid
+            self._nodeid = nodeid
+        else:
+            if not self.parent:
+                raise TypeError("nodeid or parent must be provided")
+            self._nodeid = self.parent.nodeid + "::" + self.name
+
+        #: A place where plugins can store information on the node for their
+        #: own use.
+        self.stash: Stash = Stash()
+        # Deprecated alias. Was never public. Can be removed in a few releases.
+        self._store = self.stash
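+
+    # A minimal usage sketch (comment only; MyItem is a hypothetical Node
+    # subclass): instances are created via from_parent below, never directly:
+    #
+    #     item = MyItem.from_parent(parent, name="example")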
+
+    @classmethod
+    def from_parent(cls, parent: Node, **kw) -> Self:
+        """Public constructor for Nodes.
+
+        This indirection got introduced in order to enable removing
+        the fragile logic from the node constructors.
+
+        Subclasses can use ``super().from_parent(...)`` when overriding the
+        construction.
+
+        :param parent: The parent node of this Node.
+        """
+        if "config" in kw:
+            raise TypeError("config is not a valid argument for from_parent")
+        if "session" in kw:
+            raise TypeError("session is not a valid argument for from_parent")
+        return cls._create(parent=parent, **kw)
+
+    @property
+    def ihook(self) -> pluggy.HookRelay:
+        """fspath-sensitive hook proxy used to call pytest hooks."""
+        return self.session.gethookproxy(self.path)
+
+    def __repr__(self) -> str:
+        return "<{} {}>".format(self.__class__.__name__, getattr(self, "name", None))
+
+    def warn(self, warning: Warning) -> None:
+        """Issue a warning for this Node.
+
+        Warnings will be displayed after the test session, unless explicitly suppressed.
+
+        :param Warning warning:
+            The warning instance to issue.
+
+        :raises ValueError: If ``warning`` instance is not a subclass of Warning.
+
+        Example usage:
+
+        .. code-block:: python
+
+            node.warn(PytestWarning("some message"))
+            node.warn(UserWarning("some message"))
+
+        .. versionchanged:: 6.2
+            Any subclass of :class:`Warning` is now accepted, rather than only
+            :class:`PytestWarning <pytest.PytestWarning>` subclasses.
+        """
+        # enforce type checks here to avoid getting a generic type error later otherwise.
+        if not isinstance(warning, Warning):
+            raise ValueError(
+                f"warning must be an instance of Warning or subclass, got {warning!r}"
+            )
+        path, lineno = get_fslocation_from_item(self)
+        assert lineno is not None
+        warnings.warn_explicit(
+            warning,
+            category=None,
+            filename=str(path),
+            lineno=lineno + 1,
+        )
+
+    # Methods for ordering nodes.
+
+    @property
+    def nodeid(self) -> str:
+        """A ::-separated string denoting its collection tree address."""
+        return self._nodeid
+
+    def __hash__(self) -> int:
+        return hash(self._nodeid)
+
+    def setup(self) -> None:
+        pass
+
+    def teardown(self) -> None:
+        pass
+
+    def iter_parents(self) -> Iterator[Node]:
+        """Iterate over all parent collectors starting from and including self
+        up to the root of the collection tree.
+
+        .. versionadded:: 8.1
+        """
+        parent: Node | None = self
+        while parent is not None:
+            yield parent
+            parent = parent.parent
+
+    def listchain(self) -> list[Node]:
+        """Return a list of all parent collectors starting from the root of the
+        collection tree down to and including self."""
+        chain = []
+        item: Node | None = self
+        while item is not None:
+            chain.append(item)
+            item = item.parent
+        chain.reverse()
+        return chain
+
+    def add_marker(self, marker: str | MarkDecorator, append: bool = True) -> None:
+        """Dynamically add a marker object to the node.
+
+        :param marker:
+            The marker.
+        :param append:
+            Whether to append the marker, or prepend it.
+ """ + from _pytest.mark import MARK_GEN + + if isinstance(marker, MarkDecorator): + marker_ = marker + elif isinstance(marker, str): + marker_ = getattr(MARK_GEN, marker) + else: + raise ValueError("is not a string or pytest.mark.* Marker") + self.keywords[marker_.name] = marker_ + if append: + self.own_markers.append(marker_.mark) + else: + self.own_markers.insert(0, marker_.mark) + + def iter_markers(self, name: str | None = None) -> Iterator[Mark]: + """Iterate over all markers of the node. + + :param name: If given, filter the results by the name attribute. + :returns: An iterator of the markers of the node. + """ + return (x[1] for x in self.iter_markers_with_node(name=name)) + + def iter_markers_with_node( + self, name: str | None = None + ) -> Iterator[tuple[Node, Mark]]: + """Iterate over all markers of the node. + + :param name: If given, filter the results by the name attribute. + :returns: An iterator of (node, mark) tuples. + """ + for node in self.iter_parents(): + for mark in node.own_markers: + if name is None or getattr(mark, "name", None) == name: + yield node, mark + + @overload + def get_closest_marker(self, name: str) -> Mark | None: ... + + @overload + def get_closest_marker(self, name: str, default: Mark) -> Mark: ... + + def get_closest_marker(self, name: str, default: Mark | None = None) -> Mark | None: + """Return the first marker matching the name, from closest (for + example function) to farther level (for example module level). + + :param default: Fallback return value if no marker was found. + :param name: Name to filter by. + """ + return next(self.iter_markers(name=name), default) + + def listextrakeywords(self) -> set[str]: + """Return a set of all extra keywords in self and any parents.""" + extra_keywords: set[str] = set() + for item in self.listchain(): + extra_keywords.update(item.extra_keyword_matches) + return extra_keywords + + def listnames(self) -> list[str]: + return [x.name for x in self.listchain()] + + def addfinalizer(self, fin: Callable[[], object]) -> None: + """Register a function to be called without arguments when this node is + finalized. + + This method can only be called when this node is active + in a setup chain, for example during self.setup(). + """ + self.session._setupstate.addfinalizer(fin, self) + + def getparent(self, cls: type[_NodeType]) -> _NodeType | None: + """Get the closest parent node (including self) which is an instance of + the given class. + + :param cls: The node class to search for. + :returns: The node, if found. + """ + for node in self.iter_parents(): + if isinstance(node, cls): + return node + return None + + def _traceback_filter(self, excinfo: ExceptionInfo[BaseException]) -> Traceback: + return excinfo.traceback + + def _repr_failure_py( + self, + excinfo: ExceptionInfo[BaseException], + style: TracebackStyle | None = None, + ) -> TerminalRepr: + from _pytest.fixtures import FixtureLookupError + + if isinstance(excinfo.value, ConftestImportFailure): + excinfo = ExceptionInfo.from_exception(excinfo.value.cause) + if isinstance(excinfo.value, fail.Exception): + if not excinfo.value.pytrace: + style = "value" + if isinstance(excinfo.value, FixtureLookupError): + return excinfo.value.formatrepr() + + tbfilter: bool | Callable[[ExceptionInfo[BaseException]], Traceback] + if self.config.getoption("fulltrace", False): + style = "long" + tbfilter = False + else: + tbfilter = self._traceback_filter + if style == "auto": + style = "long" + # XXX should excinfo.getrepr record all data and toterminal() process it? 
+ if style is None: + if self.config.getoption("tbstyle", "auto") == "short": + style = "short" + else: + style = "long" + + if self.config.get_verbosity() > 1: + truncate_locals = False + else: + truncate_locals = True + + truncate_args = False if self.config.get_verbosity() > 2 else True + + # excinfo.getrepr() formats paths relative to the CWD if `abspath` is False. + # It is possible for a fixture/test to change the CWD while this code runs, which + # would then result in the user seeing confusing paths in the failure message. + # To fix this, if the CWD changed, always display the full absolute path. + # It will be better to just always display paths relative to invocation_dir, but + # this requires a lot of plumbing (#6428). + try: + abspath = Path(os.getcwd()) != self.config.invocation_params.dir + except OSError: + abspath = True + + return excinfo.getrepr( + funcargs=True, + abspath=abspath, + showlocals=self.config.getoption("showlocals", False), + style=style, + tbfilter=tbfilter, + truncate_locals=truncate_locals, + truncate_args=truncate_args, + ) + + def repr_failure( + self, + excinfo: ExceptionInfo[BaseException], + style: TracebackStyle | None = None, + ) -> str | TerminalRepr: + """Return a representation of a collection or test failure. + + .. seealso:: :ref:`non-python tests` + + :param excinfo: Exception information for the failure. + """ + return self._repr_failure_py(excinfo, style) + + +def get_fslocation_from_item(node: Node) -> tuple[str | Path, int | None]: + """Try to extract the actual location from a node, depending on available attributes: + + * "location": a pair (path, lineno) + * "obj": a Python object that the node wraps. + * "path": just a path + + :rtype: A tuple of (str|Path, int) with filename and 0-based line number. + """ + # See Item.location. + location: tuple[str, int | None, str] | None = getattr(node, "location", None) + if location is not None: + return location[:2] + obj = getattr(node, "obj", None) + if obj is not None: + return getfslineno(obj) + return getattr(node, "path", "unknown location"), -1 + + +class Collector(Node, abc.ABC): + """Base class of all collectors. + + Collector create children through `collect()` and thus iteratively build + the collection tree. + """ + + class CollectError(Exception): + """An error during collection, contains a custom message.""" + + @abc.abstractmethod + def collect(self) -> Iterable[Item | Collector]: + """Collect children (items and collectors) for this collector.""" + raise NotImplementedError("abstract") + + # TODO: This omits the style= parameter which breaks Liskov Substitution. + def repr_failure( # type: ignore[override] + self, excinfo: ExceptionInfo[BaseException] + ) -> str | TerminalRepr: + """Return a representation of a collection failure. + + :param excinfo: Exception information for the failure. + """ + if isinstance(excinfo.value, self.CollectError) and not self.config.getoption( + "fulltrace", False + ): + exc = excinfo.value + return str(exc.args[0]) + + # Respect explicit tbstyle option, but default to "short" + # (_repr_failure_py uses "long" with "fulltrace" option always). 
+ tbstyle = self.config.getoption("tbstyle", "auto") + if tbstyle == "auto": + tbstyle = "short" + + return self._repr_failure_py(excinfo, style=tbstyle) + + def _traceback_filter(self, excinfo: ExceptionInfo[BaseException]) -> Traceback: + if hasattr(self, "path"): + traceback = excinfo.traceback + ntraceback = traceback.cut(path=self.path) + if ntraceback == traceback: + ntraceback = ntraceback.cut(excludepath=tracebackcutdir) + return ntraceback.filter(excinfo) + return excinfo.traceback + + +@lru_cache(maxsize=1000) +def _check_initialpaths_for_relpath( + initial_paths: frozenset[Path], path: Path +) -> str | None: + if path in initial_paths: + return "" + + for parent in path.parents: + if parent in initial_paths: + return str(path.relative_to(parent)) + + return None + + +class FSCollector(Collector, abc.ABC): + """Base class for filesystem collectors.""" + + def __init__( + self, + fspath: LEGACY_PATH | None = None, + path_or_parent: Path | Node | None = None, + path: Path | None = None, + name: str | None = None, + parent: Node | None = None, + config: Config | None = None, + session: Session | None = None, + nodeid: str | None = None, + ) -> None: + if path_or_parent: + if isinstance(path_or_parent, Node): + assert parent is None + parent = cast(FSCollector, path_or_parent) + elif isinstance(path_or_parent, Path): + assert path is None + path = path_or_parent + + path = _imply_path(type(self), path, fspath=fspath) + if name is None: + name = path.name + if parent is not None and parent.path != path: + try: + rel = path.relative_to(parent.path) + except ValueError: + pass + else: + name = str(rel) + name = name.replace(os.sep, SEP) + self.path = path + + if session is None: + assert parent is not None + session = parent.session + + if nodeid is None: + try: + nodeid = str(self.path.relative_to(session.config.rootpath)) + except ValueError: + nodeid = _check_initialpaths_for_relpath(session._initialpaths, path) + + if nodeid and os.sep != SEP: + nodeid = nodeid.replace(os.sep, SEP) + + super().__init__( + name=name, + parent=parent, + config=config, + session=session, + nodeid=nodeid, + path=path, + ) + + @classmethod + def from_parent( + cls, + parent, + *, + fspath: LEGACY_PATH | None = None, + path: Path | None = None, + **kw, + ) -> Self: + """The public constructor.""" + return super().from_parent(parent=parent, fspath=fspath, path=path, **kw) + + +class File(FSCollector, abc.ABC): + """Base class for collecting tests from a file. + + :ref:`non-python tests`. + """ + + +class Directory(FSCollector, abc.ABC): + """Base class for collecting files from a directory. + + A basic directory collector does the following: goes over the files and + sub-directories in the directory and creates collectors for them by calling + the hooks :hook:`pytest_collect_directory` and :hook:`pytest_collect_file`, + after checking that they are not ignored using + :hook:`pytest_ignore_collect`. + + The default directory collectors are :class:`~pytest.Dir` and + :class:`~pytest.Package`. + + .. versionadded:: 8.0 + + :ref:`custom directory collectors`. + """ + + +class Item(Node, abc.ABC): + """Base class of all test invocation items. + + Note that for a single function there might be multiple test invocation items. 
+ """ + + nextitem = None + + def __init__( + self, + name, + parent=None, + config: Config | None = None, + session: Session | None = None, + nodeid: str | None = None, + **kw, + ) -> None: + # The first two arguments are intentionally passed positionally, + # to keep plugins who define a node type which inherits from + # (pytest.Item, pytest.File) working (see issue #8435). + # They can be made kwargs when the deprecation above is done. + super().__init__( + name, + parent, + config=config, + session=session, + nodeid=nodeid, + **kw, + ) + self._report_sections: list[tuple[str, str, str]] = [] + + #: A list of tuples (name, value) that holds user defined properties + #: for this test. + self.user_properties: list[tuple[str, object]] = [] + + self._check_item_and_collector_diamond_inheritance() + + def _check_item_and_collector_diamond_inheritance(self) -> None: + """ + Check if the current type inherits from both File and Collector + at the same time, emitting a warning accordingly (#8447). + """ + cls = type(self) + + # We inject an attribute in the type to avoid issuing this warning + # for the same class more than once, which is not helpful. + # It is a hack, but was deemed acceptable in order to avoid + # flooding the user in the common case. + attr_name = "_pytest_diamond_inheritance_warning_shown" + if getattr(cls, attr_name, False): + return + setattr(cls, attr_name, True) + + problems = ", ".join( + base.__name__ for base in cls.__bases__ if issubclass(base, Collector) + ) + if problems: + warnings.warn( + f"{cls.__name__} is an Item subclass and should not be a collector, " + f"however its bases {problems} are collectors.\n" + "Please split the Collectors and the Item into separate node types.\n" + "Pytest Doc example: https://docs.pytest.org/en/latest/example/nonpython.html\n" + "example pull request on a plugin: https://github.com/asmeurer/pytest-flakes/pull/40/", + PytestWarning, + ) + + @abc.abstractmethod + def runtest(self) -> None: + """Run the test case for this item. + + Must be implemented by subclasses. + + .. seealso:: :ref:`non-python tests` + """ + raise NotImplementedError("runtest must be implemented by Item subclass") + + def add_report_section(self, when: str, key: str, content: str) -> None: + """Add a new report section, similar to what's done internally to add + stdout and stderr captured output:: + + item.add_report_section("call", "stdout", "report section contents") + + :param str when: + One of the possible capture states, ``"setup"``, ``"call"``, ``"teardown"``. + :param str key: + Name of the section, can be customized at will. Pytest uses ``"stdout"`` and + ``"stderr"`` internally. + :param str content: + The full contents as a string. + """ + if content: + self._report_sections.append((when, key, content)) + + def reportinfo(self) -> tuple[os.PathLike[str] | str, int | None, str]: + """Get location information for this item for test reports. + + Returns a tuple with three elements: + + - The path of the test (default ``self.path``) + - The 0-based line number of the test (default ``None``) + - A name of the test to be shown (default ``""``) + + .. seealso:: :ref:`non-python tests` + """ + return self.path, None, "" + + @cached_property + def location(self) -> tuple[str, int | None, str]: + """ + Returns a tuple of ``(relfspath, lineno, testname)`` for this item + where ``relfspath`` is file path relative to ``config.rootpath`` + and lineno is a 0-based line number. 
+ """ + location = self.reportinfo() + path = absolutepath(location[0]) + relfspath = self.session._node_location_to_relpath(path) + assert type(location[2]) is str + return (relfspath, location[1], location[2]) diff --git a/.venv/lib/python3.11/site-packages/_pytest/outcomes.py b/.venv/lib/python3.11/site-packages/_pytest/outcomes.py new file mode 100644 index 0000000..68ba054 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/outcomes.py @@ -0,0 +1,317 @@ +"""Exception classes and constants handling test outcomes as well as +functions creating them.""" + +from __future__ import annotations + +from collections.abc import Callable +import sys +from typing import Any +from typing import cast +from typing import NoReturn +from typing import Protocol +from typing import TypeVar + +from .warning_types import PytestDeprecationWarning + + +class OutcomeException(BaseException): + """OutcomeException and its subclass instances indicate and contain info + about test and collection outcomes.""" + + def __init__(self, msg: str | None = None, pytrace: bool = True) -> None: + if msg is not None and not isinstance(msg, str): + error_msg = ( # type: ignore[unreachable] + "{} expected string as 'msg' parameter, got '{}' instead.\n" + "Perhaps you meant to use a mark?" + ) + raise TypeError(error_msg.format(type(self).__name__, type(msg).__name__)) + super().__init__(msg) + self.msg = msg + self.pytrace = pytrace + + def __repr__(self) -> str: + if self.msg is not None: + return self.msg + return f"<{self.__class__.__name__} instance>" + + __str__ = __repr__ + + +TEST_OUTCOME = (OutcomeException, Exception) + + +class Skipped(OutcomeException): + # XXX hackish: on 3k we fake to live in the builtins + # in order to have Skipped exception printing shorter/nicer + __module__ = "builtins" + + def __init__( + self, + msg: str | None = None, + pytrace: bool = True, + allow_module_level: bool = False, + *, + _use_item_location: bool = False, + ) -> None: + super().__init__(msg=msg, pytrace=pytrace) + self.allow_module_level = allow_module_level + # If true, the skip location is reported as the item's location, + # instead of the place that raises the exception/calls skip(). + self._use_item_location = _use_item_location + + +class Failed(OutcomeException): + """Raised from an explicit call to pytest.fail().""" + + __module__ = "builtins" + + +class Exit(Exception): + """Raised for immediate program exits (no tracebacks/summaries).""" + + def __init__( + self, msg: str = "unknown reason", returncode: int | None = None + ) -> None: + self.msg = msg + self.returncode = returncode + super().__init__(msg) + + +# We need a callable protocol to add attributes, for discussion see +# https://github.com/python/mypy/issues/2087. + +_F = TypeVar("_F", bound=Callable[..., object]) +_ET = TypeVar("_ET", bound=type[BaseException]) + + +class _WithException(Protocol[_F, _ET]): + Exception: _ET + __call__: _F + + +def _with_exception(exception_type: _ET) -> Callable[[_F], _WithException[_F, _ET]]: + def decorate(func: _F) -> _WithException[_F, _ET]: + func_with_exception = cast(_WithException[_F, _ET], func) + func_with_exception.Exception = exception_type + return func_with_exception + + return decorate + + +# Exposed helper methods. + + +@_with_exception(Exit) +def exit( + reason: str = "", + returncode: int | None = None, +) -> NoReturn: + """Exit testing process. + + :param reason: + The message to show as the reason for exiting pytest. reason has a default value + only because `msg` is deprecated. 
+ + :param returncode: + Return code to be used when exiting pytest. None means the same as ``0`` (no error), same as :func:`sys.exit`. + + :raises pytest.exit.Exception: + The exception that is raised. + """ + __tracebackhide__ = True + raise Exit(reason, returncode) + + +@_with_exception(Skipped) +def skip( + reason: str = "", + *, + allow_module_level: bool = False, +) -> NoReturn: + """Skip an executing test with the given message. + + This function should be called only during testing (setup, call or teardown) or + during collection by using the ``allow_module_level`` flag. This function can + be called in doctests as well. + + :param reason: + The message to show the user as reason for the skip. + + :param allow_module_level: + Allows this function to be called at module level. + Raising the skip exception at module level will stop + the execution of the module and prevent the collection of all tests in the module, + even those defined before the `skip` call. + + Defaults to False. + + :raises pytest.skip.Exception: + The exception that is raised. + + .. note:: + It is better to use the :ref:`pytest.mark.skipif ref` marker when + possible to declare a test to be skipped under certain conditions + like mismatching platforms or dependencies. + Similarly, use the ``# doctest: +SKIP`` directive (see :py:data:`doctest.SKIP`) + to skip a doctest statically. + """ + __tracebackhide__ = True + raise Skipped(msg=reason, allow_module_level=allow_module_level) + + +@_with_exception(Failed) +def fail(reason: str = "", pytrace: bool = True) -> NoReturn: + """Explicitly fail an executing test with the given message. + + :param reason: + The message to show the user as reason for the failure. + + :param pytrace: + If False, msg represents the full failure information and no + python traceback will be reported. + + :raises pytest.fail.Exception: + The exception that is raised. + """ + __tracebackhide__ = True + raise Failed(msg=reason, pytrace=pytrace) + + +class XFailed(Failed): + """Raised from an explicit call to pytest.xfail().""" + + +@_with_exception(XFailed) +def xfail(reason: str = "") -> NoReturn: + """Imperatively xfail an executing test or setup function with the given reason. + + This function should be called only during testing (setup, call or teardown). + + No other code is executed after using ``xfail()`` (it is implemented + internally by raising an exception). + + :param reason: + The message to show the user as reason for the xfail. + + .. note:: + It is better to use the :ref:`pytest.mark.xfail ref` marker when + possible to declare a test to be xfailed under certain conditions + like known bugs or missing features. + + :raises pytest.xfail.Exception: + The exception that is raised. + """ + __tracebackhide__ = True + raise XFailed(reason) + + +def importorskip( + modname: str, + minversion: str | None = None, + reason: str | None = None, + *, + exc_type: type[ImportError] | None = None, +) -> Any: + """Import and return the requested module ``modname``, or skip the + current test if the module cannot be imported. + + :param modname: + The name of the module to import. + :param minversion: + If given, the imported module's ``__version__`` attribute must be at + least this minimal version, otherwise the test is still skipped. + :param reason: + If given, this reason is shown as the message when the module cannot + be imported. + :param exc_type: + The exception that should be captured in order to skip modules. + Must be :py:class:`ImportError` or a subclass. 
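Since ``_with_exception`` attaches each outcome class to its helper, callers can catch exactly the exception a helper raises; an illustrative test::

    import pytest

    def test_outcome_exception_attributes() -> None:
        # pytest.fail.Exception is the Failed class defined above.
        with pytest.raises(pytest.fail.Exception):
            pytest.fail("boom", pytrace=False)
        # Likewise, pytest.skip.Exception is Skipped.
        with pytest.raises(pytest.skip.Exception):
            pytest.skip("not relevant here")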
+ + If the module can be imported but raises :class:`ImportError`, pytest will + issue a warning to the user, as often users expect the module not to be + found (which would raise :class:`ModuleNotFoundError` instead). + + This warning can be suppressed by passing ``exc_type=ImportError`` explicitly. + + See :ref:`import-or-skip-import-error` for details. + + + :returns: + The imported module. This should be assigned to its canonical name. + + :raises pytest.skip.Exception: + If the module cannot be imported. + + Example:: + + docutils = pytest.importorskip("docutils") + + .. versionadded:: 8.2 + + The ``exc_type`` parameter. + """ + import warnings + + __tracebackhide__ = True + compile(modname, "", "eval") # to catch syntaxerrors + + # Until pytest 9.1, we will warn the user if we catch ImportError (instead of ModuleNotFoundError), + # as this might be hiding an installation/environment problem, which is not usually what is intended + # when using importorskip() (#11523). + # In 9.1, to keep the function signature compatible, we just change the code below to: + # 1. Use `exc_type = ModuleNotFoundError` if `exc_type` is not given. + # 2. Remove `warn_on_import` and the warning handling. + if exc_type is None: + exc_type = ImportError + warn_on_import_error = True + else: + warn_on_import_error = False + + skipped: Skipped | None = None + warning: Warning | None = None + + with warnings.catch_warnings(): + # Make sure to ignore ImportWarnings that might happen because + # of existing directories with the same name we're trying to + # import but without a __init__.py file. + warnings.simplefilter("ignore") + + try: + __import__(modname) + except exc_type as exc: + # Do not raise or issue warnings inside the catch_warnings() block. + if reason is None: + reason = f"could not import {modname!r}: {exc}" + skipped = Skipped(reason, allow_module_level=True) + + if warn_on_import_error and not isinstance(exc, ModuleNotFoundError): + lines = [ + "", + f"Module '{modname}' was found, but when imported by pytest it raised:", + f" {exc!r}", + "In pytest 9.1 this warning will become an error by default.", + "You can fix the underlying problem, or alternatively overwrite this behavior and silence this " + "warning by passing exc_type=ImportError explicitly.", + "See https://docs.pytest.org/en/stable/deprecations.html#pytest-importorskip-default-behavior-regarding-importerror", + ] + warning = PytestDeprecationWarning("\n".join(lines)) + + if warning: + warnings.warn(warning, stacklevel=2) + if skipped: + raise skipped + + mod = sys.modules[modname] + if minversion is None: + return mod + verattr = getattr(mod, "__version__", None) + if minversion is not None: + # Imported lazily to improve start-up time. 
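Typical ``importorskip`` calls, exercising the ``minversion`` check completed just below (the module names are only examples)::

    import pytest

    # Skip the requesting test if numpy is missing or older than 1.0.
    np = pytest.importorskip("numpy", minversion="1.0")

    # Opt in to the stricter future behavior: only ModuleNotFoundError
    # skips; a module that is found but fails to import still errors.
    yaml = pytest.importorskip("yaml", exc_type=ModuleNotFoundError)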
+ from packaging.version import Version + + if verattr is None or Version(verattr) < Version(minversion): + raise Skipped( + f"module {modname!r} has __version__ {verattr!r}, required is: {minversion!r}", + allow_module_level=True, + ) + return mod diff --git a/.venv/lib/python3.11/site-packages/_pytest/pastebin.py b/.venv/lib/python3.11/site-packages/_pytest/pastebin.py new file mode 100644 index 0000000..c7b39d9 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/pastebin.py @@ -0,0 +1,117 @@ +# mypy: allow-untyped-defs +"""Submit failure or test session information to a pastebin service.""" + +from __future__ import annotations + +from io import StringIO +import tempfile +from typing import IO + +from _pytest.config import Config +from _pytest.config import create_terminal_writer +from _pytest.config.argparsing import Parser +from _pytest.stash import StashKey +from _pytest.terminal import TerminalReporter +import pytest + + +pastebinfile_key = StashKey[IO[bytes]]() + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("terminal reporting") + group.addoption( + "--pastebin", + metavar="mode", + action="store", + dest="pastebin", + default=None, + choices=["failed", "all"], + help="Send failed|all info to bpaste.net pastebin service", + ) + + +@pytest.hookimpl(trylast=True) +def pytest_configure(config: Config) -> None: + if config.option.pastebin == "all": + tr = config.pluginmanager.getplugin("terminalreporter") + # If no terminal reporter plugin is present, nothing we can do here; + # this can happen when this function executes in a worker node + # when using pytest-xdist, for example. + if tr is not None: + # pastebin file will be UTF-8 encoded binary file. + config.stash[pastebinfile_key] = tempfile.TemporaryFile("w+b") + oldwrite = tr._tw.write + + def tee_write(s, **kwargs): + oldwrite(s, **kwargs) + if isinstance(s, str): + s = s.encode("utf-8") + config.stash[pastebinfile_key].write(s) + + tr._tw.write = tee_write + + +def pytest_unconfigure(config: Config) -> None: + if pastebinfile_key in config.stash: + pastebinfile = config.stash[pastebinfile_key] + # Get terminal contents and delete file. + pastebinfile.seek(0) + sessionlog = pastebinfile.read() + pastebinfile.close() + del config.stash[pastebinfile_key] + # Undo our patching in the terminal reporter. + tr = config.pluginmanager.getplugin("terminalreporter") + del tr._tw.__dict__["write"] + # Write summary. + tr.write_sep("=", "Sending information to Paste Service") + pastebinurl = create_new_paste(sessionlog) + tr.write_line(f"pastebin session-log: {pastebinurl}\n") + + +def create_new_paste(contents: str | bytes) -> str: + """Create a new paste using the bpaste.net service. + + :contents: Paste contents string. + :returns: URL to the pasted contents, or an error message. + """ + import re + from urllib.error import HTTPError + from urllib.parse import urlencode + from urllib.request import urlopen + + params = {"code": contents, "lexer": "text", "expiry": "1week"} + url = "https://bpa.st" + try: + response: str = ( + urlopen(url, data=urlencode(params).encode("ascii")).read().decode("utf-8") + ) + except HTTPError as e: + with e: # HTTPErrors are also http responses that must be closed! 
+ return f"bad response: {e}" + except OSError as e: # eg urllib.error.URLError + return f"bad response: {e}" + m = re.search(r'href="/raw/(\w+)"', response) + if m: + return f"{url}/show/{m.group(1)}" + else: + return "bad response: invalid format ('" + response + "')" + + +def pytest_terminal_summary(terminalreporter: TerminalReporter) -> None: + if terminalreporter.config.option.pastebin != "failed": + return + if "failed" in terminalreporter.stats: + terminalreporter.write_sep("=", "Sending information to Paste Service") + for rep in terminalreporter.stats["failed"]: + try: + msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc + except AttributeError: + msg = terminalreporter._getfailureheadline(rep) + file = StringIO() + tw = create_terminal_writer(terminalreporter.config, file) + rep.toterminal(tw) + s = file.getvalue() + assert len(s) + pastebinurl = create_new_paste(s) + terminalreporter.write_line(f"{msg} --> {pastebinurl}") diff --git a/.venv/lib/python3.11/site-packages/_pytest/pathlib.py b/.venv/lib/python3.11/site-packages/_pytest/pathlib.py new file mode 100644 index 0000000..b69e854 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/pathlib.py @@ -0,0 +1,1055 @@ +from __future__ import annotations + +import atexit +from collections.abc import Callable +from collections.abc import Iterable +from collections.abc import Iterator +import contextlib +from enum import Enum +from errno import EBADF +from errno import ELOOP +from errno import ENOENT +from errno import ENOTDIR +import fnmatch +from functools import partial +from importlib.machinery import ModuleSpec +from importlib.machinery import PathFinder +import importlib.util +import itertools +import os +from os.path import expanduser +from os.path import expandvars +from os.path import isabs +from os.path import sep +from pathlib import Path +from pathlib import PurePath +from posixpath import sep as posix_sep +import shutil +import sys +import types +from types import ModuleType +from typing import Any +from typing import TypeVar +import uuid +import warnings + +from _pytest.compat import assert_never +from _pytest.outcomes import skip +from _pytest.warning_types import PytestWarning + + +if sys.version_info < (3, 11): + from importlib._bootstrap_external import _NamespaceLoader as NamespaceLoader +else: + from importlib.machinery import NamespaceLoader + +LOCK_TIMEOUT = 60 * 60 * 24 * 3 + +_AnyPurePath = TypeVar("_AnyPurePath", bound=PurePath) + +# The following function, variables and comments were +# copied from cpython 3.9 Lib/pathlib.py file. + +# EBADF - guard against macOS `stat` throwing EBADF +_IGNORED_ERRORS = (ENOENT, ENOTDIR, EBADF, ELOOP) + +_IGNORED_WINERRORS = ( + 21, # ERROR_NOT_READY - drive exists but is not accessible + 1921, # ERROR_CANT_RESOLVE_FILENAME - fix for broken symlink pointing to itself +) + + +def _ignore_error(exception: Exception) -> bool: + return ( + getattr(exception, "errno", None) in _IGNORED_ERRORS + or getattr(exception, "winerror", None) in _IGNORED_WINERRORS + ) + + +def get_lock_path(path: _AnyPurePath) -> _AnyPurePath: + return path.joinpath(".lock") + + +def on_rm_rf_error( + func: Callable[..., Any] | None, + path: str, + excinfo: BaseException + | tuple[type[BaseException], BaseException, types.TracebackType | None], + *, + start_path: Path, +) -> bool: + """Handle known read-only errors during rmtree. + + The returned value is used only by our own tests. 
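The handler documented above is what lets ``rm_rf`` (defined further down in this file) delete trees containing read-only entries; a sketch of the effect, assuming an illustrative scratch path and POSIX permissions::

    import os
    import stat
    from pathlib import Path

    from _pytest.pathlib import rm_rf

    tree = Path("/tmp/rm-rf-demo")  # hypothetical scratch directory
    (tree / "sub").mkdir(parents=True)
    (tree / "sub" / "data.txt").write_text("x")
    os.chmod(tree / "sub", stat.S_IRUSR | stat.S_IXUSR)  # make it read-only

    # on_rm_rf_error chmods the offending entries back to rw and retries.
    rm_rf(tree)
    assert not tree.exists()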
+ """ + if isinstance(excinfo, BaseException): + exc = excinfo + else: + exc = excinfo[1] + + # Another process removed the file in the middle of the "rm_rf" (xdist for example). + # More context: https://github.com/pytest-dev/pytest/issues/5974#issuecomment-543799018 + if isinstance(exc, FileNotFoundError): + return False + + if not isinstance(exc, PermissionError): + warnings.warn( + PytestWarning(f"(rm_rf) error removing {path}\n{type(exc)}: {exc}") + ) + return False + + if func not in (os.rmdir, os.remove, os.unlink): + if func not in (os.open,): + warnings.warn( + PytestWarning( + f"(rm_rf) unknown function {func} when removing {path}:\n{type(exc)}: {exc}" + ) + ) + return False + + # Chmod + retry. + import stat + + def chmod_rw(p: str) -> None: + mode = os.stat(p).st_mode + os.chmod(p, mode | stat.S_IRUSR | stat.S_IWUSR) + + # For files, we need to recursively go upwards in the directories to + # ensure they all are also writable. + p = Path(path) + if p.is_file(): + for parent in p.parents: + chmod_rw(str(parent)) + # Stop when we reach the original path passed to rm_rf. + if parent == start_path: + break + chmod_rw(str(path)) + + func(path) + return True + + +def ensure_extended_length_path(path: Path) -> Path: + """Get the extended-length version of a path (Windows). + + On Windows, by default, the maximum length of a path (MAX_PATH) is 260 + characters, and operations on paths longer than that fail. But it is possible + to overcome this by converting the path to "extended-length" form before + performing the operation: + https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file#maximum-path-length-limitation + + On Windows, this function returns the extended-length absolute version of path. + On other platforms it returns path unchanged. + """ + if sys.platform.startswith("win32"): + path = path.resolve() + path = Path(get_extended_length_path_str(str(path))) + return path + + +def get_extended_length_path_str(path: str) -> str: + """Convert a path to a Windows extended length path.""" + long_path_prefix = "\\\\?\\" + unc_long_path_prefix = "\\\\?\\UNC\\" + if path.startswith((long_path_prefix, unc_long_path_prefix)): + return path + # UNC + if path.startswith("\\\\"): + return unc_long_path_prefix + path[2:] + return long_path_prefix + path + + +def rm_rf(path: Path) -> None: + """Remove the path contents recursively, even if some elements + are read-only.""" + path = ensure_extended_length_path(path) + onerror = partial(on_rm_rf_error, start_path=path) + if sys.version_info >= (3, 12): + shutil.rmtree(str(path), onexc=onerror) + else: + shutil.rmtree(str(path), onerror=onerror) + + +def find_prefixed(root: Path, prefix: str) -> Iterator[os.DirEntry[str]]: + """Find all elements in root that begin with the prefix, case-insensitive.""" + l_prefix = prefix.lower() + for x in os.scandir(root): + if x.name.lower().startswith(l_prefix): + yield x + + +def extract_suffixes(iter: Iterable[os.DirEntry[str]], prefix: str) -> Iterator[str]: + """Return the parts of the paths following the prefix. + + :param iter: Iterator over path names. + :param prefix: Expected prefix of the path names. 
+ """ + p_len = len(prefix) + for entry in iter: + yield entry.name[p_len:] + + +def find_suffixes(root: Path, prefix: str) -> Iterator[str]: + """Combine find_prefixes and extract_suffixes.""" + return extract_suffixes(find_prefixed(root, prefix), prefix) + + +def parse_num(maybe_num: str) -> int: + """Parse number path suffixes, returns -1 on error.""" + try: + return int(maybe_num) + except ValueError: + return -1 + + +def _force_symlink(root: Path, target: str | PurePath, link_to: str | Path) -> None: + """Helper to create the current symlink. + + It's full of race conditions that are reasonably OK to ignore + for the context of best effort linking to the latest test run. + + The presumption being that in case of much parallelism + the inaccuracy is going to be acceptable. + """ + current_symlink = root.joinpath(target) + try: + current_symlink.unlink() + except OSError: + pass + try: + current_symlink.symlink_to(link_to) + except Exception: + pass + + +def make_numbered_dir(root: Path, prefix: str, mode: int = 0o700) -> Path: + """Create a directory with an increased number as suffix for the given prefix.""" + for i in range(10): + # try up to 10 times to create the folder + max_existing = max(map(parse_num, find_suffixes(root, prefix)), default=-1) + new_number = max_existing + 1 + new_path = root.joinpath(f"{prefix}{new_number}") + try: + new_path.mkdir(mode=mode) + except Exception: + pass + else: + _force_symlink(root, prefix + "current", new_path) + return new_path + else: + raise OSError( + "could not create numbered dir with prefix " + f"{prefix} in {root} after 10 tries" + ) + + +def create_cleanup_lock(p: Path) -> Path: + """Create a lock to prevent premature folder cleanup.""" + lock_path = get_lock_path(p) + try: + fd = os.open(str(lock_path), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644) + except FileExistsError as e: + raise OSError(f"cannot create lockfile in {p}") from e + else: + pid = os.getpid() + spid = str(pid).encode() + os.write(fd, spid) + os.close(fd) + if not lock_path.is_file(): + raise OSError("lock path got renamed after successful creation") + return lock_path + + +def register_cleanup_lock_removal( + lock_path: Path, register: Any = atexit.register +) -> Any: + """Register a cleanup function for removing a lock, by default on atexit.""" + pid = os.getpid() + + def cleanup_on_exit(lock_path: Path = lock_path, original_pid: int = pid) -> None: + current_pid = os.getpid() + if current_pid != original_pid: + # fork + return + try: + lock_path.unlink() + except OSError: + pass + + return register(cleanup_on_exit) + + +def maybe_delete_a_numbered_dir(path: Path) -> None: + """Remove a numbered directory if its lock can be obtained and it does + not seem to be in use.""" + path = ensure_extended_length_path(path) + lock_path = None + try: + lock_path = create_cleanup_lock(path) + parent = path.parent + + garbage = parent.joinpath(f"garbage-{uuid.uuid4()}") + path.rename(garbage) + rm_rf(garbage) + except OSError: + # known races: + # * other process did a cleanup at the same time + # * deletable folder was found + # * process cwd (Windows) + return + finally: + # If we created the lock, ensure we remove it even if we failed + # to properly remove the numbered dir. 
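How the numbered-directory primitives above compose (the base directory is illustrative)::

    from pathlib import Path

    from _pytest.pathlib import (
        create_cleanup_lock,
        make_numbered_dir,
        register_cleanup_lock_removal,
    )

    root = Path("/tmp/pytest-demo")  # hypothetical base directory
    root.mkdir(exist_ok=True)

    d = make_numbered_dir(root, prefix="run-")  # run-0, then run-1, ...
    lock = create_cleanup_lock(d)               # writes d/.lock holding our pid
    register_cleanup_lock_removal(lock)         # unlinks the lock at exit
    # root / "run-current" is now a best-effort symlink to the newest dir.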
+ if lock_path is not None: + try: + lock_path.unlink() + except OSError: + pass + + +def ensure_deletable(path: Path, consider_lock_dead_if_created_before: float) -> bool: + """Check if `path` is deletable based on whether the lock file is expired.""" + if path.is_symlink(): + return False + lock = get_lock_path(path) + try: + if not lock.is_file(): + return True + except OSError: + # we might not have access to the lock file at all, in this case assume + # we don't have access to the entire directory (#7491). + return False + try: + lock_time = lock.stat().st_mtime + except Exception: + return False + else: + if lock_time < consider_lock_dead_if_created_before: + # We want to ignore any errors while trying to remove the lock such as: + # - PermissionDenied, like the file permissions have changed since the lock creation; + # - FileNotFoundError, in case another pytest process got here first; + # and any other cause of failure. + with contextlib.suppress(OSError): + lock.unlink() + return True + return False + + +def try_cleanup(path: Path, consider_lock_dead_if_created_before: float) -> None: + """Try to cleanup a folder if we can ensure it's deletable.""" + if ensure_deletable(path, consider_lock_dead_if_created_before): + maybe_delete_a_numbered_dir(path) + + +def cleanup_candidates(root: Path, prefix: str, keep: int) -> Iterator[Path]: + """List candidates for numbered directories to be removed - follows py.path.""" + max_existing = max(map(parse_num, find_suffixes(root, prefix)), default=-1) + max_delete = max_existing - keep + entries = find_prefixed(root, prefix) + entries, entries2 = itertools.tee(entries) + numbers = map(parse_num, extract_suffixes(entries2, prefix)) + for entry, number in zip(entries, numbers): + if number <= max_delete: + yield Path(entry) + + +def cleanup_dead_symlinks(root: Path) -> None: + for left_dir in root.iterdir(): + if left_dir.is_symlink(): + if not left_dir.resolve().exists(): + left_dir.unlink() + + +def cleanup_numbered_dir( + root: Path, prefix: str, keep: int, consider_lock_dead_if_created_before: float +) -> None: + """Cleanup for lock driven numbered directories.""" + if not root.exists(): + return + for path in cleanup_candidates(root, prefix, keep): + try_cleanup(path, consider_lock_dead_if_created_before) + for path in root.glob("garbage-*"): + try_cleanup(path, consider_lock_dead_if_created_before) + + cleanup_dead_symlinks(root) + + +def make_numbered_dir_with_cleanup( + root: Path, + prefix: str, + keep: int, + lock_timeout: float, + mode: int, +) -> Path: + """Create a numbered dir with a cleanup lock and remove old ones.""" + e = None + for i in range(10): + try: + p = make_numbered_dir(root, prefix, mode) + # Only lock the current dir when keep is not 0 + if keep != 0: + lock_path = create_cleanup_lock(p) + register_cleanup_lock_removal(lock_path) + except Exception as exc: + e = exc + else: + consider_lock_dead_if_created_before = p.stat().st_mtime - lock_timeout + # Register a cleanup for program exit + atexit.register( + cleanup_numbered_dir, + root, + prefix, + keep, + consider_lock_dead_if_created_before, + ) + return p + assert e is not None + raise e + + +def resolve_from_str(input: str, rootpath: Path) -> Path: + input = expanduser(input) + input = expandvars(input) + if isabs(input): + return Path(input) + else: + return rootpath.joinpath(input) + + +def fnmatch_ex(pattern: str, path: str | os.PathLike[str]) -> bool: + """A port of FNMatcher from py.path.common which works with PurePath() instances. 
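This is the machinery behind pytest's rotating temporary directories; a sketch of the entry point defined above, with illustrative values::

    from pathlib import Path

    from _pytest.pathlib import LOCK_TIMEOUT, make_numbered_dir_with_cleanup

    base = Path("/tmp/pytest-of-user")  # hypothetical
    base.mkdir(exist_ok=True)
    # Creates base/run-N, locks it, and registers an atexit cleanup that
    # keeps only the 3 newest runs and removes stale garbage-* leftovers.
    p = make_numbered_dir_with_cleanup(
        base, prefix="run-", keep=3, lock_timeout=LOCK_TIMEOUT, mode=0o700
    )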
+ + The difference between this algorithm and PurePath.match() is that the + latter matches "**" glob expressions for each part of the path, while + this algorithm uses the whole path instead. + + For example: + "tests/foo/bar/doc/test_foo.py" matches pattern "tests/**/doc/test*.py" + with this algorithm, but not with PurePath.match(). + + This algorithm was ported to keep backward-compatibility with existing + settings which assume paths match according this logic. + + References: + * https://bugs.python.org/issue29249 + * https://bugs.python.org/issue34731 + """ + path = PurePath(path) + iswin32 = sys.platform.startswith("win") + + if iswin32 and sep not in pattern and posix_sep in pattern: + # Running on Windows, the pattern has no Windows path separators, + # and the pattern has one or more Posix path separators. Replace + # the Posix path separators with the Windows path separator. + pattern = pattern.replace(posix_sep, sep) + + if sep not in pattern: + name = path.name + else: + name = str(path) + if path.is_absolute() and not os.path.isabs(pattern): + pattern = f"*{os.sep}{pattern}" + return fnmatch.fnmatch(name, pattern) + + +def parts(s: str) -> set[str]: + parts = s.split(sep) + return {sep.join(parts[: i + 1]) or sep for i in range(len(parts))} + + +def symlink_or_skip( + src: os.PathLike[str] | str, + dst: os.PathLike[str] | str, + **kwargs: Any, +) -> None: + """Make a symlink, or skip the test in case symlinks are not supported.""" + try: + os.symlink(src, dst, **kwargs) + except OSError as e: + skip(f"symlinks not supported: {e}") + + +class ImportMode(Enum): + """Possible values for `mode` parameter of `import_path`.""" + + prepend = "prepend" + append = "append" + importlib = "importlib" + + +class ImportPathMismatchError(ImportError): + """Raised on import_path() if there is a mismatch of __file__'s. + + This can happen when `import_path` is called multiple times with different filenames that has + the same basename but reside in packages + (for example "/tests1/test_foo.py" and "/tests2/test_foo.py"). + """ + + +def import_path( + path: str | os.PathLike[str], + *, + mode: str | ImportMode = ImportMode.prepend, + root: Path, + consider_namespace_packages: bool, +) -> ModuleType: + """ + Import and return a module from the given path, which can be a file (a module) or + a directory (a package). + + :param path: + Path to the file to import. + + :param mode: + Controls the underlying import mechanism that will be used: + + * ImportMode.prepend: the directory containing the module (or package, taking + `__init__.py` files into account) will be put at the *start* of `sys.path` before + being imported with `importlib.import_module`. + + * ImportMode.append: same as `prepend`, but the directory will be appended + to the end of `sys.path`, if not already in `sys.path`. + + * ImportMode.importlib: uses more fine control mechanisms provided by `importlib` + to import the module, which avoids having to muck with `sys.path` at all. It effectively + allows having same-named test modules in different places. + + :param root: + Used as an anchor when mode == ImportMode.importlib to obtain + a unique name for the module being imported so it can safely be stored + into ``sys.modules``. + + :param consider_namespace_packages: + If True, consider namespace packages when resolving module names. + + :raises ImportPathMismatchError: + If after importing the given `path` and the module `__file__` + are different. Only raised in `prepend` and `append` modes. 
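The whole-path matching described in the ``fnmatch_ex`` docstring above, restated as executable asserts (POSIX separators assumed)::

    from pathlib import PurePath

    from _pytest.pathlib import fnmatch_ex

    # "**" spans directory separators here, unlike PurePath.match():
    assert fnmatch_ex("tests/**/doc/test*.py", "tests/foo/bar/doc/test_foo.py")
    assert not PurePath("tests/foo/bar/doc/test_foo.py").match("tests/**/doc/test*.py")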
+ """ + path = Path(path) + mode = ImportMode(mode) + + if not path.exists(): + raise ImportError(path) + + if mode is ImportMode.importlib: + # Try to import this module using the standard import mechanisms, but + # without touching sys.path. + try: + pkg_root, module_name = resolve_pkg_root_and_module_name( + path, consider_namespace_packages=consider_namespace_packages + ) + except CouldNotResolvePathError: + pass + else: + # If the given module name is already in sys.modules, do not import it again. + with contextlib.suppress(KeyError): + return sys.modules[module_name] + + mod = _import_module_using_spec( + module_name, path, pkg_root, insert_modules=False + ) + if mod is not None: + return mod + + # Could not import the module with the current sys.path, so we fall back + # to importing the file as a single module, not being a part of a package. + module_name = module_name_from_path(path, root) + with contextlib.suppress(KeyError): + return sys.modules[module_name] + + mod = _import_module_using_spec( + module_name, path, path.parent, insert_modules=True + ) + if mod is None: + raise ImportError(f"Can't find module {module_name} at location {path}") + return mod + + try: + pkg_root, module_name = resolve_pkg_root_and_module_name( + path, consider_namespace_packages=consider_namespace_packages + ) + except CouldNotResolvePathError: + pkg_root, module_name = path.parent, path.stem + + # Change sys.path permanently: restoring it at the end of this function would cause surprising + # problems because of delayed imports: for example, a conftest.py file imported by this function + # might have local imports, which would fail at runtime if we restored sys.path. + if mode is ImportMode.append: + if str(pkg_root) not in sys.path: + sys.path.append(str(pkg_root)) + elif mode is ImportMode.prepend: + if str(pkg_root) != sys.path[0]: + sys.path.insert(0, str(pkg_root)) + else: + assert_never(mode) + + importlib.import_module(module_name) + + mod = sys.modules[module_name] + if path.name == "__init__.py": + return mod + + ignore = os.environ.get("PY_IGNORE_IMPORTMISMATCH", "") + if ignore != "1": + module_file = mod.__file__ + if module_file is None: + raise ImportPathMismatchError(module_name, module_file, path) + + if module_file.endswith((".pyc", ".pyo")): + module_file = module_file[:-1] + if module_file.endswith(os.sep + "__init__.py"): + module_file = module_file[: -(len(os.sep + "__init__.py"))] + + try: + is_same = _is_same(str(path), module_file) + except FileNotFoundError: + is_same = False + + if not is_same: + raise ImportPathMismatchError(module_name, module_file, path) + + return mod + + +def _import_module_using_spec( + module_name: str, module_path: Path, module_location: Path, *, insert_modules: bool +) -> ModuleType | None: + """ + Tries to import a module by its canonical name, path, and its parent location. + + :param module_name: + The expected module name, will become the key of `sys.modules`. + + :param module_path: + The file path of the module, for example `/foo/bar/test_demo.py`. + If module is a package, pass the path to the `__init__.py` of the package. + If module is a namespace package, pass directory path. + + :param module_location: + The parent location of the module. + If module is a package, pass the directory containing the `__init__.py` file. + + :param insert_modules: + If True, will call `insert_missing_modules` to create empty intermediate modules + with made-up module names (when importing test files not reachable from `sys.path`). 
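A sketch of calling ``import_path`` in ``importlib`` mode (the paths are illustrative)::

    from pathlib import Path

    from _pytest.pathlib import import_path, ImportMode

    root = Path("/projects/demo")  # hypothetical rootdir
    mod = import_path(
        root / "tests" / "test_foo.py",
        mode=ImportMode.importlib,
        root=root,
        consider_namespace_packages=False,
    )
    # Without a containing package, the module is registered in
    # sys.modules under a root-derived name such as "tests.test_foo".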
+
+    Example 1 of parent_module_*:
+
+        module_name: "a.b.c.demo"
+        module_path: Path("a/b/c/demo.py")
+        module_location: Path("a/b/c/")
+        if "a.b.c" is package ("a/b/c/__init__.py" exists), then
+            parent_module_name: "a.b.c"
+            parent_module_path: Path("a/b/c/__init__.py")
+            parent_module_location: Path("a/b/c/")
+        else:
+            parent_module_name: "a.b.c"
+            parent_module_path: Path("a/b/c")
+            parent_module_location: Path("a/b/")
+
+    Example 2 of parent_module_*:
+
+        module_name: "a.b.c"
+        module_path: Path("a/b/c/__init__.py")
+        module_location: Path("a/b/c/")
+        if "a.b" is package ("a/b/__init__.py" exists), then
+            parent_module_name: "a.b"
+            parent_module_path: Path("a/b/__init__.py")
+            parent_module_location: Path("a/b/")
+        else:
+            parent_module_name: "a.b"
+            parent_module_path: Path("a/b/")
+            parent_module_location: Path("a/")
+    """
+    # Attempt to import the parent module, as it seems to be our responsibility:
+    # https://github.com/python/cpython/blob/73906d5c908c1e0b73c5436faeff7d93698fc074/Lib/importlib/_bootstrap.py#L1308-L1311
+    parent_module_name, _, name = module_name.rpartition(".")
+    parent_module: ModuleType | None = None
+    if parent_module_name:
+        parent_module = sys.modules.get(parent_module_name)
+        # If the parent_module lacks the `__path__` attribute, an AttributeError is raised
+        # when finding a submodule's spec, so it must be re-imported according to the path.
+        need_reimport = not hasattr(parent_module, "__path__")
+        if parent_module is None or need_reimport:
+            # Get parent_location based on location, get parent_path based on path.
+            if module_path.name == "__init__.py":
+                # If the current module is in a package, we need to leave the
+                # package first and then enter the parent module.
+                parent_module_path = module_path.parent.parent
+            else:
+                parent_module_path = module_path.parent
+
+            if (parent_module_path / "__init__.py").is_file():
+                # If the parent module is a package, load it via its __init__.py file.
+                parent_module_path = parent_module_path / "__init__.py"
+
+            parent_module = _import_module_using_spec(
+                parent_module_name,
+                parent_module_path,
+                parent_module_path.parent,
+                insert_modules=insert_modules,
+            )
+
+    # Checking with sys.meta_path first in case one of its hooks can import this module,
+    # such as our own assertion-rewrite hook.
+    for meta_importer in sys.meta_path:
+        module_name_of_meta = getattr(meta_importer.__class__, "__module__", "")
+        if module_name_of_meta == "_pytest.assertion.rewrite" and module_path.is_file():
+            # Import modules in subdirectories by module_path
+            # to ensure assertion rewrites are not missed (#12659).
+            find_spec_path = [str(module_location), str(module_path)]
+        else:
+            find_spec_path = [str(module_location)]
+
+        spec = meta_importer.find_spec(module_name, find_spec_path)
+
+        if spec_matches_module_path(spec, module_path):
+            break
+    else:
+        loader = None
+        if module_path.is_dir():
+            # The `spec_from_file_location` matches a loader based on the file extension by default.
+            # For a namespace package, we need to specify a loader manually.
+            loader = NamespaceLoader(name, module_path, PathFinder())  # type: ignore[arg-type]
+
+        spec = importlib.util.spec_from_file_location(
+            module_name, str(module_path), loader=loader
+        )
+
+    if spec_matches_module_path(spec, module_path):
+        assert spec is not None
+        # Find spec and import this module.
+        mod = importlib.util.module_from_spec(spec)
+        sys.modules[module_name] = mod
+        spec.loader.exec_module(mod)  # type: ignore[union-attr]
+
+        # Set this module as an attribute of the parent module (#12194).
+        if parent_module is not None:
+            setattr(parent_module, name, mod)
+
+        if insert_modules:
+            insert_missing_modules(sys.modules, module_name)
+        return mod
+
+    return None
+
+
+def spec_matches_module_path(module_spec: ModuleSpec | None, module_path: Path) -> bool:
+    """Return true if the given ModuleSpec can be used to import the given module path."""
+    if module_spec is None:
+        return False
+
+    if module_spec.origin:
+        return Path(module_spec.origin) == module_path
+
+    # Compare the path with the `module_spec.submodule_search_locations` in case
+    # the module is part of a namespace package.
+    # https://docs.python.org/3/library/importlib.html#importlib.machinery.ModuleSpec.submodule_search_locations
+    if module_spec.submodule_search_locations:  # can be None.
+        for path in module_spec.submodule_search_locations:
+            if Path(path) == module_path:
+                return True
+
+    return False
+
+
+# Implement a special _is_same function on Windows which returns True if the two filenames
+# compare equal, to circumvent os.path.samefile returning False for mounts in UNC (#7678).
+if sys.platform.startswith("win"):
+
+    def _is_same(f1: str, f2: str) -> bool:
+        return Path(f1) == Path(f2) or os.path.samefile(f1, f2)
+
+else:
+
+    def _is_same(f1: str, f2: str) -> bool:
+        return os.path.samefile(f1, f2)
+
+
+def module_name_from_path(path: Path, root: Path) -> str:
+    """
+    Return a dotted module name based on the given path, anchored on root.
+
+    For example: path="/projects/src/tests/test_foo.py" and root="/projects", the
+    resulting module name will be "src.tests.test_foo".
+    """
+    path = path.with_suffix("")
+    try:
+        relative_path = path.relative_to(root)
+    except ValueError:
+        # If we can't get a relative path to root, use the full path, except
+        # for the first part ("d:\\" or "/" depending on the platform, for example).
+        path_parts = path.parts[1:]
+    else:
+        # Use the parts for the relative path to the root path.
+        path_parts = relative_path.parts
+
+    # Module names for packages do not contain the __init__ file, unless
+    # the `__init__.py` file is at the root.
+    if len(path_parts) >= 2 and path_parts[-1] == "__init__":
+        path_parts = path_parts[:-1]
+
+    # Module names cannot contain ".", normalize them to "_". This prevents
+    # a directory having a "." in the name (".env.310" for example) causing extra intermediate modules.
+    # Also, important to replace "." at the start of paths, as those are considered relative imports.
+    path_parts = tuple(x.replace(".", "_") for x in path_parts)
+
+    return ".".join(path_parts)
+
+
+def insert_missing_modules(modules: dict[str, ModuleType], module_name: str) -> None:
+    """
+    Used by ``import_path`` to create intermediate modules when using mode=importlib.
+
+    When we want to import a module as "src.tests.test_foo" for example, we need
+    to create empty modules "src" and "src.tests" after inserting "src.tests.test_foo",
+    otherwise "src.tests.test_foo" is not importable by ``__import__``.
+    """
+    module_parts = module_name.split(".")
+    while module_name:
+        parent_module_name, _, child_name = module_name.rpartition(".")
+        if parent_module_name:
+            parent_module = modules.get(parent_module_name)
+            if parent_module is None:
+                try:
+                    # If sys.meta_path is empty, calling import_module will issue
+                    # a warning and raise ModuleNotFoundError. To avoid the
+                    # warning, we check sys.meta_path explicitly and raise the error
+                    # ourselves to fall back to creating a dummy module.
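The naming rules of ``module_name_from_path``, restated as asserts (POSIX paths, illustrative)::

    from pathlib import Path

    from _pytest.pathlib import module_name_from_path

    root = Path("/projects")
    assert (
        module_name_from_path(Path("/projects/src/tests/test_foo.py"), root)
        == "src.tests.test_foo"
    )
    # A trailing "__init__" is dropped, and "." in a directory name becomes "_":
    assert module_name_from_path(Path("/projects/pkg/__init__.py"), root) == "pkg"
    assert module_name_from_path(Path("/projects/.env.310/mod.py"), root) == "_env_310.mod"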
+ if not sys.meta_path: + raise ModuleNotFoundError + parent_module = importlib.import_module(parent_module_name) + except ModuleNotFoundError: + parent_module = ModuleType( + module_name, + doc="Empty module created by pytest's importmode=importlib.", + ) + modules[parent_module_name] = parent_module + + # Add child attribute to the parent that can reference the child + # modules. + if not hasattr(parent_module, child_name): + setattr(parent_module, child_name, modules[module_name]) + + module_parts.pop(-1) + module_name = ".".join(module_parts) + + +def resolve_package_path(path: Path) -> Path | None: + """Return the Python package path by looking for the last + directory upwards which still contains an __init__.py. + + Returns None if it cannot be determined. + """ + result = None + for parent in itertools.chain((path,), path.parents): + if parent.is_dir(): + if not (parent / "__init__.py").is_file(): + break + if not parent.name.isidentifier(): + break + result = parent + return result + + +def resolve_pkg_root_and_module_name( + path: Path, *, consider_namespace_packages: bool = False +) -> tuple[Path, str]: + """ + Return the path to the directory of the root package that contains the + given Python file, and its module name: + + src/ + app/ + __init__.py + core/ + __init__.py + models.py + + Passing the full path to `models.py` will yield Path("src") and "app.core.models". + + If consider_namespace_packages is True, then we additionally check upwards in the hierarchy + for namespace packages: + + https://packaging.python.org/en/latest/guides/packaging-namespace-packages + + Raises CouldNotResolvePathError if the given path does not belong to a package (missing any __init__.py files). + """ + pkg_root: Path | None = None + pkg_path = resolve_package_path(path) + if pkg_path is not None: + pkg_root = pkg_path.parent + if consider_namespace_packages: + start = pkg_root if pkg_root is not None else path.parent + for candidate in (start, *start.parents): + module_name = compute_module_name(candidate, path) + if module_name and is_importable(module_name, path): + # Point the pkg_root to the root of the namespace package. + pkg_root = candidate + break + + if pkg_root is not None: + module_name = compute_module_name(pkg_root, path) + if module_name: + return pkg_root, module_name + + raise CouldNotResolvePathError(f"Could not resolve for {path}") + + +def is_importable(module_name: str, module_path: Path) -> bool: + """ + Return if the given module path could be imported normally by Python, akin to the user + entering the REPL and importing the corresponding module name directly, and corresponds + to the module_path specified. + + :param module_name: + Full module name that we want to check if is importable. + For example, "app.models". + + :param module_path: + Full path to the python module/package we want to check if is importable. + For example, "/projects/src/app/models.py". + """ + try: + # Note this is different from what we do in ``_import_module_using_spec``, where we explicitly search through + # sys.meta_path to be able to pass the path of the module that we want to import (``meta_importer.find_spec``). + # Using importlib.util.find_spec() is different, it gives the same results as trying to import + # the module normally in the REPL. 
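The docstring's layout above, walked through concretely (assuming ``src/app/__init__.py`` and ``src/app/core/__init__.py`` exist on disk)::

    from pathlib import Path

    from _pytest.pathlib import resolve_pkg_root_and_module_name

    pkg_root, name = resolve_pkg_root_and_module_name(Path("src/app/core/models.py"))
    assert (pkg_root, name) == (Path("src"), "app.core.models")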
+ spec = importlib.util.find_spec(module_name) + except (ImportError, ValueError, ImportWarning): + return False + else: + return spec_matches_module_path(spec, module_path) + + +def compute_module_name(root: Path, module_path: Path) -> str | None: + """Compute a module name based on a path and a root anchor.""" + try: + path_without_suffix = module_path.with_suffix("") + except ValueError: + # Empty paths (such as Path.cwd()) might break meta_path hooks (like our own assertion rewriter). + return None + + try: + relative = path_without_suffix.relative_to(root) + except ValueError: # pragma: no cover + return None + names = list(relative.parts) + if not names: + return None + if names[-1] == "__init__": + names.pop() + return ".".join(names) + + +class CouldNotResolvePathError(Exception): + """Custom exception raised by resolve_pkg_root_and_module_name.""" + + +def scandir( + path: str | os.PathLike[str], + sort_key: Callable[[os.DirEntry[str]], object] = lambda entry: entry.name, +) -> list[os.DirEntry[str]]: + """Scan a directory recursively, in breadth-first order. + + The returned entries are sorted according to the given key. + The default is to sort by name. + If the directory does not exist, return an empty list. + """ + entries = [] + # Attempt to create a scandir iterator for the given path. + try: + scandir_iter = os.scandir(path) + except FileNotFoundError: + # If the directory does not exist, return an empty list. + return [] + # Use the scandir iterator in a context manager to ensure it is properly closed. + with scandir_iter as s: + for entry in s: + try: + entry.is_file() + except OSError as err: + if _ignore_error(err): + continue + # Reraise non-ignorable errors to avoid hiding issues. + raise + entries.append(entry) + entries.sort(key=sort_key) # type: ignore[arg-type] + return entries + + +def visit( + path: str | os.PathLike[str], recurse: Callable[[os.DirEntry[str]], bool] +) -> Iterator[os.DirEntry[str]]: + """Walk a directory recursively, in breadth-first order. + + The `recurse` predicate determines whether a directory is recursed. + + Entries at each directory level are sorted. + """ + entries = scandir(path) + yield from entries + for entry in entries: + if entry.is_dir() and recurse(entry): + yield from visit(entry.path, recurse) + + +def absolutepath(path: str | os.PathLike[str]) -> Path: + """Convert a path to an absolute path using os.path.abspath. + + Prefer this over Path.resolve() (see #6523). + Prefer this over Path.absolute() (not public, doesn't normalize). + """ + return Path(os.path.abspath(path)) + + +def commonpath(path1: Path, path2: Path) -> Path | None: + """Return the common part shared with the other path, or None if there is + no common part. + + If one path is relative and one is absolute, returns None. + """ + try: + return Path(os.path.commonpath((str(path1), str(path2)))) + except ValueError: + return None + + +def bestrelpath(directory: Path, dest: Path) -> str: + """Return a string which is a relative path from directory to dest such + that directory/bestrelpath == dest. + + The paths must be either both absolute or both relative. + + If no such path can be determined, returns dest. + """ + assert isinstance(directory, Path) + assert isinstance(dest, Path) + if dest == directory: + return os.curdir + # Find the longest common directory. + base = commonpath(directory, dest) + # Can be the case on Windows for two absolute paths on different drives. + # Can be the case for two relative paths without common prefix. 
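What ``commonpath`` and ``bestrelpath`` compute, as asserts (POSIX paths assumed; both absolute here)::

    import os
    from pathlib import Path

    from _pytest.pathlib import bestrelpath, commonpath

    assert commonpath(Path("/a/b/c"), Path("/a/b/d")) == Path("/a/b")
    # Invariant: directory / bestrelpath(directory, dest) points at dest.
    assert bestrelpath(Path("/a/b/c"), Path("/a/b/d")) == os.path.join(os.pardir, "d")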
+ # Can be the case for a relative path and an absolute path. + if not base: + return str(dest) + reldirectory = directory.relative_to(base) + reldest = dest.relative_to(base) + return os.path.join( + # Back from directory to base. + *([os.pardir] * len(reldirectory.parts)), + # Forward from base to dest. + *reldest.parts, + ) + + +def safe_exists(p: Path) -> bool: + """Like Path.exists(), but account for input arguments that might be too long (#11394).""" + try: + return p.exists() + except (ValueError, OSError): + # ValueError: stat: path too long for Windows + # OSError: [WinError 123] The filename, directory name, or volume label syntax is incorrect + return False diff --git a/.venv/lib/python3.11/site-packages/_pytest/py.typed b/.venv/lib/python3.11/site-packages/_pytest/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.11/site-packages/_pytest/pytester.py b/.venv/lib/python3.11/site-packages/_pytest/pytester.py new file mode 100644 index 0000000..59d2b0b --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/pytester.py @@ -0,0 +1,1775 @@ +# mypy: allow-untyped-defs +"""(Disabled by default) support for testing pytest and pytest plugins. + +PYTEST_DONT_REWRITE +""" + +from __future__ import annotations + +import collections.abc +from collections.abc import Callable +from collections.abc import Generator +from collections.abc import Iterable +from collections.abc import Sequence +import contextlib +from fnmatch import fnmatch +import gc +import importlib +from io import StringIO +import locale +import os +from pathlib import Path +import platform +import re +import shutil +import subprocess +import sys +import traceback +from typing import Any +from typing import Final +from typing import final +from typing import IO +from typing import Literal +from typing import overload +from typing import TextIO +from typing import TYPE_CHECKING +from weakref import WeakKeyDictionary + +from iniconfig import IniConfig +from iniconfig import SectionWrapper + +from _pytest import timing +from _pytest._code import Source +from _pytest.capture import _get_multicapture +from _pytest.compat import NOTSET +from _pytest.compat import NotSetType +from _pytest.config import _PluggyPlugin +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config import main +from _pytest.config import PytestPluginManager +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.main import Session +from _pytest.monkeypatch import MonkeyPatch +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.outcomes import fail +from _pytest.outcomes import importorskip +from _pytest.outcomes import skip +from _pytest.pathlib import bestrelpath +from _pytest.pathlib import make_numbered_dir +from _pytest.reports import CollectReport +from _pytest.reports import TestReport +from _pytest.tmpdir import TempPathFactory +from _pytest.warning_types import PytestFDWarning + + +if TYPE_CHECKING: + import pexpect + + +pytest_plugins = ["pytester_assertions"] + + +IGNORE_PAM = [ # filenames added when obtaining details about the current user + "/var/lib/sss/mc/passwd" +] + + +def pytest_addoption(parser: Parser) -> None: + parser.addoption( + "--lsof", + action="store_true", + dest="lsof", + default=False, + help="Run FD checks if lsof is available", + ) + + parser.addoption( + 
"--runpytest", + default="inprocess", + dest="runpytest", + choices=("inprocess", "subprocess"), + help=( + "Run pytest sub runs in tests using an 'inprocess' " + "or 'subprocess' (python -m main) method" + ), + ) + + parser.addini( + "pytester_example_dir", help="Directory to take the pytester example files from" + ) + + +def pytest_configure(config: Config) -> None: + if config.getvalue("lsof"): + checker = LsofFdLeakChecker() + if checker.matching_platform(): + config.pluginmanager.register(checker) + + config.addinivalue_line( + "markers", + "pytester_example_path(*path_segments): join the given path " + "segments to `pytester_example_dir` for this test.", + ) + + +class LsofFdLeakChecker: + def get_open_files(self) -> list[tuple[str, str]]: + if sys.version_info >= (3, 11): + # New in Python 3.11, ignores utf-8 mode + encoding = locale.getencoding() + else: + encoding = locale.getpreferredencoding(False) + out = subprocess.run( + ("lsof", "-Ffn0", "-p", str(os.getpid())), + stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL, + check=True, + text=True, + encoding=encoding, + ).stdout + + def isopen(line: str) -> bool: + return line.startswith("f") and ( + "deleted" not in line + and "mem" not in line + and "txt" not in line + and "cwd" not in line + ) + + open_files = [] + + for line in out.split("\n"): + if isopen(line): + fields = line.split("\0") + fd = fields[0][1:] + filename = fields[1][1:] + if filename in IGNORE_PAM: + continue + if filename.startswith("/"): + open_files.append((fd, filename)) + + return open_files + + def matching_platform(self) -> bool: + try: + subprocess.run(("lsof", "-v"), check=True) + except (OSError, subprocess.CalledProcessError): + return False + else: + return True + + @hookimpl(wrapper=True, tryfirst=True) + def pytest_runtest_protocol(self, item: Item) -> Generator[None, object, object]: + lines1 = self.get_open_files() + try: + return (yield) + finally: + if hasattr(sys, "pypy_version_info"): + gc.collect() + lines2 = self.get_open_files() + + new_fds = {t[0] for t in lines2} - {t[0] for t in lines1} + leaked_files = [t for t in lines2 if t[0] in new_fds] + if leaked_files: + error = [ + f"***** {len(leaked_files)} FD leakage detected", + *(str(f) for f in leaked_files), + "*** Before:", + *(str(f) for f in lines1), + "*** After:", + *(str(f) for f in lines2), + f"***** {len(leaked_files)} FD leakage detected", + "*** function {}:{}: {} ".format(*item.location), + "See issue #2366", + ] + item.warn(PytestFDWarning("\n".join(error))) + + +# used at least by pytest-xdist plugin + + +@fixture +def _pytest(request: FixtureRequest) -> PytestArg: + """Return a helper which offers a gethookrecorder(hook) method which + returns a HookRecorder instance which helps to make assertions about called + hooks.""" + return PytestArg(request) + + +class PytestArg: + def __init__(self, request: FixtureRequest) -> None: + self._request = request + + def gethookrecorder(self, hook) -> HookRecorder: + hookrecorder = HookRecorder(hook._pm) + self._request.addfinalizer(hookrecorder.finish_recording) + return hookrecorder + + +def get_public_names(values: Iterable[str]) -> list[str]: + """Only return names from iterator values without a leading underscore.""" + return [x for x in values if x[0] != "_"] + + +@final +class RecordedHookCall: + """A recorded call to a hook. + + The arguments to the hook call are set as attributes. + For example: + + .. 
code-block:: python
+
+        calls = hook_recorder.getcalls("pytest_runtest_setup")
+        # Suppose pytest_runtest_setup was called once with `item=an_item`.
+        assert calls[0].item is an_item
+    """
+
+    def __init__(self, name: str, kwargs) -> None:
+        self.__dict__.update(kwargs)
+        self._name = name
+
+    def __repr__(self) -> str:
+        d = self.__dict__.copy()
+        del d["_name"]
+        return f"<RecordedHookCall {self._name!r}(**{d!r})>"
+
+    if TYPE_CHECKING:
+        # The class has undetermined attributes, this tells mypy about it.
+        def __getattr__(self, key: str): ...
+
+
+@final
+class HookRecorder:
+    """Record all hooks called in a plugin manager.
+
+    Hook recorders are created by :class:`Pytester`.
+
+    This wraps all the hook calls in the plugin manager, recording each call
+    before propagating the normal calls.
+    """
+
+    def __init__(
+        self, pluginmanager: PytestPluginManager, *, _ispytest: bool = False
+    ) -> None:
+        check_ispytest(_ispytest)
+
+        self._pluginmanager = pluginmanager
+        self.calls: list[RecordedHookCall] = []
+        self.ret: int | ExitCode | None = None
+
+        def before(hook_name: str, hook_impls, kwargs) -> None:
+            self.calls.append(RecordedHookCall(hook_name, kwargs))
+
+        def after(outcome, hook_name: str, hook_impls, kwargs) -> None:
+            pass
+
+        self._undo_wrapping = pluginmanager.add_hookcall_monitoring(before, after)
+
+    def finish_recording(self) -> None:
+        self._undo_wrapping()
+
+    def getcalls(self, names: str | Iterable[str]) -> list[RecordedHookCall]:
+        """Get all recorded calls to hooks with the given names (or name)."""
+        if isinstance(names, str):
+            names = names.split()
+        return [call for call in self.calls if call._name in names]
+
+    def assert_contains(self, entries: Sequence[tuple[str, str]]) -> None:
+        __tracebackhide__ = True
+        i = 0
+        entries = list(entries)
+        # Since Python 3.13, f_locals is not a dict, but eval requires a dict.
+        backlocals = dict(sys._getframe(1).f_locals)
+        while entries:
+            name, check = entries.pop(0)
+            for ind, call in enumerate(self.calls[i:]):
+                if call._name == name:
+                    print("NAMEMATCH", name, call)
+                    if eval(check, backlocals, call.__dict__):
+                        print("CHECKERMATCH", repr(check), "->", call)
+                    else:
+                        print("NOCHECKERMATCH", repr(check), "-", call)
+                        continue
+                    i += ind + 1
+                    break
+                print("NONAMEMATCH", name, "with", call)
+            else:
+                fail(f"could not find {name!r} check {check!r}")
+
+    def popcall(self, name: str) -> RecordedHookCall:
+        __tracebackhide__ = True
+        for i, call in enumerate(self.calls):
+            if call._name == name:
+                del self.calls[i]
+                return call
+        lines = [f"could not find call {name!r}, in:"]
+        lines.extend([f"  {x}" for x in self.calls])
+        fail("\n".join(lines))
+
+    def getcall(self, name: str) -> RecordedHookCall:
+        values = self.getcalls(name)
+        assert len(values) == 1, (name, values)
+        return values[0]
+
+    # functionality for test reports
+
+    @overload
+    def getreports(
+        self,
+        names: Literal["pytest_collectreport"],
+    ) -> Sequence[CollectReport]: ...
+
+    @overload
+    def getreports(
+        self,
+        names: Literal["pytest_runtest_logreport"],
+    ) -> Sequence[TestReport]: ...
+
+    @overload
+    def getreports(
+        self,
+        names: str | Iterable[str] = (
+            "pytest_collectreport",
+            "pytest_runtest_logreport",
+        ),
+    ) -> Sequence[CollectReport | TestReport]: ...
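A sketch of consuming a recorder, in the style of pytest's own test suite (``inline_run`` on the ``Pytester`` class defined below returns a ``HookRecorder``; the test itself is illustrative)::

    def test_records_hooks(pytester) -> None:
        pytester.makepyfile("def test_ok(): pass")
        rec = pytester.inline_run()  # a HookRecorder for the inner run
        rec.assertoutcome(passed=1)
        # Every hook call made during the run was recorded by name:
        assert rec.getcalls("pytest_runtest_logreport")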
+ + def getreports( + self, + names: str | Iterable[str] = ( + "pytest_collectreport", + "pytest_runtest_logreport", + ), + ) -> Sequence[CollectReport | TestReport]: + return [x.report for x in self.getcalls(names)] + + def matchreport( + self, + inamepart: str = "", + names: str | Iterable[str] = ( + "pytest_runtest_logreport", + "pytest_collectreport", + ), + when: str | None = None, + ) -> CollectReport | TestReport: + """Return a testreport whose dotted import path matches.""" + values = [] + for rep in self.getreports(names=names): + if not when and rep.when != "call" and rep.passed: + # setup/teardown passing reports - let's ignore those + continue + if when and rep.when != when: + continue + if not inamepart or inamepart in rep.nodeid.split("::"): + values.append(rep) + if not values: + raise ValueError( + f"could not find test report matching {inamepart!r}: " + "no test reports at all!" + ) + if len(values) > 1: + raise ValueError( + f"found 2 or more testreports matching {inamepart!r}: {values}" + ) + return values[0] + + @overload + def getfailures( + self, + names: Literal["pytest_collectreport"], + ) -> Sequence[CollectReport]: ... + + @overload + def getfailures( + self, + names: Literal["pytest_runtest_logreport"], + ) -> Sequence[TestReport]: ... + + @overload + def getfailures( + self, + names: str | Iterable[str] = ( + "pytest_collectreport", + "pytest_runtest_logreport", + ), + ) -> Sequence[CollectReport | TestReport]: ... + + def getfailures( + self, + names: str | Iterable[str] = ( + "pytest_collectreport", + "pytest_runtest_logreport", + ), + ) -> Sequence[CollectReport | TestReport]: + return [rep for rep in self.getreports(names) if rep.failed] + + def getfailedcollections(self) -> Sequence[CollectReport]: + return self.getfailures("pytest_collectreport") + + def listoutcomes( + self, + ) -> tuple[ + Sequence[TestReport], + Sequence[CollectReport | TestReport], + Sequence[CollectReport | TestReport], + ]: + passed = [] + skipped = [] + failed = [] + for rep in self.getreports( + ("pytest_collectreport", "pytest_runtest_logreport") + ): + if rep.passed: + if rep.when == "call": + assert isinstance(rep, TestReport) + passed.append(rep) + elif rep.skipped: + skipped.append(rep) + else: + assert rep.failed, f"Unexpected outcome: {rep!r}" + failed.append(rep) + return passed, skipped, failed + + def countoutcomes(self) -> list[int]: + return [len(x) for x in self.listoutcomes()] + + def assertoutcome(self, passed: int = 0, skipped: int = 0, failed: int = 0) -> None: + __tracebackhide__ = True + from _pytest.pytester_assertions import assertoutcome + + outcomes = self.listoutcomes() + assertoutcome( + outcomes, + passed=passed, + skipped=skipped, + failed=failed, + ) + + def clear(self) -> None: + self.calls[:] = [] + + +@fixture +def linecomp() -> LineComp: + """A :class: `LineComp` instance for checking that an input linearly + contains a sequence of strings.""" + return LineComp() + + +@fixture(name="LineMatcher") +def LineMatcher_fixture(request: FixtureRequest) -> type[LineMatcher]: + """A reference to the :class: `LineMatcher`. + + This is instantiable with a list of lines (without their trailing newlines). + This is useful for testing large texts, such as the output of commands. 
+ """ + return LineMatcher + + +@fixture +def pytester( + request: FixtureRequest, tmp_path_factory: TempPathFactory, monkeypatch: MonkeyPatch +) -> Pytester: + """ + Facilities to write tests/configuration files, execute pytest in isolation, and match + against expected output, perfect for black-box testing of pytest plugins. + + It attempts to isolate the test run from external factors as much as possible, modifying + the current working directory to ``path`` and environment variables during initialization. + + It is particularly useful for testing plugins. It is similar to the :fixture:`tmp_path` + fixture but provides methods which aid in testing pytest itself. + """ + return Pytester(request, tmp_path_factory, monkeypatch, _ispytest=True) + + +@fixture +def _sys_snapshot() -> Generator[None]: + snappaths = SysPathsSnapshot() + snapmods = SysModulesSnapshot() + yield + snapmods.restore() + snappaths.restore() + + +@fixture +def _config_for_test() -> Generator[Config]: + from _pytest.config import get_config + + config = get_config() + yield config + config._ensure_unconfigure() # cleanup, e.g. capman closing tmpfiles. + + +# Regex to match the session duration string in the summary: "74.34s". +rex_session_duration = re.compile(r"\d+\.\d\ds") +# Regex to match all the counts and phrases in the summary line: "34 passed, 111 skipped". +rex_outcome = re.compile(r"(\d+) (\w+)") + + +@final +class RunResult: + """The result of running a command from :class:`~pytest.Pytester`.""" + + def __init__( + self, + ret: int | ExitCode, + outlines: list[str], + errlines: list[str], + duration: float, + ) -> None: + try: + self.ret: int | ExitCode = ExitCode(ret) + """The return value.""" + except ValueError: + self.ret = ret + self.outlines = outlines + """List of lines captured from stdout.""" + self.errlines = errlines + """List of lines captured from stderr.""" + self.stdout = LineMatcher(outlines) + """:class:`~pytest.LineMatcher` of stdout. + + Use e.g. :func:`str(stdout) ` to reconstruct stdout, or the commonly used + :func:`stdout.fnmatch_lines() ` method. + """ + self.stderr = LineMatcher(errlines) + """:class:`~pytest.LineMatcher` of stderr.""" + self.duration = duration + """Duration in seconds.""" + + def __repr__(self) -> str: + return ( + f"" + ) + + def parseoutcomes(self) -> dict[str, int]: + """Return a dictionary of outcome noun -> count from parsing the terminal + output that the test process produced. + + The returned nouns will always be in plural form:: + + ======= 1 failed, 1 passed, 1 warning, 1 error in 0.13s ==== + + Will return ``{"failed": 1, "passed": 1, "warnings": 1, "errors": 1}``. + """ + return self.parse_summary_nouns(self.outlines) + + @classmethod + def parse_summary_nouns(cls, lines) -> dict[str, int]: + """Extract the nouns from a pytest terminal summary line. + + It always returns the plural noun for consistency:: + + ======= 1 failed, 1 passed, 1 warning, 1 error in 0.13s ==== + + Will return ``{"failed": 1, "passed": 1, "warnings": 1, "errors": 1}``. 
+ """ + for line in reversed(lines): + if rex_session_duration.search(line): + outcomes = rex_outcome.findall(line) + ret = {noun: int(count) for (count, noun) in outcomes} + break + else: + raise ValueError("Pytest terminal summary report not found") + + to_plural = { + "warning": "warnings", + "error": "errors", + } + return {to_plural.get(k, k): v for k, v in ret.items()} + + def assert_outcomes( + self, + passed: int = 0, + skipped: int = 0, + failed: int = 0, + errors: int = 0, + xpassed: int = 0, + xfailed: int = 0, + warnings: int | None = None, + deselected: int | None = None, + ) -> None: + """ + Assert that the specified outcomes appear with the respective + numbers (0 means it didn't occur) in the text output from a test run. + + ``warnings`` and ``deselected`` are only checked if not None. + """ + __tracebackhide__ = True + from _pytest.pytester_assertions import assert_outcomes + + outcomes = self.parseoutcomes() + assert_outcomes( + outcomes, + passed=passed, + skipped=skipped, + failed=failed, + errors=errors, + xpassed=xpassed, + xfailed=xfailed, + warnings=warnings, + deselected=deselected, + ) + + +class SysModulesSnapshot: + def __init__(self, preserve: Callable[[str], bool] | None = None) -> None: + self.__preserve = preserve + self.__saved = dict(sys.modules) + + def restore(self) -> None: + if self.__preserve: + self.__saved.update( + (k, m) for k, m in sys.modules.items() if self.__preserve(k) + ) + sys.modules.clear() + sys.modules.update(self.__saved) + + +class SysPathsSnapshot: + def __init__(self) -> None: + self.__saved = list(sys.path), list(sys.meta_path) + + def restore(self) -> None: + sys.path[:], sys.meta_path[:] = self.__saved + + +@final +class Pytester: + """ + Facilities to write tests/configuration files, execute pytest in isolation, and match + against expected output, perfect for black-box testing of pytest plugins. + + It attempts to isolate the test run from external factors as much as possible, modifying + the current working directory to :attr:`path` and environment variables during initialization. + """ + + __test__ = False + + CLOSE_STDIN: Final = NOTSET + + class TimeoutExpired(Exception): + pass + + def __init__( + self, + request: FixtureRequest, + tmp_path_factory: TempPathFactory, + monkeypatch: MonkeyPatch, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + self._request = request + self._mod_collections: WeakKeyDictionary[Collector, list[Item | Collector]] = ( + WeakKeyDictionary() + ) + if request.function: + name: str = request.function.__name__ + else: + name = request.node.name + self._name = name + self._path: Path = tmp_path_factory.mktemp(name, numbered=True) + #: A list of plugins to use with :py:meth:`parseconfig` and + #: :py:meth:`runpytest`. Initially this is an empty list but plugins can + #: be added to the list. The type of items to add to the list depends on + #: the method using them so refer to them for details. + self.plugins: list[str | _PluggyPlugin] = [] + self._sys_path_snapshot = SysPathsSnapshot() + self._sys_modules_snapshot = self.__take_sys_modules_snapshot() + self._request.addfinalizer(self._finalize) + self._method = self._request.config.getoption("--runpytest") + self._test_tmproot = tmp_path_factory.mktemp(f"tmp-{name}", numbered=True) + + self._monkeypatch = mp = monkeypatch + self.chdir() + mp.setenv("PYTEST_DEBUG_TEMPROOT", str(self._test_tmproot)) + # Ensure no unexpected caching via tox. + mp.delenv("TOX_ENV_DIR", raising=False) + # Discard outer pytest options. 
+ mp.delenv("PYTEST_ADDOPTS", raising=False) + # Ensure no user config is used. + tmphome = str(self.path) + mp.setenv("HOME", tmphome) + mp.setenv("USERPROFILE", tmphome) + # Do not use colors for inner runs by default. + mp.setenv("PY_COLORS", "0") + + @property + def path(self) -> Path: + """Temporary directory path used to create files/run tests from, etc.""" + return self._path + + def __repr__(self) -> str: + return f"" + + def _finalize(self) -> None: + """ + Clean up global state artifacts. + + Some methods modify the global interpreter state and this tries to + clean this up. It does not remove the temporary directory however so + it can be looked at after the test run has finished. + """ + self._sys_modules_snapshot.restore() + self._sys_path_snapshot.restore() + + def __take_sys_modules_snapshot(self) -> SysModulesSnapshot: + # Some zope modules used by twisted-related tests keep internal state + # and can't be deleted; we had some trouble in the past with + # `zope.interface` for example. + # + # Preserve readline due to https://bugs.python.org/issue41033. + # pexpect issues a SIGWINCH. + def preserve_module(name): + return name.startswith(("zope", "readline")) + + return SysModulesSnapshot(preserve=preserve_module) + + def make_hook_recorder(self, pluginmanager: PytestPluginManager) -> HookRecorder: + """Create a new :class:`HookRecorder` for a :class:`PytestPluginManager`.""" + pluginmanager.reprec = reprec = HookRecorder(pluginmanager, _ispytest=True) # type: ignore[attr-defined] + self._request.addfinalizer(reprec.finish_recording) + return reprec + + def chdir(self) -> None: + """Cd into the temporary directory. + + This is done automatically upon instantiation. + """ + self._monkeypatch.chdir(self.path) + + def _makefile( + self, + ext: str, + lines: Sequence[Any | bytes], + files: dict[str, str], + encoding: str = "utf-8", + ) -> Path: + items = list(files.items()) + + if ext is None: + raise TypeError("ext must not be None") + + if ext and not ext.startswith("."): + raise ValueError( + f"pytester.makefile expects a file extension, try .{ext} instead of {ext}" + ) + + def to_text(s: Any | bytes) -> str: + return s.decode(encoding) if isinstance(s, bytes) else str(s) + + if lines: + source = "\n".join(to_text(x) for x in lines) + basename = self._name + items.insert(0, (basename, source)) + + ret = None + for basename, value in items: + p = self.path.joinpath(basename).with_suffix(ext) + p.parent.mkdir(parents=True, exist_ok=True) + source_ = Source(value) + source = "\n".join(to_text(line) for line in source_.lines) + p.write_text(source.strip(), encoding=encoding) + if ret is None: + ret = p + assert ret is not None + return ret + + def makefile(self, ext: str, *args: str, **kwargs: str) -> Path: + r"""Create new text file(s) in the test directory. + + :param ext: + The extension the file(s) should use, including the dot, e.g. `.py`. + :param args: + All args are treated as strings and joined using newlines. + The result is written as contents to the file. The name of the + file is based on the test function requesting this fixture. + :param kwargs: + Each keyword is the name of a file, while the value of it will + be written as contents of the file. + :returns: + The first created file. + + Examples: + + .. code-block:: python + + pytester.makefile(".txt", "line1", "line2") + + pytester.makefile(".ini", pytest="[pytest]\naddopts=-rs\n") + + To create binary files, use :meth:`pathlib.Path.write_bytes` directly: + + .. 
code-block:: python + + filename = pytester.path.joinpath("foo.bin") + filename.write_bytes(b"...") + """ + return self._makefile(ext, args, kwargs) + + def makeconftest(self, source: str) -> Path: + """Write a conftest.py file. + + :param source: The contents. + :returns: The conftest.py file. + """ + return self.makepyfile(conftest=source) + + def makeini(self, source: str) -> Path: + """Write a tox.ini file. + + :param source: The contents. + :returns: The tox.ini file. + """ + return self.makefile(".ini", tox=source) + + def getinicfg(self, source: str) -> SectionWrapper: + """Return the pytest section from the tox.ini config file.""" + p = self.makeini(source) + return IniConfig(str(p))["pytest"] + + def makepyprojecttoml(self, source: str) -> Path: + """Write a pyproject.toml file. + + :param source: The contents. + :returns: The pyproject.toml file. + + .. versionadded:: 6.0 + """ + return self.makefile(".toml", pyproject=source) + + def makepyfile(self, *args, **kwargs) -> Path: + r"""Shortcut for .makefile() with a .py extension. + + Defaults to the test name with a '.py' extension, e.g. test_foobar.py, overwriting + existing files. + + Examples: + + .. code-block:: python + + def test_something(pytester): + # Initial file is created test_something.py. + pytester.makepyfile("foobar") + # To create multiple files, pass kwargs accordingly. + pytester.makepyfile(custom="foobar") + # At this point, both 'test_something.py' & 'custom.py' exist in the test directory. + + """ + return self._makefile(".py", args, kwargs) + + def maketxtfile(self, *args, **kwargs) -> Path: + r"""Shortcut for .makefile() with a .txt extension. + + Defaults to the test name with a '.txt' extension, e.g. test_foobar.txt, overwriting + existing files. + + Examples: + + .. code-block:: python + + def test_something(pytester): + # Initial file is created test_something.txt. + pytester.maketxtfile("foobar") + # To create multiple files, pass kwargs accordingly. + pytester.maketxtfile(custom="foobar") + # At this point, both 'test_something.txt' & 'custom.txt' exist in the test directory. + + """ + return self._makefile(".txt", args, kwargs) + + def syspathinsert(self, path: str | os.PathLike[str] | None = None) -> None: + """Prepend a directory to sys.path, defaults to :attr:`path`. + + This is undone automatically when this object dies at the end of each + test. + + :param path: + The path. + """ + if path is None: + path = self.path + + self._monkeypatch.syspath_prepend(str(path)) + + def mkdir(self, name: str | os.PathLike[str]) -> Path: + """Create a new (sub)directory. + + :param name: + The name of the directory, relative to the pytester path. + :returns: + The created directory. + :rtype: pathlib.Path + """ + p = self.path / name + p.mkdir() + return p + + def mkpydir(self, name: str | os.PathLike[str]) -> Path: + """Create a new python package. + + This creates a (sub)directory with an empty ``__init__.py`` file so it + gets recognised as a Python package. + """ + p = self.path / name + p.mkdir() + p.joinpath("__init__.py").touch() + return p + + def copy_example(self, name: str | None = None) -> Path: + """Copy file from project's directory into the testdir. + + :param name: + The name of the file to copy. + :return: + Path to the copied directory (inside ``self.path``).
+ :rtype: pathlib.Path + """ + example_dir_ = self._request.config.getini("pytester_example_dir") + if example_dir_ is None: + raise ValueError("pytester_example_dir is unset, can't copy examples") + example_dir: Path = self._request.config.rootpath / example_dir_ + + for extra_element in self._request.node.iter_markers("pytester_example_path"): + assert extra_element.args + example_dir = example_dir.joinpath(*extra_element.args) + + if name is None: + func_name = self._name + maybe_dir = example_dir / func_name + maybe_file = example_dir / (func_name + ".py") + + if maybe_dir.is_dir(): + example_path = maybe_dir + elif maybe_file.is_file(): + example_path = maybe_file + else: + raise LookupError( + f"{func_name} can't be found as module or package in {example_dir}" + ) + else: + example_path = example_dir.joinpath(name) + + if example_path.is_dir() and not example_path.joinpath("__init__.py").is_file(): + shutil.copytree(example_path, self.path, symlinks=True, dirs_exist_ok=True) + return self.path + elif example_path.is_file(): + result = self.path.joinpath(example_path.name) + shutil.copy(example_path, result) + return result + else: + raise LookupError( + f'example "{example_path}" is not found as a file or directory' + ) + + def getnode(self, config: Config, arg: str | os.PathLike[str]) -> Collector | Item: + """Get the collection node of a file. + + :param config: + A pytest config. + See :py:meth:`parseconfig` and :py:meth:`parseconfigure` for creating it. + :param arg: + Path to the file. + :returns: + The node. + """ + session = Session.from_config(config) + assert "::" not in str(arg) + p = Path(os.path.abspath(arg)) + config.hook.pytest_sessionstart(session=session) + res = session.perform_collect([str(p)], genitems=False)[0] + config.hook.pytest_sessionfinish(session=session, exitstatus=ExitCode.OK) + return res + + def getpathnode(self, path: str | os.PathLike[str]) -> Collector | Item: + """Return the collection node of a file. + + This is like :py:meth:`getnode` but uses :py:meth:`parseconfigure` to + create the (configured) pytest Config instance. + + :param path: + Path to the file. + :returns: + The node. + """ + path = Path(path) + config = self.parseconfigure(path) + session = Session.from_config(config) + x = bestrelpath(session.path, path) + config.hook.pytest_sessionstart(session=session) + res = session.perform_collect([x], genitems=False)[0] + config.hook.pytest_sessionfinish(session=session, exitstatus=ExitCode.OK) + return res + + def genitems(self, colitems: Sequence[Item | Collector]) -> list[Item]: + """Generate all test items from a collection node. + + This recurses into the collection node and returns a list of all the + test items contained within. + + :param colitems: + The collection nodes. + :returns: + The collected items. + """ + session = colitems[0].session + result: list[Item] = [] + for colitem in colitems: + result.extend(session.genitems(colitem)) + return result + + def runitem(self, source: str) -> Any: + """Run the "test_func" Item. + + The calling test instance (class containing the test method) must + provide a ``.getrunner()`` method which should return a runner which + can run the test protocol for a single item, e.g. + ``_pytest.runner.runtestprotocol``. 
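+
+        A sketch of the expected shape (the class name and assertion are
+        illustrative, assuming ``_pytest.runner.runtestprotocol`` as the
+        runner mentioned above):
+
+        .. code-block:: python
+
+            class TestMyRunner:
+                def getrunner(self):
+                    from _pytest.runner import runtestprotocol
+
+                    return runtestprotocol
+
+                def test_runs_item(self, pytester):
+                    reports = pytester.runitem("def test_func(): pass")
+                    assert any(r.when == "call" and r.passed for r in reports)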
+ """ + # used from runner functional tests + item = self.getitem(source) + # the test class where we are called from wants to provide the runner + testclassinstance = self._request.instance + runner = testclassinstance.getrunner() + return runner(item) + + def inline_runsource(self, source: str, *cmdlineargs) -> HookRecorder: + """Run a test module in process using ``pytest.main()``. + + This run writes "source" into a temporary file and runs + ``pytest.main()`` on it, returning a :py:class:`HookRecorder` instance + for the result. + + :param source: The source code of the test module. + :param cmdlineargs: Any extra command line arguments to use. + """ + p = self.makepyfile(source) + values = [*list(cmdlineargs), p] + return self.inline_run(*values) + + def inline_genitems(self, *args) -> tuple[list[Item], HookRecorder]: + """Run ``pytest.main(['--collect-only'])`` in-process. + + Runs the :py:func:`pytest.main` function to run all of pytest inside + the test process itself like :py:meth:`inline_run`, but returns a + tuple of the collected items and a :py:class:`HookRecorder` instance. + """ + rec = self.inline_run("--collect-only", *args) + items = [x.item for x in rec.getcalls("pytest_itemcollected")] + return items, rec + + def inline_run( + self, + *args: str | os.PathLike[str], + plugins=(), + no_reraise_ctrlc: bool = False, + ) -> HookRecorder: + """Run ``pytest.main()`` in-process, returning a HookRecorder. + + Runs the :py:func:`pytest.main` function to run all of pytest inside + the test process itself. This means it can return a + :py:class:`HookRecorder` instance which gives more detailed results + from that run than can be done by matching stdout/stderr from + :py:meth:`runpytest`. + + :param args: + Command line arguments to pass to :py:func:`pytest.main`. + :param plugins: + Extra plugin instances the ``pytest.main()`` instance should use. + :param no_reraise_ctrlc: + Typically we reraise keyboard interrupts from the child run. If + True, the KeyboardInterrupt exception is captured. + """ + from _pytest.unraisableexception import gc_collect_iterations_key + + # (maybe a cpython bug?) the importlib cache sometimes isn't updated + # properly between file creation and inline_run (especially if imports + # are interspersed with file creation) + importlib.invalidate_caches() + + plugins = list(plugins) + finalizers = [] + try: + # Any sys.module or sys.path changes done while running pytest + # inline should be reverted after the test run completes to avoid + # clashing with later inline tests run within the same pytest test, + # e.g. just because they use matching test module names. + finalizers.append(self.__take_sys_modules_snapshot().restore) + finalizers.append(SysPathsSnapshot().restore) + + # Important note: + # - our tests should not leave any other references/registrations + # laying around other than possibly loaded test modules + # referenced from sys.modules, as nothing will clean those up + # automatically + + rec = [] + + class PytesterHelperPlugin: + @staticmethod + def pytest_configure(config: Config) -> None: + rec.append(self.make_hook_recorder(config.pluginmanager)) + + # The unraisable plugin GC collect slows down inline + # pytester runs too much. 
+ config.stash[gc_collect_iterations_key] = 0 + + plugins.append(PytesterHelperPlugin()) + ret = main([str(x) for x in args], plugins=plugins) + if len(rec) == 1: + reprec = rec.pop() + else: + + class reprec: # type: ignore + pass + + reprec.ret = ret + + # Typically we reraise keyboard interrupts from the child run + # because it's our user requesting interruption of the testing. + if ret == ExitCode.INTERRUPTED and not no_reraise_ctrlc: + calls = reprec.getcalls("pytest_keyboard_interrupt") + if calls and calls[-1].excinfo.type == KeyboardInterrupt: + raise KeyboardInterrupt() + return reprec + finally: + for finalizer in finalizers: + finalizer() + + def runpytest_inprocess( + self, *args: str | os.PathLike[str], **kwargs: Any + ) -> RunResult: + """Return result of running pytest in-process, providing a similar + interface to what self.runpytest() provides.""" + syspathinsert = kwargs.pop("syspathinsert", False) + + if syspathinsert: + self.syspathinsert() + instant = timing.Instant() + capture = _get_multicapture("sys") + capture.start_capturing() + try: + try: + reprec = self.inline_run(*args, **kwargs) + except SystemExit as e: + ret = e.args[0] + try: + ret = ExitCode(e.args[0]) + except ValueError: + pass + + class reprec: # type: ignore + ret = ret + + except Exception: + traceback.print_exc() + + class reprec: # type: ignore + ret = ExitCode(3) + + finally: + out, err = capture.readouterr() + capture.stop_capturing() + sys.stdout.write(out) + sys.stderr.write(err) + + assert reprec.ret is not None + res = RunResult( + reprec.ret, out.splitlines(), err.splitlines(), instant.elapsed().seconds + ) + res.reprec = reprec # type: ignore + return res + + def runpytest(self, *args: str | os.PathLike[str], **kwargs: Any) -> RunResult: + """Run pytest inline or in a subprocess, depending on the command line + option "--runpytest" and return a :py:class:`~pytest.RunResult`.""" + new_args = self._ensure_basetemp(args) + if self._method == "inprocess": + return self.runpytest_inprocess(*new_args, **kwargs) + elif self._method == "subprocess": + return self.runpytest_subprocess(*new_args, **kwargs) + raise RuntimeError(f"Unrecognized runpytest option: {self._method}") + + def _ensure_basetemp( + self, args: Sequence[str | os.PathLike[str]] + ) -> list[str | os.PathLike[str]]: + new_args = list(args) + for x in new_args: + if str(x).startswith("--basetemp"): + break + else: + new_args.append( + "--basetemp={}".format(self.path.parent.joinpath("basetemp")) + ) + return new_args + + def parseconfig(self, *args: str | os.PathLike[str]) -> Config: + """Return a new pytest :class:`pytest.Config` instance from given + commandline args. + + This invokes the pytest bootstrapping code in _pytest.config to create a + new :py:class:`pytest.PytestPluginManager` and call the + :hook:`pytest_cmdline_parse` hook to create a new :class:`pytest.Config` + instance. + + If :attr:`plugins` has been populated they should be plugin modules + to be registered with the plugin manager. 
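+
+        A minimal usage sketch:
+
+        .. code-block:: python
+
+            config = pytester.parseconfig("-v")
+            assert config.option.verbose == 1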
+ """ + import _pytest.config + + new_args = self._ensure_basetemp(args) + new_args = [str(x) for x in new_args] + + config = _pytest.config._prepareconfig(new_args, self.plugins) # type: ignore[arg-type] + # we don't know what the test will do with this half-setup config + # object and thus we make sure it gets unconfigured properly in any + # case (otherwise capturing could still be active, for example) + self._request.addfinalizer(config._ensure_unconfigure) + return config + + def parseconfigure(self, *args: str | os.PathLike[str]) -> Config: + """Return a new pytest configured Config instance. + + Returns a new :py:class:`pytest.Config` instance like + :py:meth:`parseconfig`, but also calls the :hook:`pytest_configure` + hook. + """ + config = self.parseconfig(*args) + config._do_configure() + return config + + def getitem( + self, source: str | os.PathLike[str], funcname: str = "test_func" + ) -> Item: + """Return the test item for a test function. + + Writes the source to a python file and runs pytest's collection on + the resulting module, returning the test item for the requested + function name. + + :param source: + The module source. + :param funcname: + The name of the test function for which to return a test item. + :returns: + The test item. + """ + items = self.getitems(source) + for item in items: + if item.name == funcname: + return item + assert 0, f"{funcname!r} item not found in module:\n{source}\nitems: {items}" + + def getitems(self, source: str | os.PathLike[str]) -> list[Item]: + """Return all test items collected from the module. + + Writes the source to a Python file and runs pytest's collection on + the resulting module, returning all test items contained within. + """ + modcol = self.getmodulecol(source) + return self.genitems([modcol]) + + def getmodulecol( + self, + source: str | os.PathLike[str], + configargs=(), + *, + withinit: bool = False, + ): + """Return the module collection node for ``source``. + + Writes ``source`` to a file using :py:meth:`makepyfile` and then + runs the pytest collection on it, returning the collection node for the + test module. + + :param source: + The source code of the module to collect. + + :param configargs: + Any extra arguments to pass to :py:meth:`parseconfigure`. + + :param withinit: + Whether to also write an ``__init__.py`` file to the same + directory to ensure it is a package. + """ + if isinstance(source, os.PathLike): + path = self.path.joinpath(source) + assert not withinit, "not supported for paths" + else: + kw = {self._name: str(source)} + path = self.makepyfile(**kw) + if withinit: + self.makepyfile(__init__="#") + self.config = config = self.parseconfigure(path, *configargs) + return self.getnode(config, path) + + def collect_by_name(self, modcol: Collector, name: str) -> Item | Collector | None: + """Return the collection node for name from the module collection. + + Searches a module collection node for a collection node matching the + given name. + + :param modcol: A module collection node; see :py:meth:`getmodulecol`. + :param name: The name of the node to return. 
+ """ + if modcol not in self._mod_collections: + self._mod_collections[modcol] = list(modcol.collect()) + for colitem in self._mod_collections[modcol]: + if colitem.name == name: + return colitem + return None + + def popen( + self, + cmdargs: Sequence[str | os.PathLike[str]], + stdout: int | TextIO = subprocess.PIPE, + stderr: int | TextIO = subprocess.PIPE, + stdin: NotSetType | bytes | IO[Any] | int = CLOSE_STDIN, + **kw, + ): + """Invoke :py:class:`subprocess.Popen`. + + Calls :py:class:`subprocess.Popen` making sure the current working + directory is in ``PYTHONPATH``. + + You probably want to use :py:meth:`run` instead. + """ + env = os.environ.copy() + env["PYTHONPATH"] = os.pathsep.join( + filter(None, [os.getcwd(), env.get("PYTHONPATH", "")]) + ) + kw["env"] = env + + if stdin is self.CLOSE_STDIN: + kw["stdin"] = subprocess.PIPE + elif isinstance(stdin, bytes): + kw["stdin"] = subprocess.PIPE + else: + kw["stdin"] = stdin + + popen = subprocess.Popen(cmdargs, stdout=stdout, stderr=stderr, **kw) + if stdin is self.CLOSE_STDIN: + assert popen.stdin is not None + popen.stdin.close() + elif isinstance(stdin, bytes): + assert popen.stdin is not None + popen.stdin.write(stdin) + + return popen + + def run( + self, + *cmdargs: str | os.PathLike[str], + timeout: float | None = None, + stdin: NotSetType | bytes | IO[Any] | int = CLOSE_STDIN, + ) -> RunResult: + """Run a command with arguments. + + Run a process using :py:class:`subprocess.Popen` saving the stdout and + stderr. + + :param cmdargs: + The sequence of arguments to pass to :py:class:`subprocess.Popen`, + with path-like objects being converted to :py:class:`str` + automatically. + :param timeout: + The period in seconds after which to timeout and raise + :py:class:`Pytester.TimeoutExpired`. + :param stdin: + Optional standard input. + + - If it is ``CLOSE_STDIN`` (Default), then this method calls + :py:class:`subprocess.Popen` with ``stdin=subprocess.PIPE``, and + the standard input is closed immediately after the new command is + started. + + - If it is of type :py:class:`bytes`, these bytes are sent to the + standard input of the command. + + - Otherwise, it is passed through to :py:class:`subprocess.Popen`. + For further information in this case, consult the document of the + ``stdin`` parameter in :py:class:`subprocess.Popen`. + :type stdin: _pytest.compat.NotSetType | bytes | IO[Any] | int + :returns: + The result. 
+ + """ + __tracebackhide__ = True + + cmdargs = tuple(os.fspath(arg) for arg in cmdargs) + p1 = self.path.joinpath("stdout") + p2 = self.path.joinpath("stderr") + print("running:", *cmdargs) + print(" in:", Path.cwd()) + + with p1.open("w", encoding="utf8") as f1, p2.open("w", encoding="utf8") as f2: + instant = timing.Instant() + popen = self.popen( + cmdargs, + stdin=stdin, + stdout=f1, + stderr=f2, + close_fds=(sys.platform != "win32"), + ) + if popen.stdin is not None: + popen.stdin.close() + + def handle_timeout() -> None: + __tracebackhide__ = True + + timeout_message = f"{timeout} second timeout expired running: {cmdargs}" + + popen.kill() + popen.wait() + raise self.TimeoutExpired(timeout_message) + + if timeout is None: + ret = popen.wait() + else: + try: + ret = popen.wait(timeout) + except subprocess.TimeoutExpired: + handle_timeout() + + with p1.open(encoding="utf8") as f1, p2.open(encoding="utf8") as f2: + out = f1.read().splitlines() + err = f2.read().splitlines() + + self._dump_lines(out, sys.stdout) + self._dump_lines(err, sys.stderr) + + with contextlib.suppress(ValueError): + ret = ExitCode(ret) + return RunResult(ret, out, err, instant.elapsed().seconds) + + def _dump_lines(self, lines, fp): + try: + for line in lines: + print(line, file=fp) + except UnicodeEncodeError: + print(f"couldn't print to {fp} because of encoding") + + def _getpytestargs(self) -> tuple[str, ...]: + return sys.executable, "-mpytest" + + def runpython(self, script: os.PathLike[str]) -> RunResult: + """Run a python script using sys.executable as interpreter.""" + return self.run(sys.executable, script) + + def runpython_c(self, command: str) -> RunResult: + """Run ``python -c "command"``.""" + return self.run(sys.executable, "-c", command) + + def runpytest_subprocess( + self, *args: str | os.PathLike[str], timeout: float | None = None + ) -> RunResult: + """Run pytest as a subprocess with given arguments. + + Any plugins added to the :py:attr:`plugins` list will be added using the + ``-p`` command line option. Additionally ``--basetemp`` is used to put + any temporary files and directories in a numbered directory prefixed + with "runpytest-" to not conflict with the normal numbered pytest + location for temporary files and directories. + + :param args: + The sequence of arguments to pass to the pytest subprocess. + :param timeout: + The period in seconds after which to timeout and raise + :py:class:`Pytester.TimeoutExpired`. + :returns: + The result. + """ + __tracebackhide__ = True + p = make_numbered_dir(root=self.path, prefix="runpytest-", mode=0o700) + args = (f"--basetemp={p}", *args) + plugins = [x for x in self.plugins if isinstance(x, str)] + if plugins: + args = ("-p", plugins[0], *args) + args = self._getpytestargs() + args + return self.run(*args, timeout=timeout) + + def spawn_pytest(self, string: str, expect_timeout: float = 10.0) -> pexpect.spawn: + """Run pytest using pexpect. + + This makes sure to use the right pytest and sets up the temporary + directory locations. + + The pexpect child is returned. + """ + basetemp = self.path / "temp-pexpect" + basetemp.mkdir(mode=0o700) + invoke = " ".join(map(str, self._getpytestargs())) + cmd = f"{invoke} --basetemp={basetemp} {string}" + return self.spawn(cmd, expect_timeout=expect_timeout) + + def spawn(self, cmd: str, expect_timeout: float = 10.0) -> pexpect.spawn: + """Run a command using pexpect. + + The pexpect child is returned. 
+ """ + pexpect = importorskip("pexpect", "3.0") + if hasattr(sys, "pypy_version_info") and "64" in platform.machine(): + skip("pypy-64 bit not supported") + if not hasattr(pexpect, "spawn"): + skip("pexpect.spawn not available") + logfile = self.path.joinpath("spawn.out").open("wb") + + child = pexpect.spawn(cmd, logfile=logfile, timeout=expect_timeout) + self._request.addfinalizer(logfile.close) + return child + + +class LineComp: + def __init__(self) -> None: + self.stringio = StringIO() + """:class:`python:io.StringIO()` instance used for input.""" + + def assert_contains_lines(self, lines2: Sequence[str]) -> None: + """Assert that ``lines2`` are contained (linearly) in :attr:`stringio`'s value. + + Lines are matched using :func:`LineMatcher.fnmatch_lines `. + """ + __tracebackhide__ = True + val = self.stringio.getvalue() + self.stringio.truncate(0) + self.stringio.seek(0) + lines1 = val.split("\n") + LineMatcher(lines1).fnmatch_lines(lines2) + + +class LineMatcher: + """Flexible matching of text. + + This is a convenience class to test large texts like the output of + commands. + + The constructor takes a list of lines without their trailing newlines, i.e. + ``text.splitlines()``. + """ + + def __init__(self, lines: list[str]) -> None: + self.lines = lines + self._log_output: list[str] = [] + + def __str__(self) -> str: + """Return the entire original text. + + .. versionadded:: 6.2 + You can use :meth:`str` in older versions. + """ + return "\n".join(self.lines) + + def _getlines(self, lines2: str | Sequence[str] | Source) -> Sequence[str]: + if isinstance(lines2, str): + lines2 = Source(lines2) + if isinstance(lines2, Source): + lines2 = lines2.strip().lines + return lines2 + + def fnmatch_lines_random(self, lines2: Sequence[str]) -> None: + """Check lines exist in the output in any order (using :func:`python:fnmatch.fnmatch`).""" + __tracebackhide__ = True + self._match_lines_random(lines2, fnmatch) + + def re_match_lines_random(self, lines2: Sequence[str]) -> None: + """Check lines exist in the output in any order (using :func:`python:re.match`).""" + __tracebackhide__ = True + self._match_lines_random(lines2, lambda name, pat: bool(re.match(pat, name))) + + def _match_lines_random( + self, lines2: Sequence[str], match_func: Callable[[str, str], bool] + ) -> None: + __tracebackhide__ = True + lines2 = self._getlines(lines2) + for line in lines2: + for x in self.lines: + if line == x or match_func(x, line): + self._log("matched: ", repr(line)) + break + else: + msg = f"line {line!r} not found in output" + self._log(msg) + self._fail(msg) + + def get_lines_after(self, fnline: str) -> Sequence[str]: + """Return all lines following the given line in the text. + + The given line can contain glob wildcards. + """ + for i, line in enumerate(self.lines): + if fnline == line or fnmatch(line, fnline): + return self.lines[i + 1 :] + raise ValueError(f"line {fnline!r} not found in output") + + def _log(self, *args) -> None: + self._log_output.append(" ".join(str(x) for x in args)) + + @property + def _log_text(self) -> str: + return "\n".join(self._log_output) + + def fnmatch_lines( + self, lines2: Sequence[str], *, consecutive: bool = False + ) -> None: + """Check lines exist in the output (using :func:`python:fnmatch.fnmatch`). + + The argument is a list of lines which have to match and can use glob + wildcards. If they do not match a pytest.fail() is called. The + matches and non-matches are also shown as part of the error message. + + :param lines2: String patterns to match. 
+ :param consecutive: Match lines consecutively? + """ + __tracebackhide__ = True + self._match_lines(lines2, fnmatch, "fnmatch", consecutive=consecutive) + + def re_match_lines( + self, lines2: Sequence[str], *, consecutive: bool = False + ) -> None: + """Check lines exist in the output (using :func:`python:re.match`). + + The argument is a list of lines which have to match using ``re.match``. + If they do not match a pytest.fail() is called. + + The matches and non-matches are also shown as part of the error message. + + :param lines2: string patterns to match. + :param consecutive: match lines consecutively? + """ + __tracebackhide__ = True + self._match_lines( + lines2, + lambda name, pat: bool(re.match(pat, name)), + "re.match", + consecutive=consecutive, + ) + + def _match_lines( + self, + lines2: Sequence[str], + match_func: Callable[[str, str], bool], + match_nickname: str, + *, + consecutive: bool = False, + ) -> None: + """Underlying implementation of ``fnmatch_lines`` and ``re_match_lines``. + + :param Sequence[str] lines2: + List of string patterns to match. The actual format depends on + ``match_func``. + :param match_func: + A callable ``match_func(line, pattern)`` where line is the + captured line from stdout/stderr and pattern is the matching + pattern. + :param str match_nickname: + The nickname for the match function that will be logged to stdout + when a match occurs. + :param consecutive: + Match lines consecutively? + """ + if not isinstance(lines2, collections.abc.Sequence): + raise TypeError(f"invalid type for lines2: {type(lines2).__name__}") + lines2 = self._getlines(lines2) + lines1 = self.lines[:] + extralines = [] + __tracebackhide__ = True + wnick = len(match_nickname) + 1 + started = False + for line in lines2: + nomatchprinted = False + while lines1: + nextline = lines1.pop(0) + if line == nextline: + self._log("exact match:", repr(line)) + started = True + break + elif match_func(nextline, line): + self._log(f"{match_nickname}:", repr(line)) + self._log( + "{:>{width}}".format("with:", width=wnick), repr(nextline) + ) + started = True + break + else: + if consecutive and started: + msg = f"no consecutive match: {line!r}" + self._log(msg) + self._log( + "{:>{width}}".format("with:", width=wnick), repr(nextline) + ) + self._fail(msg) + if not nomatchprinted: + self._log( + "{:>{width}}".format("nomatch:", width=wnick), repr(line) + ) + nomatchprinted = True + self._log("{:>{width}}".format("and:", width=wnick), repr(nextline)) + extralines.append(nextline) + else: + msg = f"remains unmatched: {line!r}" + self._log(msg) + self._fail(msg) + self._log_output = [] + + def no_fnmatch_line(self, pat: str) -> None: + """Ensure captured lines do not match the given pattern, using ``fnmatch.fnmatch``. + + :param str pat: The pattern to match lines. + """ + __tracebackhide__ = True + self._no_match_line(pat, fnmatch, "fnmatch") + + def no_re_match_line(self, pat: str) -> None: + """Ensure captured lines do not match the given pattern, using ``re.match``. + + :param str pat: The regular expression to match lines. + """ + __tracebackhide__ = True + self._no_match_line( + pat, lambda name, pat: bool(re.match(pat, name)), "re.match" + ) + + def _no_match_line( + self, pat: str, match_func: Callable[[str, str], bool], match_nickname: str + ) -> None: + """Ensure captured lines do not match the given pattern, using ``match_func``. + + :param str pat: The pattern to match lines.
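+
+        A minimal usage sketch of the public wrappers above:
+
+        .. code-block:: python
+
+            result = pytester.runpytest()
+            result.stdout.no_fnmatch_line("*error*")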
+ """ + __tracebackhide__ = True + nomatch_printed = False + wnick = len(match_nickname) + 1 + for line in self.lines: + if match_func(line, pat): + msg = f"{match_nickname}: {pat!r}" + self._log(msg) + self._log("{:>{width}}".format("with:", width=wnick), repr(line)) + self._fail(msg) + else: + if not nomatch_printed: + self._log("{:>{width}}".format("nomatch:", width=wnick), repr(pat)) + nomatch_printed = True + self._log("{:>{width}}".format("and:", width=wnick), repr(line)) + self._log_output = [] + + def _fail(self, msg: str) -> None: + __tracebackhide__ = True + log_text = self._log_text + self._log_output = [] + fail(log_text) + + def str(self) -> str: + """Return the entire original text.""" + return str(self) diff --git a/.venv/lib/python3.11/site-packages/_pytest/pytester_assertions.py b/.venv/lib/python3.11/site-packages/_pytest/pytester_assertions.py new file mode 100644 index 0000000..915cc8a --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/pytester_assertions.py @@ -0,0 +1,74 @@ +"""Helper plugin for pytester; should not be loaded on its own.""" + +# This plugin contains assertions used by pytester. pytester cannot +# contain them itself, since it is imported by the `pytest` module, +# hence cannot be subject to assertion rewriting, which requires a +# module to not be already imported. +from __future__ import annotations + +from collections.abc import Sequence + +from _pytest.reports import CollectReport +from _pytest.reports import TestReport + + +def assertoutcome( + outcomes: tuple[ + Sequence[TestReport], + Sequence[CollectReport | TestReport], + Sequence[CollectReport | TestReport], + ], + passed: int = 0, + skipped: int = 0, + failed: int = 0, +) -> None: + __tracebackhide__ = True + + realpassed, realskipped, realfailed = outcomes + obtained = { + "passed": len(realpassed), + "skipped": len(realskipped), + "failed": len(realfailed), + } + expected = {"passed": passed, "skipped": skipped, "failed": failed} + assert obtained == expected, outcomes + + +def assert_outcomes( + outcomes: dict[str, int], + passed: int = 0, + skipped: int = 0, + failed: int = 0, + errors: int = 0, + xpassed: int = 0, + xfailed: int = 0, + warnings: int | None = None, + deselected: int | None = None, +) -> None: + """Assert that the specified outcomes appear with the respective + numbers (0 means it didn't occur) in the text output from a test run.""" + __tracebackhide__ = True + + obtained = { + "passed": outcomes.get("passed", 0), + "skipped": outcomes.get("skipped", 0), + "failed": outcomes.get("failed", 0), + "errors": outcomes.get("errors", 0), + "xpassed": outcomes.get("xpassed", 0), + "xfailed": outcomes.get("xfailed", 0), + } + expected = { + "passed": passed, + "skipped": skipped, + "failed": failed, + "errors": errors, + "xpassed": xpassed, + "xfailed": xfailed, + } + if warnings is not None: + obtained["warnings"] = outcomes.get("warnings", 0) + expected["warnings"] = warnings + if deselected is not None: + obtained["deselected"] = outcomes.get("deselected", 0) + expected["deselected"] = deselected + assert obtained == expected diff --git a/.venv/lib/python3.11/site-packages/_pytest/python.py b/.venv/lib/python3.11/site-packages/_pytest/python.py new file mode 100644 index 0000000..8e4fb04 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/python.py @@ -0,0 +1,1723 @@ +# mypy: allow-untyped-defs +"""Python test discovery, setup and run of test functions.""" + +from __future__ import annotations + +import abc +from collections import Counter +from 
collections import defaultdict +from collections.abc import Callable +from collections.abc import Generator +from collections.abc import Iterable +from collections.abc import Iterator +from collections.abc import Mapping +from collections.abc import Sequence +import dataclasses +import enum +import fnmatch +from functools import partial +import inspect +import itertools +import os +from pathlib import Path +import re +import types +from typing import Any +from typing import final +from typing import Literal +from typing import NoReturn +from typing import TYPE_CHECKING +import warnings + +import _pytest +from _pytest import fixtures +from _pytest import nodes +from _pytest._code import filter_traceback +from _pytest._code import getfslineno +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import TerminalRepr +from _pytest._code.code import Traceback +from _pytest._io.saferepr import saferepr +from _pytest.compat import ascii_escaped +from _pytest.compat import get_default_arg_names +from _pytest.compat import get_real_func +from _pytest.compat import getimfunc +from _pytest.compat import is_async_function +from _pytest.compat import LEGACY_PATH +from _pytest.compat import NOTSET +from _pytest.compat import safe_getattr +from _pytest.compat import safe_isclass +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import FixtureDef +from _pytest.fixtures import FixtureRequest +from _pytest.fixtures import FuncFixtureInfo +from _pytest.fixtures import get_scope_node +from _pytest.main import Session +from _pytest.mark import ParameterSet +from _pytest.mark.structures import _HiddenParam +from _pytest.mark.structures import get_unpacked_marks +from _pytest.mark.structures import HIDDEN_PARAM +from _pytest.mark.structures import Mark +from _pytest.mark.structures import MarkDecorator +from _pytest.mark.structures import normalize_mark_list +from _pytest.outcomes import fail +from _pytest.outcomes import skip +from _pytest.pathlib import fnmatch_ex +from _pytest.pathlib import import_path +from _pytest.pathlib import ImportPathMismatchError +from _pytest.pathlib import scandir +from _pytest.scope import _ScopeName +from _pytest.scope import Scope +from _pytest.stash import StashKey +from _pytest.warning_types import PytestCollectionWarning +from _pytest.warning_types import PytestReturnNotNoneWarning + + +if TYPE_CHECKING: + from typing_extensions import Self + + +def pytest_addoption(parser: Parser) -> None: + parser.addini( + "python_files", + type="args", + # NOTE: default is also used in AssertionRewritingHook. 
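+        # A minimal illustration (not part of the original source; the file
+        # name and globs are hypothetical) of overriding this discovery
+        # pattern from a project's configuration:
+        #
+        #   # pytest.ini
+        #   [pytest]
+        #   python_files = check_*.py *_check.py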
+ default=["test_*.py", "*_test.py"], + help="Glob-style file patterns for Python test module discovery", + ) + parser.addini( + "python_classes", + type="args", + default=["Test"], + help="Prefixes or glob names for Python test class discovery", + ) + parser.addini( + "python_functions", + type="args", + default=["test"], + help="Prefixes or glob names for Python test function and method discovery", + ) + parser.addini( + "disable_test_id_escaping_and_forfeit_all_rights_to_community_support", + type="bool", + default=False, + help="Disable string escape non-ASCII characters, might cause unwanted " + "side effects(use at your own risk)", + ) + + +def pytest_generate_tests(metafunc: Metafunc) -> None: + for marker in metafunc.definition.iter_markers(name="parametrize"): + metafunc.parametrize(*marker.args, **marker.kwargs, _param_mark=marker) + + +def pytest_configure(config: Config) -> None: + config.addinivalue_line( + "markers", + "parametrize(argnames, argvalues): call a test function multiple " + "times passing in different arguments in turn. argvalues generally " + "needs to be a list of values if argnames specifies only one name " + "or a list of tuples of values if argnames specifies multiple names. " + "Example: @parametrize('arg1', [1,2]) would lead to two calls of the " + "decorated test function, one with arg1=1 and another with arg1=2." + "see https://docs.pytest.org/en/stable/how-to/parametrize.html for more info " + "and examples.", + ) + config.addinivalue_line( + "markers", + "usefixtures(fixturename1, fixturename2, ...): mark tests as needing " + "all of the specified fixtures. see " + "https://docs.pytest.org/en/stable/explanation/fixtures.html#usefixtures ", + ) + + +def async_fail(nodeid: str) -> None: + msg = ( + "async def functions are not natively supported.\n" + "You need to install a suitable plugin for your async framework, for example:\n" + " - anyio\n" + " - pytest-asyncio\n" + " - pytest-tornasync\n" + " - pytest-trio\n" + " - pytest-twisted" + ) + fail(msg, pytrace=False) + + +@hookimpl(trylast=True) +def pytest_pyfunc_call(pyfuncitem: Function) -> object | None: + testfunction = pyfuncitem.obj + if is_async_function(testfunction): + async_fail(pyfuncitem.nodeid) + funcargs = pyfuncitem.funcargs + testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames} + result = testfunction(**testargs) + if hasattr(result, "__await__") or hasattr(result, "__aiter__"): + async_fail(pyfuncitem.nodeid) + elif result is not None: + warnings.warn( + PytestReturnNotNoneWarning( + f"Test functions should return None, but {pyfuncitem.nodeid} returned {type(result)!r}.\n" + "Did you mean to use `assert` instead of `return`?\n" + "See https://docs.pytest.org/en/stable/how-to/assert.html#return-not-none for more information." + ) + ) + return True + + +def pytest_collect_directory( + path: Path, parent: nodes.Collector +) -> nodes.Collector | None: + pkginit = path / "__init__.py" + try: + has_pkginit = pkginit.is_file() + except PermissionError: + # See https://github.com/pytest-dev/pytest/issues/12120#issuecomment-2106349096. 
+ return None + if has_pkginit: + return Package.from_parent(parent, path=path) + return None + + +def pytest_collect_file(file_path: Path, parent: nodes.Collector) -> Module | None: + if file_path.suffix == ".py": + if not parent.session.isinitpath(file_path): + if not path_matches_patterns( + file_path, parent.config.getini("python_files") + ): + return None + ihook = parent.session.gethookproxy(file_path) + module: Module = ihook.pytest_pycollect_makemodule( + module_path=file_path, parent=parent + ) + return module + return None + + +def path_matches_patterns(path: Path, patterns: Iterable[str]) -> bool: + """Return whether path matches any of the patterns in the list of globs given.""" + return any(fnmatch_ex(pattern, path) for pattern in patterns) + + +def pytest_pycollect_makemodule(module_path: Path, parent) -> Module: + return Module.from_parent(parent, path=module_path) + + +@hookimpl(trylast=True) +def pytest_pycollect_makeitem( + collector: Module | Class, name: str, obj: object +) -> None | nodes.Item | nodes.Collector | list[nodes.Item | nodes.Collector]: + assert isinstance(collector, (Class, Module)), type(collector) + # Nothing was collected elsewhere, let's do it here. + if safe_isclass(obj): + if collector.istestclass(obj, name): + return Class.from_parent(collector, name=name, obj=obj) + elif collector.istestfunction(obj, name): + # mock seems to store unbound methods (issue473), normalize it. + obj = getattr(obj, "__func__", obj) + # We need to try and unwrap the function if it's a functools.partial + # or a functools.wrapped. + # We mustn't if it's been wrapped with mock.patch (python 2 only). + if not (inspect.isfunction(obj) or inspect.isfunction(get_real_func(obj))): + filename, lineno = getfslineno(obj) + warnings.warn_explicit( + message=PytestCollectionWarning( + f"cannot collect {name!r} because it is not a function." + ), + category=None, + filename=str(filename), + lineno=lineno + 1, + ) + elif getattr(obj, "__test__", True): + if inspect.isgeneratorfunction(obj): + fail( + f"'yield' keyword is allowed in fixtures, but not in tests ({name})", + pytrace=False, + ) + return list(collector._genfunctions(name, obj)) + return None + return None + + +class PyobjMixin(nodes.Node): + """This mix-in inherits from Node to carry over the typing information; + as it's intended to always be mixed in before a Node, + its position in the MRO is unaffected.""" + + _ALLOW_MARKERS = True + + @property + def module(self): + """Python module object this node was collected from (can be None).""" + node = self.getparent(Module) + return node.obj if node is not None else None + + @property + def cls(self): + """Python class object this node was collected from (can be None).""" + node = self.getparent(Class) + return node.obj if node is not None else None + + @property + def instance(self): + """Python instance object the function is bound to. + + Returns None if not a test method, e.g. for a standalone test function, + a class or a module. + """ + # Overridden by Function. + return None + + @property + def obj(self): + """Underlying Python object.""" + obj = getattr(self, "_obj", None) + if obj is None: + self._obj = obj = self._getobj() + # XXX evil hack + # used to avoid Function marker duplication + if self._ALLOW_MARKERS: + self.own_markers.extend(get_unpacked_marks(self.obj)) + # This assumes that `obj` is called before there is a chance + # to add custom keys to `self.keywords`, so no fear of overriding.
+ self.keywords.update((mark.name, mark) for mark in self.own_markers) + return obj + + @obj.setter + def obj(self, value): + self._obj = value + + def _getobj(self): + """Get the underlying Python object. May be overwritten by subclasses.""" + # TODO: Improve the type of `parent` such that assert/ignore aren't needed. + assert self.parent is not None + obj = self.parent.obj # type: ignore[attr-defined] + return getattr(obj, self.name) + + def getmodpath(self, stopatmodule: bool = True, includemodule: bool = False) -> str: + """Return Python path relative to the containing module.""" + parts = [] + for node in self.iter_parents(): + name = node.name + if isinstance(node, Module): + name = os.path.splitext(name)[0] + if stopatmodule: + if includemodule: + parts.append(name) + break + parts.append(name) + parts.reverse() + return ".".join(parts) + + def reportinfo(self) -> tuple[os.PathLike[str] | str, int | None, str]: + # XXX caching? + path, lineno = getfslineno(self.obj) + modpath = self.getmodpath() + return path, lineno, modpath + + +# As an optimization, these builtin attribute names are pre-ignored when +# iterating over an object during collection -- the pytest_pycollect_makeitem +# hook is not called for them. +# fmt: off +class _EmptyClass: pass # noqa: E701 +IGNORED_ATTRIBUTES = frozenset.union( + frozenset(), + # Module. + dir(types.ModuleType("empty_module")), + # Some extra module attributes the above doesn't catch. + {"__builtins__", "__file__", "__cached__"}, + # Class. + dir(_EmptyClass), + # Instance. + dir(_EmptyClass()), +) +del _EmptyClass +# fmt: on + + +class PyCollector(PyobjMixin, nodes.Collector, abc.ABC): + def funcnamefilter(self, name: str) -> bool: + return self._matches_prefix_or_glob_option("python_functions", name) + + def isnosetest(self, obj: object) -> bool: + """Look for the __test__ attribute, which is applied by the + @nose.tools.istest decorator. + """ + # We explicitly check for "is True" here to not mistakenly treat + # classes with a custom __getattr__ returning something truthy (like a + # function) as test classes. + return safe_getattr(obj, "__test__", False) is True + + def classnamefilter(self, name: str) -> bool: + return self._matches_prefix_or_glob_option("python_classes", name) + + def istestfunction(self, obj: object, name: str) -> bool: + if self.funcnamefilter(name) or self.isnosetest(obj): + if isinstance(obj, (staticmethod, classmethod)): + # staticmethods and classmethods need to be unwrapped. + obj = safe_getattr(obj, "__func__", False) + return callable(obj) and fixtures.getfixturemarker(obj) is None + else: + return False + + def istestclass(self, obj: object, name: str) -> bool: + if not (self.classnamefilter(name) or self.isnosetest(obj)): + return False + if inspect.isabstract(obj): + return False + return True + + def _matches_prefix_or_glob_option(self, option_name: str, name: str) -> bool: + """Check if the given name matches the prefix or glob-pattern defined + in ini configuration.""" + for option in self.config.getini(option_name): + if name.startswith(option): + return True + # Check that name looks like a glob-string before calling fnmatch + # because this is called for every name in each collected module, + # and fnmatch is somewhat expensive to call. + elif ("*" in option or "?" 
in option or "[" in option) and fnmatch.fnmatch( + name, option + ): + return True + return False + + def collect(self) -> Iterable[nodes.Item | nodes.Collector]: + if not getattr(self.obj, "__test__", True): + return [] + + # Avoid random getattrs and peek in the __dict__ instead. + dicts = [getattr(self.obj, "__dict__", {})] + if isinstance(self.obj, type): + for basecls in self.obj.__mro__: + dicts.append(basecls.__dict__) + + # In each class, nodes should be definition ordered. + # __dict__ is definition ordered. + seen: set[str] = set() + dict_values: list[list[nodes.Item | nodes.Collector]] = [] + collect_imported_tests = self.session.config.getini("collect_imported_tests") + ihook = self.ihook + for dic in dicts: + values: list[nodes.Item | nodes.Collector] = [] + # Note: seems like the dict can change during iteration - + # be careful not to remove the list() without consideration. + for name, obj in list(dic.items()): + if name in IGNORED_ATTRIBUTES: + continue + if name in seen: + continue + seen.add(name) + + if not collect_imported_tests and isinstance(self, Module): + # Do not collect functions and classes from other modules. + if inspect.isfunction(obj) or inspect.isclass(obj): + if obj.__module__ != self._getobj().__name__: + continue + + res = ihook.pytest_pycollect_makeitem( + collector=self, name=name, obj=obj + ) + if res is None: + continue + elif isinstance(res, list): + values.extend(res) + else: + values.append(res) + dict_values.append(values) + + # Between classes in the class hierarchy, reverse-MRO order -- nodes + # inherited from base classes should come before subclasses. + result = [] + for values in reversed(dict_values): + result.extend(values) + return result + + def _genfunctions(self, name: str, funcobj) -> Iterator[Function]: + modulecol = self.getparent(Module) + assert modulecol is not None + module = modulecol.obj + clscol = self.getparent(Class) + cls = (clscol and clscol.obj) or None + + definition = FunctionDefinition.from_parent(self, name=name, callobj=funcobj) + fixtureinfo = definition._fixtureinfo + + # pytest_generate_tests impls call metafunc.parametrize() which fills + # metafunc._calls, the outcome of the hook. + metafunc = Metafunc( + definition=definition, + fixtureinfo=fixtureinfo, + config=self.config, + cls=cls, + module=module, + _ispytest=True, + ) + methods = [] + if hasattr(module, "pytest_generate_tests"): + methods.append(module.pytest_generate_tests) + if cls is not None and hasattr(cls, "pytest_generate_tests"): + methods.append(cls().pytest_generate_tests) + self.ihook.pytest_generate_tests.call_extra(methods, dict(metafunc=metafunc)) + + if not metafunc._calls: + yield Function.from_parent(self, name=name, fixtureinfo=fixtureinfo) + else: + metafunc._recompute_direct_params_indices() + # Direct parametrizations taking place in module/class-specific + # `metafunc.parametrize` calls may have shadowed some fixtures, so make sure + # we update what the function really needs a.k.a its fixture closure. Note that + # direct parametrizations using `@pytest.mark.parametrize` have already been considered + # into making the closure using `ignore_args` arg to `getfixtureclosure`. 
+            fixtureinfo.prune_dependency_tree()
+
+            for callspec in metafunc._calls:
+                subname = f"{name}[{callspec.id}]" if callspec._idlist else name
+                yield Function.from_parent(
+                    self,
+                    name=subname,
+                    callspec=callspec,
+                    fixtureinfo=fixtureinfo,
+                    keywords={callspec.id: True},
+                    originalname=name,
+                )
+
+
+def importtestmodule(
+    path: Path,
+    config: Config,
+):
+    # We assume we are only called once per module.
+    importmode = config.getoption("--import-mode")
+    try:
+        mod = import_path(
+            path,
+            mode=importmode,
+            root=config.rootpath,
+            consider_namespace_packages=config.getini("consider_namespace_packages"),
+        )
+    except SyntaxError as e:
+        raise nodes.Collector.CollectError(
+            ExceptionInfo.from_current().getrepr(style="short")
+        ) from e
+    except ImportPathMismatchError as e:
+        raise nodes.Collector.CollectError(
+            "import file mismatch:\n"
+            "imported module {!r} has this __file__ attribute:\n"
+            "  {}\n"
+            "which is not the same as the test file we want to collect:\n"
+            "  {}\n"
+            "HINT: remove __pycache__ / .pyc files and/or use a "
+            "unique basename for your test file modules".format(*e.args)
+        ) from e
+    except ImportError as e:
+        exc_info = ExceptionInfo.from_current()
+        if config.get_verbosity() < 2:
+            exc_info.traceback = exc_info.traceback.filter(filter_traceback)
+        exc_repr = (
+            exc_info.getrepr(style="short")
+            if exc_info.traceback
+            else exc_info.exconly()
+        )
+        formatted_tb = str(exc_repr)
+        raise nodes.Collector.CollectError(
+            f"ImportError while importing test module '{path}'.\n"
+            "Hint: make sure your test modules/packages have valid Python names.\n"
+            "Traceback:\n"
+            f"{formatted_tb}"
+        ) from e
+    except skip.Exception as e:
+        if e.allow_module_level:
+            raise
+        raise nodes.Collector.CollectError(
+            "Using pytest.skip outside of a test will skip the entire module. "
+            "If that's your intention, pass `allow_module_level=True`. "
+            "If you want to skip a specific test or an entire class, "
+            "use the @pytest.mark.skip or @pytest.mark.skipif decorators."
+        ) from e
+    config.pluginmanager.consider_module(mod)
+    return mod
+
+
+class Module(nodes.File, PyCollector):
+    """Collector for test classes and functions in a Python module."""
+
+    def _getobj(self):
+        return importtestmodule(self.path, self.config)
+
+    def collect(self) -> Iterable[nodes.Item | nodes.Collector]:
+        self._register_setup_module_fixture()
+        self._register_setup_function_fixture()
+        self.session._fixturemanager.parsefactories(self)
+        return super().collect()
+
+    def _register_setup_module_fixture(self) -> None:
+        """Register an autouse, module-scoped fixture for the collected module object
+        that invokes setUpModule/tearDownModule if either or both are available.
+
+        Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
+        other fixtures (#517).
+        """
+        setup_module = _get_first_non_fixture_func(
+            self.obj, ("setUpModule", "setup_module")
+        )
+        teardown_module = _get_first_non_fixture_func(
+            self.obj, ("tearDownModule", "teardown_module")
+        )
+
+        if setup_module is None and teardown_module is None:
+            return
+
+        def xunit_setup_module_fixture(request) -> Generator[None]:
+            module = request.module
+            if setup_module is not None:
+                _call_with_optional_argument(setup_module, module)
+            yield
+            if teardown_module is not None:
+                _call_with_optional_argument(teardown_module, module)
+
+        self.session._fixturemanager._register_fixture(
+            # Use a unique name to speed up lookup.
+ name=f"_xunit_setup_module_fixture_{self.obj.__name__}", + func=xunit_setup_module_fixture, + nodeid=self.nodeid, + scope="module", + autouse=True, + ) + + def _register_setup_function_fixture(self) -> None: + """Register an autouse, function-scoped fixture for the collected module object + that invokes setup_function/teardown_function if either or both are available. + + Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with + other fixtures (#517). + """ + setup_function = _get_first_non_fixture_func(self.obj, ("setup_function",)) + teardown_function = _get_first_non_fixture_func( + self.obj, ("teardown_function",) + ) + if setup_function is None and teardown_function is None: + return + + def xunit_setup_function_fixture(request) -> Generator[None]: + if request.instance is not None: + # in this case we are bound to an instance, so we need to let + # setup_method handle this + yield + return + function = request.function + if setup_function is not None: + _call_with_optional_argument(setup_function, function) + yield + if teardown_function is not None: + _call_with_optional_argument(teardown_function, function) + + self.session._fixturemanager._register_fixture( + # Use a unique name to speed up lookup. + name=f"_xunit_setup_function_fixture_{self.obj.__name__}", + func=xunit_setup_function_fixture, + nodeid=self.nodeid, + scope="function", + autouse=True, + ) + + +class Package(nodes.Directory): + """Collector for files and directories in a Python packages -- directories + with an `__init__.py` file. + + .. note:: + + Directories without an `__init__.py` file are instead collected by + :class:`~pytest.Dir` by default. Both are :class:`~pytest.Directory` + collectors. + + .. versionchanged:: 8.0 + + Now inherits from :class:`~pytest.Directory`. + """ + + def __init__( + self, + fspath: LEGACY_PATH | None, + parent: nodes.Collector, + # NOTE: following args are unused: + config=None, + session=None, + nodeid=None, + path: Path | None = None, + ) -> None: + # NOTE: Could be just the following, but kept as-is for compat. + # super().__init__(self, fspath, parent=parent) + session = parent.session + super().__init__( + fspath=fspath, + path=path, + parent=parent, + config=config, + session=session, + nodeid=nodeid, + ) + + def setup(self) -> None: + init_mod = importtestmodule(self.path / "__init__.py", self.config) + + # Not using fixtures to call setup_module here because autouse fixtures + # from packages are not called automatically (#4085). + setup_module = _get_first_non_fixture_func( + init_mod, ("setUpModule", "setup_module") + ) + if setup_module is not None: + _call_with_optional_argument(setup_module, init_mod) + + teardown_module = _get_first_non_fixture_func( + init_mod, ("tearDownModule", "teardown_module") + ) + if teardown_module is not None: + func = partial(_call_with_optional_argument, teardown_module, init_mod) + self.addfinalizer(func) + + def collect(self) -> Iterable[nodes.Item | nodes.Collector]: + # Always collect __init__.py first. 
+        def sort_key(entry: os.DirEntry[str]) -> object:
+            return (entry.name != "__init__.py", entry.name)
+
+        config = self.config
+        col: nodes.Collector | None
+        cols: Sequence[nodes.Collector]
+        ihook = self.ihook
+        for direntry in scandir(self.path, sort_key):
+            if direntry.is_dir():
+                path = Path(direntry.path)
+                if not self.session.isinitpath(path, with_parents=True):
+                    if ihook.pytest_ignore_collect(collection_path=path, config=config):
+                        continue
+                col = ihook.pytest_collect_directory(path=path, parent=self)
+                if col is not None:
+                    yield col
+
+            elif direntry.is_file():
+                path = Path(direntry.path)
+                if not self.session.isinitpath(path):
+                    if ihook.pytest_ignore_collect(collection_path=path, config=config):
+                        continue
+                cols = ihook.pytest_collect_file(file_path=path, parent=self)
+                yield from cols
+
+
+def _call_with_optional_argument(func, arg) -> None:
+    """Call the given function with the given argument if func accepts one argument,
+    otherwise call func without arguments."""
+    arg_count = func.__code__.co_argcount
+    if inspect.ismethod(func):
+        arg_count -= 1
+    if arg_count:
+        func(arg)
+    else:
+        func()
+
+
+def _get_first_non_fixture_func(obj: object, names: Iterable[str]) -> object | None:
+    """Return the attribute from the given object to be used as a setup/teardown
+    xunit-style function, but only if not marked as a fixture to avoid calling it twice.
+    """
+    for name in names:
+        meth: object | None = getattr(obj, name, None)
+        if meth is not None and fixtures.getfixturemarker(meth) is None:
+            return meth
+    return None
+
+
+class Class(PyCollector):
+    """Collector for test methods (and nested classes) in a Python class."""
+
+    @classmethod
+    def from_parent(cls, parent, *, name, obj=None, **kw) -> Self:  # type: ignore[override]
+        """The public constructor."""
+        return super().from_parent(name=name, parent=parent, **kw)
+
+    def newinstance(self):
+        return self.obj()
+
+    def collect(self) -> Iterable[nodes.Item | nodes.Collector]:
+        if not safe_getattr(self.obj, "__test__", True):
+            return []
+        if hasinit(self.obj):
+            assert self.parent is not None
+            self.warn(
+                PytestCollectionWarning(
+                    f"cannot collect test class {self.obj.__name__!r} because it has a "
+                    f"__init__ constructor (from: {self.parent.nodeid})"
+                )
+            )
+            return []
+        elif hasnew(self.obj):
+            assert self.parent is not None
+            self.warn(
+                PytestCollectionWarning(
+                    f"cannot collect test class {self.obj.__name__!r} because it has a "
+                    f"__new__ constructor (from: {self.parent.nodeid})"
+                )
+            )
+            return []
+
+        self._register_setup_class_fixture()
+        self._register_setup_method_fixture()
+
+        self.session._fixturemanager.parsefactories(self.newinstance(), self.nodeid)
+
+        return super().collect()
+
+    def _register_setup_class_fixture(self) -> None:
+        """Register an autouse, class scoped fixture into the collected class object
+        that invokes setup_class/teardown_class if either or both are available.
+
+        Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
+        other fixtures (#517).
+ """ + setup_class = _get_first_non_fixture_func(self.obj, ("setup_class",)) + teardown_class = _get_first_non_fixture_func(self.obj, ("teardown_class",)) + if setup_class is None and teardown_class is None: + return + + def xunit_setup_class_fixture(request) -> Generator[None]: + cls = request.cls + if setup_class is not None: + func = getimfunc(setup_class) + _call_with_optional_argument(func, cls) + yield + if teardown_class is not None: + func = getimfunc(teardown_class) + _call_with_optional_argument(func, cls) + + self.session._fixturemanager._register_fixture( + # Use a unique name to speed up lookup. + name=f"_xunit_setup_class_fixture_{self.obj.__qualname__}", + func=xunit_setup_class_fixture, + nodeid=self.nodeid, + scope="class", + autouse=True, + ) + + def _register_setup_method_fixture(self) -> None: + """Register an autouse, function scoped fixture into the collected class object + that invokes setup_method/teardown_method if either or both are available. + + Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with + other fixtures (#517). + """ + setup_name = "setup_method" + setup_method = _get_first_non_fixture_func(self.obj, (setup_name,)) + teardown_name = "teardown_method" + teardown_method = _get_first_non_fixture_func(self.obj, (teardown_name,)) + if setup_method is None and teardown_method is None: + return + + def xunit_setup_method_fixture(request) -> Generator[None]: + instance = request.instance + method = request.function + if setup_method is not None: + func = getattr(instance, setup_name) + _call_with_optional_argument(func, method) + yield + if teardown_method is not None: + func = getattr(instance, teardown_name) + _call_with_optional_argument(func, method) + + self.session._fixturemanager._register_fixture( + # Use a unique name to speed up lookup. + name=f"_xunit_setup_method_fixture_{self.obj.__qualname__}", + func=xunit_setup_method_fixture, + nodeid=self.nodeid, + scope="function", + autouse=True, + ) + + +def hasinit(obj: object) -> bool: + init: object = getattr(obj, "__init__", None) + if init: + return init != object.__init__ + return False + + +def hasnew(obj: object) -> bool: + new: object = getattr(obj, "__new__", None) + if new: + return new != object.__new__ + return False + + +@final +@dataclasses.dataclass(frozen=True) +class IdMaker: + """Make IDs for a parametrization.""" + + __slots__ = ( + "argnames", + "config", + "func_name", + "idfn", + "ids", + "nodeid", + "parametersets", + ) + + # The argnames of the parametrization. + argnames: Sequence[str] + # The ParameterSets of the parametrization. + parametersets: Sequence[ParameterSet] + # Optionally, a user-provided callable to make IDs for parameters in a + # ParameterSet. + idfn: Callable[[Any], object | None] | None + # Optionally, explicit IDs for ParameterSets by index. + ids: Sequence[object | None] | None + # Optionally, the pytest config. + # Used for controlling ASCII escaping, and for calling the + # :hook:`pytest_make_parametrize_id` hook. + config: Config | None + # Optionally, the ID of the node being parametrized. + # Used only for clearer error messages. + nodeid: str | None + # Optionally, the ID of the function being parametrized. + # Used only for clearer error messages. + func_name: str | None + + def make_unique_parameterset_ids(self) -> list[str | _HiddenParam]: + """Make a unique identifier for each ParameterSet, that may be used to + identify the parametrization in a node ID. 
+
+        Format is <prm_1_token>-...-<prm_n_token>[counter], where prm_x_token is
+        - user-provided id, if given
+        - else an id derived from the value, applicable for certain types
+        - else <argname><parameterset index>
+        The counter suffix is appended only in case a string wouldn't be unique
+        otherwise.
+        """
+        resolved_ids = list(self._resolve_ids())
+        # All IDs must be unique!
+        if len(resolved_ids) != len(set(resolved_ids)):
+            # Record the number of occurrences of each ID.
+            id_counts = Counter(resolved_ids)
+            # Map the ID to its next suffix.
+            id_suffixes: dict[str, int] = defaultdict(int)
+            # Suffix non-unique IDs to make them unique.
+            for index, id in enumerate(resolved_ids):
+                if id_counts[id] > 1:
+                    if id is HIDDEN_PARAM:
+                        self._complain_multiple_hidden_parameter_sets()
+                    suffix = ""
+                    if id and id[-1].isdigit():
+                        suffix = "_"
+                    new_id = f"{id}{suffix}{id_suffixes[id]}"
+                    while new_id in set(resolved_ids):
+                        id_suffixes[id] += 1
+                        new_id = f"{id}{suffix}{id_suffixes[id]}"
+                    resolved_ids[index] = new_id
+                    id_suffixes[id] += 1
+            assert len(resolved_ids) == len(set(resolved_ids)), (
+                f"Internal error: {resolved_ids=}"
+            )
+        return resolved_ids
+
+    def _resolve_ids(self) -> Iterable[str | _HiddenParam]:
+        """Resolve IDs for all ParameterSets (may contain duplicates)."""
+        for idx, parameterset in enumerate(self.parametersets):
+            if parameterset.id is not None:
+                # ID provided directly - pytest.param(..., id="...")
+                if parameterset.id is HIDDEN_PARAM:
+                    yield HIDDEN_PARAM
+                else:
+                    yield _ascii_escaped_by_config(parameterset.id, self.config)
+            elif self.ids and idx < len(self.ids) and self.ids[idx] is not None:
+                # ID provided in the IDs list - parametrize(..., ids=[...]).
+                if self.ids[idx] is HIDDEN_PARAM:
+                    yield HIDDEN_PARAM
+                else:
+                    yield self._idval_from_value_required(self.ids[idx], idx)
+            else:
+                # ID not provided - generate it.
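+                # For example (illustrative): pytest.param(2, "x") with
+                # argnames ("n", "s") resolves to the generated id "2-x".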
+ yield "-".join( + self._idval(val, argname, idx) + for val, argname in zip(parameterset.values, self.argnames) + ) + + def _idval(self, val: object, argname: str, idx: int) -> str: + """Make an ID for a parameter in a ParameterSet.""" + idval = self._idval_from_function(val, argname, idx) + if idval is not None: + return idval + idval = self._idval_from_hook(val, argname) + if idval is not None: + return idval + idval = self._idval_from_value(val) + if idval is not None: + return idval + return self._idval_from_argname(argname, idx) + + def _idval_from_function(self, val: object, argname: str, idx: int) -> str | None: + """Try to make an ID for a parameter in a ParameterSet using the + user-provided id callable, if given.""" + if self.idfn is None: + return None + try: + id = self.idfn(val) + except Exception as e: + prefix = f"{self.nodeid}: " if self.nodeid is not None else "" + msg = "error raised while trying to determine id of parameter '{}' at position {}" + msg = prefix + msg.format(argname, idx) + raise ValueError(msg) from e + if id is None: + return None + return self._idval_from_value(id) + + def _idval_from_hook(self, val: object, argname: str) -> str | None: + """Try to make an ID for a parameter in a ParameterSet by calling the + :hook:`pytest_make_parametrize_id` hook.""" + if self.config: + id: str | None = self.config.hook.pytest_make_parametrize_id( + config=self.config, val=val, argname=argname + ) + return id + return None + + def _idval_from_value(self, val: object) -> str | None: + """Try to make an ID for a parameter in a ParameterSet from its value, + if the value type is supported.""" + if isinstance(val, (str, bytes)): + return _ascii_escaped_by_config(val, self.config) + elif val is None or isinstance(val, (float, int, bool, complex)): + return str(val) + elif isinstance(val, re.Pattern): + return ascii_escaped(val.pattern) + elif val is NOTSET: + # Fallback to default. Note that NOTSET is an enum.Enum. + pass + elif isinstance(val, enum.Enum): + return str(val) + elif isinstance(getattr(val, "__name__", None), str): + # Name of a class, function, module, etc. + name: str = getattr(val, "__name__") + return name + return None + + def _idval_from_value_required(self, val: object, idx: int) -> str: + """Like _idval_from_value(), but fails if the type is not supported.""" + id = self._idval_from_value(val) + if id is not None: + return id + + # Fail. + prefix = self._make_error_prefix() + msg = ( + f"{prefix}ids contains unsupported value {saferepr(val)} (type: {type(val)!r}) at index {idx}. " + "Supported types are: str, bytes, int, float, complex, bool, enum, regex or anything with a __name__." + ) + fail(msg, pytrace=False) + + @staticmethod + def _idval_from_argname(argname: str, idx: int) -> str: + """Make an ID for a parameter in a ParameterSet from the argument name + and the index of the ParameterSet.""" + return str(argname) + str(idx) + + def _complain_multiple_hidden_parameter_sets(self) -> NoReturn: + fail( + f"{self._make_error_prefix()}multiple instances of HIDDEN_PARAM " + "cannot be used in the same parametrize call, " + "because the tests names need to be unique." + ) + + def _make_error_prefix(self) -> str: + if self.func_name is not None: + return f"In {self.func_name}: " + elif self.nodeid is not None: + return f"In {self.nodeid}: " + else: + return "" + + +@final +@dataclasses.dataclass(frozen=True) +class CallSpec2: + """A planned parameterized invocation of a test function. 
+ + Calculated during collection for a given test function's Metafunc. + Once collection is over, each callspec is turned into a single Item + and stored in item.callspec. + """ + + # arg name -> arg value which will be passed to a fixture or pseudo-fixture + # of the same name. (indirect or direct parametrization respectively) + params: dict[str, object] = dataclasses.field(default_factory=dict) + # arg name -> arg index. + indices: dict[str, int] = dataclasses.field(default_factory=dict) + # arg name -> parameter scope. + # Used for sorting parametrized resources. + _arg2scope: Mapping[str, Scope] = dataclasses.field(default_factory=dict) + # Parts which will be added to the item's name in `[..]` separated by "-". + _idlist: Sequence[str] = dataclasses.field(default_factory=tuple) + # Marks which will be applied to the item. + marks: list[Mark] = dataclasses.field(default_factory=list) + + def setmulti( + self, + *, + argnames: Iterable[str], + valset: Iterable[object], + id: str | _HiddenParam, + marks: Iterable[Mark | MarkDecorator], + scope: Scope, + param_index: int, + nodeid: str, + ) -> CallSpec2: + params = self.params.copy() + indices = self.indices.copy() + arg2scope = dict(self._arg2scope) + for arg, val in zip(argnames, valset): + if arg in params: + raise nodes.Collector.CollectError( + f"{nodeid}: duplicate parametrization of {arg!r}" + ) + params[arg] = val + indices[arg] = param_index + arg2scope[arg] = scope + return CallSpec2( + params=params, + indices=indices, + _arg2scope=arg2scope, + _idlist=self._idlist if id is HIDDEN_PARAM else [*self._idlist, id], + marks=[*self.marks, *normalize_mark_list(marks)], + ) + + def getparam(self, name: str) -> object: + try: + return self.params[name] + except KeyError as e: + raise ValueError(name) from e + + @property + def id(self) -> str: + return "-".join(self._idlist) + + +def get_direct_param_fixture_func(request: FixtureRequest) -> Any: + return request.param + + +# Used for storing pseudo fixturedefs for direct parametrization. +name2pseudofixturedef_key = StashKey[dict[str, FixtureDef[Any]]]() + + +@final +class Metafunc: + """Objects passed to the :hook:`pytest_generate_tests` hook. + + They help to inspect a test function and to generate tests according to + test configuration or values specified in the class or module where a + test function is defined. + """ + + def __init__( + self, + definition: FunctionDefinition, + fixtureinfo: fixtures.FuncFixtureInfo, + config: Config, + cls=None, + module=None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + + #: Access to the underlying :class:`_pytest.python.FunctionDefinition`. + self.definition = definition + + #: Access to the :class:`pytest.Config` object for the test session. + self.config = config + + #: The module object where the test function is defined in. + self.module = module + + #: Underlying Python test function. + self.function = definition.obj + + #: Set of fixture names required by the test function. + self.fixturenames = fixtureinfo.names_closure + + #: Class object where the test function is defined in or ``None``. + self.cls = cls + + self._arg2fixturedefs = fixtureinfo.name2fixturedefs + + # Result of parametrize(). 
+        self._calls: list[CallSpec2] = []
+
+        self._params_directness: dict[str, Literal["indirect", "direct"]] = {}
+
+    def parametrize(
+        self,
+        argnames: str | Sequence[str],
+        argvalues: Iterable[ParameterSet | Sequence[object] | object],
+        indirect: bool | Sequence[str] = False,
+        ids: Iterable[object | None] | Callable[[Any], object | None] | None = None,
+        scope: _ScopeName | None = None,
+        *,
+        _param_mark: Mark | None = None,
+    ) -> None:
+        """Add new invocations to the underlying test function using the list
+        of argvalues for the given argnames. Parametrization is performed
+        during the collection phase. If you need to set up expensive resources,
+        consider setting ``indirect`` to do it at test setup time instead.
+
+        Can be called multiple times per test function (but only on different
+        argument names), in which case each call parametrizes all previous
+        parametrizations, e.g.
+
+        ::
+
+            unparametrized:         t
+            parametrize ["x", "y"]: t[x], t[y]
+            parametrize [1, 2]:     t[x-1], t[x-2], t[y-1], t[y-2]
+
+        :param argnames:
+            A comma-separated string denoting one or more argument names, or
+            a list/tuple of argument strings.
+
+        :param argvalues:
+            The list of argvalues determines how often a test is invoked with
+            different argument values.
+
+            If only one argname was specified argvalues is a list of values.
+            If N argnames were specified, argvalues must be a list of
+            N-tuples, where each tuple-element specifies a value for its
+            respective argname.
+
+        :param indirect:
+            A list of arguments' names (a subset of argnames) or a boolean.
+            If True, the list contains all names from the argnames. Each
+            argvalue corresponding to an argname in this list will
+            be passed as request.param to its respective argname fixture
+            function so that it can perform more expensive setups during the
+            setup phase of a test rather than at collection time.
+
+        :param ids:
+            Sequence of (or generator for) ids for ``argvalues``,
+            or a callable to return part of the id for each argvalue.
+
+            With sequences (and generators like ``itertools.count()``) the
+            returned ids should be of type ``string``, ``int``, ``float``,
+            ``bool``, or ``None``.
+            They are mapped to the corresponding index in ``argvalues``.
+            ``None`` means to use the auto-generated id.
+
+            .. versionadded:: 8.4
+                :ref:`hidden-param` means to hide the parameter set
+                from the test name. Can be used at most once, as
+                test names need to be unique.
+
+            If it is a callable it will be called for each entry in
+            ``argvalues``, and the return value is used as part of the
+            auto-generated id for the whole set (where parts are joined with
+            dashes ("-")).
+            This is useful to provide more specific ids for certain items, e.g.
+            dates. Returning ``None`` will use an auto-generated id.
+
+            If no ids are provided they will be generated automatically from
+            the argvalues.
+
+        :param scope:
+            If specified, it denotes the scope of the parameters.
+            The scope is used for grouping tests by parameter instances.
+            It will also override any fixture-function defined scope, allowing
+            a dynamic scope to be set using test context or configuration.
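+
+        A minimal sketch (hypothetical hook, for illustration only)::
+
+            def pytest_generate_tests(metafunc):
+                if "n" in metafunc.fixturenames:
+                    metafunc.parametrize("n", [1, 2], ids=["one", "two"])
+
+        This collects ``test_foo[one]`` and ``test_foo[two]`` for a test
+        function ``test_foo(n)``.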
+ """ + nodeid = self.definition.nodeid + + argnames, parametersets = ParameterSet._for_parametrize( + argnames, + argvalues, + self.function, + self.config, + nodeid=self.definition.nodeid, + ) + del argvalues + + if "request" in argnames: + fail( + f"{nodeid}: 'request' is a reserved name and cannot be used in @pytest.mark.parametrize", + pytrace=False, + ) + + if scope is not None: + scope_ = Scope.from_user( + scope, descr=f"parametrize() call in {self.function.__name__}" + ) + else: + scope_ = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect) + + self._validate_if_using_arg_names(argnames, indirect) + + # Use any already (possibly) generated ids with parametrize Marks. + if _param_mark and _param_mark._param_ids_from: + generated_ids = _param_mark._param_ids_from._param_ids_generated + if generated_ids is not None: + ids = generated_ids + + ids = self._resolve_parameter_set_ids( + argnames, ids, parametersets, nodeid=self.definition.nodeid + ) + + # Store used (possibly generated) ids with parametrize Marks. + if _param_mark and _param_mark._param_ids_from and generated_ids is None: + object.__setattr__(_param_mark._param_ids_from, "_param_ids_generated", ids) + + # Calculate directness. + arg_directness = self._resolve_args_directness(argnames, indirect) + self._params_directness.update(arg_directness) + + # Add direct parametrizations as fixturedefs to arg2fixturedefs by + # registering artificial "pseudo" FixtureDef's such that later at test + # setup time we can rely on FixtureDefs to exist for all argnames. + node = None + # For scopes higher than function, a "pseudo" FixtureDef might have + # already been created for the scope. We thus store and cache the + # FixtureDef on the node related to the scope. + if scope_ is Scope.Function: + name2pseudofixturedef = None + else: + collector = self.definition.parent + assert collector is not None + node = get_scope_node(collector, scope_) + if node is None: + # If used class scope and there is no class, use module-level + # collector (for now). + if scope_ is Scope.Class: + assert isinstance(collector, Module) + node = collector + # If used package scope and there is no package, use session + # (for now). + elif scope_ is Scope.Package: + node = collector.session + else: + assert False, f"Unhandled missing scope: {scope}" + default: dict[str, FixtureDef[Any]] = {} + name2pseudofixturedef = node.stash.setdefault( + name2pseudofixturedef_key, default + ) + for argname in argnames: + if arg_directness[argname] == "indirect": + continue + if name2pseudofixturedef is not None and argname in name2pseudofixturedef: + fixturedef = name2pseudofixturedef[argname] + else: + fixturedef = FixtureDef( + config=self.config, + baseid="", + argname=argname, + func=get_direct_param_fixture_func, + scope=scope_, + params=None, + ids=None, + _ispytest=True, + ) + if name2pseudofixturedef is not None: + name2pseudofixturedef[argname] = fixturedef + self._arg2fixturedefs[argname] = [fixturedef] + + # Create the new calls: if we are parametrize() multiple times (by applying the decorator + # more than once) then we accumulate those calls generating the cartesian product + # of all calls. 
+        newcalls = []
+        for callspec in self._calls or [CallSpec2()]:
+            for param_index, (param_id, param_set) in enumerate(
+                zip(ids, parametersets)
+            ):
+                newcallspec = callspec.setmulti(
+                    argnames=argnames,
+                    valset=param_set.values,
+                    id=param_id,
+                    marks=param_set.marks,
+                    scope=scope_,
+                    param_index=param_index,
+                    nodeid=nodeid,
+                )
+                newcalls.append(newcallspec)
+        self._calls = newcalls
+
+    def _resolve_parameter_set_ids(
+        self,
+        argnames: Sequence[str],
+        ids: Iterable[object | None] | Callable[[Any], object | None] | None,
+        parametersets: Sequence[ParameterSet],
+        nodeid: str,
+    ) -> list[str | _HiddenParam]:
+        """Resolve the actual ids for the given parameter sets.
+
+        :param argnames:
+            Argument names passed to ``parametrize()``.
+        :param ids:
+            The `ids` parameter of the ``parametrize()`` call (see docs).
+        :param parametersets:
+            The parameter sets, each containing a set of values corresponding
+            to ``argnames``.
+        :param nodeid:
+            The nodeid of the definition item that generated this
+            parametrization.
+        :returns:
+            List with ids for each parameter set given.
+        """
+        if ids is None:
+            idfn = None
+            ids_ = None
+        elif callable(ids):
+            idfn = ids
+            ids_ = None
+        else:
+            idfn = None
+            ids_ = self._validate_ids(ids, parametersets, self.function.__name__)
+        id_maker = IdMaker(
+            argnames,
+            parametersets,
+            idfn,
+            ids_,
+            self.config,
+            nodeid=nodeid,
+            func_name=self.function.__name__,
+        )
+        return id_maker.make_unique_parameterset_ids()
+
+    def _validate_ids(
+        self,
+        ids: Iterable[object | None],
+        parametersets: Sequence[ParameterSet],
+        func_name: str,
+    ) -> list[object | None]:
+        try:
+            num_ids = len(ids)  # type: ignore[arg-type]
+        except TypeError:
+            try:
+                iter(ids)
+            except TypeError as e:
+                raise TypeError("ids must be a callable or an iterable") from e
+            num_ids = len(parametersets)
+
+        # num_ids == 0 is a special case: https://github.com/pytest-dev/pytest/issues/1849
+        if num_ids != len(parametersets) and num_ids != 0:
+            msg = "In {}: {} parameter sets specified, with different number of ids: {}"
+            fail(msg.format(func_name, len(parametersets), num_ids), pytrace=False)
+
+        return list(itertools.islice(ids, num_ids))
+
+    def _resolve_args_directness(
+        self,
+        argnames: Sequence[str],
+        indirect: bool | Sequence[str],
+    ) -> dict[str, Literal["indirect", "direct"]]:
+        """Resolve whether each parametrized argument must be considered an indirect
+        parameter to a fixture of the same name, or a direct parameter to the
+        parametrized function, based on the ``indirect`` parameter of the
+        ``parametrize()`` call.
+
+        :param argnames:
+            List of argument names passed to ``parametrize()``.
+        :param indirect:
+            Same as the ``indirect`` parameter of ``parametrize()``.
+        :returns:
+            A dict mapping each arg name to either "indirect" or "direct".
+ """ + arg_directness: dict[str, Literal["indirect", "direct"]] + if isinstance(indirect, bool): + arg_directness = dict.fromkeys( + argnames, "indirect" if indirect else "direct" + ) + elif isinstance(indirect, Sequence): + arg_directness = dict.fromkeys(argnames, "direct") + for arg in indirect: + if arg not in argnames: + fail( + f"In {self.function.__name__}: indirect fixture '{arg}' doesn't exist", + pytrace=False, + ) + arg_directness[arg] = "indirect" + else: + fail( + f"In {self.function.__name__}: expected Sequence or boolean" + f" for indirect, got {type(indirect).__name__}", + pytrace=False, + ) + return arg_directness + + def _validate_if_using_arg_names( + self, + argnames: Sequence[str], + indirect: bool | Sequence[str], + ) -> None: + """Check if all argnames are being used, by default values, or directly/indirectly. + + :param List[str] argnames: List of argument names passed to ``parametrize()``. + :param indirect: Same as the ``indirect`` parameter of ``parametrize()``. + :raises ValueError: If validation fails. + """ + default_arg_names = set(get_default_arg_names(self.function)) + func_name = self.function.__name__ + for arg in argnames: + if arg not in self.fixturenames: + if arg in default_arg_names: + fail( + f"In {func_name}: function already takes an argument '{arg}' with a default value", + pytrace=False, + ) + else: + if isinstance(indirect, Sequence): + name = "fixture" if arg in indirect else "argument" + else: + name = "fixture" if indirect else "argument" + fail( + f"In {func_name}: function uses no {name} '{arg}'", + pytrace=False, + ) + + def _recompute_direct_params_indices(self) -> None: + for argname, param_type in self._params_directness.items(): + if param_type == "direct": + for i, callspec in enumerate(self._calls): + callspec.indices[argname] = i + + +def _find_parametrized_scope( + argnames: Sequence[str], + arg2fixturedefs: Mapping[str, Sequence[fixtures.FixtureDef[object]]], + indirect: bool | Sequence[str], +) -> Scope: + """Find the most appropriate scope for a parametrized call based on its arguments. + + When there's at least one direct argument, always use "function" scope. + + When a test function is parametrized and all its arguments are indirect + (e.g. fixtures), return the most narrow scope based on the fixtures used. + + Related to issue #1832, based on code posted by @Kingdread. + """ + if isinstance(indirect, Sequence): + all_arguments_are_fixtures = len(indirect) == len(argnames) + else: + all_arguments_are_fixtures = bool(indirect) + + if all_arguments_are_fixtures: + fixturedefs = arg2fixturedefs or {} + used_scopes = [ + fixturedef[-1]._scope + for name, fixturedef in fixturedefs.items() + if name in argnames + ] + # Takes the most narrow scope from used fixtures. + return min(used_scopes, default=Scope.Function) + + return Scope.Function + + +def _ascii_escaped_by_config(val: str | bytes, config: Config | None) -> str: + if config is None: + escape_option = False + else: + escape_option = config.getini( + "disable_test_id_escaping_and_forfeit_all_rights_to_community_support" + ) + # TODO: If escaping is turned off and the user passes bytes, + # will return a bytes. For now we ignore this but the + # code *probably* doesn't handle this case. + return val if escape_option else ascii_escaped(val) # type: ignore + + +class Function(PyobjMixin, nodes.Item): + """Item responsible for setting up and executing a Python test function. 
+ + :param name: + The full function name, including any decorations like those + added by parametrization (``my_func[my_param]``). + :param parent: + The parent Node. + :param config: + The pytest Config object. + :param callspec: + If given, this function has been parametrized and the callspec contains + meta information about the parametrization. + :param callobj: + If given, the object which will be called when the Function is invoked, + otherwise the callobj will be obtained from ``parent`` using ``originalname``. + :param keywords: + Keywords bound to the function object for "-k" matching. + :param session: + The pytest Session object. + :param fixtureinfo: + Fixture information already resolved at this fixture node.. + :param originalname: + The attribute name to use for accessing the underlying function object. + Defaults to ``name``. Set this if name is different from the original name, + for example when it contains decorations like those added by parametrization + (``my_func[my_param]``). + """ + + # Disable since functions handle it themselves. + _ALLOW_MARKERS = False + + def __init__( + self, + name: str, + parent, + config: Config | None = None, + callspec: CallSpec2 | None = None, + callobj=NOTSET, + keywords: Mapping[str, Any] | None = None, + session: Session | None = None, + fixtureinfo: FuncFixtureInfo | None = None, + originalname: str | None = None, + ) -> None: + super().__init__(name, parent, config=config, session=session) + + if callobj is not NOTSET: + self._obj = callobj + self._instance = getattr(callobj, "__self__", None) + + #: Original function name, without any decorations (for example + #: parametrization adds a ``"[...]"`` suffix to function names), used to access + #: the underlying function object from ``parent`` (in case ``callobj`` is not given + #: explicitly). + #: + #: .. versionadded:: 3.0 + self.originalname = originalname or name + + # Note: when FunctionDefinition is introduced, we should change ``originalname`` + # to a readonly property that returns FunctionDefinition.name. + + self.own_markers.extend(get_unpacked_marks(self.obj)) + if callspec: + self.callspec = callspec + self.own_markers.extend(callspec.marks) + + # todo: this is a hell of a hack + # https://github.com/pytest-dev/pytest/issues/4569 + # Note: the order of the updates is important here; indicates what + # takes priority (ctor argument over function attributes over markers). + # Take own_markers only; NodeKeywords handles parent traversal on its own. + self.keywords.update((mark.name, mark) for mark in self.own_markers) + self.keywords.update(self.obj.__dict__) + if keywords: + self.keywords.update(keywords) + + if fixtureinfo is None: + fm = self.session._fixturemanager + fixtureinfo = fm.getfixtureinfo(self, self.obj, self.cls) + self._fixtureinfo: FuncFixtureInfo = fixtureinfo + self.fixturenames = fixtureinfo.names_closure + self._initrequest() + + # todo: determine sound type limitations + @classmethod + def from_parent(cls, parent, **kw) -> Self: + """The public constructor.""" + return super().from_parent(parent=parent, **kw) + + def _initrequest(self) -> None: + self.funcargs: dict[str, object] = {} + self._request = fixtures.TopRequest(self, _ispytest=True) + + @property + def function(self): + """Underlying python 'function' object.""" + return getimfunc(self.obj) + + @property + def instance(self): + try: + return self._instance + except AttributeError: + if isinstance(self.parent, Class): + # Each Function gets a fresh class instance. 
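+                # (Illustrative note, not in the original source: two test
+                # methods on the same class therefore never share `self`;
+                # shared state must go through fixtures or class attributes.)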
+ self._instance = self._getinstance() + else: + self._instance = None + return self._instance + + def _getinstance(self): + if isinstance(self.parent, Class): + # Each Function gets a fresh class instance. + return self.parent.newinstance() + else: + return None + + def _getobj(self): + instance = self.instance + if instance is not None: + parent_obj = instance + else: + assert self.parent is not None + parent_obj = self.parent.obj # type: ignore[attr-defined] + return getattr(parent_obj, self.originalname) + + @property + def _pyfuncitem(self): + """(compatonly) for code expecting pytest-2.2 style request objects.""" + return self + + def runtest(self) -> None: + """Execute the underlying test function.""" + self.ihook.pytest_pyfunc_call(pyfuncitem=self) + + def setup(self) -> None: + self._request._fillfixtures() + + def _traceback_filter(self, excinfo: ExceptionInfo[BaseException]) -> Traceback: + if hasattr(self, "_obj") and not self.config.getoption("fulltrace", False): + code = _pytest._code.Code.from_function(get_real_func(self.obj)) + path, firstlineno = code.path, code.firstlineno + traceback = excinfo.traceback + ntraceback = traceback.cut(path=path, firstlineno=firstlineno) + if ntraceback == traceback: + ntraceback = ntraceback.cut(path=path) + if ntraceback == traceback: + ntraceback = ntraceback.filter(filter_traceback) + if not ntraceback: + ntraceback = traceback + ntraceback = ntraceback.filter(excinfo) + + # issue364: mark all but first and last frames to + # only show a single-line message for each frame. + if self.config.getoption("tbstyle", "auto") == "auto": + if len(ntraceback) > 2: + ntraceback = Traceback( + ( + ntraceback[0], + *(t.with_repr_style("short") for t in ntraceback[1:-1]), + ntraceback[-1], + ) + ) + + return ntraceback + return excinfo.traceback + + # TODO: Type ignored -- breaks Liskov Substitution. 
+ def repr_failure( # type: ignore[override] + self, + excinfo: ExceptionInfo[BaseException], + ) -> str | TerminalRepr: + style = self.config.getoption("tbstyle", "auto") + if style == "auto": + style = "long" + return self._repr_failure_py(excinfo, style=style) + + +class FunctionDefinition(Function): + """This class is a stop gap solution until we evolve to have actual function + definition nodes and manage to get rid of ``metafunc``.""" + + def runtest(self) -> None: + raise RuntimeError("function definitions are not supposed to be run as tests") + + setup = runtest diff --git a/.venv/lib/python3.11/site-packages/_pytest/python_api.py b/.venv/lib/python3.11/site-packages/_pytest/python_api.py new file mode 100644 index 0000000..74346c3 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/python_api.py @@ -0,0 +1,809 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +from collections.abc import Collection +from collections.abc import Mapping +from collections.abc import Sequence +from collections.abc import Sized +from decimal import Decimal +import math +from numbers import Complex +import pprint +import sys +from typing import Any +from typing import TYPE_CHECKING + + +if TYPE_CHECKING: + from numpy import ndarray + + +def _compare_approx( + full_object: object, + message_data: Sequence[tuple[str, str, str]], + number_of_elements: int, + different_ids: Sequence[object], + max_abs_diff: float, + max_rel_diff: float, +) -> list[str]: + message_list = list(message_data) + message_list.insert(0, ("Index", "Obtained", "Expected")) + max_sizes = [0, 0, 0] + for index, obtained, expected in message_list: + max_sizes[0] = max(max_sizes[0], len(index)) + max_sizes[1] = max(max_sizes[1], len(obtained)) + max_sizes[2] = max(max_sizes[2], len(expected)) + explanation = [ + f"comparison failed. Mismatched elements: {len(different_ids)} / {number_of_elements}:", + f"Max absolute difference: {max_abs_diff}", + f"Max relative difference: {max_rel_diff}", + ] + [ + f"{indexes:<{max_sizes[0]}} | {obtained:<{max_sizes[1]}} | {expected:<{max_sizes[2]}}" + for indexes, obtained, expected in message_list + ] + return explanation + + +# builtin pytest.approx helper + + +class ApproxBase: + """Provide shared utilities for making approximate comparisons between + numbers or sequences of numbers.""" + + # Tell numpy to use our `__eq__` operator instead of its. + __array_ufunc__ = None + __array_priority__ = 100 + + def __init__(self, expected, rel=None, abs=None, nan_ok: bool = False) -> None: + __tracebackhide__ = True + self.expected = expected + self.abs = abs + self.rel = rel + self.nan_ok = nan_ok + self._check_type() + + def __repr__(self) -> str: + raise NotImplementedError + + def _repr_compare(self, other_side: Any) -> list[str]: + return [ + "comparison failed", + f"Obtained: {other_side}", + f"Expected: {self}", + ] + + def __eq__(self, actual) -> bool: + return all( + a == self._approx_scalar(x) for a, x in self._yield_comparisons(actual) + ) + + def __bool__(self): + __tracebackhide__ = True + raise AssertionError( + "approx() is not supported in a boolean context.\nDid you mean: `assert a == approx(b)`?" + ) + + # Ignore type because of https://github.com/python/mypy/issues/4266. 
+    __hash__ = None  # type: ignore
+
+    def __ne__(self, actual) -> bool:
+        return not (actual == self)
+
+    def _approx_scalar(self, x) -> ApproxScalar:
+        if isinstance(x, Decimal):
+            return ApproxDecimal(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok)
+        return ApproxScalar(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok)
+
+    def _yield_comparisons(self, actual):
+        """Yield all the pairs of numbers to be compared.
+
+        This is used to implement the `__eq__` method.
+        """
+        raise NotImplementedError
+
+    def _check_type(self) -> None:
+        """Raise a TypeError if the expected value is not a valid type."""
+        # This is only a concern if the expected value is a sequence. In every
+        # other case, the approx() function ensures that the expected value has
+        # a numeric type. For this reason, the default is to do nothing. The
+        # classes that deal with sequences should reimplement this method to
+        # raise if there are any non-numeric elements in the sequence.
+
+
+def _recursive_sequence_map(f, x):
+    """Recursively map a function over a sequence of arbitrary depth."""
+    if isinstance(x, (list, tuple)):
+        seq_type = type(x)
+        return seq_type(_recursive_sequence_map(f, xi) for xi in x)
+    elif _is_sequence_like(x):
+        return [_recursive_sequence_map(f, xi) for xi in x]
+    else:
+        return f(x)
+
+
+class ApproxNumpy(ApproxBase):
+    """Perform approximate comparisons where the expected value is a numpy array."""
+
+    def __repr__(self) -> str:
+        list_scalars = _recursive_sequence_map(
+            self._approx_scalar, self.expected.tolist()
+        )
+        return f"approx({list_scalars!r})"
+
+    def _repr_compare(self, other_side: ndarray | list[Any]) -> list[str]:
+        import itertools
+        import math
+
+        def get_value_from_nested_list(
+            nested_list: list[Any], nd_index: tuple[Any, ...]
+        ) -> Any:
+            """
+            Helper function to get the value out of a nested list, given an n-dimensional index.
+            This mimics numpy's indexing, but for raw nested python lists.
+ """ + value: Any = nested_list + for i in nd_index: + value = value[i] + return value + + np_array_shape = self.expected.shape + approx_side_as_seq = _recursive_sequence_map( + self._approx_scalar, self.expected.tolist() + ) + + # convert other_side to numpy array to ensure shape attribute is available + other_side_as_array = _as_numpy_array(other_side) + assert other_side_as_array is not None + + if np_array_shape != other_side_as_array.shape: + return [ + "Impossible to compare arrays with different shapes.", + f"Shapes: {np_array_shape} and {other_side_as_array.shape}", + ] + + number_of_elements = self.expected.size + max_abs_diff = -math.inf + max_rel_diff = -math.inf + different_ids = [] + for index in itertools.product(*(range(i) for i in np_array_shape)): + approx_value = get_value_from_nested_list(approx_side_as_seq, index) + other_value = get_value_from_nested_list(other_side_as_array, index) + if approx_value != other_value: + abs_diff = abs(approx_value.expected - other_value) + max_abs_diff = max(max_abs_diff, abs_diff) + if other_value == 0.0: + max_rel_diff = math.inf + else: + max_rel_diff = max(max_rel_diff, abs_diff / abs(other_value)) + different_ids.append(index) + + message_data = [ + ( + str(index), + str(get_value_from_nested_list(other_side_as_array, index)), + str(get_value_from_nested_list(approx_side_as_seq, index)), + ) + for index in different_ids + ] + return _compare_approx( + self.expected, + message_data, + number_of_elements, + different_ids, + max_abs_diff, + max_rel_diff, + ) + + def __eq__(self, actual) -> bool: + import numpy as np + + # self.expected is supposed to always be an array here. + + if not np.isscalar(actual): + try: + actual = np.asarray(actual) + except Exception as e: + raise TypeError(f"cannot compare '{actual}' to numpy.ndarray") from e + + if not np.isscalar(actual) and actual.shape != self.expected.shape: + return False + + return super().__eq__(actual) + + def _yield_comparisons(self, actual): + import numpy as np + + # `actual` can either be a numpy array or a scalar, it is treated in + # `__eq__` before being passed to `ApproxBase.__eq__`, which is the + # only method that calls this one. 
+ + if np.isscalar(actual): + for i in np.ndindex(self.expected.shape): + yield actual, self.expected[i].item() + else: + for i in np.ndindex(self.expected.shape): + yield actual[i].item(), self.expected[i].item() + + +class ApproxMapping(ApproxBase): + """Perform approximate comparisons where the expected value is a mapping + with numeric values (the keys can be anything).""" + + def __repr__(self) -> str: + return f"approx({ ({k: self._approx_scalar(v) for k, v in self.expected.items()})!r})" + + def _repr_compare(self, other_side: Mapping[object, float]) -> list[str]: + import math + + approx_side_as_map = { + k: self._approx_scalar(v) for k, v in self.expected.items() + } + + number_of_elements = len(approx_side_as_map) + max_abs_diff = -math.inf + max_rel_diff = -math.inf + different_ids = [] + for (approx_key, approx_value), other_value in zip( + approx_side_as_map.items(), other_side.values() + ): + if approx_value != other_value: + if approx_value.expected is not None and other_value is not None: + try: + max_abs_diff = max( + max_abs_diff, abs(approx_value.expected - other_value) + ) + if approx_value.expected == 0.0: + max_rel_diff = math.inf + else: + max_rel_diff = max( + max_rel_diff, + abs( + (approx_value.expected - other_value) + / approx_value.expected + ), + ) + except ZeroDivisionError: + pass + different_ids.append(approx_key) + + message_data = [ + (str(key), str(other_side[key]), str(approx_side_as_map[key])) + for key in different_ids + ] + + return _compare_approx( + self.expected, + message_data, + number_of_elements, + different_ids, + max_abs_diff, + max_rel_diff, + ) + + def __eq__(self, actual) -> bool: + try: + if set(actual.keys()) != set(self.expected.keys()): + return False + except AttributeError: + return False + + return super().__eq__(actual) + + def _yield_comparisons(self, actual): + for k in self.expected.keys(): + yield actual[k], self.expected[k] + + def _check_type(self) -> None: + __tracebackhide__ = True + for key, value in self.expected.items(): + if isinstance(value, type(self.expected)): + msg = "pytest.approx() does not support nested dictionaries: key={!r} value={!r}\n full mapping={}" + raise TypeError(msg.format(key, value, pprint.pformat(self.expected))) + + +class ApproxSequenceLike(ApproxBase): + """Perform approximate comparisons where the expected value is a sequence of numbers.""" + + def __repr__(self) -> str: + seq_type = type(self.expected) + if seq_type not in (tuple, list): + seq_type = list + return f"approx({seq_type(self._approx_scalar(x) for x in self.expected)!r})" + + def _repr_compare(self, other_side: Sequence[float]) -> list[str]: + import math + + if len(self.expected) != len(other_side): + return [ + "Impossible to compare lists with different sizes.", + f"Lengths: {len(self.expected)} and {len(other_side)}", + ] + + approx_side_as_map = _recursive_sequence_map(self._approx_scalar, self.expected) + + number_of_elements = len(approx_side_as_map) + max_abs_diff = -math.inf + max_rel_diff = -math.inf + different_ids = [] + for i, (approx_value, other_value) in enumerate( + zip(approx_side_as_map, other_side) + ): + if approx_value != other_value: + try: + abs_diff = abs(approx_value.expected - other_value) + max_abs_diff = max(max_abs_diff, abs_diff) + # Ignore non-numbers for the diff calculations (#13012). 
+ except TypeError: + pass + else: + if other_value == 0.0: + max_rel_diff = math.inf + else: + max_rel_diff = max(max_rel_diff, abs_diff / abs(other_value)) + different_ids.append(i) + message_data = [ + (str(i), str(other_side[i]), str(approx_side_as_map[i])) + for i in different_ids + ] + + return _compare_approx( + self.expected, + message_data, + number_of_elements, + different_ids, + max_abs_diff, + max_rel_diff, + ) + + def __eq__(self, actual) -> bool: + try: + if len(actual) != len(self.expected): + return False + except TypeError: + return False + return super().__eq__(actual) + + def _yield_comparisons(self, actual): + return zip(actual, self.expected) + + def _check_type(self) -> None: + __tracebackhide__ = True + for index, x in enumerate(self.expected): + if isinstance(x, type(self.expected)): + msg = "pytest.approx() does not support nested data structures: {!r} at index {}\n full sequence: {}" + raise TypeError(msg.format(x, index, pprint.pformat(self.expected))) + + +class ApproxScalar(ApproxBase): + """Perform approximate comparisons where the expected value is a single number.""" + + # Using Real should be better than this Union, but not possible yet: + # https://github.com/python/typeshed/pull/3108 + DEFAULT_ABSOLUTE_TOLERANCE: float | Decimal = 1e-12 + DEFAULT_RELATIVE_TOLERANCE: float | Decimal = 1e-6 + + def __repr__(self) -> str: + """Return a string communicating both the expected value and the + tolerance for the comparison being made. + + For example, ``1.0 ± 1e-6``, ``(3+4j) ± 5e-6 ∠ ±180°``. + """ + # Don't show a tolerance for values that aren't compared using + # tolerances, i.e. non-numerics and infinities. Need to call abs to + # handle complex numbers, e.g. (inf + 1j). + if ( + isinstance(self.expected, bool) + or (not isinstance(self.expected, (Complex, Decimal))) + or math.isinf(abs(self.expected) or isinstance(self.expected, bool)) + ): + return str(self.expected) + + # If a sensible tolerance can't be calculated, self.tolerance will + # raise a ValueError. In this case, display '???'. + try: + if 1e-3 <= self.tolerance < 1e3: + vetted_tolerance = f"{self.tolerance:n}" + else: + vetted_tolerance = f"{self.tolerance:.1e}" + + if ( + isinstance(self.expected, Complex) + and self.expected.imag + and not math.isinf(self.tolerance) + ): + vetted_tolerance += " ∠ ±180°" + except ValueError: + vetted_tolerance = "???" + + return f"{self.expected} ± {vetted_tolerance}" + + def __eq__(self, actual) -> bool: + """Return whether the given value is equal to the expected value + within the pre-specified tolerance.""" + + def is_bool(val: Any) -> bool: + # Check if `val` is a native bool or numpy bool. + if isinstance(val, bool): + return True + if np := sys.modules.get("numpy"): + return isinstance(val, np.bool_) + return False + + asarray = _as_numpy_array(actual) + if asarray is not None: + # Call ``__eq__()`` manually to prevent infinite-recursion with + # numpy<1.13. See #3748. + return all(self.__eq__(a) for a in asarray.flat) + + # Short-circuit exact equality, except for bool and np.bool_ + if is_bool(self.expected) and not is_bool(actual): + return False + elif actual == self.expected: + return True + + # If either type is non-numeric, fall back to strict equality. + # NB: we need Complex, rather than just Number, to ensure that __abs__, + # __sub__, and __float__ are defined. Also, consider bool to be + # non-numeric, even though it has the required arithmetic. 
+ if is_bool(self.expected) or not ( + isinstance(self.expected, (Complex, Decimal)) + and isinstance(actual, (Complex, Decimal)) + ): + return False + + # Allow the user to control whether NaNs are considered equal to each + # other or not. The abs() calls are for compatibility with complex + # numbers. + if math.isnan(abs(self.expected)): + return self.nan_ok and math.isnan(abs(actual)) + + # Infinity shouldn't be approximately equal to anything but itself, but + # if there's a relative tolerance, it will be infinite and infinity + # will seem approximately equal to everything. The equal-to-itself + # case would have been short circuited above, so here we can just + # return false if the expected value is infinite. The abs() call is + # for compatibility with complex numbers. + if math.isinf(abs(self.expected)): + return False + + # Return true if the two numbers are within the tolerance. + result: bool = abs(self.expected - actual) <= self.tolerance + return result + + # Ignore type because of https://github.com/python/mypy/issues/4266. + __hash__ = None # type: ignore + + @property + def tolerance(self): + """Return the tolerance for the comparison. + + This could be either an absolute tolerance or a relative tolerance, + depending on what the user specified or which would be larger. + """ + + def set_default(x, default): + return x if x is not None else default + + # Figure out what the absolute tolerance should be. ``self.abs`` is + # either None or a value specified by the user. + absolute_tolerance = set_default(self.abs, self.DEFAULT_ABSOLUTE_TOLERANCE) + + if absolute_tolerance < 0: + raise ValueError( + f"absolute tolerance can't be negative: {absolute_tolerance}" + ) + if math.isnan(absolute_tolerance): + raise ValueError("absolute tolerance can't be NaN.") + + # If the user specified an absolute tolerance but not a relative one, + # just return the absolute tolerance. + if self.rel is None: + if self.abs is not None: + return absolute_tolerance + + # Figure out what the relative tolerance should be. ``self.rel`` is + # either None or a value specified by the user. This is done after + # we've made sure the user didn't ask for an absolute tolerance only, + # because we don't want to raise errors about the relative tolerance if + # we aren't even going to use it. + relative_tolerance = set_default( + self.rel, self.DEFAULT_RELATIVE_TOLERANCE + ) * abs(self.expected) + + if relative_tolerance < 0: + raise ValueError( + f"relative tolerance can't be negative: {relative_tolerance}" + ) + if math.isnan(relative_tolerance): + raise ValueError("relative tolerance can't be NaN.") + + # Return the larger of the relative and absolute tolerances. + return max(relative_tolerance, absolute_tolerance) + + +class ApproxDecimal(ApproxScalar): + """Perform approximate comparisons where the expected value is a Decimal.""" + + DEFAULT_ABSOLUTE_TOLERANCE = Decimal("1e-12") + DEFAULT_RELATIVE_TOLERANCE = Decimal("1e-6") + + def __repr__(self) -> str: + if isinstance(self.rel, float): + rel = Decimal.from_float(self.rel) + else: + rel = self.rel + + if isinstance(self.abs, float): + abs_ = Decimal.from_float(self.abs) + else: + abs_ = self.abs + + tol_str = "???" 
+ if rel is not None and Decimal("1e-3") <= rel <= Decimal("1e3"): + tol_str = f"{rel:.1e}" + elif abs_ is not None: + tol_str = f"{abs_:.1e}" + + return f"{self.expected} ± {tol_str}" + + +def approx(expected, rel=None, abs=None, nan_ok: bool = False) -> ApproxBase: + """Assert that two numbers (or two ordered sequences of numbers) are equal to each other + within some tolerance. + + Due to the :doc:`python:tutorial/floatingpoint`, numbers that we + would intuitively expect to be equal are not always so:: + + >>> 0.1 + 0.2 == 0.3 + False + + This problem is commonly encountered when writing tests, e.g. when making + sure that floating-point values are what you expect them to be. One way to + deal with this problem is to assert that two floating-point numbers are + equal to within some appropriate tolerance:: + + >>> abs((0.1 + 0.2) - 0.3) < 1e-6 + True + + However, comparisons like this are tedious to write and difficult to + understand. Furthermore, absolute comparisons like the one above are + usually discouraged because there's no tolerance that works well for all + situations. ``1e-6`` is good for numbers around ``1``, but too small for + very big numbers and too big for very small ones. It's better to express + the tolerance as a fraction of the expected value, but relative comparisons + like that are even more difficult to write correctly and concisely. + + The ``approx`` class performs floating-point comparisons using a syntax + that's as intuitive as possible:: + + >>> from pytest import approx + >>> 0.1 + 0.2 == approx(0.3) + True + + The same syntax also works for ordered sequences of numbers:: + + >>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6)) + True + + ``numpy`` arrays:: + + >>> import numpy as np # doctest: +SKIP + >>> np.array([0.1, 0.2]) + np.array([0.2, 0.4]) == approx(np.array([0.3, 0.6])) # doctest: +SKIP + True + + And for a ``numpy`` array against a scalar:: + + >>> import numpy as np # doctest: +SKIP + >>> np.array([0.1, 0.2]) + np.array([0.2, 0.1]) == approx(0.3) # doctest: +SKIP + True + + Only ordered sequences are supported, because ``approx`` needs + to infer the relative position of the sequences without ambiguity. This means + ``sets`` and other unordered sequences are not supported. + + Finally, dictionary *values* can also be compared:: + + >>> {'a': 0.1 + 0.2, 'b': 0.2 + 0.4} == approx({'a': 0.3, 'b': 0.6}) + True + + The comparison will be true if both mappings have the same keys and their + respective values match the expected tolerances. + + **Tolerances** + + By default, ``approx`` considers numbers within a relative tolerance of + ``1e-6`` (i.e. one part in a million) of its expected value to be equal. + This treatment would lead to surprising results if the expected value was + ``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``. + To handle this case less surprisingly, ``approx`` also considers numbers + within an absolute tolerance of ``1e-12`` of its expected value to be + equal. Infinity and NaN are special cases. Infinity is only considered + equal to itself, regardless of the relative tolerance. NaN is not + considered equal to anything by default, but you can make it be equal to + itself by setting the ``nan_ok`` argument to True. (This is meant to + facilitate comparing arrays that use NaN to mean "no data".) 
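    [Editorial addition, not upstream pytest text.] The zero, NaN, and infinity
    behavior just described, as a short doctest (assumes only the standard
    library)::

        >>> import math
        >>> 1e-9 == approx(0.0)   # at 0.0 only the 1e-12 absolute tolerance applies
        False
        >>> math.nan == approx(math.nan)
        False
        >>> math.nan == approx(math.nan, nan_ok=True)
        True
        >>> math.inf == approx(math.inf)  # infinity is only equal to itself
        True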
+
+    Both the relative and absolute tolerances can be changed by passing
+    arguments to the ``approx`` constructor::
+
+        >>> 1.0001 == approx(1)
+        False
+        >>> 1.0001 == approx(1, rel=1e-3)
+        True
+        >>> 1.0001 == approx(1, abs=1e-3)
+        True
+
+    If you specify ``abs`` but not ``rel``, the comparison will not consider
+    the relative tolerance at all. In other words, two numbers that are within
+    the default relative tolerance of ``1e-6`` will still be considered unequal
+    if they exceed the specified absolute tolerance. If you specify both
+    ``abs`` and ``rel``, the numbers will be considered equal if either
+    tolerance is met::
+
+        >>> 1 + 1e-8 == approx(1)
+        True
+        >>> 1 + 1e-8 == approx(1, abs=1e-12)
+        False
+        >>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12)
+        True
+
+    **Non-numeric types**
+
+    You can also use ``approx`` to compare non-numeric types, or dicts and
+    sequences containing non-numeric types, in which case it falls back to
+    strict equality. This can be useful for comparing dicts and sequences that
+    can contain optional values::
+
+        >>> {"required": 1.0000005, "optional": None} == approx({"required": 1, "optional": None})
+        True
+        >>> [None, 1.0000005] == approx([None, 1])
+        True
+        >>> ["foo", 1.0000005] == approx([None, 1])
+        False
+
+    If you're thinking about using ``approx``, then you might want to know how
+    it compares to other good ways of comparing floating-point numbers. All of
+    these algorithms are based on relative and absolute tolerances and should
+    agree for the most part, but they do have meaningful differences:
+
+    - ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``: True if the relative
+      tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute
+      tolerance is met. Because the relative tolerance is calculated w.r.t.
+      both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor
+      ``b`` is a "reference value"). You have to specify an absolute tolerance
+      if you want to compare to ``0.0`` because there is no tolerance by
+      default. More information: :py:func:`math.isclose`.
+
+    - ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference
+      between ``a`` and ``b`` is less than the sum of the relative tolerance
+      w.r.t. ``b`` and the absolute tolerance. Because the relative tolerance
+      is only calculated w.r.t. ``b``, this test is asymmetric and you can
+      think of ``b`` as the reference value. Support for comparing sequences
+      is provided by :py:func:`numpy.allclose`. More information:
+      :std:doc:`numpy:reference/generated/numpy.isclose`.
+
+    - ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b``
+      are within an absolute tolerance of ``1e-7``. No relative tolerance is
+      considered, so this function is not appropriate for very large or very
+      small numbers. Also, it's only available in subclasses of ``unittest.TestCase``
+      and it's ugly because it doesn't follow PEP 8. More information:
+      :py:meth:`unittest.TestCase.assertAlmostEqual`.
+
+    - ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative
+      tolerance is met w.r.t. ``b`` or if the absolute tolerance is met.
+      Because the relative tolerance is only calculated w.r.t. ``b``, this test
+      is asymmetric and you can think of ``b`` as the reference value. In the
+      special case that you explicitly specify an absolute tolerance but not a
+      relative tolerance, only the absolute tolerance is considered.
+
+    .. note::
+
+        ``approx`` can handle numpy arrays, but we recommend the
+        specialised test helpers in :std:doc:`numpy:reference/routines.testing`
+        if you need support for comparisons, NaNs, or ULP-based tolerances.
+
+        To match strings using regex, you can use
+        `Matches `_
+        from the
+        `re_assert package `_.
+
+
+    .. note::
+
+        Unlike built-in equality, this function considers
+        booleans unequal to numeric zero or one. For example::
+
+            >>> 1 == approx(True)
+            False
+
+    .. warning::
+
+       .. versionchanged:: 3.2
+
+       In order to avoid inconsistent behavior, :py:exc:`TypeError` is
+       raised for ``>``, ``>=``, ``<`` and ``<=`` comparisons.
+       The example below illustrates the problem::
+
+           assert approx(0.1) > 0.1 + 1e-10  # calls approx(0.1).__gt__(0.1 + 1e-10)
+           assert 0.1 + 1e-10 > approx(0.1)  # calls approx(0.1).__lt__(0.1 + 1e-10)
+
+       In the second example one expects ``approx(0.1).__le__(0.1 + 1e-10)``
+       to be called. But instead, ``approx(0.1).__lt__(0.1 + 1e-10)`` is used
+       for the comparison. This is because the call hierarchy of rich comparisons
+       follows a fixed behavior. More information: :py:meth:`object.__ge__`
+
+    .. versionchanged:: 3.7.1
+       ``approx`` raises ``TypeError`` when it encounters a dict value or
+       sequence element of non-numeric type.
+
+    .. versionchanged:: 6.1.0
+       ``approx`` falls back to strict equality for non-numeric types instead
+       of raising ``TypeError``.
+    """
+    # Delegate the comparison to a class that knows how to deal with the type
+    # of the expected value (e.g. int, float, list, dict, numpy.array, etc).
+    #
+    # The primary responsibility of these classes is to implement ``__eq__()``
+    # and ``__repr__()``. The former is used to actually check if some
+    # "actual" value is equivalent to the given expected value within the
+    # allowed tolerance. The latter is used to show the user the expected
+    # value and tolerance, in the case that a test failed.
+    #
+    # The actual logic for making approximate comparisons can be found in
+    # ApproxScalar, which is used to compare individual numbers. All of the
+    # other Approx classes eventually delegate to this class. The ApproxBase
+    # class provides some convenient methods and overloads, but isn't really
+    # essential.
+
+    __tracebackhide__ = True
+
+    if isinstance(expected, Decimal):
+        cls: type[ApproxBase] = ApproxDecimal
+    elif isinstance(expected, Mapping):
+        cls = ApproxMapping
+    elif _is_numpy_array(expected):
+        expected = _as_numpy_array(expected)
+        cls = ApproxNumpy
+    elif _is_sequence_like(expected):
+        cls = ApproxSequenceLike
+    elif isinstance(expected, Collection) and not isinstance(expected, (str, bytes)):
+        msg = f"pytest.approx() only supports ordered sequences, but got: {expected!r}"
+        raise TypeError(msg)
+    else:
+        cls = ApproxScalar
+
+    return cls(expected, rel, abs, nan_ok)
+
+
+def _is_sequence_like(expected: object) -> bool:
+    return (
+        hasattr(expected, "__getitem__")
+        and isinstance(expected, Sized)
+        and not isinstance(expected, (str, bytes))
+    )
+
+
+def _is_numpy_array(obj: object) -> bool:
+    """
+    Return true if the given object is implicitly convertible to ndarray,
+    and numpy is already imported.
+    """
+    return _as_numpy_array(obj) is not None
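# [Editorial example, not part of the vendored patch.] The numpy helpers here
# never import numpy themselves; they only use it when the test session
# already has. A hedged, self-contained sketch:
#
#     import sys
#     if "numpy" not in sys.modules:
#         assert _as_numpy_array([1.0, 2.0]) is None  # numpy not loaded: no conversion
#     else:
#         import numpy as np
#         assert isinstance(_as_numpy_array(np.zeros(3)), np.ndarray)
#         assert _as_numpy_array(np.float64(1.0)) is None  # scalars are excluded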
+ """ + np: Any = sys.modules.get("numpy") + if np is not None: + # avoid infinite recursion on numpy scalars, which have __array__ + if np.isscalar(obj): + return None + elif isinstance(obj, np.ndarray): + return obj + elif hasattr(obj, "__array__") or hasattr("obj", "__array_interface__"): + return np.asarray(obj) + return None diff --git a/.venv/lib/python3.11/site-packages/_pytest/raises.py b/.venv/lib/python3.11/site-packages/_pytest/raises.py new file mode 100644 index 0000000..78fae6d --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/raises.py @@ -0,0 +1,1519 @@ +from __future__ import annotations + +from abc import ABC +from abc import abstractmethod +import re +from re import Pattern +import sys +from textwrap import indent +from typing import Any +from typing import cast +from typing import final +from typing import Generic +from typing import get_args +from typing import get_origin +from typing import Literal +from typing import overload +from typing import TYPE_CHECKING +import warnings + +from _pytest._code import ExceptionInfo +from _pytest._code.code import stringify_exception +from _pytest.outcomes import fail +from _pytest.warning_types import PytestWarning + + +if TYPE_CHECKING: + from collections.abc import Callable + from collections.abc import Sequence + + # for some reason Sphinx does not play well with 'from types import TracebackType' + import types + + from typing_extensions import ParamSpec + from typing_extensions import TypeGuard + from typing_extensions import TypeVar + + P = ParamSpec("P") + + # this conditional definition is because we want to allow a TypeVar default + BaseExcT_co_default = TypeVar( + "BaseExcT_co_default", + bound=BaseException, + default=BaseException, + covariant=True, + ) + + # Use short name because it shows up in docs. + E = TypeVar("E", bound=BaseException, default=BaseException) +else: + from typing import TypeVar + + BaseExcT_co_default = TypeVar( + "BaseExcT_co_default", bound=BaseException, covariant=True + ) + +# RaisesGroup doesn't work with a default. +BaseExcT_co = TypeVar("BaseExcT_co", bound=BaseException, covariant=True) +BaseExcT_1 = TypeVar("BaseExcT_1", bound=BaseException) +BaseExcT_2 = TypeVar("BaseExcT_2", bound=BaseException) +ExcT_1 = TypeVar("ExcT_1", bound=Exception) +ExcT_2 = TypeVar("ExcT_2", bound=Exception) + +if sys.version_info < (3, 11): + from exceptiongroup import BaseExceptionGroup + from exceptiongroup import ExceptionGroup + + +# String patterns default to including the unicode flag. +_REGEX_NO_FLAGS = re.compile(r"").flags + + +# pytest.raises helper +@overload +def raises( + expected_exception: type[E] | tuple[type[E], ...], + *, + match: str | re.Pattern[str] | None = ..., + check: Callable[[E], bool] = ..., +) -> RaisesExc[E]: ... + + +@overload +def raises( + *, + match: str | re.Pattern[str], + # If exception_type is not provided, check() must do any typechecks itself. + check: Callable[[BaseException], bool] = ..., +) -> RaisesExc[BaseException]: ... + + +@overload +def raises(*, check: Callable[[BaseException], bool]) -> RaisesExc[BaseException]: ... + + +@overload +def raises( + expected_exception: type[E] | tuple[type[E], ...], + func: Callable[..., Any], + *args: Any, + **kwargs: Any, +) -> ExceptionInfo[E]: ... + + +def raises( + expected_exception: type[E] | tuple[type[E], ...] | None = None, + *args: Any, + **kwargs: Any, +) -> RaisesExc[BaseException] | ExceptionInfo[E]: + r"""Assert that a code block/function call raises an exception type, or one of its subclasses. 
+ + :param expected_exception: + The expected exception type, or a tuple if one of multiple possible + exception types are expected. Note that subclasses of the passed exceptions + will also match. + + This is not a required parameter, you may opt to only use ``match`` and/or + ``check`` for verifying the raised exception. + + :kwparam str | re.Pattern[str] | None match: + If specified, a string containing a regular expression, + or a regular expression object, that is tested against the string + representation of the exception and its :pep:`678` `__notes__` + using :func:`re.search`. + + To match a literal string that may contain :ref:`special characters + `, the pattern can first be escaped with :func:`re.escape`. + + (This is only used when ``pytest.raises`` is used as a context manager, + and passed through to the function otherwise. + When using ``pytest.raises`` as a function, you can use: + ``pytest.raises(Exc, func, match="passed on").match("my pattern")``.) + + :kwparam Callable[[BaseException], bool] check: + + .. versionadded:: 8.4 + + If specified, a callable that will be called with the exception as a parameter + after checking the type and the match regex if specified. + If it returns ``True`` it will be considered a match, if not it will + be considered a failed match. + + + Use ``pytest.raises`` as a context manager, which will capture the exception of the given + type, or any of its subclasses:: + + >>> import pytest + >>> with pytest.raises(ZeroDivisionError): + ... 1/0 + + If the code block does not raise the expected exception (:class:`ZeroDivisionError` in the example + above), or no exception at all, the check will fail instead. + + You can also use the keyword argument ``match`` to assert that the + exception matches a text or regex:: + + >>> with pytest.raises(ValueError, match='must be 0 or None'): + ... raise ValueError("value must be 0 or None") + + >>> with pytest.raises(ValueError, match=r'must be \d+$'): + ... raise ValueError("value must be 42") + + The ``match`` argument searches the formatted exception string, which includes any + `PEP-678 `__ ``__notes__``: + + >>> with pytest.raises(ValueError, match=r"had a note added"): # doctest: +SKIP + ... e = ValueError("value must be 42") + ... e.add_note("had a note added") + ... raise e + + The ``check`` argument, if provided, must return True when passed the raised exception + for the match to be successful, otherwise an :exc:`AssertionError` is raised. + + >>> import errno + >>> with pytest.raises(OSError, check=lambda e: e.errno == errno.EACCES): + ... raise OSError(errno.EACCES, "no permission to view") + + The context manager produces an :class:`ExceptionInfo` object which can be used to inspect the + details of the captured exception:: + + >>> with pytest.raises(ValueError) as exc_info: + ... raise ValueError("value must be 42") + >>> assert exc_info.type is ValueError + >>> assert exc_info.value.args[0] == "value must be 42" + + .. warning:: + + Given that ``pytest.raises`` matches subclasses, be wary of using it to match :class:`Exception` like this:: + + # Careful, this will catch ANY exception raised. + with pytest.raises(Exception): + some_function() + + Because :class:`Exception` is the base class of almost all exceptions, it is easy for this to hide + real bugs, where the user wrote this expecting a specific exception, but some other exception is being + raised due to a bug introduced during a refactoring. 
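    [Editorial addition, not upstream pytest text.] Combining ``match`` and
    ``check`` in a single context manager, using only the behavior documented
    above (``check`` needs pytest >= 8.4)::

        >>> import errno
        >>> with pytest.raises(OSError, match=r"permission",
        ...                    check=lambda e: e.errno == errno.EACCES):
        ...     raise OSError(errno.EACCES, "no permission to view")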
+ + Avoid using ``pytest.raises`` to catch :class:`Exception` unless certain that you really want to catch + **any** exception raised. + + .. note:: + + When using ``pytest.raises`` as a context manager, it's worthwhile to + note that normal context manager rules apply and that the exception + raised *must* be the final line in the scope of the context manager. + Lines of code after that, within the scope of the context manager will + not be executed. For example:: + + >>> value = 15 + >>> with pytest.raises(ValueError) as exc_info: + ... if value > 10: + ... raise ValueError("value must be <= 10") + ... assert exc_info.type is ValueError # This will not execute. + + Instead, the following approach must be taken (note the difference in + scope):: + + >>> with pytest.raises(ValueError) as exc_info: + ... if value > 10: + ... raise ValueError("value must be <= 10") + ... + >>> assert exc_info.type is ValueError + + **Expecting exception groups** + + When expecting exceptions wrapped in :exc:`BaseExceptionGroup` or + :exc:`ExceptionGroup`, you should instead use :class:`pytest.RaisesGroup`. + + **Using with** ``pytest.mark.parametrize`` + + When using :ref:`pytest.mark.parametrize ref` + it is possible to parametrize tests such that + some runs raise an exception and others do not. + + See :ref:`parametrizing_conditional_raising` for an example. + + .. seealso:: + + :ref:`assertraises` for more examples and detailed discussion. + + **Legacy form** + + It is possible to specify a callable by passing a to-be-called lambda:: + + >>> raises(ZeroDivisionError, lambda: 1/0) + + + or you can specify an arbitrary callable with arguments:: + + >>> def f(x): return 1/x + ... + >>> raises(ZeroDivisionError, f, 0) + + >>> raises(ZeroDivisionError, f, x=0) + + + The form above is fully supported but discouraged for new code because the + context manager form is regarded as more readable and less error-prone. + + .. note:: + Similar to caught exception objects in Python, explicitly clearing + local references to returned ``ExceptionInfo`` objects can + help the Python interpreter speed up its garbage collection. + + Clearing those references breaks a reference cycle + (``ExceptionInfo`` --> caught exception --> frame stack raising + the exception --> current frame stack --> local variables --> + ``ExceptionInfo``) which makes Python keep all objects referenced + from that cycle (including all local variables in the current + frame) alive until the next cyclic garbage collection run. + More detailed information can be found in the official Python + documentation for :ref:`the try statement `. + """ + __tracebackhide__ = True + + if not args: + if set(kwargs) - {"match", "check", "expected_exception"}: + msg = "Unexpected keyword arguments passed to pytest.raises: " + msg += ", ".join(sorted(kwargs)) + msg += "\nUse context-manager form instead?" + raise TypeError(msg) + + if expected_exception is None: + return RaisesExc(**kwargs) + return RaisesExc(expected_exception, **kwargs) + + if not expected_exception: + raise ValueError( + f"Expected an exception type or a tuple of exception types, but got `{expected_exception!r}`. " + f"Raising exceptions is already understood as failing the test, so you don't need " + f"any special code to say 'this should never raise an exception'." 
+ ) + func = args[0] + if not callable(func): + raise TypeError(f"{func!r} object (type: {type(func)}) must be callable") + with RaisesExc(expected_exception) as excinfo: + func(*args[1:], **kwargs) + try: + return excinfo + finally: + del excinfo + + +# note: RaisesExc/RaisesGroup uses fail() internally, so this alias +# indicates (to [internal] plugins?) that `pytest.raises` will +# raise `_pytest.outcomes.Failed`, where +# `outcomes.Failed is outcomes.fail.Exception is raises.Exception` +# note: this is *not* the same as `_pytest.main.Failed` +# note: mypy does not recognize this attribute, and it's not possible +# to use a protocol/decorator like the others in outcomes due to +# https://github.com/python/mypy/issues/18715 +raises.Exception = fail.Exception # type: ignore[attr-defined] + + +def _match_pattern(match: Pattern[str]) -> str | Pattern[str]: + """Helper function to remove redundant `re.compile` calls when printing regex""" + return match.pattern if match.flags == _REGEX_NO_FLAGS else match + + +def repr_callable(fun: Callable[[BaseExcT_1], bool]) -> str: + """Get the repr of a ``check`` parameter. + + Split out so it can be monkeypatched (e.g. by hypothesis) + """ + return repr(fun) + + +def backquote(s: str) -> str: + return "`" + s + "`" + + +def _exception_type_name( + e: type[BaseException] | tuple[type[BaseException], ...], +) -> str: + if isinstance(e, type): + return e.__name__ + if len(e) == 1: + return e[0].__name__ + return "(" + ", ".join(ee.__name__ for ee in e) + ")" + + +def _check_raw_type( + expected_type: type[BaseException] | tuple[type[BaseException], ...] | None, + exception: BaseException, +) -> str | None: + if expected_type is None or expected_type == (): + return None + + if not isinstance( + exception, + expected_type, + ): + actual_type_str = backquote(_exception_type_name(type(exception)) + "()") + expected_type_str = backquote(_exception_type_name(expected_type)) + if ( + isinstance(exception, BaseExceptionGroup) + and isinstance(expected_type, type) + and not issubclass(expected_type, BaseExceptionGroup) + ): + return f"Unexpected nested {actual_type_str}, expected {expected_type_str}" + return f"{actual_type_str} is not an instance of {expected_type_str}" + return None + + +def is_fully_escaped(s: str) -> bool: + # we know we won't compile with re.VERBOSE, so whitespace doesn't need to be escaped + metacharacters = "{}()+.*?^$[]" + return not any( + c in metacharacters and (i == 0 or s[i - 1] != "\\") for (i, c) in enumerate(s) + ) + + +def unescape(s: str) -> str: + return re.sub(r"\\([{}()+-.*?^$\[\]\s\\])", r"\1", s) + + +# These classes conceptually differ from ExceptionInfo in that ExceptionInfo is tied, and +# constructed from, a particular exception - whereas these are constructed with expected +# exceptions, and later allow matching towards particular exceptions. +# But there's overlap in `ExceptionInfo.match` and `AbstractRaises._check_match`, as with +# `AbstractRaises.matches` and `ExceptionInfo.errisinstance`+`ExceptionInfo.group_contains`. +# The interaction between these classes should perhaps be improved. +class AbstractRaises(ABC, Generic[BaseExcT_co]): + """ABC with common functionality shared between RaisesExc and RaisesGroup""" + + def __init__( + self, + *, + match: str | Pattern[str] | None, + check: Callable[[BaseExcT_co], bool] | None, + ) -> None: + if isinstance(match, str): + # juggle error in order to avoid context to fail (necessary?) 
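            # [Editorial note, not part of the vendored patch.] "Juggling" the
            # error means capturing it and calling fail() *outside* the except
            # block, so the resulting test failure is not chained to the
            # re.error via __context__ in the traceback.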
+            re_error = None
+            try:
+                self.match: Pattern[str] | None = re.compile(match)
+            except re.error as e:
+                re_error = e
+            if re_error is not None:
+                fail(f"Invalid regex pattern provided to 'match': {re_error}")
+            if match == "":
+                warnings.warn(
+                    PytestWarning(
+                        "matching against an empty string will *always* pass. If you want "
+                        "to check for an empty message you need to pass '^$'. If you don't "
+                        "want to match you should pass `None` or leave out the parameter."
+                    ),
+                    stacklevel=2,
+                )
+        else:
+            self.match = match
+
+        # check if this is a fully escaped regex and has ^$ to match fully
+        # in which case we can do a proper diff on error
+        self.rawmatch: str | None = None
+        if isinstance(match, str) or (
+            isinstance(match, Pattern) and match.flags == _REGEX_NO_FLAGS
+        ):
+            if isinstance(match, Pattern):
+                match = match.pattern
+            if (
+                match
+                and match[0] == "^"
+                and match[-1] == "$"
+                and is_fully_escaped(match[1:-1])
+            ):
+                self.rawmatch = unescape(match[1:-1])
+
+        self.check = check
+        self._fail_reason: str | None = None
+
+        # used to suppress repeated printing of `repr(self.check)`
+        self._nested: bool = False
+
+        # set in self._parse_exc
+        self.is_baseexception = False
+
+    def _parse_exc(
+        self, exc: type[BaseExcT_1] | types.GenericAlias, expected: str
+    ) -> type[BaseExcT_1]:
+        if isinstance(exc, type) and issubclass(exc, BaseException):
+            if not issubclass(exc, Exception):
+                self.is_baseexception = True
+            return exc
+        # because RaisesGroup does not support variable number of exceptions there's
+        # still a use for RaisesExc(ExceptionGroup[Exception]).
+        origin_exc: type[BaseException] | None = get_origin(exc)
+        if origin_exc and issubclass(origin_exc, BaseExceptionGroup):
+            exc_type = get_args(exc)[0]
+            if (
+                issubclass(origin_exc, ExceptionGroup) and exc_type in (Exception, Any)
+            ) or (
+                issubclass(origin_exc, BaseExceptionGroup)
+                and exc_type in (BaseException, Any)
+            ):
+                if not issubclass(origin_exc, Exception):
+                    self.is_baseexception = True
+                return cast(type[BaseExcT_1], origin_exc)
+            else:
+                raise ValueError(
+                    f"Only `ExceptionGroup[Exception]` or `BaseExceptionGroup[BaseException]` "
+                    f"are accepted as generic types but got `{exc}`. "
+                    f"As `raises` will catch all instances of the specified group regardless of the "
+                    f"generic argument, the specific nested exceptions have to be checked "
+                    f"with `RaisesGroup`."
+                )
+        # unclear if the Type/ValueError distinction is even helpful here
+        msg = f"expected exception must be {expected}, not "
+        if isinstance(exc, type):
+            raise ValueError(msg + f"{exc.__name__!r}")
+        if isinstance(exc, BaseException):
+            raise TypeError(msg + f"an exception instance ({type(exc).__name__})")
+        raise TypeError(msg + repr(type(exc).__name__))
+
+    @property
+    def fail_reason(self) -> str | None:
+        """Set after a call to :meth:`matches` to give a human-readable reason for why the match failed.
+ When used as a context manager the string will be printed as the reason for the + test failing.""" + return self._fail_reason + + def _check_check( + self: AbstractRaises[BaseExcT_1], + exception: BaseExcT_1, + ) -> bool: + if self.check is None: + return True + + if self.check(exception): + return True + + check_repr = "" if self._nested else " " + repr_callable(self.check) + self._fail_reason = f"check{check_repr} did not return True" + return False + + # TODO: harmonize with ExceptionInfo.match + def _check_match(self, e: BaseException) -> bool: + if self.match is None or re.search( + self.match, + stringified_exception := stringify_exception( + e, include_subexception_msg=False + ), + ): + return True + + # if we're matching a group, make sure we're explicit to reduce confusion + # if they're trying to match an exception contained within the group + maybe_specify_type = ( + f" the `{_exception_type_name(type(e))}()`" + if isinstance(e, BaseExceptionGroup) + else "" + ) + if isinstance(self.rawmatch, str): + # TODO: it instructs to use `-v` to print leading text, but that doesn't work + # I also don't know if this is the proper entry point, or tool to use at all + from _pytest.assertion.util import _diff_text + from _pytest.assertion.util import dummy_highlighter + + diff = _diff_text(self.rawmatch, stringified_exception, dummy_highlighter) + self._fail_reason = ("\n" if diff[0][0] == "-" else "") + "\n".join(diff) + return False + + # I don't love "Regex"+"Input" vs something like "expected regex"+"exception message" + # when they're similar it's not always obvious which is which + self._fail_reason = ( + f"Regex pattern did not match{maybe_specify_type}.\n" + f" Regex: {_match_pattern(self.match)!r}\n" + f" Input: {stringified_exception!r}" + ) + if _match_pattern(self.match) == stringified_exception: + self._fail_reason += "\n Did you mean to `re.escape()` the regex?" + return False + + @abstractmethod + def matches( + self: AbstractRaises[BaseExcT_1], exception: BaseException + ) -> TypeGuard[BaseExcT_1]: + """Check if an exception matches the requirements of this AbstractRaises. + If it fails, :meth:`AbstractRaises.fail_reason` should be set. + """ + + +@final +class RaisesExc(AbstractRaises[BaseExcT_co_default]): + """ + .. versionadded:: 8.4 + + + This is the class constructed when calling :func:`pytest.raises`, but may be used + directly as a helper class with :class:`RaisesGroup` when you want to specify + requirements on sub-exceptions. + + You don't need this if you only want to specify the type, since :class:`RaisesGroup` + accepts ``type[BaseException]``. + + :param type[BaseException] | tuple[type[BaseException]] | None expected_exception: + The expected type, or one of several possible types. + May be ``None`` in order to only make use of ``match`` and/or ``check`` + + The type is checked with :func:`isinstance`, and does not need to be an exact match. + If that is wanted you can use the ``check`` parameter. + + :kwparam str | Pattern[str] match: + A regex to match. + + :kwparam Callable[[BaseException], bool] check: + If specified, a callable that will be called with the exception as a parameter + after checking the type and the match regex if specified. + If it returns ``True`` it will be considered a match, if not it will + be considered a failed match. + + :meth:`RaisesExc.matches` can also be used standalone to check individual exceptions. + + Examples:: + + with RaisesGroup(RaisesExc(ValueError, match="string")) + ... 
+ with RaisesGroup(RaisesExc(check=lambda x: x.args == (3, "hello"))): + ... + with RaisesGroup(RaisesExc(check=lambda x: type(x) is ValueError)): + ... + """ + + # Trio bundled hypothesis monkeypatching, we will probably instead assume that + # hypothesis will handle that in their pytest plugin by the time this is released. + # Alternatively we could add a version of get_pretty_function_description ourselves + # https://github.com/HypothesisWorks/hypothesis/blob/8ced2f59f5c7bea3344e35d2d53e1f8f8eb9fcd8/hypothesis-python/src/hypothesis/internal/reflection.py#L439 + + # At least one of the three parameters must be passed. + @overload + def __init__( + self, + expected_exception: ( + type[BaseExcT_co_default] | tuple[type[BaseExcT_co_default], ...] + ), + /, + *, + match: str | Pattern[str] | None = ..., + check: Callable[[BaseExcT_co_default], bool] | None = ..., + ) -> None: ... + + @overload + def __init__( + self: RaisesExc[BaseException], # Give E a value. + /, + *, + match: str | Pattern[str] | None, + # If exception_type is not provided, check() must do any typechecks itself. + check: Callable[[BaseException], bool] | None = ..., + ) -> None: ... + + @overload + def __init__(self, /, *, check: Callable[[BaseException], bool]) -> None: ... + + def __init__( + self, + expected_exception: ( + type[BaseExcT_co_default] | tuple[type[BaseExcT_co_default], ...] | None + ) = None, + /, + *, + match: str | Pattern[str] | None = None, + check: Callable[[BaseExcT_co_default], bool] | None = None, + ): + super().__init__(match=match, check=check) + if isinstance(expected_exception, tuple): + expected_exceptions = expected_exception + elif expected_exception is None: + expected_exceptions = () + else: + expected_exceptions = (expected_exception,) + + if (expected_exceptions == ()) and match is None and check is None: + raise ValueError("You must specify at least one parameter to match on.") + + self.expected_exceptions = tuple( + self._parse_exc(e, expected="a BaseException type") + for e in expected_exceptions + ) + + self._just_propagate = False + + def matches( + self, + exception: BaseException | None, + ) -> TypeGuard[BaseExcT_co_default]: + """Check if an exception matches the requirements of this :class:`RaisesExc`. + If it fails, :attr:`RaisesExc.fail_reason` will be set. + + Examples:: + + assert RaisesExc(ValueError).matches(my_exception): + # is equivalent to + assert isinstance(my_exception, ValueError) + + # this can be useful when checking e.g. the ``__cause__`` of an exception. + with pytest.raises(ValueError) as excinfo: + ... + assert RaisesExc(SyntaxError, match="foo").matches(excinfo.value.__cause__) + # above line is equivalent to + assert isinstance(excinfo.value.__cause__, SyntaxError) + assert re.search("foo", str(excinfo.value.__cause__) + + """ + self._just_propagate = False + if exception is None: + self._fail_reason = "exception is None" + return False + if not self._check_type(exception): + self._just_propagate = True + return False + + if not self._check_match(exception): + return False + + return self._check_check(exception) + + def __repr__(self) -> str: + parameters = [] + if self.expected_exceptions: + parameters.append(_exception_type_name(self.expected_exceptions)) + if self.match is not None: + # If no flags were specified, discard the redundant re.compile() here. 
+ parameters.append( + f"match={_match_pattern(self.match)!r}", + ) + if self.check is not None: + parameters.append(f"check={repr_callable(self.check)}") + return f"RaisesExc({', '.join(parameters)})" + + def _check_type(self, exception: BaseException) -> TypeGuard[BaseExcT_co_default]: + self._fail_reason = _check_raw_type(self.expected_exceptions, exception) + return self._fail_reason is None + + def __enter__(self) -> ExceptionInfo[BaseExcT_co_default]: + self.excinfo: ExceptionInfo[BaseExcT_co_default] = ExceptionInfo.for_later() + return self.excinfo + + # TODO: move common code into superclass + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: types.TracebackType | None, + ) -> bool: + __tracebackhide__ = True + if exc_type is None: + if not self.expected_exceptions: + fail("DID NOT RAISE any exception") + if len(self.expected_exceptions) > 1: + fail(f"DID NOT RAISE any of {self.expected_exceptions!r}") + + fail(f"DID NOT RAISE {self.expected_exceptions[0]!r}") + + assert self.excinfo is not None, ( + "Internal error - should have been constructed in __enter__" + ) + + if not self.matches(exc_val): + if self._just_propagate: + return False + raise AssertionError(self._fail_reason) + + # Cast to narrow the exception type now that it's verified.... + # even though the TypeGuard in self.matches should be narrowing + exc_info = cast( + "tuple[type[BaseExcT_co_default], BaseExcT_co_default, types.TracebackType]", + (exc_type, exc_val, exc_tb), + ) + self.excinfo.fill_unfilled(exc_info) + return True + + +@final +class RaisesGroup(AbstractRaises[BaseExceptionGroup[BaseExcT_co]]): + """ + .. versionadded:: 8.4 + + Contextmanager for checking for an expected :exc:`ExceptionGroup`. + This works similar to :func:`pytest.raises`, but allows for specifying the structure of an :exc:`ExceptionGroup`. + :meth:`ExceptionInfo.group_contains` also tries to handle exception groups, + but it is very bad at checking that you *didn't* get unexpected exceptions. + + The catching behaviour differs from :ref:`except* `, being much + stricter about the structure by default. + By using ``allow_unwrapped=True`` and ``flatten_subgroups=True`` you can match + :ref:`except* ` fully when expecting a single exception. + + :param args: + Any number of exception types, :class:`RaisesGroup` or :class:`RaisesExc` + to specify the exceptions contained in this exception. + All specified exceptions must be present in the raised group, *and no others*. + + If you expect a variable number of exceptions you need to use + :func:`pytest.raises(ExceptionGroup) ` and manually check + the contained exceptions. Consider making use of :meth:`RaisesExc.matches`. + + It does not care about the order of the exceptions, so + ``RaisesGroup(ValueError, TypeError)`` + is equivalent to + ``RaisesGroup(TypeError, ValueError)``. + :kwparam str | re.Pattern[str] | None match: + If specified, a string containing a regular expression, + or a regular expression object, that is tested against the string + representation of the exception group and its :pep:`678` `__notes__` + using :func:`re.search`. + + To match a literal string that may contain :ref:`special characters + `, the pattern can first be escaped with :func:`re.escape`. + + Note that " (5 subgroups)" will be stripped from the ``repr`` before matching. + :kwparam Callable[[E], bool] check: + If specified, a callable that will be called with the group as a parameter + after successfully matching the expected exceptions. 
If it returns ``True`` + it will be considered a match, if not it will be considered a failed match. + :kwparam bool allow_unwrapped: + If expecting a single exception or :class:`RaisesExc` it will match even + if the exception is not inside an exceptiongroup. + + Using this together with ``match``, ``check`` or expecting multiple exceptions + will raise an error. + :kwparam bool flatten_subgroups: + "flatten" any groups inside the raised exception group, extracting all exceptions + inside any nested groups, before matching. Without this it expects you to + fully specify the nesting structure by passing :class:`RaisesGroup` as expected + parameter. + + Examples:: + + with RaisesGroup(ValueError): + raise ExceptionGroup("", (ValueError(),)) + # match + with RaisesGroup( + ValueError, + ValueError, + RaisesExc(TypeError, match="^expected int$"), + match="^my group$", + ): + raise ExceptionGroup( + "my group", + [ + ValueError(), + TypeError("expected int"), + ValueError(), + ], + ) + # check + with RaisesGroup( + KeyboardInterrupt, + match="^hello$", + check=lambda x: isinstance(x.__cause__, ValueError), + ): + raise BaseExceptionGroup("hello", [KeyboardInterrupt()]) from ValueError + # nested groups + with RaisesGroup(RaisesGroup(ValueError)): + raise ExceptionGroup("", (ExceptionGroup("", (ValueError(),)),)) + + # flatten_subgroups + with RaisesGroup(ValueError, flatten_subgroups=True): + raise ExceptionGroup("", (ExceptionGroup("", (ValueError(),)),)) + + # allow_unwrapped + with RaisesGroup(ValueError, allow_unwrapped=True): + raise ValueError + + + :meth:`RaisesGroup.matches` can also be used directly to check a standalone exception group. + + + The matching algorithm is greedy, which means cases such as this may fail:: + + with RaisesGroup(ValueError, RaisesExc(ValueError, match="hello")): + raise ExceptionGroup("", (ValueError("hello"), ValueError("goodbye"))) + + even though it generally does not care about the order of the exceptions in the group. + To avoid the above you should specify the first :exc:`ValueError` with a :class:`RaisesExc` as well. + + .. note:: + When raised exceptions don't match the expected ones, you'll get a detailed error + message explaining why. This includes ``repr(check)`` if set, which in Python can be + overly verbose, showing memory locations etc etc. + + If installed and imported (in e.g. ``conftest.py``), the ``hypothesis`` library will + monkeypatch this output to provide shorter & more readable repr's. + """ + + # allow_unwrapped=True requires: singular exception, exception not being + # RaisesGroup instance, match is None, check is None + @overload + def __init__( + self, + expected_exception: type[BaseExcT_co] | RaisesExc[BaseExcT_co], + /, + *, + allow_unwrapped: Literal[True], + flatten_subgroups: bool = False, + ) -> None: ... + + # flatten_subgroups = True also requires no nested RaisesGroup + @overload + def __init__( + self, + expected_exception: type[BaseExcT_co] | RaisesExc[BaseExcT_co], + /, + *other_exceptions: type[BaseExcT_co] | RaisesExc[BaseExcT_co], + flatten_subgroups: Literal[True], + match: str | Pattern[str] | None = None, + check: Callable[[BaseExceptionGroup[BaseExcT_co]], bool] | None = None, + ) -> None: ... + + # simplify the typevars if possible (the following 3 are equivalent but go simpler->complicated) + # ... the first handles RaisesGroup[ValueError], the second RaisesGroup[ExceptionGroup[ValueError]], + # the third RaisesGroup[ValueError | ExceptionGroup[ValueError]]. + # ... 
otherwise, we will get results like RaisesGroup[ValueError | ExceptionGroup[Never]] (I think) + # (technically correct but misleading) + @overload + def __init__( + self: RaisesGroup[ExcT_1], + expected_exception: type[ExcT_1] | RaisesExc[ExcT_1], + /, + *other_exceptions: type[ExcT_1] | RaisesExc[ExcT_1], + match: str | Pattern[str] | None = None, + check: Callable[[ExceptionGroup[ExcT_1]], bool] | None = None, + ) -> None: ... + + @overload + def __init__( + self: RaisesGroup[ExceptionGroup[ExcT_2]], + expected_exception: RaisesGroup[ExcT_2], + /, + *other_exceptions: RaisesGroup[ExcT_2], + match: str | Pattern[str] | None = None, + check: Callable[[ExceptionGroup[ExceptionGroup[ExcT_2]]], bool] | None = None, + ) -> None: ... + + @overload + def __init__( + self: RaisesGroup[ExcT_1 | ExceptionGroup[ExcT_2]], + expected_exception: type[ExcT_1] | RaisesExc[ExcT_1] | RaisesGroup[ExcT_2], + /, + *other_exceptions: type[ExcT_1] | RaisesExc[ExcT_1] | RaisesGroup[ExcT_2], + match: str | Pattern[str] | None = None, + check: ( + Callable[[ExceptionGroup[ExcT_1 | ExceptionGroup[ExcT_2]]], bool] | None + ) = None, + ) -> None: ... + + # same as the above 3 but handling BaseException + @overload + def __init__( + self: RaisesGroup[BaseExcT_1], + expected_exception: type[BaseExcT_1] | RaisesExc[BaseExcT_1], + /, + *other_exceptions: type[BaseExcT_1] | RaisesExc[BaseExcT_1], + match: str | Pattern[str] | None = None, + check: Callable[[BaseExceptionGroup[BaseExcT_1]], bool] | None = None, + ) -> None: ... + + @overload + def __init__( + self: RaisesGroup[BaseExceptionGroup[BaseExcT_2]], + expected_exception: RaisesGroup[BaseExcT_2], + /, + *other_exceptions: RaisesGroup[BaseExcT_2], + match: str | Pattern[str] | None = None, + check: ( + Callable[[BaseExceptionGroup[BaseExceptionGroup[BaseExcT_2]]], bool] | None + ) = None, + ) -> None: ... + + @overload + def __init__( + self: RaisesGroup[BaseExcT_1 | BaseExceptionGroup[BaseExcT_2]], + expected_exception: type[BaseExcT_1] + | RaisesExc[BaseExcT_1] + | RaisesGroup[BaseExcT_2], + /, + *other_exceptions: type[BaseExcT_1] + | RaisesExc[BaseExcT_1] + | RaisesGroup[BaseExcT_2], + match: str | Pattern[str] | None = None, + check: ( + Callable[ + [BaseExceptionGroup[BaseExcT_1 | BaseExceptionGroup[BaseExcT_2]]], + bool, + ] + | None + ) = None, + ) -> None: ... 
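    # [Editorial example, not part of the vendored patch.] The greedy-matching
    # caveat from the class docstring, made concrete: listing the more specific
    # matcher first keeps the greedy pass from spending a bare ValueError on
    # the exception the stricter matcher needed (assumes Python >= 3.11 or the
    # exceptiongroup backport):
    #
    #     from pytest import RaisesExc, RaisesGroup
    #     with RaisesGroup(RaisesExc(ValueError, match="hello"), ValueError):
    #         raise ExceptionGroup("", (ValueError("hello"), ValueError("goodbye")))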
+ + def __init__( + self: RaisesGroup[ExcT_1 | BaseExcT_1 | BaseExceptionGroup[BaseExcT_2]], + expected_exception: type[BaseExcT_1] + | RaisesExc[BaseExcT_1] + | RaisesGroup[BaseExcT_2], + /, + *other_exceptions: type[BaseExcT_1] + | RaisesExc[BaseExcT_1] + | RaisesGroup[BaseExcT_2], + allow_unwrapped: bool = False, + flatten_subgroups: bool = False, + match: str | Pattern[str] | None = None, + check: ( + Callable[[BaseExceptionGroup[BaseExcT_1]], bool] + | Callable[[ExceptionGroup[ExcT_1]], bool] + | None + ) = None, + ): + # The type hint on the `self` and `check` parameters uses different formats + # that are *very* hard to reconcile while adhering to the overloads, so we cast + # it to avoid an error when passing it to super().__init__ + check = cast( + "Callable[[BaseExceptionGroup[ExcT_1|BaseExcT_1|BaseExceptionGroup[BaseExcT_2]]], bool]", + check, + ) + super().__init__(match=match, check=check) + self.allow_unwrapped = allow_unwrapped + self.flatten_subgroups: bool = flatten_subgroups + self.is_baseexception = False + + if allow_unwrapped and other_exceptions: + raise ValueError( + "You cannot specify multiple exceptions with `allow_unwrapped=True.`" + " If you want to match one of multiple possible exceptions you should" + " use a `RaisesExc`." + " E.g. `RaisesExc(check=lambda e: isinstance(e, (...)))`", + ) + if allow_unwrapped and isinstance(expected_exception, RaisesGroup): + raise ValueError( + "`allow_unwrapped=True` has no effect when expecting a `RaisesGroup`." + " You might want it in the expected `RaisesGroup`, or" + " `flatten_subgroups=True` if you don't care about the structure.", + ) + if allow_unwrapped and (match is not None or check is not None): + raise ValueError( + "`allow_unwrapped=True` bypasses the `match` and `check` parameters" + " if the exception is unwrapped. If you intended to match/check the" + " exception you should use a `RaisesExc` object. If you want to match/check" + " the exceptiongroup when the exception *is* wrapped you need to" + " do e.g. `if isinstance(exc.value, ExceptionGroup):" + " assert RaisesGroup(...).matches(exc.value)` afterwards.", + ) + + self.expected_exceptions: tuple[ + type[BaseExcT_co] | RaisesExc[BaseExcT_co] | RaisesGroup[BaseException], ... + ] = tuple( + self._parse_excgroup(e, "a BaseException type, RaisesExc, or RaisesGroup") + for e in ( + expected_exception, + *other_exceptions, + ) + ) + + def _parse_excgroup( + self, + exc: ( + type[BaseExcT_co] + | types.GenericAlias + | RaisesExc[BaseExcT_1] + | RaisesGroup[BaseExcT_2] + ), + expected: str, + ) -> type[BaseExcT_co] | RaisesExc[BaseExcT_1] | RaisesGroup[BaseExcT_2]: + # verify exception type and set `self.is_baseexception` + if isinstance(exc, RaisesGroup): + if self.flatten_subgroups: + raise ValueError( + "You cannot specify a nested structure inside a RaisesGroup with" + " `flatten_subgroups=True`. 
The parameter will flatten subgroups" + " in the raised exceptiongroup before matching, which would never" + " match a nested structure.", + ) + self.is_baseexception |= exc.is_baseexception + exc._nested = True + return exc + elif isinstance(exc, RaisesExc): + self.is_baseexception |= exc.is_baseexception + exc._nested = True + return exc + elif isinstance(exc, tuple): + raise TypeError( + f"expected exception must be {expected}, not {type(exc).__name__!r}.\n" + "RaisesGroup does not support tuples of exception types when expecting one of " + "several possible exception types like RaisesExc.\n" + "If you meant to expect a group with multiple exceptions, list them as separate arguments." + ) + else: + return super()._parse_exc(exc, expected) + + @overload + def __enter__( + self: RaisesGroup[ExcT_1], + ) -> ExceptionInfo[ExceptionGroup[ExcT_1]]: ... + @overload + def __enter__( + self: RaisesGroup[BaseExcT_1], + ) -> ExceptionInfo[BaseExceptionGroup[BaseExcT_1]]: ... + + def __enter__(self) -> ExceptionInfo[BaseExceptionGroup[BaseException]]: + self.excinfo: ExceptionInfo[BaseExceptionGroup[BaseExcT_co]] = ( + ExceptionInfo.for_later() + ) + return self.excinfo + + def __repr__(self) -> str: + reqs = [ + e.__name__ if isinstance(e, type) else repr(e) + for e in self.expected_exceptions + ] + if self.allow_unwrapped: + reqs.append(f"allow_unwrapped={self.allow_unwrapped}") + if self.flatten_subgroups: + reqs.append(f"flatten_subgroups={self.flatten_subgroups}") + if self.match is not None: + # If no flags were specified, discard the redundant re.compile() here. + reqs.append(f"match={_match_pattern(self.match)!r}") + if self.check is not None: + reqs.append(f"check={repr_callable(self.check)}") + return f"RaisesGroup({', '.join(reqs)})" + + def _unroll_exceptions( + self, + exceptions: Sequence[BaseException], + ) -> Sequence[BaseException]: + """Used if `flatten_subgroups=True`.""" + res: list[BaseException] = [] + for exc in exceptions: + if isinstance(exc, BaseExceptionGroup): + res.extend(self._unroll_exceptions(exc.exceptions)) + + else: + res.append(exc) + return res + + @overload + def matches( + self: RaisesGroup[ExcT_1], + exception: BaseException | None, + ) -> TypeGuard[ExceptionGroup[ExcT_1]]: ... + @overload + def matches( + self: RaisesGroup[BaseExcT_1], + exception: BaseException | None, + ) -> TypeGuard[BaseExceptionGroup[BaseExcT_1]]: ... + + def matches( + self, + exception: BaseException | None, + ) -> bool: + """Check if an exception matches the requirements of this RaisesGroup. + If it fails, `RaisesGroup.fail_reason` will be set. + + Example:: + + with pytest.raises(TypeError) as excinfo: + ... 
+ assert RaisesGroup(ValueError).matches(excinfo.value.__cause__) + # the above line is equivalent to + myexc = excinfo.value.__cause + assert isinstance(myexc, BaseExceptionGroup) + assert len(myexc.exceptions) == 1 + assert isinstance(myexc.exceptions[0], ValueError) + """ + self._fail_reason = None + if exception is None: + self._fail_reason = "exception is None" + return False + if not isinstance(exception, BaseExceptionGroup): + # we opt to only print type of the exception here, as the repr would + # likely be quite long + not_group_msg = f"`{type(exception).__name__}()` is not an exception group" + if len(self.expected_exceptions) > 1: + self._fail_reason = not_group_msg + return False + # if we have 1 expected exception, check if it would work even if + # allow_unwrapped is not set + res = self._check_expected(self.expected_exceptions[0], exception) + if res is None and self.allow_unwrapped: + return True + + if res is None: + self._fail_reason = ( + f"{not_group_msg}, but would match with `allow_unwrapped=True`" + ) + elif self.allow_unwrapped: + self._fail_reason = res + else: + self._fail_reason = not_group_msg + return False + + actual_exceptions: Sequence[BaseException] = exception.exceptions + if self.flatten_subgroups: + actual_exceptions = self._unroll_exceptions(actual_exceptions) + + if not self._check_match(exception): + self._fail_reason = cast(str, self._fail_reason) + old_reason = self._fail_reason + if ( + len(actual_exceptions) == len(self.expected_exceptions) == 1 + and isinstance(expected := self.expected_exceptions[0], type) + and isinstance(actual := actual_exceptions[0], expected) + and self._check_match(actual) + ): + assert self.match is not None, "can't be None if _check_match failed" + assert self._fail_reason is old_reason is not None + self._fail_reason += ( + f"\n" + f" but matched the expected `{self._repr_expected(expected)}`.\n" + f" You might want " + f"`RaisesGroup(RaisesExc({expected.__name__}, match={_match_pattern(self.match)!r}))`" + ) + else: + self._fail_reason = old_reason + return False + + # do the full check on expected exceptions + if not self._check_exceptions( + exception, + actual_exceptions, + ): + self._fail_reason = cast(str, self._fail_reason) + assert self._fail_reason is not None + old_reason = self._fail_reason + # if we're not expecting a nested structure, and there is one, do a second + # pass where we try flattening it + if ( + not self.flatten_subgroups + and not any( + isinstance(e, RaisesGroup) for e in self.expected_exceptions + ) + and any(isinstance(e, BaseExceptionGroup) for e in actual_exceptions) + and self._check_exceptions( + exception, + self._unroll_exceptions(exception.exceptions), + ) + ): + # only indent if it's a single-line reason. In a multi-line there's already + # indented lines that this does not belong to. + indent = " " if "\n" not in self._fail_reason else "" + self._fail_reason = ( + old_reason + + f"\n{indent}Did you mean to use `flatten_subgroups=True`?" + ) + else: + self._fail_reason = old_reason + return False + + # Only run `self.check` once we know `exception` is of the correct type. 
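        # [Editorial note, not part of the vendored patch.] Ordering matters
        # here: the group structure, contained exceptions, and `match` have all
        # been verified by this point, so a user-supplied `check` may safely
        # assume the group has the expected shape (e.g. len(exception.exceptions)).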
+ if not self._check_check(exception): + reason = ( + cast(str, self._fail_reason) + f" on the {type(exception).__name__}" + ) + if ( + len(actual_exceptions) == len(self.expected_exceptions) == 1 + and isinstance(expected := self.expected_exceptions[0], type) + # we explicitly break typing here :) + and self._check_check(actual_exceptions[0]) # type: ignore[arg-type] + ): + self._fail_reason = reason + ( + f", but did return True for the expected {self._repr_expected(expected)}." + f" You might want RaisesGroup(RaisesExc({expected.__name__}, check=<...>))" + ) + else: + self._fail_reason = reason + return False + + return True + + @staticmethod + def _check_expected( + expected_type: ( + type[BaseException] | RaisesExc[BaseException] | RaisesGroup[BaseException] + ), + exception: BaseException, + ) -> str | None: + """Helper method for `RaisesGroup.matches` and `RaisesGroup._check_exceptions` + to check one of potentially several expected exceptions.""" + if isinstance(expected_type, type): + return _check_raw_type(expected_type, exception) + res = expected_type.matches(exception) + if res: + return None + assert expected_type.fail_reason is not None + if expected_type.fail_reason.startswith("\n"): + return f"\n{expected_type!r}: {indent(expected_type.fail_reason, ' ')}" + return f"{expected_type!r}: {expected_type.fail_reason}" + + @staticmethod + def _repr_expected(e: type[BaseException] | AbstractRaises[BaseException]) -> str: + """Get the repr of an expected type/RaisesExc/RaisesGroup, but we only want + the name if it's a type""" + if isinstance(e, type): + return _exception_type_name(e) + return repr(e) + + @overload + def _check_exceptions( + self: RaisesGroup[ExcT_1], + _exception: Exception, + actual_exceptions: Sequence[Exception], + ) -> TypeGuard[ExceptionGroup[ExcT_1]]: ... + @overload + def _check_exceptions( + self: RaisesGroup[BaseExcT_1], + _exception: BaseException, + actual_exceptions: Sequence[BaseException], + ) -> TypeGuard[BaseExceptionGroup[BaseExcT_1]]: ... 
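    # [Editorial sketch, not part of the vendored patch.] The pairing strategy
    # implemented below, reduced to its core: each expected entry claims the
    # first still-unclaimed raised exception it matches (greedy, first-fit).
    #
    #     def greedy_pair(expected_types, raised):
    #         remaining = list(range(len(raised)))
    #         matches = {}
    #         for i_exp, exp in enumerate(expected_types):
    #             for i_act in list(remaining):
    #                 if isinstance(raised[i_act], exp):
    #                     remaining.remove(i_act)
    #                     matches[i_exp] = i_act
    #                     break
    #         return matches, remaining
    #
    #     # greedy_pair((ValueError, Exception), (ValueError("a"), TypeError("b")))
    #     # -> ({0: 0, 1: 1}, [])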
+ + def _check_exceptions( + self, + _exception: BaseException, + actual_exceptions: Sequence[BaseException], + ) -> bool: + """Helper method for RaisesGroup.matches that attempts to pair up expected and actual exceptions""" + # The _exception parameter is not used, but necessary for the TypeGuard + + # full table with all results + results = ResultHolder(self.expected_exceptions, actual_exceptions) + + # (indexes of) raised exceptions that haven't (yet) found an expected + remaining_actual = list(range(len(actual_exceptions))) + # (indexes of) expected exceptions that haven't found a matching raised + failed_expected: list[int] = [] + # successful greedy matches + matches: dict[int, int] = {} + + # loop over expected exceptions first to get a more predictable result + for i_exp, expected in enumerate(self.expected_exceptions): + for i_rem in remaining_actual: + res = self._check_expected(expected, actual_exceptions[i_rem]) + results.set_result(i_exp, i_rem, res) + if res is None: + remaining_actual.remove(i_rem) + matches[i_exp] = i_rem + break + else: + failed_expected.append(i_exp) + + # All exceptions matched up successfully + if not remaining_actual and not failed_expected: + return True + + # in case of a single expected and single raised we simplify the output + if 1 == len(actual_exceptions) == len(self.expected_exceptions): + assert not matches + self._fail_reason = res + return False + + # The test case is failing, so we can do a slow and exhaustive check to find + # duplicate matches etc that will be helpful in debugging + for i_exp, expected in enumerate(self.expected_exceptions): + for i_actual, actual in enumerate(actual_exceptions): + if results.has_result(i_exp, i_actual): + continue + results.set_result( + i_exp, i_actual, self._check_expected(expected, actual) + ) + + successful_str = ( + f"{len(matches)} matched exception{'s' if len(matches) > 1 else ''}. " + if matches + else "" + ) + + # all expected were found + if not failed_expected and results.no_match_for_actual(remaining_actual): + self._fail_reason = ( + f"{successful_str}Unexpected exception(s):" + f" {[actual_exceptions[i] for i in remaining_actual]!r}" + ) + return False + # all raised exceptions were expected + if not remaining_actual and results.no_match_for_expected(failed_expected): + no_match_for_str = ", ".join( + self._repr_expected(self.expected_exceptions[i]) + for i in failed_expected + ) + self._fail_reason = f"{successful_str}Too few exceptions raised, found no match for: [{no_match_for_str}]" + return False + + # if there's only one remaining and one failed, and the unmatched didn't match anything else, + # we elect to only print why the remaining and the failed didn't match. + if ( + 1 == len(remaining_actual) == len(failed_expected) + and results.no_match_for_actual(remaining_actual) + and results.no_match_for_expected(failed_expected) + ): + self._fail_reason = f"{successful_str}{results.get_result(failed_expected[0], remaining_actual[0])}" + return False + + # there's both expected and raised exceptions without matches + s = "" + if matches: + s += f"\n{successful_str}" + indent_1 = " " * 2 + indent_2 = " " * 4 + + if not remaining_actual: + s += "\nToo few exceptions raised!" + elif not failed_expected: + s += "\nUnexpected exception(s)!" 
+
+        if failed_expected:
+            s += "\nThe following expected exceptions did not find a match:"
+            rev_matches = {v: k for k, v in matches.items()}
+            for i_failed in failed_expected:
+                s += (
+                    f"\n{indent_1}{self._repr_expected(self.expected_exceptions[i_failed])}"
+                )
+                for i_actual, actual in enumerate(actual_exceptions):
+                    # use the index of *this* failed expected, not a stale loop
+                    # variable from the exhaustive check above
+                    if results.get_result(i_failed, i_actual) is None:
+                        # we print full repr of match target
+                        s += (
+                            f"\n{indent_2}It matches {backquote(repr(actual))} which was paired with "
+                            + backquote(
+                                self._repr_expected(
+                                    self.expected_exceptions[rev_matches[i_actual]]
+                                )
+                            )
+                        )
+
+        if remaining_actual:
+            s += "\nThe following raised exceptions did not find a match:"
+            for i_actual in remaining_actual:
+                s += f"\n{indent_1}{actual_exceptions[i_actual]!r}:"
+                for i_exp, expected in enumerate(self.expected_exceptions):
+                    res = results.get_result(i_exp, i_actual)
+                    if i_exp in failed_expected:
+                        assert res is not None
+                        if res[0] != "\n":
+                            s += "\n"
+                        s += indent(res, indent_2)
+                    if res is None:
+                        # we print full repr of match target
+                        s += (
+                            f"\n{indent_2}It matches {backquote(self._repr_expected(expected))} "
+                            f"which was paired with {backquote(repr(actual_exceptions[matches[i_exp]]))}"
+                        )
+
+        if len(self.expected_exceptions) == len(actual_exceptions) and possible_match(
+            results
+        ):
+            s += (
+                "\nThere exists a possible match when attempting an exhaustive check,"
+                " but RaisesGroup uses a greedy algorithm. "
+                "Please make your expected exceptions more stringent with `RaisesExc` etc."
+                " so the greedy algorithm can function."
+            )
+        self._fail_reason = s
+        return False
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc_val: BaseException | None,
+        exc_tb: types.TracebackType | None,
+    ) -> bool:
+        __tracebackhide__ = True
+        if exc_type is None:
+            fail(f"DID NOT RAISE any exception, expected `{self.expected_type()}`")
+
+        assert self.excinfo is not None, (
+            "Internal error - should have been constructed in __enter__"
+        )
+
+        # group_str is the only thing that differs between RaisesExc and RaisesGroup...
+        # I might just scrap it? Or make it part of fail_reason
+        group_str = (
+            "(group)"
+            if self.allow_unwrapped and not issubclass(exc_type, BaseExceptionGroup)
+            else "group"
+        )
+
+        if not self.matches(exc_val):
+            fail(f"Raised exception {group_str} did not match: {self._fail_reason}")
+
+        # Cast to narrow the exception type now that it's verified, even though
+        # the TypeGuard in self.matches should already be doing the narrowing.
+        exc_info = cast(
+            "tuple[type[BaseExceptionGroup[BaseExcT_co]], BaseExceptionGroup[BaseExcT_co], types.TracebackType]",
+            (exc_type, exc_val, exc_tb),
+        )
+        self.excinfo.fill_unfilled(exc_info)
+        return True
+
+    def expected_type(self) -> str:
+        subexcs = []
+        for e in self.expected_exceptions:
+            if isinstance(e, RaisesExc):
+                subexcs.append(repr(e))
+            elif isinstance(e, RaisesGroup):
+                subexcs.append(e.expected_type())
+            elif isinstance(e, type):
+                subexcs.append(e.__name__)
+            else:  # pragma: no cover
+                raise AssertionError("unknown type")
+        group_type = "Base" if self.is_baseexception else ""
+        return f"{group_type}ExceptionGroup({', '.join(subexcs)})"
+
+
+@final
+class NotChecked:
+    """Singleton for unchecked values in ResultHolder"""
+
+
+class ResultHolder:
+    """Container for results of checking exceptions.
+    Used in RaisesGroup._check_exceptions and possible_match.
+    """
+
+    def __init__(
+        self,
+        expected_exceptions: tuple[
+            type[BaseException] | AbstractRaises[BaseException], ...
+ ], + actual_exceptions: Sequence[BaseException], + ) -> None: + self.results: list[list[str | type[NotChecked] | None]] = [ + [NotChecked for _ in expected_exceptions] for _ in actual_exceptions + ] + + def set_result(self, expected: int, actual: int, result: str | None) -> None: + self.results[actual][expected] = result + + def get_result(self, expected: int, actual: int) -> str | None: + res = self.results[actual][expected] + assert res is not NotChecked + # mypy doesn't support identity checking against anything but None + return res # type: ignore[return-value] + + def has_result(self, expected: int, actual: int) -> bool: + return self.results[actual][expected] is not NotChecked + + def no_match_for_expected(self, expected: list[int]) -> bool: + for i in expected: + for actual_results in self.results: + assert actual_results[i] is not NotChecked + if actual_results[i] is None: + return False + return True + + def no_match_for_actual(self, actual: list[int]) -> bool: + for i in actual: + for res in self.results[i]: + assert res is not NotChecked + if res is None: + return False + return True + + +def possible_match(results: ResultHolder, used: set[int] | None = None) -> bool: + if used is None: + used = set() + curr_row = len(used) + if curr_row == len(results.results): + return True + return any( + val is None and i not in used and possible_match(results, used | {i}) + for (i, val) in enumerate(results.results[curr_row]) + ) diff --git a/.venv/lib/python3.11/site-packages/_pytest/recwarn.py b/.venv/lib/python3.11/site-packages/_pytest/recwarn.py new file mode 100644 index 0000000..440e3ef --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/recwarn.py @@ -0,0 +1,365 @@ +# mypy: allow-untyped-defs +"""Record warnings during test function execution.""" + +from __future__ import annotations + +from collections.abc import Callable +from collections.abc import Generator +from collections.abc import Iterator +from pprint import pformat +import re +from types import TracebackType +from typing import Any +from typing import final +from typing import overload +from typing import TYPE_CHECKING +from typing import TypeVar + + +if TYPE_CHECKING: + from typing_extensions import Self + +import warnings + +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.outcomes import Exit +from _pytest.outcomes import fail + + +T = TypeVar("T") + + +@fixture +def recwarn() -> Generator[WarningsRecorder]: + """Return a :class:`WarningsRecorder` instance that records all warnings emitted by test functions. + + See :ref:`warnings` for information on warning categories. + """ + wrec = WarningsRecorder(_ispytest=True) + with wrec: + warnings.simplefilter("default") + yield wrec + + +@overload +def deprecated_call( + *, match: str | re.Pattern[str] | None = ... +) -> WarningsRecorder: ... + + +@overload +def deprecated_call(func: Callable[..., T], *args: Any, **kwargs: Any) -> T: ... + + +def deprecated_call( + func: Callable[..., Any] | None = None, *args: Any, **kwargs: Any +) -> WarningsRecorder | Any: + """Assert that code produces a ``DeprecationWarning`` or ``PendingDeprecationWarning`` or ``FutureWarning``. + + This function can be used as a context manager:: + + >>> import warnings + >>> def api_call_v2(): + ... warnings.warn('use v3 of this api', DeprecationWarning) + ... return 200 + + >>> import pytest + >>> with pytest.deprecated_call(): + ... 
assert api_call_v2() == 200 + + It can also be used by passing a function and ``*args`` and ``**kwargs``, + in which case it will ensure calling ``func(*args, **kwargs)`` produces one of + the warnings types above. The return value is the return value of the function. + + In the context manager form you may use the keyword argument ``match`` to assert + that the warning matches a text or regex. + + The context manager produces a list of :class:`warnings.WarningMessage` objects, + one for each warning raised. + """ + __tracebackhide__ = True + if func is not None: + args = (func, *args) + return warns( + (DeprecationWarning, PendingDeprecationWarning, FutureWarning), *args, **kwargs + ) + + +@overload +def warns( + expected_warning: type[Warning] | tuple[type[Warning], ...] = ..., + *, + match: str | re.Pattern[str] | None = ..., +) -> WarningsChecker: ... + + +@overload +def warns( + expected_warning: type[Warning] | tuple[type[Warning], ...], + func: Callable[..., T], + *args: Any, + **kwargs: Any, +) -> T: ... + + +def warns( + expected_warning: type[Warning] | tuple[type[Warning], ...] = Warning, + *args: Any, + match: str | re.Pattern[str] | None = None, + **kwargs: Any, +) -> WarningsChecker | Any: + r"""Assert that code raises a particular class of warning. + + Specifically, the parameter ``expected_warning`` can be a warning class or tuple + of warning classes, and the code inside the ``with`` block must issue at least one + warning of that class or classes. + + This helper produces a list of :class:`warnings.WarningMessage` objects, one for + each warning emitted (regardless of whether it is an ``expected_warning`` or not). + Since pytest 8.0, unmatched warnings are also re-emitted when the context closes. + + This function can be used as a context manager:: + + >>> import pytest + >>> with pytest.warns(RuntimeWarning): + ... warnings.warn("my warning", RuntimeWarning) + + In the context manager form you may use the keyword argument ``match`` to assert + that the warning matches a text or regex:: + + >>> with pytest.warns(UserWarning, match='must be 0 or None'): + ... warnings.warn("value must be 0 or None", UserWarning) + + >>> with pytest.warns(UserWarning, match=r'must be \d+$'): + ... warnings.warn("value must be 42", UserWarning) + + >>> with pytest.warns(UserWarning): # catch re-emitted warning + ... with pytest.warns(UserWarning, match=r'must be \d+$'): + ... warnings.warn("this is not here", UserWarning) + Traceback (most recent call last): + ... + Failed: DID NOT WARN. No warnings of type ...UserWarning... were emitted... + + **Using with** ``pytest.mark.parametrize`` + + When using :ref:`pytest.mark.parametrize ref` it is possible to parametrize tests + such that some runs raise a warning and others do not. + + This could be achieved in the same way as with exceptions, see + :ref:`parametrizing_conditional_raising` for an example. + + """ + __tracebackhide__ = True + if not args: + if kwargs: + argnames = ", ".join(sorted(kwargs)) + raise TypeError( + f"Unexpected keyword arguments passed to pytest.warns: {argnames}" + "\nUse context-manager form instead?" 
+ ) + return WarningsChecker(expected_warning, match_expr=match, _ispytest=True) + else: + func = args[0] + if not callable(func): + raise TypeError(f"{func!r} object (type: {type(func)}) must be callable") + with WarningsChecker(expected_warning, _ispytest=True): + return func(*args[1:], **kwargs) + + +class WarningsRecorder(warnings.catch_warnings): # type:ignore[type-arg] + """A context manager to record raised warnings. + + Each recorded warning is an instance of :class:`warnings.WarningMessage`. + + Adapted from `warnings.catch_warnings`. + + .. note:: + ``DeprecationWarning`` and ``PendingDeprecationWarning`` are treated + differently; see :ref:`ensuring_function_triggers`. + + """ + + def __init__(self, *, _ispytest: bool = False) -> None: + check_ispytest(_ispytest) + super().__init__(record=True) + self._entered = False + self._list: list[warnings.WarningMessage] = [] + + @property + def list(self) -> list[warnings.WarningMessage]: + """The list of recorded warnings.""" + return self._list + + def __getitem__(self, i: int) -> warnings.WarningMessage: + """Get a recorded warning by index.""" + return self._list[i] + + def __iter__(self) -> Iterator[warnings.WarningMessage]: + """Iterate through the recorded warnings.""" + return iter(self._list) + + def __len__(self) -> int: + """The number of recorded warnings.""" + return len(self._list) + + def pop(self, cls: type[Warning] = Warning) -> warnings.WarningMessage: + """Pop the first recorded warning which is an instance of ``cls``, + but not an instance of a child class of any other match. + Raises ``AssertionError`` if there is no match. + """ + best_idx: int | None = None + for i, w in enumerate(self._list): + if w.category == cls: + return self._list.pop(i) # exact match, stop looking + if issubclass(w.category, cls) and ( + best_idx is None + or not issubclass(w.category, self._list[best_idx].category) + ): + best_idx = i + if best_idx is not None: + return self._list.pop(best_idx) + __tracebackhide__ = True + raise AssertionError(f"{cls!r} not found in warning list") + + def clear(self) -> None: + """Clear the list of recorded warnings.""" + self._list[:] = [] + + def __enter__(self) -> Self: + if self._entered: + __tracebackhide__ = True + raise RuntimeError(f"Cannot enter {self!r} twice") + _list = super().__enter__() + # record=True means it's None. + assert _list is not None + self._list = _list + warnings.simplefilter("always") + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + if not self._entered: + __tracebackhide__ = True + raise RuntimeError(f"Cannot exit {self!r} without entering first") + + super().__exit__(exc_type, exc_val, exc_tb) + + # Built-in catch_warnings does not reset entered state so we do it + # manually here for this context manager to become reusable. + self._entered = False + + +@final +class WarningsChecker(WarningsRecorder): + def __init__( + self, + expected_warning: type[Warning] | tuple[type[Warning], ...] 
= Warning, + match_expr: str | re.Pattern[str] | None = None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + super().__init__(_ispytest=True) + + msg = "exceptions must be derived from Warning, not %s" + if isinstance(expected_warning, tuple): + for exc in expected_warning: + if not issubclass(exc, Warning): + raise TypeError(msg % type(exc)) + expected_warning_tup = expected_warning + elif isinstance(expected_warning, type) and issubclass( + expected_warning, Warning + ): + expected_warning_tup = (expected_warning,) + else: + raise TypeError(msg % type(expected_warning)) + + self.expected_warning = expected_warning_tup + self.match_expr = match_expr + + def matches(self, warning: warnings.WarningMessage) -> bool: + assert self.expected_warning is not None + return issubclass(warning.category, self.expected_warning) and bool( + self.match_expr is None or re.search(self.match_expr, str(warning.message)) + ) + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + super().__exit__(exc_type, exc_val, exc_tb) + + __tracebackhide__ = True + + # BaseExceptions like pytest.{skip,fail,xfail,exit} or Ctrl-C within + # pytest.warns should *not* trigger "DID NOT WARN" and get suppressed + # when the warning doesn't happen. Control-flow exceptions should always + # propagate. + if exc_val is not None and ( + not isinstance(exc_val, Exception) + # Exit is an Exception, not a BaseException, for some reason. + or isinstance(exc_val, Exit) + ): + return + + def found_str() -> str: + return pformat([record.message for record in self], indent=2) + + try: + if not any(issubclass(w.category, self.expected_warning) for w in self): + fail( + f"DID NOT WARN. No warnings of type {self.expected_warning} were emitted.\n" + f" Emitted warnings: {found_str()}." + ) + elif not any(self.matches(w) for w in self): + fail( + f"DID NOT WARN. No warnings of type {self.expected_warning} matching the regex were emitted.\n" + f" Regex: {self.match_expr}\n" + f" Emitted warnings: {found_str()}." + ) + finally: + # Whether or not any warnings matched, we want to re-emit all unmatched warnings. + for w in self: + if not self.matches(w): + warnings.warn_explicit( + message=w.message, + category=w.category, + filename=w.filename, + lineno=w.lineno, + module=w.__module__, + source=w.source, + ) + + # Currently in Python it is possible to pass other types than an + # `str` message when creating `Warning` instances, however this + # causes an exception when :func:`warnings.filterwarnings` is used + # to filter those warnings. See + # https://github.com/python/cpython/issues/103577 for a discussion. + # While this can be considered a bug in CPython, we put guards in + # pytest as the error message produced without this check in place + # is confusing (#10865). + for w in self: + if type(w.message) is not UserWarning: + # If the warning was of an incorrect type then `warnings.warn()` + # creates a UserWarning. Any other warning must have been specified + # explicitly. + continue + if not w.message.args: + # UserWarning() without arguments must have been specified explicitly. + continue + msg = w.message.args[0] + if isinstance(msg, str): + continue + # It's possible that UserWarning was explicitly specified, and + # its first argument was not a string. But that case can't be + # distinguished from an invalid type. 
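+            # For example, plain `warnings.warn(1)` wraps the int as
+            # UserWarning(1); letting that through would make a later
+            # `warnings.filterwarnings` call crash, so we raise a clearer
+            # TypeError here instead.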
+ raise TypeError( + f"Warning must be str or Warning, got {msg!r} (type {type(msg).__name__})" + ) diff --git a/.venv/lib/python3.11/site-packages/_pytest/reports.py b/.venv/lib/python3.11/site-packages/_pytest/reports.py new file mode 100644 index 0000000..480ffae --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/reports.py @@ -0,0 +1,637 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +from collections.abc import Iterable +from collections.abc import Iterator +from collections.abc import Mapping +from collections.abc import Sequence +import dataclasses +from io import StringIO +import os +from pprint import pprint +from typing import Any +from typing import cast +from typing import final +from typing import Literal +from typing import NoReturn +from typing import TYPE_CHECKING + +from _pytest._code.code import ExceptionChainRepr +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import ExceptionRepr +from _pytest._code.code import ReprEntry +from _pytest._code.code import ReprEntryNative +from _pytest._code.code import ReprExceptionInfo +from _pytest._code.code import ReprFileLocation +from _pytest._code.code import ReprFuncArgs +from _pytest._code.code import ReprLocals +from _pytest._code.code import ReprTraceback +from _pytest._code.code import TerminalRepr +from _pytest._io import TerminalWriter +from _pytest.config import Config +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.outcomes import fail +from _pytest.outcomes import skip + + +if TYPE_CHECKING: + from typing_extensions import Self + + from _pytest.runner import CallInfo + + +def getworkerinfoline(node): + try: + return node._workerinfocache + except AttributeError: + d = node.workerinfo + ver = "{}.{}.{}".format(*d["version_info"][:3]) + node._workerinfocache = s = "[{}] {} -- Python {} {}".format( + d["id"], d["sysplatform"], ver, d["executable"] + ) + return s + + +class BaseReport: + when: str | None + location: tuple[str, int | None, str] | None + longrepr: ( + None | ExceptionInfo[BaseException] | tuple[str, int, str] | str | TerminalRepr + ) + sections: list[tuple[str, str]] + nodeid: str + outcome: Literal["passed", "failed", "skipped"] + + def __init__(self, **kw: Any) -> None: + self.__dict__.update(kw) + + if TYPE_CHECKING: + # Can have arbitrary fields given to __init__(). + def __getattr__(self, key: str) -> Any: ... + + def toterminal(self, out: TerminalWriter) -> None: + if hasattr(self, "node"): + worker_info = getworkerinfoline(self.node) + if worker_info: + out.line(worker_info) + + longrepr = self.longrepr + if longrepr is None: + return + + if hasattr(longrepr, "toterminal"): + longrepr_terminal = cast(TerminalRepr, longrepr) + longrepr_terminal.toterminal(out) + else: + try: + s = str(longrepr) + except UnicodeEncodeError: + s = "" + out.line(s) + + def get_sections(self, prefix: str) -> Iterator[tuple[str, str]]: + for name, content in self.sections: + if name.startswith(prefix): + yield prefix, content + + @property + def longreprtext(self) -> str: + """Read-only property that returns the full string representation of + ``longrepr``. + + .. versionadded:: 3.0 + """ + file = StringIO() + tw = TerminalWriter(file) + tw.hasmarkup = False + self.toterminal(tw) + exc = file.getvalue() + return exc.strip() + + @property + def caplog(self) -> str: + """Return captured log lines, if log capturing is enabled. + + .. 
versionadded:: 3.5 + """ + return "\n".join( + content for (prefix, content) in self.get_sections("Captured log") + ) + + @property + def capstdout(self) -> str: + """Return captured text from stdout, if capturing is enabled. + + .. versionadded:: 3.0 + """ + return "".join( + content for (prefix, content) in self.get_sections("Captured stdout") + ) + + @property + def capstderr(self) -> str: + """Return captured text from stderr, if capturing is enabled. + + .. versionadded:: 3.0 + """ + return "".join( + content for (prefix, content) in self.get_sections("Captured stderr") + ) + + @property + def passed(self) -> bool: + """Whether the outcome is passed.""" + return self.outcome == "passed" + + @property + def failed(self) -> bool: + """Whether the outcome is failed.""" + return self.outcome == "failed" + + @property + def skipped(self) -> bool: + """Whether the outcome is skipped.""" + return self.outcome == "skipped" + + @property + def fspath(self) -> str: + """The path portion of the reported node, as a string.""" + return self.nodeid.split("::")[0] + + @property + def count_towards_summary(self) -> bool: + """**Experimental** Whether this report should be counted towards the + totals shown at the end of the test session: "1 passed, 1 failure, etc". + + .. note:: + + This function is considered **experimental**, so beware that it is subject to changes + even in patch releases. + """ + return True + + @property + def head_line(self) -> str | None: + """**Experimental** The head line shown with longrepr output for this + report, more commonly during traceback representation during + failures:: + + ________ Test.foo ________ + + + In the example above, the head_line is "Test.foo". + + .. note:: + + This function is considered **experimental**, so beware that it is subject to changes + even in patch releases. + """ + if self.location is not None: + fspath, lineno, domain = self.location + return domain + return None + + def _get_verbose_word_with_markup( + self, config: Config, default_markup: Mapping[str, bool] + ) -> tuple[str, Mapping[str, bool]]: + _category, _short, verbose = config.hook.pytest_report_teststatus( + report=self, config=config + ) + + if isinstance(verbose, str): + return verbose, default_markup + + if isinstance(verbose, Sequence) and len(verbose) == 2: + word, markup = verbose + if isinstance(word, str) and isinstance(markup, Mapping): + return word, markup + + fail( # pragma: no cover + "pytest_report_teststatus() hook (from a plugin) returned " + f"an invalid verbose value: {verbose!r}.\nExpected either a string " + "or a tuple of (word, markup)." + ) + + def _to_json(self) -> dict[str, Any]: + """Return the contents of this report as a dict of builtin entries, + suitable for serialization. + + This was originally the serialize_report() function from xdist (ca03269). + + Experimental method. + """ + return _report_to_json(self) + + @classmethod + def _from_json(cls, reportdict: dict[str, object]) -> Self: + """Create either a TestReport or CollectReport, depending on the calling class. + + It is the callers responsibility to know which class to pass here. + + This was originally the serialize_report() function from xdist (ca03269). + + Experimental method. 
+ """ + kwargs = _report_kwargs_from_json(reportdict) + return cls(**kwargs) + + +def _report_unserialization_failure( + type_name: str, report_class: type[BaseReport], reportdict +) -> NoReturn: + url = "https://github.com/pytest-dev/pytest/issues" + stream = StringIO() + pprint("-" * 100, stream=stream) + pprint(f"INTERNALERROR: Unknown entry type returned: {type_name}", stream=stream) + pprint(f"report_name: {report_class}", stream=stream) + pprint(reportdict, stream=stream) + pprint(f"Please report this bug at {url}", stream=stream) + pprint("-" * 100, stream=stream) + raise RuntimeError(stream.getvalue()) + + +@final +class TestReport(BaseReport): + """Basic test report object (also used for setup and teardown calls if + they fail). + + Reports can contain arbitrary extra attributes. + """ + + __test__ = False + + # Defined by skipping plugin. + # xfail reason if xfailed, otherwise not defined. Use hasattr to distinguish. + wasxfail: str + + def __init__( + self, + nodeid: str, + location: tuple[str, int | None, str], + keywords: Mapping[str, Any], + outcome: Literal["passed", "failed", "skipped"], + longrepr: None + | ExceptionInfo[BaseException] + | tuple[str, int, str] + | str + | TerminalRepr, + when: Literal["setup", "call", "teardown"], + sections: Iterable[tuple[str, str]] = (), + duration: float = 0, + start: float = 0, + stop: float = 0, + user_properties: Iterable[tuple[str, object]] | None = None, + **extra, + ) -> None: + #: Normalized collection nodeid. + self.nodeid = nodeid + + #: A (filesystempath, lineno, domaininfo) tuple indicating the + #: actual location of a test item - it might be different from the + #: collected one e.g. if a method is inherited from a different module. + #: The filesystempath may be relative to ``config.rootdir``. + #: The line number is 0-based. + self.location: tuple[str, int | None, str] = location + + #: A name -> value dictionary containing all keywords and + #: markers associated with a test invocation. + self.keywords: Mapping[str, Any] = keywords + + #: Test outcome, always one of "passed", "failed", "skipped". + self.outcome = outcome + + #: None or a failure representation. + self.longrepr = longrepr + + #: One of 'setup', 'call', 'teardown' to indicate runtest phase. + self.when: Literal["setup", "call", "teardown"] = when + + #: User properties is a list of tuples (name, value) that holds user + #: defined properties of the test. + self.user_properties = list(user_properties or []) + + #: Tuples of str ``(heading, content)`` with extra information + #: for the test report. Used by pytest to add text captured + #: from ``stdout``, ``stderr``, and intercepted logging events. May + #: be used by other plugins to add arbitrary information to reports. + self.sections = list(sections) + + #: Time it took to run just the test. + self.duration: float = duration + + #: The system time when the call started, in seconds since the epoch. + self.start: float = start + #: The system time when the call ended, in seconds since the epoch. + self.stop: float = stop + + self.__dict__.update(extra) + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self.nodeid!r} when={self.when!r} outcome={self.outcome!r}>" + + @classmethod + def from_item_and_call(cls, item: Item, call: CallInfo[None]) -> TestReport: + """Create and fill a TestReport with standard item and call info. + + :param item: The item. + :param call: The call info. + """ + when = call.when + # Remove "collect" from the Literal type -- only for collection calls. 
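+        # (CallInfo.when can also be "collect", but collection calls produce a
+        # CollectReport via pytest_make_collect_report, never a TestReport.)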
+        assert when != "collect"
+        duration = call.duration
+        start = call.start
+        stop = call.stop
+        keywords = {x: 1 for x in item.keywords}
+        excinfo = call.excinfo
+        sections = []
+        if not call.excinfo:
+            outcome: Literal["passed", "failed", "skipped"] = "passed"
+            longrepr: (
+                None
+                | ExceptionInfo[BaseException]
+                | tuple[str, int, str]
+                | str
+                | TerminalRepr
+            ) = None
+        else:
+            if not isinstance(excinfo, ExceptionInfo):
+                outcome = "failed"
+                longrepr = excinfo
+            elif isinstance(excinfo.value, skip.Exception):
+                outcome = "skipped"
+                r = excinfo._getreprcrash()
+                assert r is not None, (
+                    "There should always be a traceback entry for skipping a test."
+                )
+                if excinfo.value._use_item_location:
+                    path, line = item.reportinfo()[:2]
+                    assert line is not None
+                    longrepr = os.fspath(path), line + 1, r.message
+                else:
+                    longrepr = (str(r.path), r.lineno, r.message)
+            else:
+                outcome = "failed"
+                if call.when == "call":
+                    longrepr = item.repr_failure(excinfo)
+                else:  # exception in setup or teardown
+                    longrepr = item._repr_failure_py(
+                        excinfo, style=item.config.getoption("tbstyle", "auto")
+                    )
+        for rwhen, key, content in item._report_sections:
+            sections.append((f"Captured {key} {rwhen}", content))
+        return cls(
+            item.nodeid,
+            item.location,
+            keywords,
+            outcome,
+            longrepr,
+            when,
+            sections,
+            duration,
+            start,
+            stop,
+            user_properties=item.user_properties,
+        )
+
+
+@final
+class CollectReport(BaseReport):
+    """Collection report object.
+
+    Reports can contain arbitrary extra attributes.
+    """
+
+    when = "collect"
+
+    def __init__(
+        self,
+        nodeid: str,
+        outcome: Literal["passed", "failed", "skipped"],
+        longrepr: None
+        | ExceptionInfo[BaseException]
+        | tuple[str, int, str]
+        | str
+        | TerminalRepr,
+        result: list[Item | Collector] | None,
+        sections: Iterable[tuple[str, str]] = (),
+        **extra,
+    ) -> None:
+        #: Normalized collection nodeid.
+        self.nodeid = nodeid
+
+        #: Test outcome, always one of "passed", "failed", "skipped".
+        self.outcome = outcome
+
+        #: None or a failure representation.
+        self.longrepr = longrepr
+
+        #: The collected items and collection nodes.
+        self.result = result or []
+
+        #: Tuples of str ``(heading, content)`` with extra information
+        #: for the test report. Used by pytest to add text captured
+        #: from ``stdout``, ``stderr``, and intercepted logging events. May
+        #: be used by other plugins to add arbitrary information to reports.
+        self.sections = list(sections)
+
+        self.__dict__.update(extra)
+
+    @property
+    def location(  # type:ignore[override]
+        self,
+    ) -> tuple[str, int | None, str] | None:
+        return (self.fspath, None, self.fspath)
+
+    def __repr__(self) -> str:
+        return f"<CollectReport {self.nodeid!r} lenresult={len(self.result)} outcome={self.outcome!r}>"
+
+
+class CollectErrorRepr(TerminalRepr):
+    def __init__(self, msg: str) -> None:
+        self.longrepr = msg
+
+    def toterminal(self, out: TerminalWriter) -> None:
+        out.line(self.longrepr, red=True)
+
+
+def pytest_report_to_serializable(
+    report: CollectReport | TestReport,
+) -> dict[str, Any] | None:
+    if isinstance(report, (TestReport, CollectReport)):
+        data = report._to_json()
+        data["$report_type"] = report.__class__.__name__
+        return data
+    # TODO: Check if this is actually reachable.
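+    # Returning None (for a foreign report type) lets another implementation
+    # of this firstresult hook attempt the serialization instead.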
+ return None # type: ignore[unreachable] + + +def pytest_report_from_serializable( + data: dict[str, Any], +) -> CollectReport | TestReport | None: + if "$report_type" in data: + if data["$report_type"] == "TestReport": + return TestReport._from_json(data) + elif data["$report_type"] == "CollectReport": + return CollectReport._from_json(data) + assert False, "Unknown report_type unserialize data: {}".format( + data["$report_type"] + ) + return None + + +def _report_to_json(report: BaseReport) -> dict[str, Any]: + """Return the contents of this report as a dict of builtin entries, + suitable for serialization. + + This was originally the serialize_report() function from xdist (ca03269). + """ + + def serialize_repr_entry( + entry: ReprEntry | ReprEntryNative, + ) -> dict[str, Any]: + data = dataclasses.asdict(entry) + for key, value in data.items(): + if hasattr(value, "__dict__"): + data[key] = dataclasses.asdict(value) + entry_data = {"type": type(entry).__name__, "data": data} + return entry_data + + def serialize_repr_traceback(reprtraceback: ReprTraceback) -> dict[str, Any]: + result = dataclasses.asdict(reprtraceback) + result["reprentries"] = [ + serialize_repr_entry(x) for x in reprtraceback.reprentries + ] + return result + + def serialize_repr_crash( + reprcrash: ReprFileLocation | None, + ) -> dict[str, Any] | None: + if reprcrash is not None: + return dataclasses.asdict(reprcrash) + else: + return None + + def serialize_exception_longrepr(rep: BaseReport) -> dict[str, Any]: + assert rep.longrepr is not None + # TODO: Investigate whether the duck typing is really necessary here. + longrepr = cast(ExceptionRepr, rep.longrepr) + result: dict[str, Any] = { + "reprcrash": serialize_repr_crash(longrepr.reprcrash), + "reprtraceback": serialize_repr_traceback(longrepr.reprtraceback), + "sections": longrepr.sections, + } + if isinstance(longrepr, ExceptionChainRepr): + result["chain"] = [] + for repr_traceback, repr_crash, description in longrepr.chain: + result["chain"].append( + ( + serialize_repr_traceback(repr_traceback), + serialize_repr_crash(repr_crash), + description, + ) + ) + else: + result["chain"] = None + return result + + d = report.__dict__.copy() + if hasattr(report.longrepr, "toterminal"): + if hasattr(report.longrepr, "reprtraceback") and hasattr( + report.longrepr, "reprcrash" + ): + d["longrepr"] = serialize_exception_longrepr(report) + else: + d["longrepr"] = str(report.longrepr) + else: + d["longrepr"] = report.longrepr + for name in d: + if isinstance(d[name], os.PathLike): + d[name] = os.fspath(d[name]) + elif name == "result": + d[name] = None # for now + return d + + +def _report_kwargs_from_json(reportdict: dict[str, Any]) -> dict[str, Any]: + """Return **kwargs that can be used to construct a TestReport or + CollectReport instance. + + This was originally the serialize_report() function from xdist (ca03269). 
+ """ + + def deserialize_repr_entry(entry_data): + data = entry_data["data"] + entry_type = entry_data["type"] + if entry_type == "ReprEntry": + reprfuncargs = None + reprfileloc = None + reprlocals = None + if data["reprfuncargs"]: + reprfuncargs = ReprFuncArgs(**data["reprfuncargs"]) + if data["reprfileloc"]: + reprfileloc = ReprFileLocation(**data["reprfileloc"]) + if data["reprlocals"]: + reprlocals = ReprLocals(data["reprlocals"]["lines"]) + + reprentry: ReprEntry | ReprEntryNative = ReprEntry( + lines=data["lines"], + reprfuncargs=reprfuncargs, + reprlocals=reprlocals, + reprfileloc=reprfileloc, + style=data["style"], + ) + elif entry_type == "ReprEntryNative": + reprentry = ReprEntryNative(data["lines"]) + else: + _report_unserialization_failure(entry_type, TestReport, reportdict) + return reprentry + + def deserialize_repr_traceback(repr_traceback_dict): + repr_traceback_dict["reprentries"] = [ + deserialize_repr_entry(x) for x in repr_traceback_dict["reprentries"] + ] + return ReprTraceback(**repr_traceback_dict) + + def deserialize_repr_crash(repr_crash_dict: dict[str, Any] | None): + if repr_crash_dict is not None: + return ReprFileLocation(**repr_crash_dict) + else: + return None + + if ( + reportdict["longrepr"] + and "reprcrash" in reportdict["longrepr"] + and "reprtraceback" in reportdict["longrepr"] + ): + reprtraceback = deserialize_repr_traceback( + reportdict["longrepr"]["reprtraceback"] + ) + reprcrash = deserialize_repr_crash(reportdict["longrepr"]["reprcrash"]) + if reportdict["longrepr"]["chain"]: + chain = [] + for repr_traceback_data, repr_crash_data, description in reportdict[ + "longrepr" + ]["chain"]: + chain.append( + ( + deserialize_repr_traceback(repr_traceback_data), + deserialize_repr_crash(repr_crash_data), + description, + ) + ) + exception_info: ExceptionChainRepr | ReprExceptionInfo = ExceptionChainRepr( + chain + ) + else: + exception_info = ReprExceptionInfo( + reprtraceback=reprtraceback, + reprcrash=reprcrash, + ) + + for section in reportdict["longrepr"]["sections"]: + exception_info.addsection(*section) + reportdict["longrepr"] = exception_info + + return reportdict diff --git a/.venv/lib/python3.11/site-packages/_pytest/runner.py b/.venv/lib/python3.11/site-packages/_pytest/runner.py new file mode 100644 index 0000000..26e4e83 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/runner.py @@ -0,0 +1,571 @@ +# mypy: allow-untyped-defs +"""Basic collect and runtest protocol implementations.""" + +from __future__ import annotations + +import bdb +from collections.abc import Callable +import dataclasses +import os +import sys +import types +from typing import cast +from typing import final +from typing import Generic +from typing import Literal +from typing import TYPE_CHECKING +from typing import TypeVar + +from .reports import BaseReport +from .reports import CollectErrorRepr +from .reports import CollectReport +from .reports import TestReport +from _pytest import timing +from _pytest._code.code import ExceptionChainRepr +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import TerminalRepr +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.nodes import Collector +from _pytest.nodes import Directory +from _pytest.nodes import Item +from _pytest.nodes import Node +from _pytest.outcomes import Exit +from _pytest.outcomes import OutcomeException +from _pytest.outcomes import Skipped +from _pytest.outcomes import TEST_OUTCOME + + +if sys.version_info < (3, 11): + 
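+    # BaseExceptionGroup is a builtin on Python 3.11+; earlier interpreters
+    # get it from the `exceptiongroup` backport package.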
from exceptiongroup import BaseExceptionGroup + +if TYPE_CHECKING: + from _pytest.main import Session + from _pytest.terminal import TerminalReporter + +# +# pytest plugin hooks. + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("terminal reporting", "Reporting", after="general") + group.addoption( + "--durations", + action="store", + type=int, + default=None, + metavar="N", + help="Show N slowest setup/test durations (N=0 for all)", + ) + group.addoption( + "--durations-min", + action="store", + type=float, + default=None, + metavar="N", + help="Minimal duration in seconds for inclusion in slowest list. " + "Default: 0.005 (or 0.0 if -vv is given).", + ) + + +def pytest_terminal_summary(terminalreporter: TerminalReporter) -> None: + durations = terminalreporter.config.option.durations + durations_min = terminalreporter.config.option.durations_min + verbose = terminalreporter.config.get_verbosity() + if durations is None: + return + if durations_min is None: + durations_min = 0.005 if verbose < 2 else 0.0 + tr = terminalreporter + dlist = [] + for replist in tr.stats.values(): + for rep in replist: + if hasattr(rep, "duration"): + dlist.append(rep) + if not dlist: + return + dlist.sort(key=lambda x: x.duration, reverse=True) + if not durations: + tr.write_sep("=", "slowest durations") + else: + tr.write_sep("=", f"slowest {durations} durations") + dlist = dlist[:durations] + + for i, rep in enumerate(dlist): + if rep.duration < durations_min: + tr.write_line("") + message = f"({len(dlist) - i} durations < {durations_min:g}s hidden." + if terminalreporter.config.option.durations_min is None: + message += " Use -vv to show these durations." + message += ")" + tr.write_line(message) + break + tr.write_line(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}") + + +def pytest_sessionstart(session: Session) -> None: + session._setupstate = SetupState() + + +def pytest_sessionfinish(session: Session) -> None: + session._setupstate.teardown_exact(None) + + +def pytest_runtest_protocol(item: Item, nextitem: Item | None) -> bool: + ihook = item.ihook + ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location) + runtestprotocol(item, nextitem=nextitem) + ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location) + return True + + +def runtestprotocol( + item: Item, log: bool = True, nextitem: Item | None = None +) -> list[TestReport]: + hasrequest = hasattr(item, "_request") + if hasrequest and not item._request: # type: ignore[attr-defined] + # This only happens if the item is re-run, as is done by + # pytest-rerunfailures. + item._initrequest() # type: ignore[attr-defined] + rep = call_and_report(item, "setup", log) + reports = [rep] + if rep.passed: + if item.config.getoption("setupshow", False): + show_test_item(item) + if not item.config.getoption("setuponly", False): + reports.append(call_and_report(item, "call", log)) + # If the session is about to fail or stop, teardown everything - this is + # necessary to correctly report fixture teardown errors (see #11706) + if item.session.shouldfail or item.session.shouldstop: + nextitem = None + reports.append(call_and_report(item, "teardown", log, nextitem=nextitem)) + # After all teardown hooks have been called + # want funcargs and request info to go away. 
+ if hasrequest: + item._request = False # type: ignore[attr-defined] + item.funcargs = None # type: ignore[attr-defined] + return reports + + +def show_test_item(item: Item) -> None: + """Show test function, parameters and the fixtures of the test item.""" + tw = item.config.get_terminal_writer() + tw.line() + tw.write(" " * 8) + tw.write(item.nodeid) + used_fixtures = sorted(getattr(item, "fixturenames", [])) + if used_fixtures: + tw.write(" (fixtures used: {})".format(", ".join(used_fixtures))) + tw.flush() + + +def pytest_runtest_setup(item: Item) -> None: + _update_current_test_var(item, "setup") + item.session._setupstate.setup(item) + + +def pytest_runtest_call(item: Item) -> None: + _update_current_test_var(item, "call") + try: + del sys.last_type + del sys.last_value + del sys.last_traceback + if sys.version_info >= (3, 12, 0): + del sys.last_exc # type:ignore[attr-defined] + except AttributeError: + pass + try: + item.runtest() + except Exception as e: + # Store trace info to allow postmortem debugging + sys.last_type = type(e) + sys.last_value = e + if sys.version_info >= (3, 12, 0): + sys.last_exc = e # type:ignore[attr-defined] + assert e.__traceback__ is not None + # Skip *this* frame + sys.last_traceback = e.__traceback__.tb_next + raise + + +def pytest_runtest_teardown(item: Item, nextitem: Item | None) -> None: + _update_current_test_var(item, "teardown") + item.session._setupstate.teardown_exact(nextitem) + _update_current_test_var(item, None) + + +def _update_current_test_var( + item: Item, when: Literal["setup", "call", "teardown"] | None +) -> None: + """Update :envvar:`PYTEST_CURRENT_TEST` to reflect the current item and stage. + + If ``when`` is None, delete ``PYTEST_CURRENT_TEST`` from the environment. + """ + var_name = "PYTEST_CURRENT_TEST" + if when: + value = f"{item.nodeid} ({when})" + # don't allow null bytes on environment variables (see #2644, #2957) + value = value.replace("\x00", "(null)") + os.environ[var_name] = value + else: + os.environ.pop(var_name) + + +def pytest_report_teststatus(report: BaseReport) -> tuple[str, str, str] | None: + if report.when in ("setup", "teardown"): + if report.failed: + # category, shortletter, verbose-word + return "error", "E", "ERROR" + elif report.skipped: + return "skipped", "s", "SKIPPED" + else: + return "", "", "" + return None + + +# +# Implementation + + +def call_and_report( + item: Item, when: Literal["setup", "call", "teardown"], log: bool = True, **kwds +) -> TestReport: + ihook = item.ihook + if when == "setup": + runtest_hook: Callable[..., None] = ihook.pytest_runtest_setup + elif when == "call": + runtest_hook = ihook.pytest_runtest_call + elif when == "teardown": + runtest_hook = ihook.pytest_runtest_teardown + else: + assert False, f"Unhandled runtest hook case: {when}" + reraise: tuple[type[BaseException], ...] = (Exit,) + if not item.config.getoption("usepdb", False): + reraise += (KeyboardInterrupt,) + call = CallInfo.from_call( + lambda: runtest_hook(item=item, **kwds), when=when, reraise=reraise + ) + report: TestReport = ihook.pytest_runtest_makereport(item=item, call=call) + if log: + ihook.pytest_runtest_logreport(report=report) + if check_interactive_exception(call, report): + ihook.pytest_exception_interact(node=item, call=call, report=report) + return report + + +def check_interactive_exception(call: CallInfo[object], report: BaseReport) -> bool: + """Check whether the call raised an exception that should be reported as + interactive.""" + if call.excinfo is None: + # Didn't raise. 
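+        # (CallInfo.from_call only sets excinfo when the wrapped call raised.)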
+        return False
+    if hasattr(report, "wasxfail"):
+        # Exception was expected.
+        return False
+    if isinstance(call.excinfo.value, (Skipped, bdb.BdbQuit)):
+        # Special control flow exception.
+        return False
+    return True
+
+
+TResult = TypeVar("TResult", covariant=True)
+
+
+@final
+@dataclasses.dataclass
+class CallInfo(Generic[TResult]):
+    """Result/Exception info of a function invocation."""
+
+    _result: TResult | None
+    #: The captured exception of the call, if it raised.
+    excinfo: ExceptionInfo[BaseException] | None
+    #: The system time when the call started, in seconds since the epoch.
+    start: float
+    #: The system time when the call ended, in seconds since the epoch.
+    stop: float
+    #: The call duration, in seconds.
+    duration: float
+    #: The context of invocation: "collect", "setup", "call" or "teardown".
+    when: Literal["collect", "setup", "call", "teardown"]
+
+    def __init__(
+        self,
+        result: TResult | None,
+        excinfo: ExceptionInfo[BaseException] | None,
+        start: float,
+        stop: float,
+        duration: float,
+        when: Literal["collect", "setup", "call", "teardown"],
+        *,
+        _ispytest: bool = False,
+    ) -> None:
+        check_ispytest(_ispytest)
+        self._result = result
+        self.excinfo = excinfo
+        self.start = start
+        self.stop = stop
+        self.duration = duration
+        self.when = when
+
+    @property
+    def result(self) -> TResult:
+        """The return value of the call, if it didn't raise.
+
+        Can only be accessed if excinfo is None.
+        """
+        if self.excinfo is not None:
+            raise AttributeError(f"{self!r} has no valid result")
+        # The cast is safe because an exception wasn't raised, hence
+        # _result has the expected function return type (which may be
+        # None, that's why a cast and not an assert).
+        return cast(TResult, self._result)
+
+    @classmethod
+    def from_call(
+        cls,
+        func: Callable[[], TResult],
+        when: Literal["collect", "setup", "call", "teardown"],
+        reraise: type[BaseException] | tuple[type[BaseException], ...] | None = None,
+    ) -> CallInfo[TResult]:
+        """Call func, wrapping the result in a CallInfo.
+
+        :param func:
+            The function to call. Called without arguments.
+        :type func: Callable[[], _pytest.runner.TResult]
+        :param when:
+            The phase in which the function is called.
+        :param reraise:
+            Exception or exceptions that shall propagate if raised by the
+            function, instead of being wrapped in the CallInfo.
+        """
+        excinfo = None
+        instant = timing.Instant()
+        try:
+            result: TResult | None = func()
+        except BaseException:
+            excinfo = ExceptionInfo.from_current()
+            if reraise is not None and isinstance(excinfo.value, reraise):
+                raise
+            result = None
+        duration = instant.elapsed()
+        return cls(
+            start=duration.start.time,
+            stop=duration.stop.time,
+            duration=duration.seconds,
+            when=when,
+            result=result,
+            excinfo=excinfo,
+            _ispytest=True,
+        )
+
+    def __repr__(self) -> str:
+        if self.excinfo is None:
+            return f"<CallInfo when={self.when!r} result: {self._result!r}>"
+        return f"<CallInfo when={self.when!r} excinfo={self.excinfo!r}>"
+
+
+def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> TestReport:
+    return TestReport.from_item_and_call(item, call)
+
+
+def pytest_make_collect_report(collector: Collector) -> CollectReport:
+    def collect() -> list[Item | Collector]:
+        # Before collecting, if this is a Directory, load the conftests.
+        # If a conftest import fails to load, it is considered a collection
+        # error of the Directory collector. This is why it's done inside of the
+        # CallInfo wrapper.
+        #
+        # Note: initial conftests are loaded early, not here.
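+        # For example, collecting a `tests/` directory imports
+        # `tests/conftest.py` here, so an ImportError in that conftest
+        # surfaces as a collection error for the directory itself.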
+        if isinstance(collector, Directory):
+            collector.config.pluginmanager._loadconftestmodules(
+                collector.path,
+                collector.config.getoption("importmode"),
+                rootpath=collector.config.rootpath,
+                consider_namespace_packages=collector.config.getini(
+                    "consider_namespace_packages"
+                ),
+            )
+
+        return list(collector.collect())
+
+    call = CallInfo.from_call(
+        collect, "collect", reraise=(KeyboardInterrupt, SystemExit)
+    )
+    longrepr: None | tuple[str, int, str] | str | TerminalRepr = None
+    if not call.excinfo:
+        outcome: Literal["passed", "skipped", "failed"] = "passed"
+    else:
+        skip_exceptions = [Skipped]
+        unittest = sys.modules.get("unittest")
+        if unittest is not None:
+            skip_exceptions.append(unittest.SkipTest)
+        if isinstance(call.excinfo.value, tuple(skip_exceptions)):
+            outcome = "skipped"
+            r_ = collector._repr_failure_py(call.excinfo, "line")
+            assert isinstance(r_, ExceptionChainRepr), repr(r_)
+            r = r_.reprcrash
+            assert r
+            longrepr = (str(r.path), r.lineno, r.message)
+        else:
+            outcome = "failed"
+            errorinfo = collector.repr_failure(call.excinfo)
+            if not hasattr(errorinfo, "toterminal"):
+                assert isinstance(errorinfo, str)
+                errorinfo = CollectErrorRepr(errorinfo)
+            longrepr = errorinfo
+    result = call.result if not call.excinfo else None
+    rep = CollectReport(collector.nodeid, outcome, longrepr, result)
+    rep.call = call  # type: ignore # see collect_one_node
+    return rep
+
+
+class SetupState:
+    """Shared state for setting up/tearing down test items or collectors
+    in a session.
+
+    Suppose we have a collection tree as follows:
+
+    <Session>
+        <Module mod1>
+            <Item item1>
+        <Module mod2>
+            <Item item2>
+
+    The SetupState maintains a stack. The stack starts out empty:
+
+        []
+
+    During the setup phase of item1, setup(item1) is called. What it does
+    is:
+
+        push session to stack, run session.setup()
+        push mod1 to stack, run mod1.setup()
+        push item1 to stack, run item1.setup()
+
+    The stack is:
+
+        [session, mod1, item1]
+
+    While the stack is in this shape, it is allowed to add finalizers to
+    each of session, mod1, item1 using addfinalizer().
+
+    During the teardown phase of item1, teardown_exact(item2) is called,
+    where item2 is the next item after item1. What it does is:
+
+        pop item1 from stack, run its teardowns
+        pop mod1 from stack, run its teardowns
+
+    mod1 was popped because it ended its purpose with item1. The stack is:
+
+        [session]
+
+    During the setup phase of item2, setup(item2) is called. What it does
+    is:
+
+        push mod2 to stack, run mod2.setup()
+        push item2 to stack, run item2.setup()
+
+    Stack:
+
+        [session, mod2, item2]
+
+    During the teardown phase of item2, teardown_exact(None) is called,
+    because item2 is the last item. What it does is:
+
+        pop item2 from stack, run its teardowns
+        pop mod2 from stack, run its teardowns
+        pop session from stack, run its teardowns
+
+    Stack:
+
+        []
+
+    The end!
+    """
+
+    def __init__(self) -> None:
+        # The stack is in the dict insertion order.
+        self.stack: dict[
+            Node,
+            tuple[
+                # Node's finalizers.
+                list[Callable[[], object]],
+                # Node's exception and original traceback, if its setup raised.
+                tuple[OutcomeException | Exception, types.TracebackType | None] | None,
+            ],
+        ] = {}
+
+    def setup(self, item: Item) -> None:
+        """Setup objects along the collector chain to the item."""
+        needed_collectors = item.listchain()
+
+        # If a collector fails its setup, fail its entire subtree of items.
+        # The setup is not retried for each item - the same exception is used.
+ for col, (finalizers, exc) in self.stack.items(): + assert col in needed_collectors, "previous item was not torn down properly" + if exc: + raise exc[0].with_traceback(exc[1]) + + for col in needed_collectors[len(self.stack) :]: + assert col not in self.stack + # Push onto the stack. + self.stack[col] = ([col.teardown], None) + try: + col.setup() + except TEST_OUTCOME as exc: + self.stack[col] = (self.stack[col][0], (exc, exc.__traceback__)) + raise + + def addfinalizer(self, finalizer: Callable[[], object], node: Node) -> None: + """Attach a finalizer to the given node. + + The node must be currently active in the stack. + """ + assert node and not isinstance(node, tuple) + assert callable(finalizer) + assert node in self.stack, (node, self.stack) + self.stack[node][0].append(finalizer) + + def teardown_exact(self, nextitem: Item | None) -> None: + """Teardown the current stack up until reaching nodes that nextitem + also descends from. + + When nextitem is None (meaning we're at the last item), the entire + stack is torn down. + """ + needed_collectors = (nextitem and nextitem.listchain()) or [] + exceptions: list[BaseException] = [] + while self.stack: + if list(self.stack.keys()) == needed_collectors[: len(self.stack)]: + break + node, (finalizers, _) = self.stack.popitem() + these_exceptions = [] + while finalizers: + fin = finalizers.pop() + try: + fin() + except TEST_OUTCOME as e: + these_exceptions.append(e) + + if len(these_exceptions) == 1: + exceptions.extend(these_exceptions) + elif these_exceptions: + msg = f"errors while tearing down {node!r}" + exceptions.append(BaseExceptionGroup(msg, these_exceptions[::-1])) + + if len(exceptions) == 1: + raise exceptions[0] + elif exceptions: + raise BaseExceptionGroup("errors during test teardown", exceptions[::-1]) + if nextitem is None: + assert not self.stack + + +def collect_one_node(collector: Collector) -> CollectReport: + ihook = collector.ihook + ihook.pytest_collectstart(collector=collector) + rep: CollectReport = ihook.pytest_make_collect_report(collector=collector) + call = rep.__dict__.pop("call", None) + if call and check_interactive_exception(call, rep): + ihook.pytest_exception_interact(node=collector, call=call, report=rep) + return rep diff --git a/.venv/lib/python3.11/site-packages/_pytest/scope.py b/.venv/lib/python3.11/site-packages/_pytest/scope.py new file mode 100644 index 0000000..2b007e8 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/scope.py @@ -0,0 +1,91 @@ +""" +Scope definition and related utilities. + +Those are defined here, instead of in the 'fixtures' module because +their use is spread across many other pytest modules, and centralizing it in 'fixtures' +would cause circular references. + +Also this makes the module light to import, as it should. +""" + +from __future__ import annotations + +from enum import Enum +from functools import total_ordering +from typing import Literal + + +_ScopeName = Literal["session", "package", "module", "class", "function"] + + +@total_ordering +class Scope(Enum): + """ + Represents one of the possible fixture scopes in pytest. + + Scopes are ordered from lower to higher, that is: + + ->>> higher ->>> + + Function < Class < Module < Package < Session + + <<<- lower <<<- + """ + + # Scopes need to be listed from lower to higher. 
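+    # With @total_ordering and __lt__ below, comparisons follow this
+    # declaration order, e.g. `Scope.Function < Scope.Session` is True.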
+ Function = "function" + Class = "class" + Module = "module" + Package = "package" + Session = "session" + + def next_lower(self) -> Scope: + """Return the next lower scope.""" + index = _SCOPE_INDICES[self] + if index == 0: + raise ValueError(f"{self} is the lower-most scope") + return _ALL_SCOPES[index - 1] + + def next_higher(self) -> Scope: + """Return the next higher scope.""" + index = _SCOPE_INDICES[self] + if index == len(_SCOPE_INDICES) - 1: + raise ValueError(f"{self} is the upper-most scope") + return _ALL_SCOPES[index + 1] + + def __lt__(self, other: Scope) -> bool: + self_index = _SCOPE_INDICES[self] + other_index = _SCOPE_INDICES[other] + return self_index < other_index + + @classmethod + def from_user( + cls, scope_name: _ScopeName, descr: str, where: str | None = None + ) -> Scope: + """ + Given a scope name from the user, return the equivalent Scope enum. Should be used + whenever we want to convert a user provided scope name to its enum object. + + If the scope name is invalid, construct a user friendly message and call pytest.fail. + """ + from _pytest.outcomes import fail + + try: + # Holding this reference is necessary for mypy at the moment. + scope = Scope(scope_name) + except ValueError: + fail( + "{} {}got an unexpected scope value '{}'".format( + descr, f"from {where} " if where else "", scope_name + ), + pytrace=False, + ) + return scope + + +_ALL_SCOPES = list(Scope) +_SCOPE_INDICES = {scope: index for index, scope in enumerate(_ALL_SCOPES)} + + +# Ordered list of scopes which can contain many tests (in practice all except Function). +HIGH_SCOPES = [x for x in Scope if x is not Scope.Function] diff --git a/.venv/lib/python3.11/site-packages/_pytest/setuponly.py b/.venv/lib/python3.11/site-packages/_pytest/setuponly.py new file mode 100644 index 0000000..7e6b46b --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/setuponly.py @@ -0,0 +1,98 @@ +from __future__ import annotations + +from collections.abc import Generator + +from _pytest._io.saferepr import saferepr +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config.argparsing import Parser +from _pytest.fixtures import FixtureDef +from _pytest.fixtures import SubRequest +from _pytest.scope import Scope +import pytest + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("debugconfig") + group.addoption( + "--setuponly", + "--setup-only", + action="store_true", + help="Only setup fixtures, do not execute tests", + ) + group.addoption( + "--setupshow", + "--setup-show", + action="store_true", + help="Show setup of fixtures while executing tests", + ) + + +@pytest.hookimpl(wrapper=True) +def pytest_fixture_setup( + fixturedef: FixtureDef[object], request: SubRequest +) -> Generator[None, object, object]: + try: + return (yield) + finally: + if request.config.option.setupshow: + if hasattr(request, "param"): + # Save the fixture parameter so ._show_fixture_action() can + # display it now and during the teardown (in .finish()). 
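+                # (`ids` mirrors @pytest.fixture(ids=...): either a callable
+                # applied to the param, or a sequence indexed by param_index.)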
+ if fixturedef.ids: + if callable(fixturedef.ids): + param = fixturedef.ids(request.param) + else: + param = fixturedef.ids[request.param_index] + else: + param = request.param + fixturedef.cached_param = param # type: ignore[attr-defined] + _show_fixture_action(fixturedef, request.config, "SETUP") + + +def pytest_fixture_post_finalizer( + fixturedef: FixtureDef[object], request: SubRequest +) -> None: + if fixturedef.cached_result is not None: + config = request.config + if config.option.setupshow: + _show_fixture_action(fixturedef, request.config, "TEARDOWN") + if hasattr(fixturedef, "cached_param"): + del fixturedef.cached_param + + +def _show_fixture_action( + fixturedef: FixtureDef[object], config: Config, msg: str +) -> None: + capman = config.pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend_global_capture() + + tw = config.get_terminal_writer() + tw.line() + # Use smaller indentation the higher the scope: Session = 0, Package = 1, etc. + scope_indent = list(reversed(Scope)).index(fixturedef._scope) + tw.write(" " * 2 * scope_indent) + + scopename = fixturedef.scope[0].upper() + tw.write(f"{msg:<8} {scopename} {fixturedef.argname}") + + if msg == "SETUP": + deps = sorted(arg for arg in fixturedef.argnames if arg != "request") + if deps: + tw.write(" (fixtures used: {})".format(", ".join(deps))) + + if hasattr(fixturedef, "cached_param"): + tw.write(f"[{saferepr(fixturedef.cached_param, maxsize=42)}]") + + tw.flush() + + if capman: + capman.resume_global_capture() + + +@pytest.hookimpl(tryfirst=True) +def pytest_cmdline_main(config: Config) -> int | ExitCode | None: + if config.option.setuponly: + config.option.setupshow = True + return None diff --git a/.venv/lib/python3.11/site-packages/_pytest/setupplan.py b/.venv/lib/python3.11/site-packages/_pytest/setupplan.py new file mode 100644 index 0000000..4e124cc --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/setupplan.py @@ -0,0 +1,39 @@ +from __future__ import annotations + +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config.argparsing import Parser +from _pytest.fixtures import FixtureDef +from _pytest.fixtures import SubRequest +import pytest + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("debugconfig") + group.addoption( + "--setupplan", + "--setup-plan", + action="store_true", + help="Show what fixtures and tests would be executed but " + "don't execute anything", + ) + + +@pytest.hookimpl(tryfirst=True) +def pytest_fixture_setup( + fixturedef: FixtureDef[object], request: SubRequest +) -> object | None: + # Will return a dummy fixture if the setuponly option is provided. 
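+    # (More precisely: under --setup-plan, which also enables setuponly and
+    # setupshow via pytest_cmdline_main below; caching (None, key, None)
+    # prevents the real fixture function from ever running.)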
+    if request.config.option.setupplan:
+        my_cache_key = fixturedef.cache_key(request)
+        fixturedef.cached_result = (None, my_cache_key, None)
+        return fixturedef.cached_result
+    return None
+
+
+@pytest.hookimpl(tryfirst=True)
+def pytest_cmdline_main(config: Config) -> int | ExitCode | None:
+    if config.option.setupplan:
+        config.option.setuponly = True
+        config.option.setupshow = True
+    return None
diff --git a/.venv/lib/python3.11/site-packages/_pytest/skipping.py b/.venv/lib/python3.11/site-packages/_pytest/skipping.py
new file mode 100644
index 0000000..ec118f2
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/_pytest/skipping.py
@@ -0,0 +1,316 @@
+# mypy: allow-untyped-defs
+"""Support for skip/xfail functions and markers."""
+
+from __future__ import annotations
+
+from collections.abc import Generator
+from collections.abc import Mapping
+import dataclasses
+import os
+import platform
+import sys
+import traceback
+from typing import Optional
+
+from _pytest.config import Config
+from _pytest.config import hookimpl
+from _pytest.config.argparsing import Parser
+from _pytest.mark.structures import Mark
+from _pytest.nodes import Item
+from _pytest.outcomes import fail
+from _pytest.outcomes import skip
+from _pytest.outcomes import xfail
+from _pytest.raises import AbstractRaises
+from _pytest.reports import BaseReport
+from _pytest.reports import TestReport
+from _pytest.runner import CallInfo
+from _pytest.stash import StashKey
+
+
+def pytest_addoption(parser: Parser) -> None:
+    group = parser.getgroup("general")
+    group.addoption(
+        "--runxfail",
+        action="store_true",
+        dest="runxfail",
+        default=False,
+        help="Report the results of xfail tests as if they were not marked",
+    )
+
+    parser.addini(
+        "xfail_strict",
+        "Default for the strict parameter of xfail "
+        "markers when not given explicitly (default: False)",
+        default=False,
+        type="bool",
+    )
+
+
+def pytest_configure(config: Config) -> None:
+    if config.option.runxfail:
+        # yay a hack
+        import pytest
+
+        old = pytest.xfail
+        config.add_cleanup(lambda: setattr(pytest, "xfail", old))
+
+        def nop(*args, **kwargs):
+            pass
+
+        nop.Exception = xfail.Exception  # type: ignore[attr-defined]
+        setattr(pytest, "xfail", nop)
+
+    config.addinivalue_line(
+        "markers",
+        "skip(reason=None): skip the given test function with an optional reason. "
+        'Example: skip(reason="no way of currently testing this") skips the '
+        "test.",
+    )
+    config.addinivalue_line(
+        "markers",
+        "skipif(condition, ..., *, reason=...): "
+        "skip the given test function if any of the conditions evaluate to True. "
+        "Example: skipif(sys.platform == 'win32') skips the test if we are on the win32 platform. "
+        "See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-skipif",
+    )
+    config.addinivalue_line(
+        "markers",
+        "xfail(condition, ..., *, reason=..., run=True, raises=None, strict=xfail_strict): "
+        "mark the test function as an expected failure if any of the conditions "
+        "evaluate to True. Optionally specify a reason for better reporting "
+        "and run=False if you don't even want to execute the test function. "
+        "If only specific exception(s) are expected, you can list them in "
+        "raises, and if the test fails in other ways, it will be reported as "
+        "a true failure. See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-xfail",
+    )
+
+
+def evaluate_condition(item: Item, mark: Mark, condition: object) -> tuple[bool, str]:
+    """Evaluate a single skipif/xfail condition.
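+
+    For example, ``skipif("sys.platform == 'win32'")`` is evaluated in a
+    namespace that includes ``os``, ``sys``, ``platform`` and ``config``.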
+
+    If an old-style string condition is given, it is eval()'d, otherwise the
+    condition is bool()'d. If this fails, an appropriately formatted pytest.fail
+    is raised.
+
+    Returns (result, reason). The reason is only relevant if the result is True.
+    """
+    # String condition.
+    if isinstance(condition, str):
+        globals_ = {
+            "os": os,
+            "sys": sys,
+            "platform": platform,
+            "config": item.config,
+        }
+        for dictionary in reversed(
+            item.ihook.pytest_markeval_namespace(config=item.config)
+        ):
+            if not isinstance(dictionary, Mapping):
+                raise ValueError(
+                    f"pytest_markeval_namespace() needs to return a dict, got {dictionary!r}"
+                )
+            globals_.update(dictionary)
+        if hasattr(item, "obj"):
+            globals_.update(item.obj.__globals__)
+        try:
+            filename = f"<{mark.name} condition>"
+            condition_code = compile(condition, filename, "eval")
+            result = eval(condition_code, globals_)
+        except SyntaxError as exc:
+            msglines = [
+                f"Error evaluating {mark.name!r} condition",
+                "    " + condition,
+                "    " + " " * (exc.offset or 0) + "^",
+                "SyntaxError: invalid syntax",
+            ]
+            fail("\n".join(msglines), pytrace=False)
+        except Exception as exc:
+            msglines = [
+                f"Error evaluating {mark.name!r} condition",
+                "    " + condition,
+                *traceback.format_exception_only(type(exc), exc),
+            ]
+            fail("\n".join(msglines), pytrace=False)
+
+    # Boolean condition.
+    else:
+        try:
+            result = bool(condition)
+        except Exception as exc:
+            msglines = [
+                f"Error evaluating {mark.name!r} condition as a boolean",
+                *traceback.format_exception_only(type(exc), exc),
+            ]
+            fail("\n".join(msglines), pytrace=False)
+
+    reason = mark.kwargs.get("reason", None)
+    if reason is None:
+        if isinstance(condition, str):
+            reason = "condition: " + condition
+        else:
+            # XXX better be checked at collection time
+            msg = (
+                f"Error evaluating {mark.name!r}: "
+                + "you need to specify reason=STRING when using booleans as conditions."
+            )
+            fail(msg, pytrace=False)
+
+    return result, reason
+
+
+@dataclasses.dataclass(frozen=True)
+class Skip:
+    """The result of evaluate_skip_marks()."""
+
+    reason: str = "unconditional skip"
+
+
+def evaluate_skip_marks(item: Item) -> Skip | None:
+    """Evaluate skip and skipif marks on item, returning Skip if triggered."""
+    for mark in item.iter_markers(name="skipif"):
+        if "condition" not in mark.kwargs:
+            conditions = mark.args
+        else:
+            conditions = (mark.kwargs["condition"],)
+
+        # Unconditional.
+        if not conditions:
+            reason = mark.kwargs.get("reason", "")
+            return Skip(reason)
+
+        # If any of the conditions are true.
+        for condition in conditions:
+            result, reason = evaluate_condition(item, mark, condition)
+            if result:
+                return Skip(reason)
+
+    for mark in item.iter_markers(name="skip"):
+        try:
+            return Skip(*mark.args, **mark.kwargs)
+        except TypeError as e:
+            raise TypeError(str(e) + " - maybe you meant pytest.mark.skipif?") from None
+
+    return None
+
+
+@dataclasses.dataclass(frozen=True)
+class Xfail:
+    """The result of evaluate_xfail_marks()."""
+
+    __slots__ = ("raises", "reason", "run", "strict")
+
+    reason: str
+    run: bool
+    strict: bool
+    raises: (
+        type[BaseException]
+        | tuple[type[BaseException], ...]
+        | AbstractRaises[BaseException]
+        | None
+    )
+
+
+def evaluate_xfail_marks(item: Item) -> Xfail | None:
+    """Evaluate xfail marks on item, returning Xfail if triggered."""
+    for mark in item.iter_markers(name="xfail"):
+        run = mark.kwargs.get("run", True)
+        strict = mark.kwargs.get("strict", item.config.getini("xfail_strict"))
+        raises = mark.kwargs.get("raises", None)
+        if "condition" not in mark.kwargs:
+            conditions = mark.args
+        else:
+            conditions = (mark.kwargs["condition"],)
+
+        # Unconditional.
+        if not conditions:
+            reason = mark.kwargs.get("reason", "")
+            return Xfail(reason, run, strict, raises)
+
+        # If any of the conditions are true.
+        for condition in conditions:
+            result, reason = evaluate_condition(item, mark, condition)
+            if result:
+                return Xfail(reason, run, strict, raises)
+
+    return None
+
+
+# Saves the xfail mark evaluation. Can be refreshed during call if None.
+xfailed_key = StashKey[Optional[Xfail]]()
+
+
+@hookimpl(tryfirst=True)
+def pytest_runtest_setup(item: Item) -> None:
+    skipped = evaluate_skip_marks(item)
+    if skipped:
+        raise skip.Exception(skipped.reason, _use_item_location=True)
+
+    item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item)
+    if xfailed and not item.config.option.runxfail and not xfailed.run:
+        xfail("[NOTRUN] " + xfailed.reason)
+
+
+@hookimpl(wrapper=True)
+def pytest_runtest_call(item: Item) -> Generator[None]:
+    xfailed = item.stash.get(xfailed_key, None)
+    if xfailed is None:
+        item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item)
+
+    if xfailed and not item.config.option.runxfail and not xfailed.run:
+        xfail("[NOTRUN] " + xfailed.reason)
+
+    try:
+        return (yield)
+    finally:
+        # The test run may have added an xfail mark dynamically.
+        xfailed = item.stash.get(xfailed_key, None)
+        if xfailed is None:
+            item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item)
+
+
+@hookimpl(wrapper=True)
+def pytest_runtest_makereport(
+    item: Item, call: CallInfo[None]
+) -> Generator[None, TestReport, TestReport]:
+    rep = yield
+    xfailed = item.stash.get(xfailed_key, None)
+    if item.config.option.runxfail:
+        pass  # don't interfere
+    elif call.excinfo and isinstance(call.excinfo.value, xfail.Exception):
+        assert call.excinfo.value.msg is not None
+        rep.wasxfail = call.excinfo.value.msg
+        rep.outcome = "skipped"
+    elif not rep.skipped and xfailed:
+        if call.excinfo:
+            raises = xfailed.raises
+            if raises is None or (
+                (
+                    isinstance(raises, (type, tuple))
+                    and isinstance(call.excinfo.value, raises)
+                )
+                or (
+                    isinstance(raises, AbstractRaises)
+                    and raises.matches(call.excinfo.value)
+                )
+            ):
+                rep.outcome = "skipped"
+                rep.wasxfail = xfailed.reason
+            else:
+                rep.outcome = "failed"
+        elif call.when == "call":
+            if xfailed.strict:
+                rep.outcome = "failed"
+                rep.longrepr = "[XPASS(strict)] " + xfailed.reason
+            else:
+                rep.outcome = "passed"
+                rep.wasxfail = xfailed.reason
+    return rep
+
+
+def pytest_report_teststatus(report: BaseReport) -> tuple[str, str, str] | None:
+    if hasattr(report, "wasxfail"):
+        if report.skipped:
+            return "xfailed", "x", "XFAIL"
+        elif report.passed:
+            return "xpassed", "X", "XPASS"
+    return None
diff --git a/.venv/lib/python3.11/site-packages/_pytest/stash.py b/.venv/lib/python3.11/site-packages/_pytest/stash.py
new file mode 100644
index 0000000..6a9ff88
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/_pytest/stash.py
@@ -0,0 +1,116 @@
+from __future__ import annotations
+
+from typing import Any
+from typing import cast
+from typing import Generic
+from typing import TypeVar
+
+
+__all__ = ["Stash", "StashKey"]
+
+
+T = TypeVar("T")
+D = TypeVar("D")
+
+
+class StashKey(Generic[T]):
+    """``StashKey`` is an object used as a key to a :class:`Stash`.
+
+    A ``StashKey`` is associated with the type ``T`` of the value of the key.
+
+    A ``StashKey`` is unique and cannot conflict with another key.
+
+    .. versionadded:: 7.0
+    """
+
+    __slots__ = ()
+
+
+class Stash:
+    r"""``Stash`` is a type-safe heterogeneous mutable mapping that
+    allows keys and value types to be defined separately from
+    where it (the ``Stash``) is created.
+
+    Usually you will be given an object which has a ``Stash``, for example
+    :class:`~pytest.Config` or a :class:`~_pytest.nodes.Node`:
+
+    .. code-block:: python
+
+        stash: Stash = some_object.stash
+
+    If a module or plugin wants to store data in this ``Stash``, it creates
+    :class:`StashKey`\s for its keys (at the module level):
+
+    .. code-block:: python
+
+        # At the top-level of the module
+        some_str_key = StashKey[str]()
+        some_bool_key = StashKey[bool]()
+
+    To store information:
+
+    .. code-block:: python
+
+        # Value type must match the key.
+        stash[some_str_key] = "value"
+        stash[some_bool_key] = True
+
+    To retrieve the information:
+
+    .. code-block:: python
+
+        # The static type of some_str is str.
+        some_str = stash[some_str_key]
+        # The static type of some_bool is bool.
+        some_bool = stash[some_bool_key]
+
+    .. versionadded:: 7.0
+    """
+
+    __slots__ = ("_storage",)
+
+    def __init__(self) -> None:
+        self._storage: dict[StashKey[Any], object] = {}
+
+    def __setitem__(self, key: StashKey[T], value: T) -> None:
+        """Set a value for key."""
+        self._storage[key] = value
+
+    def __getitem__(self, key: StashKey[T]) -> T:
+        """Get the value for key.
+
+        Raises ``KeyError`` if the key wasn't set before.
+        """
+        return cast(T, self._storage[key])
+
+    def get(self, key: StashKey[T], default: D) -> T | D:
+        """Get the value for key, or return default if the key wasn't set
+        before."""
+        try:
+            return self[key]
+        except KeyError:
+            return default
+
+    def setdefault(self, key: StashKey[T], default: T) -> T:
+        """Return the value of key if already set, otherwise set the value
+        of key to default and return default."""
+        try:
+            return self[key]
+        except KeyError:
+            self[key] = default
+            return default
+
+    def __delitem__(self, key: StashKey[T]) -> None:
+        """Delete the value for key.
+
+        Raises ``KeyError`` if the key wasn't set before.
+ """ + del self._storage[key] + + def __contains__(self, key: StashKey[T]) -> bool: + """Return whether key was set.""" + return key in self._storage + + def __len__(self) -> int: + """Return how many items exist in the stash.""" + return len(self._storage) diff --git a/.venv/lib/python3.11/site-packages/_pytest/stepwise.py b/.venv/lib/python3.11/site-packages/_pytest/stepwise.py new file mode 100644 index 0000000..8901540 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/stepwise.py @@ -0,0 +1,209 @@ +from __future__ import annotations + +import dataclasses +from datetime import datetime +from datetime import timedelta +from typing import Any +from typing import TYPE_CHECKING + +from _pytest import nodes +from _pytest.cacheprovider import Cache +from _pytest.config import Config +from _pytest.config.argparsing import Parser +from _pytest.main import Session +from _pytest.reports import TestReport + + +if TYPE_CHECKING: + from typing_extensions import Self + +STEPWISE_CACHE_DIR = "cache/stepwise" + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group.addoption( + "--sw", + "--stepwise", + action="store_true", + default=False, + dest="stepwise", + help="Exit on test failure and continue from last failing test next time", + ) + group.addoption( + "--sw-skip", + "--stepwise-skip", + action="store_true", + default=False, + dest="stepwise_skip", + help="Ignore the first failing test but stop on the next failing test. " + "Implicitly enables --stepwise.", + ) + group.addoption( + "--sw-reset", + "--stepwise-reset", + action="store_true", + default=False, + dest="stepwise_reset", + help="Resets stepwise state, restarting the stepwise workflow. " + "Implicitly enables --stepwise.", + ) + + +def pytest_configure(config: Config) -> None: + # --stepwise-skip/--stepwise-reset implies stepwise. + if config.option.stepwise_skip or config.option.stepwise_reset: + config.option.stepwise = True + if config.getoption("stepwise"): + config.pluginmanager.register(StepwisePlugin(config), "stepwiseplugin") + + +def pytest_sessionfinish(session: Session) -> None: + if not session.config.getoption("stepwise"): + assert session.config.cache is not None + if hasattr(session.config, "workerinput"): + # Do not update cache if this process is a xdist worker to prevent + # race conditions (#10641). + return + + +@dataclasses.dataclass +class StepwiseCacheInfo: + # The nodeid of the last failed test. + last_failed: str | None + + # The number of tests in the last time --stepwise was run. + # We use this information as a simple way to invalidate the cache information, avoiding + # confusing behavior in case the cache is stale. + last_test_count: int | None + + # The date when the cache was last updated, for information purposes only. 
+    last_cache_date_str: str
+
+    @property
+    def last_cache_date(self) -> datetime:
+        return datetime.fromisoformat(self.last_cache_date_str)
+
+    @classmethod
+    def empty(cls) -> Self:
+        return cls(
+            last_failed=None,
+            last_test_count=None,
+            last_cache_date_str=datetime.now().isoformat(),
+        )
+
+    def update_date_to_now(self) -> None:
+        self.last_cache_date_str = datetime.now().isoformat()
+
+
+class StepwisePlugin:
+    def __init__(self, config: Config) -> None:
+        self.config = config
+        self.session: Session | None = None
+        self.report_status: list[str] = []
+        assert config.cache is not None
+        self.cache: Cache = config.cache
+        self.skip: bool = config.getoption("stepwise_skip")
+        self.reset: bool = config.getoption("stepwise_reset")
+        self.cached_info = self._load_cached_info()
+
+    def _load_cached_info(self) -> StepwiseCacheInfo:
+        cached_dict: dict[str, Any] | None = self.cache.get(STEPWISE_CACHE_DIR, None)
+        if cached_dict:
+            try:
+                return StepwiseCacheInfo(
+                    cached_dict["last_failed"],
+                    cached_dict["last_test_count"],
+                    cached_dict["last_cache_date_str"],
+                )
+            except (KeyError, TypeError) as e:
+                error = f"{type(e).__name__}: {e}"
+                self.report_status.append(f"error reading cache, discarding ({error})")
+
+        # Cache not found or error during load, return a new cache.
+        return StepwiseCacheInfo.empty()
+
+    def pytest_sessionstart(self, session: Session) -> None:
+        self.session = session
+
+    def pytest_collection_modifyitems(
+        self, config: Config, items: list[nodes.Item]
+    ) -> None:
+        last_test_count = self.cached_info.last_test_count
+        self.cached_info.last_test_count = len(items)
+
+        if self.reset:
+            self.report_status.append("resetting state, not skipping.")
+            self.cached_info.last_failed = None
+            return
+
+        if not self.cached_info.last_failed:
+            self.report_status.append("no previously failed tests, not skipping.")
+            return
+
+        if last_test_count is not None and last_test_count != len(items):
+            self.report_status.append(
+                f"test count changed, not skipping (now {len(items)} tests, previously {last_test_count})."
+            )
+            self.cached_info.last_failed = None
+            return
+
+        # Check all item nodes until we find a match on last failed.
+        failed_index = None
+        for index, item in enumerate(items):
+            if item.nodeid == self.cached_info.last_failed:
+                failed_index = index
+                break
+
+        # If the previously failed test was not found among the test items,
+        # do not skip any tests.
+        if failed_index is None:
+            self.report_status.append("previously failed test not found, not skipping.")
+        else:
+            cache_age = datetime.now() - self.cached_info.last_cache_date
+            # Round up to avoid showing microseconds.
+            cache_age = timedelta(seconds=int(cache_age.total_seconds()))
+            self.report_status.append(
+                f"skipping {failed_index} already passed items (cache from {cache_age} ago,"
+                f" use --sw-reset to discard)."
+            )
+            deselected = items[:failed_index]
+            del items[:failed_index]
+            config.hook.pytest_deselected(items=deselected)
+
+    def pytest_runtest_logreport(self, report: TestReport) -> None:
+        if report.failed:
+            if self.skip:
+                # Remove test from the failed ones (if it exists) and unset the skip option
+                # to make sure the following tests will not be skipped.
+                if report.nodeid == self.cached_info.last_failed:
+                    self.cached_info.last_failed = None
+
+                self.skip = False
+            else:
+                # Mark test as the last failing and interrupt the test session.
+                self.cached_info.last_failed = report.nodeid
+                assert self.session is not None
+                self.session.shouldstop = (
+                    "Test failed, continuing from this test next run."
+                )
+
+        else:
+            # If the test was actually run and did pass.
+            if report.when == "call":
+                # Remove test from the failed ones, if exists.
+                if report.nodeid == self.cached_info.last_failed:
+                    self.cached_info.last_failed = None
+
+    def pytest_report_collectionfinish(self) -> list[str] | None:
+        if self.config.get_verbosity() >= 0 and self.report_status:
+            return [f"stepwise: {x}" for x in self.report_status]
+        return None
+
+    def pytest_sessionfinish(self) -> None:
+        if hasattr(self.config, "workerinput"):
+            # Do not update cache if this process is a xdist worker to prevent
+            # race conditions (#10641).
+            return
+        self.cached_info.update_date_to_now()
+        self.cache.set(STEPWISE_CACHE_DIR, dataclasses.asdict(self.cached_info))
diff --git a/.venv/lib/python3.11/site-packages/_pytest/terminal.py b/.venv/lib/python3.11/site-packages/_pytest/terminal.py
new file mode 100644
index 0000000..a95f79b
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/_pytest/terminal.py
@@ -0,0 +1,1643 @@
+# mypy: allow-untyped-defs
+"""Terminal reporting of the full testing process.
+
+This is a good source for looking at the various reporting hooks.
+"""
+
+from __future__ import annotations
+
+import argparse
+from collections import Counter
+from collections.abc import Callable
+from collections.abc import Generator
+from collections.abc import Mapping
+from collections.abc import Sequence
+import dataclasses
+import datetime
+from functools import partial
+import inspect
+from pathlib import Path
+import platform
+import sys
+import textwrap
+from typing import Any
+from typing import ClassVar
+from typing import final
+from typing import Literal
+from typing import NamedTuple
+from typing import TextIO
+from typing import TYPE_CHECKING
+import warnings
+
+import pluggy
+
+from _pytest import compat
+from _pytest import nodes
+from _pytest import timing
+from _pytest._code import ExceptionInfo
+from _pytest._code.code import ExceptionRepr
+from _pytest._io import TerminalWriter
+from _pytest._io.wcwidth import wcswidth
+import _pytest._version
+from _pytest.assertion.util import running_on_ci
+from _pytest.config import _PluggyPlugin
+from _pytest.config import Config
+from _pytest.config import ExitCode
+from _pytest.config import hookimpl
+from _pytest.config.argparsing import Parser
+from _pytest.nodes import Item
+from _pytest.nodes import Node
+from _pytest.pathlib import absolutepath
+from _pytest.pathlib import bestrelpath
+from _pytest.reports import BaseReport
+from _pytest.reports import CollectReport
+from _pytest.reports import TestReport
+
+
+if TYPE_CHECKING:
+    from _pytest.main import Session
+
+
+REPORT_COLLECTING_RESOLUTION = 0.5
+
+KNOWN_TYPES = (
+    "failed",
+    "passed",
+    "skipped",
+    "deselected",
+    "xfailed",
+    "xpassed",
+    "warnings",
+    "error",
+)
+
+_REPORTCHARS_DEFAULT = "fE"
+
+
+class MoreQuietAction(argparse.Action):
+    """A modified copy of the argparse count action which counts down and updates
+    the legacy quiet attribute at the same time.
+
+    Used to unify verbosity handling.
+ """ + + def __init__( + self, + option_strings: Sequence[str], + dest: str, + default: object = None, + required: bool = False, + help: str | None = None, + ) -> None: + super().__init__( + option_strings=option_strings, + dest=dest, + nargs=0, + default=default, + required=required, + help=help, + ) + + def __call__( + self, + parser: argparse.ArgumentParser, + namespace: argparse.Namespace, + values: str | Sequence[object] | None, + option_string: str | None = None, + ) -> None: + new_count = getattr(namespace, self.dest, 0) - 1 + setattr(namespace, self.dest, new_count) + # todo Deprecate config.quiet + namespace.quiet = getattr(namespace, "quiet", 0) + 1 + + +class TestShortLogReport(NamedTuple): + """Used to store the test status result category, shortletter and verbose word. + For example ``"rerun", "R", ("RERUN", {"yellow": True})``. + + :ivar category: + The class of result, for example ``“passed”``, ``“skipped”``, ``“error”``, or the empty string. + + :ivar letter: + The short letter shown as testing progresses, for example ``"."``, ``"s"``, ``"E"``, or the empty string. + + :ivar word: + Verbose word is shown as testing progresses in verbose mode, for example ``"PASSED"``, ``"SKIPPED"``, + ``"ERROR"``, or the empty string. + """ + + category: str + letter: str + word: str | tuple[str, Mapping[str, bool]] + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("terminal reporting", "Reporting", after="general") + group._addoption( # private to use reserved lower-case short option + "-v", + "--verbose", + action="count", + default=0, + dest="verbose", + help="Increase verbosity", + ) + group.addoption( + "--no-header", + action="store_true", + default=False, + dest="no_header", + help="Disable header", + ) + group.addoption( + "--no-summary", + action="store_true", + default=False, + dest="no_summary", + help="Disable summary", + ) + group.addoption( + "--no-fold-skipped", + action="store_false", + dest="fold_skipped", + default=True, + help="Do not fold skipped tests in short summary.", + ) + group.addoption( + "--force-short-summary", + action="store_true", + dest="force_short_summary", + default=False, + help="Force condensed summary output regardless of verbosity level.", + ) + group._addoption( # private to use reserved lower-case short option + "-q", + "--quiet", + action=MoreQuietAction, + default=0, + dest="verbose", + help="Decrease verbosity", + ) + group.addoption( + "--verbosity", + dest="verbose", + type=int, + default=0, + help="Set verbosity. Default: 0.", + ) + group._addoption( # private to use reserved lower-case short option + "-r", + action="store", + dest="reportchars", + default=_REPORTCHARS_DEFAULT, + metavar="chars", + help="Show extra test summary info as specified by chars: (f)ailed, " + "(E)rror, (s)kipped, (x)failed, (X)passed, " + "(p)assed, (P)assed with output, (a)ll except passed (p/P), or (A)ll. " + "(w)arnings are enabled by default (see --disable-warnings), " + "'N' can be used to reset the list. 
(default: 'fE').", + ) + group.addoption( + "--disable-warnings", + "--disable-pytest-warnings", + default=False, + dest="disable_warnings", + action="store_true", + help="Disable warnings summary", + ) + group._addoption( # private to use reserved lower-case short option + "-l", + "--showlocals", + action="store_true", + dest="showlocals", + default=False, + help="Show locals in tracebacks (disabled by default)", + ) + group.addoption( + "--no-showlocals", + action="store_false", + dest="showlocals", + help="Hide locals in tracebacks (negate --showlocals passed through addopts)", + ) + group.addoption( + "--tb", + metavar="style", + action="store", + dest="tbstyle", + default="auto", + choices=["auto", "long", "short", "no", "line", "native"], + help="Traceback print mode (auto/long/short/line/native/no)", + ) + group.addoption( + "--xfail-tb", + action="store_true", + dest="xfail_tb", + default=False, + help="Show tracebacks for xfail (as long as --tb != no)", + ) + group.addoption( + "--show-capture", + action="store", + dest="showcapture", + choices=["no", "stdout", "stderr", "log", "all"], + default="all", + help="Controls how captured stdout/stderr/log is shown on failed tests. " + "Default: all.", + ) + group.addoption( + "--fulltrace", + "--full-trace", + action="store_true", + default=False, + help="Don't cut any tracebacks (default is to cut)", + ) + group.addoption( + "--color", + metavar="color", + action="store", + dest="color", + default="auto", + choices=["yes", "no", "auto"], + help="Color terminal output (yes/no/auto)", + ) + group.addoption( + "--code-highlight", + default="yes", + choices=["yes", "no"], + help="Whether code should be highlighted (only if --color is also enabled). " + "Default: yes.", + ) + + parser.addini( + "console_output_style", + help='Console output: "classic", or with additional progress information ' + '("progress" (percentage) | "count" | "progress-even-when-capture-no" (forces ' + "progress even when capture=no)", + default="progress", + ) + Config._add_verbosity_ini( + parser, + Config.VERBOSITY_TEST_CASES, + help=( + "Specify a verbosity level for test case execution, overriding the main level. " + "Higher levels will provide more detailed information about each test case executed." + ), + ) + + +def pytest_configure(config: Config) -> None: + reporter = TerminalReporter(config, sys.stdout) + config.pluginmanager.register(reporter, "terminalreporter") + if config.option.debug or config.option.traceconfig: + + def mywriter(tags, args): + msg = " ".join(map(str, args)) + reporter.write_line("[traceconfig] " + msg) + + config.trace.root.setprocessor("pytest:config", mywriter) + + +def getreportopt(config: Config) -> str: + reportchars: str = config.option.reportchars + + old_aliases = {"F", "S"} + reportopts = "" + for char in reportchars: + if char in old_aliases: + char = char.lower() + if char == "a": + reportopts = "sxXEf" + elif char == "A": + reportopts = "PpsxXEf" + elif char == "N": + reportopts = "" + elif char not in reportopts: + reportopts += char + + if not config.option.disable_warnings and "w" not in reportopts: + reportopts = "w" + reportopts + elif config.option.disable_warnings and "w" in reportopts: + reportopts = reportopts.replace("w", "") + + return reportopts + + +@hookimpl(trylast=True) # after _pytest.runner +def pytest_report_teststatus(report: BaseReport) -> tuple[str, str, str]: + letter = "F" + if report.passed: + letter = "." 
+    elif report.skipped:
+        letter = "s"
+
+    outcome: str = report.outcome
+    if report.when in ("collect", "setup", "teardown") and outcome == "failed":
+        outcome = "error"
+        letter = "E"
+
+    return outcome, letter, outcome.upper()
+
+
+@dataclasses.dataclass
+class WarningReport:
+    """Simple structure to hold warnings information captured by ``pytest_warning_recorded``.
+
+    :ivar str message:
+        User friendly message about the warning.
+    :ivar str|None nodeid:
+        nodeid that generated the warning (see ``get_location``).
+    :ivar tuple fslocation:
+        File system location of the source of the warning (see ``get_location``).
+    """
+
+    message: str
+    nodeid: str | None = None
+    fslocation: tuple[str, int] | None = None
+
+    count_towards_summary: ClassVar = True
+
+    def get_location(self, config: Config) -> str | None:
+        """Return the more user-friendly information about the location of a warning, or None."""
+        if self.nodeid:
+            return self.nodeid
+        if self.fslocation:
+            filename, linenum = self.fslocation
+            relpath = bestrelpath(config.invocation_params.dir, absolutepath(filename))
+            return f"{relpath}:{linenum}"
+        return None
+
+
+@final
+class TerminalReporter:
+    def __init__(self, config: Config, file: TextIO | None = None) -> None:
+        import _pytest.config
+
+        self.config = config
+        self._numcollected = 0
+        self._session: Session | None = None
+        self._showfspath: bool | None = None
+
+        self.stats: dict[str, list[Any]] = {}
+        self._main_color: str | None = None
+        self._known_types: list[str] | None = None
+        self.startpath = config.invocation_params.dir
+        if file is None:
+            file = sys.stdout
+        self._tw = _pytest.config.create_terminal_writer(config, file)
+        self._screen_width = self._tw.fullwidth
+        self.currentfspath: None | Path | str | int = None
+        self.reportchars = getreportopt(config)
+        self.foldskipped = config.option.fold_skipped
+        self.hasmarkup = self._tw.hasmarkup
+        # isatty should be a method but was wrongly implemented as a boolean.
+        # We use CallableBool here to support both.
+        self.isatty = compat.CallableBool(file.isatty())
+        self._progress_nodeids_reported: set[str] = set()
+        self._timing_nodeids_reported: set[str] = set()
+        self._show_progress_info = self._determine_show_progress_info()
+        self._collect_report_last_write = timing.Instant()
+        self._already_displayed_warnings: int | None = None
+        self._keyboardinterrupt_memo: ExceptionRepr | None = None
+
+    def _determine_show_progress_info(
+        self,
+    ) -> Literal["progress", "count", "times", False]:
+        """Return whether we should display progress information based on the current config."""
+        # do not show progress if we are not capturing output (#3038) unless explicitly
+        # overridden by progress-even-when-capture-no
+        if (
+            self.config.getoption("capture", "no") == "no"
+            and self.config.getini("console_output_style")
+            != "progress-even-when-capture-no"
+        ):
+            return False
+        # do not show progress if we are showing fixture setup/teardown
+        if self.config.getoption("setupshow", False):
+            return False
+        cfg: str = self.config.getini("console_output_style")
+        if cfg in {"progress", "progress-even-when-capture-no"}:
+            return "progress"
+        elif cfg == "count":
+            return "count"
+        elif cfg == "times":
+            return "times"
+        else:
+            return False
+
+    @property
+    def verbosity(self) -> int:
+        verbosity: int = self.config.option.verbose
+        return verbosity
+
+    @property
+    def showheader(self) -> bool:
+        return self.verbosity >= 0
+
+    @property
+    def no_header(self) -> bool:
+        return bool(self.config.option.no_header)
+
+    @property
+    def no_summary(self) -> bool:
+        return bool(self.config.option.no_summary)
+
+    @property
+    def showfspath(self) -> bool:
+        if self._showfspath is None:
+            return self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) >= 0
+        return self._showfspath
+
+    @showfspath.setter
+    def showfspath(self, value: bool | None) -> None:
+        self._showfspath = value
+
+    @property
+    def showlongtestinfo(self) -> bool:
+        return self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) > 0
+
+    def hasopt(self, char: str) -> bool:
+        char = {"xfailed": "x", "skipped": "s"}.get(char, char)
+        return char in self.reportchars
+
+    def write_fspath_result(self, nodeid: str, res: str, **markup: bool) -> None:
+        fspath = self.config.rootpath / nodeid.split("::")[0]
+        if self.currentfspath is None or fspath != self.currentfspath:
+            if self.currentfspath is not None and self._show_progress_info:
+                self._write_progress_information_filling_space()
+            self.currentfspath = fspath
+            relfspath = bestrelpath(self.startpath, fspath)
+            self._tw.line()
+            self._tw.write(relfspath + " ")
+        self._tw.write(res, flush=True, **markup)
+
+    def write_ensure_prefix(self, prefix: str, extra: str = "", **kwargs) -> None:
+        if self.currentfspath != prefix:
+            self._tw.line()
+            self.currentfspath = prefix
+            self._tw.write(prefix)
+        if extra:
+            self._tw.write(extra, **kwargs)
+            self.currentfspath = -2
+
+    def ensure_newline(self) -> None:
+        if self.currentfspath:
+            self._tw.line()
+            self.currentfspath = None
+
+    def wrap_write(
+        self,
+        content: str,
+        *,
+        flush: bool = False,
+        margin: int = 8,
+        line_sep: str = "\n",
+        **markup: bool,
+    ) -> None:
+        """Wrap message with margin for progress info."""
+        width_of_current_line = self._tw.width_of_current_line
+        wrapped = line_sep.join(
+            textwrap.wrap(
+                " " * width_of_current_line + content,
+                width=self._screen_width - margin,
+                drop_whitespace=True,
+                replace_whitespace=False,
+            ),
+        )
+        wrapped = wrapped[width_of_current_line:]
+        self._tw.write(wrapped, flush=flush, **markup)
+
+    def write(self, content: str, *, flush: bool = False, **markup: bool) -> None:
+        self._tw.write(content, flush=flush, **markup)
+
+    def flush(self) -> None:
+        self._tw.flush()
+
+    def write_line(self, line: str | bytes, **markup: bool) -> None:
+        if not isinstance(line, str):
+            line = str(line, errors="replace")
+        self.ensure_newline()
+        self._tw.line(line, **markup)
+
+    def rewrite(self, line: str, **markup: bool) -> None:
+        """Rewinds the terminal cursor to the beginning and writes the given line.
+
+        :param erase:
+            If True, will also add spaces until the full terminal width to ensure
+            previous lines are properly erased.
+
+        The rest of the keyword arguments are markup instructions.
+        """
+        erase = markup.pop("erase", False)
+        if erase:
+            fill_count = self._tw.fullwidth - len(line) - 1
+            fill = " " * fill_count
+        else:
+            fill = ""
+        line = str(line)
+        self._tw.write("\r" + line + fill, **markup)
+
+    def write_sep(
+        self,
+        sep: str,
+        title: str | None = None,
+        fullwidth: int | None = None,
+        **markup: bool,
+    ) -> None:
+        self.ensure_newline()
+        self._tw.sep(sep, title, fullwidth, **markup)
+
+    def section(self, title: str, sep: str = "=", **kw: bool) -> None:
+        self._tw.sep(sep, title, **kw)
+
+    def line(self, msg: str, **kw: bool) -> None:
+        self._tw.line(msg, **kw)
+
+    def _add_stats(self, category: str, items: Sequence[Any]) -> None:
+        set_main_color = category not in self.stats
+        self.stats.setdefault(category, []).extend(items)
+        if set_main_color:
+            self._set_main_color()
+
+    def pytest_internalerror(self, excrepr: ExceptionRepr) -> bool:
+        for line in str(excrepr).split("\n"):
+            self.write_line("INTERNALERROR> " + line)
+        return True
+
+    def pytest_warning_recorded(
+        self,
+        warning_message: warnings.WarningMessage,
+        nodeid: str,
+    ) -> None:
+        from _pytest.warnings import warning_record_to_str
+
+        fslocation = warning_message.filename, warning_message.lineno
+        message = warning_record_to_str(warning_message)
+
+        warning_report = WarningReport(
+            fslocation=fslocation, message=message, nodeid=nodeid
+        )
+        self._add_stats("warnings", [warning_report])
+
+    def pytest_plugin_registered(self, plugin: _PluggyPlugin) -> None:
+        if self.config.option.traceconfig:
+            msg = f"PLUGIN registered: {plugin}"
+            # XXX This event may happen during setup/teardown time
+            # which unfortunately captures our output here
+            # which garbles our output if we use self.write_line.
+            self.write_line(msg)
+
+    def pytest_deselected(self, items: Sequence[Item]) -> None:
+        self._add_stats("deselected", items)
+
+    def pytest_runtest_logstart(
+        self, nodeid: str, location: tuple[str, int | None, str]
+    ) -> None:
+        fspath, lineno, domain = location
+        # Ensure that the path is printed before the
+        # 1st test of a module starts running.
+        if self.showlongtestinfo:
+            line = self._locationline(nodeid, fspath, lineno, domain)
+            self.write_ensure_prefix(line, "")
+            self.flush()
+        elif self.showfspath:
+            self.write_fspath_result(nodeid, "")
+            self.flush()
+
+    def pytest_runtest_logreport(self, report: TestReport) -> None:
+        self._tests_ran = True
+        rep = report
+
+        res = TestShortLogReport(
+            *self.config.hook.pytest_report_teststatus(report=rep, config=self.config)
+        )
+        category, letter, word = res.category, res.letter, res.word
+        if not isinstance(word, tuple):
+            markup = None
+        else:
+            word, markup = word
+        self._add_stats(category, [rep])
+        if not letter and not word:
+            # Probably passed setup/teardown.
+            return
+        if markup is None:
+            was_xfail = hasattr(report, "wasxfail")
+            if rep.passed and not was_xfail:
+                markup = {"green": True}
+            elif rep.passed and was_xfail:
+                markup = {"yellow": True}
+            elif rep.failed:
+                markup = {"red": True}
+            elif rep.skipped:
+                markup = {"yellow": True}
+            else:
+                markup = {}
+        self._progress_nodeids_reported.add(rep.nodeid)
+        if self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) <= 0:
+            self._tw.write(letter, **markup)
+            # When running in xdist, the logreport and logfinish of multiple
+            # items are interspersed, e.g. `logreport`, `logreport`,
+            # `logfinish`, `logfinish`. To avoid the "past edge" calculation
+            # from getting confused and overflowing (#7166), do the past edge
+            # printing here and not in logfinish, except for the 100% which
+            # should only be printed after all teardowns are finished.
+            if self._show_progress_info and not self._is_last_item:
+                self._write_progress_information_if_past_edge()
+        else:
+            line = self._locationline(rep.nodeid, *rep.location)
+            running_xdist = hasattr(rep, "node")
+            if not running_xdist:
+                self.write_ensure_prefix(line, word, **markup)
+                if rep.skipped or hasattr(report, "wasxfail"):
+                    reason = _get_raw_skip_reason(rep)
+                    if self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) < 2:
+                        available_width = (
+                            (self._tw.fullwidth - self._tw.width_of_current_line)
+                            - len(" [100%]")
+                            - 1
+                        )
+                        formatted_reason = _format_trimmed(
+                            " ({})", reason, available_width
+                        )
+                    else:
+                        formatted_reason = f" ({reason})"
+
+                    if reason and formatted_reason is not None:
+                        self.wrap_write(formatted_reason)
+                if self._show_progress_info:
+                    self._write_progress_information_filling_space()
+            else:
+                self.ensure_newline()
+                self._tw.write(f"[{rep.node.gateway.id}]")
+                if self._show_progress_info:
+                    self._tw.write(
+                        self._get_progress_information_message() + " ", cyan=True
+                    )
+                else:
+                    self._tw.write(" ")
+                self._tw.write(word, **markup)
+                self._tw.write(" " + line)
+                self.currentfspath = -2
+        self.flush()
+
+    @property
+    def _is_last_item(self) -> bool:
+        assert self._session is not None
+        return len(self._progress_nodeids_reported) == self._session.testscollected
+
+    @hookimpl(wrapper=True)
+    def pytest_runtestloop(self) -> Generator[None, object, object]:
+        result = yield
+
+        # Write the final/100% progress -- deferred until the loop is complete.
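+        # (Intermediate progress is written per-report in
+        # pytest_runtest_logreport above; only the final fill happens here.)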
+        if (
+            self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) <= 0
+            and self._show_progress_info
+            and self._progress_nodeids_reported
+        ):
+            self._write_progress_information_filling_space()
+
+        return result
+
+    def _get_progress_information_message(self) -> str:
+        assert self._session
+        collected = self._session.testscollected
+        if self._show_progress_info == "count":
+            if collected:
+                progress = len(self._progress_nodeids_reported)
+                counter_format = f"{{:{len(str(collected))}d}}"
+                format_string = f" [{counter_format}/{{}}]"
+                return format_string.format(progress, collected)
+            return f" [ {collected} / {collected} ]"
+        if self._show_progress_info == "times":
+            if not collected:
+                return ""
+            all_reports = (
+                self._get_reports_to_display("passed")
+                + self._get_reports_to_display("xpassed")
+                + self._get_reports_to_display("failed")
+                + self._get_reports_to_display("xfailed")
+                + self._get_reports_to_display("skipped")
+                + self._get_reports_to_display("error")
+                + self._get_reports_to_display("")
+            )
+            current_location = all_reports[-1].location[0]
+            not_reported = [
+                r for r in all_reports if r.nodeid not in self._timing_nodeids_reported
+            ]
+            tests_in_module = sum(
+                i.location[0] == current_location for i in self._session.items
+            )
+            tests_completed = sum(
+                r.when == "setup"
+                for r in not_reported
+                if r.location[0] == current_location
+            )
+            last_in_module = tests_completed == tests_in_module
+            if self.showlongtestinfo or last_in_module:
+                self._timing_nodeids_reported.update(r.nodeid for r in not_reported)
+                return format_node_duration(
+                    sum(r.duration for r in not_reported if isinstance(r, TestReport))
+                )
+            return ""
+        if collected:
+            return f" [{len(self._progress_nodeids_reported) * 100 // collected:3d}%]"
+        return " [100%]"
+
+    def _write_progress_information_if_past_edge(self) -> None:
+        w = self._width_of_current_line
+        if self._show_progress_info == "count":
+            assert self._session
+            num_tests = self._session.testscollected
+            progress_length = len(f" [{num_tests}/{num_tests}]")
+        elif self._show_progress_info == "times":
+            progress_length = len(" 99h 59m")
+        else:
+            progress_length = len(" [100%]")
+        past_edge = w + progress_length + 1 >= self._screen_width
+        if past_edge:
+            main_color, _ = self._get_main_color()
+            msg = self._get_progress_information_message()
+            self._tw.write(msg + "\n", **{main_color: True})
+
+    def _write_progress_information_filling_space(self) -> None:
+        color, _ = self._get_main_color()
+        msg = self._get_progress_information_message()
+        w = self._width_of_current_line
+        fill = self._tw.fullwidth - w - 1
+        self.write(msg.rjust(fill), flush=True, **{color: True})
+
+    @property
+    def _width_of_current_line(self) -> int:
+        """Return the width of the current line."""
+        return self._tw.width_of_current_line
+
+    def pytest_collection(self) -> None:
+        if self.isatty():
+            if self.config.option.verbose >= 0:
+                self.write("collecting ... ", flush=True, bold=True)
+        elif self.config.option.verbose >= 1:
+            self.write("collecting ... ", flush=True, bold=True)
+
+    def pytest_collectreport(self, report: CollectReport) -> None:
+        if report.failed:
+            self._add_stats("error", [report])
+        elif report.skipped:
+            self._add_stats("skipped", [report])
+        items = [x for x in report.result if isinstance(x, Item)]
+        self._numcollected += len(items)
+        if self.isatty():
+            self.report_collect()
+
+    def report_collect(self, final: bool = False) -> None:
+        if self.config.option.verbose < 0:
+            return
+
+        if not final:
+            # Only write the "collecting" report every `REPORT_COLLECTING_RESOLUTION`.
+            if (
+                self._collect_report_last_write.elapsed().seconds
+                < REPORT_COLLECTING_RESOLUTION
+            ):
+                return
+            self._collect_report_last_write = timing.Instant()
+
+        errors = len(self.stats.get("error", []))
+        skipped = len(self.stats.get("skipped", []))
+        deselected = len(self.stats.get("deselected", []))
+        selected = self._numcollected - deselected
+        line = "collected " if final else "collecting "
+        line += (
+            str(self._numcollected) + " item" + ("" if self._numcollected == 1 else "s")
+        )
+        if errors:
+            line += f" / {errors} error{'s' if errors != 1 else ''}"
+        if deselected:
+            line += f" / {deselected} deselected"
+        if skipped:
+            line += f" / {skipped} skipped"
+        if self._numcollected > selected:
+            line += f" / {selected} selected"
+        if self.isatty():
+            self.rewrite(line, bold=True, erase=True)
+            if final:
+                self.write("\n")
+        else:
+            self.write_line(line)
+
+    @hookimpl(trylast=True)
+    def pytest_sessionstart(self, session: Session) -> None:
+        self._session = session
+        self._session_start = timing.Instant()
+        if not self.showheader:
+            return
+        self.write_sep("=", "test session starts", bold=True)
+        verinfo = platform.python_version()
+        if not self.no_header:
+            msg = f"platform {sys.platform} -- Python {verinfo}"
+            pypy_version_info = getattr(sys, "pypy_version_info", None)
+            if pypy_version_info:
+                verinfo = ".".join(map(str, pypy_version_info[:3]))
+                msg += f"[pypy-{verinfo}-{pypy_version_info[3]}]"
+            msg += f", pytest-{_pytest._version.version}, pluggy-{pluggy.__version__}"
+            if (
+                self.verbosity > 0
+                or self.config.option.debug
+                or getattr(self.config.option, "pastebin", None)
+            ):
+                msg += " -- " + str(sys.executable)
+            self.write_line(msg)
+            lines = self.config.hook.pytest_report_header(
+                config=self.config, start_path=self.startpath
+            )
+            self._write_report_lines_from_hooks(lines)
+
+    def _write_report_lines_from_hooks(
+        self, lines: Sequence[str | Sequence[str]]
+    ) -> None:
+        for line_or_lines in reversed(lines):
+            if isinstance(line_or_lines, str):
+                self.write_line(line_or_lines)
+            else:
+                for line in line_or_lines:
+                    self.write_line(line)
+
+    def pytest_report_header(self, config: Config) -> list[str]:
+        result = [f"rootdir: {config.rootpath}"]
+
+        if config.inipath:
+            result.append("configfile: " + bestrelpath(config.rootpath, config.inipath))
+
+        if config.args_source == Config.ArgsSource.TESTPATHS:
+            testpaths: list[str] = config.getini("testpaths")
+            result.append("testpaths: {}".format(", ".join(testpaths)))
+
+        plugininfo = config.pluginmanager.list_plugin_distinfo()
+        if plugininfo:
+            result.append(
+                "plugins: {}".format(", ".join(_plugin_nameversions(plugininfo)))
+            )
+        return result
+
+    def pytest_collection_finish(self, session: Session) -> None:
+        self.report_collect(True)
+
+        lines = self.config.hook.pytest_report_collectionfinish(
+            config=self.config,
+            start_path=self.startpath,
+            items=session.items,
+        )
+        self._write_report_lines_from_hooks(lines)
+
+        if self.config.getoption("collectonly"):
+            if session.items:
+                if self.config.option.verbose > -1:
+                    self._tw.line("")
+                self._printcollecteditems(session.items)
+
+            failed = self.stats.get("failed")
+            if failed:
+                self._tw.sep("!", "collection failures")
+                for rep in failed:
+                    rep.toterminal(self._tw)
+
+    def _printcollecteditems(self, items: Sequence[Item]) -> None:
+        test_cases_verbosity = self.config.get_verbosity(Config.VERBOSITY_TEST_CASES)
+        if test_cases_verbosity < 0:
+            if test_cases_verbosity < -1:
+                counts = Counter(item.nodeid.split("::", 1)[0] for item in items)
+                for name, count in sorted(counts.items()):
+                    self._tw.line(f"{name}: {count}")
+            else:
+                for item in items:
+                    self._tw.line(item.nodeid)
+            return
+        stack: list[Node] = []
+        indent = ""
+        for item in items:
+            needed_collectors = item.listchain()[1:]  # strip root node
+            while stack:
+                if stack == needed_collectors[: len(stack)]:
+                    break
+                stack.pop()
+            for col in needed_collectors[len(stack) :]:
+                stack.append(col)
+                indent = (len(stack) - 1) * "  "
+                self._tw.line(f"{indent}{col}")
+                if test_cases_verbosity >= 1:
+                    obj = getattr(col, "obj", None)
+                    doc = inspect.getdoc(obj) if obj else None
+                    if doc:
+                        for line in doc.splitlines():
+                            self._tw.line("{}{}".format(indent + "  ", line))
+
+    @hookimpl(wrapper=True)
+    def pytest_sessionfinish(
+        self, session: Session, exitstatus: int | ExitCode
+    ) -> Generator[None]:
+        result = yield
+        self._tw.line("")
+        summary_exit_codes = (
+            ExitCode.OK,
+            ExitCode.TESTS_FAILED,
+            ExitCode.INTERRUPTED,
+            ExitCode.USAGE_ERROR,
+            ExitCode.NO_TESTS_COLLECTED,
+        )
+        if exitstatus in summary_exit_codes and not self.no_summary:
+            self.config.hook.pytest_terminal_summary(
+                terminalreporter=self, exitstatus=exitstatus, config=self.config
+            )
+        if session.shouldfail:
+            self.write_sep("!", str(session.shouldfail), red=True)
+        if exitstatus == ExitCode.INTERRUPTED:
+            self._report_keyboardinterrupt()
+            self._keyboardinterrupt_memo = None
+        elif session.shouldstop:
+            self.write_sep("!", str(session.shouldstop), red=True)
+        self.summary_stats()
+        return result
+
+    @hookimpl(wrapper=True)
+    def pytest_terminal_summary(self) -> Generator[None]:
+        self.summary_errors()
+        self.summary_failures()
+        self.summary_xfailures()
+        self.summary_warnings()
+        self.summary_passes()
+        self.summary_xpasses()
+        try:
+            return (yield)
+        finally:
+            self.short_test_summary()
+            # Display any extra warnings from teardown here (if any).
+            self.summary_warnings()
+
+    def pytest_keyboard_interrupt(self, excinfo: ExceptionInfo[BaseException]) -> None:
+        self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True)
+
+    def pytest_unconfigure(self) -> None:
+        if self._keyboardinterrupt_memo is not None:
+            self._report_keyboardinterrupt()
+
+    def _report_keyboardinterrupt(self) -> None:
+        excrepr = self._keyboardinterrupt_memo
+        assert excrepr is not None
+        assert excrepr.reprcrash is not None
+        msg = excrepr.reprcrash.message
+        self.write_sep("!", msg)
+        if "KeyboardInterrupt" in msg:
+            if self.config.option.fulltrace:
+                excrepr.toterminal(self._tw)
+            else:
+                excrepr.reprcrash.toterminal(self._tw)
+                self._tw.line(
+                    "(to show a full traceback on KeyboardInterrupt use --full-trace)",
+                    yellow=True,
+                )
+
+    def _locationline(
+        self, nodeid: str, fspath: str, lineno: int | None, domain: str
+    ) -> str:
+        def mkrel(nodeid: str) -> str:
+            line = self.config.cwd_relative_nodeid(nodeid)
+            if domain and line.endswith(domain):
+                line = line[: -len(domain)]
+                values = domain.split("[")
+                values[0] = values[0].replace(".", "::")  # don't replace '.' in params
+                line += "[".join(values)
+            return line
+
+        # fspath comes from testid which has a "/"-normalized path.
+        if fspath:
+            res = mkrel(nodeid)
+            if self.verbosity >= 2 and nodeid.split("::")[0] != fspath.replace(
+                "\\", nodes.SEP
+            ):
+                res += " <- " + bestrelpath(self.startpath, Path(fspath))
+        else:
+            res = "[location]"
+        return res + " "
+
+    def _getfailureheadline(self, rep):
+        head_line = rep.head_line
+        if head_line:
+            return head_line
+        return "test session"  # XXX?
+
+    def _getcrashline(self, rep):
+        try:
+            return str(rep.longrepr.reprcrash)
+        except AttributeError:
+            try:
+                return str(rep.longrepr)[:50]
+            except AttributeError:
+                return ""
+
+    #
+    # Summaries for sessionfinish.
+    #
+    def getreports(self, name: str):
+        return [x for x in self.stats.get(name, ()) if not hasattr(x, "_pdbshown")]
+
+    def summary_warnings(self) -> None:
+        if self.hasopt("w"):
+            all_warnings: list[WarningReport] | None = self.stats.get("warnings")
+            if not all_warnings:
+                return
+
+            final = self._already_displayed_warnings is not None
+            if final:
+                warning_reports = all_warnings[self._already_displayed_warnings :]
+            else:
+                warning_reports = all_warnings
+            self._already_displayed_warnings = len(warning_reports)
+            if not warning_reports:
+                return
+
+            reports_grouped_by_message: dict[str, list[WarningReport]] = {}
+            for wr in warning_reports:
+                reports_grouped_by_message.setdefault(wr.message, []).append(wr)
+
+            def collapsed_location_report(reports: list[WarningReport]) -> str:
+                locations = []
+                for w in reports:
+                    location = w.get_location(self.config)
+                    if location:
+                        locations.append(location)
+
+                if len(locations) < 10:
+                    return "\n".join(map(str, locations))
+
+                counts_by_filename = Counter(
+                    str(loc).split("::", 1)[0] for loc in locations
+                )
+                return "\n".join(
+                    "{}: {} warning{}".format(k, v, "s" if v > 1 else "")
+                    for k, v in counts_by_filename.items()
+                )
+
+            title = "warnings summary (final)" if final else "warnings summary"
+            self.write_sep("=", title, yellow=True, bold=False)
+            for message, message_reports in reports_grouped_by_message.items():
+                maybe_location = collapsed_location_report(message_reports)
+                if maybe_location:
+                    self._tw.line(maybe_location)
+                    lines = message.splitlines()
+                    indented = "\n".join("  " + x for x in lines)
+                    message = indented.rstrip()
+                else:
+                    message = message.rstrip()
+                self._tw.line(message)
+                self._tw.line()
+            self._tw.line(
+                "-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html"
+            )
+
+    def summary_passes(self) -> None:
+        self.summary_passes_combined("passed", "PASSES", "P")
+
+    def summary_xpasses(self) -> None:
+        self.summary_passes_combined("xpassed", "XPASSES", "X")
+
+    def summary_passes_combined(
+        self, which_reports: str, sep_title: str, needed_opt: str
+    ) -> None:
+        if self.config.option.tbstyle != "no":
+            if self.hasopt(needed_opt):
+                reports: list[TestReport] = self.getreports(which_reports)
+                if not reports:
+                    return
+                self.write_sep("=", sep_title)
+                for rep in reports:
+                    if rep.sections:
+                        msg = self._getfailureheadline(rep)
+                        self.write_sep("_", msg, green=True, bold=True)
+                        self._outrep_summary(rep)
+                    self._handle_teardown_sections(rep.nodeid)
+
+    def _get_teardown_reports(self, nodeid: str) -> list[TestReport]:
+        reports = self.getreports("")
+        return [
+            report
+            for report in reports
+            if report.when == "teardown" and report.nodeid == nodeid
+        ]
+
+    def _handle_teardown_sections(self, nodeid: str) -> None:
+        for report in self._get_teardown_reports(nodeid):
+            self.print_teardown_sections(report)
+
+    def print_teardown_sections(self, rep: TestReport) -> None:
+        showcapture = self.config.option.showcapture
+        if showcapture == "no":
+            return
+        for secname, content in rep.sections:
+            if showcapture != "all" and showcapture not in secname:
+                continue
+            if "teardown" in secname:
+                self._tw.sep("-", secname)
+                if content[-1:] == "\n":
+                    content = content[:-1]
+                self._tw.line(content)
+
+    def summary_failures(self) -> None:
+        style = self.config.option.tbstyle
+        self.summary_failures_combined("failed", "FAILURES", style=style)
+
+    def summary_xfailures(self) -> None:
+        show_tb = self.config.option.xfail_tb
+        style = self.config.option.tbstyle if show_tb else "no"
+        self.summary_failures_combined("xfailed", "XFAILURES", style=style)
+
+    def summary_failures_combined(
+        self,
+        which_reports: str,
+        sep_title: str,
+        *,
+        style: str,
+        needed_opt: str | None = None,
+    ) -> None:
+        if style != "no":
+            if not needed_opt or self.hasopt(needed_opt):
+                reports: list[BaseReport] = self.getreports(which_reports)
+                if not reports:
+                    return
+                self.write_sep("=", sep_title)
+                if style == "line":
+                    for rep in reports:
+                        line = self._getcrashline(rep)
+                        self.write_line(line)
+                else:
+                    for rep in reports:
+                        msg = self._getfailureheadline(rep)
+                        self.write_sep("_", msg, red=True, bold=True)
+                        self._outrep_summary(rep)
+                        self._handle_teardown_sections(rep.nodeid)
+
+    def summary_errors(self) -> None:
+        if self.config.option.tbstyle != "no":
+            reports: list[BaseReport] = self.getreports("error")
+            if not reports:
+                return
+            self.write_sep("=", "ERRORS")
+            for rep in self.stats["error"]:
+                msg = self._getfailureheadline(rep)
+                if rep.when == "collect":
+                    msg = "ERROR collecting " + msg
+                else:
+                    msg = f"ERROR at {rep.when} of {msg}"
+                self.write_sep("_", msg, red=True, bold=True)
+                self._outrep_summary(rep)
+
+    def _outrep_summary(self, rep: BaseReport) -> None:
+        rep.toterminal(self._tw)
+        showcapture = self.config.option.showcapture
+        if showcapture == "no":
+            return
+        for secname, content in rep.sections:
+            if showcapture != "all" and showcapture not in secname:
+                continue
+            self._tw.sep("-", secname)
+            if content[-1:] == "\n":
+                content = content[:-1]
+            self._tw.line(content)
+
+    def summary_stats(self) -> None:
+        if self.verbosity < -1:
+            return
+
+        session_duration = self._session_start.elapsed()
+        (parts, main_color) = self.build_summary_stats_line()
+        line_parts = []
+
+        display_sep = self.verbosity >= 0
+        if display_sep:
+            fullwidth = self._tw.fullwidth
+        for text, markup in parts:
+            with_markup = self._tw.markup(text, **markup)
+            if display_sep:
+                fullwidth += len(with_markup) - len(text)
+            line_parts.append(with_markup)
+        msg = ", ".join(line_parts)
+
+        main_markup = {main_color: True}
+        duration = f" in {format_session_duration(session_duration.seconds)}"
+        duration_with_markup = self._tw.markup(duration, **main_markup)
+        if display_sep:
+            fullwidth += len(duration_with_markup) - len(duration)
+        msg += duration_with_markup
+
+        if display_sep:
+            markup_for_end_sep = self._tw.markup("", **main_markup)
+            if markup_for_end_sep.endswith("\x1b[0m"):
+                markup_for_end_sep = markup_for_end_sep[:-4]
+            fullwidth += len(markup_for_end_sep)
+            msg += markup_for_end_sep
+
+        if display_sep:
+            self.write_sep("=", msg, fullwidth=fullwidth, **main_markup)
+        else:
+            self.write_line(msg, **main_markup)
+
+    def short_test_summary(self) -> None:
+        if not self.reportchars:
+            return
+
+        def show_simple(lines: list[str], *, stat: str) -> None:
+            failed = self.stats.get(stat, [])
+            if not failed:
+                return
+            config = self.config
+            for rep in failed:
+                color = _color_for_type.get(stat, _color_for_type_default)
+                line = _get_line_with_reprcrash_message(
+                    config, rep, self._tw, {color: True}
+                )
+                lines.append(line)
+
+        def show_xfailed(lines: list[str]) -> None:
+            xfailed = self.stats.get("xfailed", [])
+            for rep in xfailed:
+                verbose_word, verbose_markup = rep._get_verbose_word_with_markup(
+                    self.config, {_color_for_type["warnings"]: True}
+                )
+                markup_word = self._tw.markup(verbose_word, **verbose_markup)
+                nodeid = _get_node_id_with_markup(self._tw, self.config, rep)
+                line = f"{markup_word} {nodeid}"
+                reason = rep.wasxfail
+                if reason:
+                    line += " - " + str(reason)
+
+                lines.append(line)
+
+        def show_xpassed(lines: list[str]) -> None:
+            xpassed = self.stats.get("xpassed", [])
+            for rep in xpassed:
+                verbose_word, verbose_markup = rep._get_verbose_word_with_markup(
+                    self.config, {_color_for_type["warnings"]: True}
+                )
+                markup_word = self._tw.markup(verbose_word, **verbose_markup)
+                nodeid = _get_node_id_with_markup(self._tw, self.config, rep)
+                line = f"{markup_word} {nodeid}"
+                reason = rep.wasxfail
+                if reason:
+                    line += " - " + str(reason)
+                lines.append(line)
+
+        def show_skipped_folded(lines: list[str]) -> None:
+            skipped: list[CollectReport] = self.stats.get("skipped", [])
+            fskips = _folded_skips(self.startpath, skipped) if skipped else []
+            if not fskips:
+                return
+            verbose_word, verbose_markup = skipped[0]._get_verbose_word_with_markup(
+                self.config, {_color_for_type["warnings"]: True}
+            )
+            markup_word = self._tw.markup(verbose_word, **verbose_markup)
+            prefix = "Skipped: "
+            for num, fspath, lineno, reason in fskips:
+                if reason.startswith(prefix):
+                    reason = reason[len(prefix) :]
+                if lineno is not None:
+                    lines.append(f"{markup_word} [{num}] {fspath}:{lineno}: {reason}")
+                else:
+                    lines.append(f"{markup_word} [{num}] {fspath}: {reason}")
+
+        def show_skipped_unfolded(lines: list[str]) -> None:
+            skipped: list[CollectReport] = self.stats.get("skipped", [])
+
+            for rep in skipped:
+                assert rep.longrepr is not None
+                assert isinstance(rep.longrepr, tuple), (rep, rep.longrepr)
+                assert len(rep.longrepr) == 3, (rep, rep.longrepr)
+
+                verbose_word, verbose_markup = rep._get_verbose_word_with_markup(
+                    self.config, {_color_for_type["warnings"]: True}
+                )
+                markup_word = self._tw.markup(verbose_word, **verbose_markup)
+                nodeid = _get_node_id_with_markup(self._tw, self.config, rep)
+                line = f"{markup_word} {nodeid}"
+                reason = rep.longrepr[2]
+                if reason:
+                    line += " - " + str(reason)
+                lines.append(line)
+
+        def show_skipped(lines: list[str]) -> None:
+            if self.foldskipped:
+                show_skipped_folded(lines)
+            else:
+                show_skipped_unfolded(lines)
+
+        REPORTCHAR_ACTIONS: Mapping[str, Callable[[list[str]], None]] = {
+            "x": show_xfailed,
+            "X": show_xpassed,
+            "f": partial(show_simple, stat="failed"),
+            "s": show_skipped,
+            "p": partial(show_simple, stat="passed"),
+            "E": partial(show_simple, stat="error"),
+        }
+
+        lines: list[str] = []
+        for char in self.reportchars:
+            action = REPORTCHAR_ACTIONS.get(char)
+            if action:  # skipping e.g. "P" (passed with output) here.
+ action(lines) + + if lines: + self.write_sep("=", "short test summary info", cyan=True, bold=True) + for line in lines: + self.write_line(line) + + def _get_main_color(self) -> tuple[str, list[str]]: + if self._main_color is None or self._known_types is None or self._is_last_item: + self._set_main_color() + assert self._main_color + assert self._known_types + return self._main_color, self._known_types + + def _determine_main_color(self, unknown_type_seen: bool) -> str: + stats = self.stats + if "failed" in stats or "error" in stats: + main_color = "red" + elif "warnings" in stats or "xpassed" in stats or unknown_type_seen: + main_color = "yellow" + elif "passed" in stats or not self._is_last_item: + main_color = "green" + else: + main_color = "yellow" + return main_color + + def _set_main_color(self) -> None: + unknown_types: list[str] = [] + for found_type in self.stats: + if found_type: # setup/teardown reports have an empty key, ignore them + if found_type not in KNOWN_TYPES and found_type not in unknown_types: + unknown_types.append(found_type) + self._known_types = list(KNOWN_TYPES) + unknown_types + self._main_color = self._determine_main_color(bool(unknown_types)) + + def build_summary_stats_line(self) -> tuple[list[tuple[str, dict[str, bool]]], str]: + """ + Build the parts used in the last summary stats line. + + The summary stats line is the line shown at the end, "=== 12 passed, 2 errors in Xs ===". + + This function builds a list of the "parts" that make up the text in that line; in + the example above it would be:: + + [ + ("12 passed", {"green": True}), + ("2 errors", {"red": True}), + ] + + The last dict in each pair is a "markup dictionary", used by TerminalWriter to + color the output. + + The final color of the line is also determined by this function, and is the second + element of the returned tuple.
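As a rough editorial illustration (not part of the API; markup handling omitted), joining such parts produces the plain text of the line::

    parts = [("12 passed", {"green": True}), ("2 errors", {"red": True})]
    text = ", ".join(part_text for part_text, _ in parts)  # "12 passed, 2 errors"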
+ """ + if self.config.getoption("collectonly"): + return self._build_collect_only_summary_stats_line() + else: + return self._build_normal_summary_stats_line() + + def _get_reports_to_display(self, key: str) -> list[Any]: + """Get test/collection reports for the given status key, such as `passed` or `error`.""" + reports = self.stats.get(key, []) + return [x for x in reports if getattr(x, "count_towards_summary", True)] + + def _build_normal_summary_stats_line( + self, + ) -> tuple[list[tuple[str, dict[str, bool]]], str]: + main_color, known_types = self._get_main_color() + parts = [] + + for key in known_types: + reports = self._get_reports_to_display(key) + if reports: + count = len(reports) + color = _color_for_type.get(key, _color_for_type_default) + markup = {color: True, "bold": color == main_color} + parts.append(("%d %s" % pluralize(count, key), markup)) # noqa: UP031 + + if not parts: + parts = [("no tests ran", {_color_for_type_default: True})] + + return parts, main_color + + def _build_collect_only_summary_stats_line( + self, + ) -> tuple[list[tuple[str, dict[str, bool]]], str]: + deselected = len(self._get_reports_to_display("deselected")) + errors = len(self._get_reports_to_display("error")) + + if self._numcollected == 0: + parts = [("no tests collected", {"yellow": True})] + main_color = "yellow" + + elif deselected == 0: + main_color = "green" + collected_output = "%d %s collected" % pluralize(self._numcollected, "test") # noqa: UP031 + parts = [(collected_output, {main_color: True})] + else: + all_tests_were_deselected = self._numcollected == deselected + if all_tests_were_deselected: + main_color = "yellow" + collected_output = f"no tests collected ({deselected} deselected)" + else: + main_color = "green" + selected = self._numcollected - deselected + collected_output = f"{selected}/{self._numcollected} tests collected ({deselected} deselected)" + + parts = [(collected_output, {main_color: True})] + + if errors: + main_color = _color_for_type["error"] + parts += [("%d %s" % pluralize(errors, "error"), {main_color: True})] # noqa: UP031 + + return parts, main_color + + +def _get_node_id_with_markup(tw: TerminalWriter, config: Config, rep: BaseReport): + nodeid = config.cwd_relative_nodeid(rep.nodeid) + path, *parts = nodeid.split("::") + if parts: + parts_markup = tw.markup("::".join(parts), bold=True) + return path + "::" + parts_markup + else: + return path + + +def _format_trimmed(format: str, msg: str, available_width: int) -> str | None: + """Format msg into format, ellipsizing it if doesn't fit in available_width. + + Returns None if even the ellipsis can't fit. + """ + # Only use the first line. + i = msg.find("\n") + if i != -1: + msg = msg[:i] + + ellipsis = "..." 
+ format_width = wcswidth(format.format("")) + if format_width + len(ellipsis) > available_width: + return None + + if format_width + wcswidth(msg) > available_width: + available_width -= len(ellipsis) + msg = msg[:available_width] + while format_width + wcswidth(msg) > available_width: + msg = msg[:-1] + msg += ellipsis + + return format.format(msg) + + +def _get_line_with_reprcrash_message( + config: Config, rep: BaseReport, tw: TerminalWriter, word_markup: dict[str, bool] +) -> str: + """Get summary line for a report, trying to add reprcrash message.""" + verbose_word, verbose_markup = rep._get_verbose_word_with_markup( + config, word_markup + ) + word = tw.markup(verbose_word, **verbose_markup) + node = _get_node_id_with_markup(tw, config, rep) + + line = f"{word} {node}" + line_width = wcswidth(line) + + try: + # Type ignored intentionally -- possible AttributeError expected. + msg = rep.longrepr.reprcrash.message # type: ignore[union-attr] + except AttributeError: + pass + else: + if ( + running_on_ci() or config.option.verbose >= 2 + ) and not config.option.force_short_summary: + msg = f" - {msg}" + else: + available_width = tw.fullwidth - line_width + msg = _format_trimmed(" - {}", msg, available_width) + if msg is not None: + line += msg + + return line + + +def _folded_skips( + startpath: Path, + skipped: Sequence[CollectReport], +) -> list[tuple[int, str, int | None, str]]: + d: dict[tuple[str, int | None, str], list[CollectReport]] = {} + for event in skipped: + assert event.longrepr is not None + assert isinstance(event.longrepr, tuple), (event, event.longrepr) + assert len(event.longrepr) == 3, (event, event.longrepr) + fspath, lineno, reason = event.longrepr + # For consistency, report all fspaths in relative form. + fspath = bestrelpath(startpath, Path(fspath)) + keywords = getattr(event, "keywords", {}) + # Folding reports with global pytestmark variable. + # This is a workaround, because we currently cannot identify the scope of a skip marker. + # TODO: Revisit once the scope of marks is fixed. + if ( + event.when == "setup" + and "skip" in keywords + and "pytestmark" not in keywords + ): + key: tuple[str, int | None, str] = (fspath, None, reason) + else: + key = (fspath, lineno, reason) + d.setdefault(key, []).append(event) + values: list[tuple[int, str, int | None, str]] = [] + for key, events in d.items(): + values.append((len(events), *key)) + return values + + +_color_for_type = { + "failed": "red", + "error": "red", + "warnings": "yellow", + "passed": "green", +} +_color_for_type_default = "yellow" + + +def pluralize(count: int, noun: str) -> tuple[int, str]: + # No need to pluralize words such as `failed` or `passed`. + if noun not in ["error", "warnings", "test"]: + return count, noun + + # The `warnings` key is plural. To avoid API breakage, we keep it that way but + # set it to singular here so we can determine plurality in the same way as we do + # for `error`. + noun = noun.replace("warnings", "warning") + + return count, noun + "s" if count != 1 else noun + + +def _plugin_nameversions(plugininfo) -> list[str]: + values: list[str] = [] + for plugin, dist in plugininfo: + # Gets us name and version! + name = f"{dist.project_name}-{dist.version}" + # Questionable convenience, but it keeps things short. + if name.startswith("pytest-"): + name = name[7:] + # We decided to print Python package names, since one package can have more than one plugin.
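# Editor's sketch with a hypothetical distribution: project_name="pytest-cov"
# and version="6.0.0" yield the name "pytest-cov-6.0.0", which the prefix
# strip above shortens to "cov-6.0.0"; a second plugin shipped by the same
# package is then skipped by the membership check below.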
+ if name not in values: + values.append(name) + return values + + +def format_session_duration(seconds: float) -> str: + """Format the given seconds in a human readable manner to show in the final summary.""" + if seconds < 60: + return f"{seconds:.2f}s" + else: + dt = datetime.timedelta(seconds=int(seconds)) + return f"{seconds:.2f}s ({dt})" + + +def format_node_duration(seconds: float) -> str: + """Format the given seconds in a human readable manner to show in the test progress.""" + # The formatting is designed to be compact and readable, with at most 7 characters + # for durations below 100 hours. + if seconds < 0.00001: + return f" {seconds * 1000000:.3f}us" + if seconds < 0.0001: + return f" {seconds * 1000000:.2f}us" + if seconds < 0.001: + return f" {seconds * 1000000:.1f}us" + if seconds < 0.01: + return f" {seconds * 1000:.3f}ms" + if seconds < 0.1: + return f" {seconds * 1000:.2f}ms" + if seconds < 1: + return f" {seconds * 1000:.1f}ms" + if seconds < 60: + return f" {seconds:.3f}s" + if seconds < 3600: + return f" {seconds // 60:.0f}m {seconds % 60:.0f}s" + return f" {seconds // 3600:.0f}h {(seconds % 3600) // 60:.0f}m" + + +def _get_raw_skip_reason(report: TestReport) -> str: + """Get the reason string of a skip/xfail/xpass test report. + + The string is just the part given by the user. + """ + if hasattr(report, "wasxfail"): + reason = report.wasxfail + if reason.startswith("reason: "): + reason = reason[len("reason: ") :] + return reason + else: + assert report.skipped + assert isinstance(report.longrepr, tuple) + _, _, reason = report.longrepr + if reason.startswith("Skipped: "): + reason = reason[len("Skipped: ") :] + elif reason == "Skipped": + reason = "" + return reason diff --git a/.venv/lib/python3.11/site-packages/_pytest/threadexception.py b/.venv/lib/python3.11/site-packages/_pytest/threadexception.py new file mode 100644 index 0000000..eb57783 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/threadexception.py @@ -0,0 +1,152 @@ +from __future__ import annotations + +import collections +from collections.abc import Callable +import functools +import sys +import threading +import traceback +from typing import NamedTuple +from typing import TYPE_CHECKING +import warnings + +from _pytest.config import Config +from _pytest.nodes import Item +from _pytest.stash import StashKey +from _pytest.tracemalloc import tracemalloc_message +import pytest + + +if TYPE_CHECKING: + pass + +if sys.version_info < (3, 11): + from exceptiongroup import ExceptionGroup + + +class ThreadExceptionMeta(NamedTuple): + msg: str + cause_msg: str + exc_value: BaseException | None + + +thread_exceptions: StashKey[collections.deque[ThreadExceptionMeta | BaseException]] = ( + StashKey() +) + + +def collect_thread_exception(config: Config) -> None: + pop_thread_exception = config.stash[thread_exceptions].pop + errors: list[pytest.PytestUnhandledThreadExceptionWarning | RuntimeError] = [] + meta = None + hook_error = None + try: + while True: + try: + meta = pop_thread_exception() + except IndexError: + break + + if isinstance(meta, BaseException): + hook_error = RuntimeError("Failed to process thread exception") + hook_error.__cause__ = meta + errors.append(hook_error) + continue + + msg = meta.msg + try: + warnings.warn(pytest.PytestUnhandledThreadExceptionWarning(msg)) + except pytest.PytestUnhandledThreadExceptionWarning as e: + # This except happens when the warning is treated as an error (e.g. `-Werror`). 
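# A standalone sketch of that path (editor's illustration):
#
#     import warnings
#     with warnings.catch_warnings():
#         warnings.simplefilter("error", UserWarning)  # emulate -W error
#         try:
#             warnings.warn(UserWarning("boom"))
#         except UserWarning:
#             pass  # the warning surfaces as an exception, as caught here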
+ if meta.exc_value is not None: + # Exceptions have a better way to show the traceback, but + # warnings do not, so hide the traceback from the msg and + # set the cause so the traceback shows up in the right place. + e.args = (meta.cause_msg,) + e.__cause__ = meta.exc_value + errors.append(e) + + if len(errors) == 1: + raise errors[0] + if errors: + raise ExceptionGroup("multiple thread exception warnings", errors) + finally: + del errors, meta, hook_error + + +def cleanup( + *, config: Config, prev_hook: Callable[[threading.ExceptHookArgs], object] +) -> None: + try: + try: + # We don't join threads here, so exceptions raised from any + # threads still running by the time _threading_atexits joins them + # do not get captured (see #13027). + collect_thread_exception(config) + finally: + threading.excepthook = prev_hook + finally: + del config.stash[thread_exceptions] + + +def thread_exception_hook( + args: threading.ExceptHookArgs, + /, + *, + append: Callable[[ThreadExceptionMeta | BaseException], object], +) -> None: + try: + # we need to compute these strings here as they might change after + # the excepthook finishes and before the metadata object is + # collected by a pytest hook + thread_name = "" if args.thread is None else args.thread.name + summary = f"Exception in thread {thread_name}" + traceback_message = "\n\n" + "".join( + traceback.format_exception( + args.exc_type, + args.exc_value, + args.exc_traceback, + ) + ) + tracemalloc_tb = "\n" + tracemalloc_message(args.thread) + msg = summary + traceback_message + tracemalloc_tb + cause_msg = summary + tracemalloc_tb + + append( + ThreadExceptionMeta( + # Compute these strings here as they might change later + msg=msg, + cause_msg=cause_msg, + exc_value=args.exc_value, + ) + ) + except BaseException as e: + append(e) + # Raising this will cause the exception to be logged twice, once in our + # collect_thread_exception and once by sys.excepthook + # which is fine - this should never happen anyway and if it does + # it should probably be reported as a pytest bug. + raise + + +def pytest_configure(config: Config) -> None: + prev_hook = threading.excepthook + deque: collections.deque[ThreadExceptionMeta | BaseException] = collections.deque() + config.stash[thread_exceptions] = deque + config.add_cleanup(functools.partial(cleanup, config=config, prev_hook=prev_hook)) + threading.excepthook = functools.partial(thread_exception_hook, append=deque.append) + + +@pytest.hookimpl(trylast=True) +def pytest_runtest_setup(item: Item) -> None: + collect_thread_exception(item.config) + + +@pytest.hookimpl(trylast=True) +def pytest_runtest_call(item: Item) -> None: + collect_thread_exception(item.config) + + +@pytest.hookimpl(trylast=True) +def pytest_runtest_teardown(item: Item) -> None: + collect_thread_exception(item.config) diff --git a/.venv/lib/python3.11/site-packages/_pytest/timing.py b/.venv/lib/python3.11/site-packages/_pytest/timing.py new file mode 100644 index 0000000..221eeff --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/timing.py @@ -0,0 +1,94 @@ +"""Indirection for time functions. + +We intentionally grab some "time" functions internally to avoid tests mocking "time" to affect +pytest runtime information (issue #185). + +Fixture "mock_timing" also interacts with this module for pytest's own tests. 
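For example (editorial sketch, not part of the original module), measuring a
span through this indirection so that "mock_timing" can patch it::

    from _pytest import timing

    start = timing.Instant()
    timing.sleep(0.25)
    print(start.elapsed().seconds)  # ~0.25; exactly 0.25 under MockTiming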
+""" + +from __future__ import annotations + +import dataclasses +from datetime import datetime +from datetime import timezone +from time import perf_counter +from time import sleep +from time import time +from typing import TYPE_CHECKING + + +if TYPE_CHECKING: + from pytest import MonkeyPatch + + +@dataclasses.dataclass(frozen=True) +class Instant: + """ + Represents an instant in time, used to both get the timestamp value and to measure + the duration of a time span. + + Inspired by Rust's `std::time::Instant`. + """ + + # Creation time of this instant, using time.time(), to measure actual time. + # Note: using a `lambda` to correctly get the mocked time via `MockTiming`. + time: float = dataclasses.field(default_factory=lambda: time(), init=False) + + # Performance counter tick of the instant, used to measure precise elapsed time. + # Note: using a `lambda` to correctly get the mocked time via `MockTiming`. + perf_count: float = dataclasses.field( + default_factory=lambda: perf_counter(), init=False + ) + + def elapsed(self) -> Duration: + """Measure the duration since `Instant` was created.""" + return Duration(start=self, stop=Instant()) + + def as_utc(self) -> datetime: + """Instant as UTC datetime.""" + return datetime.fromtimestamp(self.time, timezone.utc) + + +@dataclasses.dataclass(frozen=True) +class Duration: + """A span of time as measured by `Instant.elapsed()`.""" + + start: Instant + stop: Instant + + @property + def seconds(self) -> float: + """Elapsed time of the duration in seconds, measured using a performance counter for precise timing.""" + return self.stop.perf_count - self.start.perf_count + + +@dataclasses.dataclass +class MockTiming: + """Mocks _pytest.timing with a known object that can be used to control timing in tests + deterministically. + + pytest itself should always use functions from `_pytest.timing` instead of `time` directly. + + This then allows us more control over time during testing, if testing code also + uses `_pytest.timing` functions. 
+ + Time is static, and only advances through `sleep` calls, thus tests might sleep over large + numbers and obtain accurate time() calls at the end, making tests reliable and instant.""" + + _current_time: float = datetime(2020, 5, 22, 14, 20, 50).timestamp() + + def sleep(self, seconds: float) -> None: + self._current_time += seconds + + def time(self) -> float: + return self._current_time + + def patch(self, monkeypatch: MonkeyPatch) -> None: + from _pytest import timing # noqa: PLW0406 + + monkeypatch.setattr(timing, "sleep", self.sleep) + monkeypatch.setattr(timing, "time", self.time) + monkeypatch.setattr(timing, "perf_counter", self.time) + + +__all__ = ["perf_counter", "sleep", "time"] diff --git a/.venv/lib/python3.11/site-packages/_pytest/tmpdir.py b/.venv/lib/python3.11/site-packages/_pytest/tmpdir.py new file mode 100644 index 0000000..dcd5784 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/tmpdir.py @@ -0,0 +1,312 @@ +# mypy: allow-untyped-defs +"""Support for providing temporary directories to test functions.""" + +from __future__ import annotations + +from collections.abc import Generator +import dataclasses +import os +from pathlib import Path +import re +from shutil import rmtree +import tempfile +from typing import Any +from typing import final +from typing import Literal + +from .pathlib import cleanup_dead_symlinks +from .pathlib import LOCK_TIMEOUT +from .pathlib import make_numbered_dir +from .pathlib import make_numbered_dir_with_cleanup +from .pathlib import rm_rf +from _pytest.compat import get_user_id +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.monkeypatch import MonkeyPatch +from _pytest.nodes import Item +from _pytest.reports import TestReport +from _pytest.stash import StashKey + + +tmppath_result_key = StashKey[dict[str, bool]]() +RetentionType = Literal["all", "failed", "none"] + + +@final +@dataclasses.dataclass +class TempPathFactory: + """Factory for temporary directories under the common base temp directory, + as discussed at :ref:`temporary directory location and retention`. + """ + + _given_basetemp: Path | None + # pluggy TagTracerSub, not currently exposed, so Any. + _trace: Any + _basetemp: Path | None + _retention_count: int + _retention_policy: RetentionType + + def __init__( + self, + given_basetemp: Path | None, + retention_count: int, + retention_policy: RetentionType, + trace, + basetemp: Path | None = None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + if given_basetemp is None: + self._given_basetemp = None + else: + # Use os.path.abspath() to get absolute path instead of resolve() as it + # does not work the same in all platforms (see #4427). + # Path.absolute() exists, but it is not public (see https://bugs.python.org/issue25012). + self._given_basetemp = Path(os.path.abspath(str(given_basetemp))) + self._trace = trace + self._retention_count = retention_count + self._retention_policy = retention_policy + self._basetemp = basetemp + + @classmethod + def from_config( + cls, + config: Config, + *, + _ispytest: bool = False, + ) -> TempPathFactory: + """Create a factory according to pytest configuration. 
+ + :meta private: + """ + check_ispytest(_ispytest) + count = int(config.getini("tmp_path_retention_count")) + if count < 0: + raise ValueError( + f"tmp_path_retention_count must be >= 0. Current input: {count}." + ) + + policy = config.getini("tmp_path_retention_policy") + if policy not in ("all", "failed", "none"): + raise ValueError( + f"tmp_path_retention_policy must be either all, failed, none. Current input: {policy}." + ) + + return cls( + given_basetemp=config.option.basetemp, + trace=config.trace.get("tmpdir"), + retention_count=count, + retention_policy=policy, + _ispytest=True, + ) + + def _ensure_relative_to_basetemp(self, basename: str) -> str: + basename = os.path.normpath(basename) + if (self.getbasetemp() / basename).resolve().parent != self.getbasetemp(): + raise ValueError(f"{basename} is not a normalized and relative path") + return basename + + def mktemp(self, basename: str, numbered: bool = True) -> Path: + """Create a new temporary directory managed by the factory. + + :param basename: + Directory base name, must be a relative path. + + :param numbered: + If ``True``, ensure the directory is unique by adding a numbered + suffix greater than any existing one: ``basename="foo-"`` and ``numbered=True`` + means that this function will create directories named ``"foo-0"``, + ``"foo-1"``, ``"foo-2"`` and so on. + + :returns: + The path to the new directory. + """ + basename = self._ensure_relative_to_basetemp(basename) + if not numbered: + p = self.getbasetemp().joinpath(basename) + p.mkdir(mode=0o700) + else: + p = make_numbered_dir(root=self.getbasetemp(), prefix=basename, mode=0o700) + self._trace("mktemp", p) + return p + + def getbasetemp(self) -> Path: + """Return the base temporary directory, creating it if needed. + + :returns: + The base temporary directory. + """ + if self._basetemp is not None: + return self._basetemp + + if self._given_basetemp is not None: + basetemp = self._given_basetemp + if basetemp.exists(): + rm_rf(basetemp) + basetemp.mkdir(mode=0o700) + basetemp = basetemp.resolve() + else: + from_env = os.environ.get("PYTEST_DEBUG_TEMPROOT") + temproot = Path(from_env or tempfile.gettempdir()).resolve() + user = get_user() or "unknown" + # Use a sub-directory in the temproot to speed up the + # make_numbered_dir() call. + rootdir = temproot.joinpath(f"pytest-of-{user}") + try: + rootdir.mkdir(mode=0o700, exist_ok=True) + except OSError: + # getuser() likely returned characters that are illegal in paths on + # this platform; fall back to a generic "unknown" user directory. + rootdir = temproot.joinpath("pytest-of-unknown") + rootdir.mkdir(mode=0o700, exist_ok=True) + # Because we use exist_ok=True with a predictable name, make sure + # we are the owners, to prevent any funny business (on unix, where + # temproot is usually shared). + # Also, to keep things private, fixup any world-readable temp + # rootdir's permissions. Historically 0o755 was used, so we can't + # just error out on this, at least for a while. + uid = get_user_id() + if uid is not None: + rootdir_stat = rootdir.stat() + if rootdir_stat.st_uid != uid: + raise OSError( + f"The temporary directory {rootdir} is not owned by the current user. " + "Fix this and try again."
+ ) + if (rootdir_stat.st_mode & 0o077) != 0: + os.chmod(rootdir, rootdir_stat.st_mode & ~0o077) + keep = self._retention_count + if self._retention_policy == "none": + keep = 0 + basetemp = make_numbered_dir_with_cleanup( + prefix="pytest-", + root=rootdir, + keep=keep, + lock_timeout=LOCK_TIMEOUT, + mode=0o700, + ) + assert basetemp is not None, basetemp + self._basetemp = basetemp + self._trace("new basetemp", basetemp) + return basetemp + + +def get_user() -> str | None: + """Return the current user name, or None if getuser() does not work + in the current environment (see #1010).""" + try: + # In some exotic environments, getpass may not be importable. + import getpass + + return getpass.getuser() + except (ImportError, OSError, KeyError): + return None + + +def pytest_configure(config: Config) -> None: + """Create a TempPathFactory and attach it to the config object. + + This is to comply with existing plugins which expect the handler to be + available at pytest_configure time, but ideally should be moved entirely + to the tmp_path_factory session fixture. + """ + mp = MonkeyPatch() + config.add_cleanup(mp.undo) + _tmp_path_factory = TempPathFactory.from_config(config, _ispytest=True) + mp.setattr(config, "_tmp_path_factory", _tmp_path_factory, raising=False) + + +def pytest_addoption(parser: Parser) -> None: + parser.addini( + "tmp_path_retention_count", + help="How many sessions should we keep the `tmp_path` directories, according to `tmp_path_retention_policy`.", + default=3, + ) + + parser.addini( + "tmp_path_retention_policy", + help="Controls which directories created by the `tmp_path` fixture are kept around, based on test outcome. " + "(all/failed/none)", + default="all", + ) + + +@fixture(scope="session") +def tmp_path_factory(request: FixtureRequest) -> TempPathFactory: + """Return a :class:`pytest.TempPathFactory` instance for the test session.""" + # Set dynamically by pytest_configure() above. + return request.config._tmp_path_factory # type: ignore + + +def _mk_tmp(request: FixtureRequest, factory: TempPathFactory) -> Path: + name = request.node.name + name = re.sub(r"[\W]", "_", name) + MAXVAL = 30 + name = name[:MAXVAL] + return factory.mktemp(name, numbered=True) + + +@fixture +def tmp_path( + request: FixtureRequest, tmp_path_factory: TempPathFactory +) -> Generator[Path]: + """Return a temporary directory (as :class:`pathlib.Path` object) + which is unique to each test function invocation. + The temporary directory is created as a subdirectory + of the base temporary directory, with configurable retention, + as discussed in :ref:`temporary directory location and retention`. + """ + path = _mk_tmp(request, tmp_path_factory) + yield path + + # Remove the tmpdir if the policy is "failed" and the test passed. + policy = tmp_path_factory._retention_policy + result_dict = request.node.stash[tmppath_result_key] + + if policy == "failed" and result_dict.get("call", True): + # We do a "best effort" to remove files, but it might not be possible due to some leaked resource, + # permissions, etc, in which case we ignore it. + rmtree(path, ignore_errors=True) + + del request.node.stash[tmppath_result_key] + + +def pytest_sessionfinish(session, exitstatus: int | ExitCode): + """After each session, remove base directory if all the tests passed, + the policy is "failed", and the basetemp is not specified by a user. 
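An illustrative configuration (editor's note) under which this cleanup runs
after a fully passing session::

    # pytest.ini
    [pytest]
    tmp_path_retention_policy = failed
    tmp_path_retention_count = 3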
+ """ + tmp_path_factory: TempPathFactory = session.config._tmp_path_factory + basetemp = tmp_path_factory._basetemp + if basetemp is None: + return + + policy = tmp_path_factory._retention_policy + if ( + exitstatus == 0 + and policy == "failed" + and tmp_path_factory._given_basetemp is None + ): + if basetemp.is_dir(): + # We do a "best effort" to remove files, but it might not be possible due to some leaked resource, + # permissions, etc, in which case we ignore it. + rmtree(basetemp, ignore_errors=True) + + # Remove dead symlinks. + if basetemp.is_dir(): + cleanup_dead_symlinks(basetemp) + + +@hookimpl(wrapper=True, tryfirst=True) +def pytest_runtest_makereport( + item: Item, call +) -> Generator[None, TestReport, TestReport]: + rep = yield + assert rep.when is not None + empty: dict[str, bool] = {} + item.stash.setdefault(tmppath_result_key, empty)[rep.when] = rep.passed + return rep diff --git a/.venv/lib/python3.11/site-packages/_pytest/tracemalloc.py b/.venv/lib/python3.11/site-packages/_pytest/tracemalloc.py new file mode 100644 index 0000000..5d0b198 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/tracemalloc.py @@ -0,0 +1,24 @@ +from __future__ import annotations + + +def tracemalloc_message(source: object) -> str: + if source is None: + return "" + + try: + import tracemalloc + except ImportError: + return "" + + tb = tracemalloc.get_object_traceback(source) + if tb is not None: + formatted_tb = "\n".join(tb.format()) + # Use a leading new line to better separate the (large) output + # from the traceback to the previous warning text. + return f"\nObject allocated at:\n{formatted_tb}" + # No need for a leading new line. + url = "https://docs.pytest.org/en/stable/how-to/capture-warnings.html#resource-warnings" + return ( + "Enable tracemalloc to get traceback where the object was allocated.\n" + f"See {url} for more info." 
+ ) diff --git a/.venv/lib/python3.11/site-packages/_pytest/unittest.py b/.venv/lib/python3.11/site-packages/_pytest/unittest.py new file mode 100644 index 0000000..ef6ef64 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/unittest.py @@ -0,0 +1,516 @@ +# mypy: allow-untyped-defs +"""Discover and run std-library "unittest" style tests.""" + +from __future__ import annotations + +from collections.abc import Callable +from collections.abc import Generator +from collections.abc import Iterable +from collections.abc import Iterator +from enum import auto +from enum import Enum +import inspect +import sys +import traceback +import types +from typing import TYPE_CHECKING +from typing import Union + +import _pytest._code +from _pytest.compat import is_async_function +from _pytest.config import hookimpl +from _pytest.fixtures import FixtureRequest +from _pytest.monkeypatch import MonkeyPatch +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.outcomes import exit +from _pytest.outcomes import fail +from _pytest.outcomes import skip +from _pytest.outcomes import xfail +from _pytest.python import Class +from _pytest.python import Function +from _pytest.python import Module +from _pytest.runner import CallInfo +import pytest + + +if sys.version_info[:2] < (3, 11): + from exceptiongroup import ExceptionGroup + +if TYPE_CHECKING: + import unittest + + import twisted.trial.unittest + + +_SysExcInfoType = Union[ + tuple[type[BaseException], BaseException, types.TracebackType], + tuple[None, None, None], +] + + +def pytest_pycollect_makeitem( + collector: Module | Class, name: str, obj: object +) -> UnitTestCase | None: + try: + # Has unittest been imported? + ut = sys.modules["unittest"] + # Is obj a subclass of unittest.TestCase? + # Type ignored because `ut` is an opaque module. + if not issubclass(obj, ut.TestCase): # type: ignore + return None + except Exception: + return None + # Is obj a concrete class? + # Abstract classes can't be instantiated, so there is no point in collecting them. + if inspect.isabstract(obj): + return None + # Yes, so let's collect it. + return UnitTestCase.from_parent(collector, name=name, obj=obj) + + +class UnitTestCase(Class): + # Marker for fixturemanager.getfixtureinfo() + # to declare that our children do not support funcargs. + nofuncargs = True + + def newinstance(self): + # TestCase __init__ takes the method (test) name. The TestCase + # constructor treats the name "runTest" as a special no-op, so it can be + # used when a dummy instance is needed. While unittest.TestCase has a + # default, some subclasses omit the default (#9610), so always supply + # it.
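# An editor's illustration of that special case (standalone sketch):
#
#     import unittest
#     class T(unittest.TestCase):
#         def test_x(self): ...
#     T("runTest")  # no ValueError, although T defines no runTest method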
+ return self.obj("runTest") + + def collect(self) -> Iterable[Item | Collector]: + from unittest import TestLoader + + cls = self.obj + if not getattr(cls, "__test__", True): + return + + skipped = _is_skipped(cls) + if not skipped: + self._register_unittest_setup_method_fixture(cls) + self._register_unittest_setup_class_fixture(cls) + self._register_setup_class_fixture() + + self.session._fixturemanager.parsefactories(self.newinstance(), self.nodeid) + + loader = TestLoader() + foundsomething = False + for name in loader.getTestCaseNames(self.obj): + x = getattr(self.obj, name) + if not getattr(x, "__test__", True): + continue + yield TestCaseFunction.from_parent(self, name=name) + foundsomething = True + + if not foundsomething: + runtest = getattr(self.obj, "runTest", None) + if runtest is not None: + ut = sys.modules.get("twisted.trial.unittest", None) + if ut is None or runtest != ut.TestCase.runTest: + yield TestCaseFunction.from_parent(self, name="runTest") + + def _register_unittest_setup_class_fixture(self, cls: type) -> None: + """Register an auto-use fixture to invoke setUpClass and + tearDownClass (#517).""" + setup = getattr(cls, "setUpClass", None) + teardown = getattr(cls, "tearDownClass", None) + if setup is None and teardown is None: + return None + cleanup = getattr(cls, "doClassCleanups", lambda: None) + + def process_teardown_exceptions() -> None: + # tearDown_exceptions is a list set in the class containing exc_infos for errors during + # teardown for the class. + exc_infos = getattr(cls, "tearDown_exceptions", None) + if not exc_infos: + return + exceptions = [exc for (_, exc, _) in exc_infos] + # If a single exception, raise it directly as this provides a more readable + # error (hopefully this will improve in #12255). + if len(exceptions) == 1: + raise exceptions[0] + else: + raise ExceptionGroup("Unittest class cleanup errors", exceptions) + + def unittest_setup_class_fixture( + request: FixtureRequest, + ) -> Generator[None]: + cls = request.cls + if _is_skipped(cls): + reason = cls.__unittest_skip_why__ + raise pytest.skip.Exception(reason, _use_item_location=True) + if setup is not None: + try: + setup() + # unittest does not call the cleanup function for every BaseException, so we + # follow this here. + except Exception: + cleanup() + process_teardown_exceptions() + raise + yield + try: + if teardown is not None: + teardown() + finally: + cleanup() + process_teardown_exceptions() + + self.session._fixturemanager._register_fixture( + # Use a unique name to speed up lookup. + name=f"_unittest_setUpClass_fixture_{cls.__qualname__}", + func=unittest_setup_class_fixture, + nodeid=self.nodeid, + scope="class", + autouse=True, + ) + + def _register_unittest_setup_method_fixture(self, cls: type) -> None: + """Register an auto-use fixture to invoke setup_method and + teardown_method (#517).""" + setup = getattr(cls, "setup_method", None) + teardown = getattr(cls, "teardown_method", None) + if setup is None and teardown is None: + return None + + def unittest_setup_method_fixture( + request: FixtureRequest, + ) -> Generator[None]: + self = request.instance + if _is_skipped(self): + reason = self.__unittest_skip_why__ + raise pytest.skip.Exception(reason, _use_item_location=True) + if setup is not None: + setup(self, request.function) + yield + if teardown is not None: + teardown(self, request.function) + + self.session._fixturemanager._register_fixture( + # Use a unique name to speed up lookup. 
+ name=f"_unittest_setup_method_fixture_{cls.__qualname__}", + func=unittest_setup_method_fixture, + nodeid=self.nodeid, + scope="function", + autouse=True, + ) + + +class TestCaseFunction(Function): + nofuncargs = True + _excinfo: list[_pytest._code.ExceptionInfo[BaseException]] | None = None + + def _getinstance(self): + assert isinstance(self.parent, UnitTestCase) + return self.parent.obj(self.name) + + # Backward compat for pytest-django; can be removed after pytest-django + # updates + some slack. + @property + def _testcase(self): + return self.instance + + def setup(self) -> None: + # A bound method to be called during teardown() if set (see 'runtest()'). + self._explicit_tearDown: Callable[[], None] | None = None + super().setup() + + def teardown(self) -> None: + if self._explicit_tearDown is not None: + self._explicit_tearDown() + self._explicit_tearDown = None + self._obj = None + del self._instance + super().teardown() + + def startTest(self, testcase: unittest.TestCase) -> None: + pass + + def _addexcinfo(self, rawexcinfo: _SysExcInfoType) -> None: + rawexcinfo = _handle_twisted_exc_info(rawexcinfo) + try: + excinfo = _pytest._code.ExceptionInfo[BaseException].from_exc_info( + rawexcinfo # type: ignore[arg-type] + ) + # Invoke the attributes to trigger storing the traceback + # trial causes some issue there. + _ = excinfo.value + _ = excinfo.traceback + except TypeError: + try: + try: + values = traceback.format_exception(*rawexcinfo) + values.insert( + 0, + "NOTE: Incompatible Exception Representation, " + "displaying natively:\n\n", + ) + fail("".join(values), pytrace=False) + except (fail.Exception, KeyboardInterrupt): + raise + except BaseException: + fail( + "ERROR: Unknown Incompatible Exception " + f"representation:\n{rawexcinfo!r}", + pytrace=False, + ) + except KeyboardInterrupt: + raise + except fail.Exception: + excinfo = _pytest._code.ExceptionInfo.from_current() + self.__dict__.setdefault("_excinfo", []).append(excinfo) + + def addError( + self, testcase: unittest.TestCase, rawexcinfo: _SysExcInfoType + ) -> None: + try: + if isinstance(rawexcinfo[1], exit.Exception): + exit(rawexcinfo[1].msg) + except TypeError: + pass + self._addexcinfo(rawexcinfo) + + def addFailure( + self, testcase: unittest.TestCase, rawexcinfo: _SysExcInfoType + ) -> None: + self._addexcinfo(rawexcinfo) + + def addSkip(self, testcase: unittest.TestCase, reason: str) -> None: + try: + raise pytest.skip.Exception(reason, _use_item_location=True) + except skip.Exception: + self._addexcinfo(sys.exc_info()) + + def addExpectedFailure( + self, + testcase: unittest.TestCase, + rawexcinfo: _SysExcInfoType, + reason: str = "", + ) -> None: + try: + xfail(str(reason)) + except xfail.Exception: + self._addexcinfo(sys.exc_info()) + + def addUnexpectedSuccess( + self, + testcase: unittest.TestCase, + reason: twisted.trial.unittest.Todo | None = None, + ) -> None: + msg = "Unexpected success" + if reason: + msg += f": {reason.reason}" + # Preserve unittest behaviour - fail the test. Explicitly not an XPASS. 
+ try: + fail(msg, pytrace=False) + except fail.Exception: + self._addexcinfo(sys.exc_info()) + + def addSuccess(self, testcase: unittest.TestCase) -> None: + pass + + def stopTest(self, testcase: unittest.TestCase) -> None: + pass + + def addDuration(self, testcase: unittest.TestCase, elapsed: float) -> None: + pass + + def runtest(self) -> None: + from _pytest.debugging import maybe_wrap_pytest_function_for_tracing + + testcase = self.instance + assert testcase is not None + + maybe_wrap_pytest_function_for_tracing(self) + + # Let the unittest framework handle async functions. + if is_async_function(self.obj): + testcase(result=self) + else: + # When --pdb is given, we want to postpone calling tearDown() otherwise + # when entering the pdb prompt, tearDown() would have probably cleaned up + # instance variables, which makes it difficult to debug. + # Arguably we could always postpone tearDown(), but this changes the moment where the + # TestCase instance interacts with the results object, so better to only do it + # when absolutely needed. + # We need to consider if the test itself is skipped, or the whole class. + assert isinstance(self.parent, UnitTestCase) + skipped = _is_skipped(self.obj) or _is_skipped(self.parent.obj) + if self.config.getoption("usepdb") and not skipped: + self._explicit_tearDown = testcase.tearDown + setattr(testcase, "tearDown", lambda *args: None) + + # We need to update the actual bound method with self.obj, because + # wrap_pytest_function_for_tracing replaces self.obj by a wrapper. + setattr(testcase, self.name, self.obj) + try: + testcase(result=self) + finally: + delattr(testcase, self.name) + + def _traceback_filter( + self, excinfo: _pytest._code.ExceptionInfo[BaseException] + ) -> _pytest._code.Traceback: + traceback = super()._traceback_filter(excinfo) + ntraceback = traceback.filter( + lambda x: not x.frame.f_globals.get("__unittest"), + ) + if not ntraceback: + ntraceback = traceback + return ntraceback + + +@hookimpl(tryfirst=True) +def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> None: + if isinstance(item, TestCaseFunction): + if item._excinfo: + call.excinfo = item._excinfo.pop(0) + try: + del call.result + except AttributeError: + pass + + # Convert unittest.SkipTest to pytest.skip. + # This is actually only needed for nose, which reuses unittest.SkipTest for + # its own nose.SkipTest. For unittest TestCases, SkipTest is already + # handled internally, and doesn't reach here. + unittest = sys.modules.get("unittest") + if unittest and call.excinfo and isinstance(call.excinfo.value, unittest.SkipTest): + excinfo = call.excinfo + call2 = CallInfo[None].from_call( + lambda: pytest.skip(str(excinfo.value)), call.when + ) + call.excinfo = call2.excinfo + + +def _is_skipped(obj) -> bool: + """Return True if the given object has been marked with @unittest.skip.""" + return bool(getattr(obj, "__unittest_skip__", False)) + + +def pytest_configure() -> None: + """Register the TestCaseFunction class as an IReporter if twisted.trial is available.""" + if _get_twisted_version() is not TwistedVersion.NotInstalled: + from twisted.trial.itrial import IReporter + from zope.interface import classImplements + + classImplements(TestCaseFunction, IReporter) + + +class TwistedVersion(Enum): + """ + The Twisted version installed in the environment. + + We have different workarounds in place for different versions of Twisted. + """ + + # Twisted version 24 or prior. + Version24 = auto() + # Twisted version 25 or later. 
+ Version25 = auto() + # Twisted version is not available. + NotInstalled = auto() + + +def _get_twisted_version() -> TwistedVersion: + # We need to check if "twisted.trial.unittest" is specifically present in sys.modules. + # This is because we intend to integrate with Trial only when it's actively running + # the test suite, but not needed when only other Twisted components are in use. + if "twisted.trial.unittest" not in sys.modules: + return TwistedVersion.NotInstalled + + import importlib.metadata + + import packaging.version + + version_str = importlib.metadata.version("twisted") + version = packaging.version.parse(version_str) + if version.major <= 24: + return TwistedVersion.Version24 + else: + return TwistedVersion.Version25 + + +# Name of the attribute in `twisted.python.Failure` instances that stores +# the `sys.exc_info()` tuple. +# See twisted.trial support in `pytest_runtest_protocol`. +TWISTED_RAW_EXCINFO_ATTR = "_twisted_raw_excinfo" + + +@hookimpl(wrapper=True) +def pytest_runtest_protocol(item: Item) -> Iterator[None]: + if _get_twisted_version() is TwistedVersion.Version24: + import twisted.python.failure as ut + + # Monkeypatch `Failure.__init__` to store the raw exception info. + original__init__ = ut.Failure.__init__ + + def store_raw_exception_info( + self, exc_value=None, exc_type=None, exc_tb=None, captureVars=None + ): # pragma: no cover + if exc_value is None: + raw_exc_info = sys.exc_info() + else: + if exc_type is None: + exc_type = type(exc_value) + if exc_tb is None: + exc_tb = sys.exc_info()[2] + raw_exc_info = (exc_type, exc_value, exc_tb) + setattr(self, TWISTED_RAW_EXCINFO_ATTR, tuple(raw_exc_info)) + try: + original__init__( + self, exc_value, exc_type, exc_tb, captureVars=captureVars + ) + except TypeError: # pragma: no cover + original__init__(self, exc_value, exc_type, exc_tb) + + with MonkeyPatch.context() as patcher: + patcher.setattr(ut.Failure, "__init__", store_raw_exception_info) + return (yield) + else: + return (yield) + + +def _handle_twisted_exc_info( + rawexcinfo: _SysExcInfoType | BaseException, +) -> _SysExcInfoType: + """ + Twisted passes a custom Failure instance to `addError()` instead of using `sys.exc_info()`. + Therefore, if `rawexcinfo` is a `Failure` instance, convert it into the equivalent `sys.exc_info()` tuple + as expected by pytest. + """ + twisted_version = _get_twisted_version() + if twisted_version is TwistedVersion.NotInstalled: + # Unfortunately, because we cannot import `twisted.python.failure` at the top of the file + # and use it in the signature, we need to use `type:ignore` here because we cannot narrow + # the type properly in the `if` statement above. + return rawexcinfo # type:ignore[return-value] + elif twisted_version is TwistedVersion.Version24: + # Twisted calls addError() passing its own classes (like `twisted.python.Failure`), which violates + # the `addError()` signature, so we extract the original `sys.exc_info()` tuple which is stored + # in the object. + if hasattr(rawexcinfo, TWISTED_RAW_EXCINFO_ATTR): + saved_exc_info = getattr(rawexcinfo, TWISTED_RAW_EXCINFO_ATTR) + # Delete the attribute from the original object to avoid leaks. 
+ delattr(rawexcinfo, TWISTED_RAW_EXCINFO_ATTR) + return saved_exc_info # type:ignore[no-any-return] + return rawexcinfo # type:ignore[return-value] + elif twisted_version is TwistedVersion.Version25: + if isinstance(rawexcinfo, BaseException): + import twisted.python.failure + + if isinstance(rawexcinfo, twisted.python.failure.Failure): + tb = rawexcinfo.__traceback__ + if tb is None: + tb = sys.exc_info()[2] + return type(rawexcinfo.value), rawexcinfo.value, tb + + return rawexcinfo # type:ignore[return-value] + else: + # Ideally we would use assert_never() here, but it is not available in all Python versions + # we support, plus we do not require `type_extensions` currently. + assert False, f"Unexpected Twisted version: {twisted_version}" diff --git a/.venv/lib/python3.11/site-packages/_pytest/unraisableexception.py b/.venv/lib/python3.11/site-packages/_pytest/unraisableexception.py new file mode 100644 index 0000000..0faca36 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/unraisableexception.py @@ -0,0 +1,163 @@ +from __future__ import annotations + +import collections +from collections.abc import Callable +import functools +import gc +import sys +import traceback +from typing import NamedTuple +from typing import TYPE_CHECKING +import warnings + +from _pytest.config import Config +from _pytest.nodes import Item +from _pytest.stash import StashKey +from _pytest.tracemalloc import tracemalloc_message +import pytest + + +if TYPE_CHECKING: + pass + +if sys.version_info < (3, 11): + from exceptiongroup import ExceptionGroup + + +# This is a stash item and not a simple constant to allow pytester to override it. +gc_collect_iterations_key = StashKey[int]() + + +def gc_collect_harder(iterations: int) -> None: + for _ in range(iterations): + gc.collect() + + +class UnraisableMeta(NamedTuple): + msg: str + cause_msg: str + exc_value: BaseException | None + + +unraisable_exceptions: StashKey[collections.deque[UnraisableMeta | BaseException]] = ( + StashKey() +) + + +def collect_unraisable(config: Config) -> None: + pop_unraisable = config.stash[unraisable_exceptions].pop + errors: list[pytest.PytestUnraisableExceptionWarning | RuntimeError] = [] + meta = None + hook_error = None + try: + while True: + try: + meta = pop_unraisable() + except IndexError: + break + + if isinstance(meta, BaseException): + hook_error = RuntimeError("Failed to process unraisable exception") + hook_error.__cause__ = meta + errors.append(hook_error) + continue + + msg = meta.msg + try: + warnings.warn(pytest.PytestUnraisableExceptionWarning(msg)) + except pytest.PytestUnraisableExceptionWarning as e: + # This except happens when the warning is treated as an error (e.g. `-Werror`). + if meta.exc_value is not None: + # Exceptions have a better way to show the traceback, but + # warnings do not, so hide the traceback from the msg and + # set the cause so the traceback shows up in the right place. + e.args = (meta.cause_msg,) + e.__cause__ = meta.exc_value + errors.append(e) + + if len(errors) == 1: + raise errors[0] + if errors: + raise ExceptionGroup("multiple unraisable exception warnings", errors) + finally: + del errors, meta, hook_error + + +def cleanup( + *, config: Config, prev_hook: Callable[[sys.UnraisableHookArgs], object] +) -> None: + # A single collection doesn't necessarily collect everything. + # Constant determined experimentally by the Trio project. 
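# Editor's note: freeing one batch of objects can make further objects
# collectable (e.g. a finalizer dropping the last reference to another
# cycle), which is why cleanup() calls gc_collect_harder() with several
# iterations rather than a single gc.collect().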
+ gc_collect_iterations = config.stash.get(gc_collect_iterations_key, 5) + try: + try: + gc_collect_harder(gc_collect_iterations) + collect_unraisable(config) + finally: + sys.unraisablehook = prev_hook + finally: + del config.stash[unraisable_exceptions] + + +def unraisable_hook( + unraisable: sys.UnraisableHookArgs, + /, + *, + append: Callable[[UnraisableMeta | BaseException], object], +) -> None: + try: + # we need to compute these strings here as they might change after + # the unraisablehook finishes and before the metadata object is + # collected by a pytest hook + err_msg = ( + "Exception ignored in" if unraisable.err_msg is None else unraisable.err_msg + ) + summary = f"{err_msg}: {unraisable.object!r}" + traceback_message = "\n\n" + "".join( + traceback.format_exception( + unraisable.exc_type, + unraisable.exc_value, + unraisable.exc_traceback, + ) + ) + tracemalloc_tb = "\n" + tracemalloc_message(unraisable.object) + msg = summary + traceback_message + tracemalloc_tb + cause_msg = summary + tracemalloc_tb + + append( + UnraisableMeta( + msg=msg, + cause_msg=cause_msg, + exc_value=unraisable.exc_value, + ) + ) + except BaseException as e: + append(e) + # Raising this will cause the exception to be logged twice, once in our + # collect_unraisable and once by the unraisablehook calling machinery + # which is fine - this should never happen anyway and if it does + # it should probably be reported as a pytest bug. + raise + + +def pytest_configure(config: Config) -> None: + prev_hook = sys.unraisablehook + deque: collections.deque[UnraisableMeta | BaseException] = collections.deque() + config.stash[unraisable_exceptions] = deque + config.add_cleanup(functools.partial(cleanup, config=config, prev_hook=prev_hook)) + sys.unraisablehook = functools.partial(unraisable_hook, append=deque.append) + + +@pytest.hookimpl(trylast=True) +def pytest_runtest_setup(item: Item) -> None: + collect_unraisable(item.config) + + +@pytest.hookimpl(trylast=True) +def pytest_runtest_call(item: Item) -> None: + collect_unraisable(item.config) + + +@pytest.hookimpl(trylast=True) +def pytest_runtest_teardown(item: Item) -> None: + collect_unraisable(item.config) diff --git a/.venv/lib/python3.11/site-packages/_pytest/warning_types.py b/.venv/lib/python3.11/site-packages/_pytest/warning_types.py new file mode 100644 index 0000000..5e78deb --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/warning_types.py @@ -0,0 +1,166 @@ +from __future__ import annotations + +import dataclasses +import inspect +from types import FunctionType +from typing import Any +from typing import final +from typing import Generic +from typing import TypeVar +import warnings + + +class PytestWarning(UserWarning): + """Base class for all warnings emitted by pytest.""" + + __module__ = "pytest" + + +@final +class PytestAssertRewriteWarning(PytestWarning): + """Warning emitted by the pytest assert rewrite module.""" + + __module__ = "pytest" + + +@final +class PytestCacheWarning(PytestWarning): + """Warning emitted by the cache plugin in various situations.""" + + __module__ = "pytest" + + +@final +class PytestConfigWarning(PytestWarning): + """Warning emitted for configuration issues.""" + + __module__ = "pytest" + + +@final +class PytestCollectionWarning(PytestWarning): + """Warning emitted when pytest is not able to collect a file or symbol in a module.""" + + __module__ = "pytest" + + +class PytestDeprecationWarning(PytestWarning, DeprecationWarning): + """Warning class for features that will be removed in a future 
version.""" + + __module__ = "pytest" + + +class PytestRemovedIn9Warning(PytestDeprecationWarning): + """Warning class for features that will be removed in pytest 9.""" + + __module__ = "pytest" + + +@final +class PytestExperimentalApiWarning(PytestWarning, FutureWarning): + """Warning category used to denote experiments in pytest. + + Use sparingly as the API might change or even be removed completely in a + future version. + """ + + __module__ = "pytest" + + @classmethod + def simple(cls, apiname: str) -> PytestExperimentalApiWarning: + return cls(f"{apiname} is an experimental api that may change over time") + + +@final +class PytestReturnNotNoneWarning(PytestWarning): + """ + Warning emitted when a test function returns a value other than ``None``. + + See :ref:`return-not-none` for details. + """ + + __module__ = "pytest" + + +@final +class PytestUnknownMarkWarning(PytestWarning): + """Warning emitted on use of unknown markers. + + See :ref:`mark` for details. + """ + + __module__ = "pytest" + + +@final +class PytestUnraisableExceptionWarning(PytestWarning): + """An unraisable exception was reported. + + Unraisable exceptions are exceptions raised in :meth:`__del__ ` + implementations and similar situations when the exception cannot be raised + as normal. + """ + + __module__ = "pytest" + + +@final +class PytestUnhandledThreadExceptionWarning(PytestWarning): + """An unhandled exception occurred in a :class:`~threading.Thread`. + + Such exceptions don't propagate normally. + """ + + __module__ = "pytest" + + +_W = TypeVar("_W", bound=PytestWarning) + + +@final +@dataclasses.dataclass +class UnformattedWarning(Generic[_W]): + """A warning meant to be formatted during runtime. + + This is used to hold warnings that need to format their message at runtime, + as opposed to a direct message. + """ + + category: type[_W] + template: str + + def format(self, **kwargs: Any) -> _W: + """Return an instance of the warning category, formatted with given kwargs.""" + return self.category(self.template.format(**kwargs)) + + +@final +class PytestFDWarning(PytestWarning): + """When the lsof plugin finds leaked fds.""" + + __module__ = "pytest" + + +def warn_explicit_for(method: FunctionType, message: PytestWarning) -> None: + """ + Issue the warning :param:`message` for the definition of the given :param:`method` + + this helps to log warnings for functions defined prior to finding an issue with them + (like hook wrappers being marked in a legacy mechanism) + """ + lineno = method.__code__.co_firstlineno + filename = inspect.getfile(method) + module = method.__module__ + mod_globals = method.__globals__ + try: + warnings.warn_explicit( + message, + type(message), + filename=filename, + module=module, + registry=mod_globals.setdefault("__warningregistry__", {}), + lineno=lineno, + ) + except Warning as w: + # If warnings are errors (e.g. -Werror), location information gets lost, so we add it to the message. 
+ raise type(w)(f"{w}\n at {filename}:{lineno}") from None diff --git a/.venv/lib/python3.11/site-packages/_pytest/warnings.py b/.venv/lib/python3.11/site-packages/_pytest/warnings.py new file mode 100644 index 0000000..806681a --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_pytest/warnings.py @@ -0,0 +1,152 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +from collections.abc import Generator +from contextlib import contextmanager +from contextlib import ExitStack +import sys +from typing import Literal +import warnings + +from _pytest.config import apply_warning_filters +from _pytest.config import Config +from _pytest.config import parse_warning_filter +from _pytest.main import Session +from _pytest.nodes import Item +from _pytest.terminal import TerminalReporter +from _pytest.tracemalloc import tracemalloc_message +import pytest + + +@contextmanager +def catch_warnings_for_item( + config: Config, + ihook, + when: Literal["config", "collect", "runtest"], + item: Item | None, + *, + record: bool = True, +) -> Generator[None]: + """Context manager that catches warnings generated in the contained execution block. + + ``item`` can be None if we are not in the context of an item execution. + + Each warning captured triggers the ``pytest_warning_recorded`` hook. + """ + config_filters = config.getini("filterwarnings") + cmdline_filters = config.known_args_namespace.pythonwarnings or [] + with warnings.catch_warnings(record=record) as log: + if not sys.warnoptions: + # If user is not explicitly configuring warning filters, show deprecation warnings by default (#2908). + warnings.filterwarnings("always", category=DeprecationWarning) + warnings.filterwarnings("always", category=PendingDeprecationWarning) + + # To be enabled in pytest 9.0.0. + # warnings.filterwarnings("error", category=pytest.PytestRemovedIn9Warning) + + apply_warning_filters(config_filters, cmdline_filters) + + # apply filters from "filterwarnings" marks + nodeid = "" if item is None else item.nodeid + if item is not None: + for mark in item.iter_markers(name="filterwarnings"): + for arg in mark.args: + warnings.filterwarnings(*parse_warning_filter(arg, escape=False)) + + try: + yield + finally: + if record: + # mypy can't infer that record=True means log is not None; help it. 
+ assert log is not None + + for warning_message in log: + ihook.pytest_warning_recorded.call_historic( + kwargs=dict( + warning_message=warning_message, + nodeid=nodeid, + when=when, + location=None, + ) + ) + + +def warning_record_to_str(warning_message: warnings.WarningMessage) -> str: + """Convert a warnings.WarningMessage to a string.""" + return warnings.formatwarning( + str(warning_message.message), + warning_message.category, + warning_message.filename, + warning_message.lineno, + warning_message.line, + ) + tracemalloc_message(warning_message.source) + + +@pytest.hookimpl(wrapper=True, tryfirst=True) +def pytest_runtest_protocol(item: Item) -> Generator[None, object, object]: + with catch_warnings_for_item( + config=item.config, ihook=item.ihook, when="runtest", item=item + ): + return (yield) + + +@pytest.hookimpl(wrapper=True, tryfirst=True) +def pytest_collection(session: Session) -> Generator[None, object, object]: + config = session.config + with catch_warnings_for_item( + config=config, ihook=config.hook, when="collect", item=None + ): + return (yield) + + +@pytest.hookimpl(wrapper=True) +def pytest_terminal_summary( + terminalreporter: TerminalReporter, +) -> Generator[None]: + config = terminalreporter.config + with catch_warnings_for_item( + config=config, ihook=config.hook, when="config", item=None + ): + return (yield) + + +@pytest.hookimpl(wrapper=True) +def pytest_sessionfinish(session: Session) -> Generator[None]: + config = session.config + with catch_warnings_for_item( + config=config, ihook=config.hook, when="config", item=None + ): + return (yield) + + +@pytest.hookimpl(wrapper=True) +def pytest_load_initial_conftests( + early_config: Config, +) -> Generator[None]: + with catch_warnings_for_item( + config=early_config, ihook=early_config.hook, when="config", item=None + ): + return (yield) + + +def pytest_configure(config: Config) -> None: + with ExitStack() as stack: + stack.enter_context( + catch_warnings_for_item( + config=config, + ihook=config.hook, + when="config", + item=None, + # this disables recording because the terminalreporter has + # finished by the time it comes to reporting logged warnings + # from the end of config cleanup. So for now, this is only + # useful for setting a warning filter with an 'error' action. + record=False, + ) + ) + config.addinivalue_line( + "markers", + "filterwarnings(warning): add a warning filter to the given test. " + "see https://docs.pytest.org/en/stable/how-to/capture-warnings.html#pytest-mark-filterwarnings ", + ) + config.add_cleanup(stack.pop_all().close) diff --git a/.venv/lib/python3.11/site-packages/_yaml/__init__.py b/.venv/lib/python3.11/site-packages/_yaml/__init__.py new file mode 100644 index 0000000..7baa8c4 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/_yaml/__init__.py @@ -0,0 +1,33 @@ +# This is a stub package designed to roughly emulate the _yaml +# extension module, which previously existed as a standalone module +# and has been moved into the `yaml` package namespace. +# It does not perfectly mimic its old counterpart, but should get +# close enough for anyone who's relying on it even when they shouldn't. 
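+# (As the deprecation warning below suggests, consumer code should instead
+# import the LibYAML-based classes from `yaml` directly and fall back to the
+# pure-Python ones when they are unavailable, for example:
+#     try:
+#         from yaml import CLoader as Loader, CDumper as Dumper
+#     except ImportError:
+#         from yaml import Loader, Dumper
+# rather than relying on this stub.)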
+import yaml
+
+# in some circumstances, the yaml module we imported may be from a different version, so we need
+# to tread carefully when poking at it here (it may not have the attributes we expect)
+if not getattr(yaml, '__with_libyaml__', False):
+    from sys import version_info
+
+    exc = ModuleNotFoundError if version_info >= (3, 6) else ImportError
+    raise exc("No module named '_yaml'")
+else:
+    from yaml._yaml import *
+    import warnings
+    warnings.warn(
+        'The _yaml extension module is now located at yaml._yaml'
+        ' and its location is subject to change. To use the'
+        ' LibYAML-based parser and emitter, import from `yaml`:'
+        ' `from yaml import CLoader as Loader, CDumper as Dumper`.',
+        DeprecationWarning
+    )
+    del warnings
+    # Don't `del yaml` here because yaml is actually an existing
+    # namespace member of _yaml.
+
+__name__ = '_yaml'
+# If the module is top-level (i.e. not a part of any specific package)
+# then the attribute should be set to ''.
+# https://docs.python.org/3.8/library/types.html
+__package__ = ''
diff --git a/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/INSTALLER b/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/METADATA b/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/METADATA
new file mode 100644
index 0000000..28026c3
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/METADATA
@@ -0,0 +1,212 @@
+Metadata-Version: 2.4
+Name: aboutcode-toolkit
+Version: 11.1.1
+Summary: AboutCode-toolkit is a tool to document the provenance (origin and license) of third-party software using small text files. Collect inventories and generate attribution documentation.
+Home-page: https://github.com/nexB/aboutcode-toolkit
+Author: nexB. Inc.
and others
+Author-email: info@aboutcode.org
+License: Apache-2.0
+Keywords: license,about,metadata,package,copyright,attribution,software,inventory,open source,sca,SBOM,spdx
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Topic :: Software Development
+Classifier: Topic :: Software Development :: Documentation
+Classifier: Topic :: Software Development :: Quality Assurance
+Classifier: Topic :: System :: Software Distribution
+Classifier: Topic :: Utilities
+Requires-Python: >=3.9
+Description-Content-Type: text/x-rst
+License-File: apache-2.0.LICENSE
+License-File: NOTICE
+License-File: AUTHORS.rst
+License-File: CHANGELOG.rst
+License-File: CODE_OF_CONDUCT.rst
+Requires-Dist: attrs
+Requires-Dist: boolean.py>=3.5
+Requires-Dist: certifi
+Requires-Dist: click
+Requires-Dist: jinja2
+Requires-Dist: license_expression>=0.94
+Requires-Dist: openpyxl
+Requires-Dist: packageurl_python>=0.9.0
+Requires-Dist: requests
+Requires-Dist: saneyaml
+Provides-Extra: testing
+Requires-Dist: pytest!=7.0.0,>=6; extra == "testing"
+Requires-Dist: pytest-xdist>=2; extra == "testing"
+Requires-Dist: twine; extra == "testing"
+Requires-Dist: black; extra == "testing"
+Requires-Dist: isort; extra == "testing"
+Provides-Extra: docs
+Requires-Dist: Sphinx>=5.0.2; extra == "docs"
+Requires-Dist: sphinx-rtd-theme>=1.0.0; extra == "docs"
+Requires-Dist: sphinx-reredirects>=0.1.2; extra == "docs"
+Requires-Dist: doc8>=0.11.2; extra == "docs"
+Dynamic: license-file
+
+=================
+AboutCode Toolkit
+=================
+
+Introduction
+------------
+The AboutCode Toolkit and ABOUT files provide a simple way to document the
+origin, license, usage and other important or interesting information about
+third-party software components that you use in your project.
+
+You start by storing ABOUT files (small YAML-formatted text files with field/value pairs)
+side-by-side with each of the third-party software components you use.
+Each ABOUT file documents origin and license for one software component.
+There are many examples of ABOUT files (valid or invalid) in the testdata/
+directory of the whole repository.
+
+The current version of the AboutCode Toolkit can read these ABOUT files so that you
+can collect and validate the inventory of third-party components that you use.
+
+In addition, this tool is able to generate attribution notices and
+identify redistributable source code used in your project to help you comply
+with open source license conditions.
+
+This version of the AboutCode Toolkit follows the ABOUT specification version 3.3.2 at:
+https://aboutcode-toolkit.readthedocs.io/en/latest/specification.html
+
+
+Build and tests status
+----------------------
+
++-------+-----------------+--------------+
+|Branch | **Linux/macOS** | **Windows**  |
++=======+=================+==============+
+|Master | |master-posix|  | |master-win| |
++-------+-----------------+--------------+
+|Develop| |devel-posix|   | |devel-win|  |
++-------+-----------------+--------------+
+
+
+REQUIREMENTS
+------------
+The AboutCode Toolkit is tested only with Python 3.9 or above, on Linux, Mac and Windows.
+You will need to install a Python interpreter if you do not have one already
+installed.
+
+On Linux and Mac, Python is typically pre-installed.
To verify which
+version may be pre-installed, open a terminal and type:
+
+    python --version
+
+Note
+~~~~
+    Debian has decided that distutils is not a core python package, so it is not included in recent versions of Debian and Debian-based OSes.
+    A solution is to run: `sudo apt install python3-distutils`
+
+On Windows or Mac, you can download the latest Python here:
+    https://www.python.org/downloads/
+
+Download the .msi installer for Windows or the .dmg archive for Mac.
+Open and run the installer using all the default options.
+
+INSTALLATION
+------------
+Check out or download and extract the AboutCode Toolkit from:
+    https://github.com/nexB/aboutcode-toolkit/
+
+To install all the needed dependencies in a virtualenv, run (on posix):
+    ./configure
+or on windows:
+    configure
+
+ACTIVATE the VIRTUALENV
+-----------------------
+To activate the virtualenv, run (on posix):
+    source venv/bin/activate
+or on windows:
+    venv\\bin\\activate
+
+
+DEACTIVATE the VIRTUALENV
+-------------------------
+To deactivate the virtualenv, run (on both posix and windows):
+    deactivate
+
+
+VERSIONING SCHEMA
+-----------------
+Starting at AboutCode version 4.0.0, the AboutCode Toolkit follows SemVer for its versioning schema,
+i.e. the MAJOR.MINOR.PATCH format:
+ 1. MAJOR version when making incompatible API changes,
+ 2. MINOR version when adding functionality in a backwards compatible manner, and
+ 3. PATCH version when making backwards compatible bug fixes.
+
+
+REFERENCE
+---------
+See https://aboutcode-toolkit.readthedocs.io/en/latest/ for documentation.
+
+See https://aboutcode-toolkit.readthedocs.io/en/latest/reference.html for reference.
+
+TESTS and DEVELOPMENT
+---------------------
+To install all the needed development dependencies, run (on posix):
+    ./configure --dev
+or on windows:
+    configure --dev
+
+To verify that everything works fine, you can run the test suite with:
+    pytest
+
+
+CLEAN BUILD AND INSTALLED FILES
+-------------------------------
+To clean the built and installed files, run (on posix):
+    ./configure --clean
+or on windows:
+    configure --clean
+
+
+HELP and SUPPORT
+----------------
+If you have a question or find a bug, enter a ticket at:
+
+    https://github.com/nexB/aboutcode-toolkit
+
+For issues, you can use:
+
+    https://github.com/nexB/aboutcode-toolkit/issues
+
+
+SOURCE CODE
+-----------
+The AboutCode Toolkit is available through GitHub. For the latest version visit:
+    https://github.com/nexB/aboutcode-toolkit
+
+
+HACKING
+-------
+We accept pull requests provided under the same license as this tool.
+By contributing, you agree to the Developer Certificate of Origin:
+http://developercertificate.org/
+
+
+LICENSE
+-------
+The AboutCode Toolkit is released under the Apache 2.0 license.
+See (of course) the about.ABOUT file for details.
+
+
+.. |master-posix| image:: https://api.travis-ci.org/nexB/aboutcode-toolkit.png?branch=master
+   :target: https://travis-ci.org/nexB/aboutcode-toolkit
+   :alt: Linux Master branch tests status
+.. |devel-posix| image:: https://api.travis-ci.org/nexB/aboutcode-toolkit.png?branch=develop
+   :target: https://travis-ci.org/nexB/aboutcode-toolkit
+   :alt: Linux Develop branch tests status
+
+.. |master-win| image:: https://ci.appveyor.com/api/projects/status/uwj2gh8i9ga1mqwn/branch/master?png=true
+   :target: https://ci.appveyor.com/project/nexB/aboutcode-toolkit
+   :alt: Windows Master branch tests status
+..
|devel-win| image:: https://ci.appveyor.com/api/projects/status/uwj2gh8i9ga1mqwn/branch/develop?png=true + :target: https://ci.appveyor.com/project/nexB/aboutcode-toolkit + :alt: Windows Develop branch tests status diff --git a/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/RECORD b/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/RECORD new file mode 100644 index 0000000..6d4d915 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/RECORD @@ -0,0 +1,40 @@ +../../../bin/about,sha256=m4lnwdlDD6lbw7IYja0bsYnUShDujZmRqNQmX6LWBD0,281 +aboutcode_toolkit-11.1.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +aboutcode_toolkit-11.1.1.dist-info/METADATA,sha256=8w3kUpqqmZRN4obTPyOaOn3JNzxflIb9UjewW3Jv5FU,7336 +aboutcode_toolkit-11.1.1.dist-info/RECORD,, +aboutcode_toolkit-11.1.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +aboutcode_toolkit-11.1.1.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91 +aboutcode_toolkit-11.1.1.dist-info/entry_points.txt,sha256=CZT0HhqtyGSDg-3k31FJ-dBJCZWVriiAriiWezqkC54,50 +aboutcode_toolkit-11.1.1.dist-info/licenses/AUTHORS.rst,sha256=ZjbduGIWwLqKhp-kADJzkum26DpGrYiMS8TkJMtxTN8,88 +aboutcode_toolkit-11.1.1.dist-info/licenses/CHANGELOG.rst,sha256=grSi1Cby0Fl23z1zyfylOSQW1V2NqpBeGoQ9TfCl73w,12767 +aboutcode_toolkit-11.1.1.dist-info/licenses/CODE_OF_CONDUCT.rst,sha256=xXDTFpYY3Y7JoxrWADihieUvhGlwJc_oxsbmLJaBvE4,3422 +aboutcode_toolkit-11.1.1.dist-info/licenses/NOTICE,sha256=SM7ELF-ckyHeexaDSX7P2EqDbjYkb6S8c_CWcsdMyXM,752 +aboutcode_toolkit-11.1.1.dist-info/licenses/apache-2.0.LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558 +aboutcode_toolkit-11.1.1.dist-info/top_level.txt,sha256=AZqc0bTnzp-gGSKh0dHQ6HyO7yAidVQFShB88ntF_yA,14 +attributecode/__init__.py,sha256=Gp1kQdT1M5P9Sqtsrqu5k4ICYfaH81u0jXC90hHPp9E,3524 +attributecode/__main__.py,sha256=vQg4laOfHZB8a66tnRd2sjgwnTDPPN7dF3oYJJErFQg,921 +attributecode/__pycache__/__init__.cpython-311.pyc,, +attributecode/__pycache__/__main__.cpython-311.pyc,, +attributecode/__pycache__/api.cpython-311.pyc,, +attributecode/__pycache__/attrib.cpython-311.pyc,, +attributecode/__pycache__/attrib_util.cpython-311.pyc,, +attributecode/__pycache__/cmd.cpython-311.pyc,, +attributecode/__pycache__/gen.cpython-311.pyc,, +attributecode/__pycache__/licenses.cpython-311.pyc,, +attributecode/__pycache__/model.cpython-311.pyc,, +attributecode/__pycache__/transform.cpython-311.pyc,, +attributecode/__pycache__/util.cpython-311.pyc,, +attributecode/api.py,sha256=kjMEd4KyVQBT5tKWtShry2px0uoD8klS0OqUZEpuTZI,3343 +attributecode/attrib.py,sha256=LO9jPbAJcuo3L9FLlTUnt3h1HdjfeA_EiwUDqkC7X-E,13758 +attributecode/attrib_util.py,sha256=Jf3CeUQtO--RIlRWIPgICp6rUpILChgyY_dO0yugHkE,4274 +attributecode/cmd.py,sha256=G_NGNS97Lf6GxhU5FRG9ai0ZxYdtth8XeGfav_ak1R0,36036 +attributecode/gen.py,sha256=nI-0MNWBePEZwUFpUBDYEt4Tym3K1q6F_ZwSnVCZJ0g,14291 +attributecode/licenses.py,sha256=jSDAc9VlMhMwS_ciwdkvBFNFUMRPO2uxojT6SVctCKA,2050 +attributecode/model.py,sha256=6AqpGZvhSIRnk68l8x_Rz-wwYPwsB-mJlpUdRyAdGhQ,86303 +attributecode/templates/default_html.template,sha256=Gguh-iEGZ7WPWgh6TXjEDe20ixz0SVljtqMlSHjexRE,3368 +attributecode/templates/default_json.template,sha256=XWkJ49WUZhOTlU11ETDG6zsjClvwN_a6vzwnZg6KPbQ,768 +attributecode/templates/license_ref.template,sha256=Kmrdw1aCAGQNKhuLqubiEKOL6DBZO8zJm2xIgZaZRc4,2631 +attributecode/templates/list.csv,sha256=60O3BXox2NwRQPI8k9-LxfnactTLuw1B1tuqx0WkWOw,165 
+attributecode/templates/scancode_html.template,sha256=_YhH0dOh2zDNlubXXyCi_CgglCpfz_CrfzFB4VXNZ1U,3296
+attributecode/transform.py,sha256=1hEcJiip6rrPGVj4viYxR6pjGsnFdAsCCjSZtphwlhY,14865
+attributecode/util.py,sha256=VeCfVamZIqnBtUPhv1tErxihEmtOlz5Ki5r6i8tZiCg,28425
diff --git a/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/REQUESTED b/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/REQUESTED
new file mode 100644
index 0000000..e69de29
diff --git a/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/WHEEL b/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/WHEEL
new file mode 100644
index 0000000..1eb3c49
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: setuptools (78.1.0)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/entry_points.txt b/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/entry_points.txt
new file mode 100644
index 0000000..052318b
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/entry_points.txt
@@ -0,0 +1,2 @@
+[console_scripts]
+about = attributecode.cmd:about
diff --git a/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/licenses/AUTHORS.rst b/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/licenses/AUTHORS.rst
new file mode 100644
index 0000000..2e91982
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/licenses/AUTHORS.rst
@@ -0,0 +1,3 @@
+The following organizations or individuals have contributed to this repo:
+
+- nexB, Inc.
diff --git a/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/licenses/CHANGELOG.rst b/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/licenses/CHANGELOG.rst
new file mode 100644
index 0000000..3bd5cb0
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/licenses/CHANGELOG.rst
@@ -0,0 +1,433 @@
+==============================
+Changelog
+==============================
+
+2025-03-31
+    Release 11.1.1
+
+    * Deleted unused files
+    * Updated the AppVeyor configuration to use Python 3.9
+    * Modified the PyPI release script to utilize upload-artifact@v4
+
+
+2025-03-28
+    Release 11.1.0
+
+    * Drop support for Python versions earlier than 3.9
+    * Add the ability to "exclude" a path in check and inventory #583
+    * Add support for the special '-' FILE to print to the screen/stdout in inventory #584
+
+
+2024-09-16
+    Release 11.0.2
+
+    * Fixed the installation issues with Docker (#571, #572)
+
+
+2024-08-06
+    Release 11.0.1
+
+    * Update doc formatting
+    * Added fields type and types description into the spec
+    * Update link references of ownership from nexB to aboutcode-org
+
+
+2024-07-15
+    Release 11.0.0
+
+    * Add character support (at most 2 characters) for the `attribute` field
+    * Strip empty newline characters when loading an inventory
+    * Catch invalid license_expression
+    * Update the specification to 3.3.2
+    * Support "declared_license_expression" and "other_license_expression"
+    * Updated "about_resource" to be an optional field
+    * Updated the spec to v4.0.0 as `about_resource` moved from mandatory to optional
+
+
+2023-09-25
+    Release 10.1.0
+
+    * Fixed `transform` with nested lists #531
+    * Added curl dependency in Dockerfile #532
+    * Introduce spdx_license_expression
+    * Ability to transform the spdx license key from
spdx_license_expression to
+      license_expression (i.e. generate attribution with
+      spdx_license_expression) #513
+    * Ability to configure the proxy settings #533
+    * Fixed licenses issue #534
+
+2023-08-20
+    Release 10.0.0
+
+    * Fixed an error in load_json in util.py
+    * Code cleanup
+    * Work with SCTK version 32 and later (drop support for SCTK version 31 and earlier)
+    * Implement the `--scancode` option for `gen`
+
+
+2023-07-14
+    Release 9.0.0
+
+    * The tool now shows which worksheet (for .xlsx input) it is working on
+    * Error handling if the defined worksheet does not exist
+    * Adopt the 3.3.1 specification: introduce ``ignored_resources``
+
+
+2023-03-09
+    Release 8.0.0
+
+    * Fixed the transform code for xlsx and json
+    * Remove an irrelevant error for attrib
+    * Add support to identify the worksheet name for XLSX input
+    * The severity level for "contains illegal name characters (or empty spaces) and is ignored" is changed from ERROR to WARNING
+    * Remove the limitation to ASCII only
+    * Drop support for python3.6
+    * Update valid characters for file/path names
+    * Adopt the 3.3.0 specification
+
+
+2022-10-24
+    Release 7.2.0
+
+    * Add an option to validate `license_expression` in the `check` command
+
+
+2022-10-24
+    Release 7.1.1
+
+    * This new release has no feature changes.
+
+
+2022-10-24
+    Release 7.1.0
+
+    * Fixed a version mismatch (https://github.com/nexB/aboutcode-toolkit/issues/510)
+    * Improve `check` performance (https://github.com/nexB/aboutcode-toolkit/issues/511)
+    * Relax the requirement to have the same format for input and output for `transform`
+    * Collect and handle the "matched_text" from the `--license-text` option of the scancode-toolkit
+
+
+2022-03-21
+    Release 7.0.2
+
+    * Relax dependency constraints
+    * Use the latest skeleton and settings
+
+
+2022-03-21
+    Release 7.0.1
+
+    * Bump openpyxl to 3.0.9
+    * Better handling of invalid API URLs
+    * Better formatting for the default HTML template
+
+2022-03-01
+    Release 7.0.0
+
+    * Add '@' as a supported character for filenames #451
+    * Add support to collect redistributable sources #22
+    * Handle trailing spaces in field names during `transform` #456
+    * Remove the restriction to Python 2.7 only on Windows #453
+    * Documentation updated
+    * Code enhancement
+    * Remove thirdparty/
+    * Update configuration scripts
+    * Use readthedocs for documentation
+    * Add a Dockerfile to run aboutcode with Docker
+    * Add a new option to choose whether to extract licenses from the ScanCode LicenseDB or the DJC License Library
+    * Add the ability to transform XLSX files
+    * Support the XLSX file format for `inventory`, `gen` and `attrib`
+    * Add 'spdx_license_key' support
+    * Add an option to save the error log in the `check` command
+    * New `gen_license` option
+    * Bump PyYAML to 6.0
+    * Add '%' as a supported character
+    * Update the default template
+    * All errors are logged if and only if the `verbose` option is set. Otherwise, only 'Critical' and 'Warning' errors will be shown/logged
+    * Ability to generate an attribution notice directly from an input inventory
+    * Remove the restriction of requiring the 'about_resource' field in the input when performing `attrib` from an inventory
+
+2021-04-02
+    Release 6.0.0
+
+    * This new release has no feature changes.
+    * It has relaxed PyYAML dependencies and drops Python 2 support
+
+2020-09-01
+    Release 5.1.0
+
+    * Add support for `package_url` #396
+    * Fixed #443 and #444, issues with multiple licenses/license_files
+    * Fixed #442, no special characters allowed for `license_key`, `license_name` and `license_expression`
+    * Fixed #446, better error handling
+
+2020-08-11
+    Release 5.0.0
+
+    * Enhance `transform` to also work with JSON files
+    * Update the transform code (see #427 and #428)
+    * Fixed #431 - error handling for empty "_file" fields
+    * Fixed #432 - handled the UTF-8 variant invented by Microsoft
+    * Fixed #433 - the problem was caused by the different multi-license file formats between JSON and CSV (CSV with '\n' line breaks)
+    * Fixed #436 - issue about copying with the `--reference` option
+    * Fixed #396 - support for alternative output for Android
+
+2020-05-05
+    Release 4.0.2
+
+    * Upgrade the license-expression library to v1.2
+    * Fix the missing `multi_sort` filter for Jinja2
+    * Update the help text for `--vartext`
+
+2019-10-17
+    Release 4.0.1
+
+    * Declare that the project follows SemVer for its versioning schema
+    * Update REFERENCE.rst and README.rst
+    * Update the license-expression library
+
+
+2019-10-09
+
+    Release 4.0.0
+
+    * Support filenames/paths with special characters #310 #378 #392
+    * Update the ABOUT file format to match the specification
+    * Log the version of AbcTK that was used #397
+    * Fix the licenses (key, name, file) not-in-sync issue #406
+    * Correct invalid messages for boolean fields #403
+    * Remove the `about_file_path` key/column from input/output #364
+    * Use ',' to support multiple files #404
+    * Fix bugs in `transform` #408, #412
+
+2018-11-15
+
+    Release 3.3.0
+
+    * Update the list of common license keys
+    * New UrlListField introduced for lists of URLs
+    * The UrlField now takes only a single URL value
+    * The owner is now a StringField instead of a ListField
+    * Format the ordering of the generated ABOUT file (see https://github.com/nexB/aboutcode-toolkit/issues/349#issuecomment-438871444)
+    * '+' and '(' and ')' are now supported in license_expression
+    * The key 'about_resource_path' is removed
+    * Revert the requirement of the 'name' field
+    * Add a new supported 'internal_use_only' key
+
+2018-10-23
+
+    Release 3.2.2
+
+    * Fix the version number
+    * The `name` field is no longer a required field
+
+2018-10-22
+
+    Release 3.2.1
+
+    * The 'license' field has now become 'license_key'
+    * Support for multiple licenses
+    * No support for referencing multiple resources
+    * Fix the incorrect boolean field behaviors
+    * Display the number of errors/warnings in the error log
+
+2018-09-19
+
+    Release 3.1.3
+
+    * Minor update
+
+2018-09-19
+
+    Release 3.1.2
+
+    * New `--vartext` option for `attrib`
+    * Add support for `checksum_sha256` and `author_file`
+    * The `check` command will not count INFO messages as errors when `--verbose` is set
+    * Update `track_change` to `track_changes`
+    * New `--filter` and `--mapping-output` options for `inventory`
+
+2018-6-25
+
+    Release 3.1.1
+
+    * No support for multiple occurrences of keys in the input
+    * Updated the specification document
+    * Fixed a bug that caused a template processing error in attrib
+
+    etc...
+
+
+2018-6-8 Chin-Yeung Li
+
+    Release 3.1.0
+
+    * Fixed JSON input from AboutCode Manager export and ScanCode output
+    * Added a new option `mapping-file` to support using a custom file for mapping
+    * Change the name of the option `--show-all` to `--verbose`
+    * Better error handling for copying files with permission issues
+    * Support timestamps in attribution output
+
+    etc...
+
+
+2017-11-17 Chin-Yeung Li
+
+    Release 3.0.*
+
+    ABOUT files are now YAML formatted.
+    Supported license expressions.
+    Supported JSON input and output formats: https://github.com/nexB/aboutcode-toolkit/issues/246 and https://github.com/nexB/aboutcode-toolkit/issues/277
+    Support Python 3: https://github.com/nexB/aboutcode-toolkit/issues/280
+    Refined help texts
+    Refined USAGE texts
+    Refined SPECs
+
+    Input key changes:
+    ==================
+    `about_file` is replaced by `about_file_path`
+    `dje_license_key` is replaced by `license_expression`
+    `version` is no longer a required field
+    `home_url` is now `homepage_url`
+
+    API updates:
+    ============
+    - Break down the 3 major functions: `inventory`, `genabout` and `genattrib` into 3 subcommands:
+      i.e.
+      `about inventory`, `about generate` and `about attrib`
+
+    - A new `check` subcommand: https://github.com/nexB/aboutcode-toolkit/issues/281
+
+    - Some option changes:
+      `--extract_license` becomes `--fetch-license`
+      `--license_text_location` becomes `--license-notice-text-location`
+
+    etc...
+
+
+2016-06-27 Chin-Yeung Li
+
+    Release 2.3.2
+
+    * Documentation updates
+
+
+2016-03-29 Philippe Ombredanne
+
+    Release 2.3.1
+
+    * Various minor bug fixes and improvements
+    * Support for the latest DejaCode API if you use DejaCode
+
+
+2016-03-28 Philippe Ombredanne
+
+    Release 2.3.0
+
+    * Various minor bug fixes and improvements
+    * Support for the latest DejaCode API if you use DejaCode
+
+
+2015-10-23 Chin-Yeung Li
+
+    Release 2.2.0
+
+    * Improved CLI error messages
+    * Fixed the filtering of dicts with empty values.
+    * Refined help texts
+    * Updated configure script
+    * Refactorings and code simplifications
+    * Fixed a misleading error message when using an invalid api_url
+
+
+2015-10-09 Chin-Yeung Li
+
+    Release 2.1.0
+
+    * Minor code refactoring
+    * Handle long path errors on Windows OS when using genattrib with a zip
+
+
+2015-09-29 Chin-Yeung Li
+
+    Release 2.0.4
+
+    * Added support to run genattrib with a zip file, and tests
+    * Display a "Completed" message once the generation is completed
+
+
+2015-08-01 Chin-Yeung Li
+
+    Release 2.0.3
+
+    * Fix a bug when using genattrib.py on Windows OS
+    * Display the version when running about.py, genabout.py and genattrib.py
+
+
+2015-07-07 Chin-Yeung Li
+
+    Release 2.0.2
+
+    * Handle input encoding issues
+    * Better error handling
+    * Writing to and reading from Windows OS with paths > 255 chars
+
+
+2015-06-09 Chin-Yeung Li
+
+    Release 2.0.1
+
+    * Configuration script fixes and updates to the basic documentation.
+
+
+2015-05-05 Chin-Yeung Li
+
+    Release 2.0.0
+
+    * Breaking API changes:
+
+      * the dje_license field has been renamed to dje_license_key
+      * when a dje_license_key is present, a new dje_license_url will be
+        reported when fetching data from the DejaCode API.
+      * In genabout, the '--all_in_one' command line option has been removed.
+        It was not well specified and did not work as advertised.
+
+    * in genattrib:
+
+      * the Component List is now optional.
+      * there is a new experimental '--verification_location' command line
+        option. This option will be removed in a future version. Do not use
+        it.
+      * the '+' character is now supported in file names.
+      * several bugs have been fixed.
+      * error handling and error and warning reporting have been improved.
+    * New documentation in doc: UsingAboutCodetoDocumentYourSoftwareAssets.pdf
+
+
+2014-11-05 Philippe Ombredanne
+
+    Release 1.0.2
+
+    * Minor bug fixes and improved error reporting.
+ + +2014-11-03 Philippe Ombredanne + + Release 1.0.1 + + * Minor bug fixes, such as extraneous debug printouts. + + +2014-10-31 Philippe Ombredanne + + Release 1.0.0 + + * Some changes in the spec, such as supporting only text in external + files. + * Several refinements including support for common licenses. + +2014-06-24 Chin-Yeung Li + + Release 0.8.1 + + * Initial release with minimal capabilities to read and validate + ABOUT files format 0.8.0 and output a CSV inventory. diff --git a/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/licenses/CODE_OF_CONDUCT.rst b/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/licenses/CODE_OF_CONDUCT.rst new file mode 100644 index 0000000..590ba19 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/licenses/CODE_OF_CONDUCT.rst @@ -0,0 +1,86 @@ +Contributor Covenant Code of Conduct +==================================== + +Our Pledge +---------- + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our +project and our community a harassment-free experience for everyone, +regardless of age, body size, disability, ethnicity, gender identity and +expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +Our Standards +------------- + +Examples of behavior that contributes to creating a positive environment +include: + +- Using welcoming and inclusive language +- Being respectful of differing viewpoints and experiences +- Gracefully accepting constructive criticism +- Focusing on what is best for the community +- Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +- The use of sexualized language or imagery and unwelcome sexual + attention or advances +- Trolling, insulting/derogatory comments, and personal or political + attacks +- Public or private harassment +- Publishing others’ private information, such as a physical or + electronic address, without explicit permission +- Other conduct which could reasonably be considered inappropriate in a + professional setting + +Our Responsibilities +-------------------- + +Project maintainers are responsible for clarifying the standards of +acceptable behavior and are expected to take appropriate and fair +corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, +or reject comments, commits, code, wiki edits, issues, and other +contributions that are not aligned to this Code of Conduct, or to ban +temporarily or permanently any contributor for other behaviors that they +deem inappropriate, threatening, offensive, or harmful. + +Scope +----- + +This Code of Conduct applies both within project spaces and in public +spaces when an individual is representing the project or its community. +Examples of representing a project or community include using an +official project e-mail address, posting via an official social media +account, or acting as an appointed representative at an online or +offline event. Representation of a project may be further defined and +clarified by project maintainers. 
+ +Enforcement +----------- + +Instances of abusive, harassing, or otherwise unacceptable behavior may +be reported by contacting the project team at pombredanne@gmail.com +or on the Gitter chat channel at https://gitter.im/aboutcode-org/discuss . +All complaints will be reviewed and investigated and will result in a +response that is deemed necessary and appropriate to the circumstances. +The project team is obligated to maintain confidentiality with regard to +the reporter of an incident. Further details of specific enforcement +policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in +good faith may face temporary or permanent repercussions as determined +by other members of the project’s leadership. + +Attribution +----------- + +This Code of Conduct is adapted from the `Contributor Covenant`_ , +version 1.4, available at +https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +.. _Contributor Covenant: https://www.contributor-covenant.org diff --git a/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/licenses/NOTICE b/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/licenses/NOTICE new file mode 100644 index 0000000..65936b2 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/licenses/NOTICE @@ -0,0 +1,19 @@ +# +# Copyright (c) nexB Inc. and others. +# SPDX-License-Identifier: Apache-2.0 +# +# Visit https://aboutcode.org and https://github.com/nexB/ for support and download. +# ScanCode is a trademark of nexB Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/licenses/apache-2.0.LICENSE b/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/licenses/apache-2.0.LICENSE new file mode 100644 index 0000000..29f81d8 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/licenses/apache-2.0.LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/top_level.txt b/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/top_level.txt new file mode 100644 index 0000000..6212f86 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/aboutcode_toolkit-11.1.1.dist-info/top_level.txt @@ -0,0 +1 @@ +attributecode diff --git a/.venv/lib/python3.11/site-packages/attr/__init__.py b/.venv/lib/python3.11/site-packages/attr/__init__.py new file mode 100644 index 0000000..5c6e065 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attr/__init__.py @@ -0,0 +1,104 @@ +# SPDX-License-Identifier: MIT + +""" +Classes Without Boilerplate +""" + +from functools import partial +from typing import Callable, Literal, Protocol + +from . import converters, exceptions, filters, setters, validators +from ._cmp import cmp_using +from ._config import get_run_validators, set_run_validators +from ._funcs import asdict, assoc, astuple, has, resolve_types +from ._make import ( + NOTHING, + Attribute, + Converter, + Factory, + _Nothing, + attrib, + attrs, + evolve, + fields, + fields_dict, + make_class, + validate, +) +from ._next_gen import define, field, frozen, mutable +from ._version_info import VersionInfo + + +s = attributes = attrs +ib = attr = attrib +dataclass = partial(attrs, auto_attribs=True) # happy Easter ;) + + +class AttrsInstance(Protocol): + pass + + +NothingType = Literal[_Nothing.NOTHING] + +__all__ = [ + "NOTHING", + "Attribute", + "AttrsInstance", + "Converter", + "Factory", + "NothingType", + "asdict", + "assoc", + "astuple", + "attr", + "attrib", + "attributes", + "attrs", + "cmp_using", + "converters", + "define", + "evolve", + "exceptions", + "field", + "fields", + "fields_dict", + "filters", + "frozen", + "get_run_validators", + "has", + "ib", + "make_class", + "mutable", + "resolve_types", + "s", + "set_run_validators", + "setters", + "validate", + "validators", +] + + +def _make_getattr(mod_name: str) -> Callable: + """ + Create a metadata proxy for packaging information that uses *mod_name* in + its warnings and errors. + """ + + def __getattr__(name: str) -> str: + if name not in ("__version__", "__version_info__"): + msg = f"module {mod_name} has no attribute {name}" + raise AttributeError(msg) + + from importlib.metadata import metadata + + meta = metadata("attrs") + + if name == "__version_info__": + return VersionInfo._from_version_string(meta["version"]) + + return meta["version"] + + return __getattr__ + + +__getattr__ = _make_getattr(__name__) diff --git a/.venv/lib/python3.11/site-packages/attr/__init__.pyi b/.venv/lib/python3.11/site-packages/attr/__init__.pyi new file mode 100644 index 0000000..8d78fa1 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attr/__init__.pyi @@ -0,0 +1,389 @@ +import enum +import sys + +from typing import ( + Any, + Callable, + Generic, + Literal, + Mapping, + Protocol, + Sequence, + TypeVar, + overload, +) + +# `import X as X` is required to make these public +from . import converters as converters +from . import exceptions as exceptions +from . import filters as filters +from . import setters as setters +from . 
import validators as validators +from ._cmp import cmp_using as cmp_using +from ._typing_compat import AttrsInstance_ +from ._version_info import VersionInfo +from attrs import ( + define as define, + field as field, + mutable as mutable, + frozen as frozen, + _EqOrderType, + _ValidatorType, + _ConverterType, + _ReprArgType, + _OnSetAttrType, + _OnSetAttrArgType, + _FieldTransformer, + _ValidatorArgType, +) + +if sys.version_info >= (3, 10): + from typing import TypeGuard, TypeAlias +else: + from typing_extensions import TypeGuard, TypeAlias + +if sys.version_info >= (3, 11): + from typing import dataclass_transform +else: + from typing_extensions import dataclass_transform + +__version__: str +__version_info__: VersionInfo +__title__: str +__description__: str +__url__: str +__uri__: str +__author__: str +__email__: str +__license__: str +__copyright__: str + +_T = TypeVar("_T") +_C = TypeVar("_C", bound=type) + +_FilterType = Callable[["Attribute[_T]", _T], bool] + +# We subclass this here to keep the protocol's qualified name clean. +class AttrsInstance(AttrsInstance_, Protocol): + pass + +_A = TypeVar("_A", bound=type[AttrsInstance]) + +class _Nothing(enum.Enum): + NOTHING = enum.auto() + +NOTHING = _Nothing.NOTHING +NothingType: TypeAlias = Literal[_Nothing.NOTHING] + +# NOTE: Factory lies about its return type to make this possible: +# `x: List[int] # = Factory(list)` +# Work around mypy issue #4554 in the common case by using an overload. + +@overload +def Factory(factory: Callable[[], _T]) -> _T: ... +@overload +def Factory( + factory: Callable[[Any], _T], + takes_self: Literal[True], +) -> _T: ... +@overload +def Factory( + factory: Callable[[], _T], + takes_self: Literal[False], +) -> _T: ... + +In = TypeVar("In") +Out = TypeVar("Out") + +class Converter(Generic[In, Out]): + @overload + def __init__(self, converter: Callable[[In], Out]) -> None: ... + @overload + def __init__( + self, + converter: Callable[[In, AttrsInstance, Attribute], Out], + *, + takes_self: Literal[True], + takes_field: Literal[True], + ) -> None: ... + @overload + def __init__( + self, + converter: Callable[[In, Attribute], Out], + *, + takes_field: Literal[True], + ) -> None: ... + @overload + def __init__( + self, + converter: Callable[[In, AttrsInstance], Out], + *, + takes_self: Literal[True], + ) -> None: ... + +class Attribute(Generic[_T]): + name: str + default: _T | None + validator: _ValidatorType[_T] | None + repr: _ReprArgType + cmp: _EqOrderType + eq: _EqOrderType + order: _EqOrderType + hash: bool | None + init: bool + converter: Converter | None + metadata: dict[Any, Any] + type: type[_T] | None + kw_only: bool + on_setattr: _OnSetAttrType + alias: str | None + + def evolve(self, **changes: Any) -> "Attribute[Any]": ... + +# NOTE: We had several choices for the annotation to use for type arg: +# 1) Type[_T] +# - Pros: Handles simple cases correctly +# - Cons: Might produce less informative errors in the case of conflicting +# TypeVars e.g. `attr.ib(default='bad', type=int)` +# 2) Callable[..., _T] +# - Pros: Better error messages than #1 for conflicting TypeVars +# - Cons: Terrible error messages for validator checks. +# e.g. attr.ib(type=int, validator=validate_str) +# -> error: Cannot infer function type argument +# 3) type (and do all of the work in the mypy plugin) +# - Pros: Simple here, and we could customize the plugin with our own errors. +# - Cons: Would need to write mypy plugin code to handle all the cases. +# We chose option #1. 
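To make the trade-off in the note above concrete, here is a minimal sketch (the `Point` class is illustrative, not part of attrs) of how option #1 behaves for a type checker:

    import attr

    @attr.s
    class Point:
        # Because `type` is annotated as `Type[_T]` (option #1), a checker
        # infers `x: int` here, and a conflicting default such as
        # `attr.ib(default="bad", type=int)` can be flagged.
        x = attr.ib(type=int, default=0)
        y = attr.ib(type=int, default=0)

    print(Point(1, 2))  # Point(x=1, y=2)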
+ +# `attr` lies about its return type to make the following possible: +# attr() -> Any +# attr(8) -> int +# attr(validator=) -> Whatever the callable expects. +# This makes this type of assignments possible: +# x: int = attr(8) +# +# This form catches explicit None or no default but with no other arguments +# returns Any. +@overload +def attrib( + default: None = ..., + validator: None = ..., + repr: _ReprArgType = ..., + cmp: _EqOrderType | None = ..., + hash: bool | None = ..., + init: bool = ..., + metadata: Mapping[Any, Any] | None = ..., + type: None = ..., + converter: None = ..., + factory: None = ..., + kw_only: bool | None = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + alias: str | None = ..., +) -> Any: ... + +# This form catches an explicit None or no default and infers the type from the +# other arguments. +@overload +def attrib( + default: None = ..., + validator: _ValidatorArgType[_T] | None = ..., + repr: _ReprArgType = ..., + cmp: _EqOrderType | None = ..., + hash: bool | None = ..., + init: bool = ..., + metadata: Mapping[Any, Any] | None = ..., + type: type[_T] | None = ..., + converter: _ConverterType + | list[_ConverterType] + | tuple[_ConverterType] + | None = ..., + factory: Callable[[], _T] | None = ..., + kw_only: bool | None = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + alias: str | None = ..., +) -> _T: ... + +# This form catches an explicit default argument. +@overload +def attrib( + default: _T, + validator: _ValidatorArgType[_T] | None = ..., + repr: _ReprArgType = ..., + cmp: _EqOrderType | None = ..., + hash: bool | None = ..., + init: bool = ..., + metadata: Mapping[Any, Any] | None = ..., + type: type[_T] | None = ..., + converter: _ConverterType + | list[_ConverterType] + | tuple[_ConverterType] + | None = ..., + factory: Callable[[], _T] | None = ..., + kw_only: bool | None = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + alias: str | None = ..., +) -> _T: ... + +# This form covers type=non-Type: e.g. forward references (str), Any +@overload +def attrib( + default: _T | None = ..., + validator: _ValidatorArgType[_T] | None = ..., + repr: _ReprArgType = ..., + cmp: _EqOrderType | None = ..., + hash: bool | None = ..., + init: bool = ..., + metadata: Mapping[Any, Any] | None = ..., + type: object = ..., + converter: _ConverterType + | list[_ConverterType] + | tuple[_ConverterType] + | None = ..., + factory: Callable[[], _T] | None = ..., + kw_only: bool | None = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + alias: str | None = ..., +) -> Any: ... 
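As a usage sketch of the assignment patterns these overloads are written to support (the `Server` class and its fields are illustrative only):

    import attr

    @attr.s
    class Server:
        host = attr.ib()                   # no default and no type info: typed as Any
        port: int = attr.ib(default=8080)  # an explicit default selects the overload returning _T
        tags = attr.ib(factory=list)       # a factory also counts as a default; _T is inferred as list

    print(Server("localhost"))  # Server(host='localhost', port=8080, tags=[])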
+@overload +@dataclass_transform(order_default=True, field_specifiers=(attrib, field)) +def attrs( + maybe_cls: _C, + these: dict[str, Any] | None = ..., + repr_ns: str | None = ..., + repr: bool = ..., + cmp: _EqOrderType | None = ..., + hash: bool | None = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + auto_detect: bool = ..., + collect_by_mro: bool = ..., + getstate_setstate: bool | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + field_transformer: _FieldTransformer | None = ..., + match_args: bool = ..., + unsafe_hash: bool | None = ..., +) -> _C: ... +@overload +@dataclass_transform(order_default=True, field_specifiers=(attrib, field)) +def attrs( + maybe_cls: None = ..., + these: dict[str, Any] | None = ..., + repr_ns: str | None = ..., + repr: bool = ..., + cmp: _EqOrderType | None = ..., + hash: bool | None = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + auto_detect: bool = ..., + collect_by_mro: bool = ..., + getstate_setstate: bool | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + field_transformer: _FieldTransformer | None = ..., + match_args: bool = ..., + unsafe_hash: bool | None = ..., +) -> Callable[[_C], _C]: ... +def fields(cls: type[AttrsInstance]) -> Any: ... +def fields_dict(cls: type[AttrsInstance]) -> dict[str, Attribute[Any]]: ... +def validate(inst: AttrsInstance) -> None: ... +def resolve_types( + cls: _A, + globalns: dict[str, Any] | None = ..., + localns: dict[str, Any] | None = ..., + attribs: list[Attribute[Any]] | None = ..., + include_extras: bool = ..., +) -> _A: ... + +# TODO: add support for returning a proper attrs class from the mypy plugin +# we use Any instead of _CountingAttr so that e.g. `make_class('Foo', +# [attr.ib()])` is valid +def make_class( + name: str, + attrs: list[str] | tuple[str, ...] | dict[str, Any], + bases: tuple[type, ...] = ..., + class_body: dict[str, Any] | None = ..., + repr_ns: str | None = ..., + repr: bool = ..., + cmp: _EqOrderType | None = ..., + hash: bool | None = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + collect_by_mro: bool = ..., + on_setattr: _OnSetAttrArgType | None = ..., + field_transformer: _FieldTransformer | None = ..., +) -> type: ... + +# _funcs -- + +# TODO: add support for returning TypedDict from the mypy plugin +# FIXME: asdict/astuple do not honor their factory args. Waiting on one of +# these: +# https://github.com/python/mypy/issues/4236 +# https://github.com/python/typing/issues/253 +# XXX: remember to fix attrs.asdict/astuple too! +def asdict( + inst: AttrsInstance, + recurse: bool = ..., + filter: _FilterType[Any] | None = ..., + dict_factory: type[Mapping[Any, Any]] = ..., + retain_collection_types: bool = ..., + value_serializer: Callable[[type, Attribute[Any], Any], Any] | None = ..., + tuple_keys: bool | None = ..., +) -> dict[str, Any]: ... 
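+
+# An editorial usage sketch (hypothetical names, not upstream documentation)
+# tying the stubs above together: make_class returns a plain `type` because
+# the mypy plugin cannot yet model dynamically created attrs classes, and
+# asdict is typed to return dict[str, Any] regardless of *dict_factory*
+# (see the TODO/FIXME comments above).
+#
+#     import attr
+#
+#     Point = attr.make_class("Point", ["x", "y"])
+#     p = Point(1, 2)
+#     attr.asdict(p)  # {"x": 1, "y": 2}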
+
+# TODO: add support for returning NamedTuple from the mypy plugin
+def astuple(
+    inst: AttrsInstance,
+    recurse: bool = ...,
+    filter: _FilterType[Any] | None = ...,
+    tuple_factory: type[Sequence[Any]] = ...,
+    retain_collection_types: bool = ...,
+) -> tuple[Any, ...]: ...
+def has(cls: type) -> TypeGuard[type[AttrsInstance]]: ...
+def assoc(inst: _T, **changes: Any) -> _T: ...
+def evolve(inst: _T, **changes: Any) -> _T: ...
+
+# _config --
+
+def set_run_validators(run: bool) -> None: ...
+def get_run_validators() -> bool: ...
+
+# aliases --
+
+s = attributes = attrs
+ib = attr = attrib
+dataclass = attrs  # Technically, partial(attrs, auto_attribs=True) ;)
diff --git a/.venv/lib/python3.11/site-packages/attr/_cmp.py b/.venv/lib/python3.11/site-packages/attr/_cmp.py
new file mode 100644
index 0000000..09bab49
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/attr/_cmp.py
@@ -0,0 +1,160 @@
+# SPDX-License-Identifier: MIT
+
+
+import functools
+import types
+
+from ._make import __ne__
+
+
+_operation_names = {"eq": "==", "lt": "<", "le": "<=", "gt": ">", "ge": ">="}
+
+
+def cmp_using(
+    eq=None,
+    lt=None,
+    le=None,
+    gt=None,
+    ge=None,
+    require_same_type=True,
+    class_name="Comparable",
+):
+    """
+    Create a class that can be passed into `attrs.field`'s ``eq``, ``order``,
+    and ``cmp`` arguments to customize field comparison.
+
+    The resulting class will have a full set of ordering methods if ``eq``
+    and at least one of ``{lt, le, gt, ge}`` are provided.
+
+    Args:
+        eq (typing.Callable | None):
+            Callable used to evaluate equality of two objects.
+
+        lt (typing.Callable | None):
+            Callable used to evaluate whether one object is less than another
+            object.
+
+        le (typing.Callable | None):
+            Callable used to evaluate whether one object is less than or equal
+            to another object.
+
+        gt (typing.Callable | None):
+            Callable used to evaluate whether one object is greater than
+            another object.
+
+        ge (typing.Callable | None):
+            Callable used to evaluate whether one object is greater than or
+            equal to another object.
+
+        require_same_type (bool):
+            When `True`, equality and ordering methods will return
+            `NotImplemented` if objects are not of the same type.
+
+        class_name (str | None): Name of class. Defaults to "Comparable".
+
+    See `comparison` for more details.
+
+    .. versionadded:: 21.1.0
+    """
+
+    body = {
+        "__slots__": ["value"],
+        "__init__": _make_init(),
+        "_requirements": [],
+        "_is_comparable_to": _is_comparable_to,
+    }
+
+    # Add operations.
+    num_order_functions = 0
+    has_eq_function = False
+
+    if eq is not None:
+        has_eq_function = True
+        body["__eq__"] = _make_operator("eq", eq)
+        body["__ne__"] = __ne__
+
+    if lt is not None:
+        num_order_functions += 1
+        body["__lt__"] = _make_operator("lt", lt)
+
+    if le is not None:
+        num_order_functions += 1
+        body["__le__"] = _make_operator("le", le)
+
+    if gt is not None:
+        num_order_functions += 1
+        body["__gt__"] = _make_operator("gt", gt)
+
+    if ge is not None:
+        num_order_functions += 1
+        body["__ge__"] = _make_operator("ge", ge)
+
+    type_ = types.new_class(
+        class_name, (object,), {}, lambda ns: ns.update(body)
+    )
+
+    # Add same type requirement.
+    if require_same_type:
+        type_._requirements.append(_check_same_type)
+
+    # Add total ordering if at least one operation was defined.
+    if 0 < num_order_functions < 4:
+        if not has_eq_function:
+            # functools.total_ordering requires __eq__ to be defined,
+            # so raise an early error here to keep a nice stack trace.
+ msg = "eq must be define is order to complete ordering from lt, le, gt, ge." + raise ValueError(msg) + type_ = functools.total_ordering(type_) + + return type_ + + +def _make_init(): + """ + Create __init__ method. + """ + + def __init__(self, value): + """ + Initialize object with *value*. + """ + self.value = value + + return __init__ + + +def _make_operator(name, func): + """ + Create operator method. + """ + + def method(self, other): + if not self._is_comparable_to(other): + return NotImplemented + + result = func(self.value, other.value) + if result is NotImplemented: + return NotImplemented + + return result + + method.__name__ = f"__{name}__" + method.__doc__ = ( + f"Return a {_operation_names[name]} b. Computed by attrs." + ) + + return method + + +def _is_comparable_to(self, other): + """ + Check whether `other` is comparable to `self`. + """ + return all(func(self, other) for func in self._requirements) + + +def _check_same_type(self, other): + """ + Return True if *self* and *other* are of the same type, False otherwise. + """ + return other.value.__class__ is self.value.__class__ diff --git a/.venv/lib/python3.11/site-packages/attr/_cmp.pyi b/.venv/lib/python3.11/site-packages/attr/_cmp.pyi new file mode 100644 index 0000000..cc7893b --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attr/_cmp.pyi @@ -0,0 +1,13 @@ +from typing import Any, Callable + +_CompareWithType = Callable[[Any, Any], bool] + +def cmp_using( + eq: _CompareWithType | None = ..., + lt: _CompareWithType | None = ..., + le: _CompareWithType | None = ..., + gt: _CompareWithType | None = ..., + ge: _CompareWithType | None = ..., + require_same_type: bool = ..., + class_name: str = ..., +) -> type: ... diff --git a/.venv/lib/python3.11/site-packages/attr/_compat.py b/.venv/lib/python3.11/site-packages/attr/_compat.py new file mode 100644 index 0000000..bc68ed9 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attr/_compat.py @@ -0,0 +1,99 @@ +# SPDX-License-Identifier: MIT + +import inspect +import platform +import sys +import threading + +from collections.abc import Mapping, Sequence # noqa: F401 +from typing import _GenericAlias + + +PYPY = platform.python_implementation() == "PyPy" +PY_3_10_PLUS = sys.version_info[:2] >= (3, 10) +PY_3_11_PLUS = sys.version_info[:2] >= (3, 11) +PY_3_12_PLUS = sys.version_info[:2] >= (3, 12) +PY_3_13_PLUS = sys.version_info[:2] >= (3, 13) +PY_3_14_PLUS = sys.version_info[:2] >= (3, 14) + + +if PY_3_14_PLUS: + import annotationlib + + # We request forward-ref annotations to not break in the presence of + # forward references. + + def _get_annotations(cls): + return annotationlib.get_annotations( + cls, format=annotationlib.Format.FORWARDREF + ) + +else: + + def _get_annotations(cls): + """ + Get annotations for *cls*. + """ + return cls.__dict__.get("__annotations__", {}) + + +class _AnnotationExtractor: + """ + Extract type annotations from a callable, returning None whenever there + is none. + """ + + __slots__ = ["sig"] + + def __init__(self, callable): + try: + self.sig = inspect.signature(callable) + except (ValueError, TypeError): # inspect failed + self.sig = None + + def get_first_param_type(self): + """ + Return the type annotation of the first argument if it's not empty. + """ + if not self.sig: + return None + + params = list(self.sig.parameters.values()) + if params and params[0].annotation is not inspect.Parameter.empty: + return params[0].annotation + + return None + + def get_return_type(self): + """ + Return the return type if it's not empty. 
+ """ + if ( + self.sig + and self.sig.return_annotation is not inspect.Signature.empty + ): + return self.sig.return_annotation + + return None + + +# Thread-local global to track attrs instances which are already being repr'd. +# This is needed because there is no other (thread-safe) way to pass info +# about the instances that are already being repr'd through the call stack +# in order to ensure we don't perform infinite recursion. +# +# For instance, if an instance contains a dict which contains that instance, +# we need to know that we're already repr'ing the outside instance from within +# the dict's repr() call. +# +# This lives here rather than in _make.py so that the functions in _make.py +# don't have a direct reference to the thread-local in their globals dict. +# If they have such a reference, it breaks cloudpickle. +repr_context = threading.local() + + +def get_generic_base(cl): + """If this is a generic class (A[str]), return the generic base for it.""" + if cl.__class__ is _GenericAlias: + return cl.__origin__ + return None diff --git a/.venv/lib/python3.11/site-packages/attr/_config.py b/.venv/lib/python3.11/site-packages/attr/_config.py new file mode 100644 index 0000000..4b25772 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attr/_config.py @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: MIT + +__all__ = ["get_run_validators", "set_run_validators"] + +_run_validators = True + + +def set_run_validators(run): + """ + Set whether or not validators are run. By default, they are run. + + .. deprecated:: 21.3.0 It will not be removed, but it also will not be + moved to new ``attrs`` namespace. Use `attrs.validators.set_disabled()` + instead. + """ + if not isinstance(run, bool): + msg = "'run' must be bool." + raise TypeError(msg) + global _run_validators + _run_validators = run + + +def get_run_validators(): + """ + Return whether or not validators are run. + + .. deprecated:: 21.3.0 It will not be removed, but it also will not be + moved to new ``attrs`` namespace. Use `attrs.validators.get_disabled()` + instead. + """ + return _run_validators diff --git a/.venv/lib/python3.11/site-packages/attr/_funcs.py b/.venv/lib/python3.11/site-packages/attr/_funcs.py new file mode 100644 index 0000000..1adb500 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attr/_funcs.py @@ -0,0 +1,497 @@ +# SPDX-License-Identifier: MIT + + +import copy + +from ._compat import get_generic_base +from ._make import _OBJ_SETATTR, NOTHING, fields +from .exceptions import AttrsAttributeNotFoundError + + +_ATOMIC_TYPES = frozenset( + { + type(None), + bool, + int, + float, + str, + complex, + bytes, + type(...), + type, + range, + property, + } +) + + +def asdict( + inst, + recurse=True, + filter=None, + dict_factory=dict, + retain_collection_types=False, + value_serializer=None, +): + """ + Return the *attrs* attribute values of *inst* as a dict. + + Optionally recurse into other *attrs*-decorated classes. + + Args: + inst: Instance of an *attrs*-decorated class. + + recurse (bool): Recurse into classes that are also *attrs*-decorated. + + filter (~typing.Callable): + A callable whose return code determines whether an attribute or + element is included (`True`) or dropped (`False`). Is called with + the `attrs.Attribute` as the first argument and the value as the + second argument. + + dict_factory (~typing.Callable): + A callable to produce dictionaries from. For example, to produce + ordered dictionaries instead of normal Python dictionaries, pass in + ``collections.OrderedDict``. 
+
+        retain_collection_types (bool):
+            Do not convert to `list` when encountering an attribute whose type
+            is `tuple` or `set`. Only meaningful if *recurse* is `True`.
+
+        value_serializer (typing.Callable | None):
+            A hook that is called for every attribute or dict key/value. It
+            receives the current instance, field and value and must return the
+            (updated) value. The hook is run *after* the optional *filter* has
+            been applied.
+
+    Returns:
+        Return type of *dict_factory*.
+
+    Raises:
+        attrs.exceptions.NotAnAttrsClassError:
+            If *cls* is not an *attrs* class.
+
+    .. versionadded:: 16.0.0 *dict_factory*
+    .. versionadded:: 16.1.0 *retain_collection_types*
+    .. versionadded:: 20.3.0 *value_serializer*
+    .. versionadded:: 21.3.0
+       If a dict has a collection for a key, it is serialized as a tuple.
+    """
+    attrs = fields(inst.__class__)
+    rv = dict_factory()
+    for a in attrs:
+        v = getattr(inst, a.name)
+        if filter is not None and not filter(a, v):
+            continue
+
+        if value_serializer is not None:
+            v = value_serializer(inst, a, v)
+
+        if recurse is True:
+            value_type = type(v)
+            if value_type in _ATOMIC_TYPES:
+                rv[a.name] = v
+            elif has(value_type):
+                rv[a.name] = asdict(
+                    v,
+                    recurse=True,
+                    filter=filter,
+                    dict_factory=dict_factory,
+                    retain_collection_types=retain_collection_types,
+                    value_serializer=value_serializer,
+                )
+            elif issubclass(value_type, (tuple, list, set, frozenset)):
+                cf = value_type if retain_collection_types is True else list
+                items = [
+                    _asdict_anything(
+                        i,
+                        is_key=False,
+                        filter=filter,
+                        dict_factory=dict_factory,
+                        retain_collection_types=retain_collection_types,
+                        value_serializer=value_serializer,
+                    )
+                    for i in v
+                ]
+                try:
+                    rv[a.name] = cf(items)
+                except TypeError:
+                    if not issubclass(cf, tuple):
+                        raise
+                    # Workaround for TypeError: cf.__new__() missing 1 required
+                    # positional argument (which appears, for example, for a
+                    # namedtuple)
+                    rv[a.name] = cf(*items)
+            elif issubclass(value_type, dict):
+                df = dict_factory
+                rv[a.name] = df(
+                    (
+                        _asdict_anything(
+                            kk,
+                            is_key=True,
+                            filter=filter,
+                            dict_factory=df,
+                            retain_collection_types=retain_collection_types,
+                            value_serializer=value_serializer,
+                        ),
+                        _asdict_anything(
+                            vv,
+                            is_key=False,
+                            filter=filter,
+                            dict_factory=df,
+                            retain_collection_types=retain_collection_types,
+                            value_serializer=value_serializer,
+                        ),
+                    )
+                    for kk, vv in v.items()
+                )
+            else:
+                rv[a.name] = v
+        else:
+            rv[a.name] = v
+    return rv
+
+
+def _asdict_anything(
+    val,
+    is_key,
+    filter,
+    dict_factory,
+    retain_collection_types,
+    value_serializer,
+):
+    """
+    ``asdict`` only works on attrs instances; this works on anything.
+    """
+    val_type = type(val)
+    if val_type in _ATOMIC_TYPES:
+        rv = val
+        if value_serializer is not None:
+            rv = value_serializer(None, None, rv)
+    elif getattr(val_type, "__attrs_attrs__", None) is not None:
+        # Attrs class.
+        rv = asdict(
+            val,
+            recurse=True,
+            filter=filter,
+            dict_factory=dict_factory,
+            retain_collection_types=retain_collection_types,
+            value_serializer=value_serializer,
+        )
+    elif issubclass(val_type, (tuple, list, set, frozenset)):
+        if retain_collection_types is True:
+            cf = val.__class__
+        elif is_key:
+            cf = tuple
+        else:
+            cf = list
+
+        rv = cf(
+            [
+                _asdict_anything(
+                    i,
+                    is_key=False,
+                    filter=filter,
+                    dict_factory=dict_factory,
+                    retain_collection_types=retain_collection_types,
+                    value_serializer=value_serializer,
+                )
+                for i in val
+            ]
+        )
+    elif issubclass(val_type, dict):
+        df = dict_factory
+        rv = df(
+            (
+                _asdict_anything(
+                    kk,
+                    is_key=True,
+                    filter=filter,
+                    dict_factory=df,
+                    retain_collection_types=retain_collection_types,
+                    value_serializer=value_serializer,
+                ),
+                _asdict_anything(
+                    vv,
+                    is_key=False,
+                    filter=filter,
+                    dict_factory=df,
+                    retain_collection_types=retain_collection_types,
+                    value_serializer=value_serializer,
+                ),
+            )
+            for kk, vv in val.items()
+        )
+    else:
+        rv = val
+        if value_serializer is not None:
+            rv = value_serializer(None, None, rv)
+
+    return rv
+
+
+def astuple(
+    inst,
+    recurse=True,
+    filter=None,
+    tuple_factory=tuple,
+    retain_collection_types=False,
+):
+    """
+    Return the *attrs* attribute values of *inst* as a tuple.
+
+    Optionally recurse into other *attrs*-decorated classes.
+
+    Args:
+        inst: Instance of an *attrs*-decorated class.
+
+        recurse (bool):
+            Recurse into classes that are also *attrs*-decorated.
+
+        filter (~typing.Callable):
+            A callable whose return code determines whether an attribute or
+            element is included (`True`) or dropped (`False`). Is called with
+            the `attrs.Attribute` as the first argument and the value as the
+            second argument.
+
+        tuple_factory (~typing.Callable):
+            A callable to produce tuples from. For example, pass in `list` to
+            produce lists instead of tuples.
+
+        retain_collection_types (bool):
+            Do not convert to `list` or `dict` when encountering an attribute
+            whose type is `tuple`, `dict` or `set`. Only meaningful if
+            *recurse* is `True`.
+
+    Returns:
+        Return type of *tuple_factory*.
+
+    Raises:
+        attrs.exceptions.NotAnAttrsClassError:
+            If *cls* is not an *attrs* class.
+
+    .. versionadded:: 16.2.0
+    """
+    attrs = fields(inst.__class__)
+    rv = []
+    retain = retain_collection_types  # Very long. :/
+    for a in attrs:
+        v = getattr(inst, a.name)
+        if filter is not None and not filter(a, v):
+            continue
+        value_type = type(v)
+        if recurse is True:
+            if value_type in _ATOMIC_TYPES:
+                rv.append(v)
+            elif has(value_type):
+                rv.append(
+                    astuple(
+                        v,
+                        recurse=True,
+                        filter=filter,
+                        tuple_factory=tuple_factory,
+                        retain_collection_types=retain,
+                    )
+                )
+            elif issubclass(value_type, (tuple, list, set, frozenset)):
+                cf = v.__class__ if retain is True else list
+                items = [
+                    (
+                        astuple(
+                            j,
+                            recurse=True,
+                            filter=filter,
+                            tuple_factory=tuple_factory,
+                            retain_collection_types=retain,
+                        )
+                        if has(j.__class__)
+                        else j
+                    )
+                    for j in v
+                ]
+                try:
+                    rv.append(cf(items))
+                except TypeError:
+                    if not issubclass(cf, tuple):
+                        raise
+                    # Workaround for TypeError: cf.__new__() missing 1 required
+                    # positional argument (which appears, for example, for a
+                    # namedtuple)
+                    rv.append(cf(*items))
+            elif issubclass(value_type, dict):
+                df = value_type if retain is True else dict
+                rv.append(
+                    df(
+                        (
+                            (
+                                astuple(
+                                    kk,
+                                    tuple_factory=tuple_factory,
+                                    retain_collection_types=retain,
+                                )
+                                if has(kk.__class__)
+                                else kk
+                            ),
+                            (
+                                astuple(
+                                    vv,
+                                    tuple_factory=tuple_factory,
+                                    retain_collection_types=retain,
+                                )
+                                if has(vv.__class__)
+                                else vv
+                            ),
+                        )
+                        for kk, vv in v.items()
+                    )
+                )
+            else:
+                rv.append(v)
+        else:
+            rv.append(v)
+
+    return rv if tuple_factory is list else tuple_factory(rv)
+
+
+def has(cls):
+    """
+    Check whether *cls* is a class with *attrs* attributes.
+
+    Args:
+        cls (type): Class to introspect.
+
+    Raises:
+        TypeError: If *cls* is not a class.
+
+    Returns:
+        bool:
+    """
+    attrs = getattr(cls, "__attrs_attrs__", None)
+    if attrs is not None:
+        return True
+
+    # No attrs, maybe it's a specialized generic (A[str])?
+    generic_base = get_generic_base(cls)
+    if generic_base is not None:
+        generic_attrs = getattr(generic_base, "__attrs_attrs__", None)
+        if generic_attrs is not None:
+            # Stick it on here for speed next time.
+            cls.__attrs_attrs__ = generic_attrs
+        return generic_attrs is not None
+    return False
+
+
+def assoc(inst, **changes):
+    """
+    Copy *inst* and apply *changes*.
+
+    This is different from `evolve`, which applies the changes to the
+    arguments that create the new instance.
+
+    `evolve`'s behavior is preferable, but there are `edge cases`_ where it
+    doesn't work. Therefore `assoc` is deprecated, but will not be removed.
+
+    .. _`edge cases`: https://github.com/python-attrs/attrs/issues/251
+
+    Args:
+        inst: Instance of a class with *attrs* attributes.
+
+        changes: Keyword changes in the new copy.
+
+    Returns:
+        A copy of inst with *changes* incorporated.
+
+    Raises:
+        attrs.exceptions.AttrsAttributeNotFoundError:
+            If *attr_name* couldn't be found on *cls*.
+
+        attrs.exceptions.NotAnAttrsClassError:
+            If *cls* is not an *attrs* class.
+
+    .. deprecated:: 17.1.0
+        Use `attrs.evolve` instead if you can. This function will not be
+        removed due to the slightly different approach compared to
+        `attrs.evolve`, though.
+    """
+    new = copy.copy(inst)
+    attrs = fields(inst.__class__)
+    for k, v in changes.items():
+        a = getattr(attrs, k, NOTHING)
+        if a is NOTHING:
+            msg = f"{k} is not an attrs attribute on {new.__class__}."
+            raise AttrsAttributeNotFoundError(msg)
+        _OBJ_SETATTR(new, k, v)
+    return new
+
+
+def resolve_types(
+    cls, globalns=None, localns=None, attribs=None, include_extras=True
+):
+    """
+    Resolve any strings and forward annotations in type annotations.
+
+    This is only required if you need concrete types in :class:`Attribute`'s
+    *type* field. In other words, you don't need to resolve your types if you
+    only use them for static type checking.
+
+    With no arguments, names will be looked up in the module in which the class
+    was created. If this is not what you want, for example, if the name only
+    exists inside a method, you may pass *globalns* or *localns* to specify
+    other dictionaries in which to look up these names. See the docs of
+    `typing.get_type_hints` for more details.
+
+    Args:
+        cls (type): Class to resolve.
+
+        globalns (dict | None): Dictionary containing global variables.
+
+        localns (dict | None): Dictionary containing local variables.
+
+        attribs (list | None):
+            List of attribs for the given class. This is necessary when calling
+            from inside a ``field_transformer`` since *cls* is not an *attrs*
+            class yet.
+
+        include_extras (bool):
+            Resolve more accurately, if possible. Pass ``include_extras`` to
+            ``typing.get_type_hints``, if supported by the typing module. On
+            supported Python versions (3.9+), this resolves the types more
+            accurately.
+
+    Raises:
+        TypeError: If *cls* is not a class.
+
+        attrs.exceptions.NotAnAttrsClassError:
+            If *cls* is not an *attrs* class and you didn't pass any attribs.
+
+        NameError: If types cannot be resolved because of missing variables.
+
+    Returns:
+        *cls* so you can use this function also as a class decorator. Please
+        note that you have to apply it **after** `attrs.define`. That means the
+        decorator has to come in the line **before** `attrs.define`.
+
+    .. versionadded:: 20.1.0
+    .. versionadded:: 21.1.0 *attribs*
+    .. versionadded:: 23.1.0 *include_extras*
+    """
+    # Since calling get_type_hints is expensive we cache whether we've
+    # done it already.
+    if getattr(cls, "__attrs_types_resolved__", None) != cls:
+        import typing
+
+        kwargs = {
+            "globalns": globalns,
+            "localns": localns,
+            "include_extras": include_extras,
+        }
+
+        hints = typing.get_type_hints(cls, **kwargs)
+        for field in fields(cls) if attribs is None else attribs:
+            if field.name in hints:
+                # Since fields have been frozen we must work around it.
+                _OBJ_SETATTR(field, "type", hints[field.name])
+        # We store the class we resolved so that subclasses know they haven't
+        # been resolved.
+        cls.__attrs_types_resolved__ = cls
+
+    # Return the class so you can use it as a decorator too.
+    return cls
diff --git a/.venv/lib/python3.11/site-packages/attr/_make.py b/.venv/lib/python3.11/site-packages/attr/_make.py
new file mode 100644
index 0000000..d24d9ba
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/attr/_make.py
@@ -0,0 +1,3362 @@
+# SPDX-License-Identifier: MIT
+
+from __future__ import annotations
+
+import abc
+import contextlib
+import copy
+import enum
+import inspect
+import itertools
+import linecache
+import sys
+import types
+import unicodedata
+import weakref
+
+from collections.abc import Callable, Mapping
+from functools import cached_property
+from typing import Any, NamedTuple, TypeVar
+
+# We need to import _compat itself in addition to the _compat members to avoid
+# having the thread-local in the globals here.
+from . import _compat, _config, setters
+from ._compat import (
+    PY_3_10_PLUS,
+    PY_3_11_PLUS,
+    PY_3_13_PLUS,
+    _AnnotationExtractor,
+    _get_annotations,
+    get_generic_base,
+)
+from .exceptions import (
+    DefaultAlreadySetError,
+    FrozenInstanceError,
+    NotAnAttrsClassError,
+    UnannotatedAttributeError,
+)
+
+
+# This is used at least twice, so cache it here.
+_OBJ_SETATTR = object.__setattr__ +_INIT_FACTORY_PAT = "__attr_factory_%s" +_CLASSVAR_PREFIXES = ( + "typing.ClassVar", + "t.ClassVar", + "ClassVar", + "typing_extensions.ClassVar", +) +# we don't use a double-underscore prefix because that triggers +# name mangling when trying to create a slot for the field +# (when slots=True) +_HASH_CACHE_FIELD = "_attrs_cached_hash" + +_EMPTY_METADATA_SINGLETON = types.MappingProxyType({}) + +# Unique object for unequivocal getattr() defaults. +_SENTINEL = object() + +_DEFAULT_ON_SETATTR = setters.pipe(setters.convert, setters.validate) + + +class _Nothing(enum.Enum): + """ + Sentinel to indicate the lack of a value when `None` is ambiguous. + + If extending attrs, you can use ``typing.Literal[NOTHING]`` to show + that a value may be ``NOTHING``. + + .. versionchanged:: 21.1.0 ``bool(NOTHING)`` is now False. + .. versionchanged:: 22.2.0 ``NOTHING`` is now an ``enum.Enum`` variant. + """ + + NOTHING = enum.auto() + + def __repr__(self): + return "NOTHING" + + def __bool__(self): + return False + + +NOTHING = _Nothing.NOTHING +""" +Sentinel to indicate the lack of a value when `None` is ambiguous. + +When using in 3rd party code, use `attrs.NothingType` for type annotations. +""" + + +class _CacheHashWrapper(int): + """ + An integer subclass that pickles / copies as None + + This is used for non-slots classes with ``cache_hash=True``, to avoid + serializing a potentially (even likely) invalid hash value. Since `None` + is the default value for uncalculated hashes, whenever this is copied, + the copy's value for the hash should automatically reset. + + See GH #613 for more details. + """ + + def __reduce__(self, _none_constructor=type(None), _args=()): # noqa: B008 + return _none_constructor, _args + + +def attrib( + default=NOTHING, + validator=None, + repr=True, + cmp=None, + hash=None, + init=True, + metadata=None, + type=None, + converter=None, + factory=None, + kw_only=None, + eq=None, + order=None, + on_setattr=None, + alias=None, +): + """ + Create a new field / attribute on a class. + + Identical to `attrs.field`, except it's not keyword-only. + + Consider using `attrs.field` in new code (``attr.ib`` will *never* go away, + though). + + .. warning:: + + Does **nothing** unless the class is also decorated with + `attr.s` (or similar)! + + + .. versionadded:: 15.2.0 *convert* + .. versionadded:: 16.3.0 *metadata* + .. versionchanged:: 17.1.0 *validator* can be a ``list`` now. + .. versionchanged:: 17.1.0 + *hash* is `None` and therefore mirrors *eq* by default. + .. versionadded:: 17.3.0 *type* + .. deprecated:: 17.4.0 *convert* + .. versionadded:: 17.4.0 + *converter* as a replacement for the deprecated *convert* to achieve + consistency with other noun-based arguments. + .. versionadded:: 18.1.0 + ``factory=f`` is syntactic sugar for ``default=attr.Factory(f)``. + .. versionadded:: 18.2.0 *kw_only* + .. versionchanged:: 19.2.0 *convert* keyword argument removed. + .. versionchanged:: 19.2.0 *repr* also accepts a custom callable. + .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01. + .. versionadded:: 19.2.0 *eq* and *order* + .. versionadded:: 20.1.0 *on_setattr* + .. versionchanged:: 20.3.0 *kw_only* backported to Python 2 + .. versionchanged:: 21.1.0 + *eq*, *order*, and *cmp* also accept a custom callable + .. versionchanged:: 21.1.0 *cmp* undeprecated + .. versionadded:: 22.2.0 *alias* + .. versionchanged:: 25.4.0 + *kw_only* can now be None, and its default is also changed from False to + None. 
+ """ + eq, eq_key, order, order_key = _determine_attrib_eq_order( + cmp, eq, order, True + ) + + if hash is not None and hash is not True and hash is not False: + msg = "Invalid value for hash. Must be True, False, or None." + raise TypeError(msg) + + if factory is not None: + if default is not NOTHING: + msg = ( + "The `default` and `factory` arguments are mutually exclusive." + ) + raise ValueError(msg) + if not callable(factory): + msg = "The `factory` argument must be a callable." + raise ValueError(msg) + default = Factory(factory) + + if metadata is None: + metadata = {} + + # Apply syntactic sugar by auto-wrapping. + if isinstance(on_setattr, (list, tuple)): + on_setattr = setters.pipe(*on_setattr) + + if validator and isinstance(validator, (list, tuple)): + validator = and_(*validator) + + if converter and isinstance(converter, (list, tuple)): + converter = pipe(*converter) + + return _CountingAttr( + default=default, + validator=validator, + repr=repr, + cmp=None, + hash=hash, + init=init, + converter=converter, + metadata=metadata, + type=type, + kw_only=kw_only, + eq=eq, + eq_key=eq_key, + order=order, + order_key=order_key, + on_setattr=on_setattr, + alias=alias, + ) + + +def _compile_and_eval( + script: str, + globs: dict[str, Any] | None, + locs: Mapping[str, object] | None = None, + filename: str = "", +) -> None: + """ + Evaluate the script with the given global (globs) and local (locs) + variables. + """ + bytecode = compile(script, filename, "exec") + eval(bytecode, globs, locs) + + +def _linecache_and_compile( + script: str, + filename: str, + globs: dict[str, Any] | None, + locals: Mapping[str, object] | None = None, +) -> dict[str, Any]: + """ + Cache the script with _linecache_, compile it and return the _locals_. + """ + + locs = {} if locals is None else locals + + # In order of debuggers like PDB being able to step through the code, + # we add a fake linecache entry. + count = 1 + base_filename = filename + while True: + linecache_tuple = ( + len(script), + None, + script.splitlines(True), + filename, + ) + old_val = linecache.cache.setdefault(filename, linecache_tuple) + if old_val == linecache_tuple: + break + + filename = f"{base_filename[:-1]}-{count}>" + count += 1 + + _compile_and_eval(script, globs, locs, filename) + + return locs + + +def _make_attr_tuple_class(cls_name: str, attr_names: list[str]) -> type: + """ + Create a tuple subclass to hold `Attribute`s for an `attrs` class. + + The subclass is a bare tuple with properties for names. + + class MyClassAttributes(tuple): + __slots__ = () + x = property(itemgetter(0)) + """ + attr_class_name = f"{cls_name}Attributes" + body = {} + for i, attr_name in enumerate(attr_names): + + def getter(self, i=i): + return self[i] + + body[attr_name] = property(getter) + return type(attr_class_name, (tuple,), body) + + +# Tuple class for extracted attributes from a class definition. +# `base_attrs` is a subset of `attrs`. +class _Attributes(NamedTuple): + attrs: type + base_attrs: list[Attribute] + base_attrs_map: dict[str, type] + + +def _is_class_var(annot): + """ + Check whether *annot* is a typing.ClassVar. + + The string comparison hack is used to avoid evaluating all string + annotations which would put attrs-based classes at a performance + disadvantage compared to plain old classes. + """ + annot = str(annot) + + # Annotation can be quoted. 
+ if annot.startswith(("'", '"')) and annot.endswith(("'", '"')): + annot = annot[1:-1] + + return annot.startswith(_CLASSVAR_PREFIXES) + + +def _has_own_attribute(cls, attrib_name): + """ + Check whether *cls* defines *attrib_name* (and doesn't just inherit it). + """ + return attrib_name in cls.__dict__ + + +def _collect_base_attrs( + cls, taken_attr_names +) -> tuple[list[Attribute], dict[str, type]]: + """ + Collect attr.ibs from base classes of *cls*, except *taken_attr_names*. + """ + base_attrs = [] + base_attr_map = {} # A dictionary of base attrs to their classes. + + # Traverse the MRO and collect attributes. + for base_cls in reversed(cls.__mro__[1:-1]): + for a in getattr(base_cls, "__attrs_attrs__", []): + if a.inherited or a.name in taken_attr_names: + continue + + a = a.evolve(inherited=True) # noqa: PLW2901 + base_attrs.append(a) + base_attr_map[a.name] = base_cls + + # For each name, only keep the freshest definition i.e. the furthest at the + # back. base_attr_map is fine because it gets overwritten with every new + # instance. + filtered = [] + seen = set() + for a in reversed(base_attrs): + if a.name in seen: + continue + filtered.insert(0, a) + seen.add(a.name) + + return filtered, base_attr_map + + +def _collect_base_attrs_broken(cls, taken_attr_names): + """ + Collect attr.ibs from base classes of *cls*, except *taken_attr_names*. + + N.B. *taken_attr_names* will be mutated. + + Adhere to the old incorrect behavior. + + Notably it collects from the front and considers inherited attributes which + leads to the buggy behavior reported in #428. + """ + base_attrs = [] + base_attr_map = {} # A dictionary of base attrs to their classes. + + # Traverse the MRO and collect attributes. + for base_cls in cls.__mro__[1:-1]: + for a in getattr(base_cls, "__attrs_attrs__", []): + if a.name in taken_attr_names: + continue + + a = a.evolve(inherited=True) # noqa: PLW2901 + taken_attr_names.add(a.name) + base_attrs.append(a) + base_attr_map[a.name] = base_cls + + return base_attrs, base_attr_map + + +def _transform_attrs( + cls, + these, + auto_attribs, + kw_only, + collect_by_mro, + field_transformer, +) -> _Attributes: + """ + Transform all `_CountingAttr`s on a class into `Attribute`s. + + If *these* is passed, use that and don't look for them on the class. + + If *collect_by_mro* is True, collect them in the correct MRO order, + otherwise use the old -- incorrect -- order. See #428. + + Return an `_Attributes`. + """ + cd = cls.__dict__ + anns = _get_annotations(cls) + + if these is not None: + ca_list = list(these.items()) + elif auto_attribs is True: + ca_names = { + name + for name, attr in cd.items() + if attr.__class__ is _CountingAttr + } + ca_list = [] + annot_names = set() + for attr_name, type in anns.items(): + if _is_class_var(type): + continue + annot_names.add(attr_name) + a = cd.get(attr_name, NOTHING) + + if a.__class__ is not _CountingAttr: + a = attrib(a) + ca_list.append((attr_name, a)) + + unannotated = ca_names - annot_names + if unannotated: + raise UnannotatedAttributeError( + "The following `attr.ib`s lack a type annotation: " + + ", ".join( + sorted(unannotated, key=lambda n: cd.get(n).counter) + ) + + "." 
+ ) + else: + ca_list = sorted( + ( + (name, attr) + for name, attr in cd.items() + if attr.__class__ is _CountingAttr + ), + key=lambda e: e[1].counter, + ) + + fca = Attribute.from_counting_attr + no = ClassProps.KeywordOnly.NO + own_attrs = [ + fca( + attr_name, + ca, + kw_only is not no, + anns.get(attr_name), + ) + for attr_name, ca in ca_list + ] + + if collect_by_mro: + base_attrs, base_attr_map = _collect_base_attrs( + cls, {a.name for a in own_attrs} + ) + else: + base_attrs, base_attr_map = _collect_base_attrs_broken( + cls, {a.name for a in own_attrs} + ) + + if kw_only is ClassProps.KeywordOnly.FORCE: + own_attrs = [a.evolve(kw_only=True) for a in own_attrs] + base_attrs = [a.evolve(kw_only=True) for a in base_attrs] + + attrs = base_attrs + own_attrs + + if field_transformer is not None: + attrs = tuple(field_transformer(cls, attrs)) + + # Check attr order after executing the field_transformer. + # Mandatory vs non-mandatory attr order only matters when they are part of + # the __init__ signature and when they aren't kw_only (which are moved to + # the end and can be mandatory or non-mandatory in any order, as they will + # be specified as keyword args anyway). Check the order of those attrs: + had_default = False + for a in (a for a in attrs if a.init is not False and a.kw_only is False): + if had_default is True and a.default is NOTHING: + msg = f"No mandatory attributes allowed after an attribute with a default value or factory. Attribute in question: {a!r}" + raise ValueError(msg) + + if had_default is False and a.default is not NOTHING: + had_default = True + + # Resolve default field alias after executing field_transformer. + # This allows field_transformer to differentiate between explicit vs + # default aliases and supply their own defaults. + for a in attrs: + if not a.alias: + # Evolve is very slow, so we hold our nose and do it dirty. + _OBJ_SETATTR.__get__(a)("alias", _default_init_alias_for(a.name)) + + # Create AttrsClass *after* applying the field_transformer since it may + # add or remove attributes! + attr_names = [a.name for a in attrs] + AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names) + + return _Attributes(AttrsClass(attrs), base_attrs, base_attr_map) + + +def _make_cached_property_getattr(cached_properties, original_getattr, cls): + lines = [ + # Wrapped to get `__class__` into closure cell for super() + # (It will be replaced with the newly constructed class after construction). 
+ "def wrapper(_cls):", + " __class__ = _cls", + " def __getattr__(self, item, cached_properties=cached_properties, original_getattr=original_getattr, _cached_setattr_get=_cached_setattr_get):", + " func = cached_properties.get(item)", + " if func is not None:", + " result = func(self)", + " _setter = _cached_setattr_get(self)", + " _setter(item, result)", + " return result", + ] + if original_getattr is not None: + lines.append( + " return original_getattr(self, item)", + ) + else: + lines.extend( + [ + " try:", + " return super().__getattribute__(item)", + " except AttributeError:", + " if not hasattr(super(), '__getattr__'):", + " raise", + " return super().__getattr__(item)", + " original_error = f\"'{self.__class__.__name__}' object has no attribute '{item}'\"", + " raise AttributeError(original_error)", + ] + ) + + lines.extend( + [ + " return __getattr__", + "__getattr__ = wrapper(_cls)", + ] + ) + + unique_filename = _generate_unique_filename(cls, "getattr") + + glob = { + "cached_properties": cached_properties, + "_cached_setattr_get": _OBJ_SETATTR.__get__, + "original_getattr": original_getattr, + } + + return _linecache_and_compile( + "\n".join(lines), unique_filename, glob, locals={"_cls": cls} + )["__getattr__"] + + +def _frozen_setattrs(self, name, value): + """ + Attached to frozen classes as __setattr__. + """ + if isinstance(self, BaseException) and name in ( + "__cause__", + "__context__", + "__traceback__", + "__suppress_context__", + "__notes__", + ): + BaseException.__setattr__(self, name, value) + return + + raise FrozenInstanceError + + +def _frozen_delattrs(self, name): + """ + Attached to frozen classes as __delattr__. + """ + if isinstance(self, BaseException) and name in ("__notes__",): + BaseException.__delattr__(self, name) + return + + raise FrozenInstanceError + + +def evolve(*args, **changes): + """ + Create a new instance, based on the first positional argument with + *changes* applied. + + .. tip:: + + On Python 3.13 and later, you can also use `copy.replace` instead. + + Args: + + inst: + Instance of a class with *attrs* attributes. *inst* must be passed + as a positional argument. + + changes: + Keyword changes in the new copy. + + Returns: + A copy of inst with *changes* incorporated. + + Raises: + TypeError: + If *attr_name* couldn't be found in the class ``__init__``. + + attrs.exceptions.NotAnAttrsClassError: + If *cls* is not an *attrs* class. + + .. versionadded:: 17.1.0 + .. deprecated:: 23.1.0 + It is now deprecated to pass the instance using the keyword argument + *inst*. It will raise a warning until at least April 2024, after which + it will become an error. Always pass the instance as a positional + argument. + .. versionchanged:: 24.1.0 + *inst* can't be passed as a keyword argument anymore. + """ + try: + (inst,) = args + except ValueError: + msg = ( + f"evolve() takes 1 positional argument, but {len(args)} were given" + ) + raise TypeError(msg) from None + + cls = inst.__class__ + attrs = fields(cls) + for a in attrs: + if not a.init: + continue + attr_name = a.name # To deal with private attributes. + init_name = a.alias + if init_name not in changes: + changes[init_name] = getattr(inst, attr_name) + + return cls(**changes) + + +class _ClassBuilder: + """ + Iteratively build *one* class. 
+ """ + + __slots__ = ( + "_add_method_dunders", + "_attr_names", + "_attrs", + "_base_attr_map", + "_base_names", + "_cache_hash", + "_cls", + "_cls_dict", + "_delete_attribs", + "_frozen", + "_has_custom_setattr", + "_has_post_init", + "_has_pre_init", + "_is_exc", + "_on_setattr", + "_pre_init_has_args", + "_repr_added", + "_script_snippets", + "_slots", + "_weakref_slot", + "_wrote_own_setattr", + ) + + def __init__( + self, + cls: type, + these, + auto_attribs: bool, + props: ClassProps, + has_custom_setattr: bool, + ): + attrs, base_attrs, base_map = _transform_attrs( + cls, + these, + auto_attribs, + props.kw_only, + props.collected_fields_by_mro, + props.field_transformer, + ) + + self._cls = cls + self._cls_dict = dict(cls.__dict__) if props.is_slotted else {} + self._attrs = attrs + self._base_names = {a.name for a in base_attrs} + self._base_attr_map = base_map + self._attr_names = tuple(a.name for a in attrs) + self._slots = props.is_slotted + self._frozen = props.is_frozen + self._weakref_slot = props.has_weakref_slot + self._cache_hash = ( + props.hashability is ClassProps.Hashability.HASHABLE_CACHED + ) + self._has_pre_init = bool(getattr(cls, "__attrs_pre_init__", False)) + self._pre_init_has_args = False + if self._has_pre_init: + # Check if the pre init method has more arguments than just `self` + # We want to pass arguments if pre init expects arguments + pre_init_func = cls.__attrs_pre_init__ + pre_init_signature = inspect.signature(pre_init_func) + self._pre_init_has_args = len(pre_init_signature.parameters) > 1 + self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False)) + self._delete_attribs = not bool(these) + self._is_exc = props.is_exception + self._on_setattr = props.on_setattr_hook + + self._has_custom_setattr = has_custom_setattr + self._wrote_own_setattr = False + + self._cls_dict["__attrs_attrs__"] = self._attrs + self._cls_dict["__attrs_props__"] = props + + if props.is_frozen: + self._cls_dict["__setattr__"] = _frozen_setattrs + self._cls_dict["__delattr__"] = _frozen_delattrs + + self._wrote_own_setattr = True + elif self._on_setattr in ( + _DEFAULT_ON_SETATTR, + setters.validate, + setters.convert, + ): + has_validator = has_converter = False + for a in attrs: + if a.validator is not None: + has_validator = True + if a.converter is not None: + has_converter = True + + if has_validator and has_converter: + break + if ( + ( + self._on_setattr == _DEFAULT_ON_SETATTR + and not (has_validator or has_converter) + ) + or (self._on_setattr == setters.validate and not has_validator) + or (self._on_setattr == setters.convert and not has_converter) + ): + # If class-level on_setattr is set to convert + validate, but + # there's no field to convert or validate, pretend like there's + # no on_setattr. + self._on_setattr = None + + if props.added_pickling: + ( + self._cls_dict["__getstate__"], + self._cls_dict["__setstate__"], + ) = self._make_getstate_setstate() + + # tuples of script, globs, hook + self._script_snippets: list[ + tuple[str, dict, Callable[[dict, dict], Any]] + ] = [] + self._repr_added = False + + # We want to only do this check once; in 99.9% of cases these + # exist. 
+ if not hasattr(self._cls, "__module__") or not hasattr( + self._cls, "__qualname__" + ): + self._add_method_dunders = self._add_method_dunders_safe + else: + self._add_method_dunders = self._add_method_dunders_unsafe + + def __repr__(self): + return f"<_ClassBuilder(cls={self._cls.__name__})>" + + def _eval_snippets(self) -> None: + """ + Evaluate any registered snippets in one go. + """ + script = "\n".join([snippet[0] for snippet in self._script_snippets]) + globs = {} + for _, snippet_globs, _ in self._script_snippets: + globs.update(snippet_globs) + + locs = _linecache_and_compile( + script, + _generate_unique_filename(self._cls, "methods"), + globs, + ) + + for _, _, hook in self._script_snippets: + hook(self._cls_dict, locs) + + def build_class(self): + """ + Finalize class based on the accumulated configuration. + + Builder cannot be used after calling this method. + """ + self._eval_snippets() + if self._slots is True: + cls = self._create_slots_class() + self._cls.__attrs_base_of_slotted__ = weakref.ref(cls) + else: + cls = self._patch_original_class() + if PY_3_10_PLUS: + cls = abc.update_abstractmethods(cls) + + # The method gets only called if it's not inherited from a base class. + # _has_own_attribute does NOT work properly for classmethods. + if ( + getattr(cls, "__attrs_init_subclass__", None) + and "__attrs_init_subclass__" not in cls.__dict__ + ): + cls.__attrs_init_subclass__() + + return cls + + def _patch_original_class(self): + """ + Apply accumulated methods and return the class. + """ + cls = self._cls + base_names = self._base_names + + # Clean class of attribute definitions (`attr.ib()`s). + if self._delete_attribs: + for name in self._attr_names: + if ( + name not in base_names + and getattr(cls, name, _SENTINEL) is not _SENTINEL + ): + # An AttributeError can happen if a base class defines a + # class variable and we want to set an attribute with the + # same name by using only a type annotation. + with contextlib.suppress(AttributeError): + delattr(cls, name) + + # Attach our dunder methods. + for name, value in self._cls_dict.items(): + setattr(cls, name, value) + + # If we've inherited an attrs __setattr__ and don't write our own, + # reset it to object's. + if not self._wrote_own_setattr and getattr( + cls, "__attrs_own_setattr__", False + ): + cls.__attrs_own_setattr__ = False + + if not self._has_custom_setattr: + cls.__setattr__ = _OBJ_SETATTR + + return cls + + def _create_slots_class(self): + """ + Build and return a new class with a `__slots__` attribute. + """ + cd = { + k: v + for k, v in self._cls_dict.items() + if k not in (*tuple(self._attr_names), "__dict__", "__weakref__") + } + + # 3.14.0rc2+ + if hasattr(sys, "_clear_type_descriptors"): + sys._clear_type_descriptors(self._cls) + + # If our class doesn't have its own implementation of __setattr__ + # (either from the user or by us), check the bases, if one of them has + # an attrs-made __setattr__, that needs to be reset. We don't walk the + # MRO because we only care about our immediate base classes. + # XXX: This can be confused by subclassing a slotted attrs class with + # XXX: a non-attrs class and subclass the resulting class with an attrs + # XXX: class. See `test_slotted_confused` for details. For now that's + # XXX: OK with us. 
+        if not self._wrote_own_setattr:
+            cd["__attrs_own_setattr__"] = False
+
+            if not self._has_custom_setattr:
+                for base_cls in self._cls.__bases__:
+                    if base_cls.__dict__.get("__attrs_own_setattr__", False):
+                        cd["__setattr__"] = _OBJ_SETATTR
+                        break
+
+        # Traverse the MRO to collect existing slots
+        # and check for an existing __weakref__.
+        existing_slots = {}
+        weakref_inherited = False
+        for base_cls in self._cls.__mro__[1:-1]:
+            if base_cls.__dict__.get("__weakref__", None) is not None:
+                weakref_inherited = True
+            existing_slots.update(
+                {
+                    name: getattr(base_cls, name)
+                    for name in getattr(base_cls, "__slots__", [])
+                }
+            )
+
+        base_names = set(self._base_names)
+
+        names = self._attr_names
+        if (
+            self._weakref_slot
+            and "__weakref__" not in getattr(self._cls, "__slots__", ())
+            and "__weakref__" not in names
+            and not weakref_inherited
+        ):
+            names += ("__weakref__",)
+
+        cached_properties = {
+            name: cached_prop.func
+            for name, cached_prop in cd.items()
+            if isinstance(cached_prop, cached_property)
+        }
+
+        # Collect methods with a `__class__` reference that are shadowed in
+        # the new class, so that we know to update them.
+        additional_closure_functions_to_update = []
+        if cached_properties:
+            class_annotations = _get_annotations(self._cls)
+            for name, func in cached_properties.items():
+                # Add cached properties to names for slotting.
+                names += (name,)
+                # Clear out function from class to avoid clashing.
+                del cd[name]
+                additional_closure_functions_to_update.append(func)
+                annotation = inspect.signature(func).return_annotation
+                if annotation is not inspect.Parameter.empty:
+                    class_annotations[name] = annotation
+
+            original_getattr = cd.get("__getattr__")
+            if original_getattr is not None:
+                additional_closure_functions_to_update.append(original_getattr)
+
+            cd["__getattr__"] = _make_cached_property_getattr(
+                cached_properties, original_getattr, self._cls
+            )
+
+        # We only add the names of attributes that aren't inherited.
+        # Setting __slots__ to inherited attributes wastes memory.
+        slot_names = [name for name in names if name not in base_names]
+
+        # There are slots for attributes from the current class
+        # that are defined in parent classes.
+        # As their descriptors may be overridden by a child class,
+        # we collect them here and update the class dict.
+        reused_slots = {
+            slot: slot_descriptor
+            for slot, slot_descriptor in existing_slots.items()
+            if slot in slot_names
+        }
+        slot_names = [name for name in slot_names if name not in reused_slots]
+        cd.update(reused_slots)
+        if self._cache_hash:
+            slot_names.append(_HASH_CACHE_FIELD)
+
+        cd["__slots__"] = tuple(slot_names)
+
+        cd["__qualname__"] = self._cls.__qualname__
+
+        # Create new class based on old class and our methods.
+        cls = type(self._cls)(self._cls.__name__, self._cls.__bases__, cd)
+
+        # If a method mentions `__class__` or uses the no-arg super(), the
+        # compiler will bake a reference to the class in the method itself
+        # as `method.__closure__`. Since we replace the class with a
+        # clone, we rewrite these references so it keeps working.
+        for item in itertools.chain(
+            cls.__dict__.values(), additional_closure_functions_to_update
+        ):
+            if isinstance(item, (classmethod, staticmethod)):
+                # Class- and staticmethods hide their functions inside.
+                # These might need to be rewritten as well.
+                closure_cells = getattr(item.__func__, "__closure__", None)
+            elif isinstance(item, property):
+                # Workaround for property `super()` shortcut (PY3-only).
+ # There is no universal way for other descriptors. + closure_cells = getattr(item.fget, "__closure__", None) + else: + closure_cells = getattr(item, "__closure__", None) + + if not closure_cells: # Catch None or the empty list. + continue + for cell in closure_cells: + try: + match = cell.cell_contents is self._cls + except ValueError: # noqa: PERF203 + # ValueError: Cell is empty + pass + else: + if match: + cell.cell_contents = cls + return cls + + def add_repr(self, ns): + script, globs = _make_repr_script(self._attrs, ns) + + def _attach_repr(cls_dict, globs): + cls_dict["__repr__"] = self._add_method_dunders(globs["__repr__"]) + + self._script_snippets.append((script, globs, _attach_repr)) + self._repr_added = True + return self + + def add_str(self): + if not self._repr_added: + msg = "__str__ can only be generated if a __repr__ exists." + raise ValueError(msg) + + def __str__(self): + return self.__repr__() + + self._cls_dict["__str__"] = self._add_method_dunders(__str__) + return self + + def _make_getstate_setstate(self): + """ + Create custom __setstate__ and __getstate__ methods. + """ + # __weakref__ is not writable. + state_attr_names = tuple( + an for an in self._attr_names if an != "__weakref__" + ) + + def slots_getstate(self): + """ + Automatically created by attrs. + """ + return {name: getattr(self, name) for name in state_attr_names} + + hash_caching_enabled = self._cache_hash + + def slots_setstate(self, state): + """ + Automatically created by attrs. + """ + __bound_setattr = _OBJ_SETATTR.__get__(self) + if isinstance(state, tuple): + # Backward compatibility with attrs instances pickled with + # attrs versions before v22.2.0 which stored tuples. + for name, value in zip(state_attr_names, state): + __bound_setattr(name, value) + else: + for name in state_attr_names: + if name in state: + __bound_setattr(name, state[name]) + + # The hash code cache is not included when the object is + # serialized, but it still needs to be initialized to None to + # indicate that the first call to __hash__ should be a cache + # miss. 
+ if hash_caching_enabled: + __bound_setattr(_HASH_CACHE_FIELD, None) + + return slots_getstate, slots_setstate + + def make_unhashable(self): + self._cls_dict["__hash__"] = None + return self + + def add_hash(self): + script, globs = _make_hash_script( + self._cls, + self._attrs, + frozen=self._frozen, + cache_hash=self._cache_hash, + ) + + def attach_hash(cls_dict: dict, locs: dict) -> None: + cls_dict["__hash__"] = self._add_method_dunders(locs["__hash__"]) + + self._script_snippets.append((script, globs, attach_hash)) + + return self + + def add_init(self): + script, globs, annotations = _make_init_script( + self._cls, + self._attrs, + self._has_pre_init, + self._pre_init_has_args, + self._has_post_init, + self._frozen, + self._slots, + self._cache_hash, + self._base_attr_map, + self._is_exc, + self._on_setattr, + attrs_init=False, + ) + + def _attach_init(cls_dict, globs): + init = globs["__init__"] + init.__annotations__ = annotations + cls_dict["__init__"] = self._add_method_dunders(init) + + self._script_snippets.append((script, globs, _attach_init)) + + return self + + def add_replace(self): + self._cls_dict["__replace__"] = self._add_method_dunders( + lambda self, **changes: evolve(self, **changes) + ) + return self + + def add_match_args(self): + self._cls_dict["__match_args__"] = tuple( + field.name + for field in self._attrs + if field.init and not field.kw_only + ) + + def add_attrs_init(self): + script, globs, annotations = _make_init_script( + self._cls, + self._attrs, + self._has_pre_init, + self._pre_init_has_args, + self._has_post_init, + self._frozen, + self._slots, + self._cache_hash, + self._base_attr_map, + self._is_exc, + self._on_setattr, + attrs_init=True, + ) + + def _attach_attrs_init(cls_dict, globs): + init = globs["__attrs_init__"] + init.__annotations__ = annotations + cls_dict["__attrs_init__"] = self._add_method_dunders(init) + + self._script_snippets.append((script, globs, _attach_attrs_init)) + + return self + + def add_eq(self): + cd = self._cls_dict + + script, globs = _make_eq_script(self._attrs) + + def _attach_eq(cls_dict, globs): + cls_dict["__eq__"] = self._add_method_dunders(globs["__eq__"]) + + self._script_snippets.append((script, globs, _attach_eq)) + + cd["__ne__"] = __ne__ + + return self + + def add_order(self): + cd = self._cls_dict + + cd["__lt__"], cd["__le__"], cd["__gt__"], cd["__ge__"] = ( + self._add_method_dunders(meth) + for meth in _make_order(self._cls, self._attrs) + ) + + return self + + def add_setattr(self): + sa_attrs = {} + for a in self._attrs: + on_setattr = a.on_setattr or self._on_setattr + if on_setattr and on_setattr is not setters.NO_OP: + sa_attrs[a.name] = a, on_setattr + + if not sa_attrs: + return self + + if self._has_custom_setattr: + # We need to write a __setattr__ but there already is one! + msg = "Can't combine custom __setattr__ with on_setattr hooks." + raise ValueError(msg) + + # docstring comes from _add_method_dunders + def __setattr__(self, name, val): + try: + a, hook = sa_attrs[name] + except KeyError: + nval = val + else: + nval = hook(self, a, val) + + _OBJ_SETATTR(self, name, nval) + + self._cls_dict["__attrs_own_setattr__"] = True + self._cls_dict["__setattr__"] = self._add_method_dunders(__setattr__) + self._wrote_own_setattr = True + + return self + + def _add_method_dunders_unsafe(self, method: Callable) -> Callable: + """ + Add __module__ and __qualname__ to a *method*. 
+        """
+        method.__module__ = self._cls.__module__
+
+        method.__qualname__ = f"{self._cls.__qualname__}.{method.__name__}"
+
+        method.__doc__ = (
+            f"Method generated by attrs for class {self._cls.__qualname__}."
+        )
+
+        return method
+
+    def _add_method_dunders_safe(self, method: Callable) -> Callable:
+        """
+        Add __module__ and __qualname__ to a *method* if possible.
+        """
+        with contextlib.suppress(AttributeError):
+            method.__module__ = self._cls.__module__
+
+        with contextlib.suppress(AttributeError):
+            method.__qualname__ = f"{self._cls.__qualname__}.{method.__name__}"
+
+        with contextlib.suppress(AttributeError):
+            method.__doc__ = f"Method generated by attrs for class {self._cls.__qualname__}."
+
+        return method
+
+
+def _determine_attrs_eq_order(cmp, eq, order, default_eq):
+    """
+    Validate the combination of *cmp*, *eq*, and *order*. Derive the effective
+    values of eq and order. If *eq* is None, set it to *default_eq*.
+    """
+    if cmp is not None and any((eq is not None, order is not None)):
+        msg = "Don't mix `cmp` with `eq` and `order`."
+        raise ValueError(msg)
+
+    # cmp takes precedence due to bw-compatibility.
+    if cmp is not None:
+        return cmp, cmp
+
+    # If left None, equality is set to the specified default and ordering
+    # mirrors equality.
+    if eq is None:
+        eq = default_eq
+
+    if order is None:
+        order = eq
+
+    if eq is False and order is True:
+        msg = "`order` can only be True if `eq` is True too."
+        raise ValueError(msg)
+
+    return eq, order
+
+
+def _determine_attrib_eq_order(cmp, eq, order, default_eq):
+    """
+    Validate the combination of *cmp*, *eq*, and *order*. Derive the effective
+    values of eq and order. If *eq* is None, set it to *default_eq*.
+    """
+    if cmp is not None and any((eq is not None, order is not None)):
+        msg = "Don't mix `cmp` with `eq` and `order`."
+        raise ValueError(msg)
+
+    def decide_callable_or_boolean(value):
+        """
+        Decide whether a key function is used.
+        """
+        if callable(value):
+            value, key = True, value
+        else:
+            key = None
+        return value, key
+
+    # cmp takes precedence due to bw-compatibility.
+    if cmp is not None:
+        cmp, cmp_key = decide_callable_or_boolean(cmp)
+        return cmp, cmp_key, cmp, cmp_key
+
+    # If left None, equality is set to the specified default and ordering
+    # mirrors equality.
+    if eq is None:
+        eq, eq_key = default_eq, None
+    else:
+        eq, eq_key = decide_callable_or_boolean(eq)
+
+    if order is None:
+        order, order_key = eq, eq_key
+    else:
+        order, order_key = decide_callable_or_boolean(order)
+
+    if eq is False and order is True:
+        msg = "`order` can only be True if `eq` is True too."
+        raise ValueError(msg)
+
+    return eq, eq_key, order, order_key
+
+
+def _determine_whether_to_implement(
+    cls, flag, auto_detect, dunders, default=True
+):
+    """
+    Check whether we should implement a set of methods for *cls*.
+
+    *flag* is the argument passed into @attr.s like 'init', *auto_detect* is
+    the same as passed into @attr.s, and *dunders* is a tuple of attribute
+    names whose presence signals that the user has implemented the method
+    themselves.
+
+    Return *default* if no reason either for or against is found.
+    """
+    if flag is True or flag is False:
+        return flag
+
+    if flag is None and auto_detect is False:
+        return default
+
+    # Logically, flag is None and auto_detect is True here.
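+    # Only dunders defined on *cls* itself count; inherited ones don't
+    # suppress generation. A hypothetical example:
+    #
+    #     @attr.s(auto_detect=True)
+    #     class C:
+    #         x = attr.ib()
+    #
+    #         def __repr__(self): ...
+    #
+    # Here dunders == ("__repr__",) yields False: attrs keeps the
+    # handwritten __repr__.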
+    for dunder in dunders:
+        if _has_own_attribute(cls, dunder):
+            return False
+
+    return default
+
+
+def attrs(
+    maybe_cls=None,
+    these=None,
+    repr_ns=None,
+    repr=None,
+    cmp=None,
+    hash=None,
+    init=None,
+    slots=False,
+    frozen=False,
+    weakref_slot=True,
+    str=False,
+    auto_attribs=False,
+    kw_only=False,
+    cache_hash=False,
+    auto_exc=False,
+    eq=None,
+    order=None,
+    auto_detect=False,
+    collect_by_mro=False,
+    getstate_setstate=None,
+    on_setattr=None,
+    field_transformer=None,
+    match_args=True,
+    unsafe_hash=None,
+    force_kw_only=True,
+):
+    r"""
+    A class decorator that adds :term:`dunder methods` according to the
+    specified attributes using `attr.ib` or the *these* argument.
+
+    Consider using `attrs.define` / `attrs.frozen` in new code (``attr.s``
+    will *never* go away, though).
+
+    Args:
+        repr_ns (str):
+            When using nested classes, there was no way in Python 2 to
+            automatically detect that. This argument allows you to set a
+            custom name for a more meaningful ``repr`` output. This argument
+            is pointless in Python 3 and is therefore deprecated.
+
+    .. caution::
+        Refer to `attrs.define` for the rest of the parameters, but note that
+        they can have different defaults.
+
+        Notably, leaving *on_setattr* as `None` will **not** add any hooks.
+
+    .. versionadded:: 16.0.0 *slots*
+    .. versionadded:: 16.1.0 *frozen*
+    .. versionadded:: 16.3.0 *str*
+    .. versionadded:: 16.3.0 Support for ``__attrs_post_init__``.
+    .. versionchanged:: 17.1.0
+       *hash* supports `None` as value which is also the default now.
+    .. versionadded:: 17.3.0 *auto_attribs*
+    .. versionchanged:: 18.1.0
+       If *these* is passed, no attributes are deleted from the class body.
+    .. versionchanged:: 18.1.0 If *these* is ordered, the order is retained.
+    .. versionadded:: 18.2.0 *weakref_slot*
+    .. deprecated:: 18.2.0
+       ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a
+       `DeprecationWarning` if the classes compared are subclasses of each
+       other. ``__eq__`` and ``__ne__`` never tried to compare subclasses to
+       each other.
+    .. versionchanged:: 19.2.0
+       ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now do not consider
+       subclasses comparable anymore.
+    .. versionadded:: 18.2.0 *kw_only*
+    .. versionadded:: 18.2.0 *cache_hash*
+    .. versionadded:: 19.1.0 *auto_exc*
+    .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01.
+    .. versionadded:: 19.2.0 *eq* and *order*
+    .. versionadded:: 20.1.0 *auto_detect*
+    .. versionadded:: 20.1.0 *collect_by_mro*
+    .. versionadded:: 20.1.0 *getstate_setstate*
+    .. versionadded:: 20.1.0 *on_setattr*
+    .. versionadded:: 20.3.0 *field_transformer*
+    .. versionchanged:: 21.1.0
+       ``init=False`` injects ``__attrs_init__``
+    .. versionchanged:: 21.1.0 Support for ``__attrs_pre_init__``
+    .. versionchanged:: 21.1.0 *cmp* undeprecated
+    .. versionadded:: 21.3.0 *match_args*
+    .. versionadded:: 22.2.0
+       *unsafe_hash* as an alias for *hash* (for :pep:`681` compliance).
+    .. deprecated:: 24.1.0 *repr_ns*
+    .. versionchanged:: 24.1.0
+       Instances are not compared as tuples of attributes anymore, but using
+       a big ``and`` condition. This is faster and has more correct behavior
+       for uncomparable values like `math.nan`.
+    .. versionadded:: 24.1.0
+       If a class has an *inherited* classmethod called
+       ``__attrs_init_subclass__``, it is executed after the class is
+       created.
+    .. deprecated:: 24.1.0 *hash* is deprecated in favor of *unsafe_hash*.
+    ..
versionchanged:: 25.4.0 + *kw_only* now only applies to attributes defined in the current class, + and respects attribute-level ``kw_only=False`` settings. + .. versionadded:: 25.4.0 *force_kw_only* + """ + if repr_ns is not None: + import warnings + + warnings.warn( + DeprecationWarning( + "The `repr_ns` argument is deprecated and will be removed in or after August 2025." + ), + stacklevel=2, + ) + + eq_, order_ = _determine_attrs_eq_order(cmp, eq, order, None) + + # unsafe_hash takes precedence due to PEP 681. + if unsafe_hash is not None: + hash = unsafe_hash + + if isinstance(on_setattr, (list, tuple)): + on_setattr = setters.pipe(*on_setattr) + + def wrap(cls): + nonlocal hash + is_frozen = frozen or _has_frozen_base_class(cls) + is_exc = auto_exc is True and issubclass(cls, BaseException) + has_own_setattr = auto_detect and _has_own_attribute( + cls, "__setattr__" + ) + + if has_own_setattr and is_frozen: + msg = "Can't freeze a class with a custom __setattr__." + raise ValueError(msg) + + eq = not is_exc and _determine_whether_to_implement( + cls, eq_, auto_detect, ("__eq__", "__ne__") + ) + + Hashability = ClassProps.Hashability + + if is_exc: + hashability = Hashability.LEAVE_ALONE + elif hash is True: + hashability = ( + Hashability.HASHABLE_CACHED + if cache_hash + else Hashability.HASHABLE + ) + elif hash is False: + hashability = Hashability.LEAVE_ALONE + elif hash is None: + if auto_detect is True and _has_own_attribute(cls, "__hash__"): + hashability = Hashability.LEAVE_ALONE + elif eq is True and is_frozen is True: + hashability = ( + Hashability.HASHABLE_CACHED + if cache_hash + else Hashability.HASHABLE + ) + elif eq is False: + hashability = Hashability.LEAVE_ALONE + else: + hashability = Hashability.UNHASHABLE + else: + msg = "Invalid value for hash. Must be True, False, or None." + raise TypeError(msg) + + KeywordOnly = ClassProps.KeywordOnly + if kw_only: + kwo = KeywordOnly.FORCE if force_kw_only else KeywordOnly.YES + else: + kwo = KeywordOnly.NO + + props = ClassProps( + is_exception=is_exc, + is_frozen=is_frozen, + is_slotted=slots, + collected_fields_by_mro=collect_by_mro, + added_init=_determine_whether_to_implement( + cls, init, auto_detect, ("__init__",) + ), + added_repr=_determine_whether_to_implement( + cls, repr, auto_detect, ("__repr__",) + ), + added_eq=eq, + added_ordering=not is_exc + and _determine_whether_to_implement( + cls, + order_, + auto_detect, + ("__lt__", "__le__", "__gt__", "__ge__"), + ), + hashability=hashability, + added_match_args=match_args, + kw_only=kwo, + has_weakref_slot=weakref_slot, + added_str=str, + added_pickling=_determine_whether_to_implement( + cls, + getstate_setstate, + auto_detect, + ("__getstate__", "__setstate__"), + default=slots, + ), + on_setattr_hook=on_setattr, + field_transformer=field_transformer, + ) + + if not props.is_hashable and cache_hash: + msg = "Invalid value for cache_hash. To use hash caching, hashing must be either explicitly or implicitly enabled." 
+            raise TypeError(msg)
+
+        builder = _ClassBuilder(
+            cls,
+            these,
+            auto_attribs=auto_attribs,
+            props=props,
+            has_custom_setattr=has_own_setattr,
+        )
+
+        if props.added_repr:
+            builder.add_repr(repr_ns)
+
+        if props.added_str:
+            builder.add_str()
+
+        if props.added_eq:
+            builder.add_eq()
+        if props.added_ordering:
+            builder.add_order()
+
+        if not frozen:
+            builder.add_setattr()
+
+        if props.is_hashable:
+            builder.add_hash()
+        elif props.hashability is Hashability.UNHASHABLE:
+            builder.make_unhashable()
+
+        if props.added_init:
+            builder.add_init()
+        else:
+            builder.add_attrs_init()
+            if cache_hash:
+                msg = "Invalid value for cache_hash. To use hash caching, init must be True."
+                raise TypeError(msg)
+
+        if PY_3_13_PLUS and not _has_own_attribute(cls, "__replace__"):
+            builder.add_replace()
+
+        if (
+            PY_3_10_PLUS
+            and match_args
+            and not _has_own_attribute(cls, "__match_args__")
+        ):
+            builder.add_match_args()
+
+        return builder.build_class()
+
+    # maybe_cls's type depends on the usage of the decorator. It's a class
+    # if it's used as `@attrs` but `None` if used as `@attrs()`.
+    if maybe_cls is None:
+        return wrap
+
+    return wrap(maybe_cls)
+
+
+_attrs = attrs
+"""
+Internal alias so we can use it in functions that take an argument called
+*attrs*.
+"""
+
+
+def _has_frozen_base_class(cls):
+    """
+    Check whether *cls* has a frozen ancestor by looking at its
+    __setattr__.
+    """
+    return cls.__setattr__ is _frozen_setattrs
+
+
+def _generate_unique_filename(cls: type, func_name: str) -> str:
+    """
+    Create a "filename" suitable for a function being generated.
+    """
+    return (
+        f"<attrs generated {func_name} {cls.__module__}."
+        f"{getattr(cls, '__qualname__', cls.__name__)}>"
+    )
+
+
+def _make_hash_script(
+    cls: type, attrs: list[Attribute], frozen: bool, cache_hash: bool
+) -> tuple[str, dict]:
+    attrs = tuple(
+        a for a in attrs if a.hash is True or (a.hash is None and a.eq is True)
+    )
+
+    tab = "        "
+
+    type_hash = hash(_generate_unique_filename(cls, "hash"))
+    # If eq is custom generated, we need to include the functions in globs
+    globs = {}
+
+    hash_def = "def __hash__(self"
+    hash_func = "hash(("
+    closing_braces = "))"
+    if not cache_hash:
+        hash_def += "):"
+    else:
+        hash_def += ", *"
+
+        hash_def += ", _cache_wrapper=__import__('attr._make')._make._CacheHashWrapper):"
+        hash_func = "_cache_wrapper(" + hash_func
+        closing_braces += ")"
+
+    method_lines = [hash_def]
+
+    def append_hash_computation_lines(prefix, indent):
+        """
+        Generate the code for actually computing the hash code.
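+
+        *prefix* is the code that consumes the hash value (for example
+        "return " or an assignment target) and *indent* is prepended to
+        every generated line.
+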
+ Below this will either be returned directly or used to compute + a value which is then cached, depending on the value of cache_hash + """ + + method_lines.extend( + [ + indent + prefix + hash_func, + indent + f" {type_hash},", + ] + ) + + for a in attrs: + if a.eq_key: + cmp_name = f"_{a.name}_key" + globs[cmp_name] = a.eq_key + method_lines.append( + indent + f" {cmp_name}(self.{a.name})," + ) + else: + method_lines.append(indent + f" self.{a.name},") + + method_lines.append(indent + " " + closing_braces) + + if cache_hash: + method_lines.append(tab + f"if self.{_HASH_CACHE_FIELD} is None:") + if frozen: + append_hash_computation_lines( + f"object.__setattr__(self, '{_HASH_CACHE_FIELD}', ", tab * 2 + ) + method_lines.append(tab * 2 + ")") # close __setattr__ + else: + append_hash_computation_lines( + f"self.{_HASH_CACHE_FIELD} = ", tab * 2 + ) + method_lines.append(tab + f"return self.{_HASH_CACHE_FIELD}") + else: + append_hash_computation_lines("return ", tab) + + script = "\n".join(method_lines) + return script, globs + + +def _add_hash(cls: type, attrs: list[Attribute]): + """ + Add a hash method to *cls*. + """ + script, globs = _make_hash_script( + cls, attrs, frozen=False, cache_hash=False + ) + _compile_and_eval( + script, globs, filename=_generate_unique_filename(cls, "__hash__") + ) + cls.__hash__ = globs["__hash__"] + return cls + + +def __ne__(self, other): + """ + Check equality and either forward a NotImplemented or + return the result negated. + """ + result = self.__eq__(other) + if result is NotImplemented: + return NotImplemented + + return not result + + +def _make_eq_script(attrs: list) -> tuple[str, dict]: + """ + Create __eq__ method for *cls* with *attrs*. + """ + attrs = [a for a in attrs if a.eq] + + lines = [ + "def __eq__(self, other):", + " if other.__class__ is not self.__class__:", + " return NotImplemented", + ] + + globs = {} + if attrs: + lines.append(" return (") + for a in attrs: + if a.eq_key: + cmp_name = f"_{a.name}_key" + # Add the key function to the global namespace + # of the evaluated function. + globs[cmp_name] = a.eq_key + lines.append( + f" {cmp_name}(self.{a.name}) == {cmp_name}(other.{a.name})" + ) + else: + lines.append(f" self.{a.name} == other.{a.name}") + if a is not attrs[-1]: + lines[-1] = f"{lines[-1]} and" + lines.append(" )") + else: + lines.append(" return True") + + script = "\n".join(lines) + + return script, globs + + +def _make_order(cls, attrs): + """ + Create ordering methods for *cls* with *attrs*. + """ + attrs = [a for a in attrs if a.order] + + def attrs_to_tuple(obj): + """ + Save us some typing. + """ + return tuple( + key(value) if key else value + for value, key in ( + (getattr(obj, a.name), a.order_key) for a in attrs + ) + ) + + def __lt__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) < attrs_to_tuple(other) + + return NotImplemented + + def __le__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) <= attrs_to_tuple(other) + + return NotImplemented + + def __gt__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) > attrs_to_tuple(other) + + return NotImplemented + + def __ge__(self, other): + """ + Automatically created by attrs. 
+ """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) >= attrs_to_tuple(other) + + return NotImplemented + + return __lt__, __le__, __gt__, __ge__ + + +def _add_eq(cls, attrs=None): + """ + Add equality methods to *cls* with *attrs*. + """ + if attrs is None: + attrs = cls.__attrs_attrs__ + + script, globs = _make_eq_script(attrs) + _compile_and_eval( + script, globs, filename=_generate_unique_filename(cls, "__eq__") + ) + cls.__eq__ = globs["__eq__"] + cls.__ne__ = __ne__ + + return cls + + +def _make_repr_script(attrs, ns) -> tuple[str, dict]: + """ + Create the source and globs for a __repr__ and return it. + """ + # Figure out which attributes to include, and which function to use to + # format them. The a.repr value can be either bool or a custom + # callable. + attr_names_with_reprs = tuple( + (a.name, (repr if a.repr is True else a.repr), a.init) + for a in attrs + if a.repr is not False + ) + globs = { + name + "_repr": r for name, r, _ in attr_names_with_reprs if r != repr + } + globs["_compat"] = _compat + globs["AttributeError"] = AttributeError + globs["NOTHING"] = NOTHING + attribute_fragments = [] + for name, r, i in attr_names_with_reprs: + accessor = ( + "self." + name if i else 'getattr(self, "' + name + '", NOTHING)' + ) + fragment = ( + "%s={%s!r}" % (name, accessor) + if r == repr + else "%s={%s_repr(%s)}" % (name, name, accessor) + ) + attribute_fragments.append(fragment) + repr_fragment = ", ".join(attribute_fragments) + + if ns is None: + cls_name_fragment = '{self.__class__.__qualname__.rsplit(">.", 1)[-1]}' + else: + cls_name_fragment = ns + ".{self.__class__.__name__}" + + lines = [ + "def __repr__(self):", + " try:", + " already_repring = _compat.repr_context.already_repring", + " except AttributeError:", + " already_repring = {id(self),}", + " _compat.repr_context.already_repring = already_repring", + " else:", + " if id(self) in already_repring:", + " return '...'", + " else:", + " already_repring.add(id(self))", + " try:", + f" return f'{cls_name_fragment}({repr_fragment})'", + " finally:", + " already_repring.remove(id(self))", + ] + + return "\n".join(lines), globs + + +def _add_repr(cls, ns=None, attrs=None): + """ + Add a repr method to *cls*. + """ + if attrs is None: + attrs = cls.__attrs_attrs__ + + script, globs = _make_repr_script(attrs, ns) + _compile_and_eval( + script, globs, filename=_generate_unique_filename(cls, "__repr__") + ) + cls.__repr__ = globs["__repr__"] + return cls + + +def fields(cls): + """ + Return the tuple of *attrs* attributes for a class. + + The tuple also allows accessing the fields by their names (see below for + examples). + + Args: + cls (type): Class to introspect. + + Raises: + TypeError: If *cls* is not a class. + + attrs.exceptions.NotAnAttrsClassError: + If *cls* is not an *attrs* class. + + Returns: + tuple (with name accessors) of `attrs.Attribute` + + .. versionchanged:: 16.2.0 Returned tuple allows accessing the fields + by name. + .. versionchanged:: 23.1.0 Add support for generic classes. + """ + generic_base = get_generic_base(cls) + + if generic_base is None and not isinstance(cls, type): + msg = "Passed object must be a class." + raise TypeError(msg) + + attrs = getattr(cls, "__attrs_attrs__", None) + + if attrs is None: + if generic_base is not None: + attrs = getattr(generic_base, "__attrs_attrs__", None) + if attrs is not None: + # Even though this is global state, stick it on here to speed + # it up. We rely on `cls` being cached for this to be + # efficient. 
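+                # Hypothetical example: typing caches parametrized aliases
+                # (MyClass[int] is MyClass[int]), so memoizing the resolved
+                # tuple on the alias lets later fields(MyClass[int]) calls
+                # hit the early getattr above instead of this slow path.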
+ cls.__attrs_attrs__ = attrs + return attrs + msg = f"{cls!r} is not an attrs-decorated class." + raise NotAnAttrsClassError(msg) + + return attrs + + +def fields_dict(cls): + """ + Return an ordered dictionary of *attrs* attributes for a class, whose keys + are the attribute names. + + Args: + cls (type): Class to introspect. + + Raises: + TypeError: If *cls* is not a class. + + attrs.exceptions.NotAnAttrsClassError: + If *cls* is not an *attrs* class. + + Returns: + dict[str, attrs.Attribute]: Dict of attribute name to definition + + .. versionadded:: 18.1.0 + """ + if not isinstance(cls, type): + msg = "Passed object must be a class." + raise TypeError(msg) + attrs = getattr(cls, "__attrs_attrs__", None) + if attrs is None: + msg = f"{cls!r} is not an attrs-decorated class." + raise NotAnAttrsClassError(msg) + return {a.name: a for a in attrs} + + +def validate(inst): + """ + Validate all attributes on *inst* that have a validator. + + Leaves all exceptions through. + + Args: + inst: Instance of a class with *attrs* attributes. + """ + if _config._run_validators is False: + return + + for a in fields(inst.__class__): + v = a.validator + if v is not None: + v(inst, a, getattr(inst, a.name)) + + +def _is_slot_attr(a_name, base_attr_map): + """ + Check if the attribute name comes from a slot class. + """ + cls = base_attr_map.get(a_name) + return cls and "__slots__" in cls.__dict__ + + +def _make_init_script( + cls, + attrs, + pre_init, + pre_init_has_args, + post_init, + frozen, + slots, + cache_hash, + base_attr_map, + is_exc, + cls_on_setattr, + attrs_init, +) -> tuple[str, dict, dict]: + has_cls_on_setattr = ( + cls_on_setattr is not None and cls_on_setattr is not setters.NO_OP + ) + + if frozen and has_cls_on_setattr: + msg = "Frozen classes can't use on_setattr." + raise ValueError(msg) + + needs_cached_setattr = cache_hash or frozen + filtered_attrs = [] + attr_dict = {} + for a in attrs: + if not a.init and a.default is NOTHING: + continue + + filtered_attrs.append(a) + attr_dict[a.name] = a + + if a.on_setattr is not None: + if frozen is True: + msg = "Frozen classes can't use on_setattr." + raise ValueError(msg) + + needs_cached_setattr = True + elif has_cls_on_setattr and a.on_setattr is not setters.NO_OP: + needs_cached_setattr = True + + script, globs, annotations = _attrs_to_init_script( + filtered_attrs, + frozen, + slots, + pre_init, + pre_init_has_args, + post_init, + cache_hash, + base_attr_map, + is_exc, + needs_cached_setattr, + has_cls_on_setattr, + "__attrs_init__" if attrs_init else "__init__", + ) + if cls.__module__ in sys.modules: + # This makes typing.get_type_hints(CLS.__init__) resolve string types. + globs.update(sys.modules[cls.__module__].__dict__) + + globs.update({"NOTHING": NOTHING, "attr_dict": attr_dict}) + + if needs_cached_setattr: + # Save the lookup overhead in __init__ if we need to circumvent + # setattr hooks. + globs["_cached_setattr_get"] = _OBJ_SETATTR.__get__ + + return script, globs, annotations + + +def _setattr(attr_name: str, value_var: str, has_on_setattr: bool) -> str: + """ + Use the cached object.setattr to set *attr_name* to *value_var*. + """ + return f"_setattr('{attr_name}', {value_var})" + + +def _setattr_with_converter( + attr_name: str, value_var: str, has_on_setattr: bool, converter: Converter +) -> str: + """ + Use the cached object.setattr to set *attr_name* to *value_var*, but run + its converter first. 
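+
+    The emitted fragment calls the converter through its injected global, so
+    for a field ``x`` it looks like "_setattr('x', __attr_converter_x(x))".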
+ """ + return f"_setattr('{attr_name}', {converter._fmt_converter_call(attr_name, value_var)})" + + +def _assign(attr_name: str, value: str, has_on_setattr: bool) -> str: + """ + Unless *attr_name* has an on_setattr hook, use normal assignment. Otherwise + relegate to _setattr. + """ + if has_on_setattr: + return _setattr(attr_name, value, True) + + return f"self.{attr_name} = {value}" + + +def _assign_with_converter( + attr_name: str, value_var: str, has_on_setattr: bool, converter: Converter +) -> str: + """ + Unless *attr_name* has an on_setattr hook, use normal assignment after + conversion. Otherwise relegate to _setattr_with_converter. + """ + if has_on_setattr: + return _setattr_with_converter(attr_name, value_var, True, converter) + + return f"self.{attr_name} = {converter._fmt_converter_call(attr_name, value_var)}" + + +def _determine_setters( + frozen: bool, slots: bool, base_attr_map: dict[str, type] +): + """ + Determine the correct setter functions based on whether a class is frozen + and/or slotted. + """ + if frozen is True: + if slots is True: + return (), _setattr, _setattr_with_converter + + # Dict frozen classes assign directly to __dict__. + # But only if the attribute doesn't come from an ancestor slot + # class. + # Note _inst_dict will be used again below if cache_hash is True + + def fmt_setter( + attr_name: str, value_var: str, has_on_setattr: bool + ) -> str: + if _is_slot_attr(attr_name, base_attr_map): + return _setattr(attr_name, value_var, has_on_setattr) + + return f"_inst_dict['{attr_name}'] = {value_var}" + + def fmt_setter_with_converter( + attr_name: str, + value_var: str, + has_on_setattr: bool, + converter: Converter, + ) -> str: + if has_on_setattr or _is_slot_attr(attr_name, base_attr_map): + return _setattr_with_converter( + attr_name, value_var, has_on_setattr, converter + ) + + return f"_inst_dict['{attr_name}'] = {converter._fmt_converter_call(attr_name, value_var)}" + + return ( + ("_inst_dict = self.__dict__",), + fmt_setter, + fmt_setter_with_converter, + ) + + # Not frozen -- we can just assign directly. + return (), _assign, _assign_with_converter + + +def _attrs_to_init_script( + attrs: list[Attribute], + is_frozen: bool, + is_slotted: bool, + call_pre_init: bool, + pre_init_has_args: bool, + call_post_init: bool, + does_cache_hash: bool, + base_attr_map: dict[str, type], + is_exc: bool, + needs_cached_setattr: bool, + has_cls_on_setattr: bool, + method_name: str, +) -> tuple[str, dict, dict]: + """ + Return a script of an initializer for *attrs*, a dict of globals, and + annotations for the initializer. + + The globals are required by the generated script. + """ + lines = ["self.__attrs_pre_init__()"] if call_pre_init else [] + + if needs_cached_setattr: + lines.append( + # Circumvent the __setattr__ descriptor to save one lookup per + # assignment. Note _setattr will be used again below if + # does_cache_hash is True. + "_setattr = _cached_setattr_get(self)" + ) + + extra_lines, fmt_setter, fmt_setter_with_converter = _determine_setters( + is_frozen, is_slotted, base_attr_map + ) + lines.extend(extra_lines) + + args = [] # Parameters in the definition of __init__ + pre_init_args = [] # Parameters in the call to __attrs_pre_init__ + kw_only_args = [] # Used for both 'args' and 'pre_init_args' above + attrs_to_validate = [] + + # This is a dictionary of names to validator and converter callables. + # Injecting this into __init__ globals lets us avoid lookups. 
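+    # For a hypothetical field "x" with both a validator and a converter,
+    # this ends up holding "__attr_validator_x", "__attr_x", and
+    # "__attr_converter_x": exactly the names the generated source below
+    # refers to.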
+ names_for_globals = {} + annotations = {"return": None} + + for a in attrs: + if a.validator: + attrs_to_validate.append(a) + + attr_name = a.name + has_on_setattr = a.on_setattr is not None or ( + a.on_setattr is not setters.NO_OP and has_cls_on_setattr + ) + # a.alias is set to maybe-mangled attr_name in _ClassBuilder if not + # explicitly provided + arg_name = a.alias + + has_factory = isinstance(a.default, Factory) + maybe_self = "self" if has_factory and a.default.takes_self else "" + + if a.converter is not None and not isinstance(a.converter, Converter): + converter = Converter(a.converter) + else: + converter = a.converter + + if a.init is False: + if has_factory: + init_factory_name = _INIT_FACTORY_PAT % (a.name,) + if converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, + init_factory_name + f"({maybe_self})", + has_on_setattr, + converter, + ) + ) + names_for_globals[converter._get_global_name(a.name)] = ( + converter.converter + ) + else: + lines.append( + fmt_setter( + attr_name, + init_factory_name + f"({maybe_self})", + has_on_setattr, + ) + ) + names_for_globals[init_factory_name] = a.default.factory + elif converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, + f"attr_dict['{attr_name}'].default", + has_on_setattr, + converter, + ) + ) + names_for_globals[converter._get_global_name(a.name)] = ( + converter.converter + ) + else: + lines.append( + fmt_setter( + attr_name, + f"attr_dict['{attr_name}'].default", + has_on_setattr, + ) + ) + elif a.default is not NOTHING and not has_factory: + arg = f"{arg_name}=attr_dict['{attr_name}'].default" + if a.kw_only: + kw_only_args.append(arg) + else: + args.append(arg) + pre_init_args.append(arg_name) + + if converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, arg_name, has_on_setattr, converter + ) + ) + names_for_globals[converter._get_global_name(a.name)] = ( + converter.converter + ) + else: + lines.append(fmt_setter(attr_name, arg_name, has_on_setattr)) + + elif has_factory: + arg = f"{arg_name}=NOTHING" + if a.kw_only: + kw_only_args.append(arg) + else: + args.append(arg) + pre_init_args.append(arg_name) + lines.append(f"if {arg_name} is not NOTHING:") + + init_factory_name = _INIT_FACTORY_PAT % (a.name,) + if converter is not None: + lines.append( + " " + + fmt_setter_with_converter( + attr_name, arg_name, has_on_setattr, converter + ) + ) + lines.append("else:") + lines.append( + " " + + fmt_setter_with_converter( + attr_name, + init_factory_name + "(" + maybe_self + ")", + has_on_setattr, + converter, + ) + ) + names_for_globals[converter._get_global_name(a.name)] = ( + converter.converter + ) + else: + lines.append( + " " + fmt_setter(attr_name, arg_name, has_on_setattr) + ) + lines.append("else:") + lines.append( + " " + + fmt_setter( + attr_name, + init_factory_name + "(" + maybe_self + ")", + has_on_setattr, + ) + ) + names_for_globals[init_factory_name] = a.default.factory + else: + if a.kw_only: + kw_only_args.append(arg_name) + else: + args.append(arg_name) + pre_init_args.append(arg_name) + + if converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, arg_name, has_on_setattr, converter + ) + ) + names_for_globals[converter._get_global_name(a.name)] = ( + converter.converter + ) + else: + lines.append(fmt_setter(attr_name, arg_name, has_on_setattr)) + + if a.init is True: + if a.type is not None and converter is None: + annotations[arg_name] = a.type + elif converter is not None and 
converter._first_param_type:
+                # Use the type from the converter if present.
+                annotations[arg_name] = converter._first_param_type
+
+    if attrs_to_validate:  # we can skip this if there are no validators.
+        names_for_globals["_config"] = _config
+        lines.append("if _config._run_validators is True:")
+        for a in attrs_to_validate:
+            val_name = "__attr_validator_" + a.name
+            attr_name = "__attr_" + a.name
+            lines.append(f"    {val_name}(self, {attr_name}, self.{a.name})")
+            names_for_globals[val_name] = a.validator
+            names_for_globals[attr_name] = a
+
+    if call_post_init:
+        lines.append("self.__attrs_post_init__()")
+
+    # Because this is set only after __attrs_post_init__ is called, a crash
+    # will result if post-init tries to access the hash code. This seemed
+    # preferable to setting this beforehand, in which case alteration to
+    # field values during post-init combined with post-init accessing the
+    # hash code would result in silent bugs.
+    if does_cache_hash:
+        if is_frozen:
+            if is_slotted:
+                init_hash_cache = f"_setattr('{_HASH_CACHE_FIELD}', None)"
+            else:
+                init_hash_cache = f"_inst_dict['{_HASH_CACHE_FIELD}'] = None"
+        else:
+            init_hash_cache = f"self.{_HASH_CACHE_FIELD} = None"
+        lines.append(init_hash_cache)
+
+    # For exceptions we rely on BaseException.__init__ for proper
+    # initialization.
+    if is_exc:
+        vals = ",".join(f"self.{a.name}" for a in attrs if a.init)
+
+        lines.append(f"BaseException.__init__(self, {vals})")
+
+    args = ", ".join(args)
+    pre_init_args = ", ".join(pre_init_args)
+    if kw_only_args:
+        # leading comma & kw_only args
+        args += f"{', ' if args else ''}*, {', '.join(kw_only_args)}"
+        pre_init_kw_only_args = ", ".join(
+            [
+                f"{kw_arg_name}={kw_arg_name}"
+                # We need to remove the defaults from the kw_only_args.
+                for kw_arg_name in (kwa.split("=")[0] for kwa in kw_only_args)
+            ]
+        )
+        pre_init_args += ", " if pre_init_args else ""
+        pre_init_args += pre_init_kw_only_args
+
+    if call_pre_init and pre_init_has_args:
+        # If pre init method has arguments, pass the values given to __init__.
+        lines[0] = f"self.__attrs_pre_init__({pre_init_args})"
+
+    # Python <3.12 doesn't allow backslashes in f-strings.
+    NL = "\n    "
+    return (
+        f"""def {method_name}(self, {args}):
+    {NL.join(lines) if lines else "pass"}
+""",
+        names_for_globals,
+        annotations,
+    )
+
+
+def _default_init_alias_for(name: str) -> str:
+    """
+    The default __init__ parameter name for a field.
+
+    This performs private-name adjustment via leading-underscore stripping,
+    and is the default value of Attribute.alias if not provided.
+    """
+
+    return name.lstrip("_")
+
+
+class Attribute:
+    """
+    *Read-only* representation of an attribute.
+
+    .. warning::
+
+       You should never instantiate this class yourself.
+
+    The class has *all* arguments of `attr.ib` (except for ``factory``, which
+    is only syntactic sugar for ``default=Factory(...)``) plus the following:
+
+    - ``name`` (`str`): The name of the attribute.
+    - ``alias`` (`str`): The __init__ parameter name of the attribute, after
+      any explicit overrides and default private-attribute-name handling.
+    - ``inherited`` (`bool`): Whether or not that attribute has been inherited
+      from a base class.
+    - ``eq_key`` and ``order_key`` (`typing.Callable` or `None`): The
+      callables that are used for comparing and ordering objects by this
+      attribute, respectively. These are set by passing a callable to
+      `attr.ib`'s ``eq``, ``order``, or ``cmp`` arguments. See also
+      :ref:`comparison customization <custom-comparison>`.
+ + Instances of this class are frequently used for introspection purposes + like: + + - `fields` returns a tuple of them. + - Validators get them passed as the first argument. + - The :ref:`field transformer ` hook receives a list of + them. + - The ``alias`` property exposes the __init__ parameter name of the field, + with any overrides and default private-attribute handling applied. + + + .. versionadded:: 20.1.0 *inherited* + .. versionadded:: 20.1.0 *on_setattr* + .. versionchanged:: 20.2.0 *inherited* is not taken into account for + equality checks and hashing anymore. + .. versionadded:: 21.1.0 *eq_key* and *order_key* + .. versionadded:: 22.2.0 *alias* + + For the full version history of the fields, see `attr.ib`. + """ + + # These slots must NOT be reordered because we use them later for + # instantiation. + __slots__ = ( # noqa: RUF023 + "name", + "default", + "validator", + "repr", + "eq", + "eq_key", + "order", + "order_key", + "hash", + "init", + "metadata", + "type", + "converter", + "kw_only", + "inherited", + "on_setattr", + "alias", + ) + + def __init__( + self, + name, + default, + validator, + repr, + cmp, # XXX: unused, remove along with other cmp code. + hash, + init, + inherited, + metadata=None, + type=None, + converter=None, + kw_only=False, + eq=None, + eq_key=None, + order=None, + order_key=None, + on_setattr=None, + alias=None, + ): + eq, eq_key, order, order_key = _determine_attrib_eq_order( + cmp, eq_key or eq, order_key or order, True + ) + + # Cache this descriptor here to speed things up later. + bound_setattr = _OBJ_SETATTR.__get__(self) + + # Despite the big red warning, people *do* instantiate `Attribute` + # themselves. + bound_setattr("name", name) + bound_setattr("default", default) + bound_setattr("validator", validator) + bound_setattr("repr", repr) + bound_setattr("eq", eq) + bound_setattr("eq_key", eq_key) + bound_setattr("order", order) + bound_setattr("order_key", order_key) + bound_setattr("hash", hash) + bound_setattr("init", init) + bound_setattr("converter", converter) + bound_setattr( + "metadata", + ( + types.MappingProxyType(dict(metadata)) # Shallow copy + if metadata + else _EMPTY_METADATA_SINGLETON + ), + ) + bound_setattr("type", type) + bound_setattr("kw_only", kw_only) + bound_setattr("inherited", inherited) + bound_setattr("on_setattr", on_setattr) + bound_setattr("alias", alias) + + def __setattr__(self, name, value): + raise FrozenInstanceError + + @classmethod + def from_counting_attr( + cls, name: str, ca: _CountingAttr, kw_only: bool, type=None + ): + # The 'kw_only' argument is the class-level setting, and is used if the + # attribute itself does not explicitly set 'kw_only'. + # type holds the annotated value. deal with conflicts: + if type is None: + type = ca.type + elif ca.type is not None: + msg = f"Type annotation and type argument cannot both be present for '{name}'." + raise ValueError(msg) + return cls( + name, + ca._default, + ca._validator, + ca.repr, + None, + ca.hash, + ca.init, + False, + ca.metadata, + type, + ca.converter, + kw_only if ca.kw_only is None else ca.kw_only, + ca.eq, + ca.eq_key, + ca.order, + ca.order_key, + ca.on_setattr, + ca.alias, + ) + + # Don't use attrs.evolve since fields(Attribute) doesn't work + def evolve(self, **changes): + """ + Copy *self* and apply *changes*. + + This works similarly to `attrs.evolve` but that function does not work + with :class:`attrs.Attribute`. + + It is mainly meant to be used for `transform-fields`. + + .. 
versionadded:: 20.3.0 + """ + new = copy.copy(self) + + new._setattrs(changes.items()) + + return new + + # Don't use _add_pickle since fields(Attribute) doesn't work + def __getstate__(self): + """ + Play nice with pickle. + """ + return tuple( + getattr(self, name) if name != "metadata" else dict(self.metadata) + for name in self.__slots__ + ) + + def __setstate__(self, state): + """ + Play nice with pickle. + """ + self._setattrs(zip(self.__slots__, state)) + + def _setattrs(self, name_values_pairs): + bound_setattr = _OBJ_SETATTR.__get__(self) + for name, value in name_values_pairs: + if name != "metadata": + bound_setattr(name, value) + else: + bound_setattr( + name, + ( + types.MappingProxyType(dict(value)) + if value + else _EMPTY_METADATA_SINGLETON + ), + ) + + +_a = [ + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=None, + eq=True, + order=False, + hash=(name != "metadata"), + init=True, + inherited=False, + alias=_default_init_alias_for(name), + ) + for name in Attribute.__slots__ +] + +Attribute = _add_hash( + _add_eq( + _add_repr(Attribute, attrs=_a), + attrs=[a for a in _a if a.name != "inherited"], + ), + attrs=[a for a in _a if a.hash and a.name != "inherited"], +) + + +class _CountingAttr: + """ + Intermediate representation of attributes that uses a counter to preserve + the order in which the attributes have been defined. + + *Internal* data structure of the attrs library. Running into is most + likely the result of a bug like a forgotten `@attr.s` decorator. + """ + + __slots__ = ( + "_default", + "_validator", + "alias", + "converter", + "counter", + "eq", + "eq_key", + "hash", + "init", + "kw_only", + "metadata", + "on_setattr", + "order", + "order_key", + "repr", + "type", + ) + __attrs_attrs__ = ( + *tuple( + Attribute( + name=name, + alias=_default_init_alias_for(name), + default=NOTHING, + validator=None, + repr=True, + cmp=None, + hash=True, + init=True, + kw_only=False, + eq=True, + eq_key=None, + order=False, + order_key=None, + inherited=False, + on_setattr=None, + ) + for name in ( + "counter", + "_default", + "repr", + "eq", + "order", + "hash", + "init", + "on_setattr", + "alias", + ) + ), + Attribute( + name="metadata", + alias="metadata", + default=None, + validator=None, + repr=True, + cmp=None, + hash=False, + init=True, + kw_only=False, + eq=True, + eq_key=None, + order=False, + order_key=None, + inherited=False, + on_setattr=None, + ), + ) + cls_counter = 0 + + def __init__( + self, + default, + validator, + repr, + cmp, + hash, + init, + converter, + metadata, + type, + kw_only, + eq, + eq_key, + order, + order_key, + on_setattr, + alias, + ): + _CountingAttr.cls_counter += 1 + self.counter = _CountingAttr.cls_counter + self._default = default + self._validator = validator + self.converter = converter + self.repr = repr + self.eq = eq + self.eq_key = eq_key + self.order = order + self.order_key = order_key + self.hash = hash + self.init = init + self.metadata = metadata + self.type = type + self.kw_only = kw_only + self.on_setattr = on_setattr + self.alias = alias + + def validator(self, meth): + """ + Decorator that adds *meth* to the list of validators. + + Returns *meth* unchanged. + + .. versionadded:: 17.1.0 + """ + if self._validator is None: + self._validator = meth + else: + self._validator = and_(self._validator, meth) + return meth + + def default(self, meth): + """ + Decorator that allows to set the default for an attribute. + + Returns *meth* unchanged. 
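+
+        A hypothetical usage sketch::
+
+            @attr.s
+            class C:
+                x = attr.ib()
+
+                @x.default
+                def _x_default(self):
+                    return 42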
+ + Raises: + DefaultAlreadySetError: If default has been set before. + + .. versionadded:: 17.1.0 + """ + if self._default is not NOTHING: + raise DefaultAlreadySetError + + self._default = Factory(meth, takes_self=True) + + return meth + + +_CountingAttr = _add_eq(_add_repr(_CountingAttr)) + + +class ClassProps: + """ + Effective class properties as derived from parameters to `attr.s()` or + `define()` decorators. + + This is the same data structure that *attrs* uses internally to decide how + to construct the final class. + + Warning: + + This feature is currently **experimental** and is not covered by our + strict backwards-compatibility guarantees. + + + Attributes: + is_exception (bool): + Whether the class is treated as an exception class. + + is_slotted (bool): + Whether the class is `slotted `. + + has_weakref_slot (bool): + Whether the class has a slot for weak references. + + is_frozen (bool): + Whether the class is frozen. + + kw_only (KeywordOnly): + Whether / how the class enforces keyword-only arguments on the + ``__init__`` method. + + collected_fields_by_mro (bool): + Whether the class fields were collected by method resolution order. + That is, correctly but unlike `dataclasses`. + + added_init (bool): + Whether the class has an *attrs*-generated ``__init__`` method. + + added_repr (bool): + Whether the class has an *attrs*-generated ``__repr__`` method. + + added_eq (bool): + Whether the class has *attrs*-generated equality methods. + + added_ordering (bool): + Whether the class has *attrs*-generated ordering methods. + + hashability (Hashability): How `hashable ` the class is. + + added_match_args (bool): + Whether the class supports positional `match ` over its + fields. + + added_str (bool): + Whether the class has an *attrs*-generated ``__str__`` method. + + added_pickling (bool): + Whether the class has *attrs*-generated ``__getstate__`` and + ``__setstate__`` methods for `pickle`. + + on_setattr_hook (Callable[[Any, Attribute[Any], Any], Any] | None): + The class's ``__setattr__`` hook. + + field_transformer (Callable[[Attribute[Any]], Attribute[Any]] | None): + The class's `field transformers `. + + .. versionadded:: 25.4.0 + """ + + class Hashability(enum.Enum): + """ + The hashability of a class. + + .. versionadded:: 25.4.0 + """ + + HASHABLE = "hashable" + """Write a ``__hash__``.""" + HASHABLE_CACHED = "hashable_cache" + """Write a ``__hash__`` and cache the hash.""" + UNHASHABLE = "unhashable" + """Set ``__hash__`` to ``None``.""" + LEAVE_ALONE = "leave_alone" + """Don't touch ``__hash__``.""" + + class KeywordOnly(enum.Enum): + """ + How attributes should be treated regarding keyword-only parameters. + + .. 
versionadded:: 25.4.0 + """ + + NO = "no" + """Attributes are not keyword-only.""" + YES = "yes" + """Attributes in current class without kw_only=False are keyword-only.""" + FORCE = "force" + """All attributes are keyword-only.""" + + __slots__ = ( # noqa: RUF023 -- order matters for __init__ + "is_exception", + "is_slotted", + "has_weakref_slot", + "is_frozen", + "kw_only", + "collected_fields_by_mro", + "added_init", + "added_repr", + "added_eq", + "added_ordering", + "hashability", + "added_match_args", + "added_str", + "added_pickling", + "on_setattr_hook", + "field_transformer", + ) + + def __init__( + self, + is_exception, + is_slotted, + has_weakref_slot, + is_frozen, + kw_only, + collected_fields_by_mro, + added_init, + added_repr, + added_eq, + added_ordering, + hashability, + added_match_args, + added_str, + added_pickling, + on_setattr_hook, + field_transformer, + ): + self.is_exception = is_exception + self.is_slotted = is_slotted + self.has_weakref_slot = has_weakref_slot + self.is_frozen = is_frozen + self.kw_only = kw_only + self.collected_fields_by_mro = collected_fields_by_mro + self.added_init = added_init + self.added_repr = added_repr + self.added_eq = added_eq + self.added_ordering = added_ordering + self.hashability = hashability + self.added_match_args = added_match_args + self.added_str = added_str + self.added_pickling = added_pickling + self.on_setattr_hook = on_setattr_hook + self.field_transformer = field_transformer + + @property + def is_hashable(self): + return ( + self.hashability is ClassProps.Hashability.HASHABLE + or self.hashability is ClassProps.Hashability.HASHABLE_CACHED + ) + + +_cas = [ + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=None, + eq=True, + order=False, + hash=True, + init=True, + inherited=False, + alias=_default_init_alias_for(name), + ) + for name in ClassProps.__slots__ +] + +ClassProps = _add_eq(_add_repr(ClassProps, attrs=_cas), attrs=_cas) + + +class Factory: + """ + Stores a factory callable. + + If passed as the default value to `attrs.field`, the factory is used to + generate a new value. + + Args: + factory (typing.Callable): + A callable that takes either none or exactly one mandatory + positional argument depending on *takes_self*. + + takes_self (bool): + Pass the partially initialized instance that is being initialized + as a positional argument. + + .. versionadded:: 17.1.0 *takes_self* + """ + + __slots__ = ("factory", "takes_self") + + def __init__(self, factory, takes_self=False): + self.factory = factory + self.takes_self = takes_self + + def __getstate__(self): + """ + Play nice with pickle. + """ + return tuple(getattr(self, name) for name in self.__slots__) + + def __setstate__(self, state): + """ + Play nice with pickle. + """ + for name, value in zip(self.__slots__, state): + setattr(self, name, value) + + +_f = [ + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=None, + eq=True, + order=False, + hash=True, + init=True, + inherited=False, + ) + for name in Factory.__slots__ +] + +Factory = _add_hash(_add_eq(_add_repr(Factory, attrs=_f), attrs=_f), attrs=_f) + + +class Converter: + """ + Stores a converter callable. + + Allows for the wrapped converter to take additional arguments. The + arguments are passed in the order they are documented. + + Args: + converter (Callable): A callable that converts the passed value. + + takes_self (bool): + Pass the partially initialized instance that is being initialized + as a positional argument. 
(default: `False`) + + takes_field (bool): + Pass the field definition (an :class:`Attribute`) into the + converter as a positional argument. (default: `False`) + + .. versionadded:: 24.1.0 + """ + + __slots__ = ( + "__call__", + "_first_param_type", + "_global_name", + "converter", + "takes_field", + "takes_self", + ) + + def __init__(self, converter, *, takes_self=False, takes_field=False): + self.converter = converter + self.takes_self = takes_self + self.takes_field = takes_field + + ex = _AnnotationExtractor(converter) + self._first_param_type = ex.get_first_param_type() + + if not (self.takes_self or self.takes_field): + self.__call__ = lambda value, _, __: self.converter(value) + elif self.takes_self and not self.takes_field: + self.__call__ = lambda value, instance, __: self.converter( + value, instance + ) + elif not self.takes_self and self.takes_field: + self.__call__ = lambda value, __, field: self.converter( + value, field + ) + else: + self.__call__ = lambda value, instance, field: self.converter( + value, instance, field + ) + + rt = ex.get_return_type() + if rt is not None: + self.__call__.__annotations__["return"] = rt + + @staticmethod + def _get_global_name(attr_name: str) -> str: + """ + Return the name that a converter for an attribute name *attr_name* + would have. + """ + return f"__attr_converter_{attr_name}" + + def _fmt_converter_call(self, attr_name: str, value_var: str) -> str: + """ + Return a string that calls the converter for an attribute name + *attr_name* and the value in variable named *value_var* according to + `self.takes_self` and `self.takes_field`. + """ + if not (self.takes_self or self.takes_field): + return f"{self._get_global_name(attr_name)}({value_var})" + + if self.takes_self and self.takes_field: + return f"{self._get_global_name(attr_name)}({value_var}, self, attr_dict['{attr_name}'])" + + if self.takes_self: + return f"{self._get_global_name(attr_name)}({value_var}, self)" + + return f"{self._get_global_name(attr_name)}({value_var}, attr_dict['{attr_name}'])" + + def __getstate__(self): + """ + Return a dict containing only converter and takes_self -- the rest gets + computed when loading. + """ + return { + "converter": self.converter, + "takes_self": self.takes_self, + "takes_field": self.takes_field, + } + + def __setstate__(self, state): + """ + Load instance from state. + """ + self.__init__(**state) + + +_f = [ + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=None, + eq=True, + order=False, + hash=True, + init=True, + inherited=False, + ) + for name in ("converter", "takes_self", "takes_field") +] + +Converter = _add_hash( + _add_eq(_add_repr(Converter, attrs=_f), attrs=_f), attrs=_f +) + + +def make_class( + name, attrs, bases=(object,), class_body=None, **attributes_arguments +): + r""" + A quick way to create a new class called *name* with *attrs*. + + .. note:: + + ``make_class()`` is a thin wrapper around `attr.s`, not `attrs.define` + which means that it doesn't come with some of the improved defaults. + + For example, if you want the same ``on_setattr`` behavior as in + `attrs.define`, you have to pass the hooks yourself: ``make_class(..., + on_setattr=setters.pipe(setters.convert, setters.validate)`` + + .. warning:: + + It is *your* duty to ensure that the class name and the attribute names + are valid identifiers. ``make_class()`` will *not* validate them for + you. + + Args: + name (str): The name for the new class. 
+ + attrs (list | dict): + A list of names or a dictionary of mappings of names to `attr.ib`\ + s / `attrs.field`\ s. + + The order is deduced from the order of the names or attributes + inside *attrs*. Otherwise the order of the definition of the + attributes is used. + + bases (tuple[type, ...]): Classes that the new class will subclass. + + class_body (dict): + An optional dictionary of class attributes for the new class. + + attributes_arguments: Passed unmodified to `attr.s`. + + Returns: + type: A new class with *attrs*. + + .. versionadded:: 17.1.0 *bases* + .. versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained. + .. versionchanged:: 23.2.0 *class_body* + .. versionchanged:: 25.2.0 Class names can now be unicode. + """ + # Class identifiers are converted into the normal form NFKC while parsing + name = unicodedata.normalize("NFKC", name) + + if isinstance(attrs, dict): + cls_dict = attrs + elif isinstance(attrs, (list, tuple)): + cls_dict = {a: attrib() for a in attrs} + else: + msg = "attrs argument must be a dict or a list." + raise TypeError(msg) + + pre_init = cls_dict.pop("__attrs_pre_init__", None) + post_init = cls_dict.pop("__attrs_post_init__", None) + user_init = cls_dict.pop("__init__", None) + + body = {} + if class_body is not None: + body.update(class_body) + if pre_init is not None: + body["__attrs_pre_init__"] = pre_init + if post_init is not None: + body["__attrs_post_init__"] = post_init + if user_init is not None: + body["__init__"] = user_init + + type_ = types.new_class(name, bases, {}, lambda ns: ns.update(body)) + + # For pickling to work, the __module__ variable needs to be set to the + # frame where the class is created. Bypass this step in environments where + # sys._getframe is not defined (Jython for example) or sys._getframe is not + # defined for arguments greater than 0 (IronPython). + with contextlib.suppress(AttributeError, ValueError): + type_.__module__ = sys._getframe(1).f_globals.get( + "__name__", "__main__" + ) + + # We do it here for proper warnings with meaningful stacklevel. + cmp = attributes_arguments.pop("cmp", None) + ( + attributes_arguments["eq"], + attributes_arguments["order"], + ) = _determine_attrs_eq_order( + cmp, + attributes_arguments.get("eq"), + attributes_arguments.get("order"), + True, + ) + + cls = _attrs(these=cls_dict, **attributes_arguments)(type_) + # Only add type annotations now or "_attrs()" will complain: + cls.__annotations__ = { + k: v.type for k, v in cls_dict.items() if v.type is not None + } + return cls + + +# These are required by within this module so we define them here and merely +# import into .validators / .converters. + + +@attrs(slots=True, unsafe_hash=True) +class _AndValidator: + """ + Compose many validators to a single one. + """ + + _validators = attrib() + + def __call__(self, inst, attr, value): + for v in self._validators: + v(inst, attr, value) + + +def and_(*validators): + """ + A validator that composes multiple validators into one. + + When called on a value, it runs all wrapped validators. + + Args: + validators (~collections.abc.Iterable[typing.Callable]): + Arbitrary number of validators. + + .. versionadded:: 17.1.0 + """ + vals = [] + for validator in validators: + vals.extend( + validator._validators + if isinstance(validator, _AndValidator) + else [validator] + ) + + return _AndValidator(tuple(vals)) + + +def pipe(*converters): + """ + A converter that composes multiple converters into one. 
+ + When called on a value, it runs all wrapped converters, returning the + *last* value. + + Type annotations will be inferred from the wrapped converters', if they + have any. + + converters (~collections.abc.Iterable[typing.Callable]): + Arbitrary number of converters. + + .. versionadded:: 20.1.0 + """ + + return_instance = any(isinstance(c, Converter) for c in converters) + + if return_instance: + + def pipe_converter(val, inst, field): + for c in converters: + val = ( + c(val, inst, field) if isinstance(c, Converter) else c(val) + ) + + return val + + else: + + def pipe_converter(val): + for c in converters: + val = c(val) + + return val + + if not converters: + # If the converter list is empty, pipe_converter is the identity. + A = TypeVar("A") + pipe_converter.__annotations__.update({"val": A, "return": A}) + else: + # Get parameter type from first converter. + t = _AnnotationExtractor(converters[0]).get_first_param_type() + if t: + pipe_converter.__annotations__["val"] = t + + last = converters[-1] + if not PY_3_11_PLUS and isinstance(last, Converter): + last = last.__call__ + + # Get return type from last converter. + rt = _AnnotationExtractor(last).get_return_type() + if rt: + pipe_converter.__annotations__["return"] = rt + + if return_instance: + return Converter(pipe_converter, takes_self=True, takes_field=True) + return pipe_converter diff --git a/.venv/lib/python3.11/site-packages/attr/_next_gen.py b/.venv/lib/python3.11/site-packages/attr/_next_gen.py new file mode 100644 index 0000000..4ccd0da --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attr/_next_gen.py @@ -0,0 +1,674 @@ +# SPDX-License-Identifier: MIT + +""" +These are keyword-only APIs that call `attr.s` and `attr.ib` with different +default values. +""" + +from functools import partial + +from . import setters +from ._funcs import asdict as _asdict +from ._funcs import astuple as _astuple +from ._make import ( + _DEFAULT_ON_SETATTR, + NOTHING, + _frozen_setattrs, + attrib, + attrs, +) +from .exceptions import NotAnAttrsClassError, UnannotatedAttributeError + + +def define( + maybe_cls=None, + *, + these=None, + repr=None, + unsafe_hash=None, + hash=None, + init=None, + slots=True, + frozen=False, + weakref_slot=True, + str=False, + auto_attribs=None, + kw_only=False, + cache_hash=False, + auto_exc=True, + eq=None, + order=False, + auto_detect=True, + getstate_setstate=None, + on_setattr=None, + field_transformer=None, + match_args=True, + force_kw_only=False, +): + r""" + A class decorator that adds :term:`dunder methods` according to + :term:`fields ` specified using :doc:`type annotations `, + `field()` calls, or the *these* argument. + + Since *attrs* patches or replaces an existing class, you cannot use + `object.__init_subclass__` with *attrs* classes, because it runs too early. + As a replacement, you can define ``__attrs_init_subclass__`` on your class. + It will be called by *attrs* classes that subclass it after they're + created. See also :ref:`init-subclass`. + + Args: + slots (bool): + Create a :term:`slotted class ` that's more + memory-efficient. Slotted classes are generally superior to the + default dict classes, but have some gotchas you should know about, + so we encourage you to read the :term:`glossary entry `. 
+ + auto_detect (bool): + Instead of setting the *init*, *repr*, *eq*, and *hash* arguments + explicitly, assume they are set to True **unless any** of the + involved methods for one of the arguments is implemented in the + *current* class (meaning, it is *not* inherited from some base + class). + + So, for example by implementing ``__eq__`` on a class yourself, + *attrs* will deduce ``eq=False`` and will create *neither* + ``__eq__`` *nor* ``__ne__`` (but Python classes come with a + sensible ``__ne__`` by default, so it *should* be enough to only + implement ``__eq__`` in most cases). + + Passing :data:`True` or :data:`False` to *init*, *repr*, *eq*, or *hash* + overrides whatever *auto_detect* would determine. + + auto_exc (bool): + If the class subclasses `BaseException` (which implicitly includes + any subclass of any exception), the following happens to behave + like a well-behaved Python exception class: + + - the values for *eq*, *order*, and *hash* are ignored and the + instances compare and hash by the instance's ids [#]_ , + - all attributes that are either passed into ``__init__`` or have a + default value are additionally available as a tuple in the + ``args`` attribute, + - the value of *str* is ignored leaving ``__str__`` to base + classes. + + .. [#] + Note that *attrs* will *not* remove existing implementations of + ``__hash__`` or the equality methods. It just won't add own + ones. + + on_setattr (~typing.Callable | list[~typing.Callable] | None | ~typing.Literal[attrs.setters.NO_OP]): + A callable that is run whenever the user attempts to set an + attribute (either by assignment like ``i.x = 42`` or by using + `setattr` like ``setattr(i, "x", 42)``). It receives the same + arguments as validators: the instance, the attribute that is being + modified, and the new value. + + If no exception is raised, the attribute is set to the return value + of the callable. + + If a list of callables is passed, they're automatically wrapped in + an `attrs.setters.pipe`. + + If left None, the default behavior is to run converters and + validators whenever an attribute is set. + + init (bool): + Create a ``__init__`` method that initializes the *attrs* + attributes. Leading underscores are stripped for the argument name, + unless an alias is set on the attribute. + + .. seealso:: + `init` shows advanced ways to customize the generated + ``__init__`` method, including executing code before and after. + + repr(bool): + Create a ``__repr__`` method with a human readable representation + of *attrs* attributes. + + str (bool): + Create a ``__str__`` method that is identical to ``__repr__``. This + is usually not necessary except for `Exception`\ s. + + eq (bool | None): + If True or None (default), add ``__eq__`` and ``__ne__`` methods + that check two instances for equality. + + .. seealso:: + `comparison` describes how to customize the comparison behavior + going as far comparing NumPy arrays. + + order (bool | None): + If True, add ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` + methods that behave like *eq* above and allow instances to be + ordered. + + They compare the instances as if they were tuples of their *attrs* + attributes if and only if the types of both classes are + *identical*. + + If `None` mirror value of *eq*. + + .. seealso:: `comparison` + + unsafe_hash (bool | None): + If None (default), the ``__hash__`` method is generated according + how *eq* and *frozen* are set. + + 1. If *both* are True, *attrs* will generate a ``__hash__`` for + you. + 2. 
If *eq* is True and *frozen* is False, ``__hash__`` will be set + to None, marking it unhashable (which it is). + 3. If *eq* is False, ``__hash__`` will be left untouched meaning + the ``__hash__`` method of the base class will be used. If the + base class is `object`, this means it will fall back to id-based + hashing. + + Although not recommended, you can decide for yourself and force + *attrs* to create one (for example, if the class is immutable even + though you didn't freeze it programmatically) by passing True or + not. Both of these cases are rather special and should be used + carefully. + + .. seealso:: + + - Our documentation on `hashing`, + - Python's documentation on `object.__hash__`, + - and the `GitHub issue that led to the default \ behavior + <https://github.com/python-attrs/attrs/issues/136>`_ for more + details. + + hash (bool | None): + Deprecated alias for *unsafe_hash*. *unsafe_hash* takes precedence. + + cache_hash (bool): + Ensure that the object's hash code is computed only once and stored + on the object. If this is set to True, hashing must be either + explicitly or implicitly enabled for this class. If the hash code + is cached, avoid any reassignments of fields involved in hash code + computation or mutations of the objects those fields point to after + object creation. If such changes occur, the behavior of the + object's hash code is undefined. + + frozen (bool): + Make instances immutable after initialization. If someone attempts + to modify a frozen instance, `attrs.exceptions.FrozenInstanceError` + is raised. + + .. note:: + + 1. This is achieved by installing a custom ``__setattr__`` + method on your class, so you can't implement your own. + + 2. True immutability is impossible in Python. + + 3. This *does* have a minor runtime performance `impact + <how-frozen>` when initializing new instances. In other + words: ``__init__`` is slightly slower with ``frozen=True``. + + 4. If a class is frozen, you cannot modify ``self`` in + ``__attrs_post_init__`` or a self-written ``__init__``. You + can circumvent that limitation by using + ``object.__setattr__(self, "attribute_name", value)``. + + 5. Subclasses of a frozen class are frozen too. + + kw_only (bool): + Make attributes keyword-only in the generated ``__init__`` (if + *init* is False, this parameter is ignored). Attributes that + explicitly set ``kw_only=False`` are not affected; base class + attributes are also not affected. + + Also see *force_kw_only*. + + weakref_slot (bool): + Make instances weak-referenceable. This has no effect unless + *slots* is True. + + field_transformer (~typing.Callable | None): + A function that is called with the original class object and all + fields right before *attrs* finalizes the class. You can use this, + for example, to automatically add converters or validators to + fields based on their types. + + .. seealso:: `transform-fields` + + match_args (bool): + If True (default), set ``__match_args__`` on the class to support + :pep:`634` (*Structural Pattern Matching*). It is a tuple of all + non-keyword-only ``__init__`` parameter names on Python 3.10 and + later. Ignored on older Python versions. + + collect_by_mro (bool): + If True, *attrs* collects attributes from base classes correctly + according to the `method resolution order + <https://docs.python.org/3/howto/mro.html>`_. If False, *attrs* + will mimic the (wrong) behavior of `dataclasses` and :pep:`681`. + + See also `issue #428 + <https://github.com/python-attrs/attrs/issues/428>`_. + + force_kw_only (bool): + A back-compat flag for restoring pre-25.4.0 behavior.
If True and + ``kw_only=True``, all attributes are made keyword-only, including + base class attributes, and those set to ``kw_only=False`` at the + attribute level. Defaults to False. + + See also `issue #980 + <https://github.com/python-attrs/attrs/issues/980>`_. + + getstate_setstate (bool | None): + .. note:: + + This is usually only interesting for slotted classes and you + should probably just set *auto_detect* to True. + + If True, ``__getstate__`` and ``__setstate__`` are generated and + attached to the class. This is necessary for slotted classes to be + pickleable. If left None, it's True by default for slotted classes + and False for dict classes. + + If *auto_detect* is True, and *getstate_setstate* is left None, and + **either** ``__getstate__`` or ``__setstate__`` is detected + directly on the class (meaning: not inherited), it is set to False + (this is usually what you want). + + auto_attribs (bool | None): + If True, look at type annotations to determine which attributes to + use, like `dataclasses`. If False, it will only look for explicit + :func:`field` class attributes, like classic *attrs*. + + If left None, it will guess: + + 1. If any attributes are annotated and no unannotated + `attrs.field`\ s are found, it assumes *auto_attribs=True*. + 2. Otherwise it assumes *auto_attribs=False* and tries to collect + `attrs.field`\ s. + + If *attrs* decides to look at type annotations, **all** fields + **must** be annotated. If *attrs* encounters a field that is set to + a :func:`field` / `attr.ib` but lacks a type annotation, an + `attrs.exceptions.UnannotatedAttributeError` is raised. Use + ``field_name: typing.Any = field(...)`` if you don't want to set a + type. + + .. warning:: + + For features that use the attribute name to create decorators + (for example, :ref:`validators <validators>`), you still *must* + assign :func:`field` / `attr.ib` to them. Otherwise Python will + either not find the name or try to use the default value to + call, for example, ``validator`` on it. + + Attributes annotated as `typing.ClassVar`, and attributes that are + neither annotated nor set to a `field()` are **ignored**. + + these (dict[str, object]): + A dictionary mapping attribute names to the (private) return values + of `field()` calls. This is useful to avoid the definition of your + attributes within the class body because you can't (for example, if + you want to add ``__repr__`` methods to Django models) or don't + want to. + + If *these* is not `None`, *attrs* will *not* search the class body + for attributes and will *not* remove any attributes from it. + + The order is deduced from the order of the attributes inside + *these*. + + Arguably, this is a rather obscure feature. + + .. versionadded:: 20.1.0 + .. versionchanged:: 21.3.0 Converters are also run ``on_setattr``. + .. versionadded:: 22.2.0 + *unsafe_hash* as an alias for *hash* (for :pep:`681` compliance). + .. versionchanged:: 24.1.0 + Instances are not compared as tuples of attributes anymore, but using a + big ``and`` condition. This is faster and has more correct behavior for + uncomparable values like `math.nan`. + .. versionadded:: 24.1.0 + If a class has an *inherited* classmethod called + ``__attrs_init_subclass__``, it is executed after the class is created. + .. deprecated:: 24.1.0 *hash* is deprecated in favor of *unsafe_hash*. + .. versionadded:: 24.3.0 + Unless already present, a ``__replace__`` method is automatically + created for `copy.replace` (Python 3.13+ only). + ..
versionchanged:: 25.4.0 + *kw_only* now only applies to attributes defined in the current class, + and respects attribute-level ``kw_only=False`` settings. + .. versionadded:: 25.4.0 + Added *force_kw_only* to go back to the previous *kw_only* behavior. + + .. note:: + + The main differences to the classic `attr.s` are: + + - Automatically detect whether or not *auto_attribs* should be `True` + (c.f. *auto_attribs* parameter). + - Converters and validators run when attributes are set by default -- + if *frozen* is `False`. + - *slots=True* + + Usually, this has only upsides and few visible effects in everyday + programming. But it *can* lead to some surprising behaviors, so + please make sure to read :term:`slotted classes`. + + - *auto_exc=True* + - *auto_detect=True* + - *order=False* + - *force_kw_only=False* + - Some options that were only relevant on Python 2 or were kept around + for backwards-compatibility have been removed. + + """ + + def do_it(cls, auto_attribs): + return attrs( + maybe_cls=cls, + these=these, + repr=repr, + hash=hash, + unsafe_hash=unsafe_hash, + init=init, + slots=slots, + frozen=frozen, + weakref_slot=weakref_slot, + str=str, + auto_attribs=auto_attribs, + kw_only=kw_only, + cache_hash=cache_hash, + auto_exc=auto_exc, + eq=eq, + order=order, + auto_detect=auto_detect, + collect_by_mro=True, + getstate_setstate=getstate_setstate, + on_setattr=on_setattr, + field_transformer=field_transformer, + match_args=match_args, + force_kw_only=force_kw_only, + ) + + def wrap(cls): + """ + Making this a wrapper ensures this code runs during class creation. + + We also ensure that frozen-ness of classes is inherited. + """ + nonlocal frozen, on_setattr + + had_on_setattr = on_setattr not in (None, setters.NO_OP) + + # By default, mutable classes convert & validate on setattr. + if frozen is False and on_setattr is None: + on_setattr = _DEFAULT_ON_SETATTR + + # However, if we subclass a frozen class, we inherit the immutability + # and disable on_setattr. + for base_cls in cls.__bases__: + if base_cls.__setattr__ is _frozen_setattrs: + if had_on_setattr: + msg = "Frozen classes can't use on_setattr (frozen-ness was inherited)." + raise ValueError(msg) + + on_setattr = setters.NO_OP + break + + if auto_attribs is not None: + return do_it(cls, auto_attribs) + + try: + return do_it(cls, True) + except UnannotatedAttributeError: + return do_it(cls, False) + + # maybe_cls's type depends on the usage of the decorator. It's a class + # if it's used as `@attrs` but `None` if used as `@attrs()`. + if maybe_cls is None: + return wrap + + return wrap(maybe_cls) + + +mutable = define +frozen = partial(define, frozen=True, on_setattr=None) + + +def field( + *, + default=NOTHING, + validator=None, + repr=True, + hash=None, + init=True, + metadata=None, + type=None, + converter=None, + factory=None, + kw_only=None, + eq=None, + order=None, + on_setattr=None, + alias=None, +): + """ + Create a new :term:`field` / :term:`attribute` on a class. + + .. warning:: + + Does **nothing** unless the class is also decorated with + `attrs.define` (or similar)! + + Args: + default: + A value that is used if an *attrs*-generated ``__init__`` is used + and no value is passed while instantiating or the attribute is + excluded using ``init=False``. + + If the value is an instance of `attrs.Factory`, its callable will + be used to construct a new value (useful for mutable data types + like lists or dicts). 
+ + If a default is not set (or set manually to `attrs.NOTHING`), a + value *must* be supplied when instantiating; otherwise a + `TypeError` will be raised. + + .. seealso:: `defaults` + + factory (~typing.Callable): + Syntactic sugar for ``default=attr.Factory(factory)``. + + validator (~typing.Callable | list[~typing.Callable]): + Callable that is called by *attrs*-generated ``__init__`` methods + after the instance has been initialized. They receive the + initialized instance, the :func:`~attrs.Attribute`, and the passed + value. + + The return value is *not* inspected so the validator has to throw + an exception itself. + + If a `list` is passed, its items are treated as validators and must + all pass. + + Validators can be globally disabled and re-enabled using + `attrs.validators.get_disabled` / `attrs.validators.set_disabled`. + + The validator can also be set using decorator notation as shown + below. + + .. seealso:: :ref:`validators` + + repr (bool | ~typing.Callable): + Include this attribute in the generated ``__repr__`` method. If + True, include the attribute; if False, omit it. By default, the + built-in ``repr()`` function is used. To override how the attribute + value is formatted, pass a ``callable`` that takes a single value + and returns a string. Note that the resulting string is used as-is, + which means it will be used directly *instead* of calling + ``repr()`` (the default). + + eq (bool | ~typing.Callable): + If True (default), include this attribute in the generated + ``__eq__`` and ``__ne__`` methods that check two instances for + equality. To override how the attribute value is compared, pass a + callable that takes a single value and returns the value to be + compared. + + .. seealso:: `comparison` + + order (bool | ~typing.Callable): + If True (default), include this attribute in the generated + ``__lt__``, ``__le__``, ``__gt__`` and ``__ge__`` methods. To + override how the attribute value is ordered, pass a callable that + takes a single value and returns the value to be ordered. + + .. seealso:: `comparison` + + hash (bool | None): + Include this attribute in the generated ``__hash__`` method. If + None (default), mirror *eq*'s value. This is the correct behavior + according to the Python spec. Setting this value to anything other + than None is *discouraged*. + + .. seealso:: `hashing` + + init (bool): + Include this attribute in the generated ``__init__`` method. + + It is possible to set this to False and set a default value. In + that case this attribute is unconditionally initialized with the + specified default value or factory. + + .. seealso:: `init` + + converter (typing.Callable | Converter): + A callable that is called by *attrs*-generated ``__init__`` methods + to convert the attribute's value to the desired format. + + If a vanilla callable is passed, it is given the passed-in value as + the only positional argument. It is possible to receive additional + arguments by wrapping the callable in a `Converter`. + + Either way, the returned value will be used as the new value of the + attribute. The value is converted before being passed to the + validator, if any. + + .. seealso:: :ref:`converters` + + metadata (dict | None): + An arbitrary mapping, to be used by third-party code. + + .. seealso:: `extending-metadata`. + + type (type): + The type of the attribute. Nowadays, the preferred method to + specify the type is using a variable annotation (see :pep:`526`). + This argument is provided for backwards-compatibility and for usage + with `make_class`.
Regardless of the approach used, the type will + be stored on ``Attribute.type``. + + Please note that *attrs* doesn't do anything with this metadata by + itself. You can use it as part of your own code or for `static type + checking <mypy>`. + + kw_only (bool | None): + Make this attribute keyword-only in the generated ``__init__`` (if + *init* is False, this parameter is ignored). If None (default), + mirror the setting from `attrs.define`. + + on_setattr (~typing.Callable | list[~typing.Callable] | None | ~typing.Literal[attrs.setters.NO_OP]): + Allows overwriting the *on_setattr* setting from `attr.s`. If left + None, the *on_setattr* value from `attr.s` is used. Set to + `attrs.setters.NO_OP` to run **no** `setattr` hooks for this + attribute -- regardless of the setting in `define()`. + + alias (str | None): + Override this attribute's parameter name in the generated + ``__init__`` method. If left None, defaults to ``name`` stripped + of leading underscores. See `private-attributes`. + + .. versionadded:: 20.1.0 + .. versionchanged:: 21.1.0 + *eq*, *order*, and *cmp* also accept a custom callable + .. versionadded:: 22.2.0 *alias* + .. versionadded:: 23.1.0 + The *type* parameter has been re-added; mostly for `attrs.make_class`. + Please note that type checkers ignore this metadata. + .. versionchanged:: 25.4.0 + *kw_only* can now be None, and its default is also changed from False to + None. + + .. seealso:: + + `attr.ib` + """ + return attrib( + default=default, + validator=validator, + repr=repr, + hash=hash, + init=init, + metadata=metadata, + type=type, + converter=converter, + factory=factory, + kw_only=kw_only, + eq=eq, + order=order, + on_setattr=on_setattr, + alias=alias, + ) + + +def asdict(inst, *, recurse=True, filter=None, value_serializer=None): + """ + Same as `attr.asdict`, except that collection types are always retained + and dict is always used as *dict_factory*. + + .. versionadded:: 21.3.0 + """ + return _asdict( + inst=inst, + recurse=recurse, + filter=filter, + value_serializer=value_serializer, + retain_collection_types=True, + ) + + +def astuple(inst, *, recurse=True, filter=None): + """ + Same as `attr.astuple`, except that collection types are always retained + and `tuple` is always used as the *tuple_factory*. + + .. versionadded:: 21.3.0 + """ + return _astuple( + inst=inst, recurse=recurse, filter=filter, retain_collection_types=True + ) + + +def inspect(cls): + """ + Inspect the class and return its effective build parameters. + + Warning: + This feature is currently **experimental** and is not covered by our + strict backwards-compatibility guarantees. + + Args: + cls: The *attrs*-decorated class to inspect. + + Returns: + The effective build parameters of the class. + + Raises: + NotAnAttrsClassError: If the class is not an *attrs*-decorated class. + + .. versionadded:: 25.4.0 + """ + try: + return cls.__dict__["__attrs_props__"] + except KeyError: + msg = f"{cls!r} is not an attrs-decorated class." + raise NotAnAttrsClassError(msg) from None diff --git a/.venv/lib/python3.11/site-packages/attr/_typing_compat.pyi b/.venv/lib/python3.11/site-packages/attr/_typing_compat.pyi new file mode 100644 index 0000000..ca7b71e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attr/_typing_compat.pyi @@ -0,0 +1,15 @@ +from typing import Any, ClassVar, Protocol + +# MYPY is a special constant in mypy which works the same way as `TYPE_CHECKING`. +MYPY = False + +if MYPY: + # A protocol to be able to statically accept an attrs class.
+ class AttrsInstance_(Protocol): + __attrs_attrs__: ClassVar[Any] + +else: + # For type checkers without plug-in support use an empty protocol that + # will (hopefully) be combined into a union. + class AttrsInstance_(Protocol): + pass diff --git a/.venv/lib/python3.11/site-packages/attr/_version_info.py b/.venv/lib/python3.11/site-packages/attr/_version_info.py new file mode 100644 index 0000000..27f1888 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attr/_version_info.py @@ -0,0 +1,89 @@ +# SPDX-License-Identifier: MIT + + +from functools import total_ordering + +from ._funcs import astuple +from ._make import attrib, attrs + + +@total_ordering +@attrs(eq=False, order=False, slots=True, frozen=True) +class VersionInfo: + """ + A version object that can be compared to tuple of length 1--4: + + >>> attr.VersionInfo(19, 1, 0, "final") <= (19, 2) + True + >>> attr.VersionInfo(19, 1, 0, "final") < (19, 1, 1) + True + >>> vi = attr.VersionInfo(19, 2, 0, "final") + >>> vi < (19, 1, 1) + False + >>> vi < (19,) + False + >>> vi == (19, 2,) + True + >>> vi == (19, 2, 1) + False + + .. versionadded:: 19.2 + """ + + year = attrib(type=int) + minor = attrib(type=int) + micro = attrib(type=int) + releaselevel = attrib(type=str) + + @classmethod + def _from_version_string(cls, s): + """ + Parse *s* and return a _VersionInfo. + """ + v = s.split(".") + if len(v) == 3: + v.append("final") + + return cls( + year=int(v[0]), minor=int(v[1]), micro=int(v[2]), releaselevel=v[3] + ) + + def _ensure_tuple(self, other): + """ + Ensure *other* is a tuple of a valid length. + + Returns a possibly transformed *other* and ourselves as a tuple of + the same length as *other*. + """ + + if self.__class__ is other.__class__: + other = astuple(other) + + if not isinstance(other, tuple): + raise NotImplementedError + + if not (1 <= len(other) <= 4): + raise NotImplementedError + + return astuple(self)[: len(other)], other + + def __eq__(self, other): + try: + us, them = self._ensure_tuple(other) + except NotImplementedError: + return NotImplemented + + return us == them + + def __lt__(self, other): + try: + us, them = self._ensure_tuple(other) + except NotImplementedError: + return NotImplemented + + # Since alphabetically "dev0" < "final" < "post1" < "post2", we don't + # have to do anything special with releaselevel for now. + return us < them + + def __hash__(self): + return hash((self.year, self.minor, self.micro, self.releaselevel)) diff --git a/.venv/lib/python3.11/site-packages/attr/_version_info.pyi b/.venv/lib/python3.11/site-packages/attr/_version_info.pyi new file mode 100644 index 0000000..45ced08 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attr/_version_info.pyi @@ -0,0 +1,9 @@ +class VersionInfo: + @property + def year(self) -> int: ... + @property + def minor(self) -> int: ... + @property + def micro(self) -> int: ... + @property + def releaselevel(self) -> str: ... diff --git a/.venv/lib/python3.11/site-packages/attr/converters.py b/.venv/lib/python3.11/site-packages/attr/converters.py new file mode 100644 index 0000000..0a79dee --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attr/converters.py @@ -0,0 +1,162 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly useful converters. +""" + +import typing + +from ._compat import _AnnotationExtractor +from ._make import NOTHING, Converter, Factory, pipe + + +__all__ = [ + "default_if_none", + "optional", + "pipe", + "to_bool", +] + + +def optional(converter): + """ + A converter that allows an attribute to be optional. 
An optional attribute + is one which can be set to `None`. + + Type annotations will be inferred from the wrapped converter's annotations, + if it has any. + + Args: + converter (typing.Callable): + The converter that is used for non-`None` values. + + .. versionadded:: 17.1.0 + """ + + if isinstance(converter, Converter): + + def optional_converter(val, inst, field): + if val is None: + return None + return converter(val, inst, field) + + else: + + def optional_converter(val): + if val is None: + return None + return converter(val) + + xtr = _AnnotationExtractor(converter) + + t = xtr.get_first_param_type() + if t: + optional_converter.__annotations__["val"] = typing.Optional[t] + + rt = xtr.get_return_type() + if rt: + optional_converter.__annotations__["return"] = typing.Optional[rt] + + if isinstance(converter, Converter): + return Converter(optional_converter, takes_self=True, takes_field=True) + + return optional_converter + + +def default_if_none(default=NOTHING, factory=None): + """ + A converter that replaces `None` values with *default* or the result of + *factory*. + + Args: + default: + Value to be used if `None` is passed. Passing an instance of + `attrs.Factory` is supported, however the ``takes_self`` option is + *not*. + + factory (typing.Callable): + A callable that takes no parameters, whose result is used if `None` + is passed. + + Raises: + TypeError: If **neither** *default* **nor** *factory* is passed. + + TypeError: If **both** *default* and *factory* are passed. + + ValueError: + If an instance of `attrs.Factory` is passed with + ``takes_self=True``. + + .. versionadded:: 18.2.0 + """ + if default is NOTHING and factory is None: + msg = "Must pass either `default` or `factory`." + raise TypeError(msg) + + if default is not NOTHING and factory is not None: + msg = "Must pass either `default` or `factory` but not both." + raise TypeError(msg) + + if factory is not None: + default = Factory(factory) + + if isinstance(default, Factory): + if default.takes_self: + msg = "`takes_self` is not supported by default_if_none." + raise ValueError(msg) + + def default_if_none_converter(val): + if val is not None: + return val + + return default.factory() + + else: + + def default_if_none_converter(val): + if val is not None: + return val + + return default + + return default_if_none_converter + + +def to_bool(val): + """ + Convert "boolean" strings (for example, from environment variables) to real + booleans. + + Values mapping to `True`: + + - ``True`` + - ``"true"`` / ``"t"`` + - ``"yes"`` / ``"y"`` + - ``"on"`` + - ``"1"`` + - ``1`` + + Values mapping to `False`: + + - ``False`` + - ``"false"`` / ``"f"`` + - ``"no"`` / ``"n"`` + - ``"off"`` + - ``"0"`` + - ``0`` + + Raises: + ValueError: For any other value. + + .. versionadded:: 21.3.0 + """ + if isinstance(val, str): + val = val.lower() + + if val in (True, "true", "t", "yes", "y", "on", "1", 1): + return True + if val in (False, "false", "f", "no", "n", "off", "0", 0): + return False + + msg = f"Cannot convert value to bool: {val!r}" + raise ValueError(msg) diff --git a/.venv/lib/python3.11/site-packages/attr/converters.pyi b/.venv/lib/python3.11/site-packages/attr/converters.pyi new file mode 100644 index 0000000..12bd0c4 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attr/converters.pyi @@ -0,0 +1,19 @@ +from typing import Callable, Any, overload + +from attrs import _ConverterType, _CallableConverterType + +@overload +def pipe(*validators: _CallableConverterType) -> _CallableConverterType: ...
+@overload +def pipe(*validators: _ConverterType) -> _ConverterType: ... +@overload +def optional(converter: _CallableConverterType) -> _CallableConverterType: ... +@overload +def optional(converter: _ConverterType) -> _ConverterType: ... +@overload +def default_if_none(default: Any) -> _CallableConverterType: ... +@overload +def default_if_none( + *, factory: Callable[[], Any] +) -> _CallableConverterType: ... +def to_bool(val: str | int | bool) -> bool: ... diff --git a/.venv/lib/python3.11/site-packages/attr/exceptions.py b/.venv/lib/python3.11/site-packages/attr/exceptions.py new file mode 100644 index 0000000..3b7abb8 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attr/exceptions.py @@ -0,0 +1,95 @@ +# SPDX-License-Identifier: MIT + +from __future__ import annotations + +from typing import ClassVar + + +class FrozenError(AttributeError): + """ + An attempt was made to modify a frozen/immutable instance or attribute. + + It mirrors the behavior of ``namedtuples`` by using the same error message + and subclassing `AttributeError`. + + .. versionadded:: 20.1.0 + """ + + msg = "can't set attribute" + args: ClassVar[tuple[str]] = [msg] + + +class FrozenInstanceError(FrozenError): + """ + An attempt was made to modify a frozen instance. + + .. versionadded:: 16.1.0 + """ + + +class FrozenAttributeError(FrozenError): + """ + An attempt was made to modify a frozen attribute. + + .. versionadded:: 20.1.0 + """ + + +class AttrsAttributeNotFoundError(ValueError): + """ + An *attrs* function couldn't find an attribute that the user asked for. + + .. versionadded:: 16.2.0 + """ + + +class NotAnAttrsClassError(ValueError): + """ + A non-*attrs* class has been passed into an *attrs* function. + + .. versionadded:: 16.2.0 + """ + + +class DefaultAlreadySetError(RuntimeError): + """ + A default was set when defining the field, and an attempt was made to + reset it using the decorator. + + .. versionadded:: 17.1.0 + """ + + +class UnannotatedAttributeError(RuntimeError): + """ + A class with ``auto_attribs=True`` has a field without a type annotation. + + .. versionadded:: 17.3.0 + """ + + +class PythonTooOldError(RuntimeError): + """ + An *attrs* feature that requires a newer Python version was used. + + .. versionadded:: 18.2.0 + """ + + +class NotCallableError(TypeError): + """ + A field requiring a callable has been set with a value that is not + callable. + + .. versionadded:: 19.2.0 + """ + + def __init__(self, msg, value): + super(TypeError, self).__init__(msg, value) + self.msg = msg + self.value = value + + def __str__(self): + return str(self.msg) diff --git a/.venv/lib/python3.11/site-packages/attr/exceptions.pyi b/.venv/lib/python3.11/site-packages/attr/exceptions.pyi new file mode 100644 index 0000000..f268011 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attr/exceptions.pyi @@ -0,0 +1,17 @@ +from typing import Any + +class FrozenError(AttributeError): + msg: str = ... + +class FrozenInstanceError(FrozenError): ... +class FrozenAttributeError(FrozenError): ... +class AttrsAttributeNotFoundError(ValueError): ... +class NotAnAttrsClassError(ValueError): ... +class DefaultAlreadySetError(RuntimeError): ... +class UnannotatedAttributeError(RuntimeError): ... +class PythonTooOldError(RuntimeError): ... + +class NotCallableError(TypeError): + msg: str = ... + value: Any = ... + def __init__(self, msg: str, value: Any) -> None: ...
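The converter and exception helpers above compose with the `attrs.define`/`attrs.frozen` APIs from earlier in this diff. The following is a minimal illustrative sketch, not part of the vendored sources; the `Settings` class and its field names are hypothetical:

import attrs
from attrs import converters
from attrs.exceptions import FrozenInstanceError

@attrs.frozen
class Settings:
    # to_bool() normalizes strings such as "yes", "on", or "0" to real booleans.
    debug: bool = attrs.field(converter=converters.to_bool, default=False)
    # optional() lets None pass through untouched; any other value is fed to int().
    retries: int | None = attrs.field(converter=converters.optional(int), default=None)
    # default_if_none() replaces an explicit None with the given default.
    name: str = attrs.field(converter=converters.default_if_none("unnamed"), default=None)

s = Settings(debug="yes", retries="3")
print(s)  # Settings(debug=True, retries=3, name='unnamed')

try:
    s.debug = False  # the class is frozen, so assignment raises
except FrozenInstanceError:
    print("Settings is immutable")

Since `Settings` is built with `attrs.frozen`, the converters only run inside the generated `__init__`; a mutable `attrs.define` class would also run them on every attribute assignment, as noted in the `define` docstring above.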
diff --git a/.venv/lib/python3.11/site-packages/attr/filters.py b/.venv/lib/python3.11/site-packages/attr/filters.py new file mode 100644 index 0000000..689b170 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attr/filters.py @@ -0,0 +1,72 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly useful filters for `attrs.asdict` and `attrs.astuple`. +""" + +from ._make import Attribute + + +def _split_what(what): + """ + Returns a tuple of `frozenset`s of classes, names, and attributes. + """ + return ( + frozenset(cls for cls in what if isinstance(cls, type)), + frozenset(cls for cls in what if isinstance(cls, str)), + frozenset(cls for cls in what if isinstance(cls, Attribute)), + ) + + +def include(*what): + """ + Create a filter that only allows *what*. + + Args: + what (list[type, str, attrs.Attribute]): + What to include. Can be a type, a name, or an attribute. + + Returns: + Callable: + A callable that can be passed to `attrs.asdict`'s and + `attrs.astuple`'s *filter* argument. + + .. versionchanged:: 23.1.0 Accept strings with field names. + """ + cls, names, attrs = _split_what(what) + + def include_(attribute, value): + return ( + value.__class__ in cls + or attribute.name in names + or attribute in attrs + ) + + return include_ + + +def exclude(*what): + """ + Create a filter that does **not** allow *what*. + + Args: + what (list[type, str, attrs.Attribute]): + What to exclude. Can be a type, a name, or an attribute. + + Returns: + Callable: + A callable that can be passed to `attrs.asdict`'s and + `attrs.astuple`'s *filter* argument. + + .. versionchanged:: 23.3.0 Accept field name string as input argument + """ + cls, names, attrs = _split_what(what) + + def exclude_(attribute, value): + return not ( + value.__class__ in cls + or attribute.name in names + or attribute in attrs + ) + + return exclude_ diff --git a/.venv/lib/python3.11/site-packages/attr/filters.pyi b/.venv/lib/python3.11/site-packages/attr/filters.pyi new file mode 100644 index 0000000..974abdc --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attr/filters.pyi @@ -0,0 +1,6 @@ +from typing import Any + +from . import Attribute, _FilterType + +def include(*what: type | str | Attribute[Any]) -> _FilterType[Any]: ... +def exclude(*what: type | str | Attribute[Any]) -> _FilterType[Any]: ... diff --git a/.venv/lib/python3.11/site-packages/attr/py.typed b/.venv/lib/python3.11/site-packages/attr/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.11/site-packages/attr/setters.py b/.venv/lib/python3.11/site-packages/attr/setters.py new file mode 100644 index 0000000..78b0839 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attr/setters.py @@ -0,0 +1,79 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly used hooks for on_setattr. +""" + +from . import _config +from .exceptions import FrozenAttributeError + + +def pipe(*setters): + """ + Run all *setters* and return the return value of the last one. + + .. versionadded:: 20.1.0 + """ + + def wrapped_pipe(instance, attrib, new_value): + rv = new_value + + for setter in setters: + rv = setter(instance, attrib, rv) + + return rv + + return wrapped_pipe + + +def frozen(_, __, ___): + """ + Prevent an attribute from being modified. + + .. versionadded:: 20.1.0 + """ + raise FrozenAttributeError + + +def validate(instance, attrib, new_value): + """ + Run *attrib*'s validator on *new_value* if it has one. + + ..
versionadded:: 20.1.0 + """ + if _config._run_validators is False: + return new_value + + v = attrib.validator + if not v: + return new_value + + v(instance, attrib, new_value) + + return new_value + + +def convert(instance, attrib, new_value): + """ + Run *attrib*'s converter -- if it has one -- on *new_value* and return the + result. + + .. versionadded:: 20.1.0 + """ + c = attrib.converter + if c: + # This can be removed once we drop 3.8 and use attrs.Converter instead. + from ._make import Converter + + if not isinstance(c, Converter): + return c(new_value) + + return c(new_value, instance, attrib) + + return new_value + + +# Sentinel for disabling class-wide *on_setattr* hooks for certain attributes. +# Sphinx's autodata stopped working, so the docstring is inlined in the API +# docs. +NO_OP = object() diff --git a/.venv/lib/python3.11/site-packages/attr/setters.pyi b/.venv/lib/python3.11/site-packages/attr/setters.pyi new file mode 100644 index 0000000..73abf36 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attr/setters.pyi @@ -0,0 +1,20 @@ +from typing import Any, NewType, NoReturn, TypeVar + +from . import Attribute +from attrs import _OnSetAttrType + +_T = TypeVar("_T") + +def frozen( + instance: Any, attribute: Attribute[Any], new_value: Any +) -> NoReturn: ... +def pipe(*setters: _OnSetAttrType) -> _OnSetAttrType: ... +def validate(instance: Any, attribute: Attribute[_T], new_value: _T) -> _T: ... + +# convert is allowed to return Any, because they can be chained using pipe. +def convert( + instance: Any, attribute: Attribute[Any], new_value: Any +) -> Any: ... + +_NoOpType = NewType("_NoOpType", object) +NO_OP: _NoOpType diff --git a/.venv/lib/python3.11/site-packages/attr/validators.py b/.venv/lib/python3.11/site-packages/attr/validators.py new file mode 100644 index 0000000..837e003 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attr/validators.py @@ -0,0 +1,748 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly useful validators. +""" + +import operator +import re + +from contextlib import contextmanager +from re import Pattern + +from ._config import get_run_validators, set_run_validators +from ._make import _AndValidator, and_, attrib, attrs +from .converters import default_if_none +from .exceptions import NotCallableError + + +__all__ = [ + "and_", + "deep_iterable", + "deep_mapping", + "disabled", + "ge", + "get_disabled", + "gt", + "in_", + "instance_of", + "is_callable", + "le", + "lt", + "matches_re", + "max_len", + "min_len", + "not_", + "optional", + "or_", + "set_disabled", +] + + +def set_disabled(disabled): + """ + Globally disable or enable running validators. + + By default, they are run. + + Args: + disabled (bool): If `True`, disable running all validators. + + .. warning:: + + This function is not thread-safe! + + .. versionadded:: 21.3.0 + """ + set_run_validators(not disabled) + + +def get_disabled(): + """ + Return a bool indicating whether validators are currently disabled or not. + + Returns: + bool:`True` if validators are currently disabled. + + .. versionadded:: 21.3.0 + """ + return not get_run_validators() + + +@contextmanager +def disabled(): + """ + Context manager that disables running validators within its context. + + .. warning:: + + This context manager is not thread-safe! + + .. 
versionadded:: 21.3.0 + """ + set_run_validators(False) + try: + yield + finally: + set_run_validators(True) + + +@attrs(repr=False, slots=True, unsafe_hash=True) +class _InstanceOfValidator: + type = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not isinstance(value, self.type): + msg = f"'{attr.name}' must be {self.type!r} (got {value!r} that is a {value.__class__!r})." + raise TypeError( + msg, + attr, + self.type, + value, + ) + + def __repr__(self): + return f"<instance_of validator for type {self.type!r}>" + + +def instance_of(type): + """ + A validator that raises a `TypeError` if the initializer is called with a + wrong type for this particular attribute (checks are performed using + `isinstance` therefore it's also valid to pass a tuple of types). + + Args: + type (type | tuple[type]): The type to check for. + + Raises: + TypeError: + With a human readable error message, the attribute (of type + `attrs.Attribute`), the expected type, and the value it got. + """ + return _InstanceOfValidator(type) + + +@attrs(repr=False, frozen=True, slots=True) +class _MatchesReValidator: + pattern = attrib() + match_func = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not self.match_func(value): + msg = f"'{attr.name}' must match regex {self.pattern.pattern!r} ({value!r} doesn't)" + raise ValueError( + msg, + attr, + self.pattern, + value, + ) + + def __repr__(self): + return f"<matches_re validator for pattern {self.pattern!r}>" + + +def matches_re(regex, flags=0, func=None): + r""" + A validator that raises `ValueError` if the initializer is called with a + string that doesn't match *regex*. + + Args: + regex (str, re.Pattern): + A regex string or precompiled pattern to match against + + flags (int): + Flags that will be passed to the underlying re function (default 0) + + func (typing.Callable): + Which underlying `re` function to call. Valid options are + `re.fullmatch`, `re.search`, and `re.match`; the default `None` + means `re.fullmatch`. For performance reasons, the pattern is + always precompiled using `re.compile`. + + .. versionadded:: 19.2.0 + .. versionchanged:: 21.3.0 *regex* can be a pre-compiled pattern. + """ + valid_funcs = (re.fullmatch, None, re.search, re.match) + if func not in valid_funcs: + msg = "'func' must be one of {}.".format( + ", ".join( + sorted((e and e.__name__) or "None" for e in set(valid_funcs)) + ) + ) + raise ValueError(msg) + + if isinstance(regex, Pattern): + if flags: + msg = "'flags' can only be used with a string pattern; pass flags to re.compile() instead" + raise TypeError(msg) + pattern = regex + else: + pattern = re.compile(regex, flags) + + if func is re.match: + match_func = pattern.match + elif func is re.search: + match_func = pattern.search + else: + match_func = pattern.fullmatch + + return _MatchesReValidator(pattern, match_func) + + +@attrs(repr=False, slots=True, unsafe_hash=True) +class _OptionalValidator: + validator = attrib() + + def __call__(self, inst, attr, value): + if value is None: + return + + self.validator(inst, attr, value) + + def __repr__(self): + return f"<optional validator for {self.validator!r} or None>" + + +def optional(validator): + """ + A validator that makes an attribute optional. An optional attribute is one + which can be set to `None` in addition to satisfying the requirements of + the sub-validator. + + Args: + validator + (typing.Callable | tuple[typing.Callable] | list[typing.Callable]): + A validator (or validators) that is used for non-`None` values. + + ..
versionadded:: 15.1.0 + .. versionchanged:: 17.1.0 *validator* can be a list of validators. + .. versionchanged:: 23.1.0 *validator* can also be a tuple of validators. + """ + if isinstance(validator, (list, tuple)): + return _OptionalValidator(_AndValidator(validator)) + + return _OptionalValidator(validator) + + +@attrs(repr=False, slots=True, unsafe_hash=True) +class _InValidator: + options = attrib() + _original_options = attrib(hash=False) + + def __call__(self, inst, attr, value): + try: + in_options = value in self.options + except TypeError: # e.g. `1 in "abc"` + in_options = False + + if not in_options: + msg = f"'{attr.name}' must be in {self._original_options!r} (got {value!r})" + raise ValueError( + msg, + attr, + self._original_options, + value, + ) + + def __repr__(self): + return f"<in_ validator with options {self._original_options!r}>" + + +def in_(options): + """ + A validator that raises a `ValueError` if the initializer is called with a + value that does not belong in the *options* provided. + + The check is performed using ``value in options``, so *options* has to + support that operation. + + To keep the validator hashable, dicts, lists, and sets are transparently + transformed into a `tuple`. + + Args: + options: Allowed options. + + Raises: + ValueError: + With a human readable error message, the attribute (of type + `attrs.Attribute`), the expected options, and the value it got. + + .. versionadded:: 17.1.0 + .. versionchanged:: 22.1.0 + The ValueError was incomplete until now and only contained the human + readable error message. Now it contains all the information that has + been promised since 17.1.0. + .. versionchanged:: 24.1.0 + *options* that are a list, dict, or a set are now transformed into a + tuple to keep the validator hashable. + """ + repr_options = options + if isinstance(options, (list, dict, set)): + options = tuple(options) + + return _InValidator(options, repr_options) + + +@attrs(repr=False, slots=False, unsafe_hash=True) +class _IsCallableValidator: + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not callable(value): + message = ( + "'{name}' must be callable " + "(got {value!r} that is a {actual!r})." + ) + raise NotCallableError( + msg=message.format( + name=attr.name, value=value, actual=value.__class__ + ), + value=value, + ) + + def __repr__(self): + return "<is_callable validator>" + + +def is_callable(): + """ + A validator that raises an `attrs.exceptions.NotCallableError` if the + initializer is called with a value for this particular attribute that is + not callable. + + .. versionadded:: 19.1.0 + + Raises: + attrs.exceptions.NotCallableError: + With a human readable error message containing the attribute + (`attrs.Attribute`) name, and the value it got. + """ + return _IsCallableValidator() + + +@attrs(repr=False, slots=True, unsafe_hash=True) +class _DeepIterable: + member_validator = attrib(validator=is_callable()) + iterable_validator = attrib( + default=None, validator=optional(is_callable()) + ) + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``.
+ """ + if self.iterable_validator is not None: + self.iterable_validator(inst, attr, value) + + for member in value: + self.member_validator(inst, attr, member) + + def __repr__(self): + iterable_identifier = ( + "" + if self.iterable_validator is None + else f" {self.iterable_validator!r}" + ) + return ( + f"" + ) + + +def deep_iterable(member_validator, iterable_validator=None): + """ + A validator that performs deep validation of an iterable. + + Args: + member_validator: Validator(s) to apply to iterable members. + + iterable_validator: + Validator(s) to apply to iterable itself (optional). + + Raises + TypeError: if any sub-validators fail + + .. versionadded:: 19.1.0 + + .. versionchanged:: 25.4.0 + *member_validator* and *iterable_validator* can now be a list or tuple + of validators. + """ + if isinstance(member_validator, (list, tuple)): + member_validator = and_(*member_validator) + if isinstance(iterable_validator, (list, tuple)): + iterable_validator = and_(*iterable_validator) + return _DeepIterable(member_validator, iterable_validator) + + +@attrs(repr=False, slots=True, unsafe_hash=True) +class _DeepMapping: + key_validator = attrib(validator=optional(is_callable())) + value_validator = attrib(validator=optional(is_callable())) + mapping_validator = attrib(validator=optional(is_callable())) + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if self.mapping_validator is not None: + self.mapping_validator(inst, attr, value) + + for key in value: + if self.key_validator is not None: + self.key_validator(inst, attr, key) + if self.value_validator is not None: + self.value_validator(inst, attr, value[key]) + + def __repr__(self): + return f"" + + +def deep_mapping( + key_validator=None, value_validator=None, mapping_validator=None +): + """ + A validator that performs deep validation of a dictionary. + + All validators are optional, but at least one of *key_validator* or + *value_validator* must be provided. + + Args: + key_validator: Validator(s) to apply to dictionary keys. + + value_validator: Validator(s) to apply to dictionary values. + + mapping_validator: + Validator(s) to apply to top-level mapping attribute. + + .. versionadded:: 19.1.0 + + .. versionchanged:: 25.4.0 + *key_validator* and *value_validator* are now optional, but at least one + of them must be provided. + + .. versionchanged:: 25.4.0 + *key_validator*, *value_validator*, and *mapping_validator* can now be a + list or tuple of validators. + + Raises: + TypeError: If any sub-validator fails on validation. + + ValueError: + If neither *key_validator* nor *value_validator* is provided on + instantiation. + """ + if key_validator is None and value_validator is None: + msg = ( + "At least one of key_validator or value_validator must be provided" + ) + raise ValueError(msg) + + if isinstance(key_validator, (list, tuple)): + key_validator = and_(*key_validator) + if isinstance(value_validator, (list, tuple)): + value_validator = and_(*value_validator) + if isinstance(mapping_validator, (list, tuple)): + mapping_validator = and_(*mapping_validator) + + return _DeepMapping(key_validator, value_validator, mapping_validator) + + +@attrs(repr=False, frozen=True, slots=True) +class _NumberValidator: + bound = attrib() + compare_op = attrib() + compare_func = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. 
+ """ + if not self.compare_func(value, self.bound): + msg = f"'{attr.name}' must be {self.compare_op} {self.bound}: {value}" + raise ValueError(msg) + + def __repr__(self): + return f"" + + +def lt(val): + """ + A validator that raises `ValueError` if the initializer is called with a + number larger or equal to *val*. + + The validator uses `operator.lt` to compare the values. + + Args: + val: Exclusive upper bound for values. + + .. versionadded:: 21.3.0 + """ + return _NumberValidator(val, "<", operator.lt) + + +def le(val): + """ + A validator that raises `ValueError` if the initializer is called with a + number greater than *val*. + + The validator uses `operator.le` to compare the values. + + Args: + val: Inclusive upper bound for values. + + .. versionadded:: 21.3.0 + """ + return _NumberValidator(val, "<=", operator.le) + + +def ge(val): + """ + A validator that raises `ValueError` if the initializer is called with a + number smaller than *val*. + + The validator uses `operator.ge` to compare the values. + + Args: + val: Inclusive lower bound for values + + .. versionadded:: 21.3.0 + """ + return _NumberValidator(val, ">=", operator.ge) + + +def gt(val): + """ + A validator that raises `ValueError` if the initializer is called with a + number smaller or equal to *val*. + + The validator uses `operator.gt` to compare the values. + + Args: + val: Exclusive lower bound for values + + .. versionadded:: 21.3.0 + """ + return _NumberValidator(val, ">", operator.gt) + + +@attrs(repr=False, frozen=True, slots=True) +class _MaxLengthValidator: + max_length = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if len(value) > self.max_length: + msg = f"Length of '{attr.name}' must be <= {self.max_length}: {len(value)}" + raise ValueError(msg) + + def __repr__(self): + return f"" + + +def max_len(length): + """ + A validator that raises `ValueError` if the initializer is called + with a string or iterable that is longer than *length*. + + Args: + length (int): Maximum length of the string or iterable + + .. versionadded:: 21.3.0 + """ + return _MaxLengthValidator(length) + + +@attrs(repr=False, frozen=True, slots=True) +class _MinLengthValidator: + min_length = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if len(value) < self.min_length: + msg = f"Length of '{attr.name}' must be >= {self.min_length}: {len(value)}" + raise ValueError(msg) + + def __repr__(self): + return f"" + + +def min_len(length): + """ + A validator that raises `ValueError` if the initializer is called + with a string or iterable that is shorter than *length*. + + Args: + length (int): Minimum length of the string or iterable + + .. versionadded:: 22.1.0 + """ + return _MinLengthValidator(length) + + +@attrs(repr=False, slots=True, unsafe_hash=True) +class _SubclassOfValidator: + type = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not issubclass(value, self.type): + msg = f"'{attr.name}' must be a subclass of {self.type!r} (got {value!r})." 
+ raise TypeError( + msg, + attr, + self.type, + value, + ) + + def __repr__(self): + return f"<subclass_of validator for type {self.type!r}>" + + +def _subclass_of(type): + """ + A validator that raises a `TypeError` if the initializer is called with a + wrong type for this particular attribute (checks are performed using + `issubclass` therefore it's also valid to pass a tuple of types). + + Args: + type (type | tuple[type, ...]): The type(s) to check for. + + Raises: + TypeError: + With a human readable error message, the attribute (of type + `attrs.Attribute`), the expected type, and the value it got. + """ + return _SubclassOfValidator(type) + + +@attrs(repr=False, slots=True, unsafe_hash=True) +class _NotValidator: + validator = attrib() + msg = attrib( + converter=default_if_none( + "not_ validator child '{validator!r}' " + "did not raise a captured error" + ) + ) + exc_types = attrib( + validator=deep_iterable( + member_validator=_subclass_of(Exception), + iterable_validator=instance_of(tuple), + ), + ) + + def __call__(self, inst, attr, value): + try: + self.validator(inst, attr, value) + except self.exc_types: + pass # suppress error to invert validity + else: + raise ValueError( + self.msg.format( + validator=self.validator, + exc_types=self.exc_types, + ), + attr, + self.validator, + value, + self.exc_types, + ) + + def __repr__(self): + return f"<not_ validator wrapping {self.validator!r}, capturing {self.exc_types!r}>" + + +def not_(validator, *, msg=None, exc_types=(ValueError, TypeError)): + """ + A validator that wraps and logically 'inverts' the validator passed to it. + It will raise a `ValueError` if the provided validator *doesn't* raise a + `ValueError` or `TypeError` (by default), and will suppress the exception + if the provided validator *does*. + + Intended to be used with existing validators to compose logic without + needing to create inverted variants, for example, ``not_(in_(...))``. + + Args: + validator: A validator to be logically inverted. + + msg (str): + Message to raise if validator fails. Formatted with keys + ``exc_types`` and ``validator``. + + exc_types (tuple[type, ...]): + Exception type(s) to capture. Other types raised by child + validators will not be intercepted and pass through. + + Raises: + ValueError: + With a human readable error message, the attribute (of type + `attrs.Attribute`), the validator that failed to raise an + exception, the value it got, and the expected exception types. + + .. versionadded:: 22.2.0 + """ + try: + exc_types = tuple(exc_types) + except TypeError: + exc_types = (exc_types,) + return _NotValidator(validator, msg, exc_types) + + +@attrs(repr=False, slots=True, unsafe_hash=True) +class _OrValidator: + validators = attrib() + + def __call__(self, inst, attr, value): + for v in self.validators: + try: + v(inst, attr, value) + except Exception: # noqa: BLE001, PERF203, S112 + continue + else: + return + + msg = f"None of {self.validators!r} satisfied for value {value!r}" + raise ValueError(msg) + + def __repr__(self): + return f"<or validator wrapping {self.validators!r}>" + + +def or_(*validators): + """ + A validator that composes multiple validators into one. + + When called on a value, it runs all wrapped validators until one of them is + satisfied. + + Args: + validators (~collections.abc.Iterable[typing.Callable]): + Arbitrary number of validators. + + Raises: + ValueError: + If no validator is satisfied. Raised with a human-readable error + message listing all the wrapped validators and the value that + failed all of them. + + ..
versionadded:: 24.1.0 + """ + vals = [] + for v in validators: + vals.extend(v.validators if isinstance(v, _OrValidator) else [v]) + + return _OrValidator(tuple(vals)) diff --git a/.venv/lib/python3.11/site-packages/attr/validators.pyi b/.venv/lib/python3.11/site-packages/attr/validators.pyi new file mode 100644 index 0000000..36a7e80 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attr/validators.pyi @@ -0,0 +1,140 @@ +from types import UnionType +from typing import ( + Any, + AnyStr, + Callable, + Container, + ContextManager, + Iterable, + Mapping, + Match, + Pattern, + TypeVar, + overload, +) + +from attrs import _ValidatorType +from attrs import _ValidatorArgType + +_T = TypeVar("_T") +_T1 = TypeVar("_T1") +_T2 = TypeVar("_T2") +_T3 = TypeVar("_T3") +_T4 = TypeVar("_T4") +_T5 = TypeVar("_T5") +_T6 = TypeVar("_T6") +_I = TypeVar("_I", bound=Iterable) +_K = TypeVar("_K") +_V = TypeVar("_V") +_M = TypeVar("_M", bound=Mapping) + +def set_disabled(run: bool) -> None: ... +def get_disabled() -> bool: ... +def disabled() -> ContextManager[None]: ... + +# To be more precise on instance_of use some overloads. +# If there are more than 3 items in the tuple then we fall back to Any +@overload +def instance_of(type: type[_T]) -> _ValidatorType[_T]: ... +@overload +def instance_of(type: tuple[type[_T]]) -> _ValidatorType[_T]: ... +@overload +def instance_of( + type: tuple[type[_T1], type[_T2]], +) -> _ValidatorType[_T1 | _T2]: ... +@overload +def instance_of( + type: tuple[type[_T1], type[_T2], type[_T3]], +) -> _ValidatorType[_T1 | _T2 | _T3]: ... +@overload +def instance_of(type: tuple[type, ...]) -> _ValidatorType[Any]: ... +@overload +def instance_of(type: UnionType) -> _ValidatorType[Any]: ... +def optional( + validator: ( + _ValidatorType[_T] + | list[_ValidatorType[_T]] + | tuple[_ValidatorType[_T]] + ), +) -> _ValidatorType[_T | None]: ... +def in_(options: Container[_T]) -> _ValidatorType[_T]: ... +def and_(*validators: _ValidatorType[_T]) -> _ValidatorType[_T]: ... +def matches_re( + regex: Pattern[AnyStr] | AnyStr, + flags: int = ..., + func: Callable[[AnyStr, AnyStr, int], Match[AnyStr] | None] | None = ..., +) -> _ValidatorType[AnyStr]: ... +def deep_iterable( + member_validator: _ValidatorArgType[_T], + iterable_validator: _ValidatorArgType[_I] | None = ..., +) -> _ValidatorType[_I]: ... +@overload +def deep_mapping( + key_validator: _ValidatorArgType[_K], + value_validator: _ValidatorArgType[_V] | None = ..., + mapping_validator: _ValidatorArgType[_M] | None = ..., +) -> _ValidatorType[_M]: ... +@overload +def deep_mapping( + key_validator: _ValidatorArgType[_K] | None = ..., + value_validator: _ValidatorArgType[_V] = ..., + mapping_validator: _ValidatorArgType[_M] | None = ..., +) -> _ValidatorType[_M]: ... +def is_callable() -> _ValidatorType[_T]: ... +def lt(val: _T) -> _ValidatorType[_T]: ... +def le(val: _T) -> _ValidatorType[_T]: ... +def ge(val: _T) -> _ValidatorType[_T]: ... +def gt(val: _T) -> _ValidatorType[_T]: ... +def max_len(length: int) -> _ValidatorType[_T]: ... +def min_len(length: int) -> _ValidatorType[_T]: ... +def not_( + validator: _ValidatorType[_T], + *, + msg: str | None = None, + exc_types: type[Exception] | Iterable[type[Exception]] = ..., +) -> _ValidatorType[_T]: ... +@overload +def or_( + __v1: _ValidatorType[_T1], + __v2: _ValidatorType[_T2], +) -> _ValidatorType[_T1 | _T2]: ... +@overload +def or_( + __v1: _ValidatorType[_T1], + __v2: _ValidatorType[_T2], + __v3: _ValidatorType[_T3], +) -> _ValidatorType[_T1 | _T2 | _T3]: ... 
+@overload +def or_( + __v1: _ValidatorType[_T1], + __v2: _ValidatorType[_T2], + __v3: _ValidatorType[_T3], + __v4: _ValidatorType[_T4], +) -> _ValidatorType[_T1 | _T2 | _T3 | _T4]: ... +@overload +def or_( + __v1: _ValidatorType[_T1], + __v2: _ValidatorType[_T2], + __v3: _ValidatorType[_T3], + __v4: _ValidatorType[_T4], + __v5: _ValidatorType[_T5], +) -> _ValidatorType[_T1 | _T2 | _T3 | _T4 | _T5]: ... +@overload +def or_( + __v1: _ValidatorType[_T1], + __v2: _ValidatorType[_T2], + __v3: _ValidatorType[_T3], + __v4: _ValidatorType[_T4], + __v5: _ValidatorType[_T5], + __v6: _ValidatorType[_T6], +) -> _ValidatorType[_T1 | _T2 | _T3 | _T4 | _T5 | _T6]: ... +@overload +def or_( + __v1: _ValidatorType[Any], + __v2: _ValidatorType[Any], + __v3: _ValidatorType[Any], + __v4: _ValidatorType[Any], + __v5: _ValidatorType[Any], + __v6: _ValidatorType[Any], + *validators: _ValidatorType[Any], +) -> _ValidatorType[Any]: ... diff --git a/.venv/lib/python3.11/site-packages/attributecode/__init__.py b/.venv/lib/python3.11/site-packages/attributecode/__init__.py new file mode 100644 index 0000000..74e7215 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attributecode/__init__.py @@ -0,0 +1,116 @@ +#!/usr/bin/env python +# -*- coding: utf8 -*- + +# ============================================================================ +# Copyright (c) nexB Inc. http://www.nexb.com/ - All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +from collections import namedtuple +import logging +import os + +import saneyaml + +__version__ = '11.1.1' + +__about_spec_version__ = '4.0.0' + +__copyright__ = """ +Copyright (c) nexB Inc. All rights reserved. http://dejacode.org +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + + +class Error(namedtuple('Error', ['severity', 'message'])): + """ + An Error data with a severity and message. 
+ """ + + def __new__(self, severity, message): + if message: + if isinstance(message, str): + message = self._clean_string(message) + else: + message = self._clean_string(repr(message)) + message = message.strip('"') + + return super(Error, self).__new__( + Error, severity, message) + + def __repr__(self, *args, **kwargs): + sev, msg = self._get_values() + return 'Error(%(sev)s, %(msg)s)' % locals() + + def __eq__(self, other): + return repr(self) == repr(other) + + def _get_values(self): + sev = severities[self.severity] + msg = self._clean_string(repr(self.message)) + return sev, msg + + def render(self): + sev, msg = self._get_values() + return '%(sev)s: %(msg)s' % locals() + + def to_dict(self, *args, **kwargs): + """ + Return an ordered dict of self. + """ + return self._asdict() + + @staticmethod + def _clean_string(s): + """ + Return a cleaned string for `s`, stripping eventual "u" prefixes + from unicode representations. + """ + if not s: + return s + if s.startswith(('u"', "u'")): + s = s.lstrip('u') + s = s.replace('[u"', '["') + s = s.replace("[u'", "['") + s = s.replace("(u'", "('") + s = s.replace("(u'", "('") + s = s.replace("{u'", "{'") + s = s.replace("{u'", "{'") + s = s.replace(" u'", " '") + s = s.replace(" u'", " '") + s = s.replace("\\\\", "\\") + return s + + +# modeled after the logging levels +CRITICAL = 50 +ERROR = 40 +WARNING = 30 +INFO = 20 +DEBUG = 10 +NOTSET = 0 + +severities = { + CRITICAL: 'CRITICAL', + ERROR: 'ERROR', + WARNING: 'WARNING', + INFO: 'INFO', + DEBUG: 'DEBUG', + NOTSET: 'NOTSET' +} diff --git a/.venv/lib/python3.11/site-packages/attributecode/__main__.py b/.venv/lib/python3.11/site-packages/attributecode/__main__.py new file mode 100644 index 0000000..b1d8514 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attributecode/__main__.py @@ -0,0 +1,19 @@ +#!/usr/bin/env python +# -*- coding: utf8 -*- + +# ============================================================================ +# Copyright (c) nexB Inc. http://www.nexb.com/ - All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +if __name__ == '__main__': # pragma: nocover + from attributecode import cmd + cmd.about() diff --git a/.venv/lib/python3.11/site-packages/attributecode/api.py b/.venv/lib/python3.11/site-packages/attributecode/api.py new file mode 100644 index 0000000..4763aa5 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attributecode/api.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python +# -*- coding: utf8 -*- + +# ============================================================================ +# Copyright (c) nexB Inc. http://www.nexb.com/ - All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +import json +from requests import get + +from urllib.parse import quote +from urllib.parse import urlencode +from urllib.error import HTTPError + +from attributecode import ERROR +from attributecode import Error + +""" +API call helpers +""" + + +# FIXME: args should start with license_key +def request_license_data(api_url, api_key, license_key): + """ + Return a tuple of (dictionary of license data, list of errors) given a + `license_key`. Send a request to `api_url` authenticating with `api_key`. + """ + headers = { + 'Authorization': 'Token %s' % api_key, + } + payload = { + 'api_key': api_key, + 'key': license_key, + 'format': 'json' + } + + api_url = api_url.rstrip('/') + payload = urlencode(payload) + + full_url = '%(api_url)s/?%(payload)s' % locals() + # handle special characters in URL such as space etc. + quoted_url = quote(full_url, safe="%/:=&?~#+!$,;'@()*[]") + + license_data = {} + errors = [] + try: + response = get(quoted_url, headers=headers) + response_content = response.text + # FIXME: this should be an ordered dict + license_data = json.loads(response_content) + if not license_data.get('results', []): + msg = u"Invalid 'license': %s" % license_key + errors.append(Error(ERROR, msg)) + except HTTPError as http_e: + msg = (u"Authorization denied. Invalid '--api_key'. " + u"License generation is skipped.") + errors.append(Error(ERROR, msg)) + except Exception as e: + # Already checked the authorization and accessible of the URL. + # The only exception left is URL is accessible, but it's not a valid API URL + msg = (u"Invalid '--api_url'. " + u"License generation is skipped.") + errors.append(Error(ERROR, msg)) + + finally: + if license_data.get('count') == 1: + license_data = license_data.get('results')[0] + else: + license_data = {} + + return license_data, errors + + +# FIXME: args should start with license_key +def get_license_details_from_api(api_url, api_key, license_key): + """ + Return a tuple of license data given a `license_key` using the `api_url` + authenticating with `api_key`. + The details are a tuple of (license_name, license_key, license_text, errors) + where errors is a list of strings. + Missing values are provided as empty strings. + """ + license_data, errors = request_license_data(api_url, api_key, license_key) + return license_data, errors diff --git a/.venv/lib/python3.11/site-packages/attributecode/attrib.py b/.venv/lib/python3.11/site-packages/attributecode/attrib.py new file mode 100644 index 0000000..b22c6d9 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attributecode/attrib.py @@ -0,0 +1,339 @@ +#!/usr/bin/env python +# -*- coding: utf8 -*- + +# ============================================================================ +# Copyright (c) nexB Inc. http://www.nexb.com/ - All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
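A hedged sketch of the license API helpers above (attributecode/api.py); the endpoint URL and key are placeholders, and the fields available in the returned mapping depend on the DejaCode API response:

from attributecode.api import get_license_details_from_api

license_data, errors = get_license_details_from_api(
    api_url='https://example.dejacode.com/api/v2/licenses',  # placeholder
    api_key='REPLACE_WITH_A_REAL_KEY',                       # placeholder
    license_key='apache-2.0',
)
for error in errors:
    print(error.render())
if license_data:
    print(license_data.get('name'))  # field names depend on the API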
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +import datetime +import os + +import jinja2 + +from attributecode import __version__ +from attributecode import CRITICAL +from attributecode import ERROR +from attributecode import Error +from attributecode.licenses import COMMON_LICENSES +from attributecode.model import parse_license_expression +from attributecode.model import License, StringField +from attributecode.util import add_unc +from attributecode.attrib_util import multi_sort + +DEFAULT_TEMPLATE_FILE = os.path.join( + os.path.dirname(os.path.realpath(__file__)), 'templates', 'default_html.template') + +DEFAULT_TEMPLATE_SCANCODE_FILE = os.path.join( + os.path.dirname(os.path.realpath(__file__)), 'templates', 'scancode_html.template') + +DEFAULT_LICENSE_SCORE = 100 + + +def generate(abouts, is_about_input, license_dict, scancode, min_license_score, template=None, vartext=None): + """ + Generate an attribution text from an `abouts` list of About objects, a + `template` template text and a `vartext` optional dict of extra + variables. + + Return a tuple of (error, attribution text) where error is an Error object + or None and attribution text is the generated text or None. + """ + rendered = None + errors = [] + template_error = check_template(template) + if template_error: + lineno, message = template_error + error = Error( + CRITICAL, + 'Template validation error at line: {lineno}: "{message}"'.format( + **locals()) + ) + errors.append(error) + return error, None + + template = jinja2.Template(template) + # Get the current UTC time + utcnow = datetime.datetime.utcnow() + + licenses_list = [] + lic_name_expression_list = [] + if is_about_input: + for about in abouts: + # about.license_file.value is a OrderDict with license_file_name as + # the key and the license text as the value + index = 0 + for lic_name in about.license_name.value: + if about.license_key.value: + key = about.license_key.value[index] + else: + key = lic_name + captured = False + for lic in licenses_list: + if key in lic.key: + captured = True + break + if not captured or not licenses_list: + name = lic_name + if about.license_file.value.keys(): + filename = list(about.license_file.value.keys())[index] + text = list(about.license_file.value.values())[index] + else: + error = Error( + CRITICAL, 'No license file found for ' + name) + errors.append(error) + break + if about.license_url.value: + url = about.license_url.value[index] + else: + url = '' + license_object = License(key, name, filename, url, text) + licenses_list.append(license_object) + index = index + 1 + else: + # Create license object + for key in license_dict: + name = license_dict[key][0] + filename = license_dict[key][1] + text = license_dict[key][2] + url = license_dict[key][3] + license_object = License(key, name, filename, url, text) + licenses_list.append(license_object) + + # We need special treatment for scancode input. + # Each about_object may have duplicated license key and same/different license score + # We will only keep the unique license key with the highest license score. 
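+    # For example (hypothetical scancode data): detections of
+    # [('mit', 80.0), ('mit', 95.0)] for one package collapse to a single
+    # ('mit', 95.0) entry before rendering.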
+ # The process will update the license_key, license_name and license_score. + if scancode: + abouts, meet_score_licenses_list = generate_sctk_input( + abouts, min_license_score, license_dict) + # Remove the license object + remove_list = [] + for lic in licenses_list: + if lic.key not in meet_score_licenses_list: + remove_list.append(lic) + + for lic in remove_list: + licenses_list.remove(lic) + + for about in abouts: + # Create a license expression with license name + lic_name_expression = '' + lic_name_expression_list = [] + if about.license_expression.value: + for segment in about.license_expression.value.split(): + not_lic = True + for lic in licenses_list: + if segment == lic.key: + lic_name_expression_list.append(lic.name) + not_lic = False + break + if not_lic: + lic_name_expression_list.append(segment) + # Join the license name expression into a single string + lic_name_expression = ' '.join(lic_name_expression_list) + + # Add the license name expression string into the about object as a custom field + custom_field = StringField( + name='license_name_expression', value=lic_name_expression, present=True) + setattr(about, 'license_name_expression', custom_field) + + # Sort the about objects by name + abouts = sorted(abouts, key=lambda x: x.name.value.lower()) + + # Sort the license object by key + licenses_list = sorted(licenses_list, key=lambda x: x.key) + + rendered = template.render( + abouts=abouts, + common_licenses=COMMON_LICENSES, + licenses_list=licenses_list, + utcnow=utcnow, + tkversion=__version__, + vartext=vartext + ) + + return errors, rendered + + +def generate_sctk_input(abouts, min_license_score, license_dict): + meet_score_licenses_list = [] + for about in abouts: + # We will use a dictionary to keep the unique license key + # which the dictionary key is the license key and the dictionary value + # is (lic_score, lic_name) + if about.license_key.value: + updated_dict = {} + lic_key = about.license_key.value + lic_name = [] + if about.license_name.value: + lic_name = about.license_name.value + else: + lic_name = [] + for key_list in lic_key: + lic_name_list = [] + for k in key_list: + try: + lic_name_list.append(license_dict[k][0]) + except: + lic_name_list.append(k) + lic_name.append(lic_name_list) + about.license_name.value = lic_name + + if not lic_name: + lic_name = [] + for key in lic_key: + lic_name.append(license_dict[key][0]) + lic_score = about.license_score.value + assert len(lic_key) == len(lic_name) + assert len(lic_key) == len(lic_score) + + lic_key_expression = about.license_key_expression.value + if lic_key_expression: + updated_lic_key_expression = [] + removed_index = [] + for index, key in enumerate(lic_key_expression): + if key in updated_dict: + previous_score, _name = updated_dict[key] + current_score = lic_score[index] + if current_score > previous_score: + updated_dict[key] = ( + lic_score[index], lic_name[index]) + # Track the duplicated index + removed_index.append(index) + else: + updated_dict[key] = ( + lic_score[index], lic_name[index]) + updated_lic_key_expression.append(key) + # Remove the duplication + for index, key in enumerate(about.license_key.value): + if index in removed_index: + del about.license_key.value[index] + del about.license_name.value[index] + del about.license_score.value[index] + + lic_key_expression = updated_lic_key_expression + updated_lic_key = [] + updated_lic_name = [] + updated_lic_score = [] + for index, lic in enumerate(updated_dict): + _sp_char, lic_keys, _invalid_lic_exp = parse_license_expression( + lic) + 
score, name = updated_dict[lic] + if score >= min_license_score: + for lic_key in lic_keys: + if not lic_key in meet_score_licenses_list: + meet_score_licenses_list.append(lic_key) + + updated_lic_key.append(lic_keys) + updated_lic_name.append(name) + updated_lic_score.append(score) + + # Remove items that don't meet to score + for index, score in enumerate(updated_lic_score): + if score < min_license_score: + del updated_lic_key[index] + del updated_lic_name[index] + del updated_lic_score[index] + del lic_key_expression[index] + + about.license_key.value = updated_lic_key + about.license_name.value = updated_lic_name + about.license_score.value = updated_lic_score + about.license_key_expression.value = lic_key_expression + return abouts, meet_score_licenses_list + + +def get_license_file_key(license_text_name): + if license_text_name.endswith('.LICENSE'): + # See https://github.com/aboutcode-org/aboutcode-toolkit/issues/439 + # for why using split instead of strip + return license_text_name.rsplit('.', 1)[0] + else: + return license_text_name + + +def check_template(template_string): + """ + Check the syntax of a template. Return an error tuple (line number, + message) if the template is invalid or None if it is valid. + """ + try: + jinja2.filters.FILTERS['multi_sort'] = multi_sort + jinja2.Template(template_string) + except (jinja2.TemplateSyntaxError, jinja2.TemplateAssertionError) as e: + return e.lineno, e.message + + +def generate_from_file(abouts, is_about_input, license_dict, scancode, min_license_score, template_loc=None, vartext=None): + """ + Generate an attribution text from an `abouts` list of About objects, a + `template_loc` template file location and a `vartext` optional + dict of extra variables. + + Return a tuple of (error, attribution text) where error is an Error object + or None and attribution text is the generated text or None. + """ + if not template_loc: + if scancode: + template_loc = add_unc(DEFAULT_TEMPLATE_SCANCODE_FILE) + else: + template_loc = add_unc(DEFAULT_TEMPLATE_FILE) + else: + template_loc = add_unc(template_loc) + with open(template_loc, encoding='utf-8', errors='replace') as tplf: + tpls = tplf.read() + return generate(abouts, is_about_input, license_dict, scancode, min_license_score, template=tpls, vartext=vartext) + + +def generate_and_save(abouts, is_about_input, license_dict, output_location, scancode=False, min_license_score=0, template_loc=None, vartext=None): + """ + Generate an attribution text from an `abouts` list of About objects, a + `template_loc` template file location and a `vartext` optional + dict of extra variables. Save the generated attribution text in the + `output_location` file. + Return a list of Error objects if any. 
+ """ + errors = [] + # Parse license_expression and save to the license list + for about in abouts: + if not about.license_expression.value: + continue + special_char_in_expression, lic_list, invalid_lic_exp = parse_license_expression( + about.license_expression.value) + if special_char_in_expression or invalid_lic_exp: + if special_char_in_expression: + msg = (u"The following character(s) cannot be in the license_expression: " + + str(special_char_in_expression)) + else: + msg = (u"This license_expression is invalid: " + + str(invalid_lic_exp)) + errors.append(Error(ERROR, msg)) + + rendering_error, rendered = generate_from_file( + abouts, + is_about_input, + license_dict, + scancode=scancode, + min_license_score=min_license_score, + template_loc=template_loc, + vartext=vartext, + ) + + if rendering_error: + errors.append(rendering_error) + + if rendered: + output_location = add_unc(output_location) + with open(output_location, 'w', encoding='utf-8', errors='replace') as of: + of.write(rendered) + + return errors, rendered diff --git a/.venv/lib/python3.11/site-packages/attributecode/attrib_util.py b/.venv/lib/python3.11/site-packages/attributecode/attrib_util.py new file mode 100644 index 0000000..3b919ab --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attributecode/attrib_util.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python +# -*- coding: utf8 -*- + +# ============================================================================ +# Copyright (c) nexB Inc. http://www.nexb.com/ - All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +from jinja2 import Environment +try: + from jinja2.filters import pass_environment +except ImportError: + from jinja2.filters import environmentfilter as pass_environment +from jinja2.filters import make_attrgetter +from jinja2.filters import ignore_case +from jinja2.filters import FilterArgumentError + +""" +Extra JINJA2 custom filters and other template utilities. +""" + + +def get_template(template_text): + """ + Return a template built from a text string. + Register custom templates as needed. + """ + env = Environment(autoescape=True) + # register our custom filters + env.filters.update(dict( + unique_together=unique_together, + multi_sort=multi_sort)) + return env.from_string(template_text) + + +@pass_environment +def multi_sort(environment, value, reverse=False, case_sensitive=False, + attributes=None): + """ + Sort an iterable using an "attributes" list of attribute names available on + each iterable item. Sort ascending unless reverse is "true". Ignore the case + of strings unless "case_sensitive" is "true". + + .. sourcecode:: jinja + + {% for item in iterable|multi_sort(attributes=['date', 'name']) %} + ... 
+ {% endfor %} + """ + if not attributes: + raise FilterArgumentError( + 'The multi_sort filter requires a list of attributes as argument, ' + 'such as in: ' + "for item in iterable|multi_sort(attributes=['date', 'name'])") + + # build a list of attribute getters, one for each attribute + do_ignore_case = ignore_case if not case_sensitive else None + attribute_getters = [] + for attribute in attributes: + ag = make_attrgetter(environment, attribute, postprocess=do_ignore_case) + attribute_getters.append(ag) + + # build a key function that has runs all attribute getters + def key(v): + return [a(v) for a in attribute_getters] + + return sorted(value, key=key, reverse=reverse) + + +@pass_environment +def unique_together(environment, value, case_sensitive=False, attributes=None): + """ + Return a list of unique items from an iterable. Unicity is checked when + considering together all the values of an "attributes" list of attribute + names available on each iterable item.. The items order is preserved. Ignore + the case of strings unless "case_sensitive" is "true". + .. sourcecode:: jinja + + {% for item in iterable|unique_together(attributes=['date', 'name']) %} + ... + {% endfor %} + + """ + if not attributes: + raise FilterArgumentError( + 'The unique_together filter requires a list of attributes as argument, ' + 'such as in: ' + "{% for item in iterable|unique_together(attributes=['date', 'name']) %} ") + + # build a list of attribute getters, one for each attribute + do_ignore_case = ignore_case if not case_sensitive else None + attribute_getters = [] + for attribute in attributes: + ag = make_attrgetter(environment, attribute, postprocess=do_ignore_case) + attribute_getters.append(ag) + + # build a unique_key function that has runs all attribute getters + # and returns a hashable tuple + def unique_key(v): + return tuple(repr(a(v)) for a in attribute_getters) + + unique = [] + seen = set() + for item in value: + key = unique_key(item) + if key not in seen: + seen.add(key) + unique.append(item) + return unique diff --git a/.venv/lib/python3.11/site-packages/attributecode/cmd.py b/.venv/lib/python3.11/site-packages/attributecode/cmd.py new file mode 100644 index 0000000..5f2f504 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attributecode/cmd.py @@ -0,0 +1,966 @@ +#!/usr/bin/env python +# -*- coding: utf8 -*- + +# ============================================================================ +# Copyright (c) nexB Inc. http://www.nexb.com/ - All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
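A sketch of the custom Jinja filters above, registered via `get_template()`; the `Component` items are hypothetical stand-ins for template context objects:

from attributecode.attrib_util import get_template

class Component:
    def __init__(self, name, version):
        self.name = name
        self.version = version

components = [Component('zlib', '1.3'), Component('attrs', '24.1.0')]

# multi_sort orders by several attributes at once, case-insensitively
# unless case_sensitive is set.
template = get_template(
    "{% for c in components|multi_sort(attributes=['name', 'version']) %}"
    "{{ c.name }} {{ c.version }}\n"
    "{% endfor %}"
)
print(template.render(components=components))  # attrs 24.1.0, then zlib 1.3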
+# ============================================================================ + +from attributecode.util import write_licenses +from attributecode.util import get_temp_dir +from attributecode.util import filter_errors +from attributecode.util import extract_zip +from attributecode.transform import Transformer +from attributecode.transform import write_excel +from attributecode.transform import write_json +from attributecode.transform import write_csv +from attributecode.transform import transform_excel +from attributecode.transform import transform_json +from attributecode.transform import transform_csv +from attributecode.transform import transform_data +from attributecode.model import write_output +from attributecode.model import pre_process_and_fetch_license_dict +from attributecode.model import get_copy_list +from attributecode.model import copy_redist_src +from attributecode.model import collect_inventory, collect_abouts_license_expression, collect_inventory_license_expression +from attributecode.gen import generate as generate_about_files, load_inventory +from attributecode.attrib import generate_and_save as generate_attribution_doc +from attributecode.attrib import DEFAULT_LICENSE_SCORE +from attributecode.attrib import check_template +from attributecode import severities +from attributecode import __version__ +from attributecode import __about_spec_version__ +from attributecode.util import unique +from attributecode import WARNING + +from collections import defaultdict +from functools import partial + +import os +import sys + +import click + +# silence unicode literals warnings +click.disable_unicode_literals_warning = True + + +__copyright__ = """ + Copyright (c) nexB Inc and others. All rights reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License.""" + +prog_name = 'AboutCode-toolkit' + +intro = '''%(prog_name)s version %(__version__)s +ABOUT spec version: %(__about_spec_version__)s +https://aboutcode.org +%(__copyright__)s +''' % locals() + + +def print_version(): + click.echo('Running aboutcode-toolkit version ' + __version__) + + +class AboutCommand(click.Command): + """ + An enhanced click Command working around some Click quirk. + """ + + def main(self, args=None, prog_name=None, complete_var=None, + standalone_mode=True, **extra): + """ + Workaround click bug https://github.com/mitsuhiko/click/issues/365 + """ + return click.Command.main( + self, args=args, prog_name=self.name, + complete_var=complete_var, standalone_mode=standalone_mode, **extra) + + +# we define a main entry command with subcommands +@click.group(name='about') +@click.version_option(version=__version__, prog_name=prog_name, message=intro) +@click.help_option('-h', '--help') +def about(): + """ +Generate licensing attribution and credit notices from .ABOUT files and inventories. + +Read, write and collect provenance and license inventories from .ABOUT files to and from JSON or CSV files. + +Use about --help for help on a command. 
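+
+For example: about inventory /path/to/about_files/ inventory.csv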
+ """ + +###################################################################### +# option validators +###################################################################### + + +def validate_key_values(ctx, param, value): + """ + Return the a dict of {key: value} if valid or raise a UsageError + otherwise. + """ + if not value: + return + + kvals, errors = parse_key_values(value) + if errors: + ive = '\n'.join(sorted(' ' + x for x in errors)) + msg = ('Invalid {param} option(s):\n' + '{ive}'.format(**locals())) + raise click.UsageError(msg) + return kvals + + +def validate_extensions(ctx, param, value, extensions=tuple(('.csv', '.json',))): + if not value: + return + if not value.endswith(extensions): + msg = ' '.join(extensions) + raise click.UsageError( + 'Invalid {param} file extension: must be one of: {msg}'.format(**locals())) + return value + +###################################################################### +# inventory subcommand +###################################################################### + + +@about.command(cls=AboutCommand, + short_help='Collect the inventory of .ABOUT files to a CSV/JSON/XLSX file or stdout.') +@click.argument('location', + required=True, + metavar='LOCATION', + type=click.Path( + exists=True, file_okay=True, dir_okay=True, readable=True, resolve_path=True)) +@click.argument('output', + required=True, + metavar='OUTPUT') +@click.option('--exclude', + multiple=True, + metavar='PATTERN', + help='Exclude the processing of the specified input pattern (e.g. *tests* or test/).') +@click.option('-f', '--format', + is_flag=False, + default='csv', + show_default=True, + type=click.Choice(['json', 'csv', 'excel']), + help='Set OUTPUT inventory file format.') +@click.option('-q', '--quiet', + is_flag=True, + help='Do not print error or warning messages.') +@click.option('--verbose', + is_flag=True, + help='Show all error and warning messages.') +@click.help_option('-h', '--help') +def inventory(location, output, exclude, format, quiet, verbose): # NOQA + """ +Collect the inventory of .ABOUT files to a CSV/JSON/XLSX file. + +LOCATION: Path to an ABOUT file or a directory with ABOUT files. + +OUTPUT: Path to the CSV/JSON/XLSX inventory file to create, or +using '-' to print result on screen/to stdout (Excel-formatted output +cannot be used in stdout). + """ + # We are not using type=click.Path() to validate the output location as + # it does not support `-` , which is used to print the result to stdout. + if not output == '-': + parent_dir = os.path.dirname(output) + if not os.path.exists(parent_dir): + msg = 'The OUTPUT directory: {parent_dir} does not exist.'.format(**locals()) + msg += '\nPlease correct and re-run' + click.echo(msg) + sys.exit(1) + else: + # Check the format if output is stdout as xlsx format cannot be displayed. + if format == 'excel': + msg = 'Excel-formatted output cannot be used in stdout.' 
+ click.echo(msg) + sys.exit(0) + if not quiet: + print_version() + click.echo('Collecting inventory from ABOUT files...') + + if location.lower().endswith('.zip'): + # accept zipped ABOUT files as input + location = extract_zip(location) + errors, abouts = collect_inventory(location, exclude) + write_output(abouts=abouts, location=output, format=format) + + if output == '-': + log_file_loc = None + else: + log_file_loc = output + '-error.log' + errors_count = report_errors( + errors, quiet, verbose, log_file_loc) + if not quiet and not output == '-': + msg = 'Inventory collected in {output}.'.format(**locals()) + click.echo(msg) + sys.exit(errors_count) + +###################################################################### +# gen subcommand +###################################################################### + + +@about.command(cls=AboutCommand, + short_help='Generate .ABOUT files from an inventory as CSV/JSON/XLSX.') +@click.argument('location', + required=True, + metavar='LOCATION', + type=click.Path( + exists=True, file_okay=True, dir_okay=True, readable=True, resolve_path=True)) +@click.argument('output', + required=True, + metavar='OUTPUT', + type=click.Path(exists=True, file_okay=False, writable=True, resolve_path=True)) +@click.option('--android', + is_flag=True, + help='Generate MODULE_LICENSE_XXX (XXX will be replaced by license key) and NOTICE ' + 'as the same design as from Android.') +# FIXME: the CLI UX should be improved with two separate options for API key and URL +@click.option('--fetch-license', + is_flag=True, + help='Fetch license data and text files from the ScanCode LicenseDB.') +@click.option('--fetch-license-djc', + nargs=2, + type=str, + metavar='api_url api_key', + help='Fetch license data and text files from a DejaCode License Library ' + 'API URL using the API KEY.') +@click.option('--scancode', + is_flag=True, + help='Indicate the input JSON file is from scancode_toolkit.') +@click.option('--reference', + metavar='DIR', + type=click.Path(exists=True, file_okay=False, + readable=True, resolve_path=True), + help='Path to a directory with reference license data and text files.') +@click.option('--worksheet', + metavar='name', + help='The worksheet name from the INPUT. (Default: the "active" worksheet)') +@click.option('-q', '--quiet', + is_flag=True, + help='Do not print error or warning messages.') +@click.option('--verbose', + is_flag=True, + help='Show all error and warning messages.') +@click.help_option('-h', '--help') +def gen(location, output, android, fetch_license, fetch_license_djc, scancode, reference, worksheet, quiet, verbose): + """ +Given a CSV/JSON/XLSX inventory, generate ABOUT files in the output location. + +LOCATION: Path to a JSON/CSV/XLSX inventory file. + +OUTPUT: Path to a directory where ABOUT files are generated. 
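+
+Example (with placeholder paths):
+
+    about gen --fetch-license inventory.csv /path/to/output/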
+ """ + if not quiet: + print_version() + click.echo('Generating .ABOUT files...') + + # FIXME: This should be checked in the `click` + if not location.endswith(('.csv', '.json', '.xlsx')): + raise click.UsageError( + 'ERROR: Invalid input file extension: must be one .csv or .json or .xlsx.') + + if worksheet and not location.endswith('.xlsx'): + raise click.UsageError( + 'ERROR: --worksheet option only works with .xlsx input.') + + errors, abouts = generate_about_files( + location=location, + base_dir=output, + android=android, + reference_dir=reference, + fetch_license=fetch_license, + fetch_license_djc=fetch_license_djc, + scancode=scancode, + worksheet=worksheet + ) + + errors_count = report_errors( + errors, quiet, verbose, log_file_loc=output + '-error.log') + if not quiet: + abouts_count = len(abouts) + msg = '{abouts_count} .ABOUT files generated in {output}.'.format( + **locals()) + click.echo(msg) + sys.exit(errors_count) + + +###################################################################### +# gen_license subcommand +###################################################################### + +@about.command(cls=AboutCommand, + short_help='Fetch and save all the licenses in the license_expression field to a directory.') +@click.argument('location', + required=True, + metavar='LOCATION', + type=click.Path( + exists=True, file_okay=True, dir_okay=True, readable=True, resolve_path=True)) +@click.argument('output', + required=True, + metavar='OUTPUT', + type=click.Path(exists=True, file_okay=False, writable=True, resolve_path=True)) +@click.option('--djc', + nargs=2, + type=str, + metavar='api_url api_key', + help='Fetch licenses from a DejaCode License Library.') +@click.option('--scancode', + is_flag=True, + help='Indicate the input JSON file is from scancode_toolkit.') +@click.option('--worksheet', + metavar='name', + help='The worksheet name from the INPUT. (Default: the "active" worksheet)') +@click.option('--verbose', + is_flag=True, + help='Show all error and warning messages.') +@click.help_option('-h', '--help') +def gen_license(location, output, djc, scancode, worksheet, verbose): + """ +Fetch licenses (Default: ScanCode LicenseDB) in the license_expression field and save to the output location. + +LOCATION: Path to a JSON/CSV/XLSX/.ABOUT file(s) + +OUTPUT: Path to a directory where license files are saved. 
+ """ + print_version() + api_url = '' + api_key = '' + errors = [] + + if worksheet and not location.endswith('.xlsx'): + raise click.UsageError( + 'ERROR: --worksheet option only works with .xlsx input.') + + log_file_loc = os.path.join(output, 'error.log') + + if location.endswith('.csv') or location.endswith('.json') or location.endswith('.xlsx'): + errors, abouts = collect_inventory_license_expression( + location=location, scancode=scancode, worksheet=worksheet) + if errors: + severe_errors_count = report_errors( + errors, quiet=False, verbose=verbose, log_file_loc=log_file_loc) + sys.exit(severe_errors_count) + else: + # _errors, abouts = collect_inventory(location) + errors, abouts = collect_abouts_license_expression(location) + + if djc: + # Strip the ' and " for api_url, and api_key from input + api_url = djc[0].strip("'").strip('"') + api_key = djc[1].strip("'").strip('"') + + click.echo('Fetching licenses...') + from_check = False + license_dict, lic_errors = pre_process_and_fetch_license_dict( + abouts, from_check, api_url, api_key, scancode) + + if lic_errors: + errors.extend(lic_errors) + + # A dictionary with license file name as the key and context as the value + lic_dict_output = {} + for key in license_dict: + if not key in lic_dict_output: + lic_filename = license_dict[key][1] + lic_context = license_dict[key][2] + lic_dict_output[lic_filename] = lic_context + + write_errors = write_licenses(lic_dict_output, output) + if write_errors: + errors.extend(write_errors) + + severe_errors_count = report_errors( + errors, quiet=False, verbose=verbose, log_file_loc=log_file_loc) + sys.exit(severe_errors_count) + + +###################################################################### +# attrib subcommand +###################################################################### + + +def validate_template(ctx, param, value): + if not value: + return None + + with open(value, encoding='utf-8', errors='replace') as templatef: + template_error = check_template(templatef.read()) + + if template_error: + lineno, message = template_error + raise click.UsageError( + 'Template syntax error at line: ' + '{lineno}: "{message}"'.format(**locals())) + return value + + +@about.command(cls=AboutCommand, + short_help='Generate an attribution document from JSON/CSV/XLSX/.ABOUT files.') +@click.argument('input', + required=True, + metavar='INPUT', + type=click.Path( + exists=True, file_okay=True, dir_okay=True, readable=True, resolve_path=True)) +@click.argument('output', + required=True, + metavar='OUTPUT', + type=click.Path(exists=False, dir_okay=False, writable=True, resolve_path=True)) +@click.option('--api_url', + nargs=1, + type=click.STRING, + metavar='URL', + help='URL to DejaCode License Library.') +@click.option('--api_key', + nargs=1, + type=click.STRING, + metavar='KEY', + help='API Key for the DejaCode License Library') +@click.option('--min-license-score', + type=int, + help='Attribute components that have license score higher than or equal to the defined ' + '--min-license-score.') +@click.option('--scancode', + is_flag=True, + help='Indicate the input JSON file is from scancode_toolkit.') +@click.option('--reference', + metavar='DIR', + type=click.Path(exists=True, file_okay=False, + readable=True, resolve_path=True), + help='Path to a directory with reference files where "license_file" and/or "notice_file"' + ' located.') +@click.option('--template', + metavar='FILE', + callback=validate_template, + type=click.Path(exists=True, dir_okay=False, + readable=True, resolve_path=True), + 
help='Path to an optional custom attribution template to generate the ' + 'attribution document. If not provided the default built-in template is used.') +@click.option('--vartext', + multiple=True, + callback=validate_key_values, + metavar='=', + help='Add variable text as key=value for use in a custom attribution template.') +@click.option('--worksheet', + metavar='name', + help='The worksheet name from the INPUT. (Default: the "active" worksheet)') +@click.option('-q', '--quiet', + is_flag=True, + help='Do not print error or warning messages.') +@click.option('--verbose', + is_flag=True, + help='Show all error and warning messages.') +@click.help_option('-h', '--help') +def attrib(input, output, api_url, api_key, scancode, min_license_score, reference, template, vartext, worksheet, quiet, verbose): + """ +Generate an attribution document at OUTPUT using JSON, CSV or XLSX or .ABOUT files at INPUT. + +INPUT: Path to a file (.ABOUT/.csv/.json/.xlsx), directory or .zip archive containing .ABOUT files. + +OUTPUT: Path where to write the attribution document. + """ + # A variable to define if the input ABOUT file(s) + is_about_input = False + + rendered = '' + license_dict = {} + errors = [] + + if worksheet and not input.endswith('.xlsx'): + raise click.UsageError( + 'ERROR: --worksheet option only works with .xlsx input.') + + if not quiet: + print_version() + click.echo('Generating attribution...') + + # accept zipped ABOUT files as input + if input.lower().endswith('.zip'): + input = extract_zip(input) + + if scancode: + if not input.endswith('.json'): + msg = 'The input file from scancode toolkit needs to be in JSON format.' + click.echo(msg) + sys.exit(1) + if not min_license_score and not min_license_score == 0: + min_license_score = DEFAULT_LICENSE_SCORE + + if min_license_score: + if not scancode: + msg = ('This option requires a JSON file generated by scancode toolkit as the input. ' + + 'The "--scancode" option is required.') + click.echo(msg) + sys.exit(1) + + if input.endswith('.json') or input.endswith('.csv') or input.endswith('.xlsx'): + is_about_input = False + from_attrib = True + if not reference: + # Set current directory as the reference dir + reference = os.path.dirname(input) + # Since the errors from load_inventory is only about field formatting or + # empty field which is irrelevant for attribtion process, + # See https://github.com/nexB/aboutcode-toolkit/issues/524 + # I believe we do not need to capture these errors in attrib process + errors, abouts = load_inventory( + location=input, + from_attrib=from_attrib, + scancode=scancode, + reference_dir=reference, + worksheet=worksheet + ) + + # Exit if CRITICAL error + if errors: + for e in errors: + if severities[e.severity] == 'CRITICAL': + click.echo(e) + sys.exit(1) + + else: + is_about_input = True + _errors, abouts = collect_inventory(input) + + if not abouts: + msg = 'No ABOUT file or reference is found from the input. Attribution generation halted.' + click.echo(msg) + errors_count = 1 + sys.exit(errors_count) + + if not is_about_input: + # Check if both api_url and api_key present + if api_url or api_key: + if not api_url: + msg = '"--api_url" is required.' + click.echo(msg) + sys.exit(1) + if not api_key: + msg = '"--api_key" is required.' 
+ click.echo(msg) + sys.exit(1) + else: + api_url = '' + api_key = '' + api_url = api_url.strip("'").strip('"') + api_key = api_key.strip("'").strip('"') + from_check = False + license_dict, lic_errors = pre_process_and_fetch_license_dict( + abouts, from_check, api_url, api_key, scancode, reference) + errors.extend(lic_errors) + sorted_license_dict = sorted(license_dict) + + # Read the license_file and store in a dictionary + for about in abouts: + if about.license_file.value or about.notice_file.value: + if not reference: + msg = ( + '"license_file" / "notice_file" field contains value. Use `--reference` to indicate its parent directory.') + click.echo(msg) + # sys.exit(1) + + if abouts: + attrib_errors, rendered = generate_attribution_doc( + abouts=abouts, + is_about_input=is_about_input, + license_dict=dict(sorted(license_dict.items())), + output_location=output, + scancode=scancode, + min_license_score=min_license_score, + template_loc=template, + vartext=vartext, + ) + errors.extend(attrib_errors) + + errors_count = report_errors( + errors, quiet, verbose, log_file_loc=output + '-error.log') + + if not quiet: + if rendered: + msg = 'Attribution generated in: {output}'.format(**locals()) + click.echo(msg) + else: + msg = 'Attribution generation failed.' + click.echo(msg) + sys.exit(errors_count) + +###################################################################### +# collect_redist_src subcommand +###################################################################### + + +@about.command(cls=AboutCommand, + short_help='Collect redistributable sources.') +@click.argument('location', + required=True, + metavar='LOCATION', + type=click.Path( + exists=True, file_okay=True, dir_okay=True, readable=True, resolve_path=True)) +@click.argument('output', + required=True, + metavar='OUTPUT') +@click.option('--from-inventory', + metavar='FILE', + type=click.Path(exists=True, dir_okay=False, + readable=True, resolve_path=True), + help='Path to an inventory CSV/JSON/XLSX file as the base list for files/directories ' + 'that need to be copied which have the \'redistribute\' flagged.') +@click.option('--with-structures', + is_flag=True, + help='Copy sources with directory structure.') +@click.option('--zip', + is_flag=True, + help='Zip the copied sources to the output location.') +@click.option('-q', '--quiet', + is_flag=True, + help='Do not print error or warning messages.') +@click.option('--verbose', + is_flag=True, + help='Show all error and warning messages.') +@click.help_option('-h', '--help') +def collect_redist_src(location, output, from_inventory, with_structures, zip, quiet, verbose): + """ +Collect sources that have 'redistribute' flagged as 'True' in .ABOUT files or inventory +to the output location. + +LOCATION: Path to a directory containing sources that need to be copied +(and containing ABOUT files if `inventory` is not provided) + +OUTPUT: Path to a directory or a zip file where sources will be copied to. 
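+
+Example (with placeholder paths):
+
+    about collect_redist_src --zip /path/to/sources/ redist_sources.zip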
+ """ + if zip: + if not output.endswith('.zip'): + click.echo('The output needs to be a zip file.') + sys.exit() + + if not quiet: + print_version() + click.echo('Collecting inventory from ABOUT files...') + + if location.lower().endswith('.zip'): + # accept zipped ABOUT files as input + location = extract_zip(location) + + if from_inventory: + errors, abouts = load_inventory(from_inventory, location) + else: + errors, abouts = collect_inventory(location) + + if zip: + # Copy to a temp location and the zip to the output location + output_location = get_temp_dir() + else: + output_location = output + + copy_list, copy_list_errors = get_copy_list(abouts, location) + copy_errors = copy_redist_src( + copy_list, location, output_location, with_structures) + + if zip: + import shutil + # Stripped the .zip extension as the `shutil.make_archive` will + # append the .zip extension + output_no_extension = output.rsplit('.', 1)[0] + shutil.make_archive(output_no_extension, 'zip', output_location) + + errors.extend(copy_list_errors) + errors.extend(copy_errors) + errors_count = report_errors( + errors, quiet, verbose, log_file_loc=output + '-error.log') + if not quiet: + msg = 'Redistributed sources are copied to {output}.'.format( + **locals()) + click.echo(msg) + sys.exit(errors_count) + +###################################################################### +# check subcommand +###################################################################### + +# FIXME: This is really only a dupe of the Inventory command + +@about.command(cls=AboutCommand, + short_help='Validate that the format of .ABOUT files is correct and report ' + 'errors and warnings.') +@click.argument('location', + required=True, + metavar='LOCATION', + type=click.Path( + exists=True, file_okay=True, dir_okay=True, readable=True, resolve_path=True)) +@click.option('--exclude', + multiple=True, + metavar='PATTERN', + help='Exclude the processing of the specified input pattern (e.g. *tests* or test/).') +@click.option('--license', + is_flag=True, + help='Validate the license_expression value in the input.') +@click.option('--djc', + nargs=2, + type=str, + metavar='api_url api_key', + help='Validate license_expression from a DejaCode License Library ' + 'API URL using the API KEY.') +@click.option('--log', + nargs=1, + metavar='FILE', + help='Path to a file to save the error messages if any.') +@click.option('--verbose', + is_flag=True, + help='Show all error and warning messages.') +@click.help_option('-h', '--help') +def check(location, exclude, license, djc, log, verbose): + """ +Check .ABOUT file(s) at LOCATION for validity and print error messages. + +LOCATION: Path to an ABOUT file or a directory with ABOUT files. 
+ """ + print_version() + + if log: + # Check if the error log location exist and create the parent directory if not + parent = os.path.dirname(log) + if not parent: + os.makedirs(parent) + + api_url = '' + api_key = '' + if djc: + # Strip the ' and " for api_url, and api_key from input + api_url = djc[0].strip("'").strip('"') + api_key = djc[1].strip("'").strip('"') + click.echo('Checking ABOUT files...') + + errors, abouts = collect_inventory(location, exclude) + + # Validate license_expression + if license: + from_check = True + _key_text_dict, errs = pre_process_and_fetch_license_dict( + abouts, from_check, api_url, api_key) + for e in errs: + errors.append(e) + + severe_errors_count = report_errors( + errors, quiet=False, verbose=verbose, log_file_loc=log) + sys.exit(severe_errors_count) + +###################################################################### +# transform subcommand +###################################################################### + + +def print_config_help(ctx, param, value): + if not value or ctx.resilient_parsing: + return + from attributecode.transform import tranformer_config_help + click.echo(tranformer_config_help) + ctx.exit() + + +@about.command(cls=AboutCommand, + short_help='Transform a CSV/JSON/XLSX by applying renamings, filters and checks.') +@click.argument('location', + required=True, + callback=partial(validate_extensions, extensions=( + '.csv', '.json', '.xlsx',)), + metavar='LOCATION', + type=click.Path(exists=True, dir_okay=False, readable=True, resolve_path=True)) +@click.argument('output', + required=True, + callback=partial(validate_extensions, extensions=( + '.csv', '.json', '.xlsx',)), + metavar='OUTPUT', + type=click.Path(exists=False, dir_okay=False, writable=True, resolve_path=True)) +@click.option('-c', '--configuration', + metavar='FILE', + type=click.Path(exists=True, dir_okay=False, + readable=True, resolve_path=True), + help='Path to an optional YAML configuration file. See --help-format for ' + 'format help.') +@click.option('--worksheet', + metavar='name', + help='The worksheet name from the INPUT. (Default: the "active" worksheet)') +@click.option('--help-format', + is_flag=True, is_eager=True, expose_value=False, + callback=print_config_help, + help='Show configuration file format help and exit.') +@click.option('-q', '--quiet', + is_flag=True, + help='Do not print error or warning messages.') +@click.option('--verbose', + is_flag=True, + help='Show all error and warning messages.') +@click.help_option('-h', '--help') +def transform(location, output, configuration, worksheet, quiet, verbose): # NOQA + """ +Transform the CSV/JSON/XLSX file at LOCATION by applying renamings, filters and checks +and then write a new CSV/JSON/XLSX to OUTPUT. + +LOCATION: Path to a CSV/JSON/XLSX file. + +OUTPUT: Path to CSV/JSON/XLSX inventory file to create. 
+ """ + if worksheet and not location.endswith('.xlsx'): + raise click.UsageError( + 'ERROR: --worksheet option only works with .xlsx input.') + + if not configuration: + transformer = Transformer.default() + else: + transformer = Transformer.from_file(configuration) + + if not transformer: + msg = 'Cannot transform without Transformer' + click.echo(msg) + sys.exit(1) + + errors = [] + updated_data = [] + new_data = [] + + if location.endswith('.csv'): + new_data, errors = transform_csv(location) + elif location.endswith('.json'): + new_data, errors = transform_json(location) + elif location.endswith('.xlsx'): + new_data, errors = transform_excel(location, worksheet) + + if not errors: + updated_data, errors = transform_data(new_data, transformer) + + if not updated_data: + msg = 'The input is empty. Nothing is transformed.' + click.echo(msg) + sys.exit(0) + + if not errors: + if output.endswith('.csv'): + write_csv(output, updated_data) + elif output.endswith('.json'): + write_json(output, updated_data) + else: + write_excel(output, updated_data) + + if not quiet: + print_version() + click.echo('Transforming...') + + errors_count = report_errors( + errors, quiet, verbose, log_file_loc=output + '-error.log') + if not quiet and not errors: + msg = 'Transformed file is written to {output}.'.format(**locals()) + click.echo(msg) + sys.exit(errors_count) + +###################################################################### +# Error management +###################################################################### + + +def report_errors(errors, quiet, verbose, log_file_loc=None): + """ + Report the `errors` list of Error objects to screen based on the `quiet` and + `verbose` flags. + + If `log_file_loc` file location is provided also write a verbose log to this + file. + Return True if there were severe error reported. + """ + severe_errors_count = 0 + if errors: + log_msgs, severe_errors_count = get_error_messages(errors, verbose) + if not quiet: + for msg in log_msgs: + click.echo(msg) + if log_msgs and log_file_loc: + with open(log_file_loc, 'w', encoding='utf-8', errors='replace') as lf: + lf.write('\n'.join(log_msgs)) + click.echo("Error log: " + log_file_loc) + return severe_errors_count + + +def get_error_messages(errors, verbose=False): + """ + Return a tuple of (list of error message strings to report, + severe_errors_count) given an `errors` list of Error objects and using the + `verbose` flags. + """ + if verbose: + severe_errors = errors + else: + severe_errors = filter_errors(errors, WARNING) + + severe_errors = unique(severe_errors) + severe_errors_count = len(severe_errors) + + messages = [] + + if severe_errors: + error_msg = 'Command completed with {} errors or warnings.'.format( + severe_errors_count) + messages.append(error_msg) + + for severity, message in severe_errors: + sevcode = severities.get(severity) or 'UNKNOWN' + msg = '{sevcode}: {message}'.format(**locals()) + messages.append(msg) + + return messages, severe_errors_count + +###################################################################### +# Misc +###################################################################### + + +def parse_key_values(key_values): + """ + Given a list of "key=value" strings, return: + - a dict {key: value} + - a sorted list of unique error messages for invalid entries where there is + a missing a key or value. 
+ """ + if not key_values: + return {}, [] + + errors = set() + parsed_key_values = defaultdict(list) + for key_value in key_values: + key, _, value = key_value.partition('=') + + key = key.strip().lower() + if not key: + errors.add('missing in "{key_value}".'.format(**locals())) + continue + + value = value.strip() + if not value: + errors.add('missing in "{key_value}".'.format(**locals())) + continue + + parsed_key_values[key] = value + + return dict(parsed_key_values), sorted(errors) + + +if __name__ == '__main__': + about() diff --git a/.venv/lib/python3.11/site-packages/attributecode/gen.py b/.venv/lib/python3.11/site-packages/attributecode/gen.py new file mode 100644 index 0000000..3b824c2 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attributecode/gen.py @@ -0,0 +1,382 @@ +#!/usr/bin/env python +# -*- coding: utf8 -*- + +# ============================================================================ +# Copyright (c) nexB Inc. http://www.nexb.com/ - All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +from posixpath import basename +from posixpath import dirname +from posixpath import exists +from posixpath import join +from posixpath import normpath + +from attributecode import ERROR +from attributecode import CRITICAL +from attributecode import INFO +from attributecode import Error +from attributecode import model +from attributecode import util +from attributecode.util import add_unc +from attributecode.util import csv +from attributecode.util import file_fields +from attributecode.util import invalid_chars +from attributecode.util import to_posix +from attributecode.util import UNC_PREFIX_POSIX +from attributecode.util import load_scancode_json, load_csv, load_json, load_excel +from attributecode.util import strip_inventory_value + + +def check_duplicated_columns(location): + """ + Return a list of errors for duplicated column names in a CSV file + at location. + """ + location = add_unc(location) + with open(location, mode='r', encoding='utf-8-sig', errors='replace') as csvfile: + reader = csv.reader(csvfile) + columns = next(reader) + columns = [col for col in columns] + + seen = set() + dupes = dict() + for col in columns: + c = col.lower() + if c in seen: + if c in dupes: + dupes[c].append(col) + else: + dupes[c] = [col] + seen.add(c.lower()) + + errors = [] + if dupes: + dup_msg = [] + for name, names in dupes.items(): + names = u', '.join(names) + msg = '%(name)s with %(names)s' % locals() + dup_msg.append(msg) + dup_msg = u', '.join(dup_msg) + msg = ('Duplicated column name(s): %(dup_msg)s\n' % locals() + + 'Please correct the input and re-run.') + err = Error(ERROR, msg) + if not err in errors: + errors.append(err) + return errors + + +def check_duplicated_about_resource(arp, arp_list): + """ + Return error for duplicated about_resource. 
+ """ + if arp in arp_list: + msg = ("The input has duplicated values in 'about_resource' " + "field: " + arp) + return Error(CRITICAL, msg) + return '' + + +def check_newline_in_file_field(component): + """ + Return a list of errors for newline characters detected in *_file fields. + """ + errors = [] + for k in component.keys(): + if k in file_fields: + try: + if '\n' in component[k]: + if k == u'about_resource': + msg = ( + "Multiple lines detected in 'about_resource' for '%s' which is not supported.") % component['about_resource'] + else: + msg = ("New line character detected in '%s' for '%s' which is not supported." + "\nPlease use ',' to declare multiple files.") % (k, component['about_resource']) + errors.append(Error(CRITICAL, msg)) + except: + pass + return errors + + +def check_about_resource_filename(arp): + """ + Return error for invalid/non-support about_resource's filename or + empty string if no error is found. + """ + if invalid_chars(arp): + msg = ("Invalid characters present in 'about_resource' " + "field: " + arp) + return (Error(ERROR, msg)) + return '' + + +def load_inventory(location, from_attrib=False, base_dir=None, scancode=False, reference_dir=None, worksheet=None): + """ + Load the inventory file at `location` for ABOUT and LICENSE files stored in + the `base_dir`. Return a list of errors and a list of About objects + validated against the `base_dir`. + + Optionally use `reference_dir` as the directory location of extra reference + license and notice files to reuse. + """ + errors = [] + abouts = [] + is_spreadsheet = False + + if base_dir: + base_dir = util.to_posix(base_dir) + if scancode: + inventory = load_scancode_json(location) + else: + if location.endswith('.csv'): + dup_cols_err = check_duplicated_columns(location) + if dup_cols_err: + errors.extend(dup_cols_err) + return errors, abouts + inventory = load_csv(location) + is_spreadsheet = True + elif location.endswith('.xlsx'): + dup_cols_err, inventory = load_excel(location, worksheet) + is_spreadsheet = True + if dup_cols_err: + errors.extend(dup_cols_err) + return errors, abouts + else: + inventory = load_json(location) + + arp_list = [] + errors = [] + + if is_spreadsheet: + # Only the .csv and .xlsx may have newline issue + stripped_inv = strip_inventory_value(inventory) + else: + stripped_inv = inventory + + for component in stripped_inv: + if not from_attrib: + if 'about_resource' in component: + arp = component['about_resource'] + dup_err = check_duplicated_about_resource(arp, arp_list) + if dup_err: + if not dup_err in errors: + errors.append(dup_err) + else: + arp_list.append(arp) + + invalid_about_filename = check_about_resource_filename(arp) + if invalid_about_filename and not invalid_about_filename in errors: + errors.append(invalid_about_filename) + + newline_in_file_err = check_newline_in_file_field(component) + if newline_in_file_err: + errors.extend(newline_in_file_err) + + if errors: + return errors, abouts + + custom_fields_list = [] + for fields in stripped_inv: + # check does the input contains the required fields + required_fields = model.About.required_fields + + for f in required_fields: + if f not in fields: + if from_attrib and f == 'about_resource': + continue + else: + msg = "Required field: %(f)r not found in the " % locals( + ) + errors.append(Error(CRITICAL, msg)) + return errors, abouts + # Set about file path to '' if no 'about_resource' is provided from + # the input + if 'about_resource' not in fields: + afp = '' + else: + afp = 
fields.get(model.About.ABOUT_RESOURCE_ATTR) + + afp = util.to_posix(afp) + if base_dir: + loc = join(base_dir, afp) + else: + loc = afp + about = model.About(about_file_path=afp) + about.location = loc + + # Update value for 'about_resource' + # keep only the filename or '.' if it's a directory + if 'about_resource' in fields: + updated_resource_value = u'' + resource_path = fields['about_resource'] + if resource_path.endswith(u'/'): + updated_resource_value = u'.' + else: + updated_resource_value = basename(resource_path) + fields['about_resource'] = updated_resource_value + + ld_errors = about.load_dict( + fields, + base_dir, + scancode=scancode, + from_attrib=from_attrib, + running_inventory=False, + reference_dir=reference_dir, + ) + + for severity, message in ld_errors: + if 'Custom Field' in message: + field_name = message.replace('Custom Field: ', '').strip() + if not field_name in custom_fields_list: + custom_fields_list.append(field_name) + else: + errors.append(Error(severity, message)) + + abouts.append(about) + if custom_fields_list: + custom_fields_err_msg = 'Field ' + \ + str(custom_fields_list) + ' is a custom field.' + errors.append(Error(INFO, custom_fields_err_msg)) + + return errors, abouts + + +def update_about_resource(self): + pass + + +def generate(location, base_dir, android=None, reference_dir=None, fetch_license=False, fetch_license_djc=False, scancode=False, worksheet=None): + """ + Load ABOUT data from a CSV inventory at `location`. Write ABOUT files to + base_dir. Return errors and about objects. + """ + notice_dict = {} + api_url = '' + api_key = '' + gen_license = False + # FIXME: use two different arguments: key and url + # Check if the fetch_license contains valid argument + if fetch_license_djc: + # Strip the ' and " for api_url, and api_key from input + api_url = fetch_license_djc[0].strip("'").strip('"') + api_key = fetch_license_djc[1].strip("'").strip('"') + gen_license = True + + if fetch_license: + gen_license = True + + # TODO: WHY use posix?? + bdir = to_posix(base_dir) + + errors, abouts = load_inventory( + location=location, + base_dir=bdir, + reference_dir=reference_dir, + scancode=scancode, + worksheet=worksheet + ) + if gen_license: + license_dict, err = model.pre_process_and_fetch_license_dict( + abouts, api_url=api_url, api_key=api_key) + if err: + for e in err: + # Avoid having same error multiple times + if not e in errors: + errors.append(e) + + for about in abouts: + # Strip trailing spaces + about.about_file_path = about.about_file_path.strip() + if about.about_file_path.startswith('/'): + about.about_file_path = about.about_file_path.lstrip('/') + # Use the name as the ABOUT file name if about_resource is empty + if not about.about_file_path: + about.about_file_path = about.name.value + dump_loc = join(bdir, about.about_file_path.lstrip('/')) + + # The following code is to check if there is any directory ends with spaces + split_path = about.about_file_path.split('/') + dir_endswith_space = False + for segment in split_path: + if segment.endswith(' '): + msg = (u'File path : ' + u'%(dump_loc)s ' + u'contains directory name ends with spaces which is not ' + u'allowed. Generation skipped.' 
% locals())
+ errors.append(Error(ERROR, msg))
+ dir_endswith_space = True
+ break
+ if dir_endswith_space:
+ # Continue to work on the next about object
+ continue
+
+ try:
+
+ licenses_dict = {}
+ if gen_license:
+ # Write generated LICENSE file
+ license_key_name_context_url_list = about.dump_lic(
+ dump_loc, license_dict)
+ if license_key_name_context_url_list:
+ for lic_key, lic_name, lic_filename, lic_context, lic_url, spdx_lic_key in license_key_name_context_url_list:
+ licenses_dict[lic_key] = [
+ lic_name, lic_filename, lic_context, lic_url, spdx_lic_key]
+ if lic_name not in about.license_name.value:
+ about.license_name.value.append(lic_name)
+ about.license_file.value[lic_filename] = lic_filename
+ if lic_url not in about.license_url.value:
+ about.license_url.value.append(lic_url)
+ if spdx_lic_key not in about.spdx_license_key.value:
+ about.spdx_license_key.value.append(spdx_lic_key)
+ if about.license_name.value:
+ about.license_name.present = True
+ if about.license_file.value:
+ about.license_file.present = True
+ if about.license_url.value:
+ about.license_url.present = True
+ if about.spdx_license_key.value:
+ about.spdx_license_key.present = True
+
+ about.dump(dump_loc, licenses_dict)
+
+ if android:
+ # Create MODULE_LICENSE_XXX and get the content to create the
+ # NOTICE file, following the Android Open Source Project
+ # convention.
+ import os
+ parent_path = os.path.dirname(util.to_posix(dump_loc))
+
+ about.android_module_license(parent_path)
+ notice_path, notice_context = about.android_notice(parent_path)
+ if notice_path in notice_dict:
+ notice_dict[notice_path] += '\n\n' + notice_context
+ else:
+ notice_dict[notice_path] = notice_context
+
+ except Exception as e:
+ # only keep the first 100 char of the exception
+ # TODO: truncated errors are likely making diagnostics harder
+ emsg = repr(e)[:100]
+ msg = (u'Failed to write .ABOUT file at: '
+ u'%(dump_loc)s '
+ u'with error: %(emsg)s' % locals())
+ errors.append(Error(ERROR, msg))
+
+ if android:
+ # Check if there is already a NOTICE file present
+ for path in notice_dict:
+ if os.path.exists(path):
+ msg = (u'NOTICE file already exists at: %s' % path)
+ errors.append(Error(ERROR, msg))
+ else:
+ about.dump_android_notice(path, notice_dict[path])
+ return errors, abouts
diff --git a/.venv/lib/python3.11/site-packages/attributecode/licenses.py b/.venv/lib/python3.11/site-packages/attributecode/licenses.py
new file mode 100644
index 0000000..9280ab8
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/attributecode/licenses.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+# -*- coding: utf8 -*-
+
+# ============================================================================
+# Copyright (c) nexB Inc. http://www.nexb.com/ - All rights reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================ + +# Common license keys +COMMON_LICENSES = ( + 'aes-128-3.0', + 'agpl-3.0-plus', + 'apache-1.1', + 'apache-2.0', + 'apple-attribution-1997', + 'apple-excl', + 'apsl-2.0', + 'arphic-public', + 'artistic-perl-1.0', + 'artistic-2.0', + 'bitstream', + 'boost-1.0', + 'broadcom-cfe', + 'bsd-new', + 'bsd-original', + 'bsd-original-uc', + 'bsd-simplified', + 'cmu-computing-services', + 'cddl-1.0', + 'cddl-1.1', + 'cpl-1.0', + 'cc-by-2.5', + 'cc-by-sa-3.0', + 'curl', + 'freetype', + 'gpl-1.0-plus', + 'gpl-2.0', + 'gpl-2.0-bison', + 'gpl-2.0-glibc', + 'gpl-2.0-plus', + 'gpl-3.0', + 'gpl-3.0-plus', + 'lgpl-2.0', + 'lgpl-2.0-plus', + 'lgpl-2.1', + 'lgpl-2.1-plus', + 'lgpl-3.0', + 'lgpl-3.0-plus', + 'gpl-2.0-plus-linking', + 'gpl-2.0-broadcom-linking', + 'ijg', + 'isc', + 'larabie', + 'libpng', + 'ms-limited-public', + 'ms-pl', + 'ms-rl', + 'ms-ttf-eula', + 'mit', + 'mpl-1.1', + 'mpl-2.0', + 'net-snmp', + 'npl-1.1', + 'ntpl', + 'openssl-ssleay', + 'ssleay-windows', + 'rsa-md4', + 'rsa-md5', + 'sfl-license', + 'sgi-freeb-2.0', + 'sun-rpc', + 'tcl', + 'tidy', + 'uoi-ncsa', + 'x11', + 'zlib', +) diff --git a/.venv/lib/python3.11/site-packages/attributecode/model.py b/.venv/lib/python3.11/site-packages/attributecode/model.py new file mode 100644 index 0000000..e935af2 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attributecode/model.py @@ -0,0 +1,2184 @@ +#!/usr/bin/env python +# -*- coding: utf8 -*- +# ============================================================================ +# Copyright (c) nexB Inc. http://www.nexb.com/ - All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +""" +AboutCode toolkit is a tool to process ABOUT files. ABOUT files are +small text files that document the provenance (aka. the origin and +license) of software components as well as the essential obligation +such as attribution/credits and source code redistribution. See the +ABOUT spec at http://dejacode.org. + +AboutCode toolkit reads and validates ABOUT files and collect software +components inventories. 
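+
+An ABOUT file is a small YAML document. An illustrative example (the field
+names are the standard ones defined in About.set_standard_fields below):
+
+ about_resource: .
+ name: AboutCode-toolkit
+ version: 1.0
+ license_expression: apache-2.0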
+""" + +import json +import os +import posixpath +from requests import get, head, exceptions +import sys +import traceback +from itertools import zip_longest + +from urllib.parse import urljoin +from urllib.parse import urlparse + +from license_expression import Licensing +from packageurl import PackageURL + +from attributecode import __version__ +from attributecode import CRITICAL +from attributecode import ERROR +from attributecode import INFO +from attributecode import WARNING +from attributecode import api +from attributecode import Error +from attributecode import saneyaml +from attributecode import gen +from attributecode import util +from attributecode.transform import write_excel +from attributecode.util import add_unc +from attributecode.util import boolean_fields +from attributecode.util import copy_license_notice_files +from attributecode.util import copy_file +from attributecode.util import csv +from attributecode.util import file_fields +from attributecode.util import filter_errors +from attributecode.util import get_spdx_key_and_lic_key_from_licdb +from attributecode.util import is_valid_name +from attributecode.util import on_windows +from attributecode.util import norm +from attributecode.util import replace_tab_with_spaces +from attributecode.util import wrap_boolean_value +from attributecode.util import UNC_PREFIX +from attributecode.util import ungroup_licenses +from attributecode.util import ungroup_licenses_from_sctk + +genereated_tk_version = "# Generated with AboutCode Toolkit Version %s \n\n" % __version__ + + +class Field(object): + """ + An ABOUT file field. The initial value is a string. Subclasses can and + will alter the value type as needed. + """ + + def __init__(self, name=None, value=None, required=False, present=False): + # normalized names are lowercased per specification + self.name = name + # save this and do not mutate it afterwards + if isinstance(value, str): + self.original_value = value + # elif value: + # Don't convert it to string. Leave the format as-is + # self.original_value = repr(value) + else: + self.original_value = value + + # can become a string, list or OrderedDict() after validation + self.value = value or self.default_value() + + self.required = required + # True if the field is present in an About object + self.present = present + + self.errors = [] + + def default_value(self): + return '' + + def validate(self, *args, **kwargs): + """ + Validate and normalize thyself. Return a list of errors. + """ + errors = [] + name = self.name + + self.value = self.default_value() + if not self.present: + # required fields must be present + if self.required: + msg = u'Field %(name)s is required' + errors.append(Error(CRITICAL, msg % locals())) + return errors + else: + # present fields should have content ... + # The boolean value can be True, False and None + # The value True or False is the content of boolean fields + if not name in boolean_fields and not self.has_content: + # ... especially if required + if self.required: + msg = u'Field %(name)s is required and empty' + severity = CRITICAL + else: + severity = INFO + msg = u'Field %(name)s is present but empty.' + errors.append(Error(severity, msg % locals())) + else: + # present fields with content go through validation... 
+ # first trim any trailing spaces on each line + if isinstance(self.original_value, str): + value = '\n'.join(s.rstrip() for s + in self.original_value.splitlines(False)) + # then strip leading and trailing spaces + value = value.strip() + else: + value = self.original_value + self.value = value + try: + validation_errors = self._validate(*args, **kwargs) + errors.extend(validation_errors) + except Exception as e: + emsg = repr(e) + msg = u'Error validating field %(name)s: %(value)r: %(emsg)r' + errors.append(Error(CRITICAL, msg % locals())) + raise + + # set or reset self + self.errors = errors + return errors + + def _validate(self, *args, **kwargs): + """ + Validate and normalize thyself. Return a list of errors. + Subclasses should override as needed. + """ + return [] + + def _serialized_value(self): + return self.value if self.value else u'' + + def serialize(self): + """ + Return a unicode serialization of self in the ABOUT format. + """ + name = self.name + value = self.serialized_value() or u'' + if self.has_content or self.value: + value = value.splitlines(True) + # multi-line + if len(value) > 1: + # This code is used to read the YAML's multi-line format in + # ABOUT files + # (Test: test_loads_dumps_is_idempotent) + if value[0].strip() == u'|' or value[0].strip() == u'>': + value = u' '.join(value) + else: + # Insert '|' as the indicator for multi-line follow by a + # newline character + value.insert(0, u'|\n') + # insert 4 spaces for newline values + value = u' '.join(value) + else: + # FIXME: See https://github.com/nexB/aboutcode-toolkit/issues/323 + # The yaml.load() will throw error if the parsed value + # contains ': ' character. A work around is to put a pipe, '|' + # to indicate the whole value as a string + if value and ': ' in value[0]: + value.insert(0, u'|\n') + # insert 4 spaces for newline values + value = u' '.join(value) + else: + value = u''.join(value) + + serialized = u'%(name)s:' % locals() + if value: + serialized += ' ' + '%(value)s' % locals() + return serialized + + def serialized_value(self): + """ + Return a unicode serialization of self in the ABOUT format. + Does not include a white space for continuations. + """ + return self._serialized_value() or u'' + + @property + def has_content(self): + return self.original_value + + def __repr__(self): + name = self.name + value = self.value + required = self.required + has_content = self.has_content + present = self.present + r = ('Field(name=%(name)r, value=%(value)r, required=%(required)r, present=%(present)r)') + return r % locals() + + def __eq__(self, other): + """ + Equality based on string content value, ignoring spaces. + """ + return (isinstance(other, self.__class__) + and self.name == other.name + and self.value == other.value) + + +class StringField(Field): + """ + A field containing a string value possibly on multiple lines. + The validated value is a string. 
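+
+ Two StringFields with the same name compare equal when their values
+ differ only by whitespace; see __eq__ below.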
+ """ + + def _validate(self, *args, **kwargs): + errors = super(StringField, self)._validate(*args, ** kwargs) + no_special_char_field = [ + 'license_expression', 'license_key', 'license_name', 'declared_license_expression', 'other_license_expression '] + name = self.name + if name in no_special_char_field: + val = self.value + special_char = detect_special_char(val) + if special_char: + msg = (u'The following character(s) cannot be in the %(name)s: ' + '%(special_char)r' % locals()) + errors.append(Error(ERROR, msg)) + return errors + + def _serialized_value(self): + return self.value if self.value else u'' + + def __eq__(self, other): + """ + Equality based on string content value, ignoring spaces + """ + if not (isinstance(other, self.__class__) + and self.name == other.name): + return False + + if self.value == other.value: + return True + + # compare values stripped from spaces. Empty and None are equal + if self.value: + sval = u''.join(self.value.split()) + if not sval: + sval = None + + if other.value: + oval = u''.join(other.value.split()) + if not oval: + oval = None + + if sval == oval: + return True + + +class SingleLineField(StringField): + """ + A field containing a string value on a single line. The validated value is + a string. + """ + + def _validate(self, *args, **kwargs): + errors = super(SingleLineField, self)._validate(*args, ** kwargs) + if self.value and isinstance(self.value, str) and '\n' in self.value: + name = self.name + value = self.original_value + msg = (u'Field %(name)s: Cannot span multiple lines: %(value)s' + % locals()) + errors.append(Error(ERROR, msg)) + return errors + + +class ListField(StringField): + """ + A field containing a list of string values, one per line. The validated + value is a list. + """ + + def default_value(self): + return [] + + def _validate(self, *args, **kwargs): + errors = super(ListField, self)._validate(*args, ** kwargs) + + # reset + self.value = [] + + if isinstance(self.original_value, str): + values = self.original_value.splitlines(False) + elif isinstance(self.original_value, list): + values = self.original_value + else: + values = [repr(self.original_value)] + + for val in values: + if isinstance(val, str): + val = val.strip() + if not val: + name = self.name + msg = (u'Field %(name)s: ignored empty list value' + % locals()) + errors.append(Error(INFO, msg)) + continue + # keep only unique and report error for duplicates + if val not in self.value: + self.value.append(val) + else: + name = self.name + msg = (u'Field %(name)s: ignored duplicated list value: ' + '%(val)r' % locals()) + errors.append(Error(WARNING, msg)) + return errors + + def _serialized_value(self): + return self.value if self.value else u'' + + def __eq__(self, other): + """ + Equality based on sort-insensitive values + """ + + if not (isinstance(other, self.__class__) + and self.name == other.name): + return False + + if self.value == other.value: + return True + + # compare values stripped from spaces. + sval = [] + if self.value and isinstance(self.value, list): + sval = sorted(self.value) + + oval = [] + if other.value and isinstance(other.value, list): + oval = sorted(other.value) + + if sval == oval: + return True + + +class PackageUrlField(StringField): + """ + A Package URL field. The validated value is a purl. + """ + + def _validate(self, *args, **kwargs): + """ + Check that Package URL is valid. Return a list of errors. 
+ """ + errors = super(PackageUrlField, self)._validate(*args, ** kwargs) + name = self.name + val = self.value + if not self.is_valid_purl(val): + msg = (u'Field %(name)s: Invalid Package URL: %(val)s' % locals()) + errors.append(Error(WARNING, msg)) + return errors + + @staticmethod + def is_valid_purl(purl): + """ + Return True if a Package URL is valid. + """ + try: + return bool(PackageURL.from_string(purl)) + except: + return False + + +class UrlListField(ListField): + """ + A URL field. The validated value is a list of URLs. + """ + + def _validate(self, *args, **kwargs): + """ + Check that URLs are valid. Return a list of errors. + """ + errors = super(UrlListField, self)._validate(*args, ** kwargs) + name = self.name + val = self.value + for url in val: + if not self.is_valid_url(url): + msg = (u'Field %(name)s: Invalid URL: %(val)s' % locals()) + errors.append(Error(WARNING, msg)) + return errors + + @staticmethod + def is_valid_url(url): + """ + Return True if a URL is valid. + """ + scheme, netloc, _path, _p, _q, _frg = urlparse(url) + valid = scheme in ('http', 'https', 'ftp') and netloc + return valid + + +class UrlField(StringField): + """ + A URL field. The validated value is a URL. + """ + + def _validate(self, *args, **kwargs): + """ + Check that URL is valid. Return a list of errors. + """ + errors = super(UrlField, self)._validate(*args, ** kwargs) + name = self.name + val = self.value + if not self.is_valid_url(val): + msg = (u'Field %(name)s: Invalid URL: %(val)s' % locals()) + errors.append(Error(WARNING, msg)) + return errors + + @staticmethod + def is_valid_url(url): + """ + Return True if a URL is valid. + """ + scheme, netloc, _path, _p, _q, _frg = urlparse(url) + valid = scheme in ('http', 'https', 'ftp') and netloc + return valid + + +class PathField(ListField): + """ + A field pointing to one or more paths relative to the ABOUT file location. + The validated value is an ordered dict of path->location or None. + The paths can also be resolved + """ + + def default_value(self): + return {} + + def _validate(self, *args, **kwargs): + """ + Ensure that paths point to existing resources. Normalize to posix + paths. Return a list of errors. + + base_dir is the directory location of the ABOUT file used to resolve + relative paths to actual file locations. + """ + errors = super(PathField, self)._validate(*args, ** kwargs) + self.about_file_path = kwargs.get('about_file_path') + self.running_inventory = kwargs.get('running_inventory') + self.base_dir = kwargs.get('base_dir') + self.reference_dir = kwargs.get('reference_dir') + + if self.base_dir: + self.base_dir = util.to_posix(self.base_dir) + + name = self.name + + # Why are paths a dict? + # Ans: The reason why the PathField use a dict is because + # for the FileTextField, the key is used as the path to the file and + # the value is used as the context of the file + # dict of normalized paths to a location or None + paths = {} + + for path_value in self.value: + p = path_value.split(',') + for path in p: + path = path.strip() + path = util.to_posix(path) + + # normalize eventual / to . + # and a succession of one or more ////// to . too + if path.strip() and not path.strip(posixpath.sep): + path = '.' 
+ + # removing leading and trailing path separator + # path are always relative + path = path.strip(posixpath.sep) + + # the license files, if need to be copied, are located under the path + # set from the 'license-text-location' option, so the tool should check + # at the 'license-text-location' instead of the 'base_dir' + if not (self.base_dir or self.reference_dir): + msg = (u'Field %(name)s: Unable to verify path: %(path)s:' + u' No base directory provided' % locals()) + errors.append(Error(ERROR, msg)) + location = None + paths[path] = location + continue + if self.reference_dir: + location = posixpath.join(self.reference_dir, path) + else: + # The 'about_resource' should be a joined path with + # the 'about_file_path' and the 'base_dir + if not self.running_inventory and self.about_file_path: + # Get the parent directory of the 'about_file_path' + afp_parent = posixpath.dirname(self.about_file_path) + + # Create a relative 'about_resource' path by joining the + # parent of the 'about_file_path' with the value of the + # 'about_resource' + arp = posixpath.join(afp_parent, path) + normalized_arp = posixpath.normpath( + arp).strip(posixpath.sep) + location = posixpath.join( + self.base_dir, normalized_arp) + else: + location = posixpath.normpath( + posixpath.join(self.base_dir, path)) + + location = util.to_native(location) + location = os.path.abspath(os.path.normpath(location)) + location = util.to_posix(location) + location = add_unc(location) + + if not os.path.exists(location): + # We don't want to show the UNC_PREFIX in the error message + location = util.to_posix(location.strip(UNC_PREFIX)) + msg = (u'Field %(name)s: Path %(location)s not found' + % locals()) + # We want to show INFO error for 'about_resource' + if name == u'about_resource': + errors.append(Error(INFO, msg)) + else: + errors.append(Error(CRITICAL, msg)) + location = None + + paths[path] = location + + self.value = paths + return errors + + +class AboutResourceField(PathField): + """ + Special field for about_resource. self.resolved_paths contains a list of + the paths resolved relative to the about file path. + """ + + def __init__(self, *args, ** kwargs): + super(AboutResourceField, self).__init__(*args, ** kwargs) + self.resolved_paths = [] + + def _validate(self, *args, **kwargs): + errors = super(AboutResourceField, self)._validate(*args, ** kwargs) + return errors + + +class IgnoredResourcesField(PathField): + """ + Special field for ignored_resources. self.ignored_paths contains a list of + path patterns (glob patterns) which are not part of the summarization provided + by the ABOUT file. + """ + + def __init__(self, *args, ** kwargs): + super(AboutResourceField, self).__init__(*args, ** kwargs) + self.resolved_paths = [] + + def _validate(self, *args, **kwargs): + errors = super(AboutResourceField, self)._validate(*args, ** kwargs) + return errors + + +class FileTextField(PathField): + """ + A path field pointing to one or more text files such as license files. + The validated value is an ordered dict of path->Text or None if no + location or text could not be loaded. + """ + + def _validate(self, *args, **kwargs): + """ + Load and validate the texts referenced by paths fields. Return a list + of errors. base_dir is the directory used to resolve a file location + from a path. 
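+
+ For example, after PathField validation the value may map 'mit.LICENSE'
+ to its resolved file location; this method replaces that location with
+ the text content of the file.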
+ """ + errors = super(FileTextField, self)._validate(*args, ** kwargs) + # a FileTextField is a PathField + # self.value is a paths to location ordered dict + # we will replace the location with the text content + name = self.name + for path, location in self.value.items(): + if not location: + # do not try to load if no location + # errors about non existing locations are PathField errors + # already collected. + continue + try: + # TODO: we have lots the location by replacing it with a text + location = add_unc(location) + with open(location, encoding='utf-8', errors='replace') as txt: + text = txt.read() + self.value[path] = text + except Exception as e: + # only keep the first 100 char of the exception + emsg = repr(e)[:100] + msg = (u'Field %(name)s: Failed to load text at path: ' + u'%(path)s ' + u'with error: %(emsg)s' % locals()) + errors.append(Error(ERROR, msg)) + # set or reset self + self.errors = errors + return errors + + +class BooleanField(SingleLineField): + """ + An flag field with a boolean value. Validated value is False, True or None. + """ + + def default_value(self): + return None + + true_flags = ('yes', 'y', 'true', 'x') + false_flags = ('no', 'n', 'false') + flag_values = true_flags + false_flags + + def _validate(self, *args, **kwargs): + """ + Check that flag are valid. Convert flags to booleans. Default flag to + False. Return a list of errors. + """ + errors = super(BooleanField, self)._validate(*args, ** kwargs) + self.about_file_path = kwargs.get('about_file_path') + flag = self.get_flag(self.original_value) + if flag is False: + name = self.name + val = self.original_value + about_file_path = self.about_file_path + flag_values = self.flag_values + msg = (u'Path: %(about_file_path)s - Field %(name)s: Invalid flag value: %(val)r is not ' + u'one of: %(flag_values)s' % locals()) + errors.append(Error(ERROR, msg)) + self.value = None + elif flag is None: + name = self.name + msg = (u'Field %(name)s: field is present but empty. ' % locals()) + errors.append(Error(INFO, msg)) + self.value = None + else: + if flag == u'yes' or flag is True: + self.value = True + else: + self.value = False + return errors + + def get_flag(self, value): + """ + Return a normalized existing flag value if found in the list of + possible values or None if empty or False if not found or original value + if it is not a boolean value + """ + if value is None or value == '': + return None + + if isinstance(value, bool): + return value + else: + if isinstance(value, str): + value = value.strip() + if not value: + return None + + value = value.lower() + if value in self.flag_values: + if value in self.true_flags: + return u'yes' + else: + return u'no' + else: + return False + else: + return False + + @property + def has_content(self): + """ + Return true if it has content regardless of what value, False otherwise + """ + if self.original_value: + return True + return False + + def _serialized_value(self): + # default normalized values for serialization + if self.value: + return u'yes' + elif self.value is False: + return u'no' + else: + # self.value is None + # TODO: should we serialize to No for None??? + return u'' + + def __eq__(self, other): + """ + Boolean equality + """ + return (isinstance(other, self.__class__) + and self.name == other.name + and self.value == other.value) + + +class BooleanAndTwoCharactersField(SingleLineField): + """ + Field with either a boolean value or character(s) value (at most 2 + characters). Validated value is False, True, None or character value. 
+ """ + + def default_value(self): + return None + + true_flags = ('yes', 'y', 'true', 'x') + false_flags = ('no', 'n', 'false') + flag_values = true_flags + false_flags + + def _validate(self, *args, **kwargs): + """ + Check that flag are valid with either boolean value or character value. + Default flag to False. Return a list of errors. + """ + errors = super(BooleanAndTwoCharactersField, + self)._validate(*args, ** kwargs) + self.about_file_path = kwargs.get('about_file_path') + flag = self.get_value(self.original_value) + if flag is False: + name = self.name + val = self.original_value + about_file_path = self.about_file_path + flag_values = self.flag_values + msg = (u'Path: %(about_file_path)s - Field %(name)s: Invalid value: %(val)r is not ' + u'one of: %(flag_values)s and it is not a 1 or 2 character value.' % locals()) + errors.append(Error(ERROR, msg)) + self.value = None + elif flag is None: + name = self.name + msg = (u'Field %(name)s: field is present but empty. ' % locals()) + errors.append(Error(INFO, msg)) + self.value = None + else: + if flag == u'yes' or flag is True: + self.value = True + elif flag == u'no': + self.value = False + else: + self.value = flag + return errors + + def get_value(self, value): + """ + Return a normalized existing value if found in the list of + possible values or None if empty or False if not found or original value + if it is not a boolean value + """ + if value is None or value == '': + return None + + if isinstance(value, bool): + return value + else: + if isinstance(value, str): + value = value.strip() + if not value: + return None + + value = value.lower() + if value in self.flag_values or len(value) <= 2: + if value in self.true_flags: + return u'yes' + elif value in self.false_flags: + return u'no' + else: + return value + else: + return False + else: + return False + + @property + def has_content(self): + """ + Return true if it has content regardless of what value, False otherwise + """ + if self.original_value: + return True + return False + + def _serialized_value(self): + # default normalized values for serialization + if self.value: + if isinstance(self.value, bool): + return u'yes' + else: + return self.value + elif self.value is False: + return u'no' + else: + # self.value is None + # TODO: should we serialize to No for None??? + return u'' + + +def validate_fields(fields, about_file_path, running_inventory, base_dir, + reference_dir=None): + """ + Validate a sequence of Field objects. Return a list of errors. + Validation may update the Field objects as needed as a side effect. + """ + errors = [] + for f in fields: + val_err = f.validate( + base_dir=base_dir, + about_file_path=about_file_path, + running_inventory=running_inventory, + reference_dir=reference_dir, + ) + errors.extend(val_err) + return errors + + +def validate_field_name(name): + if not is_valid_name(name): + msg = ('Field name: %(name)r contains illegal name characters ' + '(or empty spaces) and is ignored.') + return Error(WARNING, msg % locals()) + + +class License: + """ + Represent a License object + """ + + def __init__(self, key, name, filename, url, text): + self.key = key + self.name = name + self.filename = filename + self.url = url + self.text = text + + +class About(object): + """ + Represent an ABOUT file and functions to parse and validate a file. 
+ """ + # special names, used only when serializing lists of ABOUT files to CSV or + # similar + + # name of the attribute containing the relative ABOUT file path + ABOUT_FILE_PATH_ATTR = 'about_file_path' + + # name of the attribute containing the resolved relative Resources paths + about_resource_path_attr = 'about_resource_path' + + # name of the attribute containing the resolved relative Resources paths + ABOUT_RESOURCE_ATTR = 'about_resource' + + # Required fields + required_fields = ['name'] + + def get_required_fields(self): + return [f for f in self.fields if f.required] + + def set_standard_fields(self): + """ + Create fields in an ordered dict to keep a standard ordering. We + could use a metaclass to track ordering django-like but this approach + is simpler. + """ + self.fields = dict([ + ('about_resource', AboutResourceField()), + ('ignored_resources', AboutResourceField()), + ('name', SingleLineField(required=True)), + ('version', SingleLineField()), + + ('download_url', UrlField()), + ('description', StringField()), + ('homepage_url', UrlField()), + ('package_url', PackageUrlField()), + ('notes', StringField()), + + ('license_expression', SingleLineField()), + ('license_key', ListField()), + ('license_name', ListField()), + ('license_file', FileTextField()), + ('license_url', UrlListField()), + ('spdx_license_expression', SingleLineField()), + ('spdx_license_key', ListField()), + ('declared_license_expression', SingleLineField()), + ('other_license_expression', SingleLineField()), + ('copyright', StringField()), + ('notice_file', FileTextField()), + ('notice_url', UrlField()), + + ('redistribute', BooleanField()), + ('attribute', BooleanAndTwoCharactersField()), + ('track_changes', BooleanField()), + ('modified', BooleanField()), + ('internal_use_only', BooleanField()), + + ('changelog_file', FileTextField()), + + ('owner', StringField()), + ('owner_url', UrlField()), + ('contact', StringField()), + ('author', StringField()), + ('author_file', FileTextField()), + + ('vcs_tool', SingleLineField()), + ('vcs_repository', SingleLineField()), + ('vcs_path', SingleLineField()), + ('vcs_tag', SingleLineField()), + ('vcs_branch', SingleLineField()), + ('vcs_revision', SingleLineField()), + + ('checksum_md5', SingleLineField()), + ('checksum_sha1', SingleLineField()), + ('checksum_sha256', SingleLineField()), + ('spec_version', SingleLineField()), + ]) + + for name, field in self.fields.items(): + # we could have a hack to get the actual field name + # but setting an attribute is explicit and cleaner + field.name = name + setattr(self, name, field) + + def __init__(self, location=None, about_file_path=None, strict=False): + """ + Create an instance. + If strict is True, raise an Exception on errors. Otherwise the errors + attribute contains the errors. + """ + self.set_standard_fields() + self.custom_fields = {} + + self.errors = [] + + # about file path relative to the root of an inventory using posix + # path separators + self.about_file_path = about_file_path + + # os native absolute location, using posix path separators + self.location = location + self.base_dir = None + if self.location: + self.base_dir = os.path.dirname(location) + self.errors.extend(self.load(location)) + if strict and self.errors and filter_errors(self.errors): + msg = '\n'.join(map(str, self.errors)) + raise Exception(msg) + + def __repr__(self): + return repr(self.all_fields()) + + def __eq__(self, other): + """ + Equality based on fields and custom_fields., i.e. content. 
+ """ + return (isinstance(other, self.__class__) + and self.fields == other.fields + and self.custom_fields == other.custom_fields) + + def all_fields(self): + """ + Return the list of all Field objects. + """ + return list(self.fields.values()) + list(self.custom_fields.values()) + + def as_dict(self): + """ + Return all the standard fields and customer-defined fields of this + About object in an ordered dict. + """ + data = {} + data[self.ABOUT_FILE_PATH_ATTR] = self.about_file_path + with_values = ((fld.name, fld.serialized_value()) + for fld in self.all_fields()) + non_empty = ((name, value) for name, value in with_values if value) + data.update(non_empty) + return data + + def hydrate(self, fields): + """ + Process an iterable of field (name, value) tuples. Update or create + Fields attributes and the fields and custom fields dictionaries. + Return a list of errors. + """ + errors = [] + seen_fields = {} + illegal_name_list = [] + + for name, value in fields: + orig_name = name + name = name.lower() + + # Some special attributes + if name == self.ABOUT_FILE_PATH_ATTR: + # this is a special attribute set directly on object + setattr(self, name, value) + continue + + if name == self.about_resource_path_attr: + # this is a special attribute, skip entirely + continue + + # A field that has been already processed ... and has a value + previous_value = seen_fields.get(name) + if previous_value: + if value != previous_value: + msg = (u'Field %(orig_name)s is a duplicate. ' + u'Original value: "%(previous_value)s" ' + u'replaced with: "%(value)s"') + errors.append(Error(WARNING, msg % locals())) + continue + + seen_fields[name] = value + + # A standard field (could be essential/required or not) + standard_field = self.fields.get(name) + if standard_field: + standard_field.original_value = value + standard_field.value = value + standard_field.present = True + continue + + # A custom field + # is the name valid? + if not is_valid_name(name): + if not name in illegal_name_list: + illegal_name_list.append(name) + continue + + msg = 'Custom Field: %(orig_name)s' + errors.append(Error(INFO, msg % locals())) + # is this a known one? + custom_field = self.custom_fields.get(name) + if custom_field: + # An known custom field + custom_field.original_value = value + custom_field.value = value + custom_field.present = True + else: + # A new, unknown custom field + custom_field = Field(name=name, value=value, present=True) + self.custom_fields[name] = custom_field + # FIXME: why would this ever fail??? + try: + if name in dir(self): + raise Exception( + 'Illegal field: %(name)r: %(value)r.' % locals()) + setattr(self, name, custom_field) + except: + msg = 'Internal error with custom field: %(name)r: %(value)r.' + errors.append(Error(CRITICAL, msg % locals())) + + if illegal_name_list: + msg = ('Field name: %(illegal_name_list)r contains illegal name characters ' + '(or empty spaces) and is ignored.') + errors.append(Error(WARNING, msg % locals())) + return errors + + def process(self, fields, about_file_path, running_inventory=False, + base_dir=None, scancode=False, from_attrib=False, reference_dir=None): + """ + Validate and set as attributes on this About object a sequence of + `fields` name/value tuples. Return a list of errors. 
+ """ + self.base_dir = base_dir + self.reference_dir = reference_dir + afp = self.about_file_path + + errors = self.hydrate(fields) + + # We want to copy the license_files before the validation + if reference_dir and not from_attrib: + copy_err = copy_license_notice_files( + fields, base_dir, reference_dir, afp) + errors.extend(copy_err) + + # TODO: why? we validate all fields, not only these hydrated + # The validate functions does not allow duplicated entry for a list meaning + # it will cause problem when using scancode license detection as an input as + # it usually returns duplicated license_key and many license have duplicated + # score such as 100. We need to handle this scenario using different method. + if not scancode: + validation_errors = validate_fields( + self.all_fields(), + about_file_path, + running_inventory, + self.base_dir, + self.reference_dir) + errors.extend(validation_errors) + return errors + + def load(self, location): + """ + Read, parse and process the ABOUT file at `location`. + Return a list of errors and update self with errors. + """ + self.location = location + loc = util.to_posix(location) + base_dir = posixpath.dirname(loc) + errors = [] + try: + loc = add_unc(loc) + with open(loc, encoding='utf-8', errors='replace') as txt: + input_text = txt.read() + if not input_text: + msg = 'ABOUT file is empty: %(location)r' + errors.append(Error(CRITICAL, msg % locals())) + self.errors = errors + return errors + # The 'Yes' and 'No' will be converted to 'True' and 'False' in the yaml.load() + # Therefore, we need to wrap the original value in quote to prevent + # the conversion + pre_input = wrap_boolean_value(input_text) + # saneyaml.load() will have parsing error if the input has + # tab value. Therefore, we should check if the input contains + # any tab and then convert it to spaces. + input = replace_tab_with_spaces(pre_input) + # FIXME: this should be done in the commands, not here + """ + The running_inventory defines if the current process is 'inventory' or not. + This is used for the validation of the path of the 'about_resource'. + In the 'inventory' command, the code will use the parent of the about_file_path + location and join with the 'about_resource' for the validation. + On the other hand, in the 'gen' command, the code will use the + generated location (aka base_dir) along with the parent of the about_file_path + and then join with the 'about_resource' + """ + running_inventory = True + data = saneyaml.load(input, allow_duplicate_keys=False) + errs = self.load_dict( + data, base_dir, running_inventory=running_inventory) + errors.extend(errs) + except Exception as e: + # The trace is good for debugging, but probably not good for user to + # see the traceback message + # trace = traceback.format_exc() + # msg = 'Cannot load invalid ABOUT file: %(location)r: %(e)r\n%(trace)s' + msg = 'Cannot load invalid ABOUT file: %(location)r: %(e)r' + errors.append(Error(CRITICAL, msg % locals())) + + self.errors = errors + return errors + + # FIXME: should be a from_dict class factory instead + # FIXME: running_inventory: remove this : this should be done in the commands, not here + + def load_dict(self, fields_dict, base_dir, scancode=False, from_attrib=False, running_inventory=False, reference_dir=None,): + """ + Load this About object file from a `fields_dict` name/value dict. + Return a list of errors. 
+ """ + # do not keep empty + fields = list(fields_dict.items()) + + if scancode: + have_copyright = False + for key, value in fields: + if not value: + continue + if key == u'copyrights': + have_copyright = True + elif key == u'license_detections': + lic_list = ungroup_licenses_from_sctk(value) + lic_exp_list = [] + for detected_license in value: + if 'license_expression' in detected_license: + lic_exp_list.append( + detected_license['license_expression']) + if lic_exp_list: + fields.append( + ('license_expression', ' AND '.join(lic_exp_list))) + + lic_key_list = [] + lic_key_exp_list = [] + lic_score_list = [] + for lic in lic_list: + _char, lic_keys, _invalid_lic_exp = parse_license_expression( + lic['lic_exp']) + lic_key_list.append(lic_keys) + # for lic_key in lic_keys: + # lic_key_list.append([lic_key]) + + for lic in lic_list: + lic_key_exp_list.append(lic['lic_exp']) + lic_score_list.append(lic['score']) + fields.append(('license_key', lic_key_list)) + fields.append(('license_key_expression', lic_key_exp_list)) + fields.append(('license_score', lic_score_list)) + + # The licenses field has been ungrouped and can be removed. + # Otherwise, it will gives the following INFO level error + # 'Field licenses is a custom field.' + licenses_field = (key, value) + fields.remove(licenses_field) + # Make sure the copyrights is present even is empty to avoid error + # when generating with Jinja + if not have_copyright: + fields.append(('copyrights', '')) + + else: + for key, value in fields: + if not value: + # never return empty or absent fields + continue + if key == u'licenses': + # FIXME: use a license object instead + lic_key, lic_name, lic_file, lic_url, spdx_lic_key, lic_score, lic_matched_text = ungroup_licenses( + value) + if lic_key: + fields.append(('license_key', lic_key)) + if lic_name: + fields.append(('license_name', lic_name)) + if lic_file: + fields.append(('license_file', lic_file)) + if lic_url: + fields.append(('license_url', lic_url)) + if spdx_lic_key: + fields.append(('spdx_license_key', spdx_lic_key)) + # The license score is a key from scancode license scan + if lic_score: + fields.append(('license_score', lic_score)) + if lic_matched_text: + fields.append(('matched_text', lic_matched_text)) + # The licenses field has been ungrouped and can be removed. + # Otherwise, it will gives the following INFO level error + # 'Field licenses is a custom field.' + licenses_field = (key, value) + fields.remove(licenses_field) + + errors = self.process( + fields=fields, + about_file_path=self.about_file_path, + running_inventory=running_inventory, + base_dir=base_dir, + scancode=scancode, + from_attrib=from_attrib, + reference_dir=reference_dir, + ) + self.errors = errors + return errors + + @classmethod + def from_dict(cls, about_data, base_dir=''): + """ + Return an About object loaded from a python dict. + """ + about = cls() + about.load_dict(about_data, base_dir=base_dir) + return about + + def dumps(self, licenses_dict=None): + """ + Return self as a formatted ABOUT string. 
+ """ + data = {} + # Group the same license information (name, url, file) together + license_key = [] + license_name = [] + license_file = [] + license_url = [] + spdx_license_key = [] + bool_fields = ['redistribute', 'attribute', + 'track_changes', 'modified', 'internal_use_only'] + for field in self.all_fields(): + if not field.value and not field.name in bool_fields: + continue + if field.name == 'license_key' and field.value: + license_key = field.value + elif field.name == 'license_name' and field.value: + license_name = field.value + elif field.name == 'license_file' and field.value: + # Restore the original_value as it was parsed for + # validation purpose + if field.original_value: + # This line break is for the components that have multiple license + # values in CSV format. + if '\n' in field.original_value: + license_file_list = field.original_value.split('\n') + license_file = [] + # Strip the carriage return character '\r' See #443 + for lic in license_file_list: + if '\r' in lic: + license_file.append(lic.strip('\r')) + else: + license_file.append(lic) + else: + if isinstance(field.original_value, list): + license_file = list(field.value.keys()) + else: + # Restore the original license_file value + # See #444 + license_file = [field.original_value] + else: + license_file = list(field.value.keys()) + elif field.name == 'license_url' and field.value: + license_url = field.value + elif field.name == 'spdx_license_key' and field.value: + spdx_license_key = field.value + elif field.name in file_fields and field.value: + data[field.name] = field.original_value + elif field.name in bool_fields and not field.value == None: + data[field.name] = field.value + else: + if field.value: + data[field.name] = field.value + # If there is no license_key value, parse the license_expression + # and get the parsed license key + if 'license_expression' in data: + if not license_key and data['license_expression']: + _spec_char, lic_list, _invalid_lic_exp = parse_license_expression( + data['license_expression']) + license_key = lic_list + + # Group the same license information in a list + # This `licenses_dict` is a dictionary with license key as the key and the + # value is the list of [license_name, license_filename, license_context, license_url] + lic_key_copy = license_key[:] + lic_dict_list = [] + for lic_key in license_key: + lic_dict = {} + if licenses_dict and lic_key in licenses_dict: + lic_dict['key'] = lic_key + lic_name, lic_filename, lic_context, lic_url, spdx_lic_key = licenses_dict[ + lic_key] + if lic_name: + lic_dict['name'] = lic_name + if lic_filename: + lic_dict['file'] = lic_filename + if lic_url: + lic_dict['url'] = lic_url + if spdx_lic_key: + lic_dict['spdx_license_key'] = spdx_lic_key + + # Remove the license information if it has been handled + # The following condition is to check if license information + # has been fetched, the license key is invalid or custom if + # no value for lic_name + if lic_name: + lic_key_copy.remove(lic_key) + if lic_name in license_name: + license_name.remove(lic_name) + if lic_url in license_url: + license_url.remove(lic_url) + if lic_filename in license_file: + license_file.remove(lic_filename) + if spdx_lic_key in spdx_license_key: + spdx_license_key.remove(spdx_lic_key) + lic_dict_list.append(lic_dict) + + # Handle license information that have not been handled. 
+ # If lic_key_copy and license_file have the same length, assume each
+ # license_file (custom license) refers to the lic_key at the same
+ # position; otherwise do not group them
+ if len(lic_key_copy) == len(license_file):
+ license_group = list(zip_longest(
+ lic_key_copy, license_name, license_file, license_url, spdx_license_key))
+ else:
+ license_group = list(zip_longest(
+ lic_key_copy, license_name, [], license_url, spdx_license_key))
+ # Add the unhandled lic_file, if any
+ if license_file:
+ for lic_file in license_file:
+ license_group.append((None, None, lic_file, None, None))
+
+ for lic_group in license_group:
+ lic_dict = {}
+ if lic_group[0]:
+ lic_dict['key'] = lic_group[0]
+ if lic_group[1]:
+ lic_dict['name'] = lic_group[1]
+ else:
+ # If no name is given, treat the key as the name
+ if lic_group[0]:
+ lic_dict['name'] = lic_group[0]
+ if lic_group[2]:
+ lic_dict['file'] = lic_group[2]
+ if lic_group[3]:
+ lic_dict['url'] = lic_group[3]
+ if lic_group[4]:
+ lic_dict['spdx_license_key'] = lic_group[4]
+ lic_dict_list.append(lic_dict)
+
+ # Format the license information in the same order as the license expression
+ for key in license_key:
+ for lic_dict in lic_dict_list:
+ # use .get(): entries added for unhandled license files have no 'key'
+ if key == lic_dict.get('key'):
+ data.setdefault('licenses', []).append(lic_dict)
+ lic_dict_list.remove(lic_dict)
+ break
+
+ for lic_dict in lic_dict_list:
+ data.setdefault('licenses', []).append(lic_dict)
+
+ return saneyaml.dump(data)
+
+ def dump(self, location, lic_dict=None):
+ """
+ Write a formatted ABOUT representation of self to `location`.
+ """
+ loc = util.to_posix(location)
+ parent = posixpath.dirname(loc)
+
+ if not posixpath.exists(parent):
+ os.makedirs(add_unc(parent))
+
+ about_file_path = loc
+ if not about_file_path.endswith('.ABOUT'):
+ # FIXME: we should not infer some location.
+ if about_file_path.endswith('/'):
+ about_file_path = util.to_posix(
+ os.path.join(parent, os.path.basename(parent)))
+ about_file_path += '.ABOUT'
+
+ if on_windows:
+ about_file_path = add_unc(about_file_path)
+
+ with open(about_file_path, mode='w', encoding='utf-8', errors='replace') as dumped:
+ dumped.write(genereated_tk_version)
+ dumped.write(self.dumps(lic_dict))
+
+ def dump_android_notice(self, path, context):
+ """
+ Write the NOTICE file consisting of the copyright, notice and license
+ texts.
+ """
+ if on_windows:
+ path = add_unc(path)
+
+ with open(path, mode='w', encoding='utf-8', errors='replace') as dumped:
+ dumped.write(context)
+
+ def android_module_license(self, about_parent_path):
+ """
+ Create MODULE_LICENSE_XXX files, where XXX is the license key value.
+ """
+ for lic_key in self.license_key.value:
+ # Uppercase the key and replace dashes, spaces and dots with
+ # underscores for a consistent look.
+ name = 'MODULE_LICENSE_' + \
+ lic_key.replace('.', '_').replace(
+ '-', '_').replace(' ', '_').upper()
+ module_lic_path = os.path.join(about_parent_path, name)
+ # Create an empty MODULE_LICENSE_XXX file
+ open(module_lic_path, 'a').close()
+
+ def android_notice(self, about_parent_path):
+ """
+ Return the path of the NOTICE file to be created under
+ about_parent_path, and the notice content for that file.
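+ The content is built from the copyright value followed by any
+ notice_file and license_file texts.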
+ """ + # Create NOTICE file with the combination context of copyright, + # notice_file and license_file + notice_path = posixpath.join(about_parent_path, 'NOTICE') + notice_context = '' + if self.copyright.value: + notice_context += self.copyright.value + if self.notice_file.value: + notice_file_dict = self.notice_file.value + notice_file_key = notice_file_dict.keys() + for key in notice_file_key: + if notice_file_dict[key]: + notice_context += '\n' + notice_file_dict[key] + '\n' + if self.license_file.value: + lic_file_dict = self.license_file.value + lic_file_key = lic_file_dict.keys() + for key in lic_file_key: + if lic_file_dict[key]: + notice_context += '\n\n' + lic_file_dict[key] + '\n\n' + return notice_path, notice_context + + def dump_lic(self, location, license_dict): + """ + Write LICENSE files and return the a list of key, name, context and the url + as these information are needed for the ABOUT file + """ + license_name = license_context = license_url = '' + loc = util.to_posix(location) + parent = posixpath.dirname(loc) + license_key_name_context_url = [] + + if not posixpath.exists(parent): + os.makedirs(add_unc(parent)) + + licenses_list = [] + if self.license_expression.present: + special_char_in_expression, lic_list, invalid_lic_exp = parse_license_expression( + self.license_expression.value) + if lic_list: + for lic in lic_list: + if lic not in licenses_list: + licenses_list.append(lic) + if self.declared_license_expression.present: + special_char_in_expression, lic_list, invalid_lic_exp = parse_license_expression( + self.declared_license_expression.value) + if lic_list: + for lic in lic_list: + if lic not in licenses_list: + licenses_list.append(lic) + if self.other_license_expression.present: + special_char_in_expression, lic_list, invalid_lic_exp = parse_license_expression( + self.other_license_expression.value) + if lic_list: + for lic in lic_list: + if lic not in licenses_list: + licenses_list.append(lic) + if licenses_list: + self.license_key.value = licenses_list + self.license_key.present = True + for lic_key in licenses_list: + license_name = '' + license_filename = '' + license_context = '' + license_url = '' + spdx_license_key = '' + if lic_key in license_dict: + license_path = posixpath.join(parent, lic_key) + license_path += u'.LICENSE' + license_path = add_unc(license_path) + license_name, license_filename, license_context, license_url, spdx_license_key = license_dict[ + lic_key] + license_info = (lic_key, license_name, license_filename, + license_context, license_url, spdx_license_key) + license_key_name_context_url.append(license_info) + with open(license_path, mode='w', encoding='utf-8', newline='\n', errors='replace') as lic: + lic.write(license_context) + else: + # Invalid license issue is already handled + license_info = (lic_key, license_name, license_filename, + license_context, license_url, spdx_license_key) + license_key_name_context_url.append(license_info) + + return license_key_name_context_url + + +def collect_inventory(location, exclude=None): + """ + Collect ABOUT files at location and return a list of errors and a list of + About objects. 
+ """ + errors = [] + input_location = util.get_absolute(location) + about_locations = list(util.get_about_locations(input_location, exclude)) + + name_errors = util.check_file_names(about_locations) + errors.extend(name_errors) + abouts = [] + custom_fields_list = [] + for about_loc in about_locations: + about_file_path = util.get_relative_path(input_location, about_loc) + about = About(about_loc, about_file_path) + for severity, message in about.errors: + if 'Custom Field' in message: + field_name = message.replace('Custom Field: ', '').strip() + if field_name not in custom_fields_list: + custom_fields_list.append(field_name) + else: + msg = (about_file_path + ": " + message) + errors.append(Error(severity, msg)) + abouts.append(about) + if custom_fields_list: + custom_fields_err_msg = 'Field ' + \ + str(custom_fields_list) + ' is a custom field.' + errors.append(Error(INFO, custom_fields_err_msg)) + return errors, abouts + + +def collect_abouts_license_expression(location): + """ + Read the ABOUT files at location and return a list of ABOUT objects without + validation. The purpose of this is to speed up the process for `gen_license` command. + """ + lic_key_list = [] + errors = [] + input_location = util.get_absolute(location) + about_locations = list(util.get_about_locations(input_location)) + abouts = [] + + for loc in about_locations: + try: + loc = add_unc(loc) + with open(loc, encoding='utf-8', errors='replace') as txt: + input_text = txt.read() + # saneyaml.load() will have parsing error if the input has + # tab value. Therefore, we should check if the input contains + # any tab and then convert it to spaces. + input = replace_tab_with_spaces(input_text) + data = saneyaml.load(input, allow_duplicate_keys=False) + about = About() + about.load_dict(data, base_dir='') + abouts.append(about) + except Exception as e: + trace = traceback.format_exc() + msg = 'Cannot load invalid ABOUT file: %(location)r: %(e)r\n%(trace)s' + errors.append(Error(CRITICAL, msg % locals())) + + return errors, abouts + + +def collect_inventory_license_expression(location, scancode=False, worksheet=None): + """ + Read the inventory file at location and return a list of ABOUT objects without + validation. The purpose of this is to speed up the process for `gen_license` command. + """ + abouts = [] + errors = [] + + if scancode: + inventory = gen.load_scancode_json(location) + # ScanCode uses 'detected_license_expression' + if not 'detected_license_expression' in inventory[0]: + errors.append( + Error(CRITICAL, "No 'license_expressions' field in the input.")) + return errors, abouts + else: + if location.endswith('.csv'): + inventory = gen.load_csv(location) + elif location.endswith('.xlsx'): + _dup_cols_err, inventory = gen.load_excel(location, worksheet) + else: + inventory = gen.load_json(location) + # Check if 'license_expression' field is in the input + if not inventory or not 'license_expression' in inventory[0]: + errors.append( + Error(CRITICAL, "No 'license_expression' field in the input.")) + return errors, abouts + + for data in inventory: + about = About() + about.load_dict(data, base_dir='', scancode=scancode) + abouts.append(about) + return errors, abouts + + +def get_field_names(abouts): + """ + Given a list of About objects, return a list of any field names that exist + in any object, including custom fields. 
+ """ + fields = [] + # fields.append(About.ABOUT_FILE_PATH_ATTR) + + standard_fields = About().fields.keys() + standards = [] + for a in abouts: + for name, field in a.fields.items(): + if field.required: + if name not in standards: + standards.append(name) + else: + if field.present: + if name not in standards: + standards.append(name) + # resort standard fields in standard order + # which is a tad complex as this is a predefined order + sorted_std = [] + for fn in standard_fields: + if fn in standards: + sorted_std.append(fn) + fields.extend(sorted_std) + + customs = [] + for a in abouts: + for name, field in a.custom_fields.items(): + if field.has_content: + if name not in customs: + customs.append(name) + # always sort custom fields list by name + customs.sort() + fields.extend(customs) + + return fields + + +def copy_redist_src(copy_list, location, output, with_structure): + """ + Given a list of files/directories and copy to the destination + """ + errors = [] + for from_path in copy_list: + norm_from_path = norm(from_path) + relative_from_path = norm_from_path.partition(util.norm(location))[2] + # Need to strip the '/' to use the join + if relative_from_path.startswith('/'): + relative_from_path = relative_from_path.partition('/')[2] + # Get the directory name of the output path + if with_structure: + output_dir = os.path.dirname(os.path.join( + output, util.norm(relative_from_path))) + else: + output_dir = output + err = copy_file(from_path, output_dir) + if err: + errors.extend(err) + return errors + + +def get_copy_list(abouts, location): + """ + Return a list of files/directories that need to be copied (and error if any) + This is a summary list in a sense that if a directory is already in the list, + its children directories/files will not be included in the list regardless if + they have 'redistribute' flagged. The reason for this is we want to capture + the error/warning if existence files/directories already exist. However, if + we don't have this "summarized" list, and we've copied a file (with directory structure) + and then later on this file's parent directory also need to be copied, then + it will prompt warning as the directory that need to be copied is already exist. + Technically, this is correct, but it leads to confusion. Therefore, we want to + create a summarized list to avoid this kind of confusion. + """ + errors = [] + copy_list = [] + dir_list = [] + file_list = [] + for about in abouts: + if about.redistribute.value: + file_exist = True + for e in about.errors: + if 'Field about_resource' in e.message and 'not found' in e.message: + msg = e.message + u' and cannot be copied.' 
+ errors.append(Error(CRITICAL, msg)) + file_exist = False + continue + if file_exist: + for k in about.about_resource.value: + from_path = about.about_resource.value.get(k) + if on_windows: + norm_from_path = norm(from_path) + else: + norm_from_path = os.path.normpath(from_path) + # Get the relative path + relative_from_path = norm_from_path.partition( + util.norm(location))[2] + if os.path.isdir(from_path): + if not dir_list: + dir_list.append(relative_from_path) + else: + handled = False + for dir in dir_list: + # The dir is a parent of the relative_from_path + if dir in relative_from_path: + handled = True + continue + # The relative_from_path is the parent of the dir + # We need to update the dir_list + if relative_from_path in dir: + dir_list.remove(dir) + dir_list.append(relative_from_path) + handled = True + continue + if not handled: + dir_list.append(relative_from_path) + else: + # Check if the file is from "root" + # If the file is at root level, it'll add to the copy_list + if not os.path.dirname(relative_from_path) == '/': + file_list.append(relative_from_path) + else: + copy_list.append(from_path) + + for dir in dir_list: + for f in file_list: + # The file is already in one of copied directories + if dir in f: + file_list.remove(f) + continue + if dir.startswith('/'): + dir = dir.partition('/')[2] + absolute_path = os.path.join(location, dir) + if on_windows: + absolute_path = add_unc(absolute_path) + copy_list.append(absolute_path) + + for f in file_list: + if f.startswith('/'): + f = f.partition('/')[2] + absolute_path = os.path.join(location, f) + if on_windows: + absolute_path = add_unc(absolute_path) + copy_list.append(absolute_path) + + return copy_list, errors + + +def about_object_to_list_of_dictionary(abouts): + """ + Convert About objects to a list of dictionaries + """ + serialized = [] + for about in abouts: + # Restore the *_file value to the original value + # The *_file's original_value may be parsed (i.e. split(',)) + # for validation purpose. + about.license_file.value = about.license_file.original_value + about.notice_file.value = about.notice_file.original_value + about.changelog_file.value = about.changelog_file.original_value + about.author_file.value = about.author_file.original_value + + # TODO: this wholeblock should be under sd_dict() + ad = about.as_dict() + + if 'about_file_path' in ad.keys(): + afp = ad['about_file_path'] + afp_parent = posixpath.dirname(afp) + afp_parent = '/' + \ + afp_parent if not afp_parent.startswith( + '/') else afp_parent + + # Update the 'about_resource' field with the relative path + # from the output location + if 'about_resource' in ad.keys(): + about_resource = ad['about_resource'] + for resource in about_resource: + updated_about_resource = posixpath.normpath( + posixpath.join(afp_parent, resource)) + if resource == u'.': + if not updated_about_resource == '/': + updated_about_resource = updated_about_resource + '/' + ad['about_resource'] = dict( + [(updated_about_resource, None)]) + del ad['about_file_path'] + serialized.append(ad) + return serialized + + +def write_output(abouts, location, format): # NOQA + """ + Write a CSV/JSON file at location given a list of About objects. + Return a list of Error objects. 
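+
+    Usage sketch (illustrative; the output path is hypothetical):
+
+        write_output(abouts, '/tmp/inventory.csv', format='csv')
+        # pass '-' as the location to print CSV/JSON to standard output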
+ """ + about_dicts = about_object_to_list_of_dictionary(abouts) + if not location == '-': + location = add_unc(location) + if format == 'csv': + save_as_csv(location, about_dicts, get_field_names(abouts)) + elif format == 'json': + save_as_json(location, about_dicts) + else: + save_as_excel(location, about_dicts) + + +def save_as_json(location, about_dicts): + """ + Save the given data as a JSON file or print it to standard output. + """ + data = util.format_about_dict_for_json_output(about_dicts) + if location == '-': + json.dump(data, sys.stdout, indent=2) + else: + with open(location, mode='w') as output_file: + output_file.write(json.dumps(data, indent=2)) + + +def save_as_csv(location, about_dicts, field_names): + """ + Save the given data as a CSV file or print it to standard output. + """ + if location == '-': + writer = csv.DictWriter(sys.stdout, field_names) + writer.writeheader() + csv_formatted_list = util.format_about_dict_output(about_dicts) + for row in csv_formatted_list: + writer.writerow(row) + else: + with open(location, mode='w', encoding='utf-8', newline='', errors='replace') as output_file: + writer = csv.DictWriter(output_file, field_names) + writer.writeheader() + csv_formatted_list = util.format_about_dict_output(about_dicts) + for row in csv_formatted_list: + writer.writerow(row) + + +def save_as_excel(location, about_dicts): + """ + Save the given data as a Excel file. + """ + formatted_list = util.format_about_dict_output(about_dicts) + write_excel(location, formatted_list) + + +def pre_process_and_fetch_license_dict(abouts, from_check=False, api_url=None, api_key=None, scancode=False, reference=None): + """ + Return a dictionary containing the license information (key, name, text, url) + fetched from the ScanCode LicenseDB or DejaCode API. + """ + key_text_dict = {} + captured_license = [] + errors = [] + if api_url: + dje_uri = urlparse(api_url) + domain = '{uri.scheme}://{uri.netloc}/'.format(uri=dje_uri) + lic_urn = urljoin(domain, 'urn/?urn=urn:dje:license:') + url = api_url + else: + url = 'https://scancode-licensedb.aboutcode.org/' + if util.have_network_connection(): + if not valid_api_url(url): + msg = u"URL not reachable. Invalid 'URL. License generation is skipped." + errors.append(Error(ERROR, msg)) + else: + msg = u'Network problem. Please check your Internet connection. License generation is skipped.' + errors.append(Error(ERROR, msg)) + + if errors: + return key_text_dict, errors + + spdx_sclickey_dict = get_spdx_key_and_lic_key_from_licdb() + for about in abouts: + # No need to go through all the about objects if '--api_key' is invalid + auth_error = Error( + ERROR, u"Authorization denied. Invalid '--api_key'. 
License generation is skipped.") + if auth_error in errors: + break + + if scancode: + lic_exp = '' + lic_list = [] + if about.detected_license_expression.value: + lic_exp = about.detected_license_expression.value + about.license_expression.value = lic_exp + about.license_expression.present = True + + afp = '' + if about.about_file_path: + afp = about.about_file_path + + if not about.license_expression.value and about.spdx_license_expression.value: + lic_exp_value = "" + special_char_in_expression, lic_list, invalid_lic_exp = parse_license_expression( + about.spdx_license_expression.value) + if special_char_in_expression or invalid_lic_exp: + if special_char_in_expression: + if afp: + msg = (afp + u": The following character(s) cannot be in the spdx_license_expression: " + + str(special_char_in_expression)) + else: + msg = (u"The following character(s) cannot be in the spdx_license_expression: " + + str(special_char_in_expression)) + else: + if afp: + msg = (afp + u": This spdx_license_expression is invalid: " + + str(invalid_lic_exp)) + else: + msg = (u"This spdx_license_expression is invalid: " + + str(invalid_lic_exp)) + errors.append(Error(ERROR, msg)) + else: + spdx_lic_exp_segment = about.spdx_license_expression.value.split() + for spdx_lic_key in spdx_lic_exp_segment: + if lic_exp_value: + lic_exp_value = lic_exp_value + " " + convert_spdx_expression_to_lic_expression( + spdx_lic_key, spdx_sclickey_dict) + else: + lic_exp_value = convert_spdx_expression_to_lic_expression( + spdx_lic_key, spdx_sclickey_dict) + if lic_exp_value: + about.license_expression.value = lic_exp_value + about.license_expression.present = True + + lic_exp_list = [] + + if about.declared_license_expression.value: + special_char_in_expression, lic_list, invalid_lic_exp = parse_license_expression( + about.declared_license_expression.value) + if special_char_in_expression: + if afp: + msg = (afp + u": The following character(s) cannot be in the declared_license_expression: " + + str(special_char_in_expression)) + else: + msg = (u"The following character(s) cannot be in the declared_license_expression: " + + str(special_char_in_expression)) + errors.append(Error(ERROR, msg)) + if invalid_lic_exp: + if afp: + msg = (afp + u": This declared_license_expression is invalid: " + + str(invalid_lic_exp)) + else: + msg = (u"This declared_license_expression is invalid: " + + str(invalid_lic_exp)) + errors.append(Error(ERROR, msg)) + if lic_list: + lic_exp_list.extend(lic_list) + + if about.other_license_expression.value: + special_char_in_expression, lic_list, invalid_lic_exp = parse_license_expression( + about.other_license_expression.value) + if special_char_in_expression: + if afp: + msg = (afp + u": The following character(s) cannot be in the other_license_expression: " + + str(special_char_in_expression)) + else: + msg = (u"This declared_license_expression is invalid: " + + str(invalid_lic_exp)) + errors.append(Error(ERROR, msg)) + if invalid_lic_exp: + if afp: + msg = (afp + u": This other_license_expression is invalid: " + + str(invalid_lic_exp)) + else: + msg = (u"This other_license_expression is invalid: " + + str(invalid_lic_exp)) + errors.append(Error(ERROR, msg)) + if lic_list: + lic_exp_list.extend(lic_list) + + if about.license_expression.value: + special_char_in_expression, lic_list, invalid_lic_exp = parse_license_expression( + about.license_expression.value) + if special_char_in_expression: + if afp: + msg = (afp + u": The following character(s) cannot be in the license_expression: " + + 
str(special_char_in_expression)) + else: + msg = (u"The following character(s) cannot be in the license_expression: " + + str(special_char_in_expression)) + errors.append(Error(ERROR, msg)) + if invalid_lic_exp: + if afp: + msg = (afp + u": This license_expression is invalid: " + + str(invalid_lic_exp)) + else: + msg = (u"This license_expression is invalid: " + + str(invalid_lic_exp)) + errors.append(Error(ERROR, msg)) + if lic_list: + lic_exp_list.extend(lic_list) + if not about.license_key.value: + about.license_key.value = lic_list + + if lic_exp_list: + for lic_key in lic_exp_list: + if not lic_key in captured_license: + lic_url = '' + license_name = '' + license_filename = '' + license_text = '' + spdx_license_key = '' + detail_list = [] + captured_license.append(lic_key) + if api_key: + license_data, errs = api.get_license_details_from_api( + url, api_key, lic_key) + # Catch incorrect API URL + if errs: + _, msg = errs[0] + if msg == "Invalid '--api_url'. License generation is skipped.": + errors.extend(errs) + return key_text_dict, errors + for severity, message in errs: + msg = (afp + ": " + message) + errors.append(Error(severity, msg)) + # We don't want to actually get the license information from the + # check utility + if from_check: + continue + if not license_data: + continue + license_name = license_data.get('short_name', '') + license_text = license_data.get('full_text', '') + spdx_license_key = license_data.get( + 'spdx_license_key', '') + license_filename = lic_key + '.LICENSE' + lic_url = lic_urn + lic_key + else: + license_url = url + lic_key + '.json' + license_text_url = url + lic_key + '.LICENSE' + try: + response = head(license_url) + if response.status_code < 400: + json_url_content = get( + license_url).text + # We don't want to actually get the license + # information from the check utility + if from_check: + continue + data = json.loads(json_url_content) + license_name = data['short_name'] + license_text = get( + license_text_url).text + license_filename = data['key'] + '.LICENSE' + lic_url = url + license_filename + spdx_license_key = data['spdx_license_key'] + else: + if afp: + msg = afp + u" : Invalid 'license': " + lic_key + else: + msg = u"Invalid 'license': " + lic_key + errors.append(Error(ERROR, msg)) + continue + except exceptions.RequestException as e: + msg = f"An error occurred while trying to access the URL: {e}" + errors.append(Error(ERROR, msg)) + if not from_check: + detail_list.append(license_name) + detail_list.append(license_filename) + detail_list.append(license_text) + detail_list.append(lic_url) + detail_list.append(spdx_license_key) + key_text_dict[lic_key] = detail_list + return key_text_dict, errors + + +def convert_spdx_expression_to_lic_expression(spdx_key, spdx_lic_dict): + """ + Translate the spdx_license_expression to license_expression and return + errors if spdx_license_key is not matched + """ + value = "" + if spdx_key in spdx_lic_dict: + value = spdx_lic_dict[spdx_key] + else: + if spdx_key.startswith('('): + mod_key = spdx_key.partition('(')[2] + value = '(' + \ + convert_spdx_expression_to_lic_expression( + mod_key, spdx_lic_dict) + elif spdx_key.endswith(')'): + mod_key = spdx_key.rpartition(')')[0] + value = convert_spdx_expression_to_lic_expression( + mod_key, spdx_lic_dict) + ')' + else: + # This can be operator or key that don't have match + value = spdx_key + return value + + +def parse_license_expression(lic_expression): + licensing = Licensing() + lic_list = [] + invalid_lic_exp = '' + special_char = 
detect_special_char(lic_expression)
+    if not special_char:
+        # Parse the license expression and save it into a list
+        try:
+            lic_list = licensing.license_keys(lic_expression)
+        except Exception:
+            invalid_lic_exp = lic_expression
+    return special_char, lic_list, invalid_lic_exp
+
+
+def detect_special_char(expression):
+    not_support_char = [
+        '!', '@', '#', '$', '^', '&', '*', '=', '{', '}',
+        '|', '[', ']', '\\', ':', ';', '<', '>', '?', ',', '/']
+    special_character = []
+    for char in not_support_char:
+        if char in expression:
+            special_character.append(char)
+    return special_character
+
+
+def valid_api_url(api_url):
+    try:
+        response = get(api_url)
+        # A 403 status is expected when api_url points to DJE and no API key
+        # is provided. A 200 status means the connection to the ScanCode
+        # LicenseDB succeeded. Any other status code or exception is treated
+        # as an invalid api_url.
+        if response.status_code == 403 or response.status_code == 200:
+            return True
+        else:
+            return False
+    except Exception:
+        return False
diff --git a/.venv/lib/python3.11/site-packages/attributecode/templates/default_html.template b/.venv/lib/python3.11/site-packages/attributecode/templates/default_html.template
new file mode 100644
index 0000000..c7c31b6
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/attributecode/templates/default_html.template
@@ -0,0 +1,84 @@
+
+Open Source Software Information
+

OPEN SOURCE SOFTWARE INFORMATION

+

{{ vartext['subtitle'] }}

+
+

Licenses, acknowledgments and required copyright notices for + open source components:

+
+ + + +
+ + {% for about_object in abouts %} +
+

{{ about_object.name.value }} {% if about_object.version.value %}{{ about_object.version.value }}{% endif %}

+ {% if about_object.license_expression.value %} +

This component is licensed under {{ about_object.license_expression.value }}

+ {% endif %} + {% if about_object.copyright.value %} +
Copyright: {{about_object.copyright.value}}
+ {% endif %} + {% if about_object.notice_file.value %} + {% for notice in about_object.notice_file.value %} +
+                    {{ about_object.notice_file.value[notice] }}
+                
+ {% endfor %} + {% endif %} + {% if about_object.license_key.value %} + {% for license_key in about_object.license_key.value %} + {% if license_key in common_licenses %} +

Full text of {{ license_key }} is available at the end of this document.

+ {% else %} + {% for license in licenses_list %} + {% if license_key == license.key %} +

{{ license.key }}

+
{{ license.text | e }}
+ {% endif %} + {% endfor %} + {% endif %} + {% endfor %} + {% else %} + {% if about_object.license_file.value %} + {% for lic_file_name in about_object.license_file.value %} + {% if about_object.license_file.value[lic_file_name] %} +
 {{ about_object.license_file.value[lic_file_name] | e}} 
+ {% endif %} + {% endfor %} + {% endif %} + {% endif %} +
+ {% endfor %} + +
+ +

Common Licenses Used in This Product

+ {% for license in licenses_list %} + {% if license.key in common_licenses %} +

{{ license.key }}

+
 {{ license.text | e }} 
+ {% endif %} + {% endfor %} + +

End

+
+This file was generated with AttributeCode version: {{ tkversion }} on: {{ utcnow }} (UTC)
+
diff --git a/.venv/lib/python3.11/site-packages/attributecode/templates/default_json.template b/.venv/lib/python3.11/site-packages/attributecode/templates/default_json.template
new file mode 100644
index 0000000..dc19a5a
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/attributecode/templates/default_json.template
@@ -0,0 +1,18 @@
+{
+    "ossAttribution": {
+        "title": "Open Source Software Information",
+        "entries": [
+        {% for about_object in abouts %}
+            {
+                "name": "{{ about_object.name.value }}"{% if about_object.version.value or about_object.license_expression.value -%},{%- endif %}
+                {% if about_object.version.value -%}
+                "version": "{{ about_object.version.value }}"{% if about_object.license_expression.value -%},{%- endif %}
+                {%- endif %}
+                {% if about_object.license_expression.value -%}
+                "license_expression": "{{ about_object.license_expression.value }}"
+                {%- endif %}
+            }{% if not loop.last -%},{%- endif %}
+        {%- endfor %}
+        ]
+    }
+}
\ No newline at end of file
diff --git a/.venv/lib/python3.11/site-packages/attributecode/templates/license_ref.template b/.venv/lib/python3.11/site-packages/attributecode/templates/license_ref.template
new file mode 100644
index 0000000..1d0a539
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/attributecode/templates/license_ref.template
@@ -0,0 +1,69 @@
+
+{{ vartext['title'] }}
+

{{ vartext['title'] }}

+ + +
+
+ {% for license in licenses_list %} +

+ {{ license.name }} + +

+ {% endfor %} + +
+
+ + {% for license in licenses_list %} +
+

{{ license.name }}

+

This product contains the following open source software packages licensed under the terms of the license: {{license.name}}

+ +
+ {%for about_object in abouts %} + {% if loop.first %} + {% if license.url %} +

License Gallery URL: {{license.url}}

+ {% endif %} + {% endif %} + {% if license.key in about_object.license_key.value %} +
  • {{ about_object.name.value }}{% if about_object.version.value %} - Version {{ about_object.version.value }}{% endif %}
  • + {% if about_object.copyright.value %} +
    Copyright: {{about_object.copyright.value}}
    + {% endif %} + {% if about_object.notice_file.value %} + {% for notice in about_object.notice_file.value %} +
    +                                {{ about_object.notice_file.value[notice] }}
    +                            
    + {% endfor %} + {% endif %} + {% endif %} + {% if loop.last %} +
    {{license.text}}
    + {% endif %} + {% endfor %} +
    +
    + {% endfor %} +
    +
    +

    End

    + + diff --git a/.venv/lib/python3.11/site-packages/attributecode/templates/list.csv b/.venv/lib/python3.11/site-packages/attributecode/templates/list.csv new file mode 100644 index 0000000..b576a9b --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attributecode/templates/list.csv @@ -0,0 +1,4 @@ +Name,Version,DejaCode License,Homepage +{% for about in abouts %} +"{{about.name}}","{{about.version}}","{{about.license_name}}","{{about.homepage_url}}" +{% endfor %} diff --git a/.venv/lib/python3.11/site-packages/attributecode/templates/scancode_html.template b/.venv/lib/python3.11/site-packages/attributecode/templates/scancode_html.template new file mode 100644 index 0000000..d858258 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attributecode/templates/scancode_html.template @@ -0,0 +1,88 @@ + + + + + Open Source Software Information + + + +

    OPEN SOURCE SOFTWARE INFORMATION

    +

    {{ vartext['subtitle'] }}

    +
    +

    Licenses, acknowledgments and required copyright notices for + open source components:

    +
    + +
    + {% set index = namespace(value=0) %} + {% for about_object in abouts %} + {% set captured = {} %} + {% if about_object.license_key.value %} + {% if not captured[about_object.name.value] %} +

    {{ about_object.name.value }}{% if about_object.version.value %} {{ about_object.version.value }}{% endif %}

    + {% set _ = captured.update({ about_object.name.value: true }) %} + {% set index.value = index.value + 1 %} + {% endif %} + {% endif %} + {% endfor %} +
    + +
    + + {% set index = namespace(value=0) %} + {% for about_object in abouts %} + {% set captured = {} %} + {% if about_object.license_key.value %} + {% if not captured[about_object.name.value] %} +
    +

    {{ about_object.name.value }} {% if about_object.version.value %}{{ about_object.version.value }}{% endif %}

    + {% set _ = captured.update({ about_object.name.value: true }) %} + {% set index.value = index.value + 1 %} + {% endif %} + {% if about_object.copyrights.value %} + {% for copyright in about_object.copyrights.value %} +
     {{ copyright['copyright'] }} 
    + {% endfor %} + {% endif %} + + {% for lic_key_exp in about_object.license_key_expression.value %} +

    This component is licensed under {{ lic_key_exp }}

    + {% endfor %} + + {% for lic_key_exp in about_object.license_key.value %} + {% for lic_key in lic_key_exp %} + {% if lic_key in common_licenses %} +

    Full text of {{ lic_key }} is available at the end of this document.

    + {% else %} + {% for license in licenses_list %} + {% if lic_key == license.key %} +

    {{ license.key }}

    +
     {{ license.text | e }} 
    + {% endif %} + {% endfor %} + {% endif %} + {% endfor %} + {% endfor %} + {% endif %} +
    + {% endfor %} + +
    + +

    Common Licenses Used in This Product

    + {% for license in licenses_list %} + {% if license.key in common_licenses %} +

    {{ license.key }}

    +
     {{ license.text | e }} 
    + {% endif %} + {% endfor %} + +

    End

    + + This file was generated with AttributeCode version: {{ tkversion }} on: {{ utcnow }} (UTC) + + diff --git a/.venv/lib/python3.11/site-packages/attributecode/transform.py b/.venv/lib/python3.11/site-packages/attributecode/transform.py new file mode 100644 index 0000000..46b2412 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attributecode/transform.py @@ -0,0 +1,446 @@ +#!/usr/bin/env python +# -*- coding: utf8 -*- +# ============================================================================ +# Copyright (c) nexB Inc. http://www.nexb.com/ - All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +import json +from collections import Counter, OrderedDict +from itertools import zip_longest + +import attr +import openpyxl + +from attributecode import CRITICAL +from attributecode import Error +from attributecode import saneyaml +from attributecode.util import csv +from attributecode.util import replace_tab_with_spaces + + +def transform_csv(location): + """ + Read a CSV file at `location` and convert data into list of dictionaries. + """ + errors = [] + new_data = [] + rows = read_csv_rows(location) + data = iter(rows) + names = next(rows) + field_names = strip_trailing_fields_csv(names) + dupes = check_duplicate_fields(field_names) + + if dupes: + msg = u'Duplicated field name: %(name)s' + for name in dupes: + errors.append(Error(CRITICAL, msg % locals())) + + if not errors: + # Convert to dicts + new_data = [dict(zip_longest(field_names, item)) for item in data] + + return new_data, errors + + +def transform_json(location): + """ + Read a JSON file at `location` and convert data into list of dictionaries. + """ + errors = [] + new_data = [] + items = read_json(location) + data = normalize_dict_data(items) + new_data = strip_trailing_fields_json(data) + + return new_data, errors + + +def transform_excel(location, worksheet=None): + """ + Read a XLSX file at `location` and convert data into list of dictionaries. + """ + errors = [] + new_data = [] + dupes, new_data = read_excel(location, worksheet) + if dupes: + msg = u'Duplicated field name: %(name)s' + for name in dupes: + errors.append(Error(CRITICAL, msg % locals())) + return new_data, errors + + +def strip_trailing_fields_csv(names): + """ + Strip trailing spaces for field names #456 + """ + field_names = [] + for name in names: + field_names.append(name.strip()) + return field_names + + +def strip_trailing_fields_json(items): + """ + Strip trailing spaces for field name #456 + """ + data = [] + for item in items: + od = {} + for field in item: + stripped_field_name = field.strip() + od[stripped_field_name] = item[field] + data.append(od) + return data + + +def normalize_dict_data(data): + """ + Check if the input data from scancode-toolkit and normalize to a normal + dictionary if it is. + Return a list type of normalized dictionary. 
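+
+    For example (illustrative), a scancode-toolkit style input such as
+
+        {"headers": [{"tool_name": "scancode-toolkit"}], "files": [{"path": "a.c"}]}
+
+    is reduced to [{"path": "a.c"}], while a plain dict input is wrapped into
+    a one-item list.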
+ """ + try: + # Check if this is a JSON output from scancode-toolkit + if (data["headers"][0]["tool_name"] == "scancode-toolkit"): + # only takes data inside "files" + new_data = data["files"] + except: + new_data = data + if not isinstance(new_data, list): + new_data = [new_data] + return new_data + + +def transform_data(data, transformer): + """ + Read a dictionary and apply transformations using the + `transformer` Transformer. + Return a tuple of: + ([field names...], [transformed ordered dict...], [Error objects..]) + """ + renamed_field_data = transformer.apply_renamings(data) + + if transformer.field_filters: + renamed_field_data = list( + transformer.filter_fields(renamed_field_data)) + + if transformer.exclude_fields: + renamed_field_data = list( + transformer.filter_excluded(renamed_field_data)) + + errors = transformer.check_required_fields(renamed_field_data) + if errors: + return data, errors + return renamed_field_data, errors + + +tranformer_config_help = ''' +A transform configuration file is used to describe which transformations and +validations to apply to a source CSV file. This is a simple text file using YAML +format, using the same format as an .ABOUT file. + +The attributes that can be set in a configuration file are: + +* field_renamings: +An optional map of source CSV or JSON field name to target CSV/JSON new field name that +is used to rename CSV fields. + +For instance with this configuration the fields "Directory/Location" will be +renamed to "about_resource" and "foo" to "bar": + field_renamings: + about_resource : 'Directory/Location' + bar : foo + +The renaming is always applied first before other transforms and checks. All +other field names referenced below are these that exist AFTER the renamings +have been applied to the existing field names. + +* required_fields: +An optional list of required field names that must have a value, beyond the +standard fields names. If a source CSV/JSON does not have such a field or a row is +missing a value for a required field, an error is reported. + +For instance with this configuration an error will be reported if the fields +"name" and "version" are missing or if any row does not have a value set for +these fields: + required_fields: + - name + - version + +* field_filters: +An optional list of field names that should be kept in the transformed CSV/JSON. If +this list is provided, all the fields from the source CSV/JSON that should be kept +in the target CSV/JSON must be listed regardless of either standard or required +fields. If this list is not provided, all source CSV/JSON fields are kept in the +transformed target CSV/JSON. + +For instance with this configuration the target CSV/JSON will only contains the "name" +and "version" fields and no other field: + field_filters: + - name + - version + +* exclude_fields: +An optional list of field names that should be excluded in the transformed CSV/JSON. If +this list is provided, all the fields from the source CSV/JSON that should be excluded +in the target CSV/JSON must be listed. Excluding standard or required fields will cause +an error. If this list is not provided, all source CSV/JSON fields are kept in the +transformed target CSV/JSON. 
+ +For instance with this configuration the target CSV/JSON will not contain the "type" +and "temp" fields: + exclude_fields: + - type + - temp +''' + + +@attr.attributes +class Transformer(object): + __doc__ = tranformer_config_help + + field_renamings = attr.attrib(default=attr.Factory(dict)) + required_fields = attr.attrib(default=attr.Factory(list)) + field_filters = attr.attrib(default=attr.Factory(list)) + exclude_fields = attr.attrib(default=attr.Factory(list)) + + # a list of all the standard fields from AboutCode toolkit + standard_fields = attr.attrib(default=attr.Factory(list), init=False) + # a list of the subset of standard fields that are essential and MUST be + # present for AboutCode toolkit to work + essential_fields = attr.attrib(default=attr.Factory(list), init=False) + + # called by attr after the __init__() + def __attrs_post_init__(self, *args, **kwargs): + from attributecode.model import About + about = About() + self.essential_fields = list(about.required_fields) + self.standard_fields = [f.name for f in about.all_fields()] + + @classmethod + def default(cls): + """ + Return a default Transformer with built-in transforms. + """ + return cls( + field_renamings={}, + required_fields=[], + field_filters=[], + exclude_fields=[], + ) + + @classmethod + def from_file(cls, location): + """ + Load and return a Transformer instance from a YAML configuration file at + `location`. + """ + with open(location, encoding='utf-8', errors='replace') as conf: + data = saneyaml.load(replace_tab_with_spaces(conf.read())) + return cls( + field_renamings=data.get('field_renamings', {}), + required_fields=data.get('required_fields', []), + field_filters=data.get('field_filters', []), + exclude_fields=data.get('exclude_fields', []), + ) + + def check_required_fields(self, data): + """ + Return a list of Error for a `data` list of ordered dict where a + dict is missing a value for a required field name. + """ + errors = [] + required = set(self.essential_fields + self.required_fields) + if not required: + return [] + + for rn, item in enumerate(data): + missings = [rk for rk in required if not item.get(rk)] + if not missings: + continue + + missings = ', '.join(missings) + msg = 'Row {rn} is missing required values for fields: {missings}' + errors.append(Error(CRITICAL, msg.format(**locals()))) + + return errors + + def apply_renamings(self, data): + """ + Return a tranformed list of `field_names` where fields are renamed + based on this Transformer configuration. + """ + renamings = self.field_renamings + renamed_to_list = list(renamings.keys()) + renamed_from_list = list(renamings.values()) + if not renamings: + return data + if isinstance(data, dict): + renamed_obj = {} + for key, value in data.items(): + if key in renamed_from_list: + for idx, renamed_from_key in enumerate(renamed_from_list): + if key == renamed_from_key: + renamed_key = renamed_to_list[idx] + renamed_obj[renamed_key] = self.apply_renamings( + value) + else: + renamed_obj[key] = self.apply_renamings(value) + return renamed_obj + elif isinstance(data, list): + return [self.apply_renamings(item) for item in data] + else: + return data + + """ + def clean_fields(self, field_names): + + Apply standard cleanups to a list of fields and return these. + + if not field_names: + return field_names + return [c.strip().lower() for c in field_names] + """ + + def filter_fields(self, data): + """ + Yield transformed dicts from a `data` list of dicts keeping only + fields with a name in the `field_filters`of this Transformer. 
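+    For example (illustrative), with field_filters=['name', 'version'],
+    the entry {'name': 'foo', 'version': '1.0', 'type': 'lib'} yields
+    {'name': 'foo', 'version': '1.0'}.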
+        Return the data unchanged if no `field_filters` exists.
+        """
+        # field_filters = set(self.clean_fields(self.field_filters))
+        field_filters = set(self.field_filters)
+        for entry in data:
+            yield {k: v for k, v in entry.items() if k in field_filters}
+
+    def filter_excluded(self, data):
+        """
+        Yield transformed dicts from a `data` list of dicts, excluding
+        fields with names in the `exclude_fields` of this Transformer.
+        Return the data unchanged if no `exclude_fields` exists.
+        """
+        # exclude_fields = set(self.clean_fields(self.exclude_fields))
+        exclude_fields = set(self.exclude_fields)
+        filtered_list = []
+        for entry in data:
+            result = {}
+            for k, v in entry.items():
+                if isinstance(v, list):
+                    result[k] = self.filter_excluded(v)
+                elif k not in exclude_fields:
+                    result[k] = v
+            filtered_list.append(result)
+            # yield result
+        # yield {k: v for k, v in entry.items() if k not in exclude_fields}
+        return filtered_list
+
+
+def check_duplicate_fields(field_names):
+    """
+    Check that there are no duplicates in the `field_names` list of field name
+    strings, ignoring case. Return a list of unique duplicated field names.
+    """
+    counted = Counter(c.lower() for c in field_names)
+    return [field for field, count in sorted(counted.items()) if count > 1]
+
+
+def read_csv_rows(location):
+    """
+    Yield rows (as a list of values) from a CSV file at `location`.
+    """
+    with open(location, encoding='utf-8', errors='replace') as csvfile:
+        reader = csv.reader(csvfile)
+        for row in reader:
+            yield row
+
+
+def read_json(location):
+    """
+    Return the deserialized content of the JSON file at `location`.
+    """
+    with open(location, encoding='utf-8', errors='replace') as jsonfile:
+        return json.load(jsonfile)
+
+
+def write_csv(location, data):
+    """
+    Write a CSV file at `location` with the `data` which is a list of ordered dicts.
+    """
+    field_names = list(data[0].keys())
+    with open(location, 'w', encoding='utf-8', newline='\n', errors='replace') as csvfile:
+        writer = csv.DictWriter(csvfile, fieldnames=field_names)
+        writer.writeheader()
+        writer.writerows(data)
+
+
+def write_json(location, data):
+    """
+    Write the `data` list of ordered dicts as a JSON file at `location`.
+    """
+    with open(location, 'w') as jsonfile:
+        json.dump(data, jsonfile, indent=3)
+
+
+def read_excel(location, worksheet=None):
+    """
+    Read the XLSX file at `location` and return a list of ordered
+    dictionaries, one for each row.
+    """
+    results = []
+    errors = []
+    input_bom = openpyxl.load_workbook(location)
+    if worksheet:
+        sheet_obj = input_bom[worksheet]
+    else:
+        sheet_obj = input_bom.active
+    max_col = sheet_obj.max_column
+
+    index = 1
+    col_keys = []
+    mapping_dict = {}
+    while index <= max_col:
+        value = sheet_obj.cell(row=1, column=index).value
+        if value in col_keys:
+            msg = 'Duplicated column name, ' + str(value) + ', detected.'
+ errors.append(Error(CRITICAL, msg)) + return errors, results + if value in mapping_dict: + value = mapping_dict[value] + col_keys.append(value) + index = index + 1 + + for row in sheet_obj.iter_rows(min_row=2, values_only=True): + row_dict = OrderedDict() + index = 0 + while index < max_col: + value = row[index] + if value: + row_dict[col_keys[index]] = value + else: + row_dict[col_keys[index]] = '' + index = index + 1 + results.append(row_dict) + return errors, results + + +def write_excel(location, data): + wb = openpyxl.Workbook() + ws = wb.active + + # Get the header + headers = list(data[0].keys()) + ws.append(headers) + + for elements in data: + ws.append([elements.get(h) for h in headers]) + + wb.save(location) diff --git a/.venv/lib/python3.11/site-packages/attributecode/util.py b/.venv/lib/python3.11/site-packages/attributecode/util.py new file mode 100644 index 0000000..5f398d7 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attributecode/util.py @@ -0,0 +1,873 @@ +#!/usr/bin/env python +# -*- coding: utf8 -*- +# ============================================================================ +# Copyright (c) nexB Inc. http://www.nexb.com/ - All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +from collections import OrderedDict + +import codecs +import csv +import json +import ntpath +import openpyxl +import os +import posixpath +import re +import shutil +import string +import sys +from distutils.dir_util import copy_tree +from itertools import zip_longest + +from attributecode import CRITICAL +from attributecode import WARNING +from attributecode import Error + +on_windows = "win32" in sys.platform + +# boolean field name +boolean_fields = [ + "redistribute", + "attribute", + "track_change", + "modified", + "internal_use_only", +] +file_fields = ["about_resource", "notice_file", "changelog_file", "author_file"] + + +def to_posix(path): + """ + Return a path using the posix path separator given a path that may contain + posix or windows separators, converting "\\" to "/". NB: this path will + still be valid in the windows explorer (except for a UNC or share name). It + will be a valid path everywhere in Python. It will not be valid for windows + command line operations. + """ + return path.replace(ntpath.sep, posixpath.sep) + + +UNC_PREFIX = "\\\\?\\" +UNC_PREFIX_POSIX = to_posix(UNC_PREFIX) +UNC_PREFIXES = ( + UNC_PREFIX_POSIX, + UNC_PREFIX, +) + +valid_file_chars = "_-.+()~[]{}@%!$," +invalid_file_chars = string.punctuation.translate( + str.maketrans("", "", valid_file_chars) +) + + +def invalid_chars(path): + """ + Return a list of invalid characters in the file name of `path`. 
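+
+    For example (illustrative):
+
+        invalid_chars('this/is-okay.txt')   # -> []
+        invalid_chars('this/has?marks.txt') # -> ['?']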
+ """ + path = to_posix(path) + rname = resource_name(path) + name = rname.lower() + + return [c for c in name if c in invalid_file_chars] + + +def check_file_names(paths): + """ + Given a sequence of file paths, check that file names are valid and that + there are no case-insensitive duplicates in any given directories. + Return a list of errors. + + From spec: + The case of a file name is not significant. On case-sensitive file + systems (such as Linux), a tool must raise an error if two ABOUT files + stored in the same directory have the same lowercase file name. + """ + # FIXME: this should be a defaultdicts that accumulates all duplicated paths + seen = {} + errors = [] + for orig_path in paths: + path = orig_path + invalid = invalid_chars(path) + if invalid: + invalid = "".join(invalid) + msg = "Invalid characters %(invalid)r in file name at: %(path)r" % locals() + errors.append(Error(CRITICAL, msg)) + + path = to_posix(orig_path) + name = resource_name(path).lower() + parent = posixpath.dirname(path) + path = posixpath.join(parent, name) + path = posixpath.normpath(path) + path = posixpath.abspath(path) + existing = seen.get(path) + if existing: + msg = ( + "Duplicate files: %(orig_path)r and %(existing)r " + "have the same case-insensitive file name" % locals() + ) + errors.append(Error(CRITICAL, msg)) + else: + seen[path] = orig_path + return errors + + +def wrap_boolean_value(context): + updated_context = "" + for line in context.splitlines(): + """ + wrap the boolean value in quote + """ + key = line.partition(":")[0] + value = line.partition(":")[2].strip() + value = '"' + value + '"' + if key in boolean_fields and not value == "": + updated_context += key + ": " + value + "\n" + else: + updated_context += line + "\n" + return updated_context + + +def replace_tab_with_spaces(context): + updated_context = "" + for line in context.splitlines(): + """ + Replace tab with 4 spaces + """ + updated_context += line.replace("\t", " ") + "\n" + return updated_context + + +# TODO: rename to normalize_path +def get_absolute(location): + """ + Return an absolute normalized location. + """ + location = os.path.expanduser(location) + location = os.path.expandvars(location) + location = os.path.normpath(location) + location = os.path.abspath(location) + return location + + +def get_locations(location): + """ + Return a list of locations of files given the `location` of a + a file or a directory tree containing ABOUT files. + File locations are normalized using posix path separators. + """ + location = add_unc(location) + location = get_absolute(location) + assert os.path.exists(location) + + if os.path.isfile(location): + yield location + else: + for base_dir, _, files in os.walk(location): + for name in files: + bd = to_posix(base_dir) + yield posixpath.join(bd, name) + + +def get_about_locations(location, exclude=None): + """ + Return a list of locations of ABOUT files given the `location` of a + a file or a directory tree containing ABOUT files. + File locations are normalized using posix path separators. 
+ """ + pattern_characters_list = ["*", "?", "[", "!"] + import fnmatch + + for loc in get_locations(location): + exclude_match = False + if exclude: + for item in exclude: + is_pattern = False + for character in pattern_characters_list: + if character in item: + is_pattern = True + break + exclude_path = posixpath.join(location, item) + normalized_excluded_path = posixpath.normpath( + add_unc(exclude_path).replace("\\", "/") + ) + # Since 'normpath' removes the trailing '/', it is necessary + # to append the '/' back for proper matching. + if not is_pattern and item.endswith("/"): + normalized_excluded_path += "/" + if is_pattern: + if fnmatch.fnmatch(loc, normalized_excluded_path): + exclude_match = True + break + else: + if normalized_excluded_path in loc: + exclude_match = True + break + if not exclude_match: + if is_about_file(loc): + yield loc + + +def norm(p): + """ + Normalize the path + """ + if p.startswith(UNC_PREFIX) or p.startswith(to_posix(UNC_PREFIX)): + p = p.strip(UNC_PREFIX).strip(to_posix(UNC_PREFIX)) + p = to_posix(p) + p = p.strip(posixpath.sep) + p = posixpath.normpath(p) + return p + + +def get_spdx_key_and_lic_key_from_licdb(): + """ + Return a dictionary list that fetch all licenses from licenseDB. The + "spdx_license_key" will be the key of the dictionary and the "license_key" + will be the value of the directionary + """ + import requests + + lic_dict = dict() + + # URL of the license index + url = "https://scancode-licensedb.aboutcode.org/index.json" + + """ + Sample of one of the license in the index.json + { + "license_key": "bsd-new", + "category": "Permissive", + "spdx_license_key": "BSD-3-Clause", + "other_spdx_license_keys": [ + "LicenseRef-scancode-libzip" + ], + "is_exception": false, + "is_deprecated": false, + "json": "bsd-new.json", + "yaml": "bsd-new.yml", + "html": "bsd-new.html", + "license": "bsd-new.LICENSE" + }, + """ + response = requests.get(url) + # Check if the request was successful (status code 200) + if response.status_code == 200: + # Retrieve the JSON data from the response + licenses_index = response.json() + + for license in licenses_index: + lic_dict[license["spdx_license_key"]] = license["license_key"] + if license["other_spdx_license_keys"]: + for other_spdx in license["other_spdx_license_keys"]: + lic_dict[other_spdx] = license["license_key"] + + return lic_dict + + +def get_relative_path(base_loc, full_loc): + """ + Return a posix path for a given full location relative to a base location. + The first segment of the different between full_loc and base_loc will become + the first segment of the returned path. + """ + base = norm(base_loc) + path = norm(full_loc) + + assert path.startswith(base), ( + "Cannot compute relative path: %(path)r does not start with %(base)r" % locals() + ) + base_name = resource_name(base) + no_dir = base == base_name + same_loc = base == path + if same_loc: + # this is the case of a single file or single dir + if no_dir: + # we have no dir: the full path is the same as the resource name + relative = base_name + else: + # we have at least one dir + parent_dir = posixpath.dirname(base) + parent_dir = resource_name(parent_dir) + relative = posixpath.join(parent_dir, base_name) + else: + relative = path[len(base) + 1 :] + # We don't want to keep the first segment of the root of the returned path. 
+        # See https://github.com/nexB/attributecode/issues/276
+        # relative = posixpath.join(base_name, relative)
+    return relative
+
+
+def to_native(path):
+    """
+    Return a path using the current OS path separator given a path that may
+    contain posix or windows separators, converting "/" to "\\" on windows
+    and "\\" to "/" on posix OSes.
+    """
+    path = path.replace(ntpath.sep, os.path.sep)
+    path = path.replace(posixpath.sep, os.path.sep)
+    return path
+
+
+def is_about_file(path):
+    """
+    Return True if the path represents a valid ABOUT file name.
+    """
+    if path:
+        path = path.lower()
+        return path.endswith(".about") and path != ".about"
+
+
+def resource_name(path):
+    """
+    Return the file or directory name from a path.
+    """
+    path = path.strip()
+    path = to_posix(path)
+    path = path.rstrip(posixpath.sep)
+    _left, right = posixpath.split(path)
+    return right.strip()
+
+
+def load_csv(location):
+    """
+    Read the CSV file at `location` and return a list of ordered dictionaries,
+    one for each row.
+    """
+    results = []
+    with open(location, mode="r", encoding="utf-8-sig", errors="replace") as csvfile:
+        for row in csv.DictReader(csvfile):
+            # convert all the column keys to lower case
+            updated_row = {key.lower().strip(): value for key, value in row.items()}
+            results.append(updated_row)
+    return results
+
+
+def load_json(location):
+    """
+    Read the JSON file at `location` and return a list of ordered dicts, one
+    for each entry.
+    """
+    with open(location) as json_file:
+        results = json.load(json_file)
+
+    if not isinstance(results, list):
+        results = [results]
+
+    return results
+
+
+# FIXME: rename to is_online: BUT do we really need this at all????
+# This is needed to check for the network connection when user wants to fetch
+# the licenses from DJE/LicenseDB
+def have_network_connection():
+    """
+    Return True if an HTTP connection to some public web site is possible.
+    """
+    import requests
+
+    url = "https://scancode-licensedb.aboutcode.org/"
+    try:
+        response = requests.get(url)
+        return response.status_code == 200
+    except requests.exceptions.RequestException:
+        return False
+
+
+def extract_zip(location):
+    """
+    Extract a zip file at location in a temp directory and return the temporary
+    directory where the archive was extracted.
+    """
+    import zipfile
+    import tempfile
+
+    if not zipfile.is_zipfile(location):
+        raise Exception("Incorrect zip file %(location)r" % locals())
+
+    archive_base_name = os.path.basename(location).replace(".zip", "")
+    base_dir = tempfile.mkdtemp(prefix="aboutcode-toolkit-extract-")
+    target_dir = os.path.join(base_dir, archive_base_name)
+    target_dir = add_unc(target_dir)
+    os.makedirs(target_dir)
+
+    if target_dir.endswith((ntpath.sep, posixpath.sep)):
+        target_dir = target_dir[:-1]
+
+    with zipfile.ZipFile(location) as zipf:
+        for info in zipf.infolist():
+            name = info.filename
+            content = zipf.read(name)
+            target = os.path.join(target_dir, name)
+            is_dir = target.endswith((ntpath.sep, posixpath.sep))
+            if is_dir:
+                target = target[:-1]
+            parent = os.path.dirname(target)
+            if on_windows:
+                target = target.replace(posixpath.sep, ntpath.sep)
+                parent = parent.replace(posixpath.sep, ntpath.sep)
+            if not os.path.exists(parent):
+                os.makedirs(add_unc(parent))
+            if not content and is_dir:
+                if not os.path.exists(target):
+                    os.makedirs(add_unc(target))
+            if not os.path.exists(target):
+                with open(target, "wb") as f:
+                    f.write(content)
+    return target_dir
+
+
+def add_unc(location):
+    """
+    Convert a `location` to an absolute Windows UNC path to support long paths
+    on Windows.
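+    For example (illustrative): on Windows, 'C:\temp\file.txt' becomes
+    '\\?\C:\temp\file.txt'.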
+    Return the location unchanged if not on Windows. See
+    https://msdn.microsoft.com/en-us/library/aa365247.aspx
+    """
+    if on_windows and not location.startswith(UNC_PREFIX):
+        if location.startswith(UNC_PREFIX_POSIX):
+            return UNC_PREFIX + os.path.abspath(location.strip(UNC_PREFIX_POSIX))
+        return UNC_PREFIX + os.path.abspath(location)
+    return location
+
+
+def copy_license_notice_files(fields, base_dir, reference_dir, afp):
+    """
+    Given a list of (key, value) `fields` tuples, a `base_dir` where ABOUT
+    files and their companion LICENSE files are stored, an extra
+    `reference_dir` where reference license and notice files are stored, and
+    the `afp` about_file_path value, copy to the base_dir any license_file or
+    notice_file found in the reference_dir.
+    """
+    errors = []
+    copy_file_name = ""
+    for key, value in fields:
+        if key == "license_file" or key == "notice_file":
+            if value:
+                # This is to handle multiple license_file values in CSV format.
+                # The following code builds a list of the license file(s) that
+                # need to be copied.
+                # Note that *ONLY* the license_file field allows '\n'. Other
+                # file fields that contain '\n' will raise an error at the
+                # validation stage.
+                file_list = []
+                if "\n" in value:
+                    f_list = value.split("\n")
+                else:
+                    if not isinstance(value, list):
+                        f_list = [value]
+                    else:
+                        f_list = value
+                # The following code adopts the approach from #404: use a
+                # comma to list multiple files that refer to the same license.
+                for item in f_list:
+                    if "," in item:
+                        item_list = item.split(",")
+                        for i in item_list:
+                            file_list.append(i.strip())
+                    else:
+                        file_list.append(item)
+            else:
+                continue
+
+            for copy_file_name in file_list:
+                from_lic_path = posixpath.join(to_posix(reference_dir), copy_file_name)
+                about_file_dir = os.path.dirname(to_posix(afp)).lstrip("/")
+                to_lic_path = posixpath.join(to_posix(base_dir), about_file_dir)
+                if not os.path.exists(posixpath.join(to_lic_path, copy_file_name)):
+                    err = copy_file(from_lic_path, to_lic_path)
+                    if err:
+                        errors.append(err)
+    return errors
+
+
+def copy_file(from_path, to_path):
+    error = ""
+    # Return if the from_path is empty or None.
+    if not from_path:
+        return
+
+    if on_windows:
+        if not from_path.startswith(UNC_PREFIXES):
+            from_path = add_unc(from_path)
+        if not to_path.startswith(UNC_PREFIXES):
+            to_path = add_unc(to_path)
+
+    # Strip the white spaces
+    from_path = from_path.strip()
+    to_path = to_path.strip()
+    # Errors will be captured when doing the validation
+    if not os.path.exists(from_path):
+        return ""
+
+    if not posixpath.exists(to_path):
+        os.makedirs(to_path)
+    try:
+        if os.path.isdir(from_path):
+            # Copy the whole directory structure
+            if from_path.endswith("/"):
+                from_path = from_path.rpartition("/")[0]
+            folder_name = os.path.basename(from_path)
+            to_path = os.path.join(to_path, folder_name)
+            if os.path.exists(to_path):
+                msg = to_path + " already exists and is replaced by " + from_path
+                error = Error(WARNING, msg)
+            copy_tree(from_path, to_path)
+        else:
+            file_name = os.path.basename(from_path)
+            to_file_path = os.path.join(to_path, file_name)
+            if os.path.exists(to_file_path):
+                msg = (
+                    to_file_path + " already exists and is replaced by " + from_path
+                )
+                error = Error(WARNING, msg)
+            shutil.copy2(from_path, to_path)
+        return error
+    except Exception as e:
+        msg = "Cannot copy file at %(from_path)r."
% locals() + error = Error(CRITICAL, msg) + return error + + +def ungroup_licenses_from_sctk(value): + # Return a list of dictionary with lic_key and score + # extracted from SCTK scan + detected_license_list = [] + for detected_license in value: + for lic in detected_license["matches"]: + lic_exp = lic["license_expression"] + score = lic["score"] + detected_license_list.append({"lic_exp": lic_exp, "score": score}) + return detected_license_list + + +# FIXME: we should use a license object instead + + +def ungroup_licenses(licenses): + """ + Ungroup multiple licenses information + """ + lic_key = [] + lic_name = [] + lic_file = [] + lic_url = [] + spdx_lic_key = [] + lic_score = [] + lic_matched_text = [] + for lic in licenses: + if "key" in lic: + lic_key.append(lic["key"]) + if "name" in lic: + lic_name.append(lic["name"]) + if "file" in lic: + lic_file.append(lic["file"]) + if "url" in lic: + lic_url.append(lic["url"]) + if "spdx_license_key" in lic: + spdx_lic_key.append(lic["spdx_license_key"]) + if "score" in lic: + lic_score.append(lic["score"]) + if "matched_text" in lic: + lic_matched_text.append(lic["matched_text"]) + return ( + lic_key, + lic_name, + lic_file, + lic_url, + spdx_lic_key, + lic_score, + lic_matched_text, + ) + + +# FIXME: add docstring +def format_about_dict_output(about_dictionary_list): + formatted_list = [] + for element in about_dictionary_list: + row_list = dict() + for key in element: + if element[key]: + if isinstance(element[key], list): + row_list[key] = "\n".join((element[key])) + elif key == "about_resource": + row_list[key] = "\n".join((element[key].keys())) + else: + row_list[key] = element[key] + formatted_list.append(row_list) + return formatted_list + + +# FIXME: add docstring +def format_about_dict_for_json_output(about_dictionary_list): + licenses = ["license_key", "license_name", "license_file", "license_url"] + json_formatted_list = [] + for element in about_dictionary_list: + row_list = dict() + # FIXME: aboid using parallel list... use an object instead + license_key = [] + license_name = [] + license_file = [] + license_url = [] + + for key in element: + if element[key]: + # The 'about_resource' is an ordered dict + if key == "about_resource": + row_list[key] = list(element[key].keys())[0] + elif key in licenses: + if key == "license_key": + license_key = element[key] + elif key == "license_name": + license_name = element[key] + elif key == "license_file": + license_file = element[key] + elif key == "license_url": + license_url = element[key] + else: + row_list[key] = element[key] + + # Group the same license information in a list + license_group = list( + zip_longest(license_key, license_name, license_file, license_url) + ) + if license_group: + licenses_list = [] + for lic_group in license_group: + lic_dict = dict() + if lic_group[0]: + lic_dict["key"] = lic_group[0] + if lic_group[1]: + lic_dict["name"] = lic_group[1] + if lic_group[2]: + lic_dict["file"] = lic_group[2] + if lic_group[3]: + lic_dict["url"] = lic_group[3] + licenses_list.append(lic_dict) + row_list["licenses"] = licenses_list + json_formatted_list.append(row_list) + return json_formatted_list + + +def unique(sequence): + """ + Return a list of unique items found in sequence. Preserve the original + sequence order. 
+    For example:
+    >>> unique([1, 5, 3, 5])
+    [1, 5, 3]
+    """
+    deduped = []
+    for item in sequence:
+        if item not in deduped:
+            deduped.append(item)
+    return deduped
+
+
+def filter_errors(errors, minimum_severity=WARNING):
+    """
+    Return a list of the `errors` Error objects, keeping only the errors with
+    a severity at or above `minimum_severity`.
+    """
+    return [e for e in errors if e.severity >= minimum_severity]
+
+
+def create_dir(location):
+    """
+    Create a directory or directory tree at `location`, ensuring it is
+    readable and writeable.
+    """
+    import stat
+
+    if not os.path.exists(location):
+        os.makedirs(location)
+        os.chmod(location, stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH)
+
+
+def get_temp_dir(sub_dir_path=None):
+    """
+    Create a new unique temporary directory. If `sub_dir_path` is provided,
+    create that directory hierarchy inside it. Return the location of the
+    unique directory, joined with `sub_dir_path` if any.
+    """
+    new_temp_dir = build_temp_dir()
+
+    if sub_dir_path:
+        # create a sub directory hierarchy if requested
+        new_temp_dir = os.path.join(new_temp_dir, sub_dir_path)
+        create_dir(new_temp_dir)
+    return new_temp_dir
+
+
+def build_temp_dir(prefix="attributecode-"):
+    """
+    Create and return a new unique empty directory in the system temporary
+    directory, named using `prefix`.
+    """
+    import tempfile
+
+    location = tempfile.mkdtemp(prefix=prefix)
+    create_dir(location)
+    return location
+
+
+def get_file_text(file_name, reference):
+    """
+    Return an (error, text) tuple with the content of the
+    license_file/notice_file `file_name` read from the given `reference`
+    directory.
+    """
+    error = ""
+    text = ""
+    file_path = os.path.join(reference, file_name)
+    if not os.path.exists(file_path):
+        msg = "The file " + file_path + " does not exist"
+        error = Error(CRITICAL, msg)
+    else:
+        with codecs.open(
+            file_path, "rb", encoding="utf-8-sig", errors="replace"
+        ) as txt:
+            text = txt.read()
+    return error, text
+
+
+def convert_object_to_dict(about):
+    """
+    Convert an About object's list of Field objects, such as
+    [Field(name='name', value=''), Field(name='version', value='')],
+    to a dictionary keyed by field name.
+    """
+    about_dict = {}
+    # Convert all the supported fields into a dictionary
+    fields_dict = getattr(about, "fields")
+    custom_fields_dict = getattr(about, "custom_fields")
+    supported_dict = {**fields_dict, **custom_fields_dict}
+    for field in supported_dict:
+        key = supported_dict[field].name
+        value = supported_dict[field].value
+        about_dict[key] = value
+    return about_dict
+
+
+def load_scancode_json(location):
+    """
+    Read the ScanCode JSON file at `location` and return a list of
+    dictionaries.
+    """
+    updated_results = []
+
+    with open(location) as json_file:
+        results = json.load(json_file)
+    results = results["files"]
+    # Rename "path" to "about_resource" and derive "name" from the path value
+    for item in results:
+        updated_dict = {}
+        for key in item:
+            if key == "path":
+                updated_dict["about_resource"] = item[key]
+                updated_dict["name"] = os.path.basename(item[key])
+            else:
+                updated_dict[key] = item[key]
+        updated_results.append(updated_dict)
+    return updated_results
+
+
+def load_excel(location, worksheet=None):
+    """
+    Read the XLSX file at `location` and return an (errors, results) tuple,
+    where results is a list of ordered dictionaries, one
+    for each row.
+    """
+    results = []
+    errors = []
+    import warnings
+
+    # Suppress openpyxl's "Workbook contains no default style, apply
+    # openpyxl's default" warning.
+    with warnings.catch_warnings(record=True):
+        input_bom = openpyxl.load_workbook(location)
+    sheetnames = input_bom.sheetnames
+    if worksheet:
+        if worksheet not in sheetnames:
+            import sys
+
+            print("The input worksheet name does not exist. Exiting.")
+            sys.exit(1)
+        sheet_obj = input_bom[worksheet]
+    else:
+        sheet_obj = input_bom.active
+        print("Working on the " + sheet_obj.title + " worksheet.")
+    max_col = sheet_obj.max_column
+
+    index = 1
+    col_keys = []
+    mapping_dict = {}
+
+    while index <= max_col:
+        value = sheet_obj.cell(row=1, column=index).value
+        if value in col_keys:
+            msg = "Duplicated column name, " + str(value) + ", detected."
+            errors.append(Error(CRITICAL, msg))
+            return errors, results
+        if value in mapping_dict:
+            value = mapping_dict[value]
+        col_keys.append(value)
+        index = index + 1
+
+    for row in sheet_obj.iter_rows(min_row=2, values_only=True):
+        row_dict = OrderedDict()
+        index = 0
+        while index < max_col:
+            value = row[index]
+            if value:
+                row_dict[col_keys[index]] = value
+            else:
+                row_dict[col_keys[index]] = ""
+            index = index + 1
+        results.append(row_dict)
+    return errors, results
+
+
+def write_licenses(lic_dict, location):
+    """
+    Write the license texts from the `lic_dict` mapping of file name to text
+    into the `location` directory. Return a list of errors.
+    """
+    loc = to_posix(location)
+    errors = []
+
+    if not posixpath.exists(loc):
+        os.makedirs(add_unc(loc))
+    try:
+        for lic in lic_dict:
+            output_location = posixpath.join(loc, lic)
+            with open(output_location, "w", encoding="utf-8", errors="replace") as out:
+                out.write(lic_dict[lic])
+    except Exception as e:
+        msg = str(e)
+        errors.append(Error(CRITICAL, msg))
+    return errors
+
+
+def strip_inventory_value(inventory):
+    """
+    The inventory is a list of dictionaries. Strip whitespace from each
+    dictionary value and return the stripped dictionaries as a list.
+    """
+    stripped_inventory = []
+    for component in inventory:
+        comp_dict = {}
+        for key in component:
+            comp_dict[key] = str(component[key]).strip()
+        stripped_inventory.append(comp_dict)
+    return stripped_inventory
+
+
+"""
+Return True if a string `s` is safe to use as an attribute name.
+""" +is_valid_name = re.compile(r"^[A-Za-z_][A-Za-z0-9_]*$").match diff --git a/.venv/lib/python3.11/site-packages/attrs-25.4.0.dist-info/INSTALLER b/.venv/lib/python3.11/site-packages/attrs-25.4.0.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attrs-25.4.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/lib/python3.11/site-packages/attrs-25.4.0.dist-info/METADATA b/.venv/lib/python3.11/site-packages/attrs-25.4.0.dist-info/METADATA new file mode 100644 index 0000000..51128bb --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attrs-25.4.0.dist-info/METADATA @@ -0,0 +1,235 @@ +Metadata-Version: 2.4 +Name: attrs +Version: 25.4.0 +Summary: Classes Without Boilerplate +Project-URL: Documentation, https://www.attrs.org/ +Project-URL: Changelog, https://www.attrs.org/en/stable/changelog.html +Project-URL: GitHub, https://github.com/python-attrs/attrs +Project-URL: Funding, https://github.com/sponsors/hynek +Project-URL: Tidelift, https://tidelift.com/subscription/pkg/pypi-attrs?utm_source=pypi-attrs&utm_medium=pypi +Author-email: Hynek Schlawack +License-Expression: MIT +License-File: LICENSE +Keywords: attribute,boilerplate,class +Classifier: Development Status :: 5 - Production/Stable +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3.14 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Typing :: Typed +Requires-Python: >=3.9 +Description-Content-Type: text/markdown + +

+[*attrs* logo]

    + + +*attrs* is the Python package that will bring back the **joy** of **writing classes** by relieving you from the drudgery of implementing object protocols (aka [dunder methods](https://www.attrs.org/en/latest/glossary.html#term-dunder-methods)). +Trusted by NASA for [Mars missions since 2020](https://github.com/readme/featured/nasa-ingenuity-helicopter)! + +Its main goal is to help you to write **concise** and **correct** software without slowing down your code. + + +## Sponsors + +*attrs* would not be possible without our [amazing sponsors](https://github.com/sponsors/hynek). +Especially those generously supporting us at the *The Organization* tier and higher: + + + +

+[sponsor logos]


    + Please consider joining them to help make attrs’s maintenance more sustainable! +

    + + + +## Example + +*attrs* gives you a class decorator and a way to declaratively define the attributes on that class: + + + +```pycon +>>> from attrs import asdict, define, make_class, Factory + +>>> @define +... class SomeClass: +... a_number: int = 42 +... list_of_numbers: list[int] = Factory(list) +... +... def hard_math(self, another_number): +... return self.a_number + sum(self.list_of_numbers) * another_number + + +>>> sc = SomeClass(1, [1, 2, 3]) +>>> sc +SomeClass(a_number=1, list_of_numbers=[1, 2, 3]) + +>>> sc.hard_math(3) +19 +>>> sc == SomeClass(1, [1, 2, 3]) +True +>>> sc != SomeClass(2, [3, 2, 1]) +True + +>>> asdict(sc) +{'a_number': 1, 'list_of_numbers': [1, 2, 3]} + +>>> SomeClass() +SomeClass(a_number=42, list_of_numbers=[]) + +>>> C = make_class("C", ["a", "b"]) +>>> C("foo", "bar") +C(a='foo', b='bar') +``` + +After *declaring* your attributes, *attrs* gives you: + +- a concise and explicit overview of the class's attributes, +- a nice human-readable `__repr__`, +- equality-checking methods, +- an initializer, +- and much more, + +*without* writing dull boilerplate code again and again and *without* runtime performance penalties. + +--- + +This example uses *attrs*'s modern APIs that have been introduced in version 20.1.0, and the *attrs* package import name that has been added in version 21.3.0. +The classic APIs (`@attr.s`, `attr.ib`, plus their serious-business aliases) and the `attr` package import name will remain **indefinitely**. + +Check out [*On The Core API Names*](https://www.attrs.org/en/latest/names.html) for an in-depth explanation! + + +### Hate Type Annotations!? + +No problem! +Types are entirely **optional** with *attrs*. +Simply assign `attrs.field()` to the attributes instead of annotating them with types: + +```python +from attrs import define, field + +@define +class SomeClass: + a_number = field(default=42) + list_of_numbers = field(factory=list) +``` + + +## Data Classes + +On the tin, *attrs* might remind you of `dataclasses` (and indeed, `dataclasses` [are a descendant](https://hynek.me/articles/import-attrs/) of *attrs*). +In practice it does a lot more and is more flexible. +For instance, it allows you to define [special handling of NumPy arrays for equality checks](https://www.attrs.org/en/stable/comparison.html#customization), allows more ways to [plug into the initialization process](https://www.attrs.org/en/stable/init.html#hooking-yourself-into-initialization), has a replacement for `__init_subclass__`, and allows for stepping through the generated methods using a debugger. + +For more details, please refer to our [comparison page](https://www.attrs.org/en/stable/why.html#data-classes), but generally speaking, we are more likely to commit crimes against nature to make things work that one would expect to work, but that are quite complicated in practice. 
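+
+As a minimal sketch of the NumPy equality customization mentioned above (the
+`Measurement` class is illustrative only and assumes NumPy is installed):
+
+```pycon
+>>> import numpy as np
+>>> from attrs import cmp_using, define, field
+
+>>> @define
+... class Measurement:
+...     # Compare the array field by value rather than by object identity.
+...     samples: np.ndarray = field(eq=cmp_using(eq=np.array_equal))
+
+>>> Measurement(np.array([1, 2, 3])) == Measurement(np.array([1, 2, 3]))
+True
+```
+
+`cmp_using()` builds a comparison wrapper from the callable you pass in, so
+any array-by-value comparator works here.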
+
+
+## Project Information
+
+- [**Changelog**](https://www.attrs.org/en/stable/changelog.html)
+- [**Documentation**](https://www.attrs.org/)
+- [**PyPI**](https://pypi.org/project/attrs/)
+- [**Source Code**](https://github.com/python-attrs/attrs)
+- [**Contributing**](https://github.com/python-attrs/attrs/blob/main/.github/CONTRIBUTING.md)
+- [**Third-party Extensions**](https://github.com/python-attrs/attrs/wiki/Extensions-to-attrs)
+- **Get Help**: use the `python-attrs` tag on [Stack Overflow](https://stackoverflow.com/questions/tagged/python-attrs)
+
+
+### *attrs* for Enterprise
+
+Available as part of the [Tidelift Subscription](https://tidelift.com/?utm_source=lifter&utm_medium=referral&utm_campaign=hynek).
+
+The maintainers of *attrs* and thousands of other packages are working with Tidelift to deliver commercial support and maintenance for the open source packages you use to build your applications.
+Save time, reduce risk, and improve code health, while paying the maintainers of the exact packages you use.
+
+## Release Information
+
+### Backwards-incompatible Changes
+
+- Class-level `kw_only=True` behavior is now consistent with `dataclasses`.
+
+  Previously, a class that set `kw_only=True` made all attributes keyword-only, including those from base classes.
+  If an attribute set `kw_only=False`, that setting was ignored and it was still made keyword-only.
+
+  Now, only the attributes defined in that class that don't explicitly set `kw_only=False` are made keyword-only.
+
+  This shouldn't be a problem for most users, unless you have a pattern like this:
+
+  ```python
+  @attrs.define(kw_only=True)
+  class Base:
+      a: int
+      b: int = attrs.field(default=1, kw_only=False)
+
+  @attrs.define
+  class Subclass(Base):
+      c: int
+  ```
+
+  Here, we have a `kw_only=True` *attrs* class (`Base`) with an attribute that sets `kw_only=False` and has a default (`Base.b`), and we then create a subclass (`Subclass`) with required arguments (`Subclass.c`).
+  Previously this would work, since it would make `Base.b` keyword-only, but now this fails since `Base.b` is positional and we have a required positional argument (`Subclass.c`) following another argument with a default.
+  [#1457](https://github.com/python-attrs/attrs/issues/1457)
+
+
+### Changes
+
+- Values passed to the `__init__()` method of `attrs` classes are now correctly passed to `__attrs_pre_init__()` instead of their default values (in cases where *kw_only* was not specified).
+  [#1427](https://github.com/python-attrs/attrs/issues/1427)
+- Added support for Python 3.14 and [PEP 749](https://peps.python.org/pep-0749/).
+  [#1446](https://github.com/python-attrs/attrs/issues/1446),
+  [#1451](https://github.com/python-attrs/attrs/issues/1451)
+- `attrs.validators.deep_mapping()` now allows leaving out either the *key_validator* or the *value_validator* (but not both).
+  [#1448](https://github.com/python-attrs/attrs/issues/1448)
+- `attrs.validators.deep_iterable()` and `attrs.validators.deep_mapping()` now accept lists and tuples for all validators and wrap them in an `attrs.validators.and_()`.
+  [#1449](https://github.com/python-attrs/attrs/issues/1449)
+- Added a new **experimental** way to inspect classes:
+
+  `attrs.inspect(cls)` returns the _effective_ class-wide parameters that were used by *attrs* to construct the class.
+
+  The returned object is the same data structure that *attrs* uses internally to decide how to construct the final class.
+ [#1454](https://github.com/python-attrs/attrs/issues/1454) +- Fixed annotations for `attrs.field(converter=...)`. + Previously, a `tuple` of converters was only accepted if it had exactly one element. + [#1461](https://github.com/python-attrs/attrs/issues/1461) +- The performance of `attrs.asdict()` has been improved by 45–260%. + [#1463](https://github.com/python-attrs/attrs/issues/1463) +- The performance of `attrs.astuple()` has been improved by 49–270%. + [#1469](https://github.com/python-attrs/attrs/issues/1469) +- The type annotation for `attrs.validators.or_()` now allows for different types of validators. + + This was only an issue on Pyright. + [#1474](https://github.com/python-attrs/attrs/issues/1474) + + + +--- + +[Full changelog →](https://www.attrs.org/en/stable/changelog.html) diff --git a/.venv/lib/python3.11/site-packages/attrs-25.4.0.dist-info/RECORD b/.venv/lib/python3.11/site-packages/attrs-25.4.0.dist-info/RECORD new file mode 100644 index 0000000..b4287f6 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attrs-25.4.0.dist-info/RECORD @@ -0,0 +1,55 @@ +attr/__init__.py,sha256=fOYIvt1eGSqQre4uCS3sJWKZ0mwAuC8UD6qba5OS9_U,2057 +attr/__init__.pyi,sha256=IZkzIjvtbRqDWGkDBIF9dd12FgDa379JYq3GHnVOvFQ,11309 +attr/__pycache__/__init__.cpython-311.pyc,, +attr/__pycache__/_cmp.cpython-311.pyc,, +attr/__pycache__/_compat.cpython-311.pyc,, +attr/__pycache__/_config.cpython-311.pyc,, +attr/__pycache__/_funcs.cpython-311.pyc,, +attr/__pycache__/_make.cpython-311.pyc,, +attr/__pycache__/_next_gen.cpython-311.pyc,, +attr/__pycache__/_version_info.cpython-311.pyc,, +attr/__pycache__/converters.cpython-311.pyc,, +attr/__pycache__/exceptions.cpython-311.pyc,, +attr/__pycache__/filters.cpython-311.pyc,, +attr/__pycache__/setters.cpython-311.pyc,, +attr/__pycache__/validators.cpython-311.pyc,, +attr/_cmp.py,sha256=3Nn1TjxllUYiX_nJoVnEkXoDk0hM1DYKj5DE7GZe4i0,4117 +attr/_cmp.pyi,sha256=U-_RU_UZOyPUEQzXE6RMYQQcjkZRY25wTH99sN0s7MM,368 +attr/_compat.py,sha256=x0g7iEUOnBVJC72zyFCgb1eKqyxS-7f2LGnNyZ_r95s,2829 +attr/_config.py,sha256=dGq3xR6fgZEF6UBt_L0T-eUHIB4i43kRmH0P28sJVw8,843 +attr/_funcs.py,sha256=Ix5IETTfz5F01F-12MF_CSFomIn2h8b67EVVz2gCtBE,16479 +attr/_make.py,sha256=NRJDGS8syg2h3YNflVNoK2FwR3CpdSZxx8M6lacwljA,104141 +attr/_next_gen.py,sha256=BQtCUlzwg2gWHTYXBQvrEYBnzBUrDvO57u0Py6UCPhc,26274 +attr/_typing_compat.pyi,sha256=XDP54TUn-ZKhD62TOQebmzrwFyomhUCoGRpclb6alRA,469 +attr/_version_info.py,sha256=w4R-FYC3NK_kMkGUWJlYP4cVAlH9HRaC-um3fcjYkHM,2222 +attr/_version_info.pyi,sha256=x_M3L3WuB7r_ULXAWjx959udKQ4HLB8l-hsc1FDGNvk,209 +attr/converters.py,sha256=GlDeOzPeTFgeBBLbj9G57Ez5lAk68uhSALRYJ_exe84,3861 +attr/converters.pyi,sha256=orU2bff-VjQa2kMDyvnMQV73oJT2WRyQuw4ZR1ym1bE,643 +attr/exceptions.py,sha256=HRFq4iybmv7-DcZwyjl6M1euM2YeJVK_hFxuaBGAngI,1977 +attr/exceptions.pyi,sha256=zZq8bCUnKAy9mDtBEw42ZhPhAUIHoTKedDQInJD883M,539 +attr/filters.py,sha256=ZBiKWLp3R0LfCZsq7X11pn9WX8NslS2wXM4jsnLOGc8,1795 +attr/filters.pyi,sha256=3J5BG-dTxltBk1_-RuNRUHrv2qu1v8v4aDNAQ7_mifA,208 +attr/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +attr/setters.py,sha256=5-dcT63GQK35ONEzSgfXCkbB7pPkaR-qv15mm4PVSzQ,1617 +attr/setters.pyi,sha256=NnVkaFU1BB4JB8E4JuXyrzTUgvtMpj8p3wBdJY7uix4,584 +attr/validators.py,sha256=1BnYGTuYvSucGEI4ju-RPNJteVzG0ZlfWpJiWoSFHQ8,21458 +attr/validators.pyi,sha256=ftmW3m4KJ3pQcIXAj-BejT7BY4ZfqrC1G-5W7XvoPds,4082 +attrs-25.4.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 
+attrs-25.4.0.dist-info/METADATA,sha256=2Rerxj7agcMRxiwdkt6lC2guqHAmkGKCH13nWWK7ZoQ,10473 +attrs-25.4.0.dist-info/RECORD,, +attrs-25.4.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87 +attrs-25.4.0.dist-info/licenses/LICENSE,sha256=iCEVyV38KvHutnFPjsbVy8q_Znyv-HKfQkINpj9xTp8,1109 +attrs/__init__.py,sha256=RxaAZNwYiEh-fcvHLZNpQ_DWKni73M_jxEPEftiq1Zc,1183 +attrs/__init__.pyi,sha256=2gV79g9UxJppGSM48hAZJ6h_MHb70dZoJL31ZNJeZYI,9416 +attrs/__pycache__/__init__.cpython-311.pyc,, +attrs/__pycache__/converters.cpython-311.pyc,, +attrs/__pycache__/exceptions.cpython-311.pyc,, +attrs/__pycache__/filters.cpython-311.pyc,, +attrs/__pycache__/setters.cpython-311.pyc,, +attrs/__pycache__/validators.cpython-311.pyc,, +attrs/converters.py,sha256=8kQljrVwfSTRu8INwEk8SI0eGrzmWftsT7rM0EqyohM,76 +attrs/exceptions.py,sha256=ACCCmg19-vDFaDPY9vFl199SPXCQMN_bENs4DALjzms,76 +attrs/filters.py,sha256=VOUMZug9uEU6dUuA0dF1jInUK0PL3fLgP0VBS5d-CDE,73 +attrs/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +attrs/setters.py,sha256=eL1YidYQV3T2h9_SYIZSZR1FAcHGb1TuCTy0E0Lv2SU,73 +attrs/validators.py,sha256=xcy6wD5TtTkdCG1f4XWbocPSO0faBjk5IfVJfP6SUj0,76 diff --git a/.venv/lib/python3.11/site-packages/attrs-25.4.0.dist-info/WHEEL b/.venv/lib/python3.11/site-packages/attrs-25.4.0.dist-info/WHEEL new file mode 100644 index 0000000..12228d4 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attrs-25.4.0.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.27.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/.venv/lib/python3.11/site-packages/attrs-25.4.0.dist-info/licenses/LICENSE b/.venv/lib/python3.11/site-packages/attrs-25.4.0.dist-info/licenses/LICENSE new file mode 100644 index 0000000..2bd6453 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attrs-25.4.0.dist-info/licenses/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Hynek Schlawack and the attrs contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/.venv/lib/python3.11/site-packages/attrs/__init__.py b/.venv/lib/python3.11/site-packages/attrs/__init__.py new file mode 100644 index 0000000..dc1ce4b --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attrs/__init__.py @@ -0,0 +1,72 @@ +# SPDX-License-Identifier: MIT + +from attr import ( + NOTHING, + Attribute, + AttrsInstance, + Converter, + Factory, + NothingType, + _make_getattr, + assoc, + cmp_using, + define, + evolve, + field, + fields, + fields_dict, + frozen, + has, + make_class, + mutable, + resolve_types, + validate, +) +from attr._make import ClassProps +from attr._next_gen import asdict, astuple, inspect + +from . import converters, exceptions, filters, setters, validators + + +__all__ = [ + "NOTHING", + "Attribute", + "AttrsInstance", + "ClassProps", + "Converter", + "Factory", + "NothingType", + "__author__", + "__copyright__", + "__description__", + "__doc__", + "__email__", + "__license__", + "__title__", + "__url__", + "__version__", + "__version_info__", + "asdict", + "assoc", + "astuple", + "cmp_using", + "converters", + "define", + "evolve", + "exceptions", + "field", + "fields", + "fields_dict", + "filters", + "frozen", + "has", + "inspect", + "make_class", + "mutable", + "resolve_types", + "setters", + "validate", + "validators", +] + +__getattr__ = _make_getattr(__name__) diff --git a/.venv/lib/python3.11/site-packages/attrs/__init__.pyi b/.venv/lib/python3.11/site-packages/attrs/__init__.pyi new file mode 100644 index 0000000..6364bac --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attrs/__init__.pyi @@ -0,0 +1,314 @@ +import sys + +from typing import ( + Any, + Callable, + Mapping, + Sequence, + overload, + TypeVar, +) + +# Because we need to type our own stuff, we have to make everything from +# attr explicitly public too. 
+from attr import __author__ as __author__ +from attr import __copyright__ as __copyright__ +from attr import __description__ as __description__ +from attr import __email__ as __email__ +from attr import __license__ as __license__ +from attr import __title__ as __title__ +from attr import __url__ as __url__ +from attr import __version__ as __version__ +from attr import __version_info__ as __version_info__ +from attr import assoc as assoc +from attr import Attribute as Attribute +from attr import AttrsInstance as AttrsInstance +from attr import cmp_using as cmp_using +from attr import converters as converters +from attr import Converter as Converter +from attr import evolve as evolve +from attr import exceptions as exceptions +from attr import Factory as Factory +from attr import fields as fields +from attr import fields_dict as fields_dict +from attr import filters as filters +from attr import has as has +from attr import make_class as make_class +from attr import NOTHING as NOTHING +from attr import resolve_types as resolve_types +from attr import setters as setters +from attr import validate as validate +from attr import validators as validators +from attr import attrib, asdict as asdict, astuple as astuple +from attr import NothingType as NothingType + +if sys.version_info >= (3, 11): + from typing import dataclass_transform +else: + from typing_extensions import dataclass_transform + +_T = TypeVar("_T") +_C = TypeVar("_C", bound=type) + +_EqOrderType = bool | Callable[[Any], Any] +_ValidatorType = Callable[[Any, "Attribute[_T]", _T], Any] +_CallableConverterType = Callable[[Any], Any] +_ConverterType = _CallableConverterType | Converter[Any, Any] +_ReprType = Callable[[Any], str] +_ReprArgType = bool | _ReprType +_OnSetAttrType = Callable[[Any, "Attribute[Any]", Any], Any] +_OnSetAttrArgType = _OnSetAttrType | list[_OnSetAttrType] | setters._NoOpType +_FieldTransformer = Callable[ + [type, list["Attribute[Any]"]], list["Attribute[Any]"] +] +# FIXME: in reality, if multiple validators are passed they must be in a list +# or tuple, but those are invariant and so would prevent subtypes of +# _ValidatorType from working when passed in a list or tuple. +_ValidatorArgType = _ValidatorType[_T] | Sequence[_ValidatorType[_T]] + +@overload +def field( + *, + default: None = ..., + validator: None = ..., + repr: _ReprArgType = ..., + hash: bool | None = ..., + init: bool = ..., + metadata: Mapping[Any, Any] | None = ..., + converter: None = ..., + factory: None = ..., + kw_only: bool | None = ..., + eq: bool | None = ..., + order: bool | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + alias: str | None = ..., + type: type | None = ..., +) -> Any: ... + +# This form catches an explicit None or no default and infers the type from the +# other arguments. +@overload +def field( + *, + default: None = ..., + validator: _ValidatorArgType[_T] | None = ..., + repr: _ReprArgType = ..., + hash: bool | None = ..., + init: bool = ..., + metadata: Mapping[Any, Any] | None = ..., + converter: _ConverterType + | list[_ConverterType] + | tuple[_ConverterType, ...] + | None = ..., + factory: Callable[[], _T] | None = ..., + kw_only: bool | None = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + alias: str | None = ..., + type: type | None = ..., +) -> _T: ... + +# This form catches an explicit default argument. 
+@overload +def field( + *, + default: _T, + validator: _ValidatorArgType[_T] | None = ..., + repr: _ReprArgType = ..., + hash: bool | None = ..., + init: bool = ..., + metadata: Mapping[Any, Any] | None = ..., + converter: _ConverterType + | list[_ConverterType] + | tuple[_ConverterType, ...] + | None = ..., + factory: Callable[[], _T] | None = ..., + kw_only: bool | None = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + alias: str | None = ..., + type: type | None = ..., +) -> _T: ... + +# This form covers type=non-Type: e.g. forward references (str), Any +@overload +def field( + *, + default: _T | None = ..., + validator: _ValidatorArgType[_T] | None = ..., + repr: _ReprArgType = ..., + hash: bool | None = ..., + init: bool = ..., + metadata: Mapping[Any, Any] | None = ..., + converter: _ConverterType + | list[_ConverterType] + | tuple[_ConverterType, ...] + | None = ..., + factory: Callable[[], _T] | None = ..., + kw_only: bool | None = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + alias: str | None = ..., + type: type | None = ..., +) -> Any: ... +@overload +@dataclass_transform(field_specifiers=(attrib, field)) +def define( + maybe_cls: _C, + *, + these: dict[str, Any] | None = ..., + repr: bool = ..., + unsafe_hash: bool | None = ..., + hash: bool | None = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: bool | None = ..., + order: bool | None = ..., + auto_detect: bool = ..., + getstate_setstate: bool | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + field_transformer: _FieldTransformer | None = ..., + match_args: bool = ..., +) -> _C: ... +@overload +@dataclass_transform(field_specifiers=(attrib, field)) +def define( + maybe_cls: None = ..., + *, + these: dict[str, Any] | None = ..., + repr: bool = ..., + unsafe_hash: bool | None = ..., + hash: bool | None = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: bool | None = ..., + order: bool | None = ..., + auto_detect: bool = ..., + getstate_setstate: bool | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + field_transformer: _FieldTransformer | None = ..., + match_args: bool = ..., +) -> Callable[[_C], _C]: ... + +mutable = define + +@overload +@dataclass_transform(frozen_default=True, field_specifiers=(attrib, field)) +def frozen( + maybe_cls: _C, + *, + these: dict[str, Any] | None = ..., + repr: bool = ..., + unsafe_hash: bool | None = ..., + hash: bool | None = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: bool | None = ..., + order: bool | None = ..., + auto_detect: bool = ..., + getstate_setstate: bool | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + field_transformer: _FieldTransformer | None = ..., + match_args: bool = ..., +) -> _C: ... 
+@overload +@dataclass_transform(frozen_default=True, field_specifiers=(attrib, field)) +def frozen( + maybe_cls: None = ..., + *, + these: dict[str, Any] | None = ..., + repr: bool = ..., + unsafe_hash: bool | None = ..., + hash: bool | None = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: bool | None = ..., + order: bool | None = ..., + auto_detect: bool = ..., + getstate_setstate: bool | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + field_transformer: _FieldTransformer | None = ..., + match_args: bool = ..., +) -> Callable[[_C], _C]: ... + +class ClassProps: + # XXX: somehow when defining/using enums Mypy starts looking at our own + # (untyped) code and causes tons of errors. + Hashability: Any + KeywordOnly: Any + + is_exception: bool + is_slotted: bool + has_weakref_slot: bool + is_frozen: bool + # kw_only: ClassProps.KeywordOnly + kw_only: Any + collected_fields_by_mro: bool + added_init: bool + added_repr: bool + added_eq: bool + added_ordering: bool + # hashability: ClassProps.Hashability + hashability: Any + added_match_args: bool + added_str: bool + added_pickling: bool + on_setattr_hook: _OnSetAttrType | None + field_transformer: Callable[[Attribute[Any]], Attribute[Any]] | None + + def __init__( + self, + is_exception: bool, + is_slotted: bool, + has_weakref_slot: bool, + is_frozen: bool, + # kw_only: ClassProps.KeywordOnly + kw_only: Any, + collected_fields_by_mro: bool, + added_init: bool, + added_repr: bool, + added_eq: bool, + added_ordering: bool, + # hashability: ClassProps.Hashability + hashability: Any, + added_match_args: bool, + added_str: bool, + added_pickling: bool, + on_setattr_hook: _OnSetAttrType, + field_transformer: Callable[[Attribute[Any]], Attribute[Any]], + ) -> None: ... + @property + def is_hashable(self) -> bool: ... + +def inspect(cls: type) -> ClassProps: ... 
diff --git a/.venv/lib/python3.11/site-packages/attrs/converters.py b/.venv/lib/python3.11/site-packages/attrs/converters.py new file mode 100644 index 0000000..7821f6c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attrs/converters.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.converters import * # noqa: F403 diff --git a/.venv/lib/python3.11/site-packages/attrs/exceptions.py b/.venv/lib/python3.11/site-packages/attrs/exceptions.py new file mode 100644 index 0000000..3323f9d --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attrs/exceptions.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.exceptions import * # noqa: F403 diff --git a/.venv/lib/python3.11/site-packages/attrs/filters.py b/.venv/lib/python3.11/site-packages/attrs/filters.py new file mode 100644 index 0000000..3080f48 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attrs/filters.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.filters import * # noqa: F403 diff --git a/.venv/lib/python3.11/site-packages/attrs/py.typed b/.venv/lib/python3.11/site-packages/attrs/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.11/site-packages/attrs/setters.py b/.venv/lib/python3.11/site-packages/attrs/setters.py new file mode 100644 index 0000000..f3d73bb --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attrs/setters.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.setters import * # noqa: F403 diff --git a/.venv/lib/python3.11/site-packages/attrs/validators.py b/.venv/lib/python3.11/site-packages/attrs/validators.py new file mode 100644 index 0000000..037e124 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/attrs/validators.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.validators import * # noqa: F403 diff --git a/.venv/lib/python3.11/site-packages/beautifulsoup4-4.14.3.dist-info/INSTALLER b/.venv/lib/python3.11/site-packages/beautifulsoup4-4.14.3.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/beautifulsoup4-4.14.3.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/lib/python3.11/site-packages/beautifulsoup4-4.14.3.dist-info/METADATA b/.venv/lib/python3.11/site-packages/beautifulsoup4-4.14.3.dist-info/METADATA new file mode 100644 index 0000000..7fd97b7 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/beautifulsoup4-4.14.3.dist-info/METADATA @@ -0,0 +1,123 @@ +Metadata-Version: 2.4 +Name: beautifulsoup4 +Version: 4.14.3 +Summary: Screen-scraping library +Project-URL: Download, https://www.crummy.com/software/BeautifulSoup/bs4/download/ +Project-URL: Homepage, https://www.crummy.com/software/BeautifulSoup/bs4/ +Author-email: Leonard Richardson +License: MIT License +License-File: AUTHORS +License-File: LICENSE +Keywords: HTML,XML,parse,soup +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Text Processing :: Markup :: HTML +Classifier: Topic :: Text Processing :: Markup :: SGML +Classifier: Topic :: Text Processing :: Markup :: XML +Requires-Python: >=3.7.0 +Requires-Dist: soupsieve>=1.6.1 +Requires-Dist: typing-extensions>=4.0.0 +Provides-Extra: cchardet +Requires-Dist: cchardet; extra == 'cchardet' +Provides-Extra: chardet 
+Requires-Dist: chardet; extra == 'chardet'
+Provides-Extra: charset-normalizer
+Requires-Dist: charset-normalizer; extra == 'charset-normalizer'
+Provides-Extra: html5lib
+Requires-Dist: html5lib; extra == 'html5lib'
+Provides-Extra: lxml
+Requires-Dist: lxml; extra == 'lxml'
+Description-Content-Type: text/markdown
+
+Beautiful Soup is a library that makes it easy to scrape information
+from web pages. It sits atop an HTML or XML parser, providing Pythonic
+idioms for iterating, searching, and modifying the parse tree.
+
+# Quick start
+
+```
+>>> from bs4 import BeautifulSoup
+>>> soup = BeautifulSoup("<p>Some<b>bad<i>HTML")
+>>> print(soup.prettify())
+<html>
+ <body>
+  <p>
+   Some
+   <b>
+    bad
+    <i>
+     HTML
+    </i>
+   </b>
+  </p>
+ </body>
+</html>
+>>> soup.find(string="bad")
+'bad'
+>>> soup.i
+<i>HTML</i>
+#
+>>> soup = BeautifulSoup("<tag1>Some<tag2/>bad<tag3>XML", "xml")
+#
+>>> print(soup.prettify())
+<?xml version="1.0" encoding="utf-8"?>
+<tag1>
+ Some
+ <tag2/>
+ bad
+ <tag3>
+  XML
+ </tag3>
+</tag1>
+```
+
+To go beyond the basics, [comprehensive documentation is available](https://www.crummy.com/software/BeautifulSoup/bs4/doc/).
+
+# Links
+
+* [Homepage](https://www.crummy.com/software/BeautifulSoup/bs4/)
+* [Documentation](https://www.crummy.com/software/BeautifulSoup/bs4/doc/)
+* [Discussion group](https://groups.google.com/group/beautifulsoup/)
+* [Development](https://code.launchpad.net/beautifulsoup/)
+* [Bug tracker](https://bugs.launchpad.net/beautifulsoup/)
+* [Complete changelog](https://git.launchpad.net/beautifulsoup/tree/CHANGELOG)
+
+# Note on Python 2 sunsetting
+
+Beautiful Soup's support for Python 2 was discontinued on December 31,
+2020: one year after the sunset date for Python 2 itself. From this
+point onward, new Beautiful Soup development will exclusively target
+Python 3. The final release of Beautiful Soup 4 to support Python 2
+was 4.9.3.
+
+# Supporting the project
+
+If you use Beautiful Soup as part of your professional work, please consider a
+[Tidelift subscription](https://tidelift.com/subscription/pkg/pypi-beautifulsoup4?utm_source=pypi-beautifulsoup4&utm_medium=referral&utm_campaign=readme).
+This will support many of the free software projects your organization
+depends on, not just Beautiful Soup.
+
+If you use Beautiful Soup for personal projects, the best way to say
+thank you is to read
+[Tool Safety](https://www.crummy.com/software/BeautifulSoup/zine/), a zine I
+wrote about what Beautiful Soup has taught me about software
+development.
+
+# Building the documentation
+
+The bs4/doc/ directory contains full documentation in Sphinx
+format. Run `make html` in that directory to create HTML
+documentation.
+ +# Running the unit tests + +Beautiful Soup supports unit test discovery using Pytest: + +``` +$ pytest +``` + diff --git a/.venv/lib/python3.11/site-packages/beautifulsoup4-4.14.3.dist-info/RECORD b/.venv/lib/python3.11/site-packages/beautifulsoup4-4.14.3.dist-info/RECORD new file mode 100644 index 0000000..8a5523a --- /dev/null +++ b/.venv/lib/python3.11/site-packages/beautifulsoup4-4.14.3.dist-info/RECORD @@ -0,0 +1,38 @@ +beautifulsoup4-4.14.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +beautifulsoup4-4.14.3.dist-info/METADATA,sha256=Ac93vA8Xp9FtgOcKXFM8ESfVdztimUfJ3WUpVlhKtsY,3812 +beautifulsoup4-4.14.3.dist-info/RECORD,, +beautifulsoup4-4.14.3.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +beautifulsoup4-4.14.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87 +beautifulsoup4-4.14.3.dist-info/licenses/AUTHORS,sha256=uYkjiRjh_aweRnF8tAW2PpJJeickE68NmJwd9siry28,2201 +beautifulsoup4-4.14.3.dist-info/licenses/LICENSE,sha256=VbTY1LHlvIbRDvrJG3TIe8t3UmsPW57a-LnNKtxzl7I,1441 +bs4/__init__.py,sha256=E7wiVp7oQK0JhdAYxpehZa8drv3W_sJv5oeTFiBfR5o,44386 +bs4/__pycache__/__init__.cpython-311.pyc,, +bs4/__pycache__/_deprecation.cpython-311.pyc,, +bs4/__pycache__/_typing.cpython-311.pyc,, +bs4/__pycache__/_warnings.cpython-311.pyc,, +bs4/__pycache__/css.cpython-311.pyc,, +bs4/__pycache__/dammit.cpython-311.pyc,, +bs4/__pycache__/diagnose.cpython-311.pyc,, +bs4/__pycache__/element.cpython-311.pyc,, +bs4/__pycache__/exceptions.cpython-311.pyc,, +bs4/__pycache__/filter.cpython-311.pyc,, +bs4/__pycache__/formatter.cpython-311.pyc,, +bs4/_deprecation.py,sha256=niHJCk37APg8KEuFOa57ZXaxLdBmc_2V6uuaJqu7r30,2408 +bs4/_typing.py,sha256=zNcx7R1yCTK8WwtumP28hc7CJ3pMyZXj_VAeYaNXMZA,7549 +bs4/_warnings.py,sha256=ZuOETgcnEbZgw2N0nnNXn6wvtrn2ut7AF0d98bvkMFc,4711 +bs4/builder/__init__.py,sha256=Rl4qjOXvdyyyjayOFqbkgoUoo81IgoyKD-RwWeVK59g,31194 +bs4/builder/__pycache__/__init__.cpython-311.pyc,, +bs4/builder/__pycache__/_html5lib.cpython-311.pyc,, +bs4/builder/__pycache__/_htmlparser.cpython-311.pyc,, +bs4/builder/__pycache__/_lxml.cpython-311.pyc,, +bs4/builder/_html5lib.py,sha256=hL6xUk4_I2i5CMguFoYFlrI26cY4Dut7fOEQrUctHIM,23607 +bs4/builder/_htmlparser.py,sha256=CnULPQV2rm4vLojJABpQ7Xm9diddnEZx2Wcz_VTC1Mg,17445 +bs4/builder/_lxml.py,sha256=ks1e8boA_nOA2oomAhxeudccR6ThbEE-EllFqHRoPLA,18969 +bs4/css.py,sha256=_m_l_4SGWHnY620VJ21j_qQH1RX3p91sYVemgKxaLsM,12713 +bs4/dammit.py,sha256=ZJWa9K32X6N2imFHleqUq0ekf592weU1lvULN_WYWYk,57024 +bs4/diagnose.py,sha256=at98iuxyOrqec4V8iwkTIbNUqBCsq9Lr3fDAQx2129Y,7846 +bs4/element.py,sha256=oXmj7LG_2NpsDK90mq73q0PMK0FjFBIGSeTTJLVwwTc,120237 +bs4/exceptions.py,sha256=Q9FOadNe8QRvzDMaKSXe2Wtl8JK_oAZW7mbFZBVP_GE,951 +bs4/filter.py,sha256=rw8ZNhTDLEJVCEiSifou5tZR_3zBLeuvAyouY82qU_E,29201 +bs4/formatter.py,sha256=uBT0k6W8O5kJ9PCuJYjra97yoUqC-dlM9D_v-oRM0r8,10478 +bs4/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/.venv/lib/python3.11/site-packages/beautifulsoup4-4.14.3.dist-info/REQUESTED b/.venv/lib/python3.11/site-packages/beautifulsoup4-4.14.3.dist-info/REQUESTED new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.11/site-packages/beautifulsoup4-4.14.3.dist-info/WHEEL b/.venv/lib/python3.11/site-packages/beautifulsoup4-4.14.3.dist-info/WHEEL new file mode 100644 index 0000000..12228d4 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/beautifulsoup4-4.14.3.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.27.0 +Root-Is-Purelib: 
true +Tag: py3-none-any diff --git a/.venv/lib/python3.11/site-packages/beautifulsoup4-4.14.3.dist-info/licenses/AUTHORS b/.venv/lib/python3.11/site-packages/beautifulsoup4-4.14.3.dist-info/licenses/AUTHORS new file mode 100644 index 0000000..18926c2 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/beautifulsoup4-4.14.3.dist-info/licenses/AUTHORS @@ -0,0 +1,49 @@ +Behold, mortal, the origins of Beautiful Soup... +================================================ + +Leonard Richardson is the primary maintainer. + +Aaron DeVore, Isaac Muse and Chris Papademetrious have made +significant contributions to the code base. + +Mark Pilgrim provided the encoding detection code that forms the base +of UnicodeDammit. + +Thomas Kluyver and Ezio Melotti finished the work of getting Beautiful +Soup 4 working under Python 3. + +Simon Willison wrote soupselect, which was used to make Beautiful Soup +support CSS selectors. Isaac Muse wrote SoupSieve, which made it +possible to _remove_ the CSS selector code from Beautiful Soup. + +Sam Ruby helped with a lot of edge cases. + +Jonathan Ellis was awarded the prestigious Beau Potage D'Or for his +work in solving the nestable tags conundrum. + +An incomplete list of people have contributed patches to Beautiful +Soup: + + Istvan Albert, Andrew Lin, Anthony Baxter, Oliver Beattie, Andrew +Boyko, Tony Chang, Francisco Canas, "Delong", Zephyr Fang, Fuzzy, +Roman Gaufman, Yoni Gilad, Richie Hindle, Toshihiro Kamiya, Peteris +Krumins, Kent Johnson, Marek Kapolka, Andreas Kostyrka, Roel Kramer, +Ben Last, Robert Leftwich, Stefaan Lippens, "liquider", Staffan +Malmgren, Ksenia Marasanova, JP Moins, Adam Monsen, John Nagle, "Jon", +Ed Oskiewicz, Martijn Peters, Greg Phillips, Giles Radford, Stefano +Revera, Arthur Rudolph, Marko Samastur, James Salter, Jouni Seppänen, +Alexander Schmolck, Tim Shirley, Geoffrey Sneddon, Ville Skyttä, +"Vikas", Jens Svalgaard, Andy Theyers, Eric Weiser, Glyn Webster, John +Wiseman, Paul Wright, Danny Yoo + +An incomplete list of people who made suggestions or found bugs or +found ways to break Beautiful Soup: + + Hanno Böck, Matteo Bertini, Chris Curvey, Simon Cusack, Bruce Eckel, + Matt Ernst, Michael Foord, Tom Harris, Bill de hOra, Donald Howes, + Matt Patterson, Scott Roberts, Steve Strassmann, Mike Williams, + warchild at redho dot com, Sami Kuisma, Carlos Rocha, Bob Hutchison, + Joren Mc, Michal Migurski, John Kleven, Tim Heaney, Tripp Lilley, Ed + Summers, Dennis Sutch, Chris Smith, Aaron Swartz, Stuart + Turner, Greg Edwards, Kevin J Kalupson, Nikos Kouremenos, Artur de + Sousa Rocha, Yichun Wei, Per Vognsen diff --git a/.venv/lib/python3.11/site-packages/beautifulsoup4-4.14.3.dist-info/licenses/LICENSE b/.venv/lib/python3.11/site-packages/beautifulsoup4-4.14.3.dist-info/licenses/LICENSE new file mode 100644 index 0000000..08e3a9c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/beautifulsoup4-4.14.3.dist-info/licenses/LICENSE @@ -0,0 +1,31 @@ +Beautiful Soup is made available under the MIT license: + + Copyright (c) Leonard Richardson + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this 
permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + +Beautiful Soup incorporates code from the html5lib library, which is +also made available under the MIT license. Copyright (c) James Graham +and other contributors + +Beautiful Soup has an optional dependency on the soupsieve library, +which is also made available under the MIT license. Copyright (c) +Isaac Muse diff --git a/.venv/lib/python3.11/site-packages/black-23.1.0.dist-info/INSTALLER b/.venv/lib/python3.11/site-packages/black-23.1.0.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/black-23.1.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/lib/python3.11/site-packages/black-23.1.0.dist-info/METADATA b/.venv/lib/python3.11/site-packages/black-23.1.0.dist-info/METADATA new file mode 100644 index 0000000..d0a9db0 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/black-23.1.0.dist-info/METADATA @@ -0,0 +1,1610 @@ +Metadata-Version: 2.1 +Name: black +Version: 23.1.0 +Summary: The uncompromising code formatter. +Project-URL: Changelog, https://github.com/psf/black/blob/main/CHANGES.md +Project-URL: Homepage, https://github.com/psf/black +Author-email: Łukasz Langa +License: MIT +License-File: AUTHORS.md +License-File: LICENSE +Keywords: automation,autopep8,formatter,gofmt,pyfmt,rustfmt,yapf +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Software Development :: Quality Assurance +Requires-Python: >=3.7 +Requires-Dist: click>=8.0.0 +Requires-Dist: mypy-extensions>=0.4.3 +Requires-Dist: packaging>=22.0 +Requires-Dist: pathspec>=0.9.0 +Requires-Dist: platformdirs>=2 +Requires-Dist: tomli>=1.1.0; python_version < '3.11' +Requires-Dist: typed-ast>=1.4.2; python_version < '3.8' and implementation_name == 'cpython' +Requires-Dist: typing-extensions>=3.10.0.0; python_version < '3.10' +Provides-Extra: colorama +Requires-Dist: colorama>=0.4.3; extra == 'colorama' +Provides-Extra: d +Requires-Dist: aiohttp>=3.7.4; extra == 'd' +Provides-Extra: jupyter +Requires-Dist: ipython>=7.8.0; extra == 'jupyter' +Requires-Dist: tokenize-rt>=3.2.0; extra == 'jupyter' +Provides-Extra: uvloop +Requires-Dist: uvloop>=0.15.2; extra == 'uvloop' +Description-Content-Type: text/markdown + +[![Black Logo](https://raw.githubusercontent.com/psf/black/main/docs/_static/logo2-readme.png)](https://black.readthedocs.io/en/stable/) + +

+**The Uncompromising Code Formatter**

    + +

+[badges: Actions Status | Documentation Status | Coverage Status | License: MIT | PyPI | Downloads | conda-forge | Code style: black]

    + +> “Any color you like.” + +_Black_ is the uncompromising Python code formatter. By using it, you agree to cede +control over minutiae of hand-formatting. In return, _Black_ gives you speed, +determinism, and freedom from `pycodestyle` nagging about formatting. You will save time +and mental energy for more important matters. + +Blackened code looks the same regardless of the project you're reading. Formatting +becomes transparent after a while and you can focus on the content instead. + +_Black_ makes code review faster by producing the smallest diffs possible. + +Try it out now using the [Black Playground](https://black.vercel.app). Watch the +[PyCon 2019 talk](https://youtu.be/esZLCuWs_2Y) to learn more. + +--- + +**[Read the documentation on ReadTheDocs!](https://black.readthedocs.io/en/stable)** + +--- + +## Installation and usage + +### Installation + +_Black_ can be installed by running `pip install black`. It requires Python 3.7+ to run. +If you want to format Jupyter Notebooks, install with `pip install "black[jupyter]"`. + +If you can't wait for the latest _hotness_ and want to install from GitHub, use: + +`pip install git+https://github.com/psf/black` + +### Usage + +To get started right away with sensible defaults: + +```sh +black {source_file_or_directory} +``` + +You can run _Black_ as a package if running it as a script doesn't work: + +```sh +python -m black {source_file_or_directory} +``` + +Further information can be found in our docs: + +- [Usage and Configuration](https://black.readthedocs.io/en/stable/usage_and_configuration/index.html) + +_Black_ is already [successfully used](https://github.com/psf/black#used-by) by many +projects, small and big. _Black_ has a comprehensive test suite, with efficient parallel +tests, and our own auto formatting and parallel Continuous Integration runner. Now that +we have become stable, you should not expect large formatting changes in the future. +Stylistic changes will mostly be responses to bug reports and support for new Python +syntax. For more information please refer to the +[The Black Code Style](https://black.readthedocs.io/en/stable/the_black_code_style/index.html). + +Also, as a safety measure which slows down processing, _Black_ will check that the +reformatted code still produces a valid AST that is effectively equivalent to the +original (see the +[Pragmatism](https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html#ast-before-and-after-formatting) +section for details). If you're feeling confident, use `--fast`. + +## The _Black_ code style + +_Black_ is a PEP 8 compliant opinionated formatter. _Black_ reformats entire files in +place. Style configuration options are deliberately limited and rarely added. It doesn't +take previous formatting into account (see +[Pragmatism](https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html#pragmatism) +for exceptions). + +Our documentation covers the current _Black_ code style, but planned changes to it are +also documented. 
They're both worth taking a look at:
+
+- [The _Black_ Code Style: Current style](https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html)
+- [The _Black_ Code Style: Future style](https://black.readthedocs.io/en/stable/the_black_code_style/future_style.html)
+
+Changes to the _Black_ code style are bound by the Stability Policy:
+
+- [The _Black_ Code Style: Stability Policy](https://black.readthedocs.io/en/stable/the_black_code_style/index.html#stability-policy)
+
+Please refer to this document before submitting an issue. What seems like a bug might be
+intended behaviour.
+
+### Pragmatism
+
+Early versions of _Black_ used to be absolutist in some respects. They took after its
+initial author. This was fine at the time as it made the implementation simpler and
+there were not many users anyway. Not many edge cases were reported. As a mature tool,
+_Black_ does make some exceptions to rules it otherwise holds.
+
+- [The _Black_ code style: Pragmatism](https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html#pragmatism)
+
+Please refer to this document before submitting an issue just like with the document
+above. What seems like a bug might be intended behaviour.
+
+## Configuration
+
+_Black_ is able to read project-specific default values for its command line options
+from a `pyproject.toml` file. This is especially useful for specifying custom
+`--include` and `--exclude`/`--force-exclude`/`--extend-exclude` patterns for your
+project.
+
+You can find more details in our documentation:
+
+- [The basics: Configuration via a file](https://black.readthedocs.io/en/stable/usage_and_configuration/the_basics.html#configuration-via-a-file)
+
+And if you're looking for more general configuration documentation:
+
+- [Usage and Configuration](https://black.readthedocs.io/en/stable/usage_and_configuration/index.html)
+
+**Pro-tip**: If you're asking yourself "Do I need to configure anything?" the answer is
+"No". _Black_ is all about sensible defaults. Applying those defaults will have your
+code in compliance with many other _Black_ formatted projects.
+
+## Used by
+
+The following notable open-source projects trust _Black_ with enforcing a consistent
+code style: pytest, tox, Pyramid, Django, Django Channels, Hypothesis, attrs,
+SQLAlchemy, Poetry, PyPA applications (Warehouse, Bandersnatch, Pipenv, virtualenv),
+pandas, Pillow, Twisted, LocalStack, every Datadog Agent Integration, Home Assistant,
+Zulip, Kedro, OpenOA, FLORIS, ORBIT, WOMBAT, and many more.
+
+The following organizations use _Black_: Facebook, Dropbox, KeepTruckin, Mozilla, Quora,
+Duolingo, QuantumBlack, Tesla, Archer Aviation.
+
+Are we missing anyone? Let us know.
+
+## Testimonials
+
+**Mike Bayer**, [author of `SQLAlchemy`](https://www.sqlalchemy.org/):
+
+> I can't think of any single tool in my entire programming career that has given me a
+> bigger productivity increase by its introduction. I can now do refactorings in about
+> 1% of the keystrokes that it would have taken me previously when we had no way for
+> code to format itself.
+
+**Dusty Phillips**,
+[writer](https://smile.amazon.com/s/ref=nb_sb_noss?url=search-alias%3Daps&field-keywords=dusty+phillips):
+
+> _Black_ is opinionated so you don't have to be.
+
+**Hynek Schlawack**, [creator of `attrs`](https://www.attrs.org/), core developer of
+Twisted and CPython:
+
+> An auto-formatter that doesn't suck is all I want for Xmas!
+ +**Carl Meyer**, [Django](https://www.djangoproject.com/) core developer: + +> At least the name is good. + +**Kenneth Reitz**, creator of [`requests`](https://requests.readthedocs.io/en/latest/) +and [`pipenv`](https://readthedocs.org/projects/pipenv/): + +> This vastly improves the formatting of our code. Thanks a ton! + +## Show your style + +Use the badge in your project's README.md: + +```md +[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) +``` + +Using the badge in README.rst: + +``` +.. image:: https://img.shields.io/badge/code%20style-black-000000.svg + :target: https://github.com/psf/black +``` + +Looks like this: +[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) + +## License + +MIT + +## Contributing + +Welcome! Happy to see you willing to make the project better. You can get started by +reading this: + +- [Contributing: The basics](https://black.readthedocs.io/en/latest/contributing/the_basics.html) + +You can also take a look at the rest of the contributing docs or talk with the +developers: + +- [Contributing documentation](https://black.readthedocs.io/en/latest/contributing/index.html) +- [Chat on Discord](https://discord.gg/RtVdv86PrH) + +## Change log + +The log has become rather long. It moved to its own file. + +See [CHANGES](https://black.readthedocs.io/en/latest/change_log.html). + +## Authors + +The author list is quite long nowadays, so it lives in its own file. + +See [AUTHORS.md](./AUTHORS.md) + +## Code of Conduct + +Everyone participating in the _Black_ project, and in particular in the issue tracker, +pull requests, and social media activity, is expected to treat other people with respect +and more generally to follow the guidelines articulated in the +[Python Community Code of Conduct](https://www.python.org/psf/codeofconduct/). + +At the same time, humor is encouraged. In fact, basic familiarity with Monty Python's +Flying Circus is expected. We are not savages. + +And if you _really_ need to slap somebody, do it with a fish while dancing. +# Change Log + +## Unreleased + +### Highlights + + + +### Stable style + + + +### Preview style + + + +### Configuration + + + +### Packaging + + + +### Parser + + + +### Performance + + + +### Output + + + +### _Blackd_ + + + +### Integrations + + + +### Documentation + + + +## 23.1.0 + +### Highlights + +This is the first release of 2023, and following our +[stability policy](https://black.readthedocs.io/en/stable/the_black_code_style/index.html#stability-policy), +it comes with a number of improvements to our stable style, including improvements to +empty line handling, removal of redundant parentheses in several contexts, and output +that highlights implicitly concatenated strings better. + +There are also many changes to the preview style; try out `black --preview` and give us +feedback to help us set the stable style for next year. + +In addition to style changes, Black now automatically infers the supported Python +versions from your `pyproject.toml` file, removing the need to set Black's target +versions separately. + +### Stable style + + + +- Introduce the 2023 stable style, which incorporates most aspects of last year's + preview style (#3418). 
+ +### Stable style + +  + +- Introduce the 2023 stable style, which incorporates most aspects of last year's + preview style (#3418). Specific changes: + - Enforce empty lines before classes and functions with sticky leading comments + (#3302) (22.12.0) + - Reformat empty and whitespace-only files as either an empty file (if no newline is + present) or as a single newline character (if a newline is present) (#3348) + (22.12.0) + - Implicitly concatenated strings used as function args are now wrapped inside + parentheses (#3307) (22.12.0) + - Correctly handle trailing commas that are inside a line's leading non-nested parens + (#3370) (22.12.0) + - `--skip-string-normalization` / `-S` now prevents docstring prefixes from being + normalized as expected (#3168) (since 22.8.0) + - When using `--skip-magic-trailing-comma` or `-C`, trailing commas are stripped from + subscript expressions with more than 1 element (#3209) (22.8.0) + - Implicitly concatenated strings inside a list, set, or tuple are now wrapped inside + parentheses (#3162) (22.8.0) + - Fix a string merging/split issue when a comment is present in the middle of + implicitly concatenated strings on its own line (#3227) (22.8.0) + - Docstring quotes are no longer moved if it would violate the line length limit + (#3044, #3430) (22.6.0) + - Parentheses around return annotations are now managed (#2990) (22.6.0) + - Remove unnecessary parentheses around awaited objects (#2991) (22.6.0) + - Remove unnecessary parentheses in `with` statements (#2926) (22.6.0) + - Remove trailing newlines after code block open (#3035) (22.6.0) + - Code cell separators `#%%` are now standardised to `# %%` (#2919) (22.3.0) + - Remove unnecessary parentheses from `except` statements (#2939) (22.3.0) + - Remove unnecessary parentheses from tuple unpacking in `for` loops (#2945) (22.3.0) + - Avoid magic-trailing-comma in single-element subscripts (#2942) (22.3.0) +- Fix a crash when a colon line is marked between `# fmt: off` and `# fmt: on` (#3439) + +### Preview style + +  + +- Format hex codes in unicode escape sequences in string literals (#2916) +- Add parentheses around `if`-`else` expressions (#2278) +- Improve performance on large expressions that contain many strings (#3467) +- Fix a crash in preview style with assert + parenthesized string (#3415) +- Fix crashes in preview style with walrus operators used in function return annotations + and except clauses (#3423) +- Fix a crash in preview advanced string processing where mixed implicitly concatenated + regular and f-strings start with an empty span (#3463) +- Fix a crash in preview advanced string processing where a standalone comment is placed + before a dict's value (#3469) +- Fix an issue where extra empty lines are added when a decorator has `# fmt: skip` + applied or there is a standalone comment between decorators (#3470) +- Do not put the closing quotes in a docstring on a separate line, even if the line is + too long (#3430) +- Long values in dict literals are now wrapped in parentheses; correspondingly + unnecessary parentheses around short values in dict literals are now removed; long + string lambda values are now wrapped in parentheses (#3440) +- Fix two crashes in preview style involving edge cases with docstrings (#3451) +- Exclude string type annotations from improved string processing; fix crash when the + return type annotation is stringified and spans across multiple lines (#3462) +- Wrap multiple context managers in parentheses when targeting Python 3.9+ (#3489) +- Fix several crashes in preview style with walrus operators used in `with` statements + or tuples (#3473) +- Fix an invalid quote escaping bug in
f-string expressions where it produced invalid + code. Implicitly concatenated f-strings with different quotes can now be merged or + quote-normalized by changing the quotes used in expressions. (#3509) +- Fix crash on `await (yield)` when Black is compiled with mypyc (#3533) + +### Configuration + +  + +- Black now tries to infer its `--target-version` from the project metadata specified in + `pyproject.toml` (#3219) + +### Packaging + +  + +- Upgrade mypyc from `0.971` to `0.991` so mypycified _Black_ can be built on armv7 + (#3380) + - This also fixes some crashes while using compiled Black with a debug build of + CPython +- Drop specific support for the `tomli` requirement on 3.11 alpha releases, working + around a bug that would cause the requirement not to be installed on any non-final + Python releases (#3448) +- Black now depends on `packaging` version `22.0` or later. This is required for new + functionality that needs to parse part of the project metadata (#3219) + +### Output + +  + +- Calling `black --help` multiple times will return the same help contents each time + (#3516) +- Verbose logging now shows the values of `pyproject.toml` configuration variables + (#3392) +- Fix false symlink detection messages in verbose output due to using an incorrect + relative path to the project root (#3385) + +### Integrations + +  + +- Move 3.11 CI to normal flow now that all dependencies support 3.11 (#3446) +- Docker: Add new `latest_prerelease` tag automation to follow latest black alpha + release on docker images (#3465) + +### Documentation + +  + +- Expand `vim-plug` installation instructions to offer more explicit options (#3468) + +## 22.12.0 + +### Preview style + +  + +- Enforce empty lines before classes and functions with sticky leading comments (#3302) +- Reformat empty and whitespace-only files as either an empty file (if no newline is + present) or as a single newline character (if a newline is present) (#3348) +- Implicitly concatenated strings used as function args are now wrapped inside + parentheses (#3307) +- For assignment statements, prefer splitting the right hand side if the left hand side + fits on a single line (#3368) +- Correctly handle trailing commas that are inside a line's leading non-nested parens + (#3370) + +### Configuration + +  + +- Fix incorrectly applied `.gitignore` rules by considering the `.gitignore` location + and the relative path to the target file (#3338) +- Fix incorrectly ignoring `.gitignore` presence when more than one source directory is + specified (#3336) + +### Parser + +  + +- Parsing support has been added for walruses inside generator expressions that are + passed as function args (for example, + `any(match := my_re.match(text) for text in texts)`) (#3327). + +### Integrations + +  + +- Vim plugin: Optionally allow using the system installation of Black via + `let g:black_use_virtualenv = 0` (#3309) + +## 22.10.0 + +### Highlights + +- Runtime support for Python 3.6 has been removed. Formatting 3.6 code will still be + supported until further notice.
+ +### Stable style + +- Fix a crash when `# fmt: on` is used on a different block level than `# fmt: off` + (#3281) + +### Preview style + +- Fix a crash when formatting some dicts with parenthesis-wrapped long string keys + (#3262) + +### Configuration + +- `.ipynb_checkpoints` directories are now excluded by default (#3293) +- Add `--skip-source-first-line` / `-x` option to ignore the first line of source code + while formatting (#3299) + +### Packaging + +- Executables made with PyInstaller will no longer crash when formatting several files + at once on macOS. Native x86-64 executables for macOS are available once again. + (#3275) +- Hatchling is now used as the build backend. This will not have any effect for users + who install Black with its wheels from PyPI. (#3233) +- Faster compiled wheels are now available for CPython 3.11 (#3276) + +### _Blackd_ + +- Windows style (CRLF) newlines will be preserved (#3257). + +### Integrations + +- Vim plugin: add flag (`g:black_preview`) to enable/disable the preview style (#3246) +- Update GitHub Action to support formatting of Jupyter Notebook files via a `jupyter` + option (#3282) +- Update GitHub Action to support use of version specifiers (e.g. `<23`) for Black + version (#3265) + +## 22.8.0 + +### Highlights + +- Python 3.11 is now supported, except for _blackd_ as aiohttp does not support 3.11 as + of publishing (#3234) +- This is the last release that supports running _Black_ on Python 3.6 (formatting 3.6 + code will continue to be supported until further notice) +- Reword the stability policy to say that we may, in rare cases, make changes that + affect code that was not previously formatted by _Black_ (#3155) + +### Stable style + +- Fix an infinite loop when using `# fmt: on/off` in the middle of an expression or code + block (#3158) +- Fix incorrect handling of `# fmt: skip` on colon (`:`) lines (#3148) +- Comments are no longer deleted when a line had spaces removed around power operators + (#2874) + +### Preview style + +- Single-character closing docstring quotes are no longer moved to their own line as + this is invalid. This was a bug introduced in version 22.6.0. 
(#3166) +- `--skip-string-normalization` / `-S` now prevents docstring prefixes from being + normalized as expected (#3168) +- When using `--skip-magic-trailing-comma` or `-C`, trailing commas are stripped from + subscript expressions with more than 1 element (#3209) +- Implicitly concatenated strings inside a list, set, or tuple are now wrapped inside + parentheses (#3162) +- Fix a string merging/split issue when a comment is present in the middle of implicitly + concatenated strings on its own line (#3227) + +### _Blackd_ + +- `blackd` now supports enabling the preview style via the `X-Preview` header (#3217) + +### Configuration + +- Black now uses the presence of debug f-strings to detect target version (#3215) +- Fix misdetection of project root and verbose logging of sources in cases involving + `--stdin-filename` (#3216) +- Immediate `.gitignore` files in source directories given on the command line are now + also respected; previously only `.gitignore` files in the project root and + automatically discovered directories were respected (#3237) + +### Documentation + +- Recommend using BlackConnect in IntelliJ IDEs (#3150) + +### Integrations + +- Vim plugin: prefix messages with `Black: ` so it's clear they come from Black (#3194) +- Docker: changed to a /opt/venv installation + added to PATH to be available to + non-root users (#3202) + +### Output + +- Stop using the deprecated `asyncio.get_event_loop()` to create our event loop, which + removes a DeprecationWarning (#3164) +- Remove logging from internal `blib2to3` library since it regularly emits error logs + about failed caching that can and should be ignored (#3193) + +### Parser + +- Type comments are now included in the AST equivalence check consistently so accidental + deletion raises an error. Though type comments can't be tracked when running on PyPy + 3.7 due to standard library limitations. (#2874) + +### Performance + +- Reduce Black's startup time when formatting a single file by 15-30% (#3211) + +## 22.6.0 + +### Style + +- Fix unstable formatting involving `#fmt: skip` and `# fmt:skip` comments (notice the + lack of spaces) (#2970) + +### Preview style + +- Docstring quotes are no longer moved if it would violate the line length limit (#3044) +- Parentheses around return annotations are now managed (#2990) +- Remove unnecessary parentheses around awaited objects (#2991) +- Remove unnecessary parentheses in `with` statements (#2926) +- Remove trailing newlines after code block open (#3035) + +### Integrations + +- Add `scripts/migrate-black.py` script to ease introduction of Black to a Git project + (#3038) + +### Output + +- Output Python version and implementation as part of `--version` flag (#2997) + +### Packaging + +- Use `tomli` instead of `tomllib` on Python 3.11 builds where `tomllib` is not + available (#2987) + +### Parser + +- [PEP 654](https://peps.python.org/pep-0654/#except) syntax (for example, + `except *ExceptionGroup:`) is now supported (#3016) +- [PEP 646](https://peps.python.org/pep-0646) syntax (for example, + `Array[Batch, *Shape]` or `def fn(*args: *T) -> None`) is now supported (#3071) + +### Vim Plugin + +- Fix the `strtobool` function; it didn't parse true/on/false/off.
(#3025) + +## 22.3.0 + +### Preview style + +- Code cell separators `#%%` are now standardised to `# %%` (#2919) +- Remove unnecessary parentheses from `except` statements (#2939) +- Remove unnecessary parentheses from tuple unpacking in `for` loops (#2945) +- Avoid magic-trailing-comma in single-element subscripts (#2942) + +### Configuration + +- Do not format `__pypackages__` directories by default (#2836) +- Add support for specifying stable version with `--required-version` (#2832). +- Avoid crashing when the user has no homedir (#2814) +- Avoid crashing when md5 is not available (#2905) +- Fix handling of directory junctions on Windows (#2904) + +### Documentation + +- Update pylint config documentation (#2931) + +### Integrations + +- Move test to disable plugin in Vim/Neovim, which speeds up loading (#2896) + +### Output + +- In verbose mode, log when _Black_ is using user-level config (#2861) + +### Packaging + +- Fix Black to work with Click 8.1.0 (#2966) +- On Python 3.11 and newer, use the standard library's `tomllib` instead of `tomli` + (#2903) +- `black-primer`, the deprecated internal devtool, has been removed and copied to a + [separate repository](https://github.com/cooperlees/black-primer) (#2924) + +### Parser + +- Black can now parse starred expressions in the target of `for` and `async for` + statements, e.g. `for item in *items_1, *items_2: pass` (#2879). + +## 22.1.0 + +At long last, _Black_ is no longer a beta product! This is the first non-beta release +and the first release covered by our new +[stability policy](https://black.readthedocs.io/en/stable/the_black_code_style/index.html#stability-policy). + +### Highlights + +- **Remove Python 2 support** (#2740) +- Introduce the `--preview` flag (#2752) + +### Style + +- Deprecate `--experimental-string-processing` and move the functionality under + `--preview` (#2789) +- For stubs, one blank line between class attributes and methods is now kept if there's + at least one pre-existing blank line (#2736) +- Black now normalizes string prefix order (#2297) +- Remove spaces around power operators if both operands are simple (#2726) +- Work around bug that causes unstable formatting in some cases in the presence of the + magic trailing comma (#2807) +- Use parentheses for attribute access on decimal float and int literals (#2799) +- Don't add whitespace for attribute access on hexadecimal, binary, octal, and complex + literals (#2799) +- Treat blank lines in stubs the same inside top-level `if` statements (#2820) +- Fix unstable formatting with semicolons and arithmetic expressions (#2817) +- Fix unstable formatting around magic trailing comma (#2572) + +### Parser + +- Fix mapping cases that contain as-expressions, like `case {"key": 1 | 2 as password}` + (#2686) +- Fix cases that contain multiple top-level as-expressions, like `case 1 as a, 2 as b` + (#2716) +- Fix call patterns that contain as-expressions with keyword arguments, like + `case Foo(bar=baz as quux)` (#2749) +- Tuple unpacking on `return` and `yield` constructs now implies 3.8+ (#2700) +- Unparenthesized tuples on annotated assignments (e.g. + `values: Tuple[int, ...] = 1, 2, 3`) now imply 3.8+ (#2708) +- Fix handling of standalone `match()` or `case()` when there is a trailing newline or a + comment inside of the parentheses. (#2760) +- `from __future__ import annotations` statement now implies Python 3.7+ (#2690) + +### Performance + +- Speed up the new backtracking parser about 4X in general (enabled when + `--target-version` is set to 3.10 and higher).
(#2728) +- _Black_ is now compiled with [mypyc](https://github.com/mypyc/mypyc) for an overall 2x + speed-up. 64-bit Windows, MacOS, and Linux (not including musl) are supported. (#1009, + #2431) + +### Configuration + +- Do not accept bare carriage return line endings in pyproject.toml (#2408) +- Add configuration option (`python-cell-magics`) to format cells with custom magics in + Jupyter Notebooks (#2744) +- Allow setting custom cache directory on all platforms with environment variable + `BLACK_CACHE_DIR` (#2739). +- Enable Python 3.10+ by default, without any extra need to specify + `--target-version=py310`. (#2758) +- Make passing `SRC` or `--code` mandatory and mutually exclusive (#2804) + +### Output + +- Improve error message for invalid regular expression (#2678) +- Improve error message when parsing fails during AST safety check by embedding the + underlying SyntaxError (#2693) +- No longer color diff headers white as it's unreadable in light themed terminals + (#2691) +- Text coloring added in the final statistics (#2712) +- Verbose mode also now describes how a project root was discovered and which paths will + be formatted. (#2526) + +### Packaging + +- All upper version bounds on dependencies have been removed (#2718) +- `typing-extensions` is no longer a required dependency in Python 3.10+ (#2772) +- Set `click` lower bound to `8.0.0` (#2791) + +### Integrations + +- Update GitHub action to support containerized runs (#2748) + +### Documentation + +- Change protocol in pip installation instructions to `https://` (#2761) +- Change HTML theme to Furo primarily for its responsive design and mobile support + (#2793) +- Deprecate the `black-primer` tool (#2809) +- Document Python support policy (#2819) + +## 21.12b0 + +### _Black_ + +- Fix determination of f-string expression spans (#2654) +- Fix bad formatting of error messages about EOF in multi-line statements (#2343) +- Functions and classes in blocks now have more consistent surrounding spacing (#2472) + +#### Jupyter Notebook support + +- Cell magics are now only processed if they are known Python cell magics. Earlier, all + cell magics were tokenized, leading to possible indentation errors e.g. with + `%%writefile`. (#2630) +- Fix assignment to environment variables in Jupyter Notebooks (#2642) + +#### Python 3.10 support + +- Point users to using `--target-version py310` if we detect 3.10-only syntax (#2668) +- Fix `match` statements with open sequence subjects, like `match a, b:` or + `match a, *b:` (#2639) (#2659) +- Fix `match`/`case` statements that contain `match`/`case` soft keywords multiple + times, like `match re.match()` (#2661) +- Fix `case` statements with an inline body (#2665) +- Fix styling of starred expressions inside `match` subject (#2667) +- Fix parser error location on invalid syntax in a `match` statement (#2649) +- Fix Python 3.10 support on platforms without ProcessPoolExecutor (#2631) +- Improve parsing performance on code that uses `match` under `--target-version py310` + up to ~50% (#2670) + +### Packaging + +- Remove dependency on `regex` (#2644) (#2663) + +## 21.11b1 + +### _Black_ + +- Bumped regex version minimum to 2021.4.4 to fix Pattern class usage (#2621) + +## 21.11b0 + +### _Black_ + +- Warn about Python 2 deprecation in more cases by improving Python 2 only syntax + detection (#2592) +- Add experimental PyPy support (#2559) +- Add partial support for the match statement. 
As it's experimental, it's only enabled + when `--target-version py310` is explicitly specified (#2586) +- Add support for parenthesized `with` statements (#2586) +- Declare support for Python 3.10 for running Black (#2562) + +### Integrations + +- Fixed vim plugin with Python 3.10 by removing deprecated distutils import (#2610) +- The vim plugin now parses `skip_magic_trailing_comma` from pyproject.toml (#2613) + +## 21.10b0 + +### _Black_ + +- Document the stability policy that will apply to non-beta releases (#2529) +- Add new `--workers` parameter (#2514) +- Fixed feature detection for positional-only arguments in lambdas (#2532) +- Bumped typed-ast version minimum to 1.4.3 for 3.10 compatibility (#2519) +- Fixed a Python 3.10 compatibility issue where the loop argument was still being passed + even though it had been removed (#2580) +- Deprecate Python 2 formatting support (#2523) + +### _Blackd_ + +- Remove dependency on aiohttp-cors (#2500) +- Bump required aiohttp version to 3.7.4 (#2509) + +### _Black-Primer_ + +- Add primer support for `--projects` (#2555) +- Print primer summary after individual failures (#2570) + +### Integrations + +- Allow passing `target_version` in the vim plugin (#1319) +- Install build tools in the Dockerfile and use a multi-stage build to keep the image + size down (#2582) + +## 21.9b0 + +### Packaging + +- Fix missing modules in self-contained binaries (#2466) +- Fix missing toml extra used during installation (#2475) + +## 21.8b0 + +### _Black_ + +- Add support for formatting Jupyter Notebook files (#2357) +- Move from `appdirs` dependency to `platformdirs` (#2375) +- Present a more user-friendly error if .gitignore is invalid (#2414) +- The failsafe for accidentally added backslashes in f-string expressions has been + hardened to handle more edge cases during quote normalization (#2437) +- Avoid changing a function return type annotation's type to a tuple by adding a + trailing comma (#2384) +- Parsing support has been added for unparenthesized walruses in set literals, set + comprehensions, and indices (#2447). +- Pin `setuptools-scm` build-time dependency version (#2457) +- Exclude typing-extensions version 3.10.0.1 due to it being broken on Python 3.10 + (#2460) + +### _Blackd_ + +- Replace sys.exit(-1) with raise ImportError as it plays more nicely with tools that + scan installed packages (#2440) + +### Integrations + +- The provided pre-commit hooks no longer specify `language_version` to avoid overriding + `default_language_version` (#2430) + +## 21.7b0 + +### _Black_ + +- Configuration files using TOML features higher than spec v0.5.0 are now supported + (#2301) +- Add primer support and test for code piped into black via STDIN (#2315) +- Fix internal error when `FORCE_OPTIONAL_PARENTHESES` feature is enabled (#2332) +- Accept empty stdin (#2346) +- Provide a more useful error when parsing fails during AST safety checks (#2304) + +### Docker + +- Add new `latest_release` tag automation to follow latest black release on docker + images (#2374) + +### Integrations + +- The vim plugin now searches upwards from the directory containing the current buffer + instead of the current working directory for pyproject.toml.
(#1871) +- The vim plugin now reads the correct string normalization option in pyproject.toml + (#1869) +- The vim plugin no longer crashes Black when there are boolean values in pyproject.toml + (#1869) + +## 21.6b0 + +### _Black_ + +- Fix failure caused by `fmt: skip` and indentation (#2281) +- Account for += assignment when deciding whether to split string (#2312) +- Correct max string length calculation when there are string operators (#2292) +- Fixed option usage when using the `--code` flag (#2259) +- Do not call `uvloop.install()` when _Black_ is used as a library (#2303) +- Added `--required-version` option to require a specific version to be running (#2300) +- Fix incorrect custom breakpoint indices when string group contains fake f-strings + (#2311) +- Fix regression where `R` prefixes would be lowercased for docstrings (#2285) +- Fix handling of named escapes (`\N{...}`) when `--experimental-string-processing` is + used (#2319) + +### Integrations + +- The official Black action now supports choosing what version to use, and supports the + 3 major OSes. (#1940) + +## 21.5b2 + +### _Black_ + +- A space is no longer inserted into empty docstrings (#2249) +- Fix handling of .gitignore files containing non-ASCII characters on Windows (#2229) +- Respect `.gitignore` files in all levels, not only `root/.gitignore` file (apply + `.gitignore` rules like `git` does) (#2225) +- Restored compatibility with Click 8.0 on Python 3.6 when LANG=C used (#2227) +- Add extra uvloop install + import support if in python env (#2258) +- Fix `--experimental-string-processing` crash when matching parens are not found + (#2283) +- Make sure to split lines that start with a string operator (#2286) +- Fix regular expression that _Black_ uses to identify f-expressions (#2287) + +### _Blackd_ + +- Add a lower bound for the `aiohttp-cors` dependency. Only 0.4.0 or higher is + supported. (#2231) + +### Packaging + +- Release self-contained x86_64 MacOS binaries as part of the GitHub release pipeline + (#2198) +- Always build binaries with the latest available Python (#2260) + +### Documentation + +- Add discussion of magic comments to FAQ page (#2272) +- `--experimental-string-processing` will be enabled by default in the future (#2273) +- Fix typos discovered by codespell (#2228) +- Fix Vim plugin installation instructions. (#2235) +- Add new Frequently Asked Questions page (#2247) +- Fix encoding + symlink issues preventing proper build on Windows (#2262) + +## 21.5b1 + +### _Black_ + +- Refactor `src/black/__init__.py` into many files (#2206) + +### Documentation + +- Replaced all remaining references to the + [`master`](https://github.com/psf/black/tree/main) branch with the + [`main`](https://github.com/psf/black/tree/main) branch. Some additional changes in + the source code were also made. (#2210) +- Significantly reorganized the documentation to make much more sense. Check them out by + heading over to [the stable docs on RTD](https://black.readthedocs.io/en/stable/). + (#2174) + +## 21.5b0 + +### _Black_ + +- Set `--pyi` mode if `--stdin-filename` ends in `.pyi` (#2169) +- Stop detecting target version as Python 3.9+ with pre-PEP-614 decorators that are + being called but with no arguments (#2182) + +### _Black-Primer_ + +- Add `--no-diff` to black-primer to suppress formatting changes (#2187) + +## 21.4b2 + +### _Black_ + +- Fix crash if the user configuration directory is inaccessible.
(#2158) + +- Clarify + [circumstances](https://github.com/psf/black/blob/master/docs/the_black_code_style.md#pragmatism) + in which _Black_ may change the AST (#2159) + +- Allow `.gitignore` rules to be overridden by specifying `exclude` in `pyproject.toml` + or on the command line. (#2170) + +### _Packaging_ + +- Install `primer.json` (used by `black-primer` by default) with black. (#2154) + +## 21.4b1 + +### _Black_ + +- Fix crash on docstrings ending with "\\ ". (#2142) + +- Fix crash when atypical whitespace is cleaned out of docstrings (#2120) + +- Reflect the `--skip-magic-trailing-comma` and `--experimental-string-processing` flags + in the name of the cache file. Without this fix, changes in these flags would not take + effect if the cache had already been populated. (#2131) + +- Don't remove necessary parentheses from assignment expressions containing assert / + return statements. (#2143) + +### _Packaging_ + +- Bump pathspec to >= 0.8.1 to solve invalid .gitignore exclusion handling + +## 21.4b0 + +### _Black_ + +- Fixed a rare but annoying formatting instability created by the combination of + optional trailing commas inserted by `Black` and optional parentheses looking at + pre-existing "magic" trailing commas. This fixes issue #1629 and all of its many many + duplicates. (#2126) + +- `Black` now processes one-line docstrings by stripping leading and trailing spaces, + and adding a padding space when needed to break up """". (#1740) + +- `Black` now cleans up leading non-breaking spaces in comments (#2092) + +- `Black` now respects `--skip-string-normalization` when normalizing multiline + docstring quotes (#1637) + +- `Black` no longer removes all empty lines between non-function code and decorators + when formatting typing stubs. Now `Black` enforces a single empty line. (#1646) + +- `Black` no longer adds an incorrect space after a parenthesized assignment expression + in if/while statements (#1655) + +- Added `--skip-magic-trailing-comma` / `-C` to avoid using trailing commas as a reason + to split lines (#1824) + +- fixed a crash when PWD=/ on POSIX (#1631) + +- fixed "I/O operation on closed file" when using --diff (#1664) + +- Prevent coloured diff output being interleaved with multiple files (#1673) + +- Added support for PEP 614 relaxed decorator syntax on python 3.9 (#1711) + +- Added parsing support for unparenthesized tuples and yield expressions in annotated + assignments (#1835) + +- added `--extend-exclude` argument (PR #2005) + +- speed up caching by avoiding pathlib (#1950) + +- `--diff` correctly indicates when a file doesn't end in a newline (#1662) + +- Added `--stdin-filename` argument to allow stdin to respect `--force-exclude` rules + (#1780) + +- Lines ending with `fmt: skip` will no longer be formatted (#1800) + +- PR #2053: Black no longer relies on typed-ast for Python 3.8 and higher + +- PR #2053: Python 2 support is now optional, install with + `python3 -m pip install black[python2]` to maintain support.
+ +- Exclude `venv` directory by default (#1683) + +- Fixed "Black produced code that is not equivalent to the source" when formatting + Python 2 docstrings (#2037) + +### _Packaging_ + +- Self-contained native _Black_ binaries are now provided for releases via GitHub + Releases (#1743) + +## 20.8b1 + +### _Packaging_ + +- explicitly depend on Click 7.1.2 or newer as `Black` no longer works with versions + older than 7.0 + +## 20.8b0 + +### _Black_ + +- re-implemented support for explicit trailing commas: now it works consistently within + any bracket pair, including nested structures (#1288 and duplicates) + +- `Black` now reindents docstrings when reindenting code around them (#1053) + +- `Black` now shows colored diffs (#1266) + +- `Black` is now packaged using 'py3' tagged wheels (#1388) + +- `Black` now supports Python 3.8 code, e.g. star expressions in return statements + (#1121) + +- `Black` no longer normalizes capital R-string prefixes as those have a + community-accepted meaning (#1244) + +- `Black` now uses exit code 2 when the specified configuration file doesn't exist + (#1361) + +- `Black` now works on AWS Lambda (#1141) + +- added `--force-exclude` argument (#1032) + +- removed deprecated `--py36` option (#1236) + +- fixed `--diff` output when EOF is encountered (#526) + +- fixed `# fmt: off` handling around decorators (#560) + +- fixed unstable formatting with some `# type: ignore` comments (#1113) + +- fixed invalid removal on organizing brackets followed by indexing (#1575) + +- introduced `black-primer`, a CI tool that allows us to run regression tests against + existing open source users of Black (#1402) + +- introduced property-based fuzzing to our test suite based on Hypothesis and + Hypothesmith (#1566) + +- implemented experimental and disabled by default long string rewrapping (#1132), + hidden under a `--experimental-string-processing` flag while it's being worked on; + this is an undocumented and unsupported feature, you lose Internet points for + depending on it (#1609) + +### Vim plugin + +- prefer virtualenv packages over global packages (#1383) + +## 19.10b0 + +- added support for PEP 572 assignment expressions (#711) + +- added support for PEP 570 positional-only arguments (#943) + +- added support for async generators (#593) + +- added support for pre-splitting collections by putting an explicit trailing comma + inside (#826) + +- added `black -c` as a way to format code passed from the command line (#761) + +- `--safe` now works with Python 2 code (#840) + +- fixed grammar selection for Python 2-specific code (#765) + +- fixed feature detection for trailing commas in function definitions and call sites + (#763) + +- `# fmt: off`/`# fmt: on` comment pairs placed multiple times within the same block of + code now behave correctly (#1005) + +- _Black_ no longer crashes on Windows machines with more than 61 cores (#838) + +- _Black_ no longer crashes on standalone comments prepended with a backslash (#767) + +- _Black_ no longer crashes on `from` ...
`import` blocks with comments (#829) + +- _Black_ no longer crashes on Python 3.7 on some platform configurations (#494) + +- _Black_ no longer fails on comments in from-imports (#671) + +- _Black_ no longer fails when the file starts with a backslash (#922) + +- _Black_ no longer merges regular comments with type comments (#1027) + +- _Black_ no longer splits long lines that contain type comments (#997) + +- removed unnecessary parentheses around `yield` expressions (#834) + +- added parentheses around long tuples in unpacking assignments (#832) + +- added parentheses around complex powers when they are prefixed by a unary operator + (#646) + +- fixed bug that led _Black_ to format some code with a line length target of 1 (#762) + +- _Black_ no longer introduces quotes in f-string subexpressions on string boundaries + (#863) + +- if _Black_ puts parentheses around a single expression, it moves comments to the + wrapped expression instead of after the brackets (#872) + +- `blackd` now returns the version of _Black_ in the response headers (#1013) + +- `blackd` can now output the diff of formats on source code when the `X-Diff` header is + provided (#969) + +## 19.3b0 + +- new option `--target-version` to control which Python versions _Black_-formatted code + should target (#618) + +- deprecated `--py36` (use `--target-version=py36` instead) (#724) + +- _Black_ no longer normalizes numeric literals to include `_` separators (#696) + +- long `del` statements are now split into multiple lines (#698) + +- type comments are no longer mangled in function signatures + +- improved performance of formatting deeply nested data structures (#509) + +- _Black_ now properly formats multiple files in parallel on Windows (#632) + +- _Black_ now creates cache files atomically which allows it to be used in parallel + pipelines (like `xargs -P8`) (#673) + +- _Black_ now correctly indents comments in files that were previously formatted with + tabs (#262) + +- `blackd` now supports CORS (#622) + +## 18.9b0 + +- numeric literals are now formatted by _Black_ (#452, #461, #464, #469): + + - numeric literals are normalized to include `_` separators on Python 3.6+ code + + - added `--skip-numeric-underscore-normalization` to disable the above behavior and + leave numeric underscores as they were in the input + + - code with `_` in numeric literals is recognized as Python 3.6+ + + - most letters in numeric literals are lowercased (e.g., in `1e10`, `0x01`) + + - hexadecimal digits are always uppercased (e.g.
`0xBADC0DE`) + +- added `blackd`, see + [its documentation](https://github.com/psf/black/blob/18.9b0/README.md#blackd) for + more info (#349) + +- adjacent string literals are now correctly split into multiple lines (#463) + +- trailing comma is now added to single imports that don't fit on a line (#250) + +- cache is now populated when `--check` is successful for a file which speeds up + consecutive checks of properly formatted unmodified files (#448) + +- whitespace at the beginning of the file is now removed (#399) + +- fixed mangling [pweave](http://mpastell.com/pweave/) and + [Spyder IDE](https://www.spyder-ide.org/) special comments (#532) + +- fixed unstable formatting when unpacking big tuples (#267) + +- fixed parsing of `__future__` imports with renames (#389) + +- fixed scope of `# fmt: off` when directly preceding `yield` and other nodes (#385) + +- fixed formatting of lambda expressions with default arguments (#468) + +- fixed `async for` statements: _Black_ no longer breaks them into separate lines (#372) + +- note: the Vim plugin stopped registering `,=` as a default chord as it turned out to + be a bad idea (#415) + +## 18.6b4 + +- hotfix: don't freeze when multiple comments directly precede `# fmt: off` (#371) + +## 18.6b3 + +- typing stub files (`.pyi`) now have blank lines added after constants (#340) + +- `# fmt: off` and `# fmt: on` are now much more dependable: + + - they now work also within bracket pairs (#329) + + - they now correctly work across function/class boundaries (#335) + + - they now work when an indentation block starts with empty lines or misaligned + comments (#334) + +- made Click not fail on invalid environments; note that Click is right but the + likelihood we'll need to access non-ASCII file paths when dealing with Python source + code is low (#277) + +- fixed improper formatting of f-strings with quotes inside interpolated expressions + (#322) + +- fixed unnecessary slowdown when long list literals were found in a file + +- fixed unnecessary slowdown on AST nodes with very many siblings + +- fixed cannibalizing backslashes during string normalization + +- fixed a crash due to symbolic links pointing outside of the project directory (#338) + +## 18.6b2 + +- added `--config` (#65) + +- added `-h` equivalent to `--help` (#316) + +- fixed improper unmodified file caching when `-S` was used + +- fixed extra space in string unpacking (#305) + +- fixed formatting of empty triple quoted strings (#313) + +- fixed unnecessary slowdown in comment placement calculation on lines without comments + +## 18.6b1 + +- hotfix: don't output human-facing information on stdout (#299) + +- hotfix: don't output cake emoji on non-zero return code (#300) + +## 18.6b0 + +- added `--include` and `--exclude` (#270) + +- added `--skip-string-normalization` (#118) + +- added `--verbose` (#283) + +- the header output in `--diff` now actually conforms to the unified diff spec + +- fixed long trivial assignments being wrapped in unnecessary parentheses (#273) + +- fixed unnecessary parentheses when a line contained multiline strings (#232) + +- fixed stdin handling not working correctly if an old version of Click was used (#276) + +- _Black_ now preserves line endings when formatting a file in place (#258) + +## 18.5b1 + +- added `--pyi` (#249) + +- added `--py36` (#249) + +- Python grammar pickle caches are stored with the formatting caches, making _Black_ + work in environments where site-packages is not user-writable (#192) + +- _Black_ now enforces a PEP 257 empty line after
a class-level docstring (and/or + fields) and the first method + +- fixed invalid code produced when standalone comments were present in a trailer that + was omitted from line splitting on a large expression (#237) + +- fixed optional parentheses being removed within `# fmt: off` sections (#224) + +- fixed invalid code produced when stars in very long imports were incorrectly wrapped + in optional parentheses (#234) + +- fixed unstable formatting when inline comments were moved around in a trailer that was + omitted from line splitting on a large expression (#238) + +- fixed extra empty line between a class declaration and the first method if no class + docstring or fields are present (#219) + +- fixed extra empty line between a function signature and an inner function or inner + class (#196) + +## 18.5b0 + +- call chains are now formatted according to the + [fluent interfaces](https://en.wikipedia.org/wiki/Fluent_interface) style (#67) + +- data structure literals (tuples, lists, dictionaries, and sets) are now also always + exploded like imports when they don't fit in a single line (#152) + +- slices are now formatted according to PEP 8 (#178) + +- parentheses are now also managed automatically on the right-hand side of assignments + and return statements (#140) + +- math operators now use their respective priorities for delimiting multiline + expressions (#148) + +- optional parentheses are now omitted on expressions that start or end with a bracket + and only contain a single operator (#177) + +- empty parentheses in a class definition are now removed (#145, #180) + +- string prefixes are now standardized to lowercase and `u` is removed on Python 3.6+ + only code and Python 2.7+ code with the `unicode_literals` future import (#188, #198, + #199) + +- typing stub files (`.pyi`) are now formatted in a style that is consistent with PEP + 484 (#207, #210) + +- progress when reformatting many files is now reported incrementally + +- fixed trailers (content with brackets) being unnecessarily exploded into their own + lines after a dedented closing bracket (#119) + +- fixed an invalid trailing comma sometimes left in imports (#185) + +- fixed non-deterministic formatting when multiple pairs of removable parentheses were + used (#183) + +- fixed multiline strings being unnecessarily wrapped in optional parentheses in long + assignments (#215) + +- fixed not splitting long from-imports with only a single name + +- fixed Python 3.6+ file discovery by also looking at function calls with unpacking. + This fixed non-deterministic formatting if trailing commas were used both in function + signatures with stars and function calls with stars but the former would be + reformatted to a single line.
+ +- fixed crash on dealing with optional parentheses (#193) + +- fixed "is", "is not", "in", and "not in" not being considered operators for splitting + purposes + +- fixed crash when dead symlinks were encountered + +## 18.4a4 + +- don't populate the cache on `--check` (#175) + +## 18.4a3 + +- added a "cache"; files already reformatted that haven't changed on disk won't be + reformatted again (#109) + +- `--check` and `--diff` are no longer mutually exclusive (#149) + +- generalized star expression handling, including double stars; this fixes + multiplication making expressions "unsafe" for trailing commas (#132) + +- _Black_ no longer enforces putting empty lines behind control flow statements (#90) + +- _Black_ now splits imports like "Mode 3 + trailing comma" of isort (#127) + +- fixed comment indentation when a standalone comment closes a block (#16, #32) + +- fixed standalone comments receiving extra empty lines if immediately preceding a + class, def, or decorator (#56, #154) + +- fixed `--diff` not showing entire path (#130) + +- fixed parsing of complex expressions after star and double stars in function calls + (#2) + +- fixed invalid splitting on comma in lambda arguments (#133) + +- fixed missing splits of ternary expressions (#141) + +## 18.4a2 + +- fixed parsing of unaligned standalone comments (#99, #112) + +- fixed placement of dictionary unpacking inside dictionary literals (#111) + +- Vim plugin now works on Windows, too + +- fixed unstable formatting when encountering unnecessarily escaped quotes in a string + (#120) + +## 18.4a1 + +- added `--quiet` (#78) + +- added automatic parentheses management (#4) + +- added [pre-commit](https://pre-commit.com) integration (#103, #104) + +- fixed reporting on `--check` with multiple files (#101, #102) + +- fixed removing backslash escapes from raw strings (#100, #105) + +## 18.4a0 + +- added `--diff` (#87) + +- add line breaks before all delimiters, except in cases like commas, to better comply + with PEP 8 (#73) + +- standardize string literals to use double quotes (almost) everywhere (#75) + +- fixed handling of standalone comments within nested bracketed expressions; _Black_ + will no longer produce super long lines or put all standalone comments at the end of + the expression (#22) + +- fixed 18.3a4 regression: don't crash and burn on empty lines with trailing whitespace + (#80) + +- fixed 18.3a4 regression: `# yapf: disable` usage as trailing comment would cause + _Black_ to not emit the rest of the file (#95) + +- when CTRL+C is pressed while formatting many files, _Black_ no longer freaks out with + a flurry of asyncio-related exceptions + +- only allow up to two empty lines on module level and only single empty lines within + functions (#74) + +## 18.3a4 + +- `# fmt: off` and `# fmt: on` are implemented (#5) + +- automatic detection of deprecated Python 2 forms of print statements and exec + statements in the formatted file (#49) + +- use proper spaces for complex expressions in default values of typed function + arguments (#60) + +- only return exit code 1 when --check is used (#50) + +- don't remove single trailing commas from square bracket indexing (#59) + +- don't omit whitespace if the previous factor leaf wasn't a math operator (#55) + +- omit extra space in kwarg unpacking if it's the first argument (#46) + +- omit extra space in + [Sphinx auto-attribute comments](http://www.sphinx-doc.org/en/stable/ext/autodoc.html#directive-autoattribute) + (#68) + +## 18.3a3 + +- don't remove single empty lines outside of bracketed
expressions (#19) + +- added ability to pipe formatting from stdin to stdout (#25) + +- restored ability to format code with legacy usage of `async` as a name (#20, #42) + +- even better handling of numpy-style array indexing (#33, again) + +## 18.3a2 + +- changed positioning of binary operators to occur at beginning of lines instead of at + the end, following + [a recent change to PEP 8](https://github.com/python/peps/commit/c59c4376ad233a62ca4b3a6060c81368bd21e85b) + (#21) + +- ignore empty bracket pairs while splitting. This avoids very weird-looking formatting + (#34, #35) + +- remove a trailing comma if there is a single argument to a call + +- if top-level functions were separated by a comment, don't put four empty lines after + the upper function + +- fixed unstable formatting of newlines with imports + +- fixed unintentional folding of post scriptum standalone comments into last statement + if it was a simple statement (#18, #28) + +- fixed missing space in numpy-style array indexing (#33) + +- fixed spurious space after star-based unary expressions (#31) + +## 18.3a1 + +- added `--check` + +- only put trailing commas in function signatures and calls if it's safe to do so. If + the file is Python 3.6+ it's always safe, otherwise only safe if there are no `*args` + or `**kwargs` used in the signature or call. (#8) + +- fixed invalid spacing of dots in relative imports (#6, #13) + +- fixed invalid splitting after comma on unpacked variables in for-loops (#23) + +- fixed spurious space in parenthesized set expressions (#7) + +- fixed spurious space after opening parentheses and in default arguments (#14, #17) + +- fixed spurious space after unary operators when the operand was a complex expression + (#15) + +## 18.3a0 + +- first published version, Happy 🍰 Day 2018!
+ +- alpha quality + +- date-versioned (see: ) diff --git a/.venv/lib/python3.11/site-packages/black-23.1.0.dist-info/RECORD b/.venv/lib/python3.11/site-packages/black-23.1.0.dist-info/RECORD new file mode 100644 index 0000000..8dc945c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/black-23.1.0.dist-info/RECORD @@ -0,0 +1,115 @@ +../../../bin/black,sha256=ggfgPyn_h81r1xTQ012MSxX8NcORQDVYvKztN2UJNEE,283 +../../../bin/blackd,sha256=W1rXuPgZQ8C1LSDpRh7Rnn916KEsfpd58-0egZsCvc8,284 +6b397dd64e00b5aff23d__mypyc.cpython-311-x86_64-linux-gnu.so,sha256=GQNLD6JN-qViU3ck3iGRMCewCjbO2k9ehTncOh4tK70,3993856 +__pycache__/_black_version.cpython-311.pyc,, +_black_version.py,sha256=u3_onRL-3_Ow8zYRCCNEH6AuMORqTwvKRYCtQKLUBfU,19 +black-23.1.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +black-23.1.0.dist-info/METADATA,sha256=-pB22iz9v6UAIzfgjxUSvVLpx6uiuYIsqBh3EFD9g7c,58959 +black-23.1.0.dist-info/RECORD,, +black-23.1.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +black-23.1.0.dist-info/WHEEL,sha256=EH5KzJshMQk4ddij7sGfE9Nvw8pfffB7c61DapoFw10,148 +black-23.1.0.dist-info/entry_points.txt,sha256=qBIyywHwGRkJj7kieq86kqf77rz3qGC4Joj36lHnxwc,78 +black-23.1.0.dist-info/licenses/AUTHORS.md,sha256=cpHlH2nYyIXWEnA0LccoGenN-g5HZ7341klZ7MwbdIU,8043 +black-23.1.0.dist-info/licenses/LICENSE,sha256=nAQo8MO0d5hQz1vZbhGqqK_HLUqG1KNiI9erouWNbgA,1080 +black/__init__.cpython-311-x86_64-linux-gnu.so,sha256=VhyeZgW4aZsL5lCmCJxMOjtNe70NGL8wNcdVnc1fADE,8248 +black/__init__.py,sha256=VWazgq80l38r1Cn5Ea7fH5bMuDdFuQJtYgBKGWJganI,46682 +black/__main__.py,sha256=mogeA4o9zt4w-ufKvaQjSEhtSgQkcMVLK9ChvdB5wH8,47 +black/__pycache__/__init__.cpython-311.pyc,, +black/__pycache__/__main__.cpython-311.pyc,, +black/__pycache__/brackets.cpython-311.pyc,, +black/__pycache__/cache.cpython-311.pyc,, +black/__pycache__/comments.cpython-311.pyc,, +black/__pycache__/concurrency.cpython-311.pyc,, +black/__pycache__/const.cpython-311.pyc,, +black/__pycache__/debug.cpython-311.pyc,, +black/__pycache__/files.cpython-311.pyc,, +black/__pycache__/handle_ipynb_magics.cpython-311.pyc,, +black/__pycache__/linegen.cpython-311.pyc,, +black/__pycache__/lines.cpython-311.pyc,, +black/__pycache__/mode.cpython-311.pyc,, +black/__pycache__/nodes.cpython-311.pyc,, +black/__pycache__/numerics.cpython-311.pyc,, +black/__pycache__/output.cpython-311.pyc,, +black/__pycache__/parsing.cpython-311.pyc,, +black/__pycache__/report.cpython-311.pyc,, +black/__pycache__/rusty.cpython-311.pyc,, +black/__pycache__/strings.cpython-311.pyc,, +black/__pycache__/trans.cpython-311.pyc,, +black/brackets.cpython-311-x86_64-linux-gnu.so,sha256=VDIeD54I8rOJXiaRRpNd0ypRzG-oOgkYugPhOPGZrlQ,8256 +black/brackets.py,sha256=UH8o6FxSbum5PyPWWIPJoXYnSL3ssu39IXzuFPZRSxw,12273 +black/cache.cpython-311-x86_64-linux-gnu.so,sha256=vCqfylt7IcLLhvGWrLGfH57yMV__83mcwFgs2WaNvqs,8256 +black/cache.py,sha256=yC8aCiOGpNq2kqsrtSUCjjH6NmTXMkUKg9gO9w4pUEg,2946 +black/comments.cpython-311-x86_64-linux-gnu.so,sha256=tWJF4rkTCxu0JGXsROV3g0Zp4PhekMuEVMkhbmfxfdw,8256 +black/comments.py,sha256=AMfuoZq4ISyf8l6AUJtKAM4Y1RknjpVwXQ3xONqUFLA,12720 +black/concurrency.py,sha256=8X2MKLFDPYNIV4Fca9065e6f9pNeORjvFRxKP14ym8k,6286 +black/const.cpython-311-x86_64-linux-gnu.so,sha256=rB9yo4zdcgqWBd9HJEwx656MgRUxGOpwNzUXi2B-nfg,8256 +black/const.py,sha256=4_6Bjr4eLrEEGY8uX_ZgR1Y90f3ok21RGlgDFgwGfjQ,284 +black/debug.py,sha256=PDBJeevHiMPzdQTZmC4kCZbQxyBXOSzQvIVnrFCXALU,1594 +black/files.py,sha256=W-V0evMTr3KAd4ZWK1hd-rmAzF2NrggiCTTqCQT7iB4,13448 
+black/handle_ipynb_magics.cpython-311-x86_64-linux-gnu.so,sha256=1SXeBdZ3TniAe8zdaE2oOi9I3EqiHeVrvfpXxfNCkfs,8280 +black/handle_ipynb_magics.py,sha256=bH0rwjyECd-phroNfrrQIXOvergQjTzcU6rFMkhdNKU,13501 +black/linegen.cpython-311-x86_64-linux-gnu.so,sha256=7qxMC3ygMNbGnwMwKLANStvQ604Zm_7CN_SxCfZXk8Q,8256 +black/linegen.py,sha256=sKWAhW57fpgIoJYKl7fDPr6jx8h8jtxvv0OXqTTUzyc,59117 +black/lines.cpython-311-x86_64-linux-gnu.so,sha256=tok8uP3SJY_R72m-dT1GIlokH53M9xan_QoyhfeCKKU,8256 +black/lines.py,sha256=yjcTQiyh_3ieNXTGYRTkjIEx69nYlfaUgAnUPqwTNQs,30834 +black/mode.cpython-311-x86_64-linux-gnu.so,sha256=uyxFZfbkIq9wZLtCk-n-Eabvu2VbTmPhjC-KVhOC5Uk,8248 +black/mode.py,sha256=SsuB0FummtWMtWT3OybqcPdrXUMBDYOLVQIoHIxzi-4,7049 +black/nodes.cpython-311-x86_64-linux-gnu.so,sha256=5mjT2mgQ8T2Frin9CVZ7xwfWQz6IukSFXebulW-5jlc,8256 +black/nodes.py,sha256=OEL_CByeL3iy9KVB7BtAqFKbMKm90HfvErBFCNUMuEQ,24864 +black/numerics.cpython-311-x86_64-linux-gnu.so,sha256=0TM-5uncg3qK9ys0J27emPOQBjw7XYUrZFJFl3lG__k,8256 +black/numerics.py,sha256=TBID6blEBXZgGIigMXS5JpX2-Trv9lK3WoWPkkHrpac,1653 +black/output.py,sha256=qfdOuT8z5WSm_GxwP24X5XrjrxPacjj1k1gC40crJGw,3486 +black/parsing.cpython-311-x86_64-linux-gnu.so,sha256=9sM2ef97tEJl3vINRdvkj1xnlkZyvVMlr6qRQxgW_ss,8256 +black/parsing.py,sha256=N5UdkgpgMnKQOBMFfAjd7y7RFyrQuh4A2PpajTFTH04,9792 +black/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +black/report.py,sha256=vcZXcQwoT2LPYD0AoL7w3tshKw_i5DjxGRsqqLZQksc,3451 +black/rusty.cpython-311-x86_64-linux-gnu.so,sha256=Xs9ZgjZgaNlXx_uxk2ejkVsdU9MCi4B05vOqBb8Tw_o,8256 +black/rusty.py,sha256=kmKIJpD9J0bPfkQ67Sy8HkVcx-5CF02acggiaaxM7sc,556 +black/strings.cpython-311-x86_64-linux-gnu.so,sha256=jsLtPDpdvT0VHFprlQHlv_OMWvy4wNSuPQhd9nyFtTs,8256 +black/strings.py,sha256=RHzuLjU3elJfT8DbIJ3Nmb1qp8qvMILyKb40YvU-bhc,9401 +black/trans.cpython-311-x86_64-linux-gnu.so,sha256=KDMtZkMFw6uaR7rylaj4ZNVacP7NQh5VnUQeKkFv664,8256 +black/trans.py,sha256=m61WQDDbppBNVUbCVjXRPUn7uisH0qAOoosoNnxAFAI,90097 +blackd/__init__.py,sha256=cTJotjnER9YB0rDWo3hOWSg4v0mNAx5DFmCJdyB1s5k,8068 +blackd/__main__.py,sha256=L4xAcDh1K5zb6SsJB102AewW2G13P9-w2RiEwuFj8WA,37 +blackd/__pycache__/__init__.cpython-311.pyc,, +blackd/__pycache__/__main__.cpython-311.pyc,, +blackd/__pycache__/middlewares.cpython-311.pyc,, +blackd/middlewares.py,sha256=QS7cs86Ojuaqh64dGneimhJ-f30rDI646c27ts4Dwh0,1585 +blib2to3/Grammar.txt,sha256=uIxG7IrdemRi94OtTUaVoW2ktYyKyuXHluWFMMq8Af4,11278 +blib2to3/LICENSE,sha256=V4mIG4rrnJH1g19bt8q-hKD-zUuyvi9UyeaVenjseZ0,12762 +blib2to3/PatternGrammar.txt,sha256=7lul2ztnIqDi--JWDrwciD5yMo75w7TaHHxdHMZJvOM,793 +blib2to3/README,sha256=C_K4Os-RntLrCpCHOKr43cnhQAaG94psjrgOX6tywg8,1071 +blib2to3/__init__.py,sha256=9_8wL9Scv8_Cs8HJyJHGvx1vwXErsuvlsAqNZLcJQR0,8 +blib2to3/__pycache__/__init__.cpython-311.pyc,, +blib2to3/__pycache__/pygram.cpython-311.pyc,, +blib2to3/__pycache__/pytree.cpython-311.pyc,, +blib2to3/pgen2/__init__.py,sha256=hY6w9QUzvTvRb-MoFfd_q_7ZLt6IUHC2yxWCfsZupQA,143 +blib2to3/pgen2/__pycache__/__init__.cpython-311.pyc,, +blib2to3/pgen2/__pycache__/conv.cpython-311.pyc,, +blib2to3/pgen2/__pycache__/driver.cpython-311.pyc,, +blib2to3/pgen2/__pycache__/grammar.cpython-311.pyc,, +blib2to3/pgen2/__pycache__/literals.cpython-311.pyc,, +blib2to3/pgen2/__pycache__/parse.cpython-311.pyc,, +blib2to3/pgen2/__pycache__/pgen.cpython-311.pyc,, +blib2to3/pgen2/__pycache__/token.cpython-311.pyc,, +blib2to3/pgen2/__pycache__/tokenize.cpython-311.pyc,, 
+blib2to3/pgen2/conv.cpython-311-x86_64-linux-gnu.so,sha256=c2ryGboE-0KMiIUHysr_Mqsnyl6JrFaZzawXEKd471U,8256 +blib2to3/pgen2/conv.py,sha256=wGS0vapMV81PvxisAentOrXCZX1IlCFulQDW5Ha9Ueo,9607 +blib2to3/pgen2/driver.cpython-311-x86_64-linux-gnu.so,sha256=tnJiG4sC4UwJ89mrGGPcm8CtIyb5stTMRklw3N4Bt3E,8264 +blib2to3/pgen2/driver.py,sha256=A5HNBnHrl1amhullooyqWEtU2jOnByIketrdbb5zMmY,10670 +blib2to3/pgen2/grammar.cpython-311-x86_64-linux-gnu.so,sha256=Oyjz5o-jW3qeN5U6gDD33fTVpV4_y7HStZVtvkH9DEo,8264 +blib2to3/pgen2/grammar.py,sha256=EJW7w2JIZtzi_g1JWAmONf4kCH8KJD6GVn_1R1uFnDA,6873 +blib2to3/pgen2/literals.cpython-311-x86_64-linux-gnu.so,sha256=zgnZv6KWFT6VVqk3xM-vneNiVrhOoeAiQxXi--AlfJM,8264 +blib2to3/pgen2/literals.py,sha256=vbzw1RX0NvATBR971WwRNtp68fsXky4-pV4cBZ02_9E,1628 +blib2to3/pgen2/parse.cpython-311-x86_64-linux-gnu.so,sha256=zpnGRBSOVdOjPjjdGAmMl9OSbxPLUmD_qyR-J2J7NsY,8264 +blib2to3/pgen2/parse.py,sha256=QJhDkRCgEtc6jQterhuNwvtURIzuz_8Forq5X5k4mQo,14853 +blib2to3/pgen2/pgen.cpython-311-x86_64-linux-gnu.so,sha256=g0kyrkxLEXNyjogA-nKQ8m0P_L2ttTYpWi8EM9AhMQI,8256 +blib2to3/pgen2/pgen.py,sha256=8gkqFARDLVp8tdpjxjJEP1YytCZvNrLl5tuK6SdM_nc,15491 +blib2to3/pgen2/token.cpython-311-x86_64-linux-gnu.so,sha256=tQ_FsW2qRvbryNlsWRRix5hcQ_IRzgaaQXQ4RcGhrBg,8264 +blib2to3/pgen2/token.py,sha256=eDZWhONvSUNGsWYzHhgBd1pIQ2HlVPP7aon0S1CLuKE,1919 +blib2to3/pgen2/tokenize.cpython-311-x86_64-linux-gnu.so,sha256=7JNHlbvrChrieSUoWpTIvZDvM7w4X_tYZzqhEILCp9c,8264 +blib2to3/pgen2/tokenize.py,sha256=CT6TXaY9RJkboV9NJcxQf7KZo07KpxsjOSY4gymGDD4,22842 +blib2to3/pygram.cpython-311-x86_64-linux-gnu.so,sha256=s65ZatQq-qDnR9plai06dKASkYob4p10SM7oL5W2UjQ,8256 +blib2to3/pygram.py,sha256=uCU5Dyc1pObqP1TOJ9IjOkpVqaVpsg-nRovAJt7xOrU,5732 +blib2to3/pytree.cpython-311-x86_64-linux-gnu.so,sha256=EVgnZXshf81mfqkX_aChd8P8YiqT49I7l9sC0mPnyWY,8256 +blib2to3/pytree.py,sha256=We4PqLlpIOT2-625It5HN9ToD8a1OHrl5_FYmDE_0v4,32234 diff --git a/.venv/lib/python3.11/site-packages/black-23.1.0.dist-info/REQUESTED b/.venv/lib/python3.11/site-packages/black-23.1.0.dist-info/REQUESTED new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.11/site-packages/black-23.1.0.dist-info/WHEEL b/.venv/lib/python3.11/site-packages/black-23.1.0.dist-info/WHEEL new file mode 100644 index 0000000..3f380b5 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/black-23.1.0.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.12.2 +Root-Is-Purelib: false +Tag: cp311-cp311-manylinux_2_17_x86_64 +Tag: cp311-cp311-manylinux2014_x86_64 + diff --git a/.venv/lib/python3.11/site-packages/black-23.1.0.dist-info/entry_points.txt b/.venv/lib/python3.11/site-packages/black-23.1.0.dist-info/entry_points.txt new file mode 100644 index 0000000..d0bf907 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/black-23.1.0.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[console_scripts] +black = black:patched_main +blackd = blackd:patched_main [d] diff --git a/.venv/lib/python3.11/site-packages/black-23.1.0.dist-info/licenses/AUTHORS.md b/.venv/lib/python3.11/site-packages/black-23.1.0.dist-info/licenses/AUTHORS.md new file mode 100644 index 0000000..ab3f30b --- /dev/null +++ b/.venv/lib/python3.11/site-packages/black-23.1.0.dist-info/licenses/AUTHORS.md @@ -0,0 +1,195 @@ +# Authors + +Glued together by [Łukasz Langa](mailto:lukasz@langa.pl). 
+ +Maintained with: + +- [Carol Willing](mailto:carolcode@willingconsulting.com) +- [Carl Meyer](mailto:carl@oddbird.net) +- [Jelle Zijlstra](mailto:jelle.zijlstra@gmail.com) +- [Mika Naylor](mailto:mail@autophagy.io) +- [Zsolt Dollenstein](mailto:zsol.zsol@gmail.com) +- [Cooper Lees](mailto:me@cooperlees.com) +- [Richard Si](mailto:sichard26@gmail.com) +- [Felix Hildén](mailto:felix.hilden@gmail.com) +- [Batuhan Taskaya](mailto:batuhan@python.org) + +Multiple contributions by: + +- [Abdur-Rahmaan Janhangeer](mailto:arj.python@gmail.com) +- [Adam Johnson](mailto:me@adamj.eu) +- [Adam Williamson](mailto:adamw@happyassassin.net) +- [Alexander Huynh](mailto:ahrex-gh-psf-black@e.sc) +- [Alexandr Artemyev](mailto:mogost@gmail.com) +- [Alex Vandiver](mailto:github@chmrr.net) +- [Allan Simon](mailto:allan.simon@supinfo.com) +- Anders-Petter Ljungquist +- [Amethyst Reese](mailto:amy@n7.gg) +- [Andrew Thorp](mailto:andrew.thorp.dev@gmail.com) +- [Andrew Zhou](mailto:andrewfzhou@gmail.com) +- [Andrey](mailto:dyuuus@yandex.ru) +- [Andy Freeland](mailto:andy@andyfreeland.net) +- [Anthony Sottile](mailto:asottile@umich.edu) +- [Antonio Ossa Guerra](mailto:aaossa+black@uc.cl) +- [Arjaan Buijk](mailto:arjaan.buijk@gmail.com) +- [Arnav Borbornah](mailto:arnavborborah11@gmail.com) +- [Artem Malyshev](mailto:proofit404@gmail.com) +- [Asger Hautop Drewsen](mailto:asgerdrewsen@gmail.com) +- [Augie Fackler](mailto:raf@durin42.com) +- [Aviskar KC](mailto:aviskarkc10@gmail.com) +- Batuhan Taşkaya +- [Benjamin Wohlwend](mailto:bw@piquadrat.ch) +- [Benjamin Woodruff](mailto:github@benjam.info) +- [Bharat Raghunathan](mailto:bharatraghunthan9767@gmail.com) +- [Brandt Bucher](mailto:brandtbucher@gmail.com) +- [Brett Cannon](mailto:brett@python.org) +- [Bryan Bugyi](mailto:bryan.bugyi@rutgers.edu) +- [Bryan Forbes](mailto:bryan@reigndropsfall.net) +- [Calum Lind](mailto:calumlind@gmail.com) +- [Charles](mailto:peacech@gmail.com) +- Charles Reid +- [Christian Clauss](mailto:cclauss@bluewin.ch) +- [Christian Heimes](mailto:christian@python.org) +- [Chuck Wooters](mailto:chuck.wooters@microsoft.com) +- [Chris Rose](mailto:offline@offby1.net) +- Codey Oxley +- [Cong](mailto:congusbongus@gmail.com) +- [Cooper Ry Lees](mailto:me@cooperlees.com) +- [Dan Davison](mailto:dandavison7@gmail.com) +- [Daniel Hahler](mailto:github@thequod.de) +- [Daniel M. Capella](mailto:polycitizen@gmail.com) +- Daniele Esposti +- [David Hotham](mailto:david.hotham@metaswitch.com) +- [David Lukes](mailto:dafydd.lukes@gmail.com) +- [David Szotten](mailto:davidszotten@gmail.com) +- [Denis Laxalde](mailto:denis@laxalde.org) +- [Douglas Thor](mailto:dthor@transphormusa.com) +- dylanjblack +- [Eli Treuherz](mailto:eli@treuherz.com) +- [Emil Hessman](mailto:emil@hessman.se) +- [Felix Kohlgrüber](mailto:felix.kohlgrueber@gmail.com) +- [Florent Thiery](mailto:fthiery@gmail.com) +- Francisco +- [Giacomo Tagliabue](mailto:giacomo.tag@gmail.com) +- [Greg Gandenberger](mailto:ggandenberger@shoprunner.com) +- [Gregory P. 
Smith](mailto:greg@krypto.org) +- Gustavo Camargo +- hauntsaninja +- [Hadi Alqattan](mailto:alqattanhadizaki@gmail.com) +- [Hassan Abouelela](mailto:hassan@hassanamr.com) +- [Heaford](mailto:dan@heaford.com) +- [Hugo Barrera](mailto:hugo@barrera.io) +- Hugo van Kemenade +- [Hynek Schlawack](mailto:hs@ox.cx) +- [Ionite](mailto:dev@ionite.io) +- [Ivan Katanić](mailto:ivan.katanic@gmail.com) +- [Jakub Kadlubiec](mailto:jakub.kadlubiec@skyscanner.net) +- [Jakub Warczarek](mailto:jakub.warczarek@gmail.com) +- [Jan Hnátek](mailto:jan.hnatek@gmail.com) +- [Jason Fried](mailto:me@jasonfried.info) +- [Jason Friedland](mailto:jason@friedland.id.au) +- [jgirardet](mailto:ijkl@netc.fr) +- Jim Brännlund +- [Jimmy Jia](mailto:tesrin@gmail.com) +- [Joe Antonakakis](mailto:jma353@cornell.edu) +- [Jon Dufresne](mailto:jon.dufresne@gmail.com) +- [Jonas Obrist](mailto:ojiidotch@gmail.com) +- [Jonty Wareing](mailto:jonty@jonty.co.uk) +- [Jose Nazario](mailto:jose.monkey.org@gmail.com) +- [Joseph Larson](mailto:larson.joseph@gmail.com) +- [Josh Bode](mailto:joshbode@fastmail.com) +- [Josh Holland](mailto:anowlcalledjosh@gmail.com) +- [Joshua Cannon](mailto:joshdcannon@gmail.com) +- [José Padilla](mailto:jpadilla@webapplicate.com) +- [Juan Luis Cano Rodríguez](mailto:hello@juanlu.space) +- [kaiix](mailto:kvn.hou@gmail.com) +- [Katie McLaughlin](mailto:katie@glasnt.com) +- Katrin Leinweber +- [Keith Smiley](mailto:keithbsmiley@gmail.com) +- [Kenyon Ralph](mailto:kenyon@kenyonralph.com) +- [Kevin Kirsche](mailto:Kev.Kirsche+GitHub@gmail.com) +- [Kyle Hausmann](mailto:kyle.hausmann@gmail.com) +- [Kyle Sunden](mailto:sunden@wisc.edu) +- Lawrence Chan +- [Linus Groh](mailto:mail@linusgroh.de) +- [Loren Carvalho](mailto:comradeloren@gmail.com) +- [Luka Sterbic](mailto:luka.sterbic@gmail.com) +- [LukasDrude](mailto:mail@lukas-drude.de) +- Mahmoud Hossam +- Mariatta +- [Matt VanEseltine](mailto:vaneseltine@gmail.com) +- [Matthew Clapp](mailto:itsayellow+dev@gmail.com) +- [Matthew Walster](mailto:matthew@walster.org) +- Max Smolens +- [Michael Aquilina](mailto:michaelaquilina@gmail.com) +- [Michael Flaxman](mailto:michael.flaxman@gmail.com) +- [Michael J. 
Sullivan](mailto:sully@msully.net) +- [Michael McClimon](mailto:michael@mcclimon.org) +- [Miguel Gaiowski](mailto:miggaiowski@gmail.com) +- [Mike](mailto:roshi@fedoraproject.org) +- [mikehoyio](mailto:mikehoy@gmail.com) +- [Min ho Kim](mailto:minho42@gmail.com) +- [Miroslav Shubernetskiy](mailto:miroslav@miki725.com) +- MomIsBestFriend +- [Nathan Goldbaum](mailto:ngoldbau@illinois.edu) +- [Nathan Hunt](mailto:neighthan.hunt@gmail.com) +- [Neraste](mailto:neraste.herr10@gmail.com) +- [Nikolaus Waxweiler](mailto:madigens@gmail.com) +- [Ofek Lev](mailto:ofekmeister@gmail.com) +- [Osaetin Daniel](mailto:osaetindaniel@gmail.com) +- [otstrel](mailto:otstrel@gmail.com) +- [Pablo Galindo](mailto:Pablogsal@gmail.com) +- [Paul Ganssle](mailto:p.ganssle@gmail.com) +- [Paul Meinhardt](mailto:mnhrdt@gmail.com) +- [Peter Bengtsson](mailto:mail@peterbe.com) +- [Peter Grayson](mailto:pete@jpgrayson.net) +- [Peter Stensmyr](mailto:peter.stensmyr@gmail.com) +- pmacosta +- [Quentin Pradet](mailto:quentin@pradet.me) +- [Ralf Schmitt](mailto:ralf@systemexit.de) +- [Ramón Valles](mailto:mroutis@protonmail.com) +- [Richard Fearn](mailto:richardfearn@gmail.com) +- [Rishikesh Jha](mailto:rishijha424@gmail.com) +- [Rupert Bedford](mailto:rupert@rupertb.com) +- Russell Davis +- [Sagi Shadur](mailto:saroad2@gmail.com) +- [Rémi Verschelde](mailto:rverschelde@gmail.com) +- [Sami Salonen](mailto:sakki@iki.fi) +- [Samuel Cormier-Iijima](mailto:samuel@cormier-iijima.com) +- [Sanket Dasgupta](mailto:sanketdasgupta@gmail.com) +- Sergi +- [Scott Stevenson](mailto:scott@stevenson.io) +- Shantanu +- [shaoran](mailto:shaoran@sakuranohana.org) +- [Shinya Fujino](mailto:shf0811@gmail.com) +- springstan +- [Stavros Korokithakis](mailto:hi@stavros.io) +- [Stephen Rosen](mailto:sirosen@globus.org) +- [Steven M. 
Vascellaro](mailto:S.Vascellaro@gmail.com) +- [Sunil Kapil](mailto:snlkapil@gmail.com) +- [Sébastien Eustace](mailto:sebastien.eustace@gmail.com) +- [Tal Amuyal](mailto:TalAmuyal@gmail.com) +- [Terrance](mailto:git@terrance.allofti.me) +- [Thom Lu](mailto:thomas.c.lu@gmail.com) +- [Thomas Grainger](mailto:tagrain@gmail.com) +- [Tim Gates](mailto:tim.gates@iress.com) +- [Tim Swast](mailto:swast@google.com) +- [Timo](mailto:timo_tk@hotmail.com) +- Toby Fleming +- [Tom Christie](mailto:tom@tomchristie.com) +- [Tony Narlock](mailto:tony@git-pull.com) +- [Tsuyoshi Hombashi](mailto:tsuyoshi.hombashi@gmail.com) +- [Tushar Chandra](mailto:tusharchandra2018@u.northwestern.edu) +- [Tzu-ping Chung](mailto:uranusjr@gmail.com) +- [Utsav Shah](mailto:ukshah2@illinois.edu) +- utsav-dbx +- vezeli +- [Ville Skyttä](mailto:ville.skytta@iki.fi) +- [Vishwas B Sharma](mailto:sharma.vishwas88@gmail.com) +- [Vlad Emelianov](mailto:volshebnyi@gmail.com) +- [williamfzc](mailto:178894043@qq.com) +- [wouter bolsterlee](mailto:wouter@bolsterl.ee) +- Yazdan +- [Yngve Høiseth](mailto:yngve@hoiseth.net) +- [Yurii Karabas](mailto:1998uriyyo@gmail.com) +- [Zac Hatfield-Dodds](mailto:zac@zhd.dev) diff --git a/.venv/lib/python3.11/site-packages/black-23.1.0.dist-info/licenses/LICENSE b/.venv/lib/python3.11/site-packages/black-23.1.0.dist-info/licenses/LICENSE new file mode 100644 index 0000000..7a9b891 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/black-23.1.0.dist-info/licenses/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2018 Łukasz Langa + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
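For context on the entry_points.txt recorded above: the `[console_scripts]` group is what installers read to generate the `black` and `blackd` executables, each entry mapping a script name to a `module:callable` target. The following sketch is illustrative only and not part of the wheel (the variable names are invented, and it assumes this wheel is installed in the active environment); it resolves the same declaration at runtime via the stdlib `importlib.metadata` API:

    from importlib.metadata import entry_points

    # Select the console_scripts group (keyword selection, Python 3.10+).
    scripts = entry_points(group="console_scripts")
    black_ep = next(ep for ep in scripts if ep.name == "black")
    print(black_ep.value)  # -> "black:patched_main"

    # load() imports the target module and returns the callable; invoking it
    # runs the Black CLI just like the generated `black` script would.
    main = black_ep.load()

Calling `main()` hands control to Click and normally terminates the process via `sys.exit`, so long-running integrations usually import Black's formatting API instead of loading the console script entry point.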
diff --git a/.venv/lib/python3.11/site-packages/black/__init__.cpython-311-x86_64-linux-gnu.so b/.venv/lib/python3.11/site-packages/black/__init__.cpython-311-x86_64-linux-gnu.so new file mode 100755 index 0000000..0fcc920 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/black/__init__.cpython-311-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.11/site-packages/black/__init__.py b/.venv/lib/python3.11/site-packages/black/__init__.py new file mode 100644 index 0000000..4ebf288 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/black/__init__.py @@ -0,0 +1,1451 @@ +import io +import json +import platform +import re +import sys +import tokenize +import traceback +from contextlib import contextmanager +from dataclasses import replace +from datetime import datetime +from enum import Enum +from json.decoder import JSONDecodeError +from pathlib import Path +from typing import ( + Any, + Dict, + Generator, + Iterator, + List, + MutableMapping, + Optional, + Pattern, + Sequence, + Set, + Sized, + Tuple, + Union, +) + +import click +from click.core import ParameterSource +from mypy_extensions import mypyc_attr +from pathspec import PathSpec +from pathspec.patterns.gitwildmatch import GitWildMatchPatternError + +from _black_version import version as __version__ +from black.cache import Cache, get_cache_info, read_cache, write_cache +from black.comments import normalize_fmt_off +from black.const import ( + DEFAULT_EXCLUDES, + DEFAULT_INCLUDES, + DEFAULT_LINE_LENGTH, + STDIN_PLACEHOLDER, +) +from black.files import ( + find_project_root, + find_pyproject_toml, + find_user_pyproject_toml, + gen_python_files, + get_gitignore, + normalize_path_maybe_ignore, + parse_pyproject_toml, + wrap_stream_for_windows, +) +from black.handle_ipynb_magics import ( + PYTHON_CELL_MAGICS, + TRANSFORMED_MAGICS, + jupyter_dependencies_are_installed, + mask_cell, + put_trailing_semicolon_back, + remove_trailing_semicolon, + unmask_cell, +) +from black.linegen import LN, LineGenerator, transform_line +from black.lines import EmptyLineTracker, LinesBlock +from black.mode import ( + FUTURE_FLAG_TO_FEATURE, + VERSION_TO_FEATURES, + Feature, + Mode, + TargetVersion, + supports_feature, +) +from black.nodes import ( + STARS, + is_number_token, + is_simple_decorator_expression, + is_string_token, + syms, +) +from black.output import color_diff, diff, dump_to_file, err, ipynb_diff, out +from black.parsing import InvalidInput # noqa F401 +from black.parsing import lib2to3_parse, parse_ast, stringify_ast +from black.report import Changed, NothingChanged, Report +from black.trans import iter_fexpr_spans +from blib2to3.pgen2 import token +from blib2to3.pytree import Leaf, Node + +COMPILED = Path(__file__).suffix in (".pyd", ".so") + +# types +FileContent = str +Encoding = str +NewLine = str + + +class WriteBack(Enum): + NO = 0 + YES = 1 + DIFF = 2 + CHECK = 3 + COLOR_DIFF = 4 + + @classmethod + def from_configuration( + cls, *, check: bool, diff: bool, color: bool = False + ) -> "WriteBack": + if check and not diff: + return cls.CHECK + + if diff and color: + return cls.COLOR_DIFF + + return cls.DIFF if diff else cls.YES + + +# Legacy name, left for integrations. +FileMode = Mode + + +def read_pyproject_toml( + ctx: click.Context, param: click.Parameter, value: Optional[str] +) -> Optional[str]: + """Inject Black configuration from "pyproject.toml" into defaults in `ctx`. + + Returns the path to a successfully found and read configuration file, None + otherwise. 
+ """ + if not value: + value = find_pyproject_toml(ctx.params.get("src", ())) + if value is None: + return None + + try: + config = parse_pyproject_toml(value) + except (OSError, ValueError) as e: + raise click.FileError( + filename=value, hint=f"Error reading configuration file: {e}" + ) from None + + if not config: + return None + else: + # Sanitize the values to be Click friendly. For more information please see: + # https://github.com/psf/black/issues/1458 + # https://github.com/pallets/click/issues/1567 + config = { + k: str(v) if not isinstance(v, (list, dict)) else v + for k, v in config.items() + } + + target_version = config.get("target_version") + if target_version is not None and not isinstance(target_version, list): + raise click.BadOptionUsage( + "target-version", "Config key target-version must be a list" + ) + + default_map: Dict[str, Any] = {} + if ctx.default_map: + default_map.update(ctx.default_map) + default_map.update(config) + + ctx.default_map = default_map + return value + + +def target_version_option_callback( + c: click.Context, p: Union[click.Option, click.Parameter], v: Tuple[str, ...] +) -> List[TargetVersion]: + """Compute the target versions from a --target-version flag. + + This is its own function because mypy couldn't infer the type correctly + when it was a lambda, causing mypyc trouble. + """ + return [TargetVersion[val.upper()] for val in v] + + +def re_compile_maybe_verbose(regex: str) -> Pattern[str]: + """Compile a regular expression string in `regex`. + + If it contains newlines, use verbose mode. + """ + if "\n" in regex: + regex = "(?x)" + regex + compiled: Pattern[str] = re.compile(regex) + return compiled + + +def validate_regex( + ctx: click.Context, + param: click.Parameter, + value: Optional[str], +) -> Optional[Pattern[str]]: + try: + return re_compile_maybe_verbose(value) if value is not None else None + except re.error as e: + raise click.BadParameter(f"Not a valid regular expression: {e}") from None + + +@click.command( + context_settings={"help_option_names": ["-h", "--help"]}, + # While Click does set this field automatically using the docstring, mypyc + # (annoyingly) strips 'em so we need to set it here too. + help="The uncompromising code formatter.", +) +@click.option("-c", "--code", type=str, help="Format the code passed in as a string.") +@click.option( + "-l", + "--line-length", + type=int, + default=DEFAULT_LINE_LENGTH, + help="How many characters per line to allow.", + show_default=True, +) +@click.option( + "-t", + "--target-version", + type=click.Choice([v.name.lower() for v in TargetVersion]), + callback=target_version_option_callback, + multiple=True, + help=( + "Python versions that should be supported by Black's output. By default, Black" + " will try to infer this from the project metadata in pyproject.toml. If this" + " does not yield conclusive results, Black will use per-file auto-detection." + ), +) +@click.option( + "--pyi", + is_flag=True, + help=( + "Format all input files like typing stubs regardless of file extension (useful" + " when piping source on standard input)." + ), +) +@click.option( + "--ipynb", + is_flag=True, + help=( + "Format all input files like Jupyter Notebooks regardless of file extension " + "(useful when piping source on standard input)." + ), +) +@click.option( + "--python-cell-magics", + multiple=True, + help=( + "When processing Jupyter Notebooks, add the given magic to the list" + f" of known python-magics ({', '.join(sorted(PYTHON_CELL_MAGICS))})." 
+ " Useful for formatting cells with custom python magics." + ), + default=[], +) +@click.option( + "-x", + "--skip-source-first-line", + is_flag=True, + help="Skip the first line of the source code.", +) +@click.option( + "-S", + "--skip-string-normalization", + is_flag=True, + help="Don't normalize string quotes or prefixes.", +) +@click.option( + "-C", + "--skip-magic-trailing-comma", + is_flag=True, + help="Don't use trailing commas as a reason to split lines.", +) +@click.option( + "--experimental-string-processing", + is_flag=True, + hidden=True, + help="(DEPRECATED and now included in --preview) Normalize string literals.", +) +@click.option( + "--preview", + is_flag=True, + help=( + "Enable potentially disruptive style changes that may be added to Black's main" + " functionality in the next major release." + ), +) +@click.option( + "--check", + is_flag=True, + help=( + "Don't write the files back, just return the status. Return code 0 means" + " nothing would change. Return code 1 means some files would be reformatted." + " Return code 123 means there was an internal error." + ), +) +@click.option( + "--diff", + is_flag=True, + help="Don't write the files back, just output a diff for each file on stdout.", +) +@click.option( + "--color/--no-color", + is_flag=True, + help="Show colored diff. Only applies when `--diff` is given.", +) +@click.option( + "--fast/--safe", + is_flag=True, + help="If --fast given, skip temporary sanity checks. [default: --safe]", +) +@click.option( + "--required-version", + type=str, + help=( + "Require a specific version of Black to be running (useful for unifying results" + " across many environments e.g. with a pyproject.toml file). It can be" + " either a major version number or an exact version." + ), +) +@click.option( + "--include", + type=str, + default=DEFAULT_INCLUDES, + callback=validate_regex, + help=( + "A regular expression that matches files and directories that should be" + " included on recursive searches. An empty value means all files are included" + " regardless of the name. Use forward slashes for directories on all platforms" + " (Windows, too). Exclusions are calculated first, inclusions later." + ), + show_default=True, +) +@click.option( + "--exclude", + type=str, + callback=validate_regex, + help=( + "A regular expression that matches files and directories that should be" + " excluded on recursive searches. An empty value means no paths are excluded." + " Use forward slashes for directories on all platforms (Windows, too)." + " Exclusions are calculated first, inclusions later. [default:" + f" {DEFAULT_EXCLUDES}]" + ), + show_default=False, +) +@click.option( + "--extend-exclude", + type=str, + callback=validate_regex, + help=( + "Like --exclude, but adds additional files and directories on top of the" + " excluded ones. (Useful if you simply want to add to the default)" + ), +) +@click.option( + "--force-exclude", + type=str, + callback=validate_regex, + help=( + "Like --exclude, but files and directories matching this regex will be " + "excluded even when they are passed explicitly as arguments." + ), +) +@click.option( + "--stdin-filename", + type=str, + help=( + "The name of the file when passing it through stdin. Useful to make " + "sure Black will respect --force-exclude option on some " + "editors that rely on using stdin." 
+ ), +) +@click.option( + "-W", + "--workers", + type=click.IntRange(min=1), + default=None, + help="Number of parallel workers [default: number of CPUs in the system]", +) +@click.option( + "-q", + "--quiet", + is_flag=True, + help=( + "Don't emit non-error messages to stderr. Errors are still emitted; silence" + " those with 2>/dev/null." + ), +) +@click.option( + "-v", + "--verbose", + is_flag=True, + help=( + "Also emit messages to stderr about files that were not changed or were ignored" + " due to exclusion patterns." + ), +) +@click.version_option( + version=__version__, + message=( + f"%(prog)s, %(version)s (compiled: {'yes' if COMPILED else 'no'})\n" + f"Python ({platform.python_implementation()}) {platform.python_version()}" + ), +) +@click.argument( + "src", + nargs=-1, + type=click.Path( + exists=True, file_okay=True, dir_okay=True, readable=True, allow_dash=True + ), + is_eager=True, + metavar="SRC ...", +) +@click.option( + "--config", + type=click.Path( + exists=True, + file_okay=True, + dir_okay=False, + readable=True, + allow_dash=False, + path_type=str, + ), + is_eager=True, + callback=read_pyproject_toml, + help="Read configuration from FILE path.", +) +@click.pass_context +def main( # noqa: C901 + ctx: click.Context, + code: Optional[str], + line_length: int, + target_version: List[TargetVersion], + check: bool, + diff: bool, + color: bool, + fast: bool, + pyi: bool, + ipynb: bool, + python_cell_magics: Sequence[str], + skip_source_first_line: bool, + skip_string_normalization: bool, + skip_magic_trailing_comma: bool, + experimental_string_processing: bool, + preview: bool, + quiet: bool, + verbose: bool, + required_version: Optional[str], + include: Pattern[str], + exclude: Optional[Pattern[str]], + extend_exclude: Optional[Pattern[str]], + force_exclude: Optional[Pattern[str]], + stdin_filename: Optional[str], + workers: Optional[int], + src: Tuple[str, ...], + config: Optional[str], +) -> None: + """The uncompromising code formatter.""" + ctx.ensure_object(dict) + + if src and code is not None: + out( + main.get_usage(ctx) + + "\n\n'SRC' and 'code' cannot be passed simultaneously." + ) + ctx.exit(1) + if not src and code is None: + out(main.get_usage(ctx) + "\n\nOne of 'SRC' or 'code' is required.") + ctx.exit(1) + + root, method = ( + find_project_root(src, stdin_filename) if code is None else (None, None) + ) + ctx.obj["root"] = root + + if verbose: + if root: + out( + f"Identified `{root}` as project root containing a {method}.", + fg="blue", + ) + + normalized = [ + ( + (source, source) + if source == "-" + else (normalize_path_maybe_ignore(Path(source), root), source) + ) + for source in src + ] + srcs_string = ", ".join( + [ + ( + f'"{_norm}"' + if _norm + else f'\033[31m"{source} (skipping - invalid)"\033[34m' + ) + for _norm, source in normalized + ] + ) + out(f"Sources to be formatted: {srcs_string}", fg="blue") + + if config: + config_source = ctx.get_parameter_source("config") + user_level_config = str(find_user_pyproject_toml()) + if config == user_level_config: + out( + ( + "Using configuration from user-level config at " + f"'{user_level_config}'." + ), + fg="blue", + ) + elif config_source in ( + ParameterSource.DEFAULT, + ParameterSource.DEFAULT_MAP, + ): + out("Using configuration from project root.", fg="blue") + else: + out(f"Using configuration in '{config}'.", fg="blue") + if ctx.default_map: + for param, value in ctx.default_map.items(): + out(f"{param}: {value}") + + error_msg = "Oh no! 
💥 💔 💥" + if ( + required_version + and required_version != __version__ + and required_version != __version__.split(".")[0] + ): + err( + f"{error_msg} The required version `{required_version}` does not match" + f" the running version `{__version__}`!" + ) + ctx.exit(1) + if ipynb and pyi: + err("Cannot pass both `pyi` and `ipynb` flags!") + ctx.exit(1) + + write_back = WriteBack.from_configuration(check=check, diff=diff, color=color) + if target_version: + versions = set(target_version) + else: + # We'll autodetect later. + versions = set() + mode = Mode( + target_versions=versions, + line_length=line_length, + is_pyi=pyi, + is_ipynb=ipynb, + skip_source_first_line=skip_source_first_line, + string_normalization=not skip_string_normalization, + magic_trailing_comma=not skip_magic_trailing_comma, + experimental_string_processing=experimental_string_processing, + preview=preview, + python_cell_magics=set(python_cell_magics), + ) + + if code is not None: + # Run in quiet mode by default with -c; the extra output isn't useful. + # You can still pass -v to get verbose output. + quiet = True + + report = Report(check=check, diff=diff, quiet=quiet, verbose=verbose) + + if code is not None: + reformat_code( + content=code, fast=fast, write_back=write_back, mode=mode, report=report + ) + else: + try: + sources = get_sources( + ctx=ctx, + src=src, + quiet=quiet, + verbose=verbose, + include=include, + exclude=exclude, + extend_exclude=extend_exclude, + force_exclude=force_exclude, + report=report, + stdin_filename=stdin_filename, + ) + except GitWildMatchPatternError: + ctx.exit(1) + + path_empty( + sources, + "No Python files are present to be formatted. Nothing to do 😴", + quiet, + verbose, + ctx, + ) + + if len(sources) == 1: + reformat_one( + src=sources.pop(), + fast=fast, + write_back=write_back, + mode=mode, + report=report, + ) + else: + from black.concurrency import reformat_many + + reformat_many( + sources=sources, + fast=fast, + write_back=write_back, + mode=mode, + report=report, + workers=workers, + ) + + if verbose or not quiet: + if code is None and (verbose or report.change_count or report.failure_count): + out() + out(error_msg if report.return_code else "All done! ✨ 🍰 ✨") + if code is None: + click.echo(str(report), err=True) + ctx.exit(report.return_code) + + +def get_sources( + *, + ctx: click.Context, + src: Tuple[str, ...], + quiet: bool, + verbose: bool, + include: Pattern[str], + exclude: Optional[Pattern[str]], + extend_exclude: Optional[Pattern[str]], + force_exclude: Optional[Pattern[str]], + report: "Report", + stdin_filename: Optional[str], +) -> Set[Path]: + """Compute the set of files to be formatted.""" + sources: Set[Path] = set() + root = ctx.obj["root"] + + using_default_exclude = exclude is None + exclude = re_compile_maybe_verbose(DEFAULT_EXCLUDES) if exclude is None else exclude + gitignore: Optional[Dict[Path, PathSpec]] = None + root_gitignore = get_gitignore(root) + + for s in src: + if s == "-" and stdin_filename: + p = Path(stdin_filename) + is_stdin = True + else: + p = Path(s) + is_stdin = False + + if is_stdin or p.is_file(): + normalized_path = normalize_path_maybe_ignore(p, ctx.obj["root"], report) + if normalized_path is None: + continue + + normalized_path = "/" + normalized_path + # Hard-exclude any files that matches the `--force-exclude` regex. 
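+            # `normalized_path` is root-relative with a leading "/" (prepended
+            # just above), so a user-supplied regex can anchor on the project
+            # root, matching how paths are presented during directory walks.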
+ if force_exclude: + force_exclude_match = force_exclude.search(normalized_path) + else: + force_exclude_match = None + if force_exclude_match and force_exclude_match.group(0): + report.path_ignored(p, "matches the --force-exclude regular expression") + continue + + if is_stdin: + p = Path(f"{STDIN_PLACEHOLDER}{str(p)}") + + if p.suffix == ".ipynb" and not jupyter_dependencies_are_installed( + verbose=verbose, quiet=quiet + ): + continue + + sources.add(p) + elif p.is_dir(): + p = root / normalize_path_maybe_ignore(p, ctx.obj["root"], report) + if using_default_exclude: + gitignore = { + root: root_gitignore, + p: get_gitignore(p), + } + sources.update( + gen_python_files( + p.iterdir(), + ctx.obj["root"], + include, + exclude, + extend_exclude, + force_exclude, + report, + gitignore, + verbose=verbose, + quiet=quiet, + ) + ) + elif s == "-": + sources.add(p) + else: + err(f"invalid path: {s}") + return sources + + +def path_empty( + src: Sized, msg: str, quiet: bool, verbose: bool, ctx: click.Context +) -> None: + """ + Exit if there is no `src` provided for formatting + """ + if not src: + if verbose or not quiet: + out(msg) + ctx.exit(0) + + +def reformat_code( + content: str, fast: bool, write_back: WriteBack, mode: Mode, report: Report +) -> None: + """ + Reformat and print out `content` without spawning child processes. + Similar to `reformat_one`, but for string content. + + `fast`, `write_back`, and `mode` options are passed to + :func:`format_file_in_place` or :func:`format_stdin_to_stdout`. + """ + path = Path("") + try: + changed = Changed.NO + if format_stdin_to_stdout( + content=content, fast=fast, write_back=write_back, mode=mode + ): + changed = Changed.YES + report.done(path, changed) + except Exception as exc: + if report.verbose: + traceback.print_exc() + report.failed(path, str(exc)) + + +# diff-shades depends on being to monkeypatch this function to operate. I know it's +# not ideal, but this shouldn't cause any issues ... hopefully. ~ichard26 +@mypyc_attr(patchable=True) +def reformat_one( + src: Path, fast: bool, write_back: WriteBack, mode: Mode, report: "Report" +) -> None: + """Reformat a single file under `src` without spawning child processes. + + `fast`, `write_back`, and `mode` options are passed to + :func:`format_file_in_place` or :func:`format_stdin_to_stdout`. 
+ """ + try: + changed = Changed.NO + + if str(src) == "-": + is_stdin = True + elif str(src).startswith(STDIN_PLACEHOLDER): + is_stdin = True + # Use the original name again in case we want to print something + # to the user + src = Path(str(src)[len(STDIN_PLACEHOLDER) :]) + else: + is_stdin = False + + if is_stdin: + if src.suffix == ".pyi": + mode = replace(mode, is_pyi=True) + elif src.suffix == ".ipynb": + mode = replace(mode, is_ipynb=True) + if format_stdin_to_stdout(fast=fast, write_back=write_back, mode=mode): + changed = Changed.YES + else: + cache: Cache = {} + if write_back not in (WriteBack.DIFF, WriteBack.COLOR_DIFF): + cache = read_cache(mode) + res_src = src.resolve() + res_src_s = str(res_src) + if res_src_s in cache and cache[res_src_s] == get_cache_info(res_src): + changed = Changed.CACHED + if changed is not Changed.CACHED and format_file_in_place( + src, fast=fast, write_back=write_back, mode=mode + ): + changed = Changed.YES + if (write_back is WriteBack.YES and changed is not Changed.CACHED) or ( + write_back is WriteBack.CHECK and changed is Changed.NO + ): + write_cache(cache, [src], mode) + report.done(src, changed) + except Exception as exc: + if report.verbose: + traceback.print_exc() + report.failed(src, str(exc)) + + +def format_file_in_place( + src: Path, + fast: bool, + mode: Mode, + write_back: WriteBack = WriteBack.NO, + lock: Any = None, # multiprocessing.Manager().Lock() is some crazy proxy +) -> bool: + """Format file under `src` path. Return True if changed. + + If `write_back` is DIFF, write a diff to stdout. If it is YES, write reformatted + code to the file. + `mode` and `fast` options are passed to :func:`format_file_contents`. + """ + if src.suffix == ".pyi": + mode = replace(mode, is_pyi=True) + elif src.suffix == ".ipynb": + mode = replace(mode, is_ipynb=True) + + then = datetime.utcfromtimestamp(src.stat().st_mtime) + header = b"" + with open(src, "rb") as buf: + if mode.skip_source_first_line: + header = buf.readline() + src_contents, encoding, newline = decode_bytes(buf.read()) + try: + dst_contents = format_file_contents(src_contents, fast=fast, mode=mode) + except NothingChanged: + return False + except JSONDecodeError: + raise ValueError( + f"File '{src}' cannot be parsed as valid Jupyter notebook." + ) from None + src_contents = header.decode(encoding) + src_contents + dst_contents = header.decode(encoding) + dst_contents + + if write_back == WriteBack.YES: + with open(src, "w", encoding=encoding, newline=newline) as f: + f.write(dst_contents) + elif write_back in (WriteBack.DIFF, WriteBack.COLOR_DIFF): + now = datetime.utcnow() + src_name = f"{src}\t{then} +0000" + dst_name = f"{src}\t{now} +0000" + if mode.is_ipynb: + diff_contents = ipynb_diff(src_contents, dst_contents, src_name, dst_name) + else: + diff_contents = diff(src_contents, dst_contents, src_name, dst_name) + + if write_back == WriteBack.COLOR_DIFF: + diff_contents = color_diff(diff_contents) + + with lock or nullcontext(): + f = io.TextIOWrapper( + sys.stdout.buffer, + encoding=encoding, + newline=newline, + write_through=True, + ) + f = wrap_stream_for_windows(f) + f.write(diff_contents) + f.detach() + + return True + + +def format_stdin_to_stdout( + fast: bool, + *, + content: Optional[str] = None, + write_back: WriteBack = WriteBack.NO, + mode: Mode, +) -> bool: + """Format file on stdin. Return True if changed. + + If content is None, it's read from sys.stdin. + + If `write_back` is YES, write reformatted code back to stdout. If it is DIFF, + write a diff to stdout. 
The `mode` argument is passed to + :func:`format_file_contents`. + """ + then = datetime.utcnow() + + if content is None: + src, encoding, newline = decode_bytes(sys.stdin.buffer.read()) + else: + src, encoding, newline = content, "utf-8", "" + + dst = src + try: + dst = format_file_contents(src, fast=fast, mode=mode) + return True + + except NothingChanged: + return False + + finally: + f = io.TextIOWrapper( + sys.stdout.buffer, encoding=encoding, newline=newline, write_through=True + ) + if write_back == WriteBack.YES: + # Make sure there's a newline after the content + if dst and dst[-1] != "\n": + dst += "\n" + f.write(dst) + elif write_back in (WriteBack.DIFF, WriteBack.COLOR_DIFF): + now = datetime.utcnow() + src_name = f"STDIN\t{then} +0000" + dst_name = f"STDOUT\t{now} +0000" + d = diff(src, dst, src_name, dst_name) + if write_back == WriteBack.COLOR_DIFF: + d = color_diff(d) + f = wrap_stream_for_windows(f) + f.write(d) + f.detach() + + +def check_stability_and_equivalence( + src_contents: str, dst_contents: str, *, mode: Mode +) -> None: + """Perform stability and equivalence checks. + + Raise AssertionError if source and destination contents are not + equivalent, or if a second pass of the formatter would format the + content differently. + """ + assert_equivalent(src_contents, dst_contents) + assert_stable(src_contents, dst_contents, mode=mode) + + +def format_file_contents(src_contents: str, *, fast: bool, mode: Mode) -> FileContent: + """Reformat contents of a file and return new contents. + + If `fast` is False, additionally confirm that the reformatted code is + valid by calling :func:`assert_equivalent` and :func:`assert_stable` on it. + `mode` is passed to :func:`format_str`. + """ + if mode.is_ipynb: + dst_contents = format_ipynb_string(src_contents, fast=fast, mode=mode) + else: + dst_contents = format_str(src_contents, mode=mode) + if src_contents == dst_contents: + raise NothingChanged + + if not fast and not mode.is_ipynb: + # Jupyter notebooks will already have been checked above. + check_stability_and_equivalence(src_contents, dst_contents, mode=mode) + return dst_contents + + +def validate_cell(src: str, mode: Mode) -> None: + """Check that cell does not already contain TransformerManager transformations, + or non-Python cell magics, which might cause tokenizer_rt to break because of + indentations. + + If a cell contains ``!ls``, then it'll be transformed to + ``get_ipython().system('ls')``. However, if the cell originally contained + ``get_ipython().system('ls')``, then it would get transformed in the same way: + + >>> TransformerManager().transform_cell("get_ipython().system('ls')") + "get_ipython().system('ls')\n" + >>> TransformerManager().transform_cell("!ls") + "get_ipython().system('ls')\n" + + Due to the impossibility of safely roundtripping in such situations, cells + containing transformed magics will be ignored. + """ + if any(transformed_magic in src for transformed_magic in TRANSFORMED_MAGICS): + raise NothingChanged + if ( + src[:2] == "%%" + and src.split()[0][2:] not in PYTHON_CELL_MAGICS | mode.python_cell_magics + ): + raise NothingChanged + + +def format_cell(src: str, *, fast: bool, mode: Mode) -> str: + """Format code in given cell of Jupyter notebook. + + General idea is: + + - if cell has trailing semicolon, remove it; + - if cell has IPython magics, mask them; + - format cell; + - reinstate IPython magics; + - reinstate trailing semicolon (if originally present); + - strip trailing newlines. 
+ + Cells with syntax errors will not be processed, as they + could potentially be automagics or multi-line magics, which + are currently not supported. + """ + validate_cell(src, mode) + src_without_trailing_semicolon, has_trailing_semicolon = remove_trailing_semicolon( + src + ) + try: + masked_src, replacements = mask_cell(src_without_trailing_semicolon) + except SyntaxError: + raise NothingChanged from None + masked_dst = format_str(masked_src, mode=mode) + if not fast: + check_stability_and_equivalence(masked_src, masked_dst, mode=mode) + dst_without_trailing_semicolon = unmask_cell(masked_dst, replacements) + dst = put_trailing_semicolon_back( + dst_without_trailing_semicolon, has_trailing_semicolon + ) + dst = dst.rstrip("\n") + if dst == src: + raise NothingChanged from None + return dst + + +def validate_metadata(nb: MutableMapping[str, Any]) -> None: + """If notebook is marked as non-Python, don't format it. + + All notebook metadata fields are optional, see + https://nbformat.readthedocs.io/en/latest/format_description.html. So + if a notebook has empty metadata, we will try to parse it anyway. + """ + language = nb.get("metadata", {}).get("language_info", {}).get("name", None) + if language is not None and language != "python": + raise NothingChanged from None + + +def format_ipynb_string(src_contents: str, *, fast: bool, mode: Mode) -> FileContent: + """Format Jupyter notebook. + + Operate cell-by-cell, only on code cells, only for Python notebooks. + If the ``.ipynb`` originally had a trailing newline, it'll be preserved. + """ + if not src_contents: + raise NothingChanged + + trailing_newline = src_contents[-1] == "\n" + modified = False + nb = json.loads(src_contents) + validate_metadata(nb) + for cell in nb["cells"]: + if cell.get("cell_type", None) == "code": + try: + src = "".join(cell["source"]) + dst = format_cell(src, fast=fast, mode=mode) + except NothingChanged: + pass + else: + cell["source"] = dst.splitlines(keepends=True) + modified = True + if modified: + dst_contents = json.dumps(nb, indent=1, ensure_ascii=False) + if trailing_newline: + dst_contents = dst_contents + "\n" + return dst_contents + else: + raise NothingChanged + + +def format_str(src_contents: str, *, mode: Mode) -> str: + """Reformat a string and return new contents. + + `mode` determines formatting options, such as how many characters per line are + allowed. Example: + + >>> import black + >>> print(black.format_str("def f(arg:str='')->None:...", mode=black.Mode())) + def f(arg: str = "") -> None: + ... + + A more complex example: + + >>> print( + ... black.format_str( + ... "def f(arg:str='')->None: hey", + ... mode=black.Mode( + ... target_versions={black.TargetVersion.PY36}, + ... line_length=10, + ... string_normalization=False, + ... is_pyi=False, + ... ), + ... ), + ... ) + def f( + arg: str = '', + ) -> None: + hey + + """ + dst_contents = _format_str_once(src_contents, mode=mode) + # Forced second pass to work around optional trailing commas (becoming + # forced trailing commas on pass 2) interacting differently with optional + # parentheses. Admittedly ugly. 
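+    # Only re-run the formatter when the first pass changed the input; the
+    # safety checks call `assert_stable`, which separately verifies that the
+    # output no longer changes on a further pass.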
+ if src_contents != dst_contents: + return _format_str_once(dst_contents, mode=mode) + return dst_contents + + +def _format_str_once(src_contents: str, *, mode: Mode) -> str: + src_node = lib2to3_parse(src_contents.lstrip(), mode.target_versions) + dst_blocks: List[LinesBlock] = [] + if mode.target_versions: + versions = mode.target_versions + else: + future_imports = get_future_imports(src_node) + versions = detect_target_versions(src_node, future_imports=future_imports) + + context_manager_features = { + feature + for feature in {Feature.PARENTHESIZED_CONTEXT_MANAGERS} + if supports_feature(versions, feature) + } + normalize_fmt_off(src_node) + lines = LineGenerator(mode=mode, features=context_manager_features) + elt = EmptyLineTracker(mode=mode) + split_line_features = { + feature + for feature in {Feature.TRAILING_COMMA_IN_CALL, Feature.TRAILING_COMMA_IN_DEF} + if supports_feature(versions, feature) + } + block: Optional[LinesBlock] = None + for current_line in lines.visit(src_node): + block = elt.maybe_empty_lines(current_line) + dst_blocks.append(block) + for line in transform_line( + current_line, mode=mode, features=split_line_features + ): + block.content_lines.append(str(line)) + if dst_blocks: + dst_blocks[-1].after = 0 + dst_contents = [] + for block in dst_blocks: + dst_contents.extend(block.all_lines()) + if not dst_contents: + # Use decode_bytes to retrieve the correct source newline (CRLF or LF), + # and check if normalized_content has more than one line + normalized_content, _, newline = decode_bytes(src_contents.encode("utf-8")) + if "\n" in normalized_content: + return newline + return "" + return "".join(dst_contents) + + +def decode_bytes(src: bytes) -> Tuple[FileContent, Encoding, NewLine]: + """Return a tuple of (decoded_contents, encoding, newline). + + `newline` is either CRLF or LF but `decoded_contents` is decoded with + universal newlines (i.e. only contains LF). + """ + srcbuf = io.BytesIO(src) + encoding, lines = tokenize.detect_encoding(srcbuf.readline) + if not lines: + return "", encoding, "\n" + + newline = "\r\n" if b"\r\n" == lines[0][-2:] else "\n" + srcbuf.seek(0) + with io.TextIOWrapper(srcbuf, encoding) as tiow: + return tiow.read(), encoding, newline + + +def get_features_used( # noqa: C901 + node: Node, *, future_imports: Optional[Set[str]] = None +) -> Set[Feature]: + """Return a set of (relatively) new Python features used in this file. 
+ + Currently looking for: + - f-strings; + - self-documenting expressions in f-strings (f"{x=}"); + - underscores in numeric literals; + - trailing commas after * or ** in function signatures and calls; + - positional only arguments in function signatures and lambdas; + - assignment expression; + - relaxed decorator syntax; + - usage of __future__ flags (annotations); + - print / exec statements; + - parenthesized context managers; + - match statements; + - except* clause; + - variadic generics; + """ + features: Set[Feature] = set() + if future_imports: + features |= { + FUTURE_FLAG_TO_FEATURE[future_import] + for future_import in future_imports + if future_import in FUTURE_FLAG_TO_FEATURE + } + + for n in node.pre_order(): + if is_string_token(n): + value_head = n.value[:2] + if value_head in {'f"', 'F"', "f'", "F'", "rf", "fr", "RF", "FR"}: + features.add(Feature.F_STRINGS) + if Feature.DEBUG_F_STRINGS not in features: + for span_beg, span_end in iter_fexpr_spans(n.value): + if n.value[span_beg : span_end - 1].rstrip().endswith("="): + features.add(Feature.DEBUG_F_STRINGS) + break + + elif is_number_token(n): + if "_" in n.value: + features.add(Feature.NUMERIC_UNDERSCORES) + + elif n.type == token.SLASH: + if n.parent and n.parent.type in { + syms.typedargslist, + syms.arglist, + syms.varargslist, + }: + features.add(Feature.POS_ONLY_ARGUMENTS) + + elif n.type == token.COLONEQUAL: + features.add(Feature.ASSIGNMENT_EXPRESSIONS) + + elif n.type == syms.decorator: + if len(n.children) > 1 and not is_simple_decorator_expression( + n.children[1] + ): + features.add(Feature.RELAXED_DECORATORS) + + elif ( + n.type in {syms.typedargslist, syms.arglist} + and n.children + and n.children[-1].type == token.COMMA + ): + if n.type == syms.typedargslist: + feature = Feature.TRAILING_COMMA_IN_DEF + else: + feature = Feature.TRAILING_COMMA_IN_CALL + + for ch in n.children: + if ch.type in STARS: + features.add(feature) + + if ch.type == syms.argument: + for argch in ch.children: + if argch.type in STARS: + features.add(feature) + + elif ( + n.type in {syms.return_stmt, syms.yield_expr} + and len(n.children) >= 2 + and n.children[1].type == syms.testlist_star_expr + and any(child.type == syms.star_expr for child in n.children[1].children) + ): + features.add(Feature.UNPACKING_ON_FLOW) + + elif ( + n.type == syms.annassign + and len(n.children) >= 4 + and n.children[3].type == syms.testlist_star_expr + ): + features.add(Feature.ANN_ASSIGN_EXTENDED_RHS) + + elif ( + n.type == syms.with_stmt + and len(n.children) > 2 + and n.children[1].type == syms.atom + ): + atom_children = n.children[1].children + if ( + len(atom_children) == 3 + and atom_children[0].type == token.LPAR + and atom_children[1].type == syms.testlist_gexp + and atom_children[2].type == token.RPAR + ): + features.add(Feature.PARENTHESIZED_CONTEXT_MANAGERS) + + elif n.type == syms.match_stmt: + features.add(Feature.PATTERN_MATCHING) + + elif ( + n.type == syms.except_clause + and len(n.children) >= 2 + and n.children[1].type == token.STAR + ): + features.add(Feature.EXCEPT_STAR) + + elif n.type in {syms.subscriptlist, syms.trailer} and any( + child.type == syms.star_expr for child in n.children + ): + features.add(Feature.VARIADIC_GENERICS) + + elif ( + n.type == syms.tname_star + and len(n.children) == 3 + and n.children[2].type == syms.star_expr + ): + features.add(Feature.VARIADIC_GENERICS) + + return features + + +def detect_target_versions( + node: Node, *, future_imports: Optional[Set[str]] = None +) -> Set[TargetVersion]: + """Detect 
the version to target based on the nodes used.""" + features = get_features_used(node, future_imports=future_imports) + return { + version for version in TargetVersion if features <= VERSION_TO_FEATURES[version] + } + + +def get_future_imports(node: Node) -> Set[str]: + """Return a set of __future__ imports in the file.""" + imports: Set[str] = set() + + def get_imports_from_children(children: List[LN]) -> Generator[str, None, None]: + for child in children: + if isinstance(child, Leaf): + if child.type == token.NAME: + yield child.value + + elif child.type == syms.import_as_name: + orig_name = child.children[0] + assert isinstance(orig_name, Leaf), "Invalid syntax parsing imports" + assert orig_name.type == token.NAME, "Invalid syntax parsing imports" + yield orig_name.value + + elif child.type == syms.import_as_names: + yield from get_imports_from_children(child.children) + + else: + raise AssertionError("Invalid syntax parsing imports") + + for child in node.children: + if child.type != syms.simple_stmt: + break + + first_child = child.children[0] + if isinstance(first_child, Leaf): + # Continue looking if we see a docstring; otherwise stop. + if ( + len(child.children) == 2 + and first_child.type == token.STRING + and child.children[1].type == token.NEWLINE + ): + continue + + break + + elif first_child.type == syms.import_from: + module_name = first_child.children[1] + if not isinstance(module_name, Leaf) or module_name.value != "__future__": + break + + imports |= set(get_imports_from_children(first_child.children[3:])) + else: + break + + return imports + + +def assert_equivalent(src: str, dst: str) -> None: + """Raise AssertionError if `src` and `dst` aren't equivalent.""" + try: + src_ast = parse_ast(src) + except Exception as exc: + raise AssertionError( + "cannot use --safe with this file; failed to parse source file AST: " + f"{exc}\n" + "This could be caused by running Black with an older Python version " + "that does not support new syntax used in your source file." + ) from exc + + try: + dst_ast = parse_ast(dst) + except Exception as exc: + log = dump_to_file("".join(traceback.format_tb(exc.__traceback__)), dst) + raise AssertionError( + f"INTERNAL ERROR: Black produced invalid code: {exc}. " + "Please report a bug on https://github.com/psf/black/issues. " + f"This invalid output might be helpful: {log}" + ) from None + + src_ast_str = "\n".join(stringify_ast(src_ast)) + dst_ast_str = "\n".join(stringify_ast(dst_ast)) + if src_ast_str != dst_ast_str: + log = dump_to_file(diff(src_ast_str, dst_ast_str, "src", "dst")) + raise AssertionError( + "INTERNAL ERROR: Black produced code that is not equivalent to the" + " source. Please report a bug on " + f"https://github.com/psf/black/issues. This diff might be helpful: {log}" + ) from None + + +def assert_stable(src: str, dst: str, mode: Mode) -> None: + """Raise AssertionError if `dst` reformats differently the second time.""" + # We shouldn't call format_str() here, because that formats the string + # twice and may hide a bug where we bounce back and forth between two + # versions. + newdst = _format_str_once(dst, mode=mode) + if dst != newdst: + log = dump_to_file( + str(mode), + diff(src, dst, "source", "first pass"), + diff(dst, newdst, "first pass", "second pass"), + ) + raise AssertionError( + "INTERNAL ERROR: Black produced different code on the second pass of the" + " formatter. Please report a bug on https://github.com/psf/black/issues." 
+ f" This diff might be helpful: {log}" + ) from None + + +@contextmanager +def nullcontext() -> Iterator[None]: + """Return an empty context manager. + + To be used like `nullcontext` in Python 3.7. + """ + yield + + +def patch_click() -> None: + """Make Click not crash on Python 3.6 with LANG=C. + + On certain misconfigured environments, Python 3 selects the ASCII encoding as the + default which restricts paths that it can access during the lifetime of the + application. Click refuses to work in this scenario by raising a RuntimeError. + + In case of Black the likelihood that non-ASCII characters are going to be used in + file paths is minimal since it's Python source code. Moreover, this crash was + spurious on Python 3.7 thanks to PEP 538 and PEP 540. + """ + modules: List[Any] = [] + try: + from click import core + except ImportError: + pass + else: + modules.append(core) + try: + # Removed in Click 8.1.0 and newer; we keep this around for users who have + # older versions installed. + from click import _unicodefun # type: ignore + except ImportError: + pass + else: + modules.append(_unicodefun) + + for module in modules: + if hasattr(module, "_verify_python3_env"): + module._verify_python3_env = lambda: None + if hasattr(module, "_verify_python_env"): + module._verify_python_env = lambda: None + + +def patched_main() -> None: + # PyInstaller patches multiprocessing to need freeze_support() even in non-Windows + # environments so just assume we always need to call it if frozen. + if getattr(sys, "frozen", False): + from multiprocessing import freeze_support + + freeze_support() + + patch_click() + main() + + +if __name__ == "__main__": + patched_main() diff --git a/.venv/lib/python3.11/site-packages/black/__main__.py b/.venv/lib/python3.11/site-packages/black/__main__.py new file mode 100644 index 0000000..19b810b --- /dev/null +++ b/.venv/lib/python3.11/site-packages/black/__main__.py @@ -0,0 +1,3 @@ +from black import patched_main + +patched_main() diff --git a/.venv/lib/python3.11/site-packages/black/brackets.cpython-311-x86_64-linux-gnu.so b/.venv/lib/python3.11/site-packages/black/brackets.cpython-311-x86_64-linux-gnu.so new file mode 100755 index 0000000..fc2c0c6 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/black/brackets.cpython-311-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.11/site-packages/black/brackets.py b/.venv/lib/python3.11/site-packages/black/brackets.py new file mode 100644 index 0000000..343f060 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/black/brackets.py @@ -0,0 +1,381 @@ +"""Builds on top of nodes.py to track brackets.""" + +import sys +from dataclasses import dataclass, field +from typing import Dict, Iterable, List, Optional, Sequence, Set, Tuple, Union + +if sys.version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final + +from black.nodes import ( + BRACKET, + CLOSING_BRACKETS, + COMPARATORS, + LOGIC_OPERATORS, + MATH_OPERATORS, + OPENING_BRACKETS, + UNPACKING_PARENTS, + VARARGS_PARENTS, + is_vararg, + syms, +) +from blib2to3.pgen2 import token +from blib2to3.pytree import Leaf, Node + +# types +LN = Union[Leaf, Node] +Depth = int +LeafID = int +NodeType = int +Priority = int + + +COMPREHENSION_PRIORITY: Final = 20 +COMMA_PRIORITY: Final = 18 +TERNARY_PRIORITY: Final = 16 +LOGIC_PRIORITY: Final = 14 +STRING_PRIORITY: Final = 12 +COMPARATOR_PRIORITY: Final = 10 +MATH_PRIORITIES: Final = { + token.VBAR: 9, + token.CIRCUMFLEX: 8, + token.AMPER: 7, + token.LEFTSHIFT: 6, + 
token.RIGHTSHIFT: 6, + token.PLUS: 5, + token.MINUS: 5, + token.STAR: 4, + token.SLASH: 4, + token.DOUBLESLASH: 4, + token.PERCENT: 4, + token.AT: 4, + token.TILDE: 3, + token.DOUBLESTAR: 2, +} +DOT_PRIORITY: Final = 1 + + +class BracketMatchError(Exception): + """Raised when an opening bracket is unable to be matched to a closing bracket.""" + + +@dataclass +class BracketTracker: + """Keeps track of brackets on a line.""" + + depth: int = 0 + bracket_match: Dict[Tuple[Depth, NodeType], Leaf] = field(default_factory=dict) + delimiters: Dict[LeafID, Priority] = field(default_factory=dict) + previous: Optional[Leaf] = None + _for_loop_depths: List[int] = field(default_factory=list) + _lambda_argument_depths: List[int] = field(default_factory=list) + invisible: List[Leaf] = field(default_factory=list) + + def mark(self, leaf: Leaf) -> None: + """Mark `leaf` with bracket-related metadata. Keep track of delimiters. + + All leaves receive an int `bracket_depth` field that stores how deep + within brackets a given leaf is. 0 means there are no enclosing brackets + that started on this line. + + If a leaf is itself a closing bracket and there is a matching opening + bracket earlier, it receives an `opening_bracket` field with which it forms a + pair. This is a one-directional link to avoid reference cycles. Closing + bracket without opening happens on lines continued from previous + breaks, e.g. `) -> "ReturnType":` as part of a funcdef where we place + the return type annotation on its own line of the previous closing RPAR. + + If a leaf is a delimiter (a token on which Black can split the line if + needed) and it's on depth 0, its `id()` is stored in the tracker's + `delimiters` field. + """ + if leaf.type == token.COMMENT: + return + + if ( + self.depth == 0 + and leaf.type in CLOSING_BRACKETS + and (self.depth, leaf.type) not in self.bracket_match + ): + return + + self.maybe_decrement_after_for_loop_variable(leaf) + self.maybe_decrement_after_lambda_arguments(leaf) + if leaf.type in CLOSING_BRACKETS: + self.depth -= 1 + try: + opening_bracket = self.bracket_match.pop((self.depth, leaf.type)) + except KeyError as e: + raise BracketMatchError( + "Unable to match a closing bracket to the following opening" + f" bracket: {leaf}" + ) from e + leaf.opening_bracket = opening_bracket + if not leaf.value: + self.invisible.append(leaf) + leaf.bracket_depth = self.depth + if self.depth == 0: + delim = is_split_before_delimiter(leaf, self.previous) + if delim and self.previous is not None: + self.delimiters[id(self.previous)] = delim + else: + delim = is_split_after_delimiter(leaf, self.previous) + if delim: + self.delimiters[id(leaf)] = delim + if leaf.type in OPENING_BRACKETS: + self.bracket_match[self.depth, BRACKET[leaf.type]] = leaf + self.depth += 1 + if not leaf.value: + self.invisible.append(leaf) + self.previous = leaf + self.maybe_increment_lambda_arguments(leaf) + self.maybe_increment_for_loop_variable(leaf) + + def any_open_brackets(self) -> bool: + """Return True if there is an yet unmatched open bracket on the line.""" + return bool(self.bracket_match) + + def max_delimiter_priority(self, exclude: Iterable[LeafID] = ()) -> Priority: + """Return the highest priority of a delimiter found on the line. + + Values are consistent with what `is_split_*_delimiter()` return. + Raises ValueError on no delimiters. 
+ """ + return max(v for k, v in self.delimiters.items() if k not in exclude) + + def delimiter_count_with_priority(self, priority: Priority = 0) -> int: + """Return the number of delimiters with the given `priority`. + + If no `priority` is passed, defaults to max priority on the line. + """ + if not self.delimiters: + return 0 + + priority = priority or self.max_delimiter_priority() + return sum(1 for p in self.delimiters.values() if p == priority) + + def maybe_increment_for_loop_variable(self, leaf: Leaf) -> bool: + """In a for loop, or comprehension, the variables are often unpacks. + + To avoid splitting on the comma in this situation, increase the depth of + tokens between `for` and `in`. + """ + if leaf.type == token.NAME and leaf.value == "for": + self.depth += 1 + self._for_loop_depths.append(self.depth) + return True + + return False + + def maybe_decrement_after_for_loop_variable(self, leaf: Leaf) -> bool: + """See `maybe_increment_for_loop_variable` above for explanation.""" + if ( + self._for_loop_depths + and self._for_loop_depths[-1] == self.depth + and leaf.type == token.NAME + and leaf.value == "in" + ): + self.depth -= 1 + self._for_loop_depths.pop() + return True + + return False + + def maybe_increment_lambda_arguments(self, leaf: Leaf) -> bool: + """In a lambda expression, there might be more than one argument. + + To avoid splitting on the comma in this situation, increase the depth of + tokens between `lambda` and `:`. + """ + if leaf.type == token.NAME and leaf.value == "lambda": + self.depth += 1 + self._lambda_argument_depths.append(self.depth) + return True + + return False + + def maybe_decrement_after_lambda_arguments(self, leaf: Leaf) -> bool: + """See `maybe_increment_lambda_arguments` above for explanation.""" + if ( + self._lambda_argument_depths + and self._lambda_argument_depths[-1] == self.depth + and leaf.type == token.COLON + ): + self.depth -= 1 + self._lambda_argument_depths.pop() + return True + + return False + + def get_open_lsqb(self) -> Optional[Leaf]: + """Return the most recent opening square bracket (if any).""" + return self.bracket_match.get((self.depth - 1, token.RSQB)) + + +def is_split_after_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority: + """Return the priority of the `leaf` delimiter, given a line break after it. + + The delimiter priorities returned here are from those delimiters that would + cause a line break after themselves. + + Higher numbers are higher priority. + """ + if leaf.type == token.COMMA: + return COMMA_PRIORITY + + return 0 + + +def is_split_before_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority: + """Return the priority of the `leaf` delimiter, given a line break before it. + + The delimiter priorities returned here are from those delimiters that would + cause a line break before themselves. + + Higher numbers are higher priority. + """ + if is_vararg(leaf, within=VARARGS_PARENTS | UNPACKING_PARENTS): + # * and ** might also be MATH_OPERATORS but in this case they are not. + # Don't treat them as a delimiter. 
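+        # A leading * or ** here is unary unpacking rather than a binary
+        # operator, so it must not become a MATH_PRIORITIES split point.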
+ return 0 + + if ( + leaf.type == token.DOT + and leaf.parent + and leaf.parent.type not in {syms.import_from, syms.dotted_name} + and (previous is None or previous.type in CLOSING_BRACKETS) + ): + return DOT_PRIORITY + + if ( + leaf.type in MATH_OPERATORS + and leaf.parent + and leaf.parent.type not in {syms.factor, syms.star_expr} + ): + return MATH_PRIORITIES[leaf.type] + + if leaf.type in COMPARATORS: + return COMPARATOR_PRIORITY + + if ( + leaf.type == token.STRING + and previous is not None + and previous.type == token.STRING + ): + return STRING_PRIORITY + + if leaf.type not in {token.NAME, token.ASYNC}: + return 0 + + if ( + leaf.value == "for" + and leaf.parent + and leaf.parent.type in {syms.comp_for, syms.old_comp_for} + or leaf.type == token.ASYNC + ): + if ( + not isinstance(leaf.prev_sibling, Leaf) + or leaf.prev_sibling.value != "async" + ): + return COMPREHENSION_PRIORITY + + if ( + leaf.value == "if" + and leaf.parent + and leaf.parent.type in {syms.comp_if, syms.old_comp_if} + ): + return COMPREHENSION_PRIORITY + + if leaf.value in {"if", "else"} and leaf.parent and leaf.parent.type == syms.test: + return TERNARY_PRIORITY + + if leaf.value == "is": + return COMPARATOR_PRIORITY + + if ( + leaf.value == "in" + and leaf.parent + and leaf.parent.type in {syms.comp_op, syms.comparison} + and not ( + previous is not None + and previous.type == token.NAME + and previous.value == "not" + ) + ): + return COMPARATOR_PRIORITY + + if ( + leaf.value == "not" + and leaf.parent + and leaf.parent.type == syms.comp_op + and not ( + previous is not None + and previous.type == token.NAME + and previous.value == "is" + ) + ): + return COMPARATOR_PRIORITY + + if leaf.value in LOGIC_OPERATORS and leaf.parent: + return LOGIC_PRIORITY + + return 0 + + +def max_delimiter_priority_in_atom(node: LN) -> Priority: + """Return maximum delimiter priority inside `node`. + + This is specific to atoms with contents contained in a pair of parentheses. + If `node` isn't an atom or there are no enclosing parentheses, returns 0. + """ + if node.type != syms.atom: + return 0 + + first = node.children[0] + last = node.children[-1] + if not (first.type == token.LPAR and last.type == token.RPAR): + return 0 + + bt = BracketTracker() + for c in node.children[1:-1]: + if isinstance(c, Leaf): + bt.mark(c) + else: + for leaf in c.leaves(): + bt.mark(leaf) + try: + return bt.max_delimiter_priority() + + except ValueError: + return 0 + + +def get_leaves_inside_matching_brackets(leaves: Sequence[Leaf]) -> Set[LeafID]: + """Return leaves that are inside matching brackets. + + The input `leaves` can have non-matching brackets at the head or tail parts. + Matching brackets are included. + """ + try: + # Start with the first opening bracket and ignore closing brackets before. 
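+        # `next` raises StopIteration when `leaves` contains no opening
+        # bracket at all; the handler below maps that case to an empty set.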
+        start_index = next(
+            i for i, l in enumerate(leaves) if l.type in OPENING_BRACKETS
+        )
+    except StopIteration:
+        return set()
+    bracket_stack = []
+    ids = set()
+    for i in range(start_index, len(leaves)):
+        leaf = leaves[i]
+        if leaf.type in OPENING_BRACKETS:
+            bracket_stack.append((BRACKET[leaf.type], i))
+        if leaf.type in CLOSING_BRACKETS:
+            if bracket_stack and leaf.type == bracket_stack[-1][0]:
+                _, start = bracket_stack.pop()
+                for j in range(start, i + 1):
+                    ids.add(id(leaves[j]))
+            else:
+                break
+    return ids
diff --git a/.venv/lib/python3.11/site-packages/black/cache.cpython-311-x86_64-linux-gnu.so b/.venv/lib/python3.11/site-packages/black/cache.cpython-311-x86_64-linux-gnu.so
new file mode 100755
index 0000000..e966d69
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/black/cache.cpython-311-x86_64-linux-gnu.so differ
diff --git a/.venv/lib/python3.11/site-packages/black/cache.py b/.venv/lib/python3.11/site-packages/black/cache.py
new file mode 100644
index 0000000..9455ff4
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/black/cache.py
@@ -0,0 +1,97 @@
+"""Caching of formatted files with feature-based invalidation."""
+
+import os
+import pickle
+import tempfile
+from pathlib import Path
+from typing import Dict, Iterable, Set, Tuple
+
+from platformdirs import user_cache_dir
+
+from _black_version import version as __version__
+from black.mode import Mode
+
+# types
+Timestamp = float
+FileSize = int
+CacheInfo = Tuple[Timestamp, FileSize]
+Cache = Dict[str, CacheInfo]
+
+
+def get_cache_dir() -> Path:
+    """Get the cache directory used by black.
+
+    Users can customize this directory on all systems using the `BLACK_CACHE_DIR`
+    environment variable. By default, the cache directory is the user cache directory
+    under the black application.
+
+    This result is immediately set to a constant `black.cache.CACHE_DIR` so as to
+    avoid repeated calls.
+    """
+    # NOTE: Function mostly exists as a clean way to test getting the cache directory.
+    default_cache_dir = user_cache_dir("black", version=__version__)
+    cache_dir = Path(os.environ.get("BLACK_CACHE_DIR", default_cache_dir))
+    return cache_dir
+
+
+CACHE_DIR = get_cache_dir()
+
+
+def read_cache(mode: Mode) -> Cache:
+    """Read the cache if it exists and is well formed.
+
+    If it is not well formed, the call to write_cache later should resolve the issue.
+    """
+    cache_file = get_cache_file(mode)
+    if not cache_file.exists():
+        return {}
+
+    with cache_file.open("rb") as fobj:
+        try:
+            cache: Cache = pickle.load(fobj)
+        except (pickle.UnpicklingError, ValueError, IndexError):
+            return {}
+
+    return cache
+
+
+def get_cache_file(mode: Mode) -> Path:
+    return CACHE_DIR / f"cache.{mode.get_cache_key()}.pickle"
+
+
+def get_cache_info(path: Path) -> CacheInfo:
+    """Return the information used to check if a file is already formatted or not."""
+    stat = path.stat()
+    return stat.st_mtime, stat.st_size
+
+
+def filter_cached(cache: Cache, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]:
+    """Split an iterable of paths in `sources` into two sets.
+
+    The first contains paths of files that were modified on disk or are not in
+    the cache. The other contains paths to non-modified files.
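+
+    For example (a sketch, not literal cache contents):
+
+        todo, done = filter_cached(cache, {Path("a.py"), Path("b.py")})
+
+    puts `a.py` in `todo` if its current (mtime, size) differs from its cached
+    entry or it is missing from `cache`, and in `done` otherwise.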
+ """ + todo, done = set(), set() + for src in sources: + res_src = src.resolve() + if cache.get(str(res_src)) != get_cache_info(res_src): + todo.add(src) + else: + done.add(src) + return todo, done + + +def write_cache(cache: Cache, sources: Iterable[Path], mode: Mode) -> None: + """Update the cache file.""" + cache_file = get_cache_file(mode) + try: + CACHE_DIR.mkdir(parents=True, exist_ok=True) + new_cache = { + **cache, + **{str(src.resolve()): get_cache_info(src) for src in sources}, + } + with tempfile.NamedTemporaryFile(dir=str(cache_file.parent), delete=False) as f: + pickle.dump(new_cache, f, protocol=4) + os.replace(f.name, cache_file) + except OSError: + pass diff --git a/.venv/lib/python3.11/site-packages/black/comments.cpython-311-x86_64-linux-gnu.so b/.venv/lib/python3.11/site-packages/black/comments.cpython-311-x86_64-linux-gnu.so new file mode 100755 index 0000000..d1a0b99 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/black/comments.cpython-311-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.11/site-packages/black/comments.py b/.venv/lib/python3.11/site-packages/black/comments.py new file mode 100644 index 0000000..7cf15bf --- /dev/null +++ b/.venv/lib/python3.11/site-packages/black/comments.py @@ -0,0 +1,334 @@ +import re +import sys +from dataclasses import dataclass +from functools import lru_cache +from typing import Iterator, List, Optional, Union + +if sys.version_info >= (3, 8): + from typing import Final +else: + from typing_extensions import Final + +from black.nodes import ( + CLOSING_BRACKETS, + STANDALONE_COMMENT, + WHITESPACE, + container_of, + first_leaf_of, + preceding_leaf, + syms, +) +from blib2to3.pgen2 import token +from blib2to3.pytree import Leaf, Node + +# types +LN = Union[Leaf, Node] + +FMT_OFF: Final = {"# fmt: off", "# fmt:off", "# yapf: disable"} +FMT_SKIP: Final = {"# fmt: skip", "# fmt:skip"} +FMT_PASS: Final = {*FMT_OFF, *FMT_SKIP} +FMT_ON: Final = {"# fmt: on", "# fmt:on", "# yapf: enable"} + +COMMENT_EXCEPTIONS = " !:#'" + + +@dataclass +class ProtoComment: + """Describes a piece of syntax that is a comment. + + It's not a :class:`blib2to3.pytree.Leaf` so that: + + * it can be cached (`Leaf` objects should not be reused more than once as + they store their lineno, column, prefix, and parent information); + * `newlines` and `consumed` fields are kept separate from the `value`. This + simplifies handling of special marker comments like ``# fmt: off/on``. + """ + + type: int # token.COMMENT or STANDALONE_COMMENT + value: str # content of the comment + newlines: int # how many newlines before the comment + consumed: int # how many characters of the original leaf's prefix did we consume + + +def generate_comments(leaf: LN) -> Iterator[Leaf]: + """Clean the prefix of the `leaf` and generate comments from it, if any. + + Comments in lib2to3 are shoved into the whitespace prefix. This happens + in `pgen2/driver.py:Driver.parse_tokens()`. This was a brilliant implementation + move because it does away with modifying the grammar to include all the + possible places in which comments can be placed. + + The sad consequence for us though is that comments don't "belong" anywhere. + This is why this function generates simple parentless Leaf objects for + comments. We simply don't know what the correct parent should be. + + No matter though, we can live without this. We really only need to + differentiate between inline and standalone comments. The latter don't + share the line with any code. 
+ + Inline comments are emitted as regular token.COMMENT leaves. Standalone + are emitted with a fake STANDALONE_COMMENT token identifier. + """ + for pc in list_comments(leaf.prefix, is_endmarker=leaf.type == token.ENDMARKER): + yield Leaf(pc.type, pc.value, prefix="\n" * pc.newlines) + + +@lru_cache(maxsize=4096) +def list_comments(prefix: str, *, is_endmarker: bool) -> List[ProtoComment]: + """Return a list of :class:`ProtoComment` objects parsed from the given `prefix`.""" + result: List[ProtoComment] = [] + if not prefix or "#" not in prefix: + return result + + consumed = 0 + nlines = 0 + ignored_lines = 0 + for index, line in enumerate(re.split("\r?\n", prefix)): + consumed += len(line) + 1 # adding the length of the split '\n' + line = line.lstrip() + if not line: + nlines += 1 + if not line.startswith("#"): + # Escaped newlines outside of a comment are not really newlines at + # all. We treat a single-line comment following an escaped newline + # as a simple trailing comment. + if line.endswith("\\"): + ignored_lines += 1 + continue + + if index == ignored_lines and not is_endmarker: + comment_type = token.COMMENT # simple trailing comment + else: + comment_type = STANDALONE_COMMENT + comment = make_comment(line) + result.append( + ProtoComment( + type=comment_type, value=comment, newlines=nlines, consumed=consumed + ) + ) + nlines = 0 + return result + + +def make_comment(content: str) -> str: + """Return a consistently formatted comment from the given `content` string. + + All comments (except for "##", "#!", "#:", '#'") should have a single + space between the hash sign and the content. + + If `content` didn't start with a hash sign, one is provided. + """ + content = content.rstrip() + if not content: + return "#" + + if content[0] == "#": + content = content[1:] + NON_BREAKING_SPACE = " " + if ( + content + and content[0] == NON_BREAKING_SPACE + and not content.lstrip().startswith("type:") + ): + content = " " + content[1:] # Replace NBSP by a simple space + if content and content[0] not in COMMENT_EXCEPTIONS: + content = " " + content + return "#" + content + + +def normalize_fmt_off(node: Node) -> None: + """Convert content between `# fmt: off`/`# fmt: on` into standalone comments.""" + try_again = True + while try_again: + try_again = convert_one_fmt_off_pair(node) + + +def convert_one_fmt_off_pair(node: Node) -> bool: + """Convert content of a single `# fmt: off`/`# fmt: on` into a standalone comment. + + Returns True if a pair was converted. + """ + for leaf in node.leaves(): + previous_consumed = 0 + for comment in list_comments(leaf.prefix, is_endmarker=False): + if comment.value not in FMT_PASS: + previous_consumed = comment.consumed + continue + # We only want standalone comments. If there's no previous leaf or + # the previous leaf is indentation, it's a standalone comment in + # disguise. + if comment.value in FMT_PASS and comment.type != STANDALONE_COMMENT: + prev = preceding_leaf(leaf) + if prev: + if comment.value in FMT_OFF and prev.type not in WHITESPACE: + continue + if comment.value in FMT_SKIP and prev.type in WHITESPACE: + continue + + ignored_nodes = list(generate_ignored_nodes(leaf, comment)) + if not ignored_nodes: + continue + + first = ignored_nodes[0] # Can be a container node with the `leaf`. 
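+            # Everything between `# fmt: off` and `# fmt: on` (or on the line
+            # with `# fmt: skip`) is detached below and re-inserted verbatim as
+            # a single STANDALONE_COMMENT leaf; that is how the original
+            # formatting survives Black's line generation.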
+            parent = first.parent
+            prefix = first.prefix
+            if comment.value in FMT_OFF:
+                first.prefix = prefix[comment.consumed :]
+            if comment.value in FMT_SKIP:
+                first.prefix = ""
+                standalone_comment_prefix = prefix
+            else:
+                standalone_comment_prefix = (
+                    prefix[:previous_consumed] + "\n" * comment.newlines
+                )
+            hidden_value = "".join(str(n) for n in ignored_nodes)
+            if comment.value in FMT_OFF:
+                hidden_value = comment.value + "\n" + hidden_value
+            if comment.value in FMT_SKIP:
+                hidden_value += " " + comment.value
+            if hidden_value.endswith("\n"):
+                # That happens when one of the `ignored_nodes` ended with a NEWLINE
+                # leaf (possibly followed by a DEDENT).
+                hidden_value = hidden_value[:-1]
+            first_idx: Optional[int] = None
+            for ignored in ignored_nodes:
+                index = ignored.remove()
+                if first_idx is None:
+                    first_idx = index
+            assert parent is not None, "INTERNAL ERROR: fmt: on/off handling (1)"
+            assert first_idx is not None, "INTERNAL ERROR: fmt: on/off handling (2)"
+            parent.insert_child(
+                first_idx,
+                Leaf(
+                    STANDALONE_COMMENT,
+                    hidden_value,
+                    prefix=standalone_comment_prefix,
+                ),
+            )
+            return True
+
+    return False
+
+
+def generate_ignored_nodes(leaf: Leaf, comment: ProtoComment) -> Iterator[LN]:
+    """Starting from the container of `leaf`, generate all leaves until `# fmt: on`.
+
+    If the comment is `# fmt: skip`, returns the leaf only.
+    Stops at the end of the block.
+    """
+    if comment.value in FMT_SKIP:
+        yield from _generate_ignored_nodes_from_fmt_skip(leaf, comment)
+        return
+    container: Optional[LN] = container_of(leaf)
+    while container is not None and container.type != token.ENDMARKER:
+        if is_fmt_on(container):
+            return
+
+        # fix for fmt: on in children
+        if children_contains_fmt_on(container):
+            for index, child in enumerate(container.children):
+                if isinstance(child, Leaf) and is_fmt_on(child):
+                    if child.type in CLOSING_BRACKETS:
+                        # This means `# fmt: on` is placed at a different bracket level
+                        # than `# fmt: off`. This is an invalid use, but as a courtesy,
+                        # we include this closing bracket in the ignored nodes.
+                        # The alternative is to fail the formatting.
+                        yield child
+                    return
+                if (
+                    child.type == token.INDENT
+                    and index < len(container.children) - 1
+                    and children_contains_fmt_on(container.children[index + 1])
+                ):
+                    # This means `# fmt: on` is placed right after an indentation
+                    # level, and we shouldn't swallow the previous INDENT token.
+                    return
+                if children_contains_fmt_on(child):
+                    return
+                yield child
+        else:
+            if container.type == token.DEDENT and container.next_sibling is None:
+                # This can happen when there is no matching `# fmt: on` comment at the
+                # same level as `# fmt: off`. We need to keep this DEDENT.
+ return + yield container + container = container.next_sibling + + +def _generate_ignored_nodes_from_fmt_skip( + leaf: Leaf, comment: ProtoComment +) -> Iterator[LN]: + """Generate all leaves that should be ignored by the `# fmt: skip` from `leaf`.""" + prev_sibling = leaf.prev_sibling + parent = leaf.parent + # Need to properly format the leaf prefix to compare it to comment.value, + # which is also formatted + comments = list_comments(leaf.prefix, is_endmarker=False) + if not comments or comment.value != comments[0].value: + return + if prev_sibling is not None: + leaf.prefix = "" + siblings = [prev_sibling] + while "\n" not in prev_sibling.prefix and prev_sibling.prev_sibling is not None: + prev_sibling = prev_sibling.prev_sibling + siblings.insert(0, prev_sibling) + yield from siblings + elif ( + parent is not None and parent.type == syms.suite and leaf.type == token.NEWLINE + ): + # The `# fmt: skip` is on the colon line of the if/while/def/class/... + # statements. The ignored nodes should be previous siblings of the + # parent suite node. + leaf.prefix = "" + ignored_nodes: List[LN] = [] + parent_sibling = parent.prev_sibling + while parent_sibling is not None and parent_sibling.type != syms.suite: + ignored_nodes.insert(0, parent_sibling) + parent_sibling = parent_sibling.prev_sibling + # Special case for `async_stmt` where the ASYNC token is on the + # grandparent node. + grandparent = parent.parent + if ( + grandparent is not None + and grandparent.prev_sibling is not None + and grandparent.prev_sibling.type == token.ASYNC + ): + ignored_nodes.insert(0, grandparent.prev_sibling) + yield from iter(ignored_nodes) + + +def is_fmt_on(container: LN) -> bool: + """Determine whether formatting is switched on within a container. + Determined by whether the last `# fmt:` comment is `on` or `off`. + """ + fmt_on = False + for comment in list_comments(container.prefix, is_endmarker=False): + if comment.value in FMT_ON: + fmt_on = True + elif comment.value in FMT_OFF: + fmt_on = False + return fmt_on + + +def children_contains_fmt_on(container: LN) -> bool: + """Determine if children have formatting switched on.""" + for child in container.children: + leaf = first_leaf_of(child) + if leaf is not None and is_fmt_on(leaf): + return True + + return False + + +def contains_pragma_comment(comment_list: List[Leaf]) -> bool: + """ + Returns: + True iff one of the comments in @comment_list is a pragma used by one + of the more common static analysis tools for python (e.g. mypy, flake8, + pylint). + """ + for comment in comment_list: + if comment.value.startswith(("# type:", "# noqa", "# pylint:")): + return True + + return False diff --git a/.venv/lib/python3.11/site-packages/black/concurrency.py b/.venv/lib/python3.11/site-packages/black/concurrency.py new file mode 100644 index 0000000..1598f51 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/black/concurrency.py @@ -0,0 +1,187 @@ +""" +Formatting many files at once via multiprocessing. Contains entrypoint and utilities. + +NOTE: this module is only imported if we need to format several files at once. 
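+
+The entry point is `reformat_many()`, which schedules `format_file_in_place()`
+calls onto an executor via `schedule_formatting()`.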
+""" + +import asyncio +import logging +import os +import signal +import sys +from concurrent.futures import Executor, ProcessPoolExecutor, ThreadPoolExecutor +from multiprocessing import Manager +from pathlib import Path +from typing import Any, Iterable, Optional, Set + +from mypy_extensions import mypyc_attr + +from black import WriteBack, format_file_in_place +from black.cache import Cache, filter_cached, read_cache, write_cache +from black.mode import Mode +from black.output import err +from black.report import Changed, Report + + +def maybe_install_uvloop() -> None: + """If our environment has uvloop installed we use it. + + This is called only from command-line entry points to avoid + interfering with the parent process if Black is used as a library. + """ + try: + import uvloop + + uvloop.install() + except ImportError: + pass + + +def cancel(tasks: Iterable["asyncio.Task[Any]"]) -> None: + """asyncio signal handler that cancels all `tasks` and reports to stderr.""" + err("Aborted!") + for task in tasks: + task.cancel() + + +def shutdown(loop: asyncio.AbstractEventLoop) -> None: + """Cancel all pending tasks on `loop`, wait for them, and close the loop.""" + try: + # This part is borrowed from asyncio/runners.py in Python 3.7b2. + to_cancel = [task for task in asyncio.all_tasks(loop) if not task.done()] + if not to_cancel: + return + + for task in to_cancel: + task.cancel() + loop.run_until_complete(asyncio.gather(*to_cancel, return_exceptions=True)) + finally: + # `concurrent.futures.Future` objects cannot be cancelled once they + # are already running. There might be some when the `shutdown()` happened. + # Silence their logger's spew about the event loop being closed. + cf_logger = logging.getLogger("concurrent.futures") + cf_logger.setLevel(logging.CRITICAL) + loop.close() + + +# diff-shades depends on being to monkeypatch this function to operate. I know it's +# not ideal, but this shouldn't cause any issues ... hopefully. ~ichard26 +@mypyc_attr(patchable=True) +def reformat_many( + sources: Set[Path], + fast: bool, + write_back: WriteBack, + mode: Mode, + report: Report, + workers: Optional[int], +) -> None: + """Reformat multiple files using a ProcessPoolExecutor.""" + maybe_install_uvloop() + + executor: Executor + if workers is None: + workers = os.cpu_count() or 1 + if sys.platform == "win32": + # Work around https://bugs.python.org/issue26903 + workers = min(workers, 60) + try: + executor = ProcessPoolExecutor(max_workers=workers) + except (ImportError, NotImplementedError, OSError): + # we arrive here if the underlying system does not support multi-processing + # like in AWS Lambda or Termux, in which case we gracefully fallback to + # a ThreadPoolExecutor with just a single worker (more workers would not do us + # any good due to the Global Interpreter Lock) + executor = ThreadPoolExecutor(max_workers=1) + + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + try: + loop.run_until_complete( + schedule_formatting( + sources=sources, + fast=fast, + write_back=write_back, + mode=mode, + report=report, + loop=loop, + executor=executor, + ) + ) + finally: + try: + shutdown(loop) + finally: + asyncio.set_event_loop(None) + if executor is not None: + executor.shutdown() + + +async def schedule_formatting( + sources: Set[Path], + fast: bool, + write_back: WriteBack, + mode: Mode, + report: "Report", + loop: asyncio.AbstractEventLoop, + executor: "Executor", +) -> None: + """Run formatting of `sources` in parallel using the provided `executor`. 
+ + (Use ProcessPoolExecutors for actual parallelism.) + + `write_back`, `fast`, and `mode` options are passed to + :func:`format_file_in_place`. + """ + cache: Cache = {} + if write_back not in (WriteBack.DIFF, WriteBack.COLOR_DIFF): + cache = read_cache(mode) + sources, cached = filter_cached(cache, sources) + for src in sorted(cached): + report.done(src, Changed.CACHED) + if not sources: + return + + cancelled = [] + sources_to_cache = [] + lock = None + if write_back in (WriteBack.DIFF, WriteBack.COLOR_DIFF): + # For diff output, we need locks to ensure we don't interleave output + # from different processes. + manager = Manager() + lock = manager.Lock() + tasks = { + asyncio.ensure_future( + loop.run_in_executor( + executor, format_file_in_place, src, fast, mode, write_back, lock + ) + ): src + for src in sorted(sources) + } + pending = tasks.keys() + try: + loop.add_signal_handler(signal.SIGINT, cancel, pending) + loop.add_signal_handler(signal.SIGTERM, cancel, pending) + except NotImplementedError: + # There are no good alternatives for these on Windows. + pass + while pending: + done, _ = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED) + for task in done: + src = tasks.pop(task) + if task.cancelled(): + cancelled.append(task) + elif task.exception(): + report.failed(src, str(task.exception())) + else: + changed = Changed.YES if task.result() else Changed.NO + # If the file was written back or was successfully checked as + # well-formatted, store this information in the cache. + if write_back is WriteBack.YES or ( + write_back is WriteBack.CHECK and changed is Changed.NO + ): + sources_to_cache.append(src) + report.done(src, changed) + if cancelled: + await asyncio.gather(*cancelled, return_exceptions=True) + if sources_to_cache: + write_cache(cache, sources_to_cache, mode) diff --git a/.venv/lib/python3.11/site-packages/black/const.cpython-311-x86_64-linux-gnu.so b/.venv/lib/python3.11/site-packages/black/const.cpython-311-x86_64-linux-gnu.so new file mode 100755 index 0000000..2143ca0 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/black/const.cpython-311-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.11/site-packages/black/const.py b/.venv/lib/python3.11/site-packages/black/const.py new file mode 100644 index 0000000..0e13f31 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/black/const.py @@ -0,0 +1,4 @@ +DEFAULT_LINE_LENGTH = 88 +DEFAULT_EXCLUDES = r"/(\.direnv|\.eggs|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|venv|\.svn|\.ipynb_checkpoints|_build|buck-out|build|dist|__pypackages__)/" # noqa: B950 +DEFAULT_INCLUDES = r"(\.pyi?|\.ipynb)$" +STDIN_PLACEHOLDER = "__BLACK_STDIN_FILENAME__" diff --git a/.venv/lib/python3.11/site-packages/black/debug.py b/.venv/lib/python3.11/site-packages/black/debug.py new file mode 100644 index 0000000..150b448 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/black/debug.py @@ -0,0 +1,47 @@ +from dataclasses import dataclass +from typing import Iterator, TypeVar, Union + +from black.nodes import Visitor +from black.output import out +from black.parsing import lib2to3_parse +from blib2to3.pgen2 import token +from blib2to3.pytree import Leaf, Node, type_repr + +LN = Union[Leaf, Node] +T = TypeVar("T") + + +@dataclass +class DebugVisitor(Visitor[T]): + tree_depth: int = 0 + + def visit_default(self, node: LN) -> Iterator[T]: + indent = " " * (2 * self.tree_depth) + if isinstance(node, Node): + _type = type_repr(node.type) + out(f"{indent}{_type}", fg="yellow") + self.tree_depth += 1 + for child in 
node.children: + yield from self.visit(child) + + self.tree_depth -= 1 + out(f"{indent}/{_type}", fg="yellow", bold=False) + else: + _type = token.tok_name.get(node.type, str(node.type)) + out(f"{indent}{_type}", fg="blue", nl=False) + if node.prefix: + # We don't have to handle prefixes for `Node` objects since + # that delegates to the first child anyway. + out(f" {node.prefix!r}", fg="green", bold=False, nl=False) + out(f" {node.value!r}", fg="blue", bold=False) + + @classmethod + def show(cls, code: Union[str, Leaf, Node]) -> None: + """Pretty-print the lib2to3 AST of a given string of `code`. + + Convenience method for debugging. + """ + v: DebugVisitor[None] = DebugVisitor() + if isinstance(code, str): + code = lib2to3_parse(code) + list(v.visit(code)) diff --git a/.venv/lib/python3.11/site-packages/black/files.py b/.venv/lib/python3.11/site-packages/black/files.py new file mode 100644 index 0000000..8c01311 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/black/files.py @@ -0,0 +1,399 @@ +import io +import os +import sys +from functools import lru_cache +from pathlib import Path +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Iterable, + Iterator, + List, + Optional, + Pattern, + Sequence, + Tuple, + Union, +) + +from mypy_extensions import mypyc_attr +from packaging.specifiers import InvalidSpecifier, Specifier, SpecifierSet +from packaging.version import InvalidVersion, Version +from pathspec import PathSpec +from pathspec.patterns.gitwildmatch import GitWildMatchPatternError + +if sys.version_info >= (3, 11): + try: + import tomllib + except ImportError: + # Help users on older alphas + if not TYPE_CHECKING: + import tomli as tomllib +else: + import tomli as tomllib + +from black.handle_ipynb_magics import jupyter_dependencies_are_installed +from black.mode import TargetVersion +from black.output import err +from black.report import Report + +if TYPE_CHECKING: + import colorama # noqa: F401 + + +@lru_cache() +def find_project_root( + srcs: Sequence[str], stdin_filename: Optional[str] = None +) -> Tuple[Path, str]: + """Return a directory containing .git, .hg, or pyproject.toml. + + That directory will be a common parent of all files and directories + passed in `srcs`. + + If no directory in the tree contains a marker that would specify it's the + project root, the root of the file system is returned. + + Returns a two-tuple with the first element as the project root path and + the second element as a string describing the method by which the + project root was discovered. + """ + if stdin_filename is not None: + srcs = tuple(stdin_filename if s == "-" else s for s in srcs) + if not srcs: + srcs = [str(Path.cwd().resolve())] + + path_srcs = [Path(Path.cwd(), src).resolve() for src in srcs] + + # A list of lists of parents for each 'src'. 
'src' is included as a + # "parent" of itself if it is a directory + src_parents = [ + list(path.parents) + ([path] if path.is_dir() else []) for path in path_srcs + ] + + common_base = max( + set.intersection(*(set(parents) for parents in src_parents)), + key=lambda path: path.parts, + ) + + for directory in (common_base, *common_base.parents): + if (directory / ".git").exists(): + return directory, ".git directory" + + if (directory / ".hg").is_dir(): + return directory, ".hg directory" + + if (directory / "pyproject.toml").is_file(): + return directory, "pyproject.toml" + + return directory, "file system root" + + +def find_pyproject_toml(path_search_start: Tuple[str, ...]) -> Optional[str]: + """Find the absolute filepath to a pyproject.toml if it exists""" + path_project_root, _ = find_project_root(path_search_start) + path_pyproject_toml = path_project_root / "pyproject.toml" + if path_pyproject_toml.is_file(): + return str(path_pyproject_toml) + + try: + path_user_pyproject_toml = find_user_pyproject_toml() + return ( + str(path_user_pyproject_toml) + if path_user_pyproject_toml.is_file() + else None + ) + except (PermissionError, RuntimeError) as e: + # We do not have access to the user-level config directory, so ignore it. + err(f"Ignoring user configuration directory due to {e!r}") + return None + + +@mypyc_attr(patchable=True) +def parse_pyproject_toml(path_config: str) -> Dict[str, Any]: + """Parse a pyproject toml file, pulling out relevant parts for Black. + + If parsing fails, will raise a tomllib.TOMLDecodeError. + """ + with open(path_config, "rb") as f: + pyproject_toml = tomllib.load(f) + config: Dict[str, Any] = pyproject_toml.get("tool", {}).get("black", {}) + config = {k.replace("--", "").replace("-", "_"): v for k, v in config.items()} + + if "target_version" not in config: + inferred_target_version = infer_target_version(pyproject_toml) + if inferred_target_version is not None: + config["target_version"] = [v.name.lower() for v in inferred_target_version] + + return config + + +def infer_target_version( + pyproject_toml: Dict[str, Any] +) -> Optional[List[TargetVersion]]: + """Infer Black's target version from the project metadata in pyproject.toml. + + Supports the PyPA standard format (PEP 621): + https://packaging.python.org/en/latest/specifications/declaring-project-metadata/#requires-python + + If the target version cannot be inferred, returns None. + """ + project_metadata = pyproject_toml.get("project", {}) + requires_python = project_metadata.get("requires-python", None) + if requires_python is not None: + try: + return parse_req_python_version(requires_python) + except InvalidVersion: + pass + try: + return parse_req_python_specifier(requires_python) + except (InvalidSpecifier, InvalidVersion): + pass + + return None + + +def parse_req_python_version(requires_python: str) -> Optional[List[TargetVersion]]: + """Parse a version string (i.e. ``"3.7"``) to a list of TargetVersion. + + If parsing fails, will raise a packaging.version.InvalidVersion error. + If the parsed version cannot be mapped to a valid TargetVersion, returns None. + """ + version = Version(requires_python) + if version.release[0] != 3: + return None + try: + return [TargetVersion(version.release[1])] + except (IndexError, ValueError): + return None + + +def parse_req_python_specifier(requires_python: str) -> Optional[List[TargetVersion]]: + """Parse a specifier string (i.e. ``">=3.7,<3.10"``) to a list of TargetVersion. 
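+
+    For example, ``">=3.7,<3.10"`` would map to ``[PY37, PY38, PY39]``.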
+ + If parsing fails, will raise a packaging.specifiers.InvalidSpecifier error. + If the parsed specifier cannot be mapped to a valid TargetVersion, returns None. + """ + specifier_set = strip_specifier_set(SpecifierSet(requires_python)) + if not specifier_set: + return None + + target_version_map = {f"3.{v.value}": v for v in TargetVersion} + compatible_versions: List[str] = list(specifier_set.filter(target_version_map)) + if compatible_versions: + return [target_version_map[v] for v in compatible_versions] + return None + + +def strip_specifier_set(specifier_set: SpecifierSet) -> SpecifierSet: + """Strip minor versions for some specifiers in the specifier set. + + For background on version specifiers, see PEP 440: + https://peps.python.org/pep-0440/#version-specifiers + """ + specifiers = [] + for s in specifier_set: + if "*" in str(s): + specifiers.append(s) + elif s.operator in ["~=", "==", ">=", "==="]: + version = Version(s.version) + stripped = Specifier(f"{s.operator}{version.major}.{version.minor}") + specifiers.append(stripped) + elif s.operator == ">": + version = Version(s.version) + if len(version.release) > 2: + s = Specifier(f">={version.major}.{version.minor}") + specifiers.append(s) + else: + specifiers.append(s) + + return SpecifierSet(",".join(str(s) for s in specifiers)) + + +@lru_cache() +def find_user_pyproject_toml() -> Path: + r"""Return the path to the top-level user configuration for black. + + This looks for ~\.black on Windows and ~/.config/black on Linux and other + Unix systems. + + May raise: + - RuntimeError: if the current user has no homedir + - PermissionError: if the current process cannot access the user's homedir + """ + if sys.platform == "win32": + # Windows + user_config_path = Path.home() / ".black" + else: + config_root = os.environ.get("XDG_CONFIG_HOME", "~/.config") + user_config_path = Path(config_root).expanduser() / "black" + return user_config_path.resolve() + + +@lru_cache() +def get_gitignore(root: Path) -> PathSpec: + """Return a PathSpec matching gitignore content if present.""" + gitignore = root / ".gitignore" + lines: List[str] = [] + if gitignore.is_file(): + with gitignore.open(encoding="utf-8") as gf: + lines = gf.readlines() + try: + return PathSpec.from_lines("gitwildmatch", lines) + except GitWildMatchPatternError as e: + err(f"Could not parse {gitignore}: {e}") + raise + + +def normalize_path_maybe_ignore( + path: Path, + root: Path, + report: Optional[Report] = None, +) -> Optional[str]: + """Normalize `path`. May return `None` if `path` was ignored. + + `report` is where "path ignored" output goes. 
+ """ + try: + abspath = path if path.is_absolute() else Path.cwd() / path + normalized_path = abspath.resolve() + try: + root_relative_path = normalized_path.relative_to(root).as_posix() + except ValueError: + if report: + report.path_ignored( + path, f"is a symbolic link that points outside {root}" + ) + return None + + except OSError as e: + if report: + report.path_ignored(path, f"cannot be read because {e}") + return None + + return root_relative_path + + +def path_is_ignored( + path: Path, gitignore_dict: Dict[Path, PathSpec], report: Report +) -> bool: + for gitignore_path, pattern in gitignore_dict.items(): + relative_path = normalize_path_maybe_ignore(path, gitignore_path, report) + if relative_path is None: + break + if pattern.match_file(relative_path): + report.path_ignored(path, "matches a .gitignore file content") + return True + return False + + +def path_is_excluded( + normalized_path: str, + pattern: Optional[Pattern[str]], +) -> bool: + match = pattern.search(normalized_path) if pattern else None + return bool(match and match.group(0)) + + +def gen_python_files( + paths: Iterable[Path], + root: Path, + include: Pattern[str], + exclude: Pattern[str], + extend_exclude: Optional[Pattern[str]], + force_exclude: Optional[Pattern[str]], + report: Report, + gitignore_dict: Optional[Dict[Path, PathSpec]], + *, + verbose: bool, + quiet: bool, +) -> Iterator[Path]: + """Generate all files under `path` whose paths are not excluded by the + `exclude_regex`, `extend_exclude`, or `force_exclude` regexes, + but are included by the `include` regex. + + Symbolic links pointing outside of the `root` directory are ignored. + + `report` is where output about exclusions goes. + """ + + assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}" + for child in paths: + normalized_path = normalize_path_maybe_ignore(child, root, report) + if normalized_path is None: + continue + + # First ignore files matching .gitignore, if passed + if gitignore_dict and path_is_ignored(child, gitignore_dict, report): + continue + + # Then ignore with `--exclude` `--extend-exclude` and `--force-exclude` options. + normalized_path = "/" + normalized_path + if child.is_dir(): + normalized_path += "/" + + if path_is_excluded(normalized_path, exclude): + report.path_ignored(child, "matches the --exclude regular expression") + continue + + if path_is_excluded(normalized_path, extend_exclude): + report.path_ignored( + child, "matches the --extend-exclude regular expression" + ) + continue + + if path_is_excluded(normalized_path, force_exclude): + report.path_ignored(child, "matches the --force-exclude regular expression") + continue + + if child.is_dir(): + # If gitignore is None, gitignore usage is disabled, while a Falsey + # gitignore is when the directory doesn't have a .gitignore file. 
+ if gitignore_dict is not None: + new_gitignore_dict = { + **gitignore_dict, + root / child: get_gitignore(child), + } + else: + new_gitignore_dict = None + yield from gen_python_files( + child.iterdir(), + root, + include, + exclude, + extend_exclude, + force_exclude, + report, + new_gitignore_dict, + verbose=verbose, + quiet=quiet, + ) + + elif child.is_file(): + if child.suffix == ".ipynb" and not jupyter_dependencies_are_installed( + verbose=verbose, quiet=quiet + ): + continue + include_match = include.search(normalized_path) if include else True + if include_match: + yield child + + +def wrap_stream_for_windows( + f: io.TextIOWrapper, +) -> Union[io.TextIOWrapper, "colorama.AnsiToWin32"]: + """ + Wrap stream with colorama's wrap_stream so colors are shown on Windows. + + If `colorama` is unavailable, the original stream is returned unmodified. + Otherwise, the `wrap_stream()` function determines whether the stream needs + to be wrapped for a Windows environment and will accordingly either return + an `AnsiToWin32` wrapper or the original stream. + """ + try: + from colorama.initialise import wrap_stream + except ImportError: + return f + else: + # Set `strip=False` to avoid needing to modify test_express_diff_with_color. + return wrap_stream(f, convert=None, strip=False, autoreset=False, wrap=True) diff --git a/.venv/lib/python3.11/site-packages/black/handle_ipynb_magics.cpython-311-x86_64-linux-gnu.so b/.venv/lib/python3.11/site-packages/black/handle_ipynb_magics.cpython-311-x86_64-linux-gnu.so new file mode 100755 index 0000000..825e3f2 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/black/handle_ipynb_magics.cpython-311-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.11/site-packages/black/handle_ipynb_magics.py b/.venv/lib/python3.11/site-packages/black/handle_ipynb_magics.py new file mode 100644 index 0000000..9e1af75 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/black/handle_ipynb_magics.py @@ -0,0 +1,459 @@ +"""Functions to process IPython magics with.""" + +import ast +import collections +import dataclasses +import secrets +import sys +from functools import lru_cache +from typing import Dict, List, Optional, Tuple + +if sys.version_info >= (3, 10): + from typing import TypeGuard +else: + from typing_extensions import TypeGuard + +from black.output import out +from black.report import NothingChanged + +TRANSFORMED_MAGICS = frozenset( + ( + "get_ipython().run_cell_magic", + "get_ipython().system", + "get_ipython().getoutput", + "get_ipython().run_line_magic", + ) +) +TOKENS_TO_IGNORE = frozenset( + ( + "ENDMARKER", + "NL", + "NEWLINE", + "COMMENT", + "DEDENT", + "UNIMPORTANT_WS", + "ESCAPED_NL", + ) +) +PYTHON_CELL_MAGICS = frozenset( + ( + "capture", + "prun", + "pypy", + "python", + "python3", + "time", + "timeit", + ) +) +TOKEN_HEX = secrets.token_hex + + +@dataclasses.dataclass(frozen=True) +class Replacement: + mask: str + src: str + + +@lru_cache() +def jupyter_dependencies_are_installed(*, verbose: bool, quiet: bool) -> bool: + try: + import IPython # noqa:F401 + import tokenize_rt # noqa:F401 + except ModuleNotFoundError: + if verbose or not quiet: + msg = ( + "Skipping .ipynb files as Jupyter dependencies are not installed.\n" + 'You can fix this by running ``pip install "black[jupyter]"``' + ) + out(msg) + return False + else: + return True + + +def remove_trailing_semicolon(src: str) -> Tuple[str, bool]: + """Remove trailing semicolon from Jupyter notebook cell. 
+ + For example, + + fig, ax = plt.subplots() + ax.plot(x_data, y_data); # plot data + + would become + + fig, ax = plt.subplots() + ax.plot(x_data, y_data) # plot data + + Mirrors the logic in `quiet` from `IPython.core.displayhook`, but uses + ``tokenize_rt`` so that round-tripping works fine. + """ + from tokenize_rt import reversed_enumerate, src_to_tokens, tokens_to_src + + tokens = src_to_tokens(src) + trailing_semicolon = False + for idx, token in reversed_enumerate(tokens): + if token.name in TOKENS_TO_IGNORE: + continue + if token.name == "OP" and token.src == ";": + del tokens[idx] + trailing_semicolon = True + break + if not trailing_semicolon: + return src, False + return tokens_to_src(tokens), True + + +def put_trailing_semicolon_back(src: str, has_trailing_semicolon: bool) -> str: + """Put trailing semicolon back if cell originally had it. + + Mirrors the logic in `quiet` from `IPython.core.displayhook`, but uses + ``tokenize_rt`` so that round-tripping works fine. + """ + if not has_trailing_semicolon: + return src + from tokenize_rt import reversed_enumerate, src_to_tokens, tokens_to_src + + tokens = src_to_tokens(src) + for idx, token in reversed_enumerate(tokens): + if token.name in TOKENS_TO_IGNORE: + continue + tokens[idx] = token._replace(src=token.src + ";") + break + else: # pragma: nocover + raise AssertionError( + "INTERNAL ERROR: Was not able to reinstate trailing semicolon. " + "Please report a bug on https://github.com/psf/black/issues. " + ) from None + return str(tokens_to_src(tokens)) + + +def mask_cell(src: str) -> Tuple[str, List[Replacement]]: + """Mask IPython magics so content becomes parseable Python code. + + For example, + + %matplotlib inline + 'foo' + + becomes + + "25716f358c32750e" + 'foo' + + The replacements are returned, along with the transformed code. + """ + replacements: List[Replacement] = [] + try: + ast.parse(src) + except SyntaxError: + # Might have IPython magics, will process below. + pass + else: + # Syntax is fine, nothing to mask, early return. + return src, replacements + + from IPython.core.inputtransformer2 import TransformerManager + + transformer_manager = TransformerManager() + transformed = transformer_manager.transform_cell(src) + transformed, cell_magic_replacements = replace_cell_magics(transformed) + replacements += cell_magic_replacements + transformed = transformer_manager.transform_cell(transformed) + transformed, magic_replacements = replace_magics(transformed) + if len(transformed.splitlines()) != len(src.splitlines()): + # Multi-line magic, not supported. + raise NothingChanged + replacements += magic_replacements + return transformed, replacements + + +def get_token(src: str, magic: str) -> str: + """Return randomly generated token to mask IPython magic with. + + For example, if 'magic' was `%matplotlib inline`, then a possible + token to mask it with would be `"43fdd17f7e5ddc83"`. The token + will be the same length as the magic, and we make sure that it was + not already present anywhere else in the cell. + """ + assert magic + nbytes = max(len(magic) // 2 - 1, 1) + token = TOKEN_HEX(nbytes) + counter = 0 + while token in src: + token = TOKEN_HEX(nbytes) + counter += 1 + if counter > 100: + raise AssertionError( + "INTERNAL ERROR: Black was not able to replace IPython magic. " + "Please report a bug on https://github.com/psf/black/issues. " + f"The magic might be helpful: {magic}" + ) from None + if len(token) + 2 < len(magic): + token = f"{token}." 
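+    # The trailing dot (added above when the token ran short) pads the token so
+    # that, once quoted, the mask has roughly the same length as the magic it
+    # replaces, keeping column offsets stable for the rest of the line.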
+ return f'"{token}"' + + +def replace_cell_magics(src: str) -> Tuple[str, List[Replacement]]: + """Replace cell magic with token. + + Note that 'src' will already have been processed by IPython's + TransformerManager().transform_cell. + + Example, + + get_ipython().run_cell_magic('t', '-n1', 'ls =!ls\\n') + + becomes + + "a794." + ls =!ls + + The replacement, along with the transformed code, is returned. + """ + replacements: List[Replacement] = [] + + tree = ast.parse(src) + + cell_magic_finder = CellMagicFinder() + cell_magic_finder.visit(tree) + if cell_magic_finder.cell_magic is None: + return src, replacements + header = cell_magic_finder.cell_magic.header + mask = get_token(src, header) + replacements.append(Replacement(mask=mask, src=header)) + return f"{mask}\n{cell_magic_finder.cell_magic.body}", replacements + + +def replace_magics(src: str) -> Tuple[str, List[Replacement]]: + """Replace magics within body of cell. + + Note that 'src' will already have been processed by IPython's + TransformerManager().transform_cell. + + Example, this + + get_ipython().run_line_magic('matplotlib', 'inline') + 'foo' + + becomes + + "5e67db56d490fd39" + 'foo' + + The replacement, along with the transformed code, are returned. + """ + replacements = [] + magic_finder = MagicFinder() + magic_finder.visit(ast.parse(src)) + new_srcs = [] + for i, line in enumerate(src.splitlines(), start=1): + if i in magic_finder.magics: + offsets_and_magics = magic_finder.magics[i] + if len(offsets_and_magics) != 1: # pragma: nocover + raise AssertionError( + f"Expecting one magic per line, got: {offsets_and_magics}\n" + "Please report a bug on https://github.com/psf/black/issues." + ) + col_offset, magic = ( + offsets_and_magics[0].col_offset, + offsets_and_magics[0].magic, + ) + mask = get_token(src, magic) + replacements.append(Replacement(mask=mask, src=magic)) + line = line[:col_offset] + mask + new_srcs.append(line) + return "\n".join(new_srcs), replacements + + +def unmask_cell(src: str, replacements: List[Replacement]) -> str: + """Remove replacements from cell. + + For example + + "9b20" + foo = bar + + becomes + + %%time + foo = bar + """ + for replacement in replacements: + src = src.replace(replacement.mask, replacement.src) + return src + + +def _is_ipython_magic(node: ast.expr) -> TypeGuard[ast.Attribute]: + """Check if attribute is IPython magic. + + Note that the source of the abstract syntax tree + will already have been processed by IPython's + TransformerManager().transform_cell. + """ + return ( + isinstance(node, ast.Attribute) + and isinstance(node.value, ast.Call) + and isinstance(node.value.func, ast.Name) + and node.value.func.id == "get_ipython" + ) + + +def _get_str_args(args: List[ast.expr]) -> List[str]: + str_args = [] + for arg in args: + assert isinstance(arg, ast.Str) + str_args.append(arg.s) + return str_args + + +@dataclasses.dataclass(frozen=True) +class CellMagic: + name: str + params: Optional[str] + body: str + + @property + def header(self) -> str: + if self.params: + return f"%%{self.name} {self.params}" + return f"%%{self.name}" + + +# ast.NodeVisitor + dataclass = breakage under mypyc. +class CellMagicFinder(ast.NodeVisitor): + """Find cell magics. + + Note that the source of the abstract syntax tree + will already have been processed by IPython's + TransformerManager().transform_cell. + + For example, + + %%time\nfoo() + + would have been transformed to + + get_ipython().run_cell_magic('time', '', 'foo()\\n') + + and we look for instances of the latter. 
+ """ + + def __init__(self, cell_magic: Optional[CellMagic] = None) -> None: + self.cell_magic = cell_magic + + def visit_Expr(self, node: ast.Expr) -> None: + """Find cell magic, extract header and body.""" + if ( + isinstance(node.value, ast.Call) + and _is_ipython_magic(node.value.func) + and node.value.func.attr == "run_cell_magic" + ): + args = _get_str_args(node.value.args) + self.cell_magic = CellMagic(name=args[0], params=args[1], body=args[2]) + self.generic_visit(node) + + +@dataclasses.dataclass(frozen=True) +class OffsetAndMagic: + col_offset: int + magic: str + + +# Unsurprisingly, subclassing ast.NodeVisitor means we can't use dataclasses here +# as mypyc will generate broken code. +class MagicFinder(ast.NodeVisitor): + """Visit cell to look for get_ipython calls. + + Note that the source of the abstract syntax tree + will already have been processed by IPython's + TransformerManager().transform_cell. + + For example, + + %matplotlib inline + + would have been transformed to + + get_ipython().run_line_magic('matplotlib', 'inline') + + and we look for instances of the latter (and likewise for other + types of magics). + """ + + def __init__(self) -> None: + self.magics: Dict[int, List[OffsetAndMagic]] = collections.defaultdict(list) + + def visit_Assign(self, node: ast.Assign) -> None: + """Look for system assign magics. + + For example, + + black_version = !black --version + env = %env var + + would have been (respectively) transformed to + + black_version = get_ipython().getoutput('black --version') + env = get_ipython().run_line_magic('env', 'var') + + and we look for instances of any of the latter. + """ + if isinstance(node.value, ast.Call) and _is_ipython_magic(node.value.func): + args = _get_str_args(node.value.args) + if node.value.func.attr == "getoutput": + src = f"!{args[0]}" + elif node.value.func.attr == "run_line_magic": + src = f"%{args[0]}" + if args[1]: + src += f" {args[1]}" + else: + raise AssertionError( + f"Unexpected IPython magic {node.value.func.attr!r} found. " + "Please report a bug on https://github.com/psf/black/issues." + ) from None + self.magics[node.value.lineno].append( + OffsetAndMagic(node.value.col_offset, src) + ) + self.generic_visit(node) + + def visit_Expr(self, node: ast.Expr) -> None: + """Look for magics in body of cell. + + For examples, + + !ls + !!ls + ?ls + ??ls + + would (respectively) get transformed to + + get_ipython().system('ls') + get_ipython().getoutput('ls') + get_ipython().run_line_magic('pinfo', 'ls') + get_ipython().run_line_magic('pinfo2', 'ls') + + and we look for instances of any of the latter. + """ + if isinstance(node.value, ast.Call) and _is_ipython_magic(node.value.func): + args = _get_str_args(node.value.args) + if node.value.func.attr == "run_line_magic": + if args[0] == "pinfo": + src = f"?{args[1]}" + elif args[0] == "pinfo2": + src = f"??{args[1]}" + else: + src = f"%{args[0]}" + if args[1]: + src += f" {args[1]}" + elif node.value.func.attr == "system": + src = f"!{args[0]}" + elif node.value.func.attr == "getoutput": + src = f"!!{args[0]}" + else: + raise NothingChanged # unsupported magic. 
+ self.magics[node.value.lineno].append( + OffsetAndMagic(node.value.col_offset, src) + ) + self.generic_visit(node) diff --git a/.venv/lib/python3.11/site-packages/black/linegen.cpython-311-x86_64-linux-gnu.so b/.venv/lib/python3.11/site-packages/black/linegen.cpython-311-x86_64-linux-gnu.so new file mode 100755 index 0000000..063de20 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/black/linegen.cpython-311-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.11/site-packages/black/linegen.py b/.venv/lib/python3.11/site-packages/black/linegen.py new file mode 100644 index 0000000..7afb173 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/black/linegen.py @@ -0,0 +1,1517 @@ +""" +Generating lines of code. +""" +import sys +from dataclasses import dataclass +from enum import Enum, auto +from functools import partial, wraps +from typing import Collection, Iterator, List, Optional, Set, Union, cast + +from black.brackets import ( + COMMA_PRIORITY, + DOT_PRIORITY, + get_leaves_inside_matching_brackets, + max_delimiter_priority_in_atom, +) +from black.comments import FMT_OFF, generate_comments, list_comments +from black.lines import ( + Line, + append_leaves, + can_be_split, + can_omit_invisible_parens, + is_line_short_enough, + line_to_string, +) +from black.mode import Feature, Mode, Preview +from black.nodes import ( + ASSIGNMENTS, + BRACKETS, + CLOSING_BRACKETS, + OPENING_BRACKETS, + RARROW, + STANDALONE_COMMENT, + STATEMENT, + WHITESPACE, + Visitor, + ensure_visible, + is_arith_like, + is_atom_with_invisible_parens, + is_docstring, + is_empty_tuple, + is_lpar_token, + is_multiline_string, + is_name_token, + is_one_sequence_between, + is_one_tuple, + is_rpar_token, + is_stub_body, + is_stub_suite, + is_tuple_containing_walrus, + is_vararg, + is_walrus_assignment, + is_yield, + syms, + wrap_in_parentheses, +) +from black.numerics import normalize_numeric_literal +from black.strings import ( + fix_docstring, + get_string_prefix, + normalize_string_prefix, + normalize_string_quotes, + normalize_unicode_escape_sequences, +) +from black.trans import ( + CannotTransform, + StringMerger, + StringParenStripper, + StringParenWrapper, + StringSplitter, + Transformer, + hug_power_op, +) +from blib2to3.pgen2 import token +from blib2to3.pytree import Leaf, Node + +# types +LeafID = int +LN = Union[Leaf, Node] + + +class CannotSplit(CannotTransform): + """A readable split that fits the allotted line length is impossible.""" + + +# This isn't a dataclass because @dataclass + Generic breaks mypyc. +# See also https://github.com/mypyc/mypyc/issues/827. +class LineGenerator(Visitor[Line]): + """Generates reformatted Line objects. Empty lines are not emitted. + + Note: destroys the tree it's visiting by mutating prefixes of its leaves + in ways that will no longer stringify to valid Python code on the tree. + """ + + def __init__(self, mode: Mode, features: Collection[Feature]) -> None: + self.mode = mode + self.features = features + self.current_line: Line + self.__post_init__() + + def line(self, indent: int = 0) -> Iterator[Line]: + """Generate a line. + + If the line is empty, only emit if it makes sense. + If the line is too long, split it first and then generate. + + If any lines were generated, set up a new current_line. + """ + if not self.current_line: + self.current_line.depth += indent + return # Line is empty, don't emit. Creating a new one unnecessary. 
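+        # Otherwise, hand off the line built so far and start a fresh one at
+        # the requested indentation.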
+
+        complete_line = self.current_line
+        self.current_line = Line(mode=self.mode, depth=complete_line.depth + indent)
+        yield complete_line
+
+    def visit_default(self, node: LN) -> Iterator[Line]:
+        """Default `visit_*()` implementation. Recurses to children of `node`."""
+        if isinstance(node, Leaf):
+            any_open_brackets = self.current_line.bracket_tracker.any_open_brackets()
+            for comment in generate_comments(node):
+                if any_open_brackets:
+                    # any comment within brackets is subject to splitting
+                    self.current_line.append(comment)
+                elif comment.type == token.COMMENT:
+                    # regular trailing comment
+                    self.current_line.append(comment)
+                    yield from self.line()
+
+                else:
+                    # regular standalone comment
+                    yield from self.line()
+
+                    self.current_line.append(comment)
+                    yield from self.line()
+
+            normalize_prefix(node, inside_brackets=any_open_brackets)
+            if self.mode.string_normalization and node.type == token.STRING:
+                node.value = normalize_string_prefix(node.value)
+                node.value = normalize_string_quotes(node.value)
+            if node.type == token.NUMBER:
+                normalize_numeric_literal(node)
+            if node.type not in WHITESPACE:
+                self.current_line.append(node)
+        yield from super().visit_default(node)
+
+    def visit_test(self, node: Node) -> Iterator[Line]:
+        """Visit an `x if y else z` test"""
+
+        if Preview.parenthesize_conditional_expressions in self.mode:
+            already_parenthesized = (
+                node.prev_sibling and node.prev_sibling.type == token.LPAR
+            )
+
+            if not already_parenthesized:
+                lpar = Leaf(token.LPAR, "")
+                rpar = Leaf(token.RPAR, "")
+                node.insert_child(0, lpar)
+                node.append_child(rpar)
+
+        yield from self.visit_default(node)
+
+    def visit_INDENT(self, node: Leaf) -> Iterator[Line]:
+        """Increase indentation level, maybe yield a line."""
+        # In blib2to3 INDENT never holds comments.
+        yield from self.line(+1)
+        yield from self.visit_default(node)
+
+    def visit_DEDENT(self, node: Leaf) -> Iterator[Line]:
+        """Decrease indentation level, maybe yield a line."""
+        # The current line might still wait for trailing comments. At DEDENT time
+        # there won't be any (they would be prefixes on the preceding NEWLINE).
+        # Emit the line then.
+        yield from self.line()
+
+        # While DEDENT has no value, its prefix may contain standalone comments
+        # that belong to the current indentation level. Get 'em.
+        yield from self.visit_default(node)
+
+        # Finally, emit the dedent.
+        yield from self.line(-1)
+
+    def visit_stmt(
+        self, node: Node, keywords: Set[str], parens: Set[str]
+    ) -> Iterator[Line]:
+        """Visit a statement.
+
+        This implementation is shared for `if`, `while`, `for`, `try`, `except`,
+        `def`, `with`, `class`, `assert`, and assignments.
+
+        The relevant Python language `keywords` for a given statement will be
+        NAME leaves within it. This method puts those on a separate line.
+
+        `parens` holds a set of string leaf values immediately after which
+        invisible parens should be put.
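+
+        For example, in `return x or y`, `return` is such a keyword and the
+        expression `x or y` receives invisible parentheses after it.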
+ """ + normalize_invisible_parens( + node, parens_after=parens, mode=self.mode, features=self.features + ) + for child in node.children: + if is_name_token(child) and child.value in keywords: + yield from self.line() + + yield from self.visit(child) + + def visit_dictsetmaker(self, node: Node) -> Iterator[Line]: + if Preview.wrap_long_dict_values_in_parens in self.mode: + for i, child in enumerate(node.children): + if i == 0: + continue + if node.children[i - 1].type == token.COLON: + if child.type == syms.atom and child.children[0].type == token.LPAR: + if maybe_make_parens_invisible_in_atom( + child, + parent=node, + remove_brackets_around_comma=False, + ): + wrap_in_parentheses(node, child, visible=False) + else: + wrap_in_parentheses(node, child, visible=False) + yield from self.visit_default(node) + + def visit_funcdef(self, node: Node) -> Iterator[Line]: + """Visit function definition.""" + yield from self.line() + + # Remove redundant brackets around return type annotation. + is_return_annotation = False + for child in node.children: + if child.type == token.RARROW: + is_return_annotation = True + elif is_return_annotation: + if child.type == syms.atom and child.children[0].type == token.LPAR: + if maybe_make_parens_invisible_in_atom( + child, + parent=node, + remove_brackets_around_comma=False, + ): + wrap_in_parentheses(node, child, visible=False) + else: + wrap_in_parentheses(node, child, visible=False) + is_return_annotation = False + + for child in node.children: + yield from self.visit(child) + + def visit_match_case(self, node: Node) -> Iterator[Line]: + """Visit either a match or case statement.""" + normalize_invisible_parens( + node, parens_after=set(), mode=self.mode, features=self.features + ) + + yield from self.line() + for child in node.children: + yield from self.visit(child) + + def visit_suite(self, node: Node) -> Iterator[Line]: + """Visit a suite.""" + if self.mode.is_pyi and is_stub_suite(node): + yield from self.visit(node.children[2]) + else: + yield from self.visit_default(node) + + def visit_simple_stmt(self, node: Node) -> Iterator[Line]: + """Visit a statement without nested statements.""" + prev_type: Optional[int] = None + for child in node.children: + if (prev_type is None or prev_type == token.SEMI) and is_arith_like(child): + wrap_in_parentheses(node, child, visible=False) + prev_type = child.type + + is_suite_like = node.parent and node.parent.type in STATEMENT + if is_suite_like: + if self.mode.is_pyi and is_stub_body(node): + yield from self.visit_default(node) + else: + yield from self.line(+1) + yield from self.visit_default(node) + yield from self.line(-1) + + else: + if ( + not self.mode.is_pyi + or not node.parent + or not is_stub_suite(node.parent) + ): + yield from self.line() + yield from self.visit_default(node) + + def visit_async_stmt(self, node: Node) -> Iterator[Line]: + """Visit `async def`, `async for`, `async with`.""" + yield from self.line() + + children = iter(node.children) + for child in children: + yield from self.visit(child) + + if child.type == token.ASYNC or child.type == STANDALONE_COMMENT: + # STANDALONE_COMMENT happens when `# fmt: skip` is applied on the async + # line. 
+ break + + internal_stmt = next(children) + for child in internal_stmt.children: + yield from self.visit(child) + + def visit_decorators(self, node: Node) -> Iterator[Line]: + """Visit decorators.""" + for child in node.children: + yield from self.line() + yield from self.visit(child) + + def visit_power(self, node: Node) -> Iterator[Line]: + for idx, leaf in enumerate(node.children[:-1]): + next_leaf = node.children[idx + 1] + + if not isinstance(leaf, Leaf): + continue + + value = leaf.value.lower() + if ( + leaf.type == token.NUMBER + and next_leaf.type == syms.trailer + # Ensure that we are in an attribute trailer + and next_leaf.children[0].type == token.DOT + # It shouldn't wrap hexadecimal, binary and octal literals + and not value.startswith(("0x", "0b", "0o")) + # It shouldn't wrap complex literals + and "j" not in value + ): + wrap_in_parentheses(node, leaf) + + remove_await_parens(node) + + yield from self.visit_default(node) + + def visit_SEMI(self, leaf: Leaf) -> Iterator[Line]: + """Remove a semicolon and put the other statement on a separate line.""" + yield from self.line() + + def visit_ENDMARKER(self, leaf: Leaf) -> Iterator[Line]: + """End of file. Process outstanding comments and end with a newline.""" + yield from self.visit_default(leaf) + yield from self.line() + + def visit_STANDALONE_COMMENT(self, leaf: Leaf) -> Iterator[Line]: + if not self.current_line.bracket_tracker.any_open_brackets(): + yield from self.line() + yield from self.visit_default(leaf) + + def visit_factor(self, node: Node) -> Iterator[Line]: + """Force parentheses between a unary op and a binary power: + + -2 ** 8 -> -(2 ** 8) + """ + _operator, operand = node.children + if ( + operand.type == syms.power + and len(operand.children) == 3 + and operand.children[1].type == token.DOUBLESTAR + ): + lpar = Leaf(token.LPAR, "(") + rpar = Leaf(token.RPAR, ")") + index = operand.remove() or 0 + node.insert_child(index, Node(syms.atom, [lpar, operand, rpar])) + yield from self.visit_default(node) + + def visit_STRING(self, leaf: Leaf) -> Iterator[Line]: + if Preview.hex_codes_in_unicode_sequences in self.mode: + normalize_unicode_escape_sequences(leaf) + + if is_docstring(leaf) and "\\\n" not in leaf.value: + # We're ignoring docstrings with backslash newline escapes because changing + # indentation of those changes the AST representation of the code. + if self.mode.string_normalization: + docstring = normalize_string_prefix(leaf.value) + # visit_default() does handle string normalization for us, but + # since this method acts differently depending on quote style (ex. + # see padding logic below), there's a possibility for unstable + # formatting as visit_default() is called *after*. To avoid a + # situation where this function formats a docstring differently on + # the second pass, normalize it early. + docstring = normalize_string_quotes(docstring) + else: + docstring = leaf.value + prefix = get_string_prefix(docstring) + docstring = docstring[len(prefix) :] # Remove the prefix + quote_char = docstring[0] + # A natural way to remove the outer quotes is to do: + # docstring = docstring.strip(quote_char) + # but that breaks on """""x""" (which is '""x'). + # So we actually need to remove the first character and the next two + # characters but only if they are the same as the first. 
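+ # E.g. '"""abc"""' yields quote_len 3 and body 'abc', while '"abc"'
+ # yields quote_len 1.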
+ quote_len = 1 if docstring[1] != quote_char else 3
+ docstring = docstring[quote_len:-quote_len]
+ docstring_started_empty = not docstring
+ indent = " " * 4 * self.current_line.depth
+
+ if is_multiline_string(leaf):
+ docstring = fix_docstring(docstring, indent)
+ else:
+ docstring = docstring.strip()
+
+ has_trailing_backslash = False
+ if docstring:
+ # Add some padding if the docstring starts / ends with a quote mark.
+ if docstring[0] == quote_char:
+ docstring = " " + docstring
+ if docstring[-1] == quote_char:
+ docstring += " "
+ if docstring[-1] == "\\":
+ backslash_count = len(docstring) - len(docstring.rstrip("\\"))
+ if backslash_count % 2:
+ # Odd number of trailing backslashes, add some padding to
+ # avoid escaping the closing string quote.
+ docstring += " "
+ has_trailing_backslash = True
+ elif not docstring_started_empty:
+ docstring = " "
+
+ # We could enforce triple quotes at this point.
+ quote = quote_char * quote_len
+
+ # It's invalid to put closing single-character quotes on a new line.
+ if self.mode and quote_len == 3:
+ # We need to find the length of the last line of the docstring
+ # to find if we can add the closing quotes to the line without
+ # exceeding the maximum line length.
+ # If docstring is one line, we don't put the closing quotes on a
+ # separate line because it looks ugly (#3320).
+ lines = docstring.splitlines()
+ last_line_length = len(lines[-1]) if docstring else 0
+
+ # If adding closing quotes would cause the last line to exceed
+ # the maximum line length then put a line break before the
+ # closing quotes.
+ if (
+ len(lines) > 1
+ and last_line_length + quote_len > self.mode.line_length
+ and len(indent) + quote_len <= self.mode.line_length
+ and not has_trailing_backslash
+ ):
+ leaf.value = prefix + quote + docstring + "\n" + indent + quote
+ else:
+ leaf.value = prefix + quote + docstring + quote
+ else:
+ leaf.value = prefix + quote + docstring + quote
+
+ yield from self.visit_default(leaf)
+
+ def __post_init__(self) -> None:
+ """You are in a twisty little maze of passages."""
+ self.current_line = Line(mode=self.mode)
+
+ v = self.visit_stmt
+ Ø: Set[str] = set()
+ self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","})
+ self.visit_if_stmt = partial(
+ v, keywords={"if", "else", "elif"}, parens={"if", "elif"}
+ )
+ self.visit_while_stmt = partial(v, keywords={"while", "else"}, parens={"while"})
+ self.visit_for_stmt = partial(v, keywords={"for", "else"}, parens={"for", "in"})
+ self.visit_try_stmt = partial(
+ v, keywords={"try", "except", "else", "finally"}, parens=Ø
+ )
+ self.visit_except_clause = partial(v, keywords={"except"}, parens={"except"})
+ self.visit_with_stmt = partial(v, keywords={"with"}, parens={"with"})
+ self.visit_classdef = partial(v, keywords={"class"}, parens=Ø)
+ self.visit_expr_stmt = partial(v, keywords=Ø, parens=ASSIGNMENTS)
+ self.visit_return_stmt = partial(v, keywords={"return"}, parens={"return"})
+ self.visit_import_from = partial(v, keywords=Ø, parens={"import"})
+ self.visit_del_stmt = partial(v, keywords=Ø, parens={"del"})
+ self.visit_async_funcdef = self.visit_async_stmt
+ self.visit_decorated = self.visit_decorators
+
+ # PEP 634
+ self.visit_match_stmt = self.visit_match_case
+ self.visit_case_block = self.visit_match_case
+
+
+def transform_line(
+ line: Line, mode: Mode, features: Collection[Feature] = ()
+) -> Iterator[Line]:
+ """Transform a `line`, potentially splitting it into many lines.
+ + They should fit in the allotted `line_length` but might not be able to. + + `features` are syntactical features that may be used in the output. + """ + if line.is_comment: + yield line + return + + line_str = line_to_string(line) + + ll = mode.line_length + sn = mode.string_normalization + string_merge = StringMerger(ll, sn) + string_paren_strip = StringParenStripper(ll, sn) + string_split = StringSplitter(ll, sn) + string_paren_wrap = StringParenWrapper(ll, sn) + + transformers: List[Transformer] + if ( + not line.contains_uncollapsable_type_comments() + and not line.should_split_rhs + and not line.magic_trailing_comma + and ( + is_line_short_enough(line, line_length=mode.line_length, line_str=line_str) + or line.contains_unsplittable_type_ignore() + ) + and not (line.inside_brackets and line.contains_standalone_comments()) + ): + # Only apply basic string preprocessing, since lines shouldn't be split here. + if Preview.string_processing in mode: + transformers = [string_merge, string_paren_strip] + else: + transformers = [] + elif line.is_def: + transformers = [left_hand_split] + else: + + def _rhs( + self: object, line: Line, features: Collection[Feature] + ) -> Iterator[Line]: + """Wraps calls to `right_hand_split`. + + The calls increasingly `omit` right-hand trailers (bracket pairs with + content), meaning the trailers get glued together to split on another + bracket pair instead. + """ + for omit in generate_trailers_to_omit(line, mode.line_length): + lines = list( + right_hand_split(line, mode.line_length, features, omit=omit) + ) + # Note: this check is only able to figure out if the first line of the + # *current* transformation fits in the line length. This is true only + # for simple cases. All others require running more transforms via + # `transform_line()`. This check doesn't know if those would succeed. + if is_line_short_enough(lines[0], line_length=mode.line_length): + yield from lines + return + + # All splits failed, best effort split with no omits. + # This mostly happens to multiline strings that are by definition + # reported as not fitting a single line, as well as lines that contain + # trailing commas (those have to be exploded). + yield from right_hand_split( + line, line_length=mode.line_length, features=features + ) + + # HACK: nested functions (like _rhs) compiled by mypyc don't retain their + # __name__ attribute which is needed in `run_transformer` further down. + # Unfortunately a nested class breaks mypyc too. So a class must be created + # via type ... https://github.com/mypyc/mypyc/issues/884 + rhs = type("rhs", (), {"__call__": _rhs})() + + if Preview.string_processing in mode: + if line.inside_brackets: + transformers = [ + string_merge, + string_paren_strip, + string_split, + delimiter_split, + standalone_comment_split, + string_paren_wrap, + rhs, + ] + else: + transformers = [ + string_merge, + string_paren_strip, + string_split, + string_paren_wrap, + rhs, + ] + else: + if line.inside_brackets: + transformers = [delimiter_split, standalone_comment_split, rhs] + else: + transformers = [rhs] + # It's always safe to attempt hugging of power operations and pretty much every line + # could match. + transformers.append(hug_power_op) + + for transform in transformers: + # We are accumulating lines in `result` because we might want to abort + # mission and return the original line in the end, or attempt a different + # split altogether. 
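+ # Transformers report failure by raising CannotTransform (CannotSplit
+ # is a subclass of it), in which case we move on to the next one.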
+ try: + result = run_transformer(line, transform, mode, features, line_str=line_str) + except CannotTransform: + continue + else: + yield from result + break + + else: + yield line + + +class _BracketSplitComponent(Enum): + head = auto() + body = auto() + tail = auto() + + +def left_hand_split(line: Line, _features: Collection[Feature] = ()) -> Iterator[Line]: + """Split line into many lines, starting with the first matching bracket pair. + + Note: this usually looks weird, only use this for function definitions. + Prefer RHS otherwise. This is why this function is not symmetrical with + :func:`right_hand_split` which also handles optional parentheses. + """ + tail_leaves: List[Leaf] = [] + body_leaves: List[Leaf] = [] + head_leaves: List[Leaf] = [] + current_leaves = head_leaves + matching_bracket: Optional[Leaf] = None + for leaf in line.leaves: + if ( + current_leaves is body_leaves + and leaf.type in CLOSING_BRACKETS + and leaf.opening_bracket is matching_bracket + and isinstance(matching_bracket, Leaf) + ): + ensure_visible(leaf) + ensure_visible(matching_bracket) + current_leaves = tail_leaves if body_leaves else head_leaves + current_leaves.append(leaf) + if current_leaves is head_leaves: + if leaf.type in OPENING_BRACKETS: + matching_bracket = leaf + current_leaves = body_leaves + if not matching_bracket: + raise CannotSplit("No brackets found") + + head = bracket_split_build_line( + head_leaves, line, matching_bracket, component=_BracketSplitComponent.head + ) + body = bracket_split_build_line( + body_leaves, line, matching_bracket, component=_BracketSplitComponent.body + ) + tail = bracket_split_build_line( + tail_leaves, line, matching_bracket, component=_BracketSplitComponent.tail + ) + bracket_split_succeeded_or_raise(head, body, tail) + for result in (head, body, tail): + if result: + yield result + + +@dataclass +class _RHSResult: + """Intermediate split result from a right hand split.""" + + head: Line + body: Line + tail: Line + opening_bracket: Leaf + closing_bracket: Leaf + + +def right_hand_split( + line: Line, + line_length: int, + features: Collection[Feature] = (), + omit: Collection[LeafID] = (), +) -> Iterator[Line]: + """Split line into many lines, starting with the last matching bracket pair. + + If the split was by optional parentheses, attempt splitting without them, too. + `omit` is a collection of closing bracket IDs that shouldn't be considered for + this split. + + Note: running this function modifies `bracket_depth` on the leaves of `line`. + """ + rhs_result = _first_right_hand_split(line, omit=omit) + yield from _maybe_split_omitting_optional_parens( + rhs_result, line, line_length, features=features, omit=omit + ) + + +def _first_right_hand_split( + line: Line, + omit: Collection[LeafID] = (), +) -> _RHSResult: + """Split the line into head, body, tail starting with the last bracket pair. + + Note: this function should not have side effects. It's relied upon by + _maybe_split_omitting_optional_parens to get an opinion whether to prefer + splitting on the right side of an assignment statement. 
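+
+ For example, for `x = foo(a, b)` the last bracket pair is `(a, b)`:
+ the head is `x = foo(`, the body is `a, b`, and the tail is `)`.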
+ """ + tail_leaves: List[Leaf] = [] + body_leaves: List[Leaf] = [] + head_leaves: List[Leaf] = [] + current_leaves = tail_leaves + opening_bracket: Optional[Leaf] = None + closing_bracket: Optional[Leaf] = None + for leaf in reversed(line.leaves): + if current_leaves is body_leaves: + if leaf is opening_bracket: + current_leaves = head_leaves if body_leaves else tail_leaves + current_leaves.append(leaf) + if current_leaves is tail_leaves: + if leaf.type in CLOSING_BRACKETS and id(leaf) not in omit: + opening_bracket = leaf.opening_bracket + closing_bracket = leaf + current_leaves = body_leaves + if not (opening_bracket and closing_bracket and head_leaves): + # If there is no opening or closing_bracket that means the split failed and + # all content is in the tail. Otherwise, if `head_leaves` are empty, it means + # the matching `opening_bracket` wasn't available on `line` anymore. + raise CannotSplit("No brackets found") + + tail_leaves.reverse() + body_leaves.reverse() + head_leaves.reverse() + head = bracket_split_build_line( + head_leaves, line, opening_bracket, component=_BracketSplitComponent.head + ) + body = bracket_split_build_line( + body_leaves, line, opening_bracket, component=_BracketSplitComponent.body + ) + tail = bracket_split_build_line( + tail_leaves, line, opening_bracket, component=_BracketSplitComponent.tail + ) + bracket_split_succeeded_or_raise(head, body, tail) + return _RHSResult(head, body, tail, opening_bracket, closing_bracket) + + +def _maybe_split_omitting_optional_parens( + rhs: _RHSResult, + line: Line, + line_length: int, + features: Collection[Feature] = (), + omit: Collection[LeafID] = (), +) -> Iterator[Line]: + if ( + Feature.FORCE_OPTIONAL_PARENTHESES not in features + # the opening bracket is an optional paren + and rhs.opening_bracket.type == token.LPAR + and not rhs.opening_bracket.value + # the closing bracket is an optional paren + and rhs.closing_bracket.type == token.RPAR + and not rhs.closing_bracket.value + # it's not an import (optional parens are the only thing we can split on + # in this case; attempting a split without them is a waste of time) + and not line.is_import + # there are no standalone comments in the body + and not rhs.body.contains_standalone_comments(0) + # and we can actually remove the parens + and can_omit_invisible_parens(rhs.body, line_length) + ): + omit = {id(rhs.closing_bracket), *omit} + try: + # The _RHSResult Omitting Optional Parens. 
+ rhs_oop = _first_right_hand_split(line, omit=omit)
+ if not (
+ Preview.prefer_splitting_right_hand_side_of_assignments in line.mode
+ # the split is right after `=`
+ and len(rhs.head.leaves) >= 2
+ and rhs.head.leaves[-2].type == token.EQUAL
+ # the left side of assignment contains brackets
+ and any(leaf.type in BRACKETS for leaf in rhs.head.leaves[:-1])
+ # the left side of assignment is short enough (the -1 is for the ending
+ # optional paren)
+ and is_line_short_enough(rhs.head, line_length=line_length - 1)
+ # the left side of assignment won't explode further because of magic
+ # trailing comma
+ and rhs.head.magic_trailing_comma is None
+ # the split by omitting optional parens isn't preferred for some other
+ # reason
+ and not _prefer_split_rhs_oop(rhs_oop, line_length=line_length)
+ ):
+ yield from _maybe_split_omitting_optional_parens(
+ rhs_oop, line, line_length, features=features, omit=omit
+ )
+ return
+
+ except CannotSplit as e:
+ if not (
+ can_be_split(rhs.body)
+ or is_line_short_enough(rhs.body, line_length=line_length)
+ ):
+ raise CannotSplit(
+ "Splitting failed, body is still too long and can't be split."
+ ) from e
+
+ elif (
+ rhs.head.contains_multiline_strings()
+ or rhs.tail.contains_multiline_strings()
+ ):
+ raise CannotSplit(
+ "The current optional pair of parentheses is bound to fail to"
+ " satisfy the splitting algorithm because the head or the tail"
+ " contains multiline strings which by definition never fit one"
+ " line."
+ ) from e
+
+ ensure_visible(rhs.opening_bracket)
+ ensure_visible(rhs.closing_bracket)
+ for result in (rhs.head, rhs.body, rhs.tail):
+ if result:
+ yield result
+
+
+def _prefer_split_rhs_oop(rhs_oop: _RHSResult, line_length: int) -> bool:
+ """
+ Returns whether we should prefer the result from a split omitting optional parens.
+ """
+ has_closing_bracket_after_assign = False
+ for leaf in reversed(rhs_oop.head.leaves):
+ if leaf.type == token.EQUAL:
+ break
+ if leaf.type in CLOSING_BRACKETS:
+ has_closing_bracket_after_assign = True
+ break
+ return (
+ # contains matching brackets after the `=` (done by checking there is a
+ # closing bracket)
+ has_closing_bracket_after_assign
+ or (
+ # the split is actually from inside the optional parens (done by checking
+ # the first line still contains the `=`)
+ any(leaf.type == token.EQUAL for leaf in rhs_oop.head.leaves)
+ # the first line is short enough
+ and is_line_short_enough(rhs_oop.head, line_length=line_length)
+ )
+ # contains unsplittable type ignore
+ or rhs_oop.head.contains_unsplittable_type_ignore()
+ or rhs_oop.body.contains_unsplittable_type_ignore()
+ or rhs_oop.tail.contains_unsplittable_type_ignore()
+ )
+
+
+def bracket_split_succeeded_or_raise(head: Line, body: Line, tail: Line) -> None:
+ """Raise :exc:`CannotSplit` if the last left- or right-hand split failed.
+
+ Do nothing otherwise.
+
+ A left- or right-hand split is based on a pair of brackets. Content before
+ (and including) the opening bracket is left on one line, content inside the
+ brackets is put on a separate line, and finally content starting with and
+ following the closing bracket is put on a separate line.
+
+ Those are called `head`, `body`, and `tail`, respectively. If the split
+ produced the same line (all content in `head`) or ended up with an empty `body`
+ and the `tail` is just the closing bracket, then it's considered failed.
+ """ + tail_len = len(str(tail).strip()) + if not body: + if tail_len == 0: + raise CannotSplit("Splitting brackets produced the same line") + + elif tail_len < 3: + raise CannotSplit( + f"Splitting brackets on an empty body to save {tail_len} characters is" + " not worth it" + ) + + +def bracket_split_build_line( + leaves: List[Leaf], + original: Line, + opening_bracket: Leaf, + *, + component: _BracketSplitComponent, +) -> Line: + """Return a new line with given `leaves` and respective comments from `original`. + + If it's the head component, brackets will be tracked so trailing commas are + respected. + + If it's the body component, the result line is one-indented inside brackets and as + such has its first leaf's prefix normalized and a trailing comma added when + expected. + """ + result = Line(mode=original.mode, depth=original.depth) + if component is _BracketSplitComponent.body: + result.inside_brackets = True + result.depth += 1 + if leaves: + # Since body is a new indent level, remove spurious leading whitespace. + normalize_prefix(leaves[0], inside_brackets=True) + # Ensure a trailing comma for imports and standalone function arguments, but + # be careful not to add one after any comments or within type annotations. + no_commas = ( + original.is_def + and opening_bracket.value == "(" + and not any(leaf.type == token.COMMA for leaf in leaves) + # In particular, don't add one within a parenthesized return annotation. + # Unfortunately the indicator we're in a return annotation (RARROW) may + # be defined directly in the parent node, the parent of the parent ... + # and so on depending on how complex the return annotation is. + # This isn't perfect and there's some false negatives but they are in + # contexts were a comma is actually fine. + and not any( + node.prev_sibling.type == RARROW + for node in ( + leaves[0].parent, + getattr(leaves[0].parent, "parent", None), + ) + if isinstance(node, Node) and isinstance(node.prev_sibling, Leaf) + ) + ) + + if original.is_import or no_commas: + for i in range(len(leaves) - 1, -1, -1): + if leaves[i].type == STANDALONE_COMMENT: + continue + + if leaves[i].type != token.COMMA: + new_comma = Leaf(token.COMMA, ",") + leaves.insert(i + 1, new_comma) + break + + leaves_to_track: Set[LeafID] = set() + if component is _BracketSplitComponent.head: + leaves_to_track = get_leaves_inside_matching_brackets(leaves) + # Populate the line + for leaf in leaves: + result.append( + leaf, + preformatted=True, + track_bracket=id(leaf) in leaves_to_track, + ) + for comment_after in original.comments_after(leaf): + result.append(comment_after, preformatted=True) + if component is _BracketSplitComponent.body and should_split_line( + result, opening_bracket + ): + result.should_split_rhs = True + return result + + +def dont_increase_indentation(split_func: Transformer) -> Transformer: + """Normalize prefix of the first leaf in every line returned by `split_func`. + + This is a decorator over relevant split functions. + """ + + @wraps(split_func) + def split_wrapper(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]: + for split_line in split_func(line, features): + normalize_prefix(split_line.leaves[0], inside_brackets=True) + yield split_line + + return split_wrapper + + +@dont_increase_indentation +def delimiter_split(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]: + """Split according to delimiters of the highest priority. 
+ + If the appropriate Features are given, the split will add trailing commas + also in function signatures and calls that contain `*` and `**`. + """ + try: + last_leaf = line.leaves[-1] + except IndexError: + raise CannotSplit("Line empty") from None + + bt = line.bracket_tracker + try: + delimiter_priority = bt.max_delimiter_priority(exclude={id(last_leaf)}) + except ValueError: + raise CannotSplit("No delimiters found") from None + + if delimiter_priority == DOT_PRIORITY: + if bt.delimiter_count_with_priority(delimiter_priority) == 1: + raise CannotSplit("Splitting a single attribute from its owner looks wrong") + + current_line = Line( + mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets + ) + lowest_depth = sys.maxsize + trailing_comma_safe = True + + def append_to_line(leaf: Leaf) -> Iterator[Line]: + """Append `leaf` to current line or to new line if appending impossible.""" + nonlocal current_line + try: + current_line.append_safe(leaf, preformatted=True) + except ValueError: + yield current_line + + current_line = Line( + mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets + ) + current_line.append(leaf) + + for leaf in line.leaves: + yield from append_to_line(leaf) + + for comment_after in line.comments_after(leaf): + yield from append_to_line(comment_after) + + lowest_depth = min(lowest_depth, leaf.bracket_depth) + if leaf.bracket_depth == lowest_depth: + if is_vararg(leaf, within={syms.typedargslist}): + trailing_comma_safe = ( + trailing_comma_safe and Feature.TRAILING_COMMA_IN_DEF in features + ) + elif is_vararg(leaf, within={syms.arglist, syms.argument}): + trailing_comma_safe = ( + trailing_comma_safe and Feature.TRAILING_COMMA_IN_CALL in features + ) + + leaf_priority = bt.delimiters.get(id(leaf)) + if leaf_priority == delimiter_priority: + yield current_line + + current_line = Line( + mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets + ) + if current_line: + if ( + trailing_comma_safe + and delimiter_priority == COMMA_PRIORITY + and current_line.leaves[-1].type != token.COMMA + and current_line.leaves[-1].type != STANDALONE_COMMENT + ): + new_comma = Leaf(token.COMMA, ",") + current_line.append(new_comma) + yield current_line + + +@dont_increase_indentation +def standalone_comment_split( + line: Line, features: Collection[Feature] = () +) -> Iterator[Line]: + """Split standalone comments from the rest of the line.""" + if not line.contains_standalone_comments(0): + raise CannotSplit("Line does not have any standalone comments") + + current_line = Line( + mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets + ) + + def append_to_line(leaf: Leaf) -> Iterator[Line]: + """Append `leaf` to current line or to new line if appending impossible.""" + nonlocal current_line + try: + current_line.append_safe(leaf, preformatted=True) + except ValueError: + yield current_line + + current_line = Line( + line.mode, depth=line.depth, inside_brackets=line.inside_brackets + ) + current_line.append(leaf) + + for leaf in line.leaves: + yield from append_to_line(leaf) + + for comment_after in line.comments_after(leaf): + yield from append_to_line(comment_after) + + if current_line: + yield current_line + + +def normalize_prefix(leaf: Leaf, *, inside_brackets: bool) -> None: + """Leave existing extra newlines if not `inside_brackets`. Remove everything + else. + + Note: don't use backslashes for formatting or you'll lose your voting rights. 
+ """ + if not inside_brackets: + spl = leaf.prefix.split("#") + if "\\" not in spl[0]: + nl_count = spl[-1].count("\n") + if len(spl) > 1: + nl_count -= 1 + leaf.prefix = "\n" * nl_count + return + + leaf.prefix = "" + + +def normalize_invisible_parens( + node: Node, parens_after: Set[str], *, mode: Mode, features: Collection[Feature] +) -> None: + """Make existing optional parentheses invisible or create new ones. + + `parens_after` is a set of string leaf values immediately after which parens + should be put. + + Standardizes on visible parentheses for single-element tuples, and keeps + existing visible parentheses for other tuples and generator expressions. + """ + for pc in list_comments(node.prefix, is_endmarker=False): + if pc.value in FMT_OFF: + # This `node` has a prefix with `# fmt: off`, don't mess with parens. + return + + # The multiple context managers grammar has a different pattern, thus this is + # separate from the for-loop below. This possibly wraps them in invisible parens, + # and later will be removed in remove_with_parens when needed. + if node.type == syms.with_stmt: + _maybe_wrap_cms_in_parens(node, mode, features) + + check_lpar = False + for index, child in enumerate(list(node.children)): + # Fixes a bug where invisible parens are not properly stripped from + # assignment statements that contain type annotations. + if isinstance(child, Node) and child.type == syms.annassign: + normalize_invisible_parens( + child, parens_after=parens_after, mode=mode, features=features + ) + + # Add parentheses around long tuple unpacking in assignments. + if ( + index == 0 + and isinstance(child, Node) + and child.type == syms.testlist_star_expr + ): + check_lpar = True + + if check_lpar: + if ( + child.type == syms.atom + and node.type == syms.for_stmt + and isinstance(child.prev_sibling, Leaf) + and child.prev_sibling.type == token.NAME + and child.prev_sibling.value == "for" + ): + if maybe_make_parens_invisible_in_atom( + child, + parent=node, + remove_brackets_around_comma=True, + ): + wrap_in_parentheses(node, child, visible=False) + elif isinstance(child, Node) and node.type == syms.with_stmt: + remove_with_parens(child, node) + elif child.type == syms.atom: + if maybe_make_parens_invisible_in_atom( + child, + parent=node, + ): + wrap_in_parentheses(node, child, visible=False) + elif is_one_tuple(child): + wrap_in_parentheses(node, child, visible=True) + elif node.type == syms.import_from: + _normalize_import_from(node, child, index) + break + elif ( + index == 1 + and child.type == token.STAR + and node.type == syms.except_clause + ): + # In except* (PEP 654), the star is actually part of + # of the keyword. So we need to skip the insertion of + # invisible parentheses to work more precisely. 
+ continue + + elif not (isinstance(child, Leaf) and is_multiline_string(child)): + wrap_in_parentheses(node, child, visible=False) + + comma_check = child.type == token.COMMA + + check_lpar = isinstance(child, Leaf) and ( + child.value in parens_after or comma_check + ) + + +def _normalize_import_from(parent: Node, child: LN, index: int) -> None: + # "import from" nodes store parentheses directly as part of + # the statement + if is_lpar_token(child): + assert is_rpar_token(parent.children[-1]) + # make parentheses invisible + child.value = "" + parent.children[-1].value = "" + elif child.type != token.STAR: + # insert invisible parentheses + parent.insert_child(index, Leaf(token.LPAR, "")) + parent.append_child(Leaf(token.RPAR, "")) + + +def remove_await_parens(node: Node) -> None: + if node.children[0].type == token.AWAIT and len(node.children) > 1: + if ( + node.children[1].type == syms.atom + and node.children[1].children[0].type == token.LPAR + ): + if maybe_make_parens_invisible_in_atom( + node.children[1], + parent=node, + remove_brackets_around_comma=True, + ): + wrap_in_parentheses(node, node.children[1], visible=False) + + # Since await is an expression we shouldn't remove + # brackets in cases where this would change + # the AST due to operator precedence. + # Therefore we only aim to remove brackets around + # power nodes that aren't also await expressions themselves. + # https://peps.python.org/pep-0492/#updated-operator-precedence-table + # N.B. We've still removed any redundant nested brackets though :) + opening_bracket = cast(Leaf, node.children[1].children[0]) + closing_bracket = cast(Leaf, node.children[1].children[-1]) + bracket_contents = node.children[1].children[1] + if isinstance(bracket_contents, Node): + if bracket_contents.type != syms.power: + ensure_visible(opening_bracket) + ensure_visible(closing_bracket) + elif ( + bracket_contents.type == syms.power + and bracket_contents.children[0].type == token.AWAIT + ): + ensure_visible(opening_bracket) + ensure_visible(closing_bracket) + # If we are in a nested await then recurse down. + remove_await_parens(bracket_contents) + + +def _maybe_wrap_cms_in_parens( + node: Node, mode: Mode, features: Collection[Feature] +) -> None: + """When enabled and safe, wrap the multiple context managers in invisible parens. + + It is only safe when `features` contain Feature.PARENTHESIZED_CONTEXT_MANAGERS. + """ + if ( + Feature.PARENTHESIZED_CONTEXT_MANAGERS not in features + or Preview.wrap_multiple_context_managers_in_parens not in mode + or len(node.children) <= 2 + # If it's an atom, it's already wrapped in parens. + or node.children[1].type == syms.atom + ): + return + colon_index: Optional[int] = None + for i in range(2, len(node.children)): + if node.children[i].type == token.COLON: + colon_index = i + break + if colon_index is not None: + lpar = Leaf(token.LPAR, "") + rpar = Leaf(token.RPAR, "") + context_managers = node.children[1:colon_index] + for child in context_managers: + child.remove() + # After wrapping, the with_stmt will look like this: + # with_stmt + # NAME 'with' + # atom + # LPAR '' + # testlist_gexp + # ... 
<-- context_managers
+ # /testlist_gexp
+ # RPAR ''
+ # /atom
+ # COLON ':'
+ new_child = Node(
+ syms.atom, [lpar, Node(syms.testlist_gexp, context_managers), rpar]
+ )
+ node.insert_child(1, new_child)
+
+
+def remove_with_parens(node: Node, parent: Node) -> None:
+ """Recursively hide optional parens in `with` statements."""
+ # Removing all unnecessary parentheses in with statements in one pass is a tad
+ # complex as different variations of bracketed statements result in pretty
+ # different parse trees:
+ #
+ # with (open("file")) as f: # this is an asexpr_test
+ # ...
+ #
+ # with (open("file") as f): # this is an atom containing an
+ # ... # asexpr_test
+ #
+ # with (open("file")) as f, (open("file")) as f: # this is asexpr_test, COMMA,
+ # ... # asexpr_test
+ #
+ # with (open("file") as f, open("file") as f): # an atom containing a
+ # ... # testlist_gexp which then
+ # # contains multiple asexpr_test(s)
+ if node.type == syms.atom:
+ if maybe_make_parens_invisible_in_atom(
+ node,
+ parent=parent,
+ remove_brackets_around_comma=True,
+ ):
+ wrap_in_parentheses(parent, node, visible=False)
+ if isinstance(node.children[1], Node):
+ remove_with_parens(node.children[1], node)
+ elif node.type == syms.testlist_gexp:
+ for child in node.children:
+ if isinstance(child, Node):
+ remove_with_parens(child, node)
+ elif node.type == syms.asexpr_test and not any(
+ leaf.type == token.COLONEQUAL for leaf in node.leaves()
+ ):
+ if maybe_make_parens_invisible_in_atom(
+ node.children[0],
+ parent=node,
+ remove_brackets_around_comma=True,
+ ):
+ wrap_in_parentheses(node, node.children[0], visible=False)
+
+
+def maybe_make_parens_invisible_in_atom(
+ node: LN,
+ parent: LN,
+ remove_brackets_around_comma: bool = False,
+) -> bool:
+ """If it's safe, make the parens in the atom `node` invisible, recursively.
+ Additionally, remove repeated, adjacent invisible parens from the atom `node`
+ as they are redundant.
+
+ Returns whether the node should itself be wrapped in invisible parentheses.
+ """
+ if (
+ node.type != syms.atom
+ or is_empty_tuple(node)
+ or is_one_tuple(node)
+ or (is_yield(node) and parent.type != syms.expr_stmt)
+ or (
+ # This condition tries to prevent removing non-optional brackets
+ # around a tuple; however, it can be a bit overzealous, so we provide
+ # an option to skip this check for `for` and `with` statements.
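+ # E.g. the visible parens in `x = (1, 2)` are kept, since the comma
+ # inside makes the atom a tuple whose brackets aren't optional.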
+ not remove_brackets_around_comma + and max_delimiter_priority_in_atom(node) >= COMMA_PRIORITY + ) + or is_tuple_containing_walrus(node) + ): + return False + + if is_walrus_assignment(node): + if parent.type in [ + syms.annassign, + syms.expr_stmt, + syms.assert_stmt, + syms.return_stmt, + syms.except_clause, + syms.funcdef, + syms.with_stmt, + # these ones aren't useful to end users, but they do please fuzzers + syms.for_stmt, + syms.del_stmt, + syms.for_stmt, + ]: + return False + + first = node.children[0] + last = node.children[-1] + if is_lpar_token(first) and is_rpar_token(last): + middle = node.children[1] + # make parentheses invisible + first.value = "" + last.value = "" + maybe_make_parens_invisible_in_atom( + middle, + parent=parent, + remove_brackets_around_comma=remove_brackets_around_comma, + ) + + if is_atom_with_invisible_parens(middle): + # Strip the invisible parens from `middle` by replacing + # it with the child in-between the invisible parens + middle.replace(middle.children[1]) + + return False + + return True + + +def should_split_line(line: Line, opening_bracket: Leaf) -> bool: + """Should `line` be immediately split with `delimiter_split()` after RHS?""" + + if not (opening_bracket.parent and opening_bracket.value in "[{("): + return False + + # We're essentially checking if the body is delimited by commas and there's more + # than one of them (we're excluding the trailing comma and if the delimiter priority + # is still commas, that means there's more). + exclude = set() + trailing_comma = False + try: + last_leaf = line.leaves[-1] + if last_leaf.type == token.COMMA: + trailing_comma = True + exclude.add(id(last_leaf)) + max_priority = line.bracket_tracker.max_delimiter_priority(exclude=exclude) + except (IndexError, ValueError): + return False + + return max_priority == COMMA_PRIORITY and ( + (line.mode.magic_trailing_comma and trailing_comma) + # always explode imports + or opening_bracket.parent.type in {syms.atom, syms.import_from} + ) + + +def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[LeafID]]: + """Generate sets of closing bracket IDs that should be omitted in a RHS. + + Brackets can be omitted if the entire trailer up to and including + a preceding closing bracket fits in one line. + + Yielded sets are cumulative (contain results of previous yields, too). First + set is empty, unless the line should explode, in which case bracket pairs until + the one that needs to explode are omitted. + """ + + omit: Set[LeafID] = set() + if not line.magic_trailing_comma: + yield omit + + length = 4 * line.depth + opening_bracket: Optional[Leaf] = None + closing_bracket: Optional[Leaf] = None + inner_brackets: Set[LeafID] = set() + for index, leaf, leaf_length in line.enumerate_with_length(reversed=True): + length += leaf_length + if length > line_length: + break + + has_inline_comment = leaf_length > len(leaf.value) + len(leaf.prefix) + if leaf.type == STANDALONE_COMMENT or has_inline_comment: + break + + if opening_bracket: + if leaf is opening_bracket: + opening_bracket = None + elif leaf.type in CLOSING_BRACKETS: + prev = line.leaves[index - 1] if index > 0 else None + if ( + prev + and prev.type == token.COMMA + and leaf.opening_bracket is not None + and not is_one_sequence_between( + leaf.opening_bracket, leaf, line.leaves + ) + ): + # Never omit bracket pairs with trailing commas. + # We need to explode on those. 
+ break + + inner_brackets.add(id(leaf)) + elif leaf.type in CLOSING_BRACKETS: + prev = line.leaves[index - 1] if index > 0 else None + if prev and prev.type in OPENING_BRACKETS: + # Empty brackets would fail a split so treat them as "inner" + # brackets (e.g. only add them to the `omit` set if another + # pair of brackets was good enough. + inner_brackets.add(id(leaf)) + continue + + if closing_bracket: + omit.add(id(closing_bracket)) + omit.update(inner_brackets) + inner_brackets.clear() + yield omit + + if ( + prev + and prev.type == token.COMMA + and leaf.opening_bracket is not None + and not is_one_sequence_between(leaf.opening_bracket, leaf, line.leaves) + ): + # Never omit bracket pairs with trailing commas. + # We need to explode on those. + break + + if leaf.value: + opening_bracket = leaf.opening_bracket + closing_bracket = leaf + + +def run_transformer( + line: Line, + transform: Transformer, + mode: Mode, + features: Collection[Feature], + *, + line_str: str = "", +) -> List[Line]: + if not line_str: + line_str = line_to_string(line) + result: List[Line] = [] + for transformed_line in transform(line, features): + if str(transformed_line).strip("\n") == line_str: + raise CannotTransform("Line transformer returned an unchanged result") + + result.extend(transform_line(transformed_line, mode=mode, features=features)) + + features_set = set(features) + if ( + Feature.FORCE_OPTIONAL_PARENTHESES in features_set + or transform.__class__.__name__ != "rhs" + or not line.bracket_tracker.invisible + or any(bracket.value for bracket in line.bracket_tracker.invisible) + or line.contains_multiline_strings() + or result[0].contains_uncollapsable_type_comments() + or result[0].contains_unsplittable_type_ignore() + or is_line_short_enough(result[0], line_length=mode.line_length) + # If any leaves have no parents (which _can_ occur since + # `transform(line)` potentially destroys the line's underlying node + # structure), then we can't proceed. Doing so would cause the below + # call to `append_leaves()` to fail. 
+ or any(leaf.parent is None for leaf in line.leaves) + ): + return result + + line_copy = line.clone() + append_leaves(line_copy, line, line.leaves) + features_fop = features_set | {Feature.FORCE_OPTIONAL_PARENTHESES} + second_opinion = run_transformer( + line_copy, transform, mode, features_fop, line_str=line_str + ) + if all( + is_line_short_enough(ln, line_length=mode.line_length) for ln in second_opinion + ): + result = second_opinion + return result diff --git a/.venv/lib/python3.11/site-packages/black/lines.cpython-311-x86_64-linux-gnu.so b/.venv/lib/python3.11/site-packages/black/lines.cpython-311-x86_64-linux-gnu.so new file mode 100755 index 0000000..4ed8ff5 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/black/lines.cpython-311-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.11/site-packages/black/lines.py b/.venv/lib/python3.11/site-packages/black/lines.py new file mode 100644 index 0000000..ec6ef5d --- /dev/null +++ b/.venv/lib/python3.11/site-packages/black/lines.py @@ -0,0 +1,868 @@ +import itertools +import sys +from dataclasses import dataclass, field +from typing import ( + Callable, + Dict, + Iterator, + List, + Optional, + Sequence, + Tuple, + TypeVar, + cast, +) + +from black.brackets import DOT_PRIORITY, BracketTracker +from black.mode import Mode +from black.nodes import ( + BRACKETS, + CLOSING_BRACKETS, + OPENING_BRACKETS, + STANDALONE_COMMENT, + TEST_DESCENDANTS, + child_towards, + is_import, + is_multiline_string, + is_one_sequence_between, + is_type_comment, + replace_child, + syms, + whitespace, +) +from blib2to3.pgen2 import token +from blib2to3.pytree import Leaf, Node + +# types +T = TypeVar("T") +Index = int +LeafID = int + + +@dataclass +class Line: + """Holds leaves and comments. Can be printed with `str(line)`.""" + + mode: Mode + depth: int = 0 + leaves: List[Leaf] = field(default_factory=list) + # keys ordered like `leaves` + comments: Dict[LeafID, List[Leaf]] = field(default_factory=dict) + bracket_tracker: BracketTracker = field(default_factory=BracketTracker) + inside_brackets: bool = False + should_split_rhs: bool = False + magic_trailing_comma: Optional[Leaf] = None + + def append( + self, leaf: Leaf, preformatted: bool = False, track_bracket: bool = False + ) -> None: + """Add a new `leaf` to the end of the line. + + Unless `preformatted` is True, the `leaf` will receive a new consistent + whitespace prefix and metadata applied by :class:`BracketTracker`. + Trailing commas are maybe removed, unpacked for loop variables are + demoted from being delimiters. + + Inline comments are put aside. + """ + has_value = leaf.type in BRACKETS or bool(leaf.value.strip()) + if not has_value: + return + + if token.COLON == leaf.type and self.is_class_paren_empty: + del self.leaves[-2:] + if self.leaves and not preformatted: + # Note: at this point leaf.prefix should be empty except for + # imports, for which we only preserve newlines. 
+ leaf.prefix += whitespace( + leaf, complex_subscript=self.is_complex_subscript(leaf) + ) + if self.inside_brackets or not preformatted or track_bracket: + self.bracket_tracker.mark(leaf) + if self.mode.magic_trailing_comma: + if self.has_magic_trailing_comma(leaf): + self.magic_trailing_comma = leaf + elif self.has_magic_trailing_comma(leaf, ensure_removable=True): + self.remove_trailing_comma() + if not self.append_comment(leaf): + self.leaves.append(leaf) + + def append_safe(self, leaf: Leaf, preformatted: bool = False) -> None: + """Like :func:`append()` but disallow invalid standalone comment structure. + + Raises ValueError when any `leaf` is appended after a standalone comment + or when a standalone comment is not the first leaf on the line. + """ + if self.bracket_tracker.depth == 0: + if self.is_comment: + raise ValueError("cannot append to standalone comments") + + if self.leaves and leaf.type == STANDALONE_COMMENT: + raise ValueError( + "cannot append standalone comments to a populated line" + ) + + self.append(leaf, preformatted=preformatted) + + @property + def is_comment(self) -> bool: + """Is this line a standalone comment?""" + return len(self.leaves) == 1 and self.leaves[0].type == STANDALONE_COMMENT + + @property + def is_decorator(self) -> bool: + """Is this line a decorator?""" + return bool(self) and self.leaves[0].type == token.AT + + @property + def is_import(self) -> bool: + """Is this an import line?""" + return bool(self) and is_import(self.leaves[0]) + + @property + def is_class(self) -> bool: + """Is this line a class definition?""" + return ( + bool(self) + and self.leaves[0].type == token.NAME + and self.leaves[0].value == "class" + ) + + @property + def is_stub_class(self) -> bool: + """Is this line a class definition with a body consisting only of "..."?""" + return self.is_class and self.leaves[-3:] == [ + Leaf(token.DOT, ".") for _ in range(3) + ] + + @property + def is_def(self) -> bool: + """Is this a function definition? (Also returns True for async defs.)""" + try: + first_leaf = self.leaves[0] + except IndexError: + return False + + try: + second_leaf: Optional[Leaf] = self.leaves[1] + except IndexError: + second_leaf = None + return (first_leaf.type == token.NAME and first_leaf.value == "def") or ( + first_leaf.type == token.ASYNC + and second_leaf is not None + and second_leaf.type == token.NAME + and second_leaf.value == "def" + ) + + @property + def is_class_paren_empty(self) -> bool: + """Is this a class with no base classes but using parentheses? + + Those are unnecessary and should be removed. 
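+
+ For example, `class A():` is rewritten to `class A:`.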
+ """ + return ( + bool(self) + and len(self.leaves) == 4 + and self.is_class + and self.leaves[2].type == token.LPAR + and self.leaves[2].value == "(" + and self.leaves[3].type == token.RPAR + and self.leaves[3].value == ")" + ) + + @property + def is_triple_quoted_string(self) -> bool: + """Is the line a triple quoted string?""" + return ( + bool(self) + and self.leaves[0].type == token.STRING + and self.leaves[0].value.startswith(('"""', "'''")) + ) + + @property + def opens_block(self) -> bool: + """Does this line open a new level of indentation.""" + if len(self.leaves) == 0: + return False + return self.leaves[-1].type == token.COLON + + def contains_standalone_comments(self, depth_limit: int = sys.maxsize) -> bool: + """If so, needs to be split before emitting.""" + for leaf in self.leaves: + if leaf.type == STANDALONE_COMMENT and leaf.bracket_depth <= depth_limit: + return True + + return False + + def contains_uncollapsable_type_comments(self) -> bool: + ignored_ids = set() + try: + last_leaf = self.leaves[-1] + ignored_ids.add(id(last_leaf)) + if last_leaf.type == token.COMMA or ( + last_leaf.type == token.RPAR and not last_leaf.value + ): + # When trailing commas or optional parens are inserted by Black for + # consistency, comments after the previous last element are not moved + # (they don't have to, rendering will still be correct). So we ignore + # trailing commas and invisible. + last_leaf = self.leaves[-2] + ignored_ids.add(id(last_leaf)) + except IndexError: + return False + + # A type comment is uncollapsable if it is attached to a leaf + # that isn't at the end of the line (since that could cause it + # to get associated to a different argument) or if there are + # comments before it (since that could cause it to get hidden + # behind a comment. + comment_seen = False + for leaf_id, comments in self.comments.items(): + for comment in comments: + if is_type_comment(comment): + if comment_seen or ( + not is_type_comment(comment, " ignore") + and leaf_id not in ignored_ids + ): + return True + + comment_seen = True + + return False + + def contains_unsplittable_type_ignore(self) -> bool: + if not self.leaves: + return False + + # If a 'type: ignore' is attached to the end of a line, we + # can't split the line, because we can't know which of the + # subexpressions the ignore was meant to apply to. + # + # We only want this to apply to actual physical lines from the + # original source, though: we don't want the presence of a + # 'type: ignore' at the end of a multiline expression to + # justify pushing it all onto one line. Thus we + # (unfortunately) need to check the actual source lines and + # only report an unsplittable 'type: ignore' if this line was + # one line in the original code. + + # Grab the first and last line numbers, skipping generated leaves + first_line = next((leaf.lineno for leaf in self.leaves if leaf.lineno != 0), 0) + last_line = next( + (leaf.lineno for leaf in reversed(self.leaves) if leaf.lineno != 0), 0 + ) + + if first_line == last_line: + # We look at the last two leaves since a comma or an + # invisible paren could have been added at the end of the + # line. 
+ for node in self.leaves[-2:]: + for comment in self.comments.get(id(node), []): + if is_type_comment(comment, " ignore"): + return True + + return False + + def contains_multiline_strings(self) -> bool: + return any(is_multiline_string(leaf) for leaf in self.leaves) + + def has_magic_trailing_comma( + self, closing: Leaf, ensure_removable: bool = False + ) -> bool: + """Return True if we have a magic trailing comma, that is when: + - there's a trailing comma here + - it's not a one-tuple + - it's not a single-element subscript + Additionally, if ensure_removable: + - it's not from square bracket indexing + (specifically, single-element square bracket indexing) + """ + if not ( + closing.type in CLOSING_BRACKETS + and self.leaves + and self.leaves[-1].type == token.COMMA + ): + return False + + if closing.type == token.RBRACE: + return True + + if closing.type == token.RSQB: + if ( + closing.parent + and closing.parent.type == syms.trailer + and closing.opening_bracket + and is_one_sequence_between( + closing.opening_bracket, + closing, + self.leaves, + brackets=(token.LSQB, token.RSQB), + ) + ): + return False + + if not ensure_removable: + return True + + comma = self.leaves[-1] + if comma.parent is None: + return False + return ( + comma.parent.type != syms.subscriptlist + or closing.opening_bracket is None + or not is_one_sequence_between( + closing.opening_bracket, + closing, + self.leaves, + brackets=(token.LSQB, token.RSQB), + ) + ) + + if self.is_import: + return True + + if closing.opening_bracket is not None and not is_one_sequence_between( + closing.opening_bracket, closing, self.leaves + ): + return True + + return False + + def append_comment(self, comment: Leaf) -> bool: + """Add an inline or standalone comment to the line.""" + if ( + comment.type == STANDALONE_COMMENT + and self.bracket_tracker.any_open_brackets() + ): + comment.prefix = "" + return False + + if comment.type != token.COMMENT: + return False + + if not self.leaves: + comment.type = STANDALONE_COMMENT + comment.prefix = "" + return False + + last_leaf = self.leaves[-1] + if ( + last_leaf.type == token.RPAR + and not last_leaf.value + and last_leaf.parent + and len(list(last_leaf.parent.leaves())) <= 3 + and not is_type_comment(comment) + ): + # Comments on an optional parens wrapping a single leaf should belong to + # the wrapped node except if it's a type comment. Pinning the comment like + # this avoids unstable formatting caused by comment migration. 
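+ # E.g. with invisible parens wrapping `x` in `return x  # note`, the
+ # comment is pinned to `x` rather than to the invisible rpar.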
+ if len(self.leaves) < 2: + comment.type = STANDALONE_COMMENT + comment.prefix = "" + return False + + last_leaf = self.leaves[-2] + self.comments.setdefault(id(last_leaf), []).append(comment) + return True + + def comments_after(self, leaf: Leaf) -> List[Leaf]: + """Generate comments that should appear directly after `leaf`.""" + return self.comments.get(id(leaf), []) + + def remove_trailing_comma(self) -> None: + """Remove the trailing comma and moves the comments attached to it.""" + trailing_comma = self.leaves.pop() + trailing_comma_comments = self.comments.pop(id(trailing_comma), []) + self.comments.setdefault(id(self.leaves[-1]), []).extend( + trailing_comma_comments + ) + + def is_complex_subscript(self, leaf: Leaf) -> bool: + """Return True iff `leaf` is part of a slice with non-trivial exprs.""" + open_lsqb = self.bracket_tracker.get_open_lsqb() + if open_lsqb is None: + return False + + subscript_start = open_lsqb.next_sibling + + if isinstance(subscript_start, Node): + if subscript_start.type == syms.listmaker: + return False + + if subscript_start.type == syms.subscriptlist: + subscript_start = child_towards(subscript_start, leaf) + return subscript_start is not None and any( + n.type in TEST_DESCENDANTS for n in subscript_start.pre_order() + ) + + def enumerate_with_length( + self, reversed: bool = False + ) -> Iterator[Tuple[Index, Leaf, int]]: + """Return an enumeration of leaves with their length. + + Stops prematurely on multiline strings and standalone comments. + """ + op = cast( + Callable[[Sequence[Leaf]], Iterator[Tuple[Index, Leaf]]], + enumerate_reversed if reversed else enumerate, + ) + for index, leaf in op(self.leaves): + length = len(leaf.prefix) + len(leaf.value) + if "\n" in leaf.value: + return # Multiline strings, we can't continue. + + for comment in self.comments_after(leaf): + length += len(comment.value) + + yield index, leaf, length + + def clone(self) -> "Line": + return Line( + mode=self.mode, + depth=self.depth, + inside_brackets=self.inside_brackets, + should_split_rhs=self.should_split_rhs, + magic_trailing_comma=self.magic_trailing_comma, + ) + + def __str__(self) -> str: + """Render the line.""" + if not self: + return "\n" + + indent = " " * self.depth + leaves = iter(self.leaves) + first = next(leaves) + res = f"{first.prefix}{indent}{first.value}" + for leaf in leaves: + res += str(leaf) + for comment in itertools.chain.from_iterable(self.comments.values()): + res += str(comment) + + return res + "\n" + + def __bool__(self) -> bool: + """Return True if the line has leaves or comments.""" + return bool(self.leaves or self.comments) + + +@dataclass +class LinesBlock: + """Class that holds information about a block of formatted lines. + + This is introduced so that the EmptyLineTracker can look behind the standalone + comments and adjust their empty lines for class or def lines. + """ + + mode: Mode + previous_block: Optional["LinesBlock"] + original_line: Line + before: int = 0 + content_lines: List[str] = field(default_factory=list) + after: int = 0 + + def all_lines(self) -> List[str]: + empty_line = str(Line(mode=self.mode)) + return ( + [empty_line * self.before] + self.content_lines + [empty_line * self.after] + ) + + +@dataclass +class EmptyLineTracker: + """Provides a stateful method that returns the number of potential extra + empty lines needed before and after the currently processed line. + + Note: this tracker works on lines that haven't been split yet. It assumes + the prefix of the first leaf consists of optional newlines. 
Those newlines + are consumed by `maybe_empty_lines()` and included in the computation. + """ + + mode: Mode + previous_line: Optional[Line] = None + previous_block: Optional[LinesBlock] = None + previous_defs: List[int] = field(default_factory=list) + semantic_leading_comment: Optional[LinesBlock] = None + + def maybe_empty_lines(self, current_line: Line) -> LinesBlock: + """Return the number of extra empty lines before and after the `current_line`. + + This is for separating `def`, `async def` and `class` with extra empty + lines (two on module-level). + """ + before, after = self._maybe_empty_lines(current_line) + previous_after = self.previous_block.after if self.previous_block else 0 + before = ( + # Black should not insert empty lines at the beginning + # of the file + 0 + if self.previous_line is None + else before - previous_after + ) + block = LinesBlock( + mode=self.mode, + previous_block=self.previous_block, + original_line=current_line, + before=before, + after=after, + ) + + # Maintain the semantic_leading_comment state. + if current_line.is_comment: + if self.previous_line is None or ( + not self.previous_line.is_decorator + # `or before` means this comment already has an empty line before + and (not self.previous_line.is_comment or before) + and (self.semantic_leading_comment is None or before) + ): + self.semantic_leading_comment = block + # `or before` means this decorator already has an empty line before + elif not current_line.is_decorator or before: + self.semantic_leading_comment = None + + self.previous_line = current_line + self.previous_block = block + return block + + def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]: + max_allowed = 1 + if current_line.depth == 0: + max_allowed = 1 if self.mode.is_pyi else 2 + if current_line.leaves: + # Consume the first leaf's extra newlines. + first_leaf = current_line.leaves[0] + before = first_leaf.prefix.count("\n") + before = min(before, max_allowed) + first_leaf.prefix = "" + else: + before = 0 + depth = current_line.depth + while self.previous_defs and self.previous_defs[-1] >= depth: + if self.mode.is_pyi: + assert self.previous_line is not None + if depth and not current_line.is_def and self.previous_line.is_def: + # Empty lines between attributes and methods should be preserved. + before = min(1, before) + elif depth: + before = 0 + else: + before = 1 + else: + if depth: + before = 1 + elif ( + not depth + and self.previous_defs[-1] + and current_line.leaves[-1].type == token.COLON + and ( + current_line.leaves[0].value + not in ("with", "try", "for", "while", "if", "match") + ) + ): + # We shouldn't add two newlines between an indented function and + # a dependent non-indented clause. This is to avoid issues with + # conditional function definitions that are technically top-level + # and therefore get two trailing newlines, but look weird and + # inconsistent when they're followed by elif, else, etc. This is + # worse because these functions only get *one* preceding newline + # already. 
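+ # E.g. an `else:` that follows an `if` branch containing a function
+ # definition gets a single blank line before it, not two.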
+ before = 1 + else: + before = 2 + self.previous_defs.pop() + if current_line.is_decorator or current_line.is_def or current_line.is_class: + return self._maybe_empty_lines_for_class_or_def(current_line, before) + + if ( + self.previous_line + and self.previous_line.is_import + and not current_line.is_import + and depth == self.previous_line.depth + ): + return (before or 1), 0 + + if ( + self.previous_line + and self.previous_line.is_class + and current_line.is_triple_quoted_string + ): + return before, 1 + + if self.previous_line and self.previous_line.opens_block: + return 0, 0 + return before, 0 + + def _maybe_empty_lines_for_class_or_def( + self, current_line: Line, before: int + ) -> Tuple[int, int]: + if not current_line.is_decorator: + self.previous_defs.append(current_line.depth) + if self.previous_line is None: + # Don't insert empty lines before the first line in the file. + return 0, 0 + + if self.previous_line.is_decorator: + if self.mode.is_pyi and current_line.is_stub_class: + # Insert an empty line after a decorated stub class + return 0, 1 + + return 0, 0 + + if self.previous_line.depth < current_line.depth and ( + self.previous_line.is_class or self.previous_line.is_def + ): + return 0, 0 + + comment_to_add_newlines: Optional[LinesBlock] = None + if ( + self.previous_line.is_comment + and self.previous_line.depth == current_line.depth + and before == 0 + ): + slc = self.semantic_leading_comment + if ( + slc is not None + and slc.previous_block is not None + and not slc.previous_block.original_line.is_class + and not slc.previous_block.original_line.opens_block + and slc.before <= 1 + ): + comment_to_add_newlines = slc + else: + return 0, 0 + + if self.mode.is_pyi: + if current_line.is_class or self.previous_line.is_class: + if self.previous_line.depth < current_line.depth: + newlines = 0 + elif self.previous_line.depth > current_line.depth: + newlines = 1 + elif current_line.is_stub_class and self.previous_line.is_stub_class: + # No blank line between classes with an empty body + newlines = 0 + else: + newlines = 1 + elif ( + current_line.is_def or current_line.is_decorator + ) and not self.previous_line.is_def: + if current_line.depth: + # In classes empty lines between attributes and methods should + # be preserved. + newlines = min(1, before) + else: + # Blank line between a block of functions (maybe with preceding + # decorators) and a block of non-functions + newlines = 1 + elif self.previous_line.depth > current_line.depth: + newlines = 1 + else: + newlines = 0 + else: + newlines = 1 if current_line.depth else 2 + if comment_to_add_newlines is not None: + previous_block = comment_to_add_newlines.previous_block + if previous_block is not None: + comment_to_add_newlines.before = ( + max(comment_to_add_newlines.before, newlines) - previous_block.after + ) + newlines = 0 + return newlines, 0 + + +def enumerate_reversed(sequence: Sequence[T]) -> Iterator[Tuple[Index, T]]: + """Like `reversed(enumerate(sequence))` if that were possible.""" + index = len(sequence) - 1 + for element in reversed(sequence): + yield (index, element) + index -= 1 + + +def append_leaves( + new_line: Line, old_line: Line, leaves: List[Leaf], preformatted: bool = False +) -> None: + """ + Append leaves (taken from @old_line) to @new_line, making sure to fix the + underlying Node structure where appropriate. + + All of the leaves in @leaves are duplicated. The duplicates are then + appended to @new_line and used to replace their originals in the underlying + Node structure. 
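A quick sketch of the `enumerate_reversed` helper above, assuming this module is importable as `black.lines` (the surrounding package layout suggests it is; the call below is illustrative only):

    from black.lines import enumerate_reversed

    # Walks the sequence backwards while keeping the original indices, i.e.
    # what `reversed(enumerate(seq))` would mean if that expression were legal.
    print(list(enumerate_reversed("abc")))  # [(2, 'c'), (1, 'b'), (0, 'a')]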
Any comments attached to the old leaves are reattached to + the new leaves. + + Pre-conditions: + set(@leaves) is a subset of set(@old_line.leaves). + """ + for old_leaf in leaves: + new_leaf = Leaf(old_leaf.type, old_leaf.value) + replace_child(old_leaf, new_leaf) + new_line.append(new_leaf, preformatted=preformatted) + + for comment_leaf in old_line.comments_after(old_leaf): + new_line.append(comment_leaf, preformatted=True) + + +def is_line_short_enough(line: Line, *, line_length: int, line_str: str = "") -> bool: + """Return True if `line` is no longer than `line_length`. + + Uses the provided `line_str` rendering, if any, otherwise computes a new one. + """ + if not line_str: + line_str = line_to_string(line) + return ( + len(line_str) <= line_length + and "\n" not in line_str # multiline strings + and not line.contains_standalone_comments() + ) + + +def can_be_split(line: Line) -> bool: + """Return False if the line cannot be split *for sure*. + + This is not an exhaustive search but a cheap heuristic that we can use to + avoid some unfortunate formattings (mostly around wrapping unsplittable code + in unnecessary parentheses). + """ + leaves = line.leaves + if len(leaves) < 2: + return False + + if leaves[0].type == token.STRING and leaves[1].type == token.DOT: + call_count = 0 + dot_count = 0 + next = leaves[-1] + for leaf in leaves[-2::-1]: + if leaf.type in OPENING_BRACKETS: + if next.type not in CLOSING_BRACKETS: + return False + + call_count += 1 + elif leaf.type == token.DOT: + dot_count += 1 + elif leaf.type == token.NAME: + if not (next.type == token.DOT or next.type in OPENING_BRACKETS): + return False + + elif leaf.type not in CLOSING_BRACKETS: + return False + + if dot_count > 1 and call_count > 1: + return False + + return True + + +def can_omit_invisible_parens( + line: Line, + line_length: int, +) -> bool: + """Does `line` have a shape safe to reformat without optional parens around it? + + Returns True for only a subset of potentially nice looking formattings but + the point is to not return false positives that end up producing lines that + are too long. + """ + bt = line.bracket_tracker + if not bt.delimiters: + # Without delimiters the optional parentheses are useless. + return True + + max_priority = bt.max_delimiter_priority() + if bt.delimiter_count_with_priority(max_priority) > 1: + # With more than one delimiter of a kind the optional parentheses read better. + return False + + if max_priority == DOT_PRIORITY: + # A single stranded method call doesn't require optional parentheses. + return True + + assert len(line.leaves) >= 2, "Stranded delimiter" + + # With a single delimiter, omit if the expression starts or ends with + # a bracket. + first = line.leaves[0] + second = line.leaves[1] + if first.type in OPENING_BRACKETS and second.type not in CLOSING_BRACKETS: + if _can_omit_opening_paren(line, first=first, line_length=line_length): + return True + + # Note: we are not returning False here because a line might have *both* + # a leading opening bracket and a trailing closing bracket. If the + # opening bracket doesn't match our rule, maybe the closing will. + + penultimate = line.leaves[-2] + last = line.leaves[-1] + + if ( + last.type == token.RPAR + or last.type == token.RBRACE + or ( + # don't use indexing for omitting optional parentheses; + # it looks weird + last.type == token.RSQB + and last.parent + and last.parent.type != syms.trailer + ) + ): + if penultimate.type in OPENING_BRACKETS: + # Empty brackets don't help. 
+ return False + + if is_multiline_string(first): + # Additional wrapping of a multiline string in this situation is + # unnecessary. + return True + + if _can_omit_closing_paren(line, last=last, line_length=line_length): + return True + + return False + + +def _can_omit_opening_paren(line: Line, *, first: Leaf, line_length: int) -> bool: + """See `can_omit_invisible_parens`.""" + remainder = False + length = 4 * line.depth + _index = -1 + for _index, leaf, leaf_length in line.enumerate_with_length(): + if leaf.type in CLOSING_BRACKETS and leaf.opening_bracket is first: + remainder = True + if remainder: + length += leaf_length + if length > line_length: + break + + if leaf.type in OPENING_BRACKETS: + # There are brackets we can further split on. + remainder = False + + else: + # checked the entire string and line length wasn't exceeded + if len(line.leaves) == _index + 1: + return True + + return False + + +def _can_omit_closing_paren(line: Line, *, last: Leaf, line_length: int) -> bool: + """See `can_omit_invisible_parens`.""" + length = 4 * line.depth + seen_other_brackets = False + for _index, leaf, leaf_length in line.enumerate_with_length(): + length += leaf_length + if leaf is last.opening_bracket: + if seen_other_brackets or length <= line_length: + return True + + elif leaf.type in OPENING_BRACKETS: + # There are brackets we can further split on. + seen_other_brackets = True + + return False + + +def line_to_string(line: Line) -> str: + """Returns the string representation of @line. + + WARNING: This is known to be computationally expensive. + """ + return str(line).strip("\n") diff --git a/.venv/lib/python3.11/site-packages/black/mode.cpython-311-x86_64-linux-gnu.so b/.venv/lib/python3.11/site-packages/black/mode.cpython-311-x86_64-linux-gnu.so new file mode 100755 index 0000000..5d0922b Binary files /dev/null and b/.venv/lib/python3.11/site-packages/black/mode.cpython-311-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.11/site-packages/black/mode.py b/.venv/lib/python3.11/site-packages/black/mode.py new file mode 100644 index 0000000..1af1607 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/black/mode.py @@ -0,0 +1,225 @@ +"""Data structures configuring Black behavior. + +Mostly around Python language feature support per version and Black configuration +chosen by the user. +""" + +import sys +from dataclasses import dataclass, field +from enum import Enum, auto +from hashlib import sha256 +from operator import attrgetter +from typing import Dict, Set +from warnings import warn + +if sys.version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final + +from black.const import DEFAULT_LINE_LENGTH + + +class TargetVersion(Enum): + PY33 = 3 + PY34 = 4 + PY35 = 5 + PY36 = 6 + PY37 = 7 + PY38 = 8 + PY39 = 9 + PY310 = 10 + PY311 = 11 + + +class Feature(Enum): + F_STRINGS = 2 + NUMERIC_UNDERSCORES = 3 + TRAILING_COMMA_IN_CALL = 4 + TRAILING_COMMA_IN_DEF = 5 + # The following two feature-flags are mutually exclusive, and exactly one should be + # set for every version of python. 
+ ASYNC_IDENTIFIERS = 6 + ASYNC_KEYWORDS = 7 + ASSIGNMENT_EXPRESSIONS = 8 + POS_ONLY_ARGUMENTS = 9 + RELAXED_DECORATORS = 10 + PATTERN_MATCHING = 11 + UNPACKING_ON_FLOW = 12 + ANN_ASSIGN_EXTENDED_RHS = 13 + EXCEPT_STAR = 14 + VARIADIC_GENERICS = 15 + DEBUG_F_STRINGS = 16 + PARENTHESIZED_CONTEXT_MANAGERS = 17 + FORCE_OPTIONAL_PARENTHESES = 50 + + # __future__ flags + FUTURE_ANNOTATIONS = 51 + + +FUTURE_FLAG_TO_FEATURE: Final = { + "annotations": Feature.FUTURE_ANNOTATIONS, +} + + +VERSION_TO_FEATURES: Dict[TargetVersion, Set[Feature]] = { + TargetVersion.PY33: {Feature.ASYNC_IDENTIFIERS}, + TargetVersion.PY34: {Feature.ASYNC_IDENTIFIERS}, + TargetVersion.PY35: {Feature.TRAILING_COMMA_IN_CALL, Feature.ASYNC_IDENTIFIERS}, + TargetVersion.PY36: { + Feature.F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_IDENTIFIERS, + }, + TargetVersion.PY37: { + Feature.F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_KEYWORDS, + Feature.FUTURE_ANNOTATIONS, + }, + TargetVersion.PY38: { + Feature.F_STRINGS, + Feature.DEBUG_F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_KEYWORDS, + Feature.FUTURE_ANNOTATIONS, + Feature.ASSIGNMENT_EXPRESSIONS, + Feature.POS_ONLY_ARGUMENTS, + Feature.UNPACKING_ON_FLOW, + Feature.ANN_ASSIGN_EXTENDED_RHS, + }, + TargetVersion.PY39: { + Feature.F_STRINGS, + Feature.DEBUG_F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_KEYWORDS, + Feature.FUTURE_ANNOTATIONS, + Feature.ASSIGNMENT_EXPRESSIONS, + Feature.RELAXED_DECORATORS, + Feature.POS_ONLY_ARGUMENTS, + Feature.UNPACKING_ON_FLOW, + Feature.ANN_ASSIGN_EXTENDED_RHS, + Feature.PARENTHESIZED_CONTEXT_MANAGERS, + }, + TargetVersion.PY310: { + Feature.F_STRINGS, + Feature.DEBUG_F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_KEYWORDS, + Feature.FUTURE_ANNOTATIONS, + Feature.ASSIGNMENT_EXPRESSIONS, + Feature.RELAXED_DECORATORS, + Feature.POS_ONLY_ARGUMENTS, + Feature.UNPACKING_ON_FLOW, + Feature.ANN_ASSIGN_EXTENDED_RHS, + Feature.PARENTHESIZED_CONTEXT_MANAGERS, + Feature.PATTERN_MATCHING, + }, + TargetVersion.PY311: { + Feature.F_STRINGS, + Feature.DEBUG_F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_KEYWORDS, + Feature.FUTURE_ANNOTATIONS, + Feature.ASSIGNMENT_EXPRESSIONS, + Feature.RELAXED_DECORATORS, + Feature.POS_ONLY_ARGUMENTS, + Feature.UNPACKING_ON_FLOW, + Feature.ANN_ASSIGN_EXTENDED_RHS, + Feature.PARENTHESIZED_CONTEXT_MANAGERS, + Feature.PATTERN_MATCHING, + Feature.EXCEPT_STAR, + Feature.VARIADIC_GENERICS, + }, +} + + +def supports_feature(target_versions: Set[TargetVersion], feature: Feature) -> bool: + return all(feature in VERSION_TO_FEATURES[version] for version in target_versions) + + +class Preview(Enum): + """Individual preview style features.""" + + hex_codes_in_unicode_sequences = auto() + prefer_splitting_right_hand_side_of_assignments = auto() + # NOTE: string_processing requires wrap_long_dict_values_in_parens + # for https://github.com/psf/black/issues/3117 to be fixed. 
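A minimal sketch of `supports_feature` against the version table above; the only assumption is that this vendored black package is importable:

    from black.mode import Feature, TargetVersion, supports_feature

    # A feature counts as supported only when *every* target version has it,
    # so adding an older target drags the answer from True to False.
    print(supports_feature({TargetVersion.PY38}, Feature.F_STRINGS))  # True
    print(
        supports_feature({TargetVersion.PY35, TargetVersion.PY38}, Feature.F_STRINGS)
    )  # False: PY35 predates f-strings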
+ string_processing = auto() + parenthesize_conditional_expressions = auto() + skip_magic_trailing_comma_in_subscript = auto() + wrap_long_dict_values_in_parens = auto() + wrap_multiple_context_managers_in_parens = auto() + + +class Deprecated(UserWarning): + """Visible deprecation warning.""" + + +@dataclass +class Mode: + target_versions: Set[TargetVersion] = field(default_factory=set) + line_length: int = DEFAULT_LINE_LENGTH + string_normalization: bool = True + is_pyi: bool = False + is_ipynb: bool = False + skip_source_first_line: bool = False + magic_trailing_comma: bool = True + experimental_string_processing: bool = False + python_cell_magics: Set[str] = field(default_factory=set) + preview: bool = False + + def __post_init__(self) -> None: + if self.experimental_string_processing: + warn( + ( + "`experimental string processing` has been included in `preview`" + " and deprecated. Use `preview` instead." + ), + Deprecated, + ) + + def __contains__(self, feature: Preview) -> bool: + """ + Provide `Preview.FEATURE in Mode` syntax that mirrors the ``preview`` flag. + + The argument is not checked and features are not differentiated. + They only exist to make development easier by clarifying intent. + """ + if feature is Preview.string_processing: + return self.preview or self.experimental_string_processing + return self.preview + + def get_cache_key(self) -> str: + if self.target_versions: + version_str = ",".join( + str(version.value) + for version in sorted(self.target_versions, key=attrgetter("value")) + ) + else: + version_str = "-" + parts = [ + version_str, + str(self.line_length), + str(int(self.string_normalization)), + str(int(self.is_pyi)), + str(int(self.is_ipynb)), + str(int(self.skip_source_first_line)), + str(int(self.magic_trailing_comma)), + str(int(self.experimental_string_processing)), + str(int(self.preview)), + sha256((",".join(sorted(self.python_cell_magics))).encode()).hexdigest(), + ] + return ".".join(parts) diff --git a/.venv/lib/python3.11/site-packages/black/nodes.cpython-311-x86_64-linux-gnu.so b/.venv/lib/python3.11/site-packages/black/nodes.cpython-311-x86_64-linux-gnu.so new file mode 100755 index 0000000..a776141 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/black/nodes.cpython-311-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.11/site-packages/black/nodes.py b/.venv/lib/python3.11/site-packages/black/nodes.py new file mode 100644 index 0000000..a588077 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/black/nodes.py @@ -0,0 +1,873 @@ +""" +blib2to3 Node/Leaf transformation-related utility functions. 
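A short sketch tying `Mode`, the `preview` flag, and `get_cache_key` together; the printed key is a dotted string of the options and should be treated as indicative only:

    from black.mode import Mode, Preview, TargetVersion

    mode = Mode(target_versions={TargetVersion.PY311}, line_length=100, preview=True)
    assert Preview.string_processing in mode        # mirrors the `preview` flag
    assert Preview.string_processing not in Mode()  # preview defaults to off
    # Every formatting-relevant option lands in the cache key, so changing any
    # of them invalidates previously cached results.
    print(mode.get_cache_key())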
+""" + +import sys +from typing import Generic, Iterator, List, Optional, Set, Tuple, TypeVar, Union + +if sys.version_info >= (3, 8): + from typing import Final +else: + from typing_extensions import Final +if sys.version_info >= (3, 10): + from typing import TypeGuard +else: + from typing_extensions import TypeGuard + +from mypy_extensions import mypyc_attr + +from black.cache import CACHE_DIR +from black.strings import has_triple_quotes +from blib2to3 import pygram +from blib2to3.pgen2 import token +from blib2to3.pytree import NL, Leaf, Node, type_repr + +pygram.initialize(CACHE_DIR) +syms: Final = pygram.python_symbols + + +# types +T = TypeVar("T") +LN = Union[Leaf, Node] +LeafID = int +NodeType = int + + +WHITESPACE: Final = {token.DEDENT, token.INDENT, token.NEWLINE} +STATEMENT: Final = { + syms.if_stmt, + syms.while_stmt, + syms.for_stmt, + syms.try_stmt, + syms.except_clause, + syms.with_stmt, + syms.funcdef, + syms.classdef, + syms.match_stmt, + syms.case_block, +} +STANDALONE_COMMENT: Final = 153 +token.tok_name[STANDALONE_COMMENT] = "STANDALONE_COMMENT" +LOGIC_OPERATORS: Final = {"and", "or"} +COMPARATORS: Final = { + token.LESS, + token.GREATER, + token.EQEQUAL, + token.NOTEQUAL, + token.LESSEQUAL, + token.GREATEREQUAL, +} +MATH_OPERATORS: Final = { + token.VBAR, + token.CIRCUMFLEX, + token.AMPER, + token.LEFTSHIFT, + token.RIGHTSHIFT, + token.PLUS, + token.MINUS, + token.STAR, + token.SLASH, + token.DOUBLESLASH, + token.PERCENT, + token.AT, + token.TILDE, + token.DOUBLESTAR, +} +STARS: Final = {token.STAR, token.DOUBLESTAR} +VARARGS_SPECIALS: Final = STARS | {token.SLASH} +VARARGS_PARENTS: Final = { + syms.arglist, + syms.argument, # double star in arglist + syms.trailer, # single argument to call + syms.typedargslist, + syms.varargslist, # lambdas +} +UNPACKING_PARENTS: Final = { + syms.atom, # single element of a list or set literal + syms.dictsetmaker, + syms.listmaker, + syms.testlist_gexp, + syms.testlist_star_expr, + syms.subject_expr, + syms.pattern, +} +TEST_DESCENDANTS: Final = { + syms.test, + syms.lambdef, + syms.or_test, + syms.and_test, + syms.not_test, + syms.comparison, + syms.star_expr, + syms.expr, + syms.xor_expr, + syms.and_expr, + syms.shift_expr, + syms.arith_expr, + syms.trailer, + syms.term, + syms.power, +} +TYPED_NAMES: Final = {syms.tname, syms.tname_star} +ASSIGNMENTS: Final = { + "=", + "+=", + "-=", + "*=", + "@=", + "/=", + "%=", + "&=", + "|=", + "^=", + "<<=", + ">>=", + "**=", + "//=", +} + +IMPLICIT_TUPLE: Final = {syms.testlist, syms.testlist_star_expr, syms.exprlist} +BRACKET: Final = { + token.LPAR: token.RPAR, + token.LSQB: token.RSQB, + token.LBRACE: token.RBRACE, +} +OPENING_BRACKETS: Final = set(BRACKET.keys()) +CLOSING_BRACKETS: Final = set(BRACKET.values()) +BRACKETS: Final = OPENING_BRACKETS | CLOSING_BRACKETS +ALWAYS_NO_SPACE: Final = CLOSING_BRACKETS | {token.COMMA, STANDALONE_COMMENT} + +RARROW = 55 + + +@mypyc_attr(allow_interpreted_subclasses=True) +class Visitor(Generic[T]): + """Basic lib2to3 visitor that yields things of type `T` on `visit()`.""" + + def visit(self, node: LN) -> Iterator[T]: + """Main method to visit `node` and its children. + + It tries to find a `visit_*()` method for the given `node.type`, like + `visit_simple_stmt` for Node objects or `visit_INDENT` for Leaf objects. + If no dedicated `visit_*()` method is found, chooses `visit_default()` + instead. + + Then yields objects of type `T` from the selected visitor. 
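The `visit_*` dispatch described here can be exercised with a tiny subclass; this sketch assumes `lib2to3_parse` from black/parsing.py (which appears later in this diff) to build a tree:

    from typing import Iterator

    from black.nodes import Visitor
    from black.parsing import lib2to3_parse
    from blib2to3.pytree import Leaf

    class NameCollector(Visitor[str]):
        # Dispatch is by token name: NAME leaves land in visit_NAME; everything
        # else falls through to visit_default and recurses into children.
        def visit_NAME(self, leaf: Leaf) -> Iterator[str]:
            yield leaf.value

    tree = lib2to3_parse("x = spam(eggs)\n")
    print(list(NameCollector().visit(tree)))  # ['x', 'spam', 'eggs']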
+ """ + if node.type < 256: + name = token.tok_name[node.type] + else: + name = str(type_repr(node.type)) + # We explicitly branch on whether a visitor exists (instead of + # using self.visit_default as the default arg to getattr) in order + # to save needing to create a bound method object and so mypyc can + # generate a native call to visit_default. + visitf = getattr(self, f"visit_{name}", None) + if visitf: + yield from visitf(node) + else: + yield from self.visit_default(node) + + def visit_default(self, node: LN) -> Iterator[T]: + """Default `visit_*()` implementation. Recurses to children of `node`.""" + if isinstance(node, Node): + for child in node.children: + yield from self.visit(child) + + +def whitespace(leaf: Leaf, *, complex_subscript: bool) -> str: # noqa: C901 + """Return whitespace prefix if needed for the given `leaf`. + + `complex_subscript` signals whether the given leaf is part of a subscription + which has non-trivial arguments, like arithmetic expressions or function calls. + """ + NO: Final = "" + SPACE: Final = " " + DOUBLESPACE: Final = " " + t = leaf.type + p = leaf.parent + v = leaf.value + if t in ALWAYS_NO_SPACE: + return NO + + if t == token.COMMENT: + return DOUBLESPACE + + assert p is not None, f"INTERNAL ERROR: hand-made leaf without parent: {leaf!r}" + if t == token.COLON and p.type not in { + syms.subscript, + syms.subscriptlist, + syms.sliceop, + }: + return NO + + prev = leaf.prev_sibling + if not prev: + prevp = preceding_leaf(p) + if not prevp or prevp.type in OPENING_BRACKETS: + return NO + + if t == token.COLON: + if prevp.type == token.COLON: + return NO + + elif prevp.type != token.COMMA and not complex_subscript: + return NO + + return SPACE + + if prevp.type == token.EQUAL: + if prevp.parent: + if prevp.parent.type in { + syms.arglist, + syms.argument, + syms.parameters, + syms.varargslist, + }: + return NO + + elif prevp.parent.type == syms.typedargslist: + # A bit hacky: if the equal sign has whitespace, it means we + # previously found it's a typed argument. So, we're using + # that, too. + return prevp.prefix + + elif ( + prevp.type == token.STAR + and parent_type(prevp) == syms.star_expr + and parent_type(prevp.parent) == syms.subscriptlist + ): + # No space between typevar tuples. + return NO + + elif prevp.type in VARARGS_SPECIALS: + if is_vararg(prevp, within=VARARGS_PARENTS | UNPACKING_PARENTS): + return NO + + elif prevp.type == token.COLON: + if prevp.parent and prevp.parent.type in {syms.subscript, syms.sliceop}: + return SPACE if complex_subscript else NO + + elif ( + prevp.parent + and prevp.parent.type == syms.factor + and prevp.type in MATH_OPERATORS + ): + return NO + + elif prevp.type == token.AT and p.parent and p.parent.type == syms.decorator: + # no space in decorators + return NO + + elif prev.type in OPENING_BRACKETS: + return NO + + if p.type in {syms.parameters, syms.arglist}: + # untyped function signatures or calls + if not prev or prev.type != token.COMMA: + return NO + + elif p.type == syms.varargslist: + # lambdas + if prev and prev.type != token.COMMA: + return NO + + elif p.type == syms.typedargslist: + # typed function signatures + if not prev: + return NO + + if t == token.EQUAL: + if prev.type not in TYPED_NAMES: + return NO + + elif prev.type == token.EQUAL: + # A bit hacky: if the equal sign has whitespace, it means we + # previously found it's a typed argument. So, we're using that, too. 
+            return prev.prefix
+
+        elif prev.type != token.COMMA:
+            return NO
+
+    elif p.type in TYPED_NAMES:
+        # type names
+        if not prev:
+            prevp = preceding_leaf(p)
+            if not prevp or prevp.type != token.COMMA:
+                return NO
+
+    elif p.type == syms.trailer:
+        # attributes and calls
+        if t == token.LPAR or t == token.RPAR:
+            return NO
+
+        if not prev:
+            if t == token.DOT or t == token.LSQB:
+                return NO
+
+        elif prev.type != token.COMMA:
+            return NO
+
+    elif p.type == syms.argument:
+        # single argument
+        if t == token.EQUAL:
+            return NO
+
+        if not prev:
+            prevp = preceding_leaf(p)
+            if not prevp or prevp.type == token.LPAR:
+                return NO
+
+        elif prev.type in {token.EQUAL} | VARARGS_SPECIALS:
+            return NO
+
+    elif p.type == syms.decorator:
+        # decorators
+        return NO
+
+    elif p.type == syms.dotted_name:
+        if prev:
+            return NO
+
+        prevp = preceding_leaf(p)
+        if not prevp or prevp.type == token.AT or prevp.type == token.DOT:
+            return NO
+
+    elif p.type == syms.classdef:
+        if t == token.LPAR:
+            return NO
+
+        if prev and prev.type == token.LPAR:
+            return NO
+
+    elif p.type in {syms.subscript, syms.sliceop}:
+        # indexing
+        if not prev:
+            assert p.parent is not None, "subscripts are always parented"
+            if p.parent.type == syms.subscriptlist:
+                return SPACE
+
+            return NO
+
+        elif not complex_subscript:
+            return NO
+
+    elif p.type == syms.atom:
+        if prev and t == token.DOT:
+            # dots, but not the first one.
+            return NO
+
+    elif p.type == syms.dictsetmaker:
+        # dict unpacking
+        if prev and prev.type == token.DOUBLESTAR:
+            return NO
+
+    elif p.type in {syms.factor, syms.star_expr}:
+        # unary ops
+        if not prev:
+            prevp = preceding_leaf(p)
+            if not prevp or prevp.type in OPENING_BRACKETS:
+                return NO
+
+            prevp_parent = prevp.parent
+            assert prevp_parent is not None
+            if prevp.type == token.COLON and prevp_parent.type in {
+                syms.subscript,
+                syms.sliceop,
+            }:
+                return NO
+
+            elif prevp.type == token.EQUAL and prevp_parent.type == syms.argument:
+                return NO
+
+        elif t in {token.NAME, token.NUMBER, token.STRING}:
+            return NO
+
+    elif p.type == syms.import_from:
+        if t == token.DOT:
+            if prev and prev.type == token.DOT:
+                return NO
+
+        elif t == token.NAME:
+            if v == "import":
+                return SPACE
+
+            if prev and prev.type == token.DOT:
+                return NO
+
+    elif p.type == syms.sliceop:
+        return NO
+
+    elif p.type == syms.except_clause:
+        if t == token.STAR:
+            return NO
+
+    return SPACE
+
+
+def preceding_leaf(node: Optional[LN]) -> Optional[Leaf]:
+    """Return the first leaf that precedes `node`, if any."""
+    while node:
+        res = node.prev_sibling
+        if res:
+            if isinstance(res, Leaf):
+                return res
+
+            try:
+                return list(res.leaves())[-1]
+
+            except IndexError:
+                return None
+
+        node = node.parent
+    return None
+
+
+def prev_siblings_are(node: Optional[LN], tokens: List[Optional[NodeType]]) -> bool:
+    """Return if the `node` and its previous siblings match types against the provided
+    list of tokens; the provided `node` has its type matched against the last element
+    in the list. `None` can be used as the first element to declare that the start of
+    the list is anchored at the start of its parent's children."""
+    if not tokens:
+        return True
+    if tokens[-1] is None:
+        return node is None
+    if not node:
+        return False
+    if node.type != tokens[-1]:
+        return False
+    return prev_siblings_are(node.prev_sibling, tokens[:-1])
+
+
+def parent_type(node: Optional[LN]) -> Optional[NodeType]:
+    """
+    Returns:
+        @node.parent.type, if @node is not None and has a parent.
+            OR
+        None, otherwise.
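`prev_siblings_are` is easiest to read with a concrete tree in hand; a sketch, assuming the expr_stmt shape blib2to3 produces for a simple assignment:

    from black.nodes import prev_siblings_are
    from black.parsing import lib2to3_parse
    from blib2to3.pgen2 import token

    stmt = lib2to3_parse("x = 1\n").children[0]  # simple_stmt
    expr = stmt.children[0]                      # expr_stmt: NAME '=' NUMBER
    eq = expr.children[1]                        # the '=' leaf
    # `None` anchors the match at the start of the parent's children, so this
    # asserts that `=` is the second child and a NAME is the first.
    print(prev_siblings_are(eq, [None, token.NAME, token.EQUAL]))  # True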
+ """ + if node is None or node.parent is None: + return None + + return node.parent.type + + +def child_towards(ancestor: Node, descendant: LN) -> Optional[LN]: + """Return the child of `ancestor` that contains `descendant`.""" + node: Optional[LN] = descendant + while node and node.parent != ancestor: + node = node.parent + return node + + +def replace_child(old_child: LN, new_child: LN) -> None: + """ + Side Effects: + * If @old_child.parent is set, replace @old_child with @new_child in + @old_child's underlying Node structure. + OR + * Otherwise, this function does nothing. + """ + parent = old_child.parent + if not parent: + return + + child_idx = old_child.remove() + if child_idx is not None: + parent.insert_child(child_idx, new_child) + + +def container_of(leaf: Leaf) -> LN: + """Return `leaf` or one of its ancestors that is the topmost container of it. + + By "container" we mean a node where `leaf` is the very first child. + """ + same_prefix = leaf.prefix + container: LN = leaf + while container: + parent = container.parent + if parent is None: + break + + if parent.children[0].prefix != same_prefix: + break + + if parent.type == syms.file_input: + break + + if parent.prev_sibling is not None and parent.prev_sibling.type in BRACKETS: + break + + container = parent + return container + + +def first_leaf_of(node: LN) -> Optional[Leaf]: + """Returns the first leaf of the node tree.""" + if isinstance(node, Leaf): + return node + if node.children: + return first_leaf_of(node.children[0]) + else: + return None + + +def is_arith_like(node: LN) -> bool: + """Whether node is an arithmetic or a binary arithmetic expression""" + return node.type in { + syms.arith_expr, + syms.shift_expr, + syms.xor_expr, + syms.and_expr, + } + + +def is_docstring(leaf: Leaf) -> bool: + if prev_siblings_are( + leaf.parent, [None, token.NEWLINE, token.INDENT, syms.simple_stmt] + ): + return True + + # Multiline docstring on the same line as the `def`. + if prev_siblings_are(leaf.parent, [syms.parameters, token.COLON, syms.simple_stmt]): + # `syms.parameters` is only used in funcdefs and async_funcdefs in the Python + # grammar. We're safe to return True without further checks. 
+        return True
+
+    return False
+
+
+def is_empty_tuple(node: LN) -> bool:
+    """Return True if `node` holds an empty tuple."""
+    return (
+        node.type == syms.atom
+        and len(node.children) == 2
+        and node.children[0].type == token.LPAR
+        and node.children[1].type == token.RPAR
+    )
+
+
+def is_one_tuple(node: LN) -> bool:
+    """Return True if `node` holds a tuple with one element, with or without parens."""
+    if node.type == syms.atom:
+        gexp = unwrap_singleton_parenthesis(node)
+        if gexp is None or gexp.type != syms.testlist_gexp:
+            return False
+
+        return len(gexp.children) == 2 and gexp.children[1].type == token.COMMA
+
+    return (
+        node.type in IMPLICIT_TUPLE
+        and len(node.children) == 2
+        and node.children[1].type == token.COMMA
+    )
+
+
+def is_tuple_containing_walrus(node: LN) -> bool:
+    """Return True if `node` holds a tuple that contains a walrus operator."""
+    if node.type != syms.atom:
+        return False
+    gexp = unwrap_singleton_parenthesis(node)
+    if gexp is None or gexp.type != syms.testlist_gexp:
+        return False
+
+    return any(child.type == syms.namedexpr_test for child in gexp.children)
+
+
+def is_one_sequence_between(
+    opening: Leaf,
+    closing: Leaf,
+    leaves: List[Leaf],
+    brackets: Tuple[int, int] = (token.LPAR, token.RPAR),
+) -> bool:
+    """Return True if content between `opening` and `closing` is a one-sequence."""
+    if (opening.type, closing.type) != brackets:
+        return False
+
+    depth = closing.bracket_depth + 1
+    for _opening_index, leaf in enumerate(leaves):
+        if leaf is opening:
+            break
+
+    else:
+        raise LookupError("Opening paren not found in `leaves`")
+
+    commas = 0
+    _opening_index += 1
+    for leaf in leaves[_opening_index:]:
+        if leaf is closing:
+            break
+
+        bracket_depth = leaf.bracket_depth
+        if bracket_depth == depth and leaf.type == token.COMMA:
+            commas += 1
+            if leaf.parent and leaf.parent.type in {
+                syms.arglist,
+                syms.typedargslist,
+            }:
+                commas += 1
+                break
+
+    return commas < 2
+
+
+def is_walrus_assignment(node: LN) -> bool:
+    """Return True iff `node` is of the shape ( test := test )"""
+    inner = unwrap_singleton_parenthesis(node)
+    return inner is not None and inner.type == syms.namedexpr_test
+
+
+def is_simple_decorator_trailer(node: LN, last: bool = False) -> bool:
+    """Return True iff `node` is a trailer valid in a simple decorator"""
+    return node.type == syms.trailer and (
+        (
+            len(node.children) == 2
+            and node.children[0].type == token.DOT
+            and node.children[1].type == token.NAME
+        )
+        # last trailer can be an argument-less parentheses pair
+        or (
+            last
+            and len(node.children) == 2
+            and node.children[0].type == token.LPAR
+            and node.children[1].type == token.RPAR
+        )
+        # last trailer can be arguments
+        or (
+            last
+            and len(node.children) == 3
+            and node.children[0].type == token.LPAR
+            # and node.children[1].type == syms.argument
+            and node.children[2].type == token.RPAR
+        )
+    )
+
+
+def is_simple_decorator_expression(node: LN) -> bool:
+    """Return True iff `node` could be a 'dotted name' decorator
+
+    This function takes the node of the 'namedexpr_test' of the new decorator
+    grammar and tests whether it would be valid under the old decorator grammar.
+ + The old grammar was: decorator: @ dotted_name [arguments] NEWLINE + The new grammar is : decorator: @ namedexpr_test NEWLINE + """ + if node.type == token.NAME: + return True + if node.type == syms.power: + if node.children: + return ( + node.children[0].type == token.NAME + and all(map(is_simple_decorator_trailer, node.children[1:-1])) + and ( + len(node.children) < 2 + or is_simple_decorator_trailer(node.children[-1], last=True) + ) + ) + return False + + +def is_yield(node: LN) -> bool: + """Return True if `node` holds a `yield` or `yield from` expression.""" + if node.type == syms.yield_expr: + return True + + if is_name_token(node) and node.value == "yield": + return True + + if node.type != syms.atom: + return False + + if len(node.children) != 3: + return False + + lpar, expr, rpar = node.children + if lpar.type == token.LPAR and rpar.type == token.RPAR: + return is_yield(expr) + + return False + + +def is_vararg(leaf: Leaf, within: Set[NodeType]) -> bool: + """Return True if `leaf` is a star or double star in a vararg or kwarg. + + If `within` includes VARARGS_PARENTS, this applies to function signatures. + If `within` includes UNPACKING_PARENTS, it applies to right hand-side + extended iterable unpacking (PEP 3132) and additional unpacking + generalizations (PEP 448). + """ + if leaf.type not in VARARGS_SPECIALS or not leaf.parent: + return False + + p = leaf.parent + if p.type == syms.star_expr: + # Star expressions are also used as assignment targets in extended + # iterable unpacking (PEP 3132). See what its parent is instead. + if not p.parent: + return False + + p = p.parent + + return p.type in within + + +def is_multiline_string(leaf: Leaf) -> bool: + """Return True if `leaf` is a multiline string that actually spans many lines.""" + return has_triple_quotes(leaf.value) and "\n" in leaf.value + + +def is_stub_suite(node: Node) -> bool: + """Return True if `node` is a suite with a stub body.""" + if ( + len(node.children) != 4 + or node.children[0].type != token.NEWLINE + or node.children[1].type != token.INDENT + or node.children[3].type != token.DEDENT + ): + return False + + return is_stub_body(node.children[2]) + + +def is_stub_body(node: LN) -> bool: + """Return True if `node` is a simple statement containing an ellipsis.""" + if not isinstance(node, Node) or node.type != syms.simple_stmt: + return False + + if len(node.children) != 2: + return False + + child = node.children[0] + return ( + child.type == syms.atom + and len(child.children) == 3 + and all(leaf == Leaf(token.DOT, ".") for leaf in child.children) + ) + + +def is_atom_with_invisible_parens(node: LN) -> bool: + """Given a `LN`, determines whether it's an atom `node` with invisible + parens. Useful in dedupe-ing and normalizing parens. 
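`is_multiline_string` needs only a STRING leaf, so it can be probed directly; note that triple quotes alone are not enough:

    from black.nodes import is_multiline_string
    from blib2to3.pgen2 import token
    from blib2to3.pytree import Leaf

    one_line = Leaf(token.STRING, '"""all on one line"""')
    two_lines = Leaf(token.STRING, '"""spans\ntwo lines"""')
    print(is_multiline_string(one_line))   # False: no newline in the value
    print(is_multiline_string(two_lines))  # True: triple quotes and a newline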
+ """ + if isinstance(node, Leaf) or node.type != syms.atom: + return False + + first, last = node.children[0], node.children[-1] + return ( + isinstance(first, Leaf) + and first.type == token.LPAR + and first.value == "" + and isinstance(last, Leaf) + and last.type == token.RPAR + and last.value == "" + ) + + +def is_empty_par(leaf: Leaf) -> bool: + return is_empty_lpar(leaf) or is_empty_rpar(leaf) + + +def is_empty_lpar(leaf: Leaf) -> bool: + return leaf.type == token.LPAR and leaf.value == "" + + +def is_empty_rpar(leaf: Leaf) -> bool: + return leaf.type == token.RPAR and leaf.value == "" + + +def is_import(leaf: Leaf) -> bool: + """Return True if the given leaf starts an import statement.""" + p = leaf.parent + t = leaf.type + v = leaf.value + return bool( + t == token.NAME + and ( + (v == "import" and p and p.type == syms.import_name) + or (v == "from" and p and p.type == syms.import_from) + ) + ) + + +def is_type_comment(leaf: Leaf, suffix: str = "") -> bool: + """Return True if the given leaf is a special comment. + Only returns true for type comments for now.""" + t = leaf.type + v = leaf.value + return t in {token.COMMENT, STANDALONE_COMMENT} and v.startswith("# type:" + suffix) + + +def wrap_in_parentheses(parent: Node, child: LN, *, visible: bool = True) -> None: + """Wrap `child` in parentheses. + + This replaces `child` with an atom holding the parentheses and the old + child. That requires moving the prefix. + + If `visible` is False, the leaves will be valueless (and thus invisible). + """ + lpar = Leaf(token.LPAR, "(" if visible else "") + rpar = Leaf(token.RPAR, ")" if visible else "") + prefix = child.prefix + child.prefix = "" + index = child.remove() or 0 + new_child = Node(syms.atom, [lpar, child, rpar]) + new_child.prefix = prefix + parent.insert_child(index, new_child) + + +def unwrap_singleton_parenthesis(node: LN) -> Optional[LN]: + """Returns `wrapped` if `node` is of the shape ( wrapped ). + + Parenthesis can be optional. Returns None otherwise""" + if len(node.children) != 3: + return None + + lpar, wrapped, rpar = node.children + if not (lpar.type == token.LPAR and rpar.type == token.RPAR): + return None + + return wrapped + + +def ensure_visible(leaf: Leaf) -> None: + """Make sure parentheses are visible. + + They could be invisible as part of some statements (see + :func:`normalize_invisible_parens` and :func:`visit_import_from`). 
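Invisible parentheses are just LPAR/RPAR leaves with empty values; a sketch of `wrap_in_parentheses(..., visible=False)` above feeding the predicate it pairs with:

    from black.nodes import is_atom_with_invisible_parens, wrap_in_parentheses
    from black.parsing import lib2to3_parse

    expr = lib2to3_parse("x = 1\n").children[0].children[0]  # expr_stmt
    wrap_in_parentheses(expr, expr.children[2], visible=False)
    # The NUMBER leaf is now wrapped in an atom whose parens render as "",
    # so the output is unchanged until ensure_visible() fills them in.
    print(is_atom_with_invisible_parens(expr.children[2]))  # True
    print(str(expr))  # still "x = 1"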
+    """
+    if leaf.type == token.LPAR:
+        leaf.value = "("
+    elif leaf.type == token.RPAR:
+        leaf.value = ")"
+
+
+def is_name_token(nl: NL) -> TypeGuard[Leaf]:
+    return nl.type == token.NAME
+
+
+def is_lpar_token(nl: NL) -> TypeGuard[Leaf]:
+    return nl.type == token.LPAR
+
+
+def is_rpar_token(nl: NL) -> TypeGuard[Leaf]:
+    return nl.type == token.RPAR
+
+
+def is_string_token(nl: NL) -> TypeGuard[Leaf]:
+    return nl.type == token.STRING
+
+
+def is_number_token(nl: NL) -> TypeGuard[Leaf]:
+    return nl.type == token.NUMBER
+
+
+def is_part_of_annotation(leaf: Leaf) -> bool:
+    """Returns whether this leaf is part of type annotations."""
+    ancestor = leaf.parent
+    while ancestor is not None:
+        if ancestor.prev_sibling and ancestor.prev_sibling.type == token.RARROW:
+            return True
+        if ancestor.parent and ancestor.parent.type == syms.tname:
+            return True
+        ancestor = ancestor.parent
+    return False
diff --git a/.venv/lib/python3.11/site-packages/black/numerics.cpython-311-x86_64-linux-gnu.so b/.venv/lib/python3.11/site-packages/black/numerics.cpython-311-x86_64-linux-gnu.so
new file mode 100755
index 0000000..3e292c0
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/black/numerics.cpython-311-x86_64-linux-gnu.so differ
diff --git a/.venv/lib/python3.11/site-packages/black/numerics.py b/.venv/lib/python3.11/site-packages/black/numerics.py
new file mode 100644
index 0000000..879e5b2
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/black/numerics.py
@@ -0,0 +1,60 @@
+"""
+Formatting numeric literals.
+"""
+from blib2to3.pytree import Leaf
+
+
+def format_hex(text: str) -> str:
+    """
+    Formats a hexadecimal string like "0x12B3"
+    """
+    before, after = text[:2], text[2:]
+    return f"{before}{after.upper()}"
+
+
+def format_scientific_notation(text: str) -> str:
+    """Formats a numeric string utilizing scientific notation"""
+    before, after = text.split("e")
+    sign = ""
+    if after.startswith("-"):
+        after = after[1:]
+        sign = "-"
+    elif after.startswith("+"):
+        after = after[1:]
+    before = format_float_or_int_string(before)
+    return f"{before}e{sign}{after}"
+
+
+def format_complex_number(text: str) -> str:
+    """Formats a complex string like `10j`"""
+    number = text[:-1]
+    suffix = text[-1]
+    return f"{format_float_or_int_string(number)}{suffix}"
+
+
+def format_float_or_int_string(text: str) -> str:
+    """Formats a float string like "1.0"."""
+    if "." not in text:
+        return text
+
+    before, after = text.split(".")
+    return f"{before or 0}.{after or 0}"
+
+
+def normalize_numeric_literal(leaf: Leaf) -> None:
+    """Normalizes numeric (float, int, and complex) literals.
+
+    All letters used in the representation are normalized to lowercase."""
+    text = leaf.value.lower()
+    if text.startswith(("0o", "0b")):
+        # Leave octal and binary literals alone.
+        pass
+    elif text.startswith("0x"):
+        text = format_hex(text)
+    elif "e" in text:
+        text = format_scientific_notation(text)
+    elif text.endswith("j"):
+        text = format_complex_number(text)
+    else:
+        text = format_float_or_int_string(text)
+    leaf.value = text
diff --git a/.venv/lib/python3.11/site-packages/black/output.py b/.venv/lib/python3.11/site-packages/black/output.py
new file mode 100644
index 0000000..f4c17f2
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/black/output.py
@@ -0,0 +1,105 @@
+"""Nice output for Black.
+
+The double calls are for patching purposes in tests.
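The numeric normalizer above is deterministic and cheap to spot-check on synthetic NUMBER leaves (expected values in the trailing comment):

    from black.numerics import normalize_numeric_literal
    from blib2to3.pgen2 import token
    from blib2to3.pytree import Leaf

    for text in ("0XAB", "10E3", "1J", "1.", ".5", "0o777"):
        leaf = Leaf(token.NUMBER, text)
        normalize_numeric_literal(leaf)
        print(text, "->", leaf.value)
    # 0XAB -> 0xAB, 10E3 -> 10e3, 1J -> 1j, 1. -> 1.0, .5 -> 0.5, 0o777 -> 0o777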
+""" + +import json +import tempfile +from typing import Any, Optional + +from click import echo, style +from mypy_extensions import mypyc_attr + + +@mypyc_attr(patchable=True) +def _out(message: Optional[str] = None, nl: bool = True, **styles: Any) -> None: + if message is not None: + if "bold" not in styles: + styles["bold"] = True + message = style(message, **styles) + echo(message, nl=nl, err=True) + + +@mypyc_attr(patchable=True) +def _err(message: Optional[str] = None, nl: bool = True, **styles: Any) -> None: + if message is not None: + if "fg" not in styles: + styles["fg"] = "red" + message = style(message, **styles) + echo(message, nl=nl, err=True) + + +@mypyc_attr(patchable=True) +def out(message: Optional[str] = None, nl: bool = True, **styles: Any) -> None: + _out(message, nl=nl, **styles) + + +def err(message: Optional[str] = None, nl: bool = True, **styles: Any) -> None: + _err(message, nl=nl, **styles) + + +def ipynb_diff(a: str, b: str, a_name: str, b_name: str) -> str: + """Return a unified diff string between each cell in notebooks `a` and `b`.""" + a_nb = json.loads(a) + b_nb = json.loads(b) + diff_lines = [ + diff( + "".join(a_nb["cells"][cell_number]["source"]) + "\n", + "".join(b_nb["cells"][cell_number]["source"]) + "\n", + f"{a_name}:cell_{cell_number}", + f"{b_name}:cell_{cell_number}", + ) + for cell_number, cell in enumerate(a_nb["cells"]) + if cell["cell_type"] == "code" + ] + return "".join(diff_lines) + + +def diff(a: str, b: str, a_name: str, b_name: str) -> str: + """Return a unified diff string between strings `a` and `b`.""" + import difflib + + a_lines = a.splitlines(keepends=True) + b_lines = b.splitlines(keepends=True) + diff_lines = [] + for line in difflib.unified_diff( + a_lines, b_lines, fromfile=a_name, tofile=b_name, n=5 + ): + # Work around https://bugs.python.org/issue2142 + # See: + # https://www.gnu.org/software/diffutils/manual/html_node/Incomplete-Lines.html + if line[-1] == "\n": + diff_lines.append(line) + else: + diff_lines.append(line + "\n") + diff_lines.append("\\ No newline at end of file\n") + return "".join(diff_lines) + + +def color_diff(contents: str) -> str: + """Inject the ANSI color codes to the diff.""" + lines = contents.split("\n") + for i, line in enumerate(lines): + if line.startswith("+++") or line.startswith("---"): + line = "\033[1m" + line + "\033[0m" # bold, reset + elif line.startswith("@@"): + line = "\033[36m" + line + "\033[0m" # cyan, reset + elif line.startswith("+"): + line = "\033[32m" + line + "\033[0m" # green, reset + elif line.startswith("-"): + line = "\033[31m" + line + "\033[0m" # red, reset + lines[i] = line + return "\n".join(lines) + + +@mypyc_attr(patchable=True) +def dump_to_file(*output: str, ensure_final_newline: bool = True) -> str: + """Dump `output` to a temporary file. 
Return path to the file.""" + with tempfile.NamedTemporaryFile( + mode="w", prefix="blk_", suffix=".log", delete=False, encoding="utf8" + ) as f: + for lines in output: + f.write(lines) + if ensure_final_newline and lines and lines[-1] != "\n": + f.write("\n") + return f.name diff --git a/.venv/lib/python3.11/site-packages/black/parsing.cpython-311-x86_64-linux-gnu.so b/.venv/lib/python3.11/site-packages/black/parsing.cpython-311-x86_64-linux-gnu.so new file mode 100755 index 0000000..57cd209 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/black/parsing.cpython-311-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.11/site-packages/black/parsing.py b/.venv/lib/python3.11/site-packages/black/parsing.py new file mode 100644 index 0000000..ba474c5 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/black/parsing.py @@ -0,0 +1,277 @@ +""" +Parse Python code and perform AST validation. +""" +import ast +import platform +import sys +from typing import Any, Iterable, Iterator, List, Set, Tuple, Type, Union + +if sys.version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final + +from black.mode import VERSION_TO_FEATURES, Feature, TargetVersion, supports_feature +from black.nodes import syms +from blib2to3 import pygram +from blib2to3.pgen2 import driver +from blib2to3.pgen2.grammar import Grammar +from blib2to3.pgen2.parse import ParseError +from blib2to3.pgen2.tokenize import TokenError +from blib2to3.pytree import Leaf, Node + +ast3: Any + +_IS_PYPY = platform.python_implementation() == "PyPy" + +try: + from typed_ast import ast3 +except ImportError: + if sys.version_info < (3, 8) and not _IS_PYPY: + print( + ( + "The typed_ast package is required but not installed.\n" + "You can upgrade to Python 3.8+ or install typed_ast with\n" + "`python3 -m pip install typed-ast`." + ), + file=sys.stderr, + ) + sys.exit(1) + else: + ast3 = ast + + +PY2_HINT: Final = "Python 2 support was removed in version 22.0." + + +class InvalidInput(ValueError): + """Raised when input source code fails all parse attempts.""" + + +def get_grammars(target_versions: Set[TargetVersion]) -> List[Grammar]: + if not target_versions: + # No target_version specified, so try all grammars. 
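Looking back at black/output.py above, `diff` and `color_diff` compose directly; a small sketch:

    from black.output import color_diff, diff

    before = "x = 1\ny = 2\n"
    after = "x = 1\ny = 3\n"
    text = diff(before, after, "a.py", "b.py")
    print(text)              # plain unified diff with five lines of context
    print(color_diff(text))  # same diff with ANSI bold/cyan/green/red injected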
+        return [
+            # Python 3.7-3.9
+            pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords,
+            # Python 3.0-3.6
+            pygram.python_grammar_no_print_statement_no_exec_statement,
+            # Python 3.10+
+            pygram.python_grammar_soft_keywords,
+        ]
+
+    grammars = []
+    # If we have to parse both, try to parse async as a keyword first
+    if not supports_feature(
+        target_versions, Feature.ASYNC_IDENTIFIERS
+    ) and not supports_feature(target_versions, Feature.PATTERN_MATCHING):
+        # Python 3.7-3.9
+        grammars.append(
+            pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords
+        )
+    if not supports_feature(target_versions, Feature.ASYNC_KEYWORDS):
+        # Python 3.0-3.6
+        grammars.append(pygram.python_grammar_no_print_statement_no_exec_statement)
+    if any(Feature.PATTERN_MATCHING in VERSION_TO_FEATURES[v] for v in target_versions):
+        # Python 3.10+
+        grammars.append(pygram.python_grammar_soft_keywords)
+
+    # At least one of the above branches must have been taken, because every Python
+    # version has exactly one of the two 'ASYNC_*' flags
+    return grammars
+
+
+def lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) -> Node:
+    """Given a string with source, return the lib2to3 Node."""
+    if not src_txt.endswith("\n"):
+        src_txt += "\n"
+
+    grammars = get_grammars(set(target_versions))
+    errors = {}
+    for grammar in grammars:
+        drv = driver.Driver(grammar)
+        try:
+            result = drv.parse_string(src_txt, True)
+            break
+
+        except ParseError as pe:
+            lineno, column = pe.context[1]
+            lines = src_txt.splitlines()
+            try:
+                faulty_line = lines[lineno - 1]
+            except IndexError:
+                faulty_line = "<line number missing in source>"
+            errors[grammar.version] = InvalidInput(
+                f"Cannot parse: {lineno}:{column}: {faulty_line}"
+            )
+
+        except TokenError as te:
+            # In edge cases these are raised; and typically don't have a "faulty_line".
+            lineno, column = te.args[1]
+            errors[grammar.version] = InvalidInput(
+                f"Cannot parse: {lineno}:{column}: {te.args[0]}"
+            )
+
+    else:
+        # Choose the latest version when raising the actual parsing error.
+        assert len(errors) >= 1
+        exc = errors[max(errors)]
+
+        if matches_grammar(src_txt, pygram.python_grammar) or matches_grammar(
+            src_txt, pygram.python_grammar_no_print_statement
+        ):
+            original_msg = exc.args[0]
+            msg = f"{original_msg}\n{PY2_HINT}"
+            raise InvalidInput(msg) from None
+
+        raise exc from None
+
+    if isinstance(result, Leaf):
+        result = Node(syms.file_input, [result])
+    return result
+
+
+def matches_grammar(src_txt: str, grammar: Grammar) -> bool:
+    drv = driver.Driver(grammar)
+    try:
+        drv.parse_string(src_txt, True)
+    except (ParseError, TokenError, IndentationError):
+        return False
+    else:
+        return True
+
+
+def lib2to3_unparse(node: Node) -> str:
+    """Given a lib2to3 node, return its string representation."""
+    code = str(node)
+    return code
+
+
+def parse_single_version(
+    src: str, version: Tuple[int, int]
+) -> Union[ast.AST, ast3.AST]:
+    filename = "<unknown>"
+    # typed-ast is needed because of feature version limitations in the builtin ast
+    # before 3.8
+    if sys.version_info >= (3, 8) and version >= (3,):
+        return ast.parse(src, filename, feature_version=version, type_comments=True)
+
+    if _IS_PYPY:
+        # PyPy 3.7 doesn't support type comment tracking which is not ideal, but there's
+        # not much we can do as typed-ast won't work either.
+        if sys.version_info >= (3, 8):
+            return ast3.parse(src, filename, type_comments=True)
+        else:
+            return ast3.parse(src, filename)
+    else:
+        # Typed-ast is guaranteed to be used here and automatically tracks type
+        # comments separately.
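`lib2to3_parse` above picks grammars from the requested target versions and collects per-grammar failures into `InvalidInput`; a sketch (the exact error text is approximate):

    from black.mode import TargetVersion
    from black.parsing import InvalidInput, lib2to3_parse

    # The soft-keyword grammar is only tried when the targets allow it.
    src = "match x:\n    case 1:\n        pass\n"
    tree = lib2to3_parse(src, {TargetVersion.PY310})
    print(str(tree) == src)  # True: parse trees round-trip to the source

    try:
        lib2to3_parse("print 'hello'\n")  # Python 2 syntax fails every grammar
    except InvalidInput as exc:
        print(exc)  # roughly "Cannot parse: 1:6: print 'hello'" plus the Python 2 hint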
+        return ast3.parse(src, filename, feature_version=version[1])
+
+
+def parse_ast(src: str) -> Union[ast.AST, ast3.AST]:
+    # TODO: support Python 4+ ;)
+    versions = [(3, minor) for minor in range(3, sys.version_info[1] + 1)]
+
+    first_error = ""
+    for version in sorted(versions, reverse=True):
+        try:
+            return parse_single_version(src, version)
+        except SyntaxError as e:
+            if not first_error:
+                first_error = str(e)
+
+    raise SyntaxError(first_error)
+
+
+ast3_AST: Final[Type[ast3.AST]] = ast3.AST
+
+
+def _normalize(lineend: str, value: str) -> str:
+    # To normalize, we strip any leading and trailing space from
+    # each line...
+    stripped: List[str] = [i.strip() for i in value.splitlines()]
+    normalized = lineend.join(stripped)
+    # ...and remove any blank lines at the beginning and end of
+    # the whole string
+    return normalized.strip()
+
+
+def stringify_ast(node: Union[ast.AST, ast3.AST], depth: int = 0) -> Iterator[str]:
+    """Simple visitor generating strings to compare ASTs by content."""
+
+    node = fixup_ast_constants(node)
+
+    yield f"{'  ' * depth}{node.__class__.__name__}("
+
+    type_ignore_classes: Tuple[Type[Any], ...]
+    for field in sorted(node._fields):  # noqa: F402
+        # TypeIgnore will not be present using pypy < 3.8, so no need for this
+        if not (_IS_PYPY and sys.version_info < (3, 8)):
+            # TypeIgnore has only one field 'lineno' which breaks this comparison
+            type_ignore_classes = (ast3.TypeIgnore,)
+            if sys.version_info >= (3, 8):
+                type_ignore_classes += (ast.TypeIgnore,)
+            if isinstance(node, type_ignore_classes):
+                break
+
+        try:
+            value: object = getattr(node, field)
+        except AttributeError:
+            continue
+
+        yield f"{'  ' * (depth+1)}{field}="
+
+        if isinstance(value, list):
+            for item in value:
+                # Ignore nested tuples within del statements, because we may insert
+                # parentheses and they change the AST.
+                if (
+                    field == "targets"
+                    and isinstance(node, (ast.Delete, ast3.Delete))
+                    and isinstance(item, (ast.Tuple, ast3.Tuple))
+                ):
+                    for elt in item.elts:
+                        yield from stringify_ast(elt, depth + 2)
+
+                elif isinstance(item, (ast.AST, ast3.AST)):
+                    yield from stringify_ast(item, depth + 2)
+
+        # Note that we are referencing the typed-ast ASTs via global variables and not
+        # direct module attribute accesses because that breaks mypyc. It's probably
+        # something to do with the ast3 variables being marked as Any leading
+        # mypy to think this branch is always taken, leaving the rest of the code
+        # unanalyzed. Tightening up the types for the typed-ast AST types avoids the
+        # mypyc crash.
+        elif isinstance(value, (ast.AST, ast3_AST)):
+            yield from stringify_ast(value, depth + 2)
+
+        else:
+            normalized: object
+            # Constant strings may be indented across newlines, if they are
+            # docstrings; fold spaces after newlines when comparing. Similarly,
+            # trailing and leading space may be removed.
+ if ( + isinstance(node, ast.Constant) + and field == "value" + and isinstance(value, str) + ): + normalized = _normalize("\n", value) + else: + normalized = value + yield f"{' ' * (depth+2)}{normalized!r}, # {value.__class__.__name__}" + + yield f"{' ' * depth}) # /{node.__class__.__name__}" + + +def fixup_ast_constants(node: Union[ast.AST, ast3.AST]) -> Union[ast.AST, ast3.AST]: + """Map ast nodes deprecated in 3.8 to Constant.""" + if isinstance(node, (ast.Str, ast3.Str, ast.Bytes, ast3.Bytes)): + return ast.Constant(value=node.s) + + if isinstance(node, (ast.Num, ast3.Num)): + return ast.Constant(value=node.n) + + if isinstance(node, (ast.NameConstant, ast3.NameConstant)): + return ast.Constant(value=node.value) + + return node diff --git a/.venv/lib/python3.11/site-packages/black/py.typed b/.venv/lib/python3.11/site-packages/black/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.11/site-packages/black/report.py b/.venv/lib/python3.11/site-packages/black/report.py new file mode 100644 index 0000000..a507671 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/black/report.py @@ -0,0 +1,106 @@ +""" +Summarize Black runs to users. +""" +from dataclasses import dataclass +from enum import Enum +from pathlib import Path + +from click import style + +from black.output import err, out + + +class Changed(Enum): + NO = 0 + CACHED = 1 + YES = 2 + + +class NothingChanged(UserWarning): + """Raised when reformatted code is the same as source.""" + + +@dataclass +class Report: + """Provides a reformatting counter. Can be rendered with `str(report)`.""" + + check: bool = False + diff: bool = False + quiet: bool = False + verbose: bool = False + change_count: int = 0 + same_count: int = 0 + failure_count: int = 0 + + def done(self, src: Path, changed: Changed) -> None: + """Increment the counter for successful reformatting. Write out a message.""" + if changed is Changed.YES: + reformatted = "would reformat" if self.check or self.diff else "reformatted" + if self.verbose or not self.quiet: + out(f"{reformatted} {src}") + self.change_count += 1 + else: + if self.verbose: + if changed is Changed.NO: + msg = f"{src} already well formatted, good job." + else: + msg = f"{src} wasn't modified on disk since last run." + out(msg, bold=False) + self.same_count += 1 + + def failed(self, src: Path, message: str) -> None: + """Increment the counter for failed reformatting. Write out a message.""" + err(f"error: cannot format {src}: {message}") + self.failure_count += 1 + + def path_ignored(self, path: Path, message: str) -> None: + if self.verbose: + out(f"{path} ignored: {message}", bold=False) + + @property + def return_code(self) -> int: + """Return the exit code that the app should use. + + This considers the current state of changed files and failures: + - if there were any failures, return 123; + - if any files were changed and --check is being used, return 1; + - otherwise return 0. + """ + # According to http://tldp.org/LDP/abs/html/exitcodes.html starting with + # 126 we have special return codes reserved by the shell. + if self.failure_count: + return 123 + + elif self.change_count and self.check: + return 1 + + return 0 + + def __str__(self) -> str: + """Render a color report of the current state. + + Use `click.unstyle` to remove colors. 
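A short sketch of the `Report` counters and the exit-code policy above, driving a --check style run:

    from pathlib import Path
    from black.report import Changed, Report

    report = Report(check=True)
    report.done(Path("a.py"), Changed.YES)   # prints "would reformat a.py"
    report.done(Path("b.py"), Changed.NO)    # counted, nothing printed
    report.failed(Path("c.py"), "cannot parse")
    print(report.return_code)  # 123: failures trump the --check exit code of 1
    print(str(report))
    # 1 file would be reformatted, 1 file would be left unchanged, 1 file would
    # fail to reformat.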
+ """ + if self.check or self.diff: + reformatted = "would be reformatted" + unchanged = "would be left unchanged" + failed = "would fail to reformat" + else: + reformatted = "reformatted" + unchanged = "left unchanged" + failed = "failed to reformat" + report = [] + if self.change_count: + s = "s" if self.change_count > 1 else "" + report.append( + style(f"{self.change_count} file{s} ", bold=True, fg="blue") + + style(f"{reformatted}", bold=True) + ) + + if self.same_count: + s = "s" if self.same_count > 1 else "" + report.append(style(f"{self.same_count} file{s} ", fg="blue") + unchanged) + if self.failure_count: + s = "s" if self.failure_count > 1 else "" + report.append(style(f"{self.failure_count} file{s} {failed}", fg="red")) + return ", ".join(report) + "." diff --git a/.venv/lib/python3.11/site-packages/black/rusty.cpython-311-x86_64-linux-gnu.so b/.venv/lib/python3.11/site-packages/black/rusty.cpython-311-x86_64-linux-gnu.so new file mode 100755 index 0000000..27703da Binary files /dev/null and b/.venv/lib/python3.11/site-packages/black/rusty.cpython-311-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.11/site-packages/black/rusty.py b/.venv/lib/python3.11/site-packages/black/rusty.py new file mode 100644 index 0000000..84a80b5 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/black/rusty.py @@ -0,0 +1,27 @@ +"""An error-handling model influenced by that used by the Rust programming language + +See https://doc.rust-lang.org/book/ch09-00-error-handling.html. +""" +from typing import Generic, TypeVar, Union + +T = TypeVar("T") +E = TypeVar("E", bound=Exception) + + +class Ok(Generic[T]): + def __init__(self, value: T) -> None: + self._value = value + + def ok(self) -> T: + return self._value + + +class Err(Generic[E]): + def __init__(self, e: E) -> None: + self._e = e + + def err(self) -> E: + return self._e + + +Result = Union[Ok[T], Err[E]] diff --git a/.venv/lib/python3.11/site-packages/black/strings.cpython-311-x86_64-linux-gnu.so b/.venv/lib/python3.11/site-packages/black/strings.cpython-311-x86_64-linux-gnu.so new file mode 100755 index 0000000..aea34c6 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/black/strings.cpython-311-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.11/site-packages/black/strings.py b/.venv/lib/python3.11/site-packages/black/strings.py new file mode 100644 index 0000000..3e3bc12 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/black/strings.py @@ -0,0 +1,280 @@ +""" +Simple formatting on strings. Further string formatting code is in trans.py. +""" + +import re +import sys +from functools import lru_cache +from typing import List, Match, Pattern + +from blib2to3.pytree import Leaf + +if sys.version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final + + +STRING_PREFIX_CHARS: Final = "furbFURB" # All possible string prefix characters. 
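The `Result` machinery in rusty.py above is just enough to type Rust-style returns; a small, self-contained usage sketch:

    from black.rusty import Err, Ok, Result

    def parse_int(text: str) -> Result[int, ValueError]:
        try:
            return Ok(int(text))
        except ValueError as exc:
            return Err(exc)

    result = parse_int("42")
    if isinstance(result, Ok):
        print(result.ok())   # 42
    else:
        print(result.err())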
+STRING_PREFIX_RE: Final = re.compile(
+    r"^([" + STRING_PREFIX_CHARS + r"]*)(.*)$", re.DOTALL
+)
+FIRST_NON_WHITESPACE_RE: Final = re.compile(r"\s*\t+\s*(\S)")
+UNICODE_ESCAPE_RE: Final = re.compile(
+    r"(?P<backslashes>\\+)(?P<body>"
+    r"(u(?P<u>[a-fA-F0-9]{4}))"  # Character with 16-bit hex value xxxx
+    r"|(U(?P<U>[a-fA-F0-9]{8}))"  # Character with 32-bit hex value xxxxxxxx
+    r"|(x(?P<x>[a-fA-F0-9]{2}))"  # Character with hex value hh
+    r"|(N\{(?P<N>[a-zA-Z0-9 \-]{2,})\})"  # Character named name in the Unicode database
+    r")",
+    re.VERBOSE,
+)
+
+
+def sub_twice(regex: Pattern[str], replacement: str, original: str) -> str:
+    """Replace `regex` with `replacement` twice on `original`.
+
+    This is used by string normalization to perform replaces on
+    overlapping matches.
+    """
+    return regex.sub(replacement, regex.sub(replacement, original))
+
+
+def has_triple_quotes(string: str) -> bool:
+    """
+    Returns:
+        True iff @string starts with three quotation characters.
+    """
+    raw_string = string.lstrip(STRING_PREFIX_CHARS)
+    return raw_string[:3] in {'"""', "'''"}
+
+
+def lines_with_leading_tabs_expanded(s: str) -> List[str]:
+    """
+    Splits string into lines and expands only leading tabs (following the normal
+    Python rules).
+    """
+    lines = []
+    for line in s.splitlines():
+        # Find the index of the first non-whitespace character after a string of
+        # whitespace that includes at least one tab
+        match = FIRST_NON_WHITESPACE_RE.match(line)
+        if match:
+            first_non_whitespace_idx = match.start(1)
+
+            lines.append(
+                line[:first_non_whitespace_idx].expandtabs()
+                + line[first_non_whitespace_idx:]
+            )
+        else:
+            lines.append(line)
+    return lines
+
+
+def fix_docstring(docstring: str, prefix: str) -> str:
+    # https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation
+    if not docstring:
+        return ""
+    lines = lines_with_leading_tabs_expanded(docstring)
+    # Determine minimum indentation (first line doesn't count):
+    indent = sys.maxsize
+    for line in lines[1:]:
+        stripped = line.lstrip()
+        if stripped:
+            indent = min(indent, len(line) - len(stripped))
+    # Remove indentation (first line is special):
+    trimmed = [lines[0].strip()]
+    if indent < sys.maxsize:
+        last_line_idx = len(lines) - 2
+        for i, line in enumerate(lines[1:]):
+            stripped_line = line[indent:].rstrip()
+            if stripped_line or i == last_line_idx:
+                trimmed.append(prefix + stripped_line)
+            else:
+                trimmed.append("")
+    return "\n".join(trimmed)
+
+
+def get_string_prefix(string: str) -> str:
+    """
+    Pre-conditions:
+        * assert_is_leaf_string(@string)
+
+    Returns:
+        @string's prefix (e.g. '', 'r', 'f', or 'rf').
+    """
+    assert_is_leaf_string(string)
+
+    prefix = ""
+    prefix_idx = 0
+    while string[prefix_idx] in STRING_PREFIX_CHARS:
+        prefix += string[prefix_idx]
+        prefix_idx += 1
+
+    return prefix
+
+
+def assert_is_leaf_string(string: str) -> None:
+    """
+    Checks the pre-condition that @string has the format that you would expect
+    of `leaf.value` where `leaf` is some Leaf such that `leaf.type ==
+    token.STRING`. A more precise description of the pre-conditions that are
+    checked is listed below.
+
+    Pre-conditions:
+        * @string starts with either ', ", <prefix>', or <prefix>" where
+          `set(<prefix>)` is some subset of `set(STRING_PREFIX_CHARS)`.
+        * @string ends with a quote character (' or ").
+
+    Raises:
+        AssertionError(...) if the pre-conditions listed above are not
+        satisfied.
+ """ + dquote_idx = string.find('"') + squote_idx = string.find("'") + if -1 in [dquote_idx, squote_idx]: + quote_idx = max(dquote_idx, squote_idx) + else: + quote_idx = min(squote_idx, dquote_idx) + + assert ( + 0 <= quote_idx < len(string) - 1 + ), f"{string!r} is missing a starting quote character (' or \")." + assert string[-1] in ( + "'", + '"', + ), f"{string!r} is missing an ending quote character (' or \")." + assert set(string[:quote_idx]).issubset( + set(STRING_PREFIX_CHARS) + ), f"{set(string[:quote_idx])} is NOT a subset of {set(STRING_PREFIX_CHARS)}." + + +def normalize_string_prefix(s: str) -> str: + """Make all string prefixes lowercase.""" + match = STRING_PREFIX_RE.match(s) + assert match is not None, f"failed to match string {s!r}" + orig_prefix = match.group(1) + new_prefix = ( + orig_prefix.replace("F", "f") + .replace("B", "b") + .replace("U", "") + .replace("u", "") + ) + + # Python syntax guarantees max 2 prefixes and that one of them is "r" + if len(new_prefix) == 2 and "r" != new_prefix[0].lower(): + new_prefix = new_prefix[::-1] + return f"{new_prefix}{match.group(2)}" + + +# Re(gex) does actually cache patterns internally but this still improves +# performance on a long list literal of strings by 5-9% since lru_cache's +# caching overhead is much lower. +@lru_cache(maxsize=64) +def _cached_compile(pattern: str) -> Pattern[str]: + return re.compile(pattern) + + +def normalize_string_quotes(s: str) -> str: + """Prefer double quotes but only if it doesn't cause more escaping. + + Adds or removes backslashes as appropriate. Doesn't parse and fix + strings nested in f-strings. + """ + value = s.lstrip(STRING_PREFIX_CHARS) + if value[:3] == '"""': + return s + + elif value[:3] == "'''": + orig_quote = "'''" + new_quote = '"""' + elif value[0] == '"': + orig_quote = '"' + new_quote = "'" + else: + orig_quote = "'" + new_quote = '"' + first_quote_pos = s.find(orig_quote) + if first_quote_pos == -1: + return s # There's an internal error + + prefix = s[:first_quote_pos] + unescaped_new_quote = _cached_compile(rf"(([^\\]|^)(\\\\)*){new_quote}") + escaped_new_quote = _cached_compile(rf"([^\\]|^)\\((?:\\\\)*){new_quote}") + escaped_orig_quote = _cached_compile(rf"([^\\]|^)\\((?:\\\\)*){orig_quote}") + body = s[first_quote_pos + len(orig_quote) : -len(orig_quote)] + if "r" in prefix.casefold(): + if unescaped_new_quote.search(body): + # There's at least one unescaped new_quote in this raw string + # so converting is impossible + return s + + # Do not introduce or remove backslashes in raw strings + new_body = body + else: + # remove unnecessary escapes + new_body = sub_twice(escaped_new_quote, rf"\1\2{new_quote}", body) + if body != new_body: + # Consider the string without unnecessary escapes as the original + body = new_body + s = f"{prefix}{orig_quote}{body}{orig_quote}" + new_body = sub_twice(escaped_orig_quote, rf"\1\2{orig_quote}", new_body) + new_body = sub_twice(unescaped_new_quote, rf"\1\\{new_quote}", new_body) + if "f" in prefix.casefold(): + matches = re.findall( + r""" + (?:(? 
<!\{)|^)  # start of string or non-{ followed by
+            \{  # and covers interpolated expressions
+            ([^{].*?)  # match everything except another { symbol
+            \}  # closed curly brace
+            """,
+            new_body,
+            re.VERBOSE,
+        )
+        for m in matches:
+            if "\\" in str(m):
+                # Do not introduce backslashes in interpolated expressions. That will
+                # change the perceived behavior of the value inside the f-string in
+                # the final code.
+                return s
+
+    if new_quote == '"""' and new_body[-1:] == '"':
+        # edge case:
+        new_body = new_body[:-1] + '\\"'
+    orig_escape_count = body.count("\\")
+    new_escape_count = new_body.count("\\")
+    if new_escape_count > orig_escape_count:
+        return s  # Do not introduce more escaping
+
+    if new_escape_count == orig_escape_count and orig_quote == '"':
+        return s  # Prefer double quotes
+
+    return f"{prefix}{new_quote}{new_body}{new_quote}"
+
+
+def normalize_unicode_escape_sequences(leaf: Leaf) -> None:
+    """Replace hex codes in Unicode escape sequences with lowercase representation."""
+    text = leaf.value
+    prefix = get_string_prefix(text)
+    if "r" in prefix.lower():
+        return
+
+    def replace(m: Match[str]) -> str:
+        groups = m.groupdict()
+        back_slashes = groups["backslashes"]
+
+        if len(back_slashes) % 2 == 0:
+            return back_slashes + groups["body"]
+
+        if groups["u"]:
+            # \u
+            return back_slashes + "u" + groups["u"].lower()
+        elif groups["U"]:
+            # \U
+            return back_slashes + "U" + groups["U"].lower()
+        elif groups["x"]:
+            # \x
+            return back_slashes + "x" + groups["x"].lower()
+        else:
+            assert groups["N"], f"Unexpected match: {m}"
+            # \N{}
+            return back_slashes + "N{" + groups["N"].upper() + "}"
+
+    leaf.value = re.sub(UNICODE_ESCAPE_RE, replace, text)
diff --git a/.venv/lib/python3.11/site-packages/black/trans.cpython-311-x86_64-linux-gnu.so b/.venv/lib/python3.11/site-packages/black/trans.cpython-311-x86_64-linux-gnu.so
new file mode 100755
index 0000000..6a576b2
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/black/trans.cpython-311-x86_64-linux-gnu.so differ
diff --git a/.venv/lib/python3.11/site-packages/black/trans.py b/.venv/lib/python3.11/site-packages/black/trans.py
new file mode 100644
index 0000000..2360c13
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/black/trans.py
@@ -0,0 +1,2392 @@
+"""
+String transformers that can split and merge strings.
+"""
+import re
+import sys
+from abc import ABC, abstractmethod
+from collections import defaultdict
+from dataclasses import dataclass
+from typing import (
+    Any,
+    Callable,
+    ClassVar,
+    Collection,
+    Dict,
+    Iterable,
+    Iterator,
+    List,
+    Optional,
+    Sequence,
+    Set,
+    Tuple,
+    TypeVar,
+    Union,
+)
+
+if sys.version_info < (3, 8):
+    from typing_extensions import Final, Literal
+else:
+    from typing import Literal, Final
+
+from mypy_extensions import trait
+
+from black.comments import contains_pragma_comment
+from black.lines import Line, append_leaves
+from black.mode import Feature
+from black.nodes import (
+    CLOSING_BRACKETS,
+    OPENING_BRACKETS,
+    STANDALONE_COMMENT,
+    is_empty_lpar,
+    is_empty_par,
+    is_empty_rpar,
+    is_part_of_annotation,
+    parent_type,
+    replace_child,
+    syms,
+)
+from black.rusty import Err, Ok, Result
+from black.strings import (
+    assert_is_leaf_string,
+    get_string_prefix,
+    has_triple_quotes,
+    normalize_string_quotes,
+)
+from blib2to3.pgen2 import token
+from blib2to3.pytree import Leaf, Node
+
+
+class CannotTransform(Exception):
+    """Base class for errors raised by Transformers."""
+
+
+# types
+T = TypeVar("T")
+LN = Union[Leaf, Node]
+Transformer = Callable[[Line, Collection[Feature]], Iterator[Line]]
+Index = int
+NodeType = int
+ParserState = int
+StringID = int
+TResult = Result[T, CannotTransform]  # (T)ransform Result
+TMatchResult = TResult[List[Index]]
+
+
+def TErr(err_msg: str) -> Err[CannotTransform]:
+    """(T)ransform Err
+
+    Convenience function used when working with the TResult type.
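+
+    For example (editor's illustration), instead of hand-building
+    `Err(CannotTransform(...))`, matching code can simply write:
+
+        match_result = TErr("This line has no strings.")
+        assert isinstance(match_result, Err)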
+ """ + cant_transform = CannotTransform(err_msg) + return Err(cant_transform) + + +def hug_power_op(line: Line, features: Collection[Feature]) -> Iterator[Line]: + """A transformer which normalizes spacing around power operators.""" + + # Performance optimization to avoid unnecessary Leaf clones and other ops. + for leaf in line.leaves: + if leaf.type == token.DOUBLESTAR: + break + else: + raise CannotTransform("No doublestar token was found in the line.") + + def is_simple_lookup(index: int, step: Literal[1, -1]) -> bool: + # Brackets and parentheses indicate calls, subscripts, etc. ... + # basically stuff that doesn't count as "simple". Only a NAME lookup + # or dotted lookup (eg. NAME.NAME) is OK. + if step == -1: + disallowed = {token.RPAR, token.RSQB} + else: + disallowed = {token.LPAR, token.LSQB} + + while 0 <= index < len(line.leaves): + current = line.leaves[index] + if current.type in disallowed: + return False + if current.type not in {token.NAME, token.DOT} or current.value == "for": + # If the current token isn't disallowed, we'll assume this is simple as + # only the disallowed tokens are semantically attached to this lookup + # expression we're checking. Also, stop early if we hit the 'for' bit + # of a comprehension. + return True + + index += step + + return True + + def is_simple_operand(index: int, kind: Literal["base", "exponent"]) -> bool: + # An operand is considered "simple" if's a NAME, a numeric CONSTANT, a simple + # lookup (see above), with or without a preceding unary operator. + start = line.leaves[index] + if start.type in {token.NAME, token.NUMBER}: + return is_simple_lookup(index, step=(1 if kind == "exponent" else -1)) + + if start.type in {token.PLUS, token.MINUS, token.TILDE}: + if line.leaves[index + 1].type in {token.NAME, token.NUMBER}: + # step is always one as bases with a preceding unary op will be checked + # for simplicity starting from the next token (so it'll hit the check + # above). + return is_simple_lookup(index + 1, step=1) + + return False + + new_line = line.clone() + should_hug = False + for idx, leaf in enumerate(line.leaves): + new_leaf = leaf.clone() + if should_hug: + new_leaf.prefix = "" + should_hug = False + + should_hug = ( + (0 < idx < len(line.leaves) - 1) + and leaf.type == token.DOUBLESTAR + and is_simple_operand(idx - 1, kind="base") + and line.leaves[idx - 1].value != "lambda" + and is_simple_operand(idx + 1, kind="exponent") + ) + if should_hug: + new_leaf.prefix = "" + + # We have to be careful to make a new line properly: + # - bracket related metadata must be maintained (handled by Line.append) + # - comments need to copied over, updating the leaf IDs they're attached to + new_line.append(new_leaf, preformatted=True) + for comment_leaf in line.comments_after(leaf): + new_line.append(comment_leaf, preformatted=True) + + yield new_line + + +class StringTransformer(ABC): + """ + An implementation of the Transformer protocol that relies on its + subclasses overriding the template methods `do_match(...)` and + `do_transform(...)`. + + This Transformer works exclusively on strings (for example, by merging + or splitting them). + + The following sections can be found among the docstrings of each concrete + StringTransformer subclass. + + Requirements: + Which requirements must be met of the given Line for this + StringTransformer to be applied? + + Transformations: + If the given Line meets all of the above requirements, which string + transformations can you expect to be applied to it by this + StringTransformer? 
+ + Collaborations: + What contractual agreements does this StringTransformer have with other + StringTransfomers? Such collaborations should be eliminated/minimized + as much as possible. + """ + + __name__: Final = "StringTransformer" + + # Ideally this would be a dataclass, but unfortunately mypyc breaks when used with + # `abc.ABC`. + def __init__(self, line_length: int, normalize_strings: bool) -> None: + self.line_length = line_length + self.normalize_strings = normalize_strings + + @abstractmethod + def do_match(self, line: Line) -> TMatchResult: + """ + Returns: + * Ok(string_indices) such that for each index, `line.leaves[index]` + is our target string if a match was able to be made. For + transformers that don't result in more lines (e.g. StringMerger, + StringParenStripper), multiple matches and transforms are done at + once to reduce the complexity. + OR + * Err(CannotTransform), if no match could be made. + """ + + @abstractmethod + def do_transform( + self, line: Line, string_indices: List[int] + ) -> Iterator[TResult[Line]]: + """ + Yields: + * Ok(new_line) where new_line is the new transformed line. + OR + * Err(CannotTransform) if the transformation failed for some reason. The + `do_match(...)` template method should usually be used to reject + the form of the given Line, but in some cases it is difficult to + know whether or not a Line meets the StringTransformer's + requirements until the transformation is already midway. + + Side Effects: + This method should NOT mutate @line directly, but it MAY mutate the + Line's underlying Node structure. (WARNING: If the underlying Node + structure IS altered, then this method should NOT be allowed to + yield an CannotTransform after that point.) + """ + + def __call__(self, line: Line, _features: Collection[Feature]) -> Iterator[Line]: + """ + StringTransformer instances have a call signature that mirrors that of + the Transformer type. + + Raises: + CannotTransform(...) if the concrete StringTransformer class is unable + to transform @line. + """ + # Optimization to avoid calling `self.do_match(...)` when the line does + # not contain any string. + if not any(leaf.type == token.STRING for leaf in line.leaves): + raise CannotTransform("There are no strings in this line.") + + match_result = self.do_match(line) + + if isinstance(match_result, Err): + cant_transform = match_result.err() + raise CannotTransform( + f"The string transformer {self.__class__.__name__} does not recognize" + " this line as one that it can transform." + ) from cant_transform + + string_indices = match_result.ok() + + for line_result in self.do_transform(line, string_indices): + if isinstance(line_result, Err): + cant_transform = line_result.err() + raise CannotTransform( + "StringTransformer failed while attempting to transform string." + ) from cant_transform + line = line_result.ok() + yield line + + +@dataclass +class CustomSplit: + """A custom (i.e. manual) string split. + + A single CustomSplit instance represents a single substring. + + Examples: + Consider the following string: + ``` + "Hi there friend." + " This is a custom" + f" string {split}." 
+ ``` + + This string will correspond to the following three CustomSplit instances: + ``` + CustomSplit(False, 16) + CustomSplit(False, 17) + CustomSplit(True, 16) + ``` + """ + + has_prefix: bool + break_idx: int + + +@trait +class CustomSplitMapMixin: + """ + This mixin class is used to map merged strings to a sequence of + CustomSplits, which will then be used to re-split the strings iff none of + the resultant substrings go over the configured max line length. + """ + + _Key: ClassVar = Tuple[StringID, str] + _CUSTOM_SPLIT_MAP: ClassVar[Dict[_Key, Tuple[CustomSplit, ...]]] = defaultdict( + tuple + ) + + @staticmethod + def _get_key(string: str) -> "CustomSplitMapMixin._Key": + """ + Returns: + A unique identifier that is used internally to map @string to a + group of custom splits. + """ + return (id(string), string) + + def add_custom_splits( + self, string: str, custom_splits: Iterable[CustomSplit] + ) -> None: + """Custom Split Map Setter Method + + Side Effects: + Adds a mapping from @string to the custom splits @custom_splits. + """ + key = self._get_key(string) + self._CUSTOM_SPLIT_MAP[key] = tuple(custom_splits) + + def pop_custom_splits(self, string: str) -> List[CustomSplit]: + """Custom Split Map Getter Method + + Returns: + * A list of the custom splits that are mapped to @string, if any + exist. + OR + * [], otherwise. + + Side Effects: + Deletes the mapping between @string and its associated custom + splits (which are returned to the caller). + """ + key = self._get_key(string) + + custom_splits = self._CUSTOM_SPLIT_MAP[key] + del self._CUSTOM_SPLIT_MAP[key] + + return list(custom_splits) + + def has_custom_splits(self, string: str) -> bool: + """ + Returns: + True iff @string is associated with a set of custom splits. + """ + key = self._get_key(string) + return key in self._CUSTOM_SPLIT_MAP + + +class StringMerger(StringTransformer, CustomSplitMapMixin): + """StringTransformer that merges strings together. + + Requirements: + (A) The line contains adjacent strings such that ALL of the validation checks + listed in StringMerger._validate_msg(...)'s docstring pass. + OR + (B) The line contains a string which uses line continuation backslashes. + + Transformations: + Depending on which of the two requirements above where met, either: + + (A) The string group associated with the target string is merged. + OR + (B) All line-continuation backslashes are removed from the target string. + + Collaborations: + StringMerger provides custom split information to StringSplitter. + """ + + def do_match(self, line: Line) -> TMatchResult: + LL = line.leaves + + is_valid_index = is_valid_index_factory(LL) + + string_indices = [] + idx = 0 + while is_valid_index(idx): + leaf = LL[idx] + if ( + leaf.type == token.STRING + and is_valid_index(idx + 1) + and LL[idx + 1].type == token.STRING + ): + if not is_part_of_annotation(leaf): + string_indices.append(idx) + + # Advance to the next non-STRING leaf. + idx += 2 + while is_valid_index(idx) and LL[idx].type == token.STRING: + idx += 1 + + elif leaf.type == token.STRING and "\\\n" in leaf.value: + string_indices.append(idx) + # Advance to the next non-STRING leaf. 
+ idx += 1 + while is_valid_index(idx) and LL[idx].type == token.STRING: + idx += 1 + + else: + idx += 1 + + if string_indices: + return Ok(string_indices) + else: + return TErr("This line has no strings that need merging.") + + def do_transform( + self, line: Line, string_indices: List[int] + ) -> Iterator[TResult[Line]]: + new_line = line + + rblc_result = self._remove_backslash_line_continuation_chars( + new_line, string_indices + ) + if isinstance(rblc_result, Ok): + new_line = rblc_result.ok() + + msg_result = self._merge_string_group(new_line, string_indices) + if isinstance(msg_result, Ok): + new_line = msg_result.ok() + + if isinstance(rblc_result, Err) and isinstance(msg_result, Err): + msg_cant_transform = msg_result.err() + rblc_cant_transform = rblc_result.err() + cant_transform = CannotTransform( + "StringMerger failed to merge any strings in this line." + ) + + # Chain the errors together using `__cause__`. + msg_cant_transform.__cause__ = rblc_cant_transform + cant_transform.__cause__ = msg_cant_transform + + yield Err(cant_transform) + else: + yield Ok(new_line) + + @staticmethod + def _remove_backslash_line_continuation_chars( + line: Line, string_indices: List[int] + ) -> TResult[Line]: + """ + Merge strings that were split across multiple lines using + line-continuation backslashes. + + Returns: + Ok(new_line), if @line contains backslash line-continuation + characters. + OR + Err(CannotTransform), otherwise. + """ + LL = line.leaves + + indices_to_transform = [] + for string_idx in string_indices: + string_leaf = LL[string_idx] + if ( + string_leaf.type == token.STRING + and "\\\n" in string_leaf.value + and not has_triple_quotes(string_leaf.value) + ): + indices_to_transform.append(string_idx) + + if not indices_to_transform: + return TErr( + "Found no string leaves that contain backslash line continuation" + " characters." + ) + + new_line = line.clone() + new_line.comments = line.comments.copy() + append_leaves(new_line, line, LL) + + for string_idx in indices_to_transform: + new_string_leaf = new_line.leaves[string_idx] + new_string_leaf.value = new_string_leaf.value.replace("\\\n", "") + + return Ok(new_line) + + def _merge_string_group( + self, line: Line, string_indices: List[int] + ) -> TResult[Line]: + """ + Merges string groups (i.e. set of adjacent strings). + + Each index from `string_indices` designates one string group's first + leaf in `line.leaves`. + + Returns: + Ok(new_line), if ALL of the validation checks found in + _validate_msg(...) pass. + OR + Err(CannotTransform), otherwise. + """ + LL = line.leaves + + is_valid_index = is_valid_index_factory(LL) + + # A dict of {string_idx: tuple[num_of_strings, string_leaf]}. + merged_string_idx_dict: Dict[int, Tuple[int, Leaf]] = {} + for string_idx in string_indices: + vresult = self._validate_msg(line, string_idx) + if isinstance(vresult, Err): + continue + merged_string_idx_dict[string_idx] = self._merge_one_string_group( + LL, string_idx, is_valid_index + ) + + if not merged_string_idx_dict: + return TErr("No string group is merged") + + # Build the final line ('new_line') that this method will later return. 
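+        # (Editor's sketch, illustrative: for `x = "a" "b" "c"`, the dict maps
+        # the index of `"a"` to (3, <Leaf '"abc"'>); the loop below appends the
+        # merged leaf once, carries over any comments attached to the three
+        # original strings, and skips appending the originals themselves.)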
+        new_line = line.clone()
+        previous_merged_string_idx = -1
+        previous_merged_num_of_strings = -1
+        for i, leaf in enumerate(LL):
+            if i in merged_string_idx_dict:
+                previous_merged_string_idx = i
+                previous_merged_num_of_strings, string_leaf = merged_string_idx_dict[i]
+                new_line.append(string_leaf)
+
+            if (
+                previous_merged_string_idx
+                <= i
+                < previous_merged_string_idx + previous_merged_num_of_strings
+            ):
+                for comment_leaf in line.comments_after(LL[i]):
+                    new_line.append(comment_leaf, preformatted=True)
+                continue
+
+            append_leaves(new_line, line, [leaf])
+
+        return Ok(new_line)
+
+    def _merge_one_string_group(
+        self, LL: List[Leaf], string_idx: int, is_valid_index: Callable[[int], bool]
+    ) -> Tuple[int, Leaf]:
+        """
+        Merges one string group where the first string in the group is
+        `LL[string_idx]`.
+
+        Returns:
+            A tuple of `(num_of_strings, leaf)` where `num_of_strings` is the
+            number of strings merged and `leaf` is the newly merged string
+            to be replaced in the new line.
+        """
+        # If the string group is wrapped inside an Atom node, we must make sure
+        # to later replace that Atom with our new (merged) string leaf.
+        atom_node = LL[string_idx].parent
+
+        # We will place BREAK_MARK in between every two substrings that we
+        # merge. We will then later go through our final result and use the
+        # various instances of BREAK_MARK we find to add the right values to
+        # the custom split map.
+        BREAK_MARK = "@@@@@ BLACK BREAKPOINT MARKER @@@@@"
+
+        QUOTE = LL[string_idx].value[-1]
+
+        def make_naked(string: str, string_prefix: str) -> str:
+            """Strip @string (i.e. make it a "naked" string)
+
+            Pre-conditions:
+                * assert_is_leaf_string(@string)
+
+            Returns:
+                A string that is identical to @string except that
+                @string_prefix has been stripped, the surrounding QUOTE
+                characters have been removed, and any remaining QUOTE
+                characters have been escaped.
+            """
+            assert_is_leaf_string(string)
+            if "f" in string_prefix:
+                string = _toggle_fexpr_quotes(string, QUOTE)
+                # After quotes toggling, quotes in expressions won't be escaped
+                # because quotes can't be reused in f-strings. So we can simply
+                # let the escaping logic below run without knowing f-string
+                # expressions.
+
+            RE_EVEN_BACKSLASHES = r"(?:(?<!\\)(?:\\\\)*)"
+            naked_string = string[len(string_prefix) + 1 : -1]
+            naked_string = re.sub(
+                "(" + RE_EVEN_BACKSLASHES + ")" + QUOTE, r"\1\\" + QUOTE, naked_string
+            )
+            return naked_string
+
+        # Holds the CustomSplit objects that will later be added to the custom
+        # split map.
+        custom_splits = []
+
+        # Temporary storage for the 'has_prefix' part of the CustomSplit objects.
+        prefix_tracker = []
+
+        # Sets the 'prefix' variable. This is the prefix that the final merged
+        # string will have.
+        next_str_idx = string_idx
+        prefix = ""
+        while (
+            not prefix
+            and is_valid_index(next_str_idx)
+            and LL[next_str_idx].type == token.STRING
+        ):
+            prefix = get_string_prefix(LL[next_str_idx].value).lower()
+            next_str_idx += 1
+
+        # The next loop merges the string group. The final string will be
+        # contained in 'S'.
+        #
+        # The following convenience variables are used:
+        #
+        #   S: string
+        #   NS: naked string
+        #   SS: next string
+        #   NSS: naked next string
+        S = ""
+        NS = ""
+        num_of_strings = 0
+        next_str_idx = string_idx
+        while is_valid_index(next_str_idx) and LL[next_str_idx].type == token.STRING:
+            num_of_strings += 1
+
+            SS = LL[next_str_idx].value
+            next_prefix = get_string_prefix(SS).lower()
+
+            # If this is an f-string group but this substring is not prefixed
+            # with 'f'...
+            if "f" in prefix and "f" not in next_prefix:
+                # Then we must escape any braces contained in this substring.
+                SS = re.sub(r"(\{|\})", r"\1\1", SS)
+
+            NSS = make_naked(SS, next_prefix)
+
+            has_prefix = bool(next_prefix)
+            prefix_tracker.append(has_prefix)
+
+            S = prefix + QUOTE + NS + NSS + BREAK_MARK + QUOTE
+            NS = S[len(prefix) + 1 : -1]  # Naked string
+
+            next_str_idx += 1
+
+        # Take a note on the index of the non-STRING leaf.
+        non_string_idx = next_str_idx
+
+        S_leaf = Leaf(token.STRING, S)
+        if self.normalize_strings:
+            S_leaf.value = normalize_string_quotes(S_leaf.value)
+
+        # Fill the 'custom_splits' list with the appropriate CustomSplit objects.
+        temp_string = S_leaf.value[len(prefix) + 1 : -1]
+        for has_prefix in prefix_tracker:
+            mark_idx = temp_string.find(BREAK_MARK)
+            assert (
+                mark_idx >= 0
+            ), "Logic error while filling the custom string breakpoint cache."
+
+            temp_string = temp_string[mark_idx + len(BREAK_MARK) :]
+            breakpoint_idx = mark_idx + (len(prefix) if has_prefix else 0) + 1
+            custom_splits.append(CustomSplit(has_prefix, breakpoint_idx))
+
+        string_leaf = Leaf(token.STRING, S_leaf.value.replace(BREAK_MARK, ""))
+
+        if atom_node is not None:
+            # If not all children of the atom node are merged (this can happen
+            # when there is a standalone comment in the middle) ...
+            if non_string_idx - string_idx < len(atom_node.children):
+                # We need to replace the old STRING leaves with the new string leaf.
+                first_child_idx = LL[string_idx].remove()
+                for idx in range(string_idx + 1, non_string_idx):
+                    LL[idx].remove()
+                if first_child_idx is not None:
+                    atom_node.insert_child(first_child_idx, string_leaf)
+            else:
+                # Else replace the atom node with the new string leaf.
+                replace_child(atom_node, string_leaf)
+
+        self.add_custom_splits(string_leaf.value, custom_splits)
+        return num_of_strings, string_leaf
+
+    @staticmethod
+    def _validate_msg(line: Line, string_idx: int) -> TResult[None]:
+        """Validate (M)erge (S)tring (G)roup
+
+        Transform-time string validation logic for _merge_string_group(...).
+
+        Returns:
+            * Ok(None), if ALL validation checks (listed below) pass.
+ OR + * Err(CannotTransform), if any of the following are true: + - The target string group does not contain ANY stand-alone comments. + - The target string is not in a string group (i.e. it has no + adjacent strings). + - The string group has more than one inline comment. + - The string group has an inline comment that appears to be a pragma. + - The set of all string prefixes in the string group is of + length greater than one and is not equal to {"", "f"}. + - The string group consists of raw strings. + - The string group is stringified type annotations. We don't want to + process stringified type annotations since pyright doesn't support + them spanning multiple string values. (NOTE: mypy, pytype, pyre do + support them, so we can change if pyright also gains support in the + future. See https://github.com/microsoft/pyright/issues/4359.) + """ + # We first check for "inner" stand-alone comments (i.e. stand-alone + # comments that have a string leaf before them AND after them). + for inc in [1, -1]: + i = string_idx + found_sa_comment = False + is_valid_index = is_valid_index_factory(line.leaves) + while is_valid_index(i) and line.leaves[i].type in [ + token.STRING, + STANDALONE_COMMENT, + ]: + if line.leaves[i].type == STANDALONE_COMMENT: + found_sa_comment = True + elif found_sa_comment: + return TErr( + "StringMerger does NOT merge string groups which contain " + "stand-alone comments." + ) + + i += inc + + num_of_inline_string_comments = 0 + set_of_prefixes = set() + num_of_strings = 0 + for leaf in line.leaves[string_idx:]: + if leaf.type != token.STRING: + # If the string group is trailed by a comma, we count the + # comments trailing the comma to be one of the string group's + # comments. + if leaf.type == token.COMMA and id(leaf) in line.comments: + num_of_inline_string_comments += 1 + break + + if has_triple_quotes(leaf.value): + return TErr("StringMerger does NOT merge multiline strings.") + + num_of_strings += 1 + prefix = get_string_prefix(leaf.value).lower() + if "r" in prefix: + return TErr("StringMerger does NOT merge raw strings.") + + set_of_prefixes.add(prefix) + + if id(leaf) in line.comments: + num_of_inline_string_comments += 1 + if contains_pragma_comment(line.comments[id(leaf)]): + return TErr("Cannot merge strings which have pragma comments.") + + if num_of_strings < 2: + return TErr( + f"Not enough strings to merge (num_of_strings={num_of_strings})." + ) + + if num_of_inline_string_comments > 1: + return TErr( + f"Too many inline string comments ({num_of_inline_string_comments})." + ) + + if len(set_of_prefixes) > 1 and set_of_prefixes != {"", "f"}: + return TErr(f"Too many different prefixes ({set_of_prefixes}).") + + return Ok(None) + + +class StringParenStripper(StringTransformer): + """StringTransformer that strips surrounding parentheses from strings. + + Requirements: + The line contains a string which is surrounded by parentheses and: + - The target string is NOT the only argument to a function call. + - The target string is NOT a "pointless" string. + - If the target string contains a PERCENT, the brackets are not + preceded or followed by an operator with higher precedence than + PERCENT. + + Transformations: + The parentheses mentioned in the 'Requirements' section are stripped. + + Collaborations: + StringParenStripper has its own inherent usefulness, but it is also + relied on to clean up the parentheses created by StringParenWrapper (in + the event that they are no longer needed). 
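+
+    For example (editor's illustration): in `x = ("some long string")`, the
+    parentheses around the string serve no purpose and are stripped, leaving
+    `x = "some long string"`, provided none of the exclusions above apply.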
+ """ + + def do_match(self, line: Line) -> TMatchResult: + LL = line.leaves + + is_valid_index = is_valid_index_factory(LL) + + string_indices = [] + + idx = -1 + while True: + idx += 1 + if idx >= len(LL): + break + leaf = LL[idx] + + # Should be a string... + if leaf.type != token.STRING: + continue + + # If this is a "pointless" string... + if ( + leaf.parent + and leaf.parent.parent + and leaf.parent.parent.type == syms.simple_stmt + ): + continue + + # Should be preceded by a non-empty LPAR... + if ( + not is_valid_index(idx - 1) + or LL[idx - 1].type != token.LPAR + or is_empty_lpar(LL[idx - 1]) + ): + continue + + # That LPAR should NOT be preceded by a function name or a closing + # bracket (which could be a function which returns a function or a + # list/dictionary that contains a function)... + if is_valid_index(idx - 2) and ( + LL[idx - 2].type == token.NAME or LL[idx - 2].type in CLOSING_BRACKETS + ): + continue + + string_idx = idx + + # Skip the string trailer, if one exists. + string_parser = StringParser() + next_idx = string_parser.parse(LL, string_idx) + + # if the leaves in the parsed string include a PERCENT, we need to + # make sure the initial LPAR is NOT preceded by an operator with + # higher or equal precedence to PERCENT + if is_valid_index(idx - 2): + # mypy can't quite follow unless we name this + before_lpar = LL[idx - 2] + if token.PERCENT in {leaf.type for leaf in LL[idx - 1 : next_idx]} and ( + ( + before_lpar.type + in { + token.STAR, + token.AT, + token.SLASH, + token.DOUBLESLASH, + token.PERCENT, + token.TILDE, + token.DOUBLESTAR, + token.AWAIT, + token.LSQB, + token.LPAR, + } + ) + or ( + # only unary PLUS/MINUS + before_lpar.parent + and before_lpar.parent.type == syms.factor + and (before_lpar.type in {token.PLUS, token.MINUS}) + ) + ): + continue + + # Should be followed by a non-empty RPAR... + if ( + is_valid_index(next_idx) + and LL[next_idx].type == token.RPAR + and not is_empty_rpar(LL[next_idx]) + ): + # That RPAR should NOT be followed by anything with higher + # precedence than PERCENT + if is_valid_index(next_idx + 1) and LL[next_idx + 1].type in { + token.DOUBLESTAR, + token.LSQB, + token.LPAR, + token.DOT, + }: + continue + + string_indices.append(string_idx) + idx = string_idx + while idx < len(LL) - 1 and LL[idx + 1].type == token.STRING: + idx += 1 + + if string_indices: + return Ok(string_indices) + return TErr("This line has no strings wrapped in parens.") + + def do_transform( + self, line: Line, string_indices: List[int] + ) -> Iterator[TResult[Line]]: + LL = line.leaves + + string_and_rpar_indices: List[int] = [] + for string_idx in string_indices: + string_parser = StringParser() + rpar_idx = string_parser.parse(LL, string_idx) + + should_transform = True + for leaf in (LL[string_idx - 1], LL[rpar_idx]): + if line.comments_after(leaf): + # Should not strip parentheses which have comments attached + # to them. + should_transform = False + break + if should_transform: + string_and_rpar_indices.extend((string_idx, rpar_idx)) + + if string_and_rpar_indices: + yield Ok(self._transform_to_new_line(line, string_and_rpar_indices)) + else: + yield Err( + CannotTransform("All string groups have comments attached to them.") + ) + + def _transform_to_new_line( + self, line: Line, string_and_rpar_indices: List[int] + ) -> Line: + LL = line.leaves + + new_line = line.clone() + new_line.comments = line.comments.copy() + + previous_idx = -1 + # We need to sort the indices, since string_idx and its matching + # rpar_idx may not come in order, e.g. 
in + # `("outer" % ("inner".join(items)))`, the "inner" string's + # string_idx is smaller than "outer" string's rpar_idx. + for idx in sorted(string_and_rpar_indices): + leaf = LL[idx] + lpar_or_rpar_idx = idx - 1 if leaf.type == token.STRING else idx + append_leaves(new_line, line, LL[previous_idx + 1 : lpar_or_rpar_idx]) + if leaf.type == token.STRING: + string_leaf = Leaf(token.STRING, LL[idx].value) + LL[lpar_or_rpar_idx].remove() # Remove lpar. + replace_child(LL[idx], string_leaf) + new_line.append(string_leaf) + else: + LL[lpar_or_rpar_idx].remove() # This is a rpar. + + previous_idx = idx + + # Append the leaves after the last idx: + append_leaves(new_line, line, LL[idx + 1 :]) + + return new_line + + +class BaseStringSplitter(StringTransformer): + """ + Abstract class for StringTransformers which transform a Line's strings by splitting + them or placing them on their own lines where necessary to avoid going over + the configured line length. + + Requirements: + * The target string value is responsible for the line going over the + line length limit. It follows that after all of black's other line + split methods have been exhausted, this line (or one of the resulting + lines after all line splits are performed) would still be over the + line_length limit unless we split this string. + AND + * The target string is NOT a "pointless" string (i.e. a string that has + no parent or siblings). + AND + * The target string is not followed by an inline comment that appears + to be a pragma. + AND + * The target string is not a multiline (i.e. triple-quote) string. + """ + + STRING_OPERATORS: Final = [ + token.EQEQUAL, + token.GREATER, + token.GREATEREQUAL, + token.LESS, + token.LESSEQUAL, + token.NOTEQUAL, + token.PERCENT, + token.PLUS, + token.STAR, + ] + + @abstractmethod + def do_splitter_match(self, line: Line) -> TMatchResult: + """ + BaseStringSplitter asks its clients to override this method instead of + `StringTransformer.do_match(...)`. + + Follows the same protocol as `StringTransformer.do_match(...)`. + + Refer to `help(StringTransformer.do_match)` for more information. + """ + + def do_match(self, line: Line) -> TMatchResult: + match_result = self.do_splitter_match(line) + if isinstance(match_result, Err): + return match_result + + string_indices = match_result.ok() + assert len(string_indices) == 1, ( + f"{self.__class__.__name__} should only find one match at a time, found" + f" {len(string_indices)}" + ) + string_idx = string_indices[0] + vresult = self._validate(line, string_idx) + if isinstance(vresult, Err): + return vresult + + return match_result + + def _validate(self, line: Line, string_idx: int) -> TResult[None]: + """ + Checks that @line meets all of the requirements listed in this classes' + docstring. Refer to `help(BaseStringSplitter)` for a detailed + description of those requirements. + + Returns: + * Ok(None), if ALL of the requirements are met. + OR + * Err(CannotTransform), if ANY of the requirements are NOT met. + """ + LL = line.leaves + + string_leaf = LL[string_idx] + + max_string_length = self._get_max_string_length(line, string_idx) + if len(string_leaf.value) <= max_string_length: + return TErr( + "The string itself is not what is causing this line to be too long." + ) + + if not string_leaf.parent or [L.type for L in string_leaf.parent.children] == [ + token.STRING, + token.NEWLINE, + ]: + return TErr( + f"This string ({string_leaf.value}) appears to be pointless (i.e. has" + " no parent)." 
+ ) + + if id(line.leaves[string_idx]) in line.comments and contains_pragma_comment( + line.comments[id(line.leaves[string_idx])] + ): + return TErr( + "Line appears to end with an inline pragma comment. Splitting the line" + " could modify the pragma's behavior." + ) + + if has_triple_quotes(string_leaf.value): + return TErr("We cannot split multiline strings.") + + return Ok(None) + + def _get_max_string_length(self, line: Line, string_idx: int) -> int: + """ + Calculates the max string length used when attempting to determine + whether or not the target string is responsible for causing the line to + go over the line length limit. + + WARNING: This method is tightly coupled to both StringSplitter and + (especially) StringParenWrapper. There is probably a better way to + accomplish what is being done here. + + Returns: + max_string_length: such that `line.leaves[string_idx].value > + max_string_length` implies that the target string IS responsible + for causing this line to exceed the line length limit. + """ + LL = line.leaves + + is_valid_index = is_valid_index_factory(LL) + + # We use the shorthand "WMA4" in comments to abbreviate "We must + # account for". When giving examples, we use STRING to mean some/any + # valid string. + # + # Finally, we use the following convenience variables: + # + # P: The leaf that is before the target string leaf. + # N: The leaf that is after the target string leaf. + # NN: The leaf that is after N. + + # WMA4 the whitespace at the beginning of the line. + offset = line.depth * 4 + + if is_valid_index(string_idx - 1): + p_idx = string_idx - 1 + if ( + LL[string_idx - 1].type == token.LPAR + and LL[string_idx - 1].value == "" + and string_idx >= 2 + ): + # If the previous leaf is an empty LPAR placeholder, we should skip it. + p_idx -= 1 + + P = LL[p_idx] + if P.type in self.STRING_OPERATORS: + # WMA4 a space and a string operator (e.g. `+ STRING` or `== STRING`). + offset += len(str(P)) + 1 + + if P.type == token.COMMA: + # WMA4 a space, a comma, and a closing bracket [e.g. `), STRING`]. + offset += 3 + + if P.type in [token.COLON, token.EQUAL, token.PLUSEQUAL, token.NAME]: + # This conditional branch is meant to handle dictionary keys, + # variable assignments, 'return STRING' statement lines, and + # 'else STRING' ternary expression lines. + + # WMA4 a single space. + offset += 1 + + # WMA4 the lengths of any leaves that came before that space, + # but after any closing bracket before that space. + for leaf in reversed(LL[: p_idx + 1]): + offset += len(str(leaf)) + if leaf.type in CLOSING_BRACKETS: + break + + if is_valid_index(string_idx + 1): + N = LL[string_idx + 1] + if N.type == token.RPAR and N.value == "" and len(LL) > string_idx + 2: + # If the next leaf is an empty RPAR placeholder, we should skip it. + N = LL[string_idx + 2] + + if N.type == token.COMMA: + # WMA4 a single comma at the end of the string (e.g `STRING,`). + offset += 1 + + if is_valid_index(string_idx + 2): + NN = LL[string_idx + 2] + + if N.type == token.DOT and NN.type == token.NAME: + # This conditional branch is meant to handle method calls invoked + # off of a string literal up to and including the LPAR character. + + # WMA4 the '.' character. + offset += 1 + + if ( + is_valid_index(string_idx + 3) + and LL[string_idx + 3].type == token.LPAR + ): + # WMA4 the left parenthesis character. + offset += 1 + + # WMA4 the length of the method's name. 
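+                    # (Editor's illustration: for `STRING.format(x)`, the
+                    # name 'format' adds 6 more characters to the offset.)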
+ offset += len(NN.value) + + has_comments = False + for comment_leaf in line.comments_after(LL[string_idx]): + if not has_comments: + has_comments = True + # WMA4 two spaces before the '#' character. + offset += 2 + + # WMA4 the length of the inline comment. + offset += len(comment_leaf.value) + + max_string_length = self.line_length - offset + return max_string_length + + @staticmethod + def _prefer_paren_wrap_match(LL: List[Leaf]) -> Optional[int]: + """ + Returns: + string_idx such that @LL[string_idx] is equal to our target (i.e. + matched) string, if this line matches the "prefer paren wrap" statement + requirements listed in the 'Requirements' section of the StringParenWrapper + class's docstring. + OR + None, otherwise. + """ + # The line must start with a string. + if LL[0].type != token.STRING: + return None + + # If the string is surrounded by commas (or is the first/last child)... + prev_sibling = LL[0].prev_sibling + next_sibling = LL[0].next_sibling + if not prev_sibling and not next_sibling and parent_type(LL[0]) == syms.atom: + # If it's an atom string, we need to check the parent atom's siblings. + parent = LL[0].parent + assert parent is not None # For type checkers. + prev_sibling = parent.prev_sibling + next_sibling = parent.next_sibling + if (not prev_sibling or prev_sibling.type == token.COMMA) and ( + not next_sibling or next_sibling.type == token.COMMA + ): + return 0 + + return None + + +def iter_fexpr_spans(s: str) -> Iterator[Tuple[int, int]]: + """ + Yields spans corresponding to expressions in a given f-string. + Spans are half-open ranges (left inclusive, right exclusive). + Assumes the input string is a valid f-string, but will not crash if the input + string is invalid. + """ + stack: List[int] = [] # our curly paren stack + i = 0 + while i < len(s): + if s[i] == "{": + # if we're in a string part of the f-string, ignore escaped curly braces + if not stack and i + 1 < len(s) and s[i + 1] == "{": + i += 2 + continue + stack.append(i) + i += 1 + continue + + if s[i] == "}": + if not stack: + i += 1 + continue + j = stack.pop() + # we've made it back out of the expression! yield the span + if not stack: + yield (j, i + 1) + i += 1 + continue + + # if we're in an expression part of the f-string, fast forward through strings + # note that backslashes are not legal in the expression portion of f-strings + if stack: + delim = None + if s[i : i + 3] in ("'''", '"""'): + delim = s[i : i + 3] + elif s[i] in ("'", '"'): + delim = s[i] + if delim: + i += len(delim) + while i < len(s) and s[i : i + len(delim)] != delim: + i += 1 + i += len(delim) + continue + i += 1 + + +def fstring_contains_expr(s: str) -> bool: + return any(iter_fexpr_spans(s)) + + +def _toggle_fexpr_quotes(fstring: str, old_quote: str) -> str: + """ + Toggles quotes used in f-string expressions that are `old_quote`. + + f-string expressions can't contain backslashes, so we need to toggle the + quotes if the f-string itself will end up using the same quote. We can + simply toggle without escaping because, quotes can't be reused in f-string + expressions. They will fail to parse. + + NOTE: If PEP 701 is accepted, above statement will no longer be true. + Though if quotes can be reused, we can simply reuse them without updates or + escaping, once Black figures out how to parse the new grammar. 
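+
+    For example (editor's illustration):
+
+        _toggle_fexpr_quotes('f"{d["key"]}"', '"') == 'f"{d[\'key\']}"'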
+ """ + new_quote = "'" if old_quote == '"' else '"' + parts = [] + previous_index = 0 + for start, end in iter_fexpr_spans(fstring): + parts.append(fstring[previous_index:start]) + parts.append(fstring[start:end].replace(old_quote, new_quote)) + previous_index = end + parts.append(fstring[previous_index:]) + return "".join(parts) + + +class StringSplitter(BaseStringSplitter, CustomSplitMapMixin): + """ + StringTransformer that splits "atom" strings (i.e. strings which exist on + lines by themselves). + + Requirements: + * The line consists ONLY of a single string (possibly prefixed by a + string operator [e.g. '+' or '==']), MAYBE a string trailer, and MAYBE + a trailing comma. + AND + * All of the requirements listed in BaseStringSplitter's docstring. + + Transformations: + The string mentioned in the 'Requirements' section is split into as + many substrings as necessary to adhere to the configured line length. + + In the final set of substrings, no substring should be smaller than + MIN_SUBSTR_SIZE characters. + + The string will ONLY be split on spaces (i.e. each new substring should + start with a space). Note that the string will NOT be split on a space + which is escaped with a backslash. + + If the string is an f-string, it will NOT be split in the middle of an + f-expression (e.g. in f"FooBar: {foo() if x else bar()}", {foo() if x + else bar()} is an f-expression). + + If the string that is being split has an associated set of custom split + records and those custom splits will NOT result in any line going over + the configured line length, those custom splits are used. Otherwise the + string is split as late as possible (from left-to-right) while still + adhering to the transformation rules listed above. + + Collaborations: + StringSplitter relies on StringMerger to construct the appropriate + CustomSplit objects and add them to the custom split map. + """ + + MIN_SUBSTR_SIZE: Final = 6 + + def do_splitter_match(self, line: Line) -> TMatchResult: + LL = line.leaves + + if self._prefer_paren_wrap_match(LL) is not None: + return TErr("Line needs to be wrapped in parens first.") + + is_valid_index = is_valid_index_factory(LL) + + idx = 0 + + # The first two leaves MAY be the 'not in' keywords... + if ( + is_valid_index(idx) + and is_valid_index(idx + 1) + and [LL[idx].type, LL[idx + 1].type] == [token.NAME, token.NAME] + and str(LL[idx]) + str(LL[idx + 1]) == "not in" + ): + idx += 2 + # Else the first leaf MAY be a string operator symbol or the 'in' keyword... + elif is_valid_index(idx) and ( + LL[idx].type in self.STRING_OPERATORS + or LL[idx].type == token.NAME + and str(LL[idx]) == "in" + ): + idx += 1 + + # The next/first leaf MAY be an empty LPAR... + if is_valid_index(idx) and is_empty_lpar(LL[idx]): + idx += 1 + + # The next/first leaf MUST be a string... + if not is_valid_index(idx) or LL[idx].type != token.STRING: + return TErr("Line does not start with a string.") + + string_idx = idx + + # Skip the string trailer, if one exists. + string_parser = StringParser() + idx = string_parser.parse(LL, string_idx) + + # That string MAY be followed by an empty RPAR... + if is_valid_index(idx) and is_empty_rpar(LL[idx]): + idx += 1 + + # That string / empty RPAR leaf MAY be followed by a comma... + if is_valid_index(idx) and LL[idx].type == token.COMMA: + idx += 1 + + # But no more leaves are allowed... 
+ if is_valid_index(idx): + return TErr("This line does not end with a string.") + + return Ok([string_idx]) + + def do_transform( + self, line: Line, string_indices: List[int] + ) -> Iterator[TResult[Line]]: + LL = line.leaves + assert len(string_indices) == 1, ( + f"{self.__class__.__name__} should only find one match at a time, found" + f" {len(string_indices)}" + ) + string_idx = string_indices[0] + + QUOTE = LL[string_idx].value[-1] + + is_valid_index = is_valid_index_factory(LL) + insert_str_child = insert_str_child_factory(LL[string_idx]) + + prefix = get_string_prefix(LL[string_idx].value).lower() + + # We MAY choose to drop the 'f' prefix from substrings that don't + # contain any f-expressions, but ONLY if the original f-string + # contains at least one f-expression. Otherwise, we will alter the AST + # of the program. + drop_pointless_f_prefix = ("f" in prefix) and fstring_contains_expr( + LL[string_idx].value + ) + + first_string_line = True + + string_op_leaves = self._get_string_operator_leaves(LL) + string_op_leaves_length = ( + sum(len(str(prefix_leaf)) for prefix_leaf in string_op_leaves) + 1 + if string_op_leaves + else 0 + ) + + def maybe_append_string_operators(new_line: Line) -> None: + """ + Side Effects: + If @line starts with a string operator and this is the first + line we are constructing, this function appends the string + operator to @new_line and replaces the old string operator leaf + in the node structure. Otherwise this function does nothing. + """ + maybe_prefix_leaves = string_op_leaves if first_string_line else [] + for i, prefix_leaf in enumerate(maybe_prefix_leaves): + replace_child(LL[i], prefix_leaf) + new_line.append(prefix_leaf) + + ends_with_comma = ( + is_valid_index(string_idx + 1) and LL[string_idx + 1].type == token.COMMA + ) + + def max_last_string() -> int: + """ + Returns: + The max allowed length of the string value used for the last + line we will construct. + """ + result = self.line_length + result -= line.depth * 4 + result -= 1 if ends_with_comma else 0 + result -= string_op_leaves_length + return result + + # --- Calculate Max Break Index (for string value) + # We start with the line length limit + max_break_idx = self.line_length + # The last index of a string of length N is N-1. + max_break_idx -= 1 + # Leading whitespace is not present in the string value (e.g. Leaf.value). + max_break_idx -= line.depth * 4 + if max_break_idx < 0: + yield TErr( + f"Unable to split {LL[string_idx].value} at such high of a line depth:" + f" {line.depth}" + ) + return + + # Check if StringMerger registered any custom splits. + custom_splits = self.pop_custom_splits(LL[string_idx].value) + # We use them ONLY if none of them would produce lines that exceed the + # line limit. + use_custom_breakpoints = bool( + custom_splits + and all(csplit.break_idx <= max_break_idx for csplit in custom_splits) + ) + + # Temporary storage for the remaining chunk of the string line that + # can't fit onto the line currently being constructed. + rest_value = LL[string_idx].value + + def more_splits_should_be_made() -> bool: + """ + Returns: + True iff `rest_value` (the remaining string value from the last + split), should be split again. 
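+
+                (Editor's illustration: with custom splits active, the final
+                chunk is never split again, hence the `len(custom_splits) > 1`
+                check; otherwise we keep splitting while the remainder exceeds
+                max_last_string().)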
+ """ + if use_custom_breakpoints: + return len(custom_splits) > 1 + else: + return len(rest_value) > max_last_string() + + string_line_results: List[Ok[Line]] = [] + while more_splits_should_be_made(): + if use_custom_breakpoints: + # Custom User Split (manual) + csplit = custom_splits.pop(0) + break_idx = csplit.break_idx + else: + # Algorithmic Split (automatic) + max_bidx = max_break_idx - string_op_leaves_length + maybe_break_idx = self._get_break_idx(rest_value, max_bidx) + if maybe_break_idx is None: + # If we are unable to algorithmically determine a good split + # and this string has custom splits registered to it, we + # fall back to using them--which means we have to start + # over from the beginning. + if custom_splits: + rest_value = LL[string_idx].value + string_line_results = [] + first_string_line = True + use_custom_breakpoints = True + continue + + # Otherwise, we stop splitting here. + break + + break_idx = maybe_break_idx + + # --- Construct `next_value` + next_value = rest_value[:break_idx] + QUOTE + + # HACK: The following 'if' statement is a hack to fix the custom + # breakpoint index in the case of either: (a) substrings that were + # f-strings but will have the 'f' prefix removed OR (b) substrings + # that were not f-strings but will now become f-strings because of + # redundant use of the 'f' prefix (i.e. none of the substrings + # contain f-expressions but one or more of them had the 'f' prefix + # anyway; in which case, we will prepend 'f' to _all_ substrings). + # + # There is probably a better way to accomplish what is being done + # here... + # + # If this substring is an f-string, we _could_ remove the 'f' + # prefix, and the current custom split did NOT originally use a + # prefix... + if ( + use_custom_breakpoints + and not csplit.has_prefix + and ( + # `next_value == prefix + QUOTE` happens when the custom + # split is an empty string. + next_value == prefix + QUOTE + or next_value != self._normalize_f_string(next_value, prefix) + ) + ): + # Then `csplit.break_idx` will be off by one after removing + # the 'f' prefix. + break_idx += 1 + next_value = rest_value[:break_idx] + QUOTE + + if drop_pointless_f_prefix: + next_value = self._normalize_f_string(next_value, prefix) + + # --- Construct `next_leaf` + next_leaf = Leaf(token.STRING, next_value) + insert_str_child(next_leaf) + self._maybe_normalize_string_quotes(next_leaf) + + # --- Construct `next_line` + next_line = line.clone() + maybe_append_string_operators(next_line) + next_line.append(next_leaf) + string_line_results.append(Ok(next_line)) + + rest_value = prefix + QUOTE + rest_value[break_idx:] + first_string_line = False + + yield from string_line_results + + if drop_pointless_f_prefix: + rest_value = self._normalize_f_string(rest_value, prefix) + + rest_leaf = Leaf(token.STRING, rest_value) + insert_str_child(rest_leaf) + + # NOTE: I could not find a test case that verifies that the following + # line is actually necessary, but it seems to be. Otherwise we risk + # not normalizing the last substring, right? + self._maybe_normalize_string_quotes(rest_leaf) + + last_line = line.clone() + maybe_append_string_operators(last_line) + + # If there are any leaves to the right of the target string... + if is_valid_index(string_idx + 1): + # We use `temp_value` here to determine how long the last line + # would be if we were to append all the leaves to the right of the + # target string to the last string line. 
+ temp_value = rest_value + for leaf in LL[string_idx + 1 :]: + temp_value += str(leaf) + if leaf.type == token.LPAR: + break + + # Try to fit them all on the same line with the last substring... + if ( + len(temp_value) <= max_last_string() + or LL[string_idx + 1].type == token.COMMA + ): + last_line.append(rest_leaf) + append_leaves(last_line, line, LL[string_idx + 1 :]) + yield Ok(last_line) + # Otherwise, place the last substring on one line and everything + # else on a line below that... + else: + last_line.append(rest_leaf) + yield Ok(last_line) + + non_string_line = line.clone() + append_leaves(non_string_line, line, LL[string_idx + 1 :]) + yield Ok(non_string_line) + # Else the target string was the last leaf... + else: + last_line.append(rest_leaf) + last_line.comments = line.comments.copy() + yield Ok(last_line) + + def _iter_nameescape_slices(self, string: str) -> Iterator[Tuple[Index, Index]]: + """ + Yields: + All ranges of @string which, if @string were to be split there, + would result in the splitting of an \\N{...} expression (which is NOT + allowed). + """ + # True - the previous backslash was unescaped + # False - the previous backslash was escaped *or* there was no backslash + previous_was_unescaped_backslash = False + it = iter(enumerate(string)) + for idx, c in it: + if c == "\\": + previous_was_unescaped_backslash = not previous_was_unescaped_backslash + continue + if not previous_was_unescaped_backslash or c != "N": + previous_was_unescaped_backslash = False + continue + previous_was_unescaped_backslash = False + + begin = idx - 1 # the position of backslash before \N{...} + for idx, c in it: + if c == "}": + end = idx + break + else: + # malformed nameescape expression? + # should have been detected by AST parsing earlier... + raise RuntimeError(f"{self.__class__.__name__} LOGIC ERROR!") + yield begin, end + + def _iter_fexpr_slices(self, string: str) -> Iterator[Tuple[Index, Index]]: + """ + Yields: + All ranges of @string which, if @string were to be split there, + would result in the splitting of an f-expression (which is NOT + allowed). + """ + if "f" not in get_string_prefix(string).lower(): + return + yield from iter_fexpr_spans(string) + + def _get_illegal_split_indices(self, string: str) -> Set[Index]: + illegal_indices: Set[Index] = set() + iterators = [ + self._iter_fexpr_slices(string), + self._iter_nameescape_slices(string), + ] + for it in iterators: + for begin, end in it: + illegal_indices.update(range(begin, end + 1)) + return illegal_indices + + def _get_break_idx(self, string: str, max_break_idx: int) -> Optional[int]: + """ + This method contains the algorithm that StringSplitter uses to + determine which character to split each string at. + + Args: + @string: The substring that we are attempting to split. + @max_break_idx: The ideal break index. We will return this value if it + meets all the necessary conditions. In the likely event that it + doesn't we will try to find the closest index BELOW @max_break_idx + that does. If that fails, we will expand our search by also + considering all valid indices ABOVE @max_break_idx. + + Pre-Conditions: + * assert_is_leaf_string(@string) + * 0 <= @max_break_idx < len(@string) + + Returns: + break_idx, if an index is able to be found that meets all of the + conditions listed in the 'Transformations' section of this classes' + docstring. + OR + None, otherwise. 
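+
+        For example (editor's illustration): for the leaf value
+        '"hello there world"' with max_break_idx == 12, index 12 holds the
+        space before 'world', both halves satisfy MIN_SUBSTR_SIZE, and 12
+        is returned.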
+ """ + is_valid_index = is_valid_index_factory(string) + + assert is_valid_index(max_break_idx) + assert_is_leaf_string(string) + + _illegal_split_indices = self._get_illegal_split_indices(string) + + def breaks_unsplittable_expression(i: Index) -> bool: + """ + Returns: + True iff returning @i would result in the splitting of an + unsplittable expression (which is NOT allowed). + """ + return i in _illegal_split_indices + + def passes_all_checks(i: Index) -> bool: + """ + Returns: + True iff ALL of the conditions listed in the 'Transformations' + section of this classes' docstring would be be met by returning @i. + """ + is_space = string[i] == " " + + is_not_escaped = True + j = i - 1 + while is_valid_index(j) and string[j] == "\\": + is_not_escaped = not is_not_escaped + j -= 1 + + is_big_enough = ( + len(string[i:]) >= self.MIN_SUBSTR_SIZE + and len(string[:i]) >= self.MIN_SUBSTR_SIZE + ) + return ( + is_space + and is_not_escaped + and is_big_enough + and not breaks_unsplittable_expression(i) + ) + + # First, we check all indices BELOW @max_break_idx. + break_idx = max_break_idx + while is_valid_index(break_idx - 1) and not passes_all_checks(break_idx): + break_idx -= 1 + + if not passes_all_checks(break_idx): + # If that fails, we check all indices ABOVE @max_break_idx. + # + # If we are able to find a valid index here, the next line is going + # to be longer than the specified line length, but it's probably + # better than doing nothing at all. + break_idx = max_break_idx + 1 + while is_valid_index(break_idx + 1) and not passes_all_checks(break_idx): + break_idx += 1 + + if not is_valid_index(break_idx) or not passes_all_checks(break_idx): + return None + + return break_idx + + def _maybe_normalize_string_quotes(self, leaf: Leaf) -> None: + if self.normalize_strings: + leaf.value = normalize_string_quotes(leaf.value) + + def _normalize_f_string(self, string: str, prefix: str) -> str: + """ + Pre-Conditions: + * assert_is_leaf_string(@string) + + Returns: + * If @string is an f-string that contains no f-expressions, we + return a string identical to @string except that the 'f' prefix + has been stripped and all double braces (i.e. '{{' or '}}') have + been normalized (i.e. turned into '{' or '}'). + OR + * Otherwise, we return @string. + """ + assert_is_leaf_string(string) + + if "f" in prefix and not fstring_contains_expr(string): + new_prefix = prefix.replace("f", "") + + temp = string[len(prefix) :] + temp = re.sub(r"\{\{", "{", temp) + temp = re.sub(r"\}\}", "}", temp) + new_string = temp + + return f"{new_prefix}{new_string}" + else: + return string + + def _get_string_operator_leaves(self, leaves: Iterable[Leaf]) -> List[Leaf]: + LL = list(leaves) + + string_op_leaves = [] + i = 0 + while LL[i].type in self.STRING_OPERATORS + [token.NAME]: + prefix_leaf = Leaf(LL[i].type, str(LL[i]).strip()) + string_op_leaves.append(prefix_leaf) + i += 1 + return string_op_leaves + + +class StringParenWrapper(BaseStringSplitter, CustomSplitMapMixin): + """ + StringTransformer that wraps strings in parens and then splits at the LPAR. + + Requirements: + All of the requirements listed in BaseStringSplitter's docstring in + addition to the requirements listed below: + + * The line is a return/yield statement, which returns/yields a string. + OR + * The line is part of a ternary expression (e.g. `x = y if cond else + z`) such that the line starts with `else `, where is + some string. + OR + * The line is an assert statement, which ends with a string. 
+          OR
+        * The line is an assignment statement (e.g. `x = <string>` or `x +=
+          <string>`) such that the variable is being assigned the value of
+          some string.
+          OR
+        * The line is a dictionary key assignment where some valid key is being
+          assigned the value of some string.
+          OR
+        * The line is a lambda expression and the value is a string.
+          OR
+        * The line starts with an "atom" string that prefers to be wrapped in
+          parens. It's preferred to be wrapped when the string is surrounded by
+          commas (or is the first/last child).
+
+    Transformations:
+        The chosen string is wrapped in parentheses and then split at the LPAR.
+
+        We then have one line which ends with an LPAR and another line that
+        starts with the chosen string. The latter line is then split again at
+        the RPAR. This results in the RPAR (and possibly a trailing comma)
+        being placed on its own line.
+
+        NOTE: If any leaves exist to the right of the chosen string (except
+        for a trailing comma, which would be placed after the RPAR), those
+        leaves are placed inside the parentheses. In effect, the chosen
+        string is not necessarily being "wrapped" by parentheses. We can,
+        however, count on the LPAR being placed directly before the chosen
+        string.
+
+        In other words, StringParenWrapper creates "atom" strings. These
+        can then be split again by StringSplitter, if necessary.
+
+    Collaborations:
+        In the event that a string line split by StringParenWrapper is
+        changed such that it no longer needs to be given its own line,
+        StringParenWrapper relies on StringParenStripper to clean up the
+        parentheses it created.
+
+        For "atom" strings that prefer to be wrapped in parens, it requires
+        StringSplitter to hold the split until the string is wrapped in parens.
+    """
+
+    def do_splitter_match(self, line: Line) -> TMatchResult:
+        LL = line.leaves
+
+        if line.leaves[-1].type in OPENING_BRACKETS:
+            return TErr(
+                "Cannot wrap parens around a line that ends in an opening bracket."
+            )
+
+        string_idx = (
+            self._return_match(LL)
+            or self._else_match(LL)
+            or self._assert_match(LL)
+            or self._assign_match(LL)
+            or self._dict_or_lambda_match(LL)
+            or self._prefer_paren_wrap_match(LL)
+        )
+
+        if string_idx is not None:
+            string_value = line.leaves[string_idx].value
+            # If the string has no spaces...
+            if " " not in string_value:
+                # And will still violate the line length limit when split...
+                max_string_length = self.line_length - ((line.depth + 1) * 4)
+                if len(string_value) > max_string_length:
+                    # And has no associated custom splits...
+                    if not self.has_custom_splits(string_value):
+                        # Then we should NOT put this string on its own line.
+                        return TErr(
+                            "We do not wrap long strings in parentheses when the"
+                            " resultant line would still be over the specified line"
+                            " length and can't be split further by StringSplitter."
+                        )
+            return Ok([string_idx])
+
+        return TErr("This line does not contain any non-atomic strings.")
+
+    @staticmethod
+    def _return_match(LL: List[Leaf]) -> Optional[int]:
+        """
+        Returns:
+            string_idx such that @LL[string_idx] is equal to our target (i.e.
+            matched) string, if this line matches the return/yield statement
+            requirements listed in the 'Requirements' section of this class's
+            docstring.
+            OR
+            None, otherwise.
+        """
+        # If this line is a part of a return/yield statement and the first leaf
+        # contains either the "return" or "yield" keywords...
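+        # (e.g. `return "some long string"` or `yield "some long string"`)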
+        if parent_type(LL[0]) in [syms.return_stmt, syms.yield_expr] and LL[
+            0
+        ].value in ["return", "yield"]:
+            is_valid_index = is_valid_index_factory(LL)
+
+            idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1
+            # The next visible leaf MUST contain a string...
+            if is_valid_index(idx) and LL[idx].type == token.STRING:
+                return idx
+
+        return None
+
+    @staticmethod
+    def _else_match(LL: List[Leaf]) -> Optional[int]:
+        """
+        Returns:
+            string_idx such that @LL[string_idx] is equal to our target (i.e.
+            matched) string, if this line matches the ternary expression
+            requirements listed in the 'Requirements' section of this class's
+            docstring.
+            OR
+            None, otherwise.
+        """
+        # If this line is a part of a ternary expression and the first leaf
+        # contains the "else" keyword...
+        if (
+            parent_type(LL[0]) == syms.test
+            and LL[0].type == token.NAME
+            and LL[0].value == "else"
+        ):
+            is_valid_index = is_valid_index_factory(LL)
+
+            idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1
+            # The next visible leaf MUST contain a string...
+            if is_valid_index(idx) and LL[idx].type == token.STRING:
+                return idx
+
+        return None
+
+    @staticmethod
+    def _assert_match(LL: List[Leaf]) -> Optional[int]:
+        """
+        Returns:
+            string_idx such that @LL[string_idx] is equal to our target (i.e.
+            matched) string, if this line matches the assert statement
+            requirements listed in the 'Requirements' section of this class's
+            docstring.
+            OR
+            None, otherwise.
+        """
+        # If this line is a part of an assert statement and the first leaf
+        # contains the "assert" keyword...
+        if parent_type(LL[0]) == syms.assert_stmt and LL[0].value == "assert":
+            is_valid_index = is_valid_index_factory(LL)
+
+            for i, leaf in enumerate(LL):
+                # We MUST find a comma...
+                if leaf.type == token.COMMA:
+                    idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
+
+                    # That comma MUST be followed by a string...
+                    if is_valid_index(idx) and LL[idx].type == token.STRING:
+                        string_idx = idx
+
+                        # Skip the string trailer, if one exists.
+                        string_parser = StringParser()
+                        idx = string_parser.parse(LL, string_idx)
+
+                        # But no more leaves are allowed...
+                        if not is_valid_index(idx):
+                            return string_idx
+
+        return None
+
+    @staticmethod
+    def _assign_match(LL: List[Leaf]) -> Optional[int]:
+        """
+        Returns:
+            string_idx such that @LL[string_idx] is equal to our target (i.e.
+            matched) string, if this line matches the assignment statement
+            requirements listed in the 'Requirements' section of this class's
+            docstring.
+            OR
+            None, otherwise.
+        """
+        # If this line is a part of an expression statement or is a function
+        # argument AND the first leaf contains a variable name...
+        if (
+            parent_type(LL[0]) in [syms.expr_stmt, syms.argument, syms.power]
+            and LL[0].type == token.NAME
+        ):
+            is_valid_index = is_valid_index_factory(LL)
+
+            for i, leaf in enumerate(LL):
+                # We MUST find either an '=' or '+=' symbol...
+                if leaf.type in [token.EQUAL, token.PLUSEQUAL]:
+                    idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
+
+                    # That symbol MUST be followed by a string...
+                    if is_valid_index(idx) and LL[idx].type == token.STRING:
+                        string_idx = idx
+
+                        # Skip the string trailer, if one exists.
+                        string_parser = StringParser()
+                        idx = string_parser.parse(LL, string_idx)
+
+                        # The next leaf MAY be a comma iff this line is a part
+                        # of a function argument...
+                        if (
+                            parent_type(LL[0]) == syms.argument
+                            and is_valid_index(idx)
+                            and LL[idx].type == token.COMMA
+                        ):
+                            idx += 1
+
+                        # But no more leaves are allowed...
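+                        # (i.e. nothing may follow the string, its trailer,
+                        # and the optional trailing comma on this line)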
+                        if not is_valid_index(idx):
+                            return string_idx
+
+        return None
+
+    @staticmethod
+    def _dict_or_lambda_match(LL: List[Leaf]) -> Optional[int]:
+        """
+        Returns:
+            string_idx such that @LL[string_idx] is equal to our target (i.e.
+            matched) string, if this line matches the dictionary key assignment
+            statement or lambda expression requirements listed in the
+            'Requirements' section of this class's docstring.
+            OR
+            None, otherwise.
+        """
+        # If this line is a part of a dictionary key assignment or lambda expression...
+        parent_types = [parent_type(LL[0]), parent_type(LL[0].parent)]
+        if syms.dictsetmaker in parent_types or syms.lambdef in parent_types:
+            is_valid_index = is_valid_index_factory(LL)
+
+            for i, leaf in enumerate(LL):
+                # We MUST find a colon; it can be either the dict's or the
+                # lambda's colon...
+                if leaf.type == token.COLON and i < len(LL) - 1:
+                    idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
+
+                    # That colon MUST be followed by a string...
+                    if is_valid_index(idx) and LL[idx].type == token.STRING:
+                        string_idx = idx
+
+                        # Skip the string trailer, if one exists.
+                        string_parser = StringParser()
+                        idx = string_parser.parse(LL, string_idx)
+
+                        # That string MAY be followed by a comma...
+                        if is_valid_index(idx) and LL[idx].type == token.COMMA:
+                            idx += 1
+
+                        # But no more leaves are allowed...
+                        if not is_valid_index(idx):
+                            return string_idx
+
+        return None
+
+    def do_transform(
+        self, line: Line, string_indices: List[int]
+    ) -> Iterator[TResult[Line]]:
+        LL = line.leaves
+        assert len(string_indices) == 1, (
+            f"{self.__class__.__name__} should only find one match at a time, found"
+            f" {len(string_indices)}"
+        )
+        string_idx = string_indices[0]
+
+        is_valid_index = is_valid_index_factory(LL)
+        insert_str_child = insert_str_child_factory(LL[string_idx])
+
+        comma_idx = -1
+        ends_with_comma = False
+        if LL[comma_idx].type == token.COMMA:
+            ends_with_comma = True
+
+        leaves_to_steal_comments_from = [LL[string_idx]]
+        if ends_with_comma:
+            leaves_to_steal_comments_from.append(LL[comma_idx])
+
+        # --- First Line
+        first_line = line.clone()
+        left_leaves = LL[:string_idx]
+
+        # We have to remember to account for (possibly invisible) LPAR and RPAR
+        # leaves that already wrapped the target string. If these leaves do
+        # exist, we will replace them with our own LPAR and RPAR leaves.
+        old_parens_exist = False
+        if left_leaves and left_leaves[-1].type == token.LPAR:
+            old_parens_exist = True
+            leaves_to_steal_comments_from.append(left_leaves[-1])
+            left_leaves.pop()
+
+        append_leaves(first_line, line, left_leaves)
+
+        lpar_leaf = Leaf(token.LPAR, "(")
+        if old_parens_exist:
+            replace_child(LL[string_idx - 1], lpar_leaf)
+        else:
+            insert_str_child(lpar_leaf)
+        first_line.append(lpar_leaf)
+
+        # We throw inline comments that were originally to the right of the
+        # target string to the top line. They will now be shown to the right of
+        # the LPAR.
+        for leaf in leaves_to_steal_comments_from:
+            for comment_leaf in line.comments_after(leaf):
+                first_line.append(comment_leaf, preformatted=True)
+
+        yield Ok(first_line)
+
+        # --- Middle (String) Line
+        # We only need to yield one (possibly too long) string line, since the
+        # `StringSplitter` will break it down further if necessary.
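+        # NOTE: The string line is built one level deeper (depth + 1) and is
+        # marked as being inside brackets, since it now sits between the LPAR
+        # line yielded above and the RPAR line yielded below.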
+        string_value = LL[string_idx].value
+        string_line = Line(
+            mode=line.mode,
+            depth=line.depth + 1,
+            inside_brackets=True,
+            should_split_rhs=line.should_split_rhs,
+            magic_trailing_comma=line.magic_trailing_comma,
+        )
+        string_leaf = Leaf(token.STRING, string_value)
+        insert_str_child(string_leaf)
+        string_line.append(string_leaf)
+
+        old_rpar_leaf = None
+        if is_valid_index(string_idx + 1):
+            right_leaves = LL[string_idx + 1 :]
+            if ends_with_comma:
+                right_leaves.pop()
+
+            if old_parens_exist:
+                assert right_leaves and right_leaves[-1].type == token.RPAR, (
+                    "Apparently, old parentheses do NOT exist?!"
+                    f" (left_leaves={left_leaves}, right_leaves={right_leaves})"
+                )
+                old_rpar_leaf = right_leaves.pop()
+            elif right_leaves and right_leaves[-1].type == token.RPAR:
+                # Special case for lambda expressions as dict's value, e.g.:
+                #     my_dict = {
+                #        "key": lambda x: f"formatted: {x}",
+                #     }
+                # After wrapping the dict's value with parentheses, the string is
+                # followed by a RPAR but its opening bracket is lambda's, not
+                # the string's:
+                #     "key": (lambda x: f"formatted: {x}"),
+                opening_bracket = right_leaves[-1].opening_bracket
+                if opening_bracket is not None and opening_bracket in left_leaves:
+                    index = left_leaves.index(opening_bracket)
+                    if (
+                        index > 0
+                        and index < len(left_leaves) - 1
+                        and left_leaves[index - 1].type == token.COLON
+                        and left_leaves[index + 1].value == "lambda"
+                    ):
+                        right_leaves.pop()
+
+            append_leaves(string_line, line, right_leaves)
+
+        yield Ok(string_line)
+
+        # --- Last Line
+        last_line = line.clone()
+        last_line.bracket_tracker = first_line.bracket_tracker
+
+        new_rpar_leaf = Leaf(token.RPAR, ")")
+        if old_rpar_leaf is not None:
+            replace_child(old_rpar_leaf, new_rpar_leaf)
+        else:
+            insert_str_child(new_rpar_leaf)
+        last_line.append(new_rpar_leaf)
+
+        # If the target string ended with a comma, we place this comma to the
+        # right of the RPAR on the last line.
+        if ends_with_comma:
+            comma_leaf = Leaf(token.COMMA, ",")
+            replace_child(LL[comma_idx], comma_leaf)
+            last_line.append(comma_leaf)
+
+        yield Ok(last_line)
+
+
+class StringParser:
+    """
+    A state machine that aids in parsing a string's "trailer", which can be
+    either non-existent, an old-style formatting sequence (e.g. `% varX` or `%
+    (varX, varY)`), or a method-call / attribute access (e.g. `.format(varX,
+    varY)`).
+
+    NOTE: A new StringParser object MUST be instantiated for each string
+    trailer we need to parse.
+
+    Examples:
+        We shall assume that `line` equals the `Line` object that corresponds
+        to the following line of Python code:
+        ```
+        x = "Some {}.".format("String") + some_other_string
+        ```
+
+        Furthermore, we will assume that `string_idx` is some index such that:
+        ```
+        assert line.leaves[string_idx].value == "Some {}."
+        ```
+
+        The following code snippet then holds:
+        ```
+        string_parser = StringParser()
+        idx = string_parser.parse(line.leaves, string_idx)
+        assert line.leaves[idx].type == token.PLUS
+        ```
+    """
+
+    DEFAULT_TOKEN: Final = 20210605
+
+    # String Parser States
+    START: Final = 1
+    DOT: Final = 2
+    NAME: Final = 3
+    PERCENT: Final = 4
+    SINGLE_FMT_ARG: Final = 5
+    LPAR: Final = 6
+    RPAR: Final = 7
+    DONE: Final = 8
+
+    # Lookup Table for Next State
+    _goto: Final[Dict[Tuple[ParserState, NodeType], ParserState]] = {
+        # A string trailer may start with '.' OR '%'.
+        (START, token.DOT): DOT,
+        (START, token.PERCENT): PERCENT,
+        (START, DEFAULT_TOKEN): DONE,
+        # A '.' MUST be followed by an attribute or method name.
+        (DOT, token.NAME): NAME,
+        # A method name MUST be followed by an '(', whereas an attribute name
+        # is the last symbol in the string trailer.
+        (NAME, token.LPAR): LPAR,
+        (NAME, DEFAULT_TOKEN): DONE,
+        # A '%' symbol can be followed by an '(' or a single argument (e.g. a
+        # string or variable name).
+        (PERCENT, token.LPAR): LPAR,
+        (PERCENT, DEFAULT_TOKEN): SINGLE_FMT_ARG,
+        # If a '%' symbol is followed by a single argument, that argument is
+        # the last leaf in the string trailer.
+        (SINGLE_FMT_ARG, DEFAULT_TOKEN): DONE,
+        # If present, a ')' symbol is the last symbol in a string trailer.
+        # (NOTE: LPARS and nested RPARS are not included in this lookup table,
+        # since they are treated as a special case by the parsing logic in this
+        # class's implementation.)
+        (RPAR, DEFAULT_TOKEN): DONE,
+    }
+
+    def __init__(self) -> None:
+        self._state = self.START
+        self._unmatched_lpars = 0
+
+    def parse(self, leaves: List[Leaf], string_idx: int) -> int:
+        """
+        Pre-conditions:
+            * @leaves[@string_idx].type == token.STRING
+
+        Returns:
+            The index directly after the last leaf which is a part of the
+            string trailer, if a "trailer" exists.
+            OR
+            @string_idx + 1, if no string "trailer" exists.
+        """
+        assert leaves[string_idx].type == token.STRING
+
+        idx = string_idx + 1
+        while idx < len(leaves) and self._next_state(leaves[idx]):
+            idx += 1
+        return idx
+
+    def _next_state(self, leaf: Leaf) -> bool:
+        """
+        Pre-conditions:
+            * On the first call to this function, @leaf MUST be the leaf that
+              was directly after the string leaf in question (e.g. if our target
+              string is `line.leaves[i]` then the first call to this method must
+              be `line.leaves[i + 1]`).
+            * On the next call to this function, the leaf parameter passed in
+              MUST be the leaf directly following @leaf.
+
+        Returns:
+            True iff @leaf is a part of the string's trailer.
+        """
+        # We ignore empty LPAR or RPAR leaves.
+        if is_empty_par(leaf):
+            return True
+
+        next_token = leaf.type
+        if next_token == token.LPAR:
+            self._unmatched_lpars += 1
+
+        current_state = self._state
+
+        # The LPAR parser state is a special case. We will return True until we
+        # find the matching RPAR token.
+        if current_state == self.LPAR:
+            if next_token == token.RPAR:
+                self._unmatched_lpars -= 1
+                if self._unmatched_lpars == 0:
+                    self._state = self.RPAR
+        # Otherwise, we use a lookup table to determine the next state.
+        else:
+            # If the lookup table matches the current state to the next
+            # token, we use the lookup table.
+            if (current_state, next_token) in self._goto:
+                self._state = self._goto[current_state, next_token]
+            else:
+                # Otherwise, we check if the current state was assigned a
+                # default.
+                if (current_state, self.DEFAULT_TOKEN) in self._goto:
+                    self._state = self._goto[current_state, self.DEFAULT_TOKEN]
+                # If no default has been assigned, then this parser has a logic
+                # error.
+                else:
+                    raise RuntimeError(f"{self.__class__.__name__} LOGIC ERROR!")
+
+        if self._state == self.DONE:
+            return False
+
+        return True
+
+
+def insert_str_child_factory(string_leaf: Leaf) -> Callable[[LN], None]:
+    """
+    Factory for a convenience function that is used to orphan @string_leaf
+    and then insert multiple new leaves into the same part of the node
+    structure that @string_leaf had originally occupied.
+
+    Examples:
+        Let `string_leaf = Leaf(token.STRING, '"foo"')` and `N =
+        string_leaf.parent`.
+        Assume the node `N` has the following
+        original structure:
+
+            Node(
+                expr_stmt, [
+                    Leaf(NAME, 'x'),
+                    Leaf(EQUAL, '='),
+                    Leaf(STRING, '"foo"'),
+                ]
+            )
+
+        We then run the code snippet shown below.
+        ```
+        insert_str_child = insert_str_child_factory(string_leaf)
+
+        lpar = Leaf(token.LPAR, '(')
+        insert_str_child(lpar)
+
+        bar = Leaf(token.STRING, '"bar"')
+        insert_str_child(bar)
+
+        rpar = Leaf(token.RPAR, ')')
+        insert_str_child(rpar)
+        ```
+
+        After which point, it follows that `string_leaf.parent is None` and
+        the node `N` now has the following structure:
+
+            Node(
+                expr_stmt, [
+                    Leaf(NAME, 'x'),
+                    Leaf(EQUAL, '='),
+                    Leaf(LPAR, '('),
+                    Leaf(STRING, '"bar"'),
+                    Leaf(RPAR, ')'),
+                ]
+            )
+    """
+    string_parent = string_leaf.parent
+    string_child_idx = string_leaf.remove()
+
+    def insert_str_child(child: LN) -> None:
+        nonlocal string_child_idx
+
+        assert string_parent is not None
+        assert string_child_idx is not None
+
+        string_parent.insert_child(string_child_idx, child)
+        string_child_idx += 1
+
+    return insert_str_child
+
+
+def is_valid_index_factory(seq: Sequence[Any]) -> Callable[[int], bool]:
+    """
+    Examples:
+        ```
+        my_list = [1, 2, 3]
+
+        is_valid_index = is_valid_index_factory(my_list)
+
+        assert is_valid_index(0)
+        assert is_valid_index(2)
+
+        assert not is_valid_index(3)
+        assert not is_valid_index(-1)
+        ```
+    """
+
+    def is_valid_index(idx: int) -> bool:
+        """
+        Returns:
+            True iff @idx is non-negative AND seq[@idx] does NOT raise an
+            IndexError.
+        """
+        return 0 <= idx < len(seq)
+
+    return is_valid_index
diff --git a/.venv/lib/python3.11/site-packages/blackd/__init__.py b/.venv/lib/python3.11/site-packages/blackd/__init__.py
new file mode 100644
index 0000000..ba4750b
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/blackd/__init__.py
@@ -0,0 +1,227 @@
+import asyncio
+import logging
+from concurrent.futures import Executor, ProcessPoolExecutor
+from datetime import datetime
+from functools import partial
+from multiprocessing import freeze_support
+from typing import Set, Tuple
+
+try:
+    from aiohttp import web
+
+    from .middlewares import cors
+except ImportError as ie:
+    raise ImportError(
+        f"aiohttp dependency is not installed: {ie}. 
" + + "Please re-install black with the '[d]' extra install " + + "to obtain aiohttp_cors: `pip install black[d]`" + ) from None + +import click + +import black +from _black_version import version as __version__ +from black.concurrency import maybe_install_uvloop + +# This is used internally by tests to shut down the server prematurely +_stop_signal = asyncio.Event() + +# Request headers +PROTOCOL_VERSION_HEADER = "X-Protocol-Version" +LINE_LENGTH_HEADER = "X-Line-Length" +PYTHON_VARIANT_HEADER = "X-Python-Variant" +SKIP_SOURCE_FIRST_LINE = "X-Skip-Source-First-Line" +SKIP_STRING_NORMALIZATION_HEADER = "X-Skip-String-Normalization" +SKIP_MAGIC_TRAILING_COMMA = "X-Skip-Magic-Trailing-Comma" +PREVIEW = "X-Preview" +FAST_OR_SAFE_HEADER = "X-Fast-Or-Safe" +DIFF_HEADER = "X-Diff" + +BLACK_HEADERS = [ + PROTOCOL_VERSION_HEADER, + LINE_LENGTH_HEADER, + PYTHON_VARIANT_HEADER, + SKIP_SOURCE_FIRST_LINE, + SKIP_STRING_NORMALIZATION_HEADER, + SKIP_MAGIC_TRAILING_COMMA, + PREVIEW, + FAST_OR_SAFE_HEADER, + DIFF_HEADER, +] + +# Response headers +BLACK_VERSION_HEADER = "X-Black-Version" + + +class InvalidVariantHeader(Exception): + pass + + +@click.command(context_settings={"help_option_names": ["-h", "--help"]}) +@click.option( + "--bind-host", type=str, help="Address to bind the server to.", default="localhost" +) +@click.option("--bind-port", type=int, help="Port to listen on", default=45484) +@click.version_option(version=black.__version__) +def main(bind_host: str, bind_port: int) -> None: + logging.basicConfig(level=logging.INFO) + app = make_app() + ver = black.__version__ + black.out(f"blackd version {ver} listening on {bind_host} port {bind_port}") + web.run_app(app, host=bind_host, port=bind_port, handle_signals=True, print=None) + + +def make_app() -> web.Application: + app = web.Application( + middlewares=[cors(allow_headers=(*BLACK_HEADERS, "Content-Type"))] + ) + executor = ProcessPoolExecutor() + app.add_routes([web.post("/", partial(handle, executor=executor))]) + return app + + +async def handle(request: web.Request, executor: Executor) -> web.Response: + headers = {BLACK_VERSION_HEADER: __version__} + try: + if request.headers.get(PROTOCOL_VERSION_HEADER, "1") != "1": + return web.Response( + status=501, text="This server only supports protocol version 1" + ) + try: + line_length = int( + request.headers.get(LINE_LENGTH_HEADER, black.DEFAULT_LINE_LENGTH) + ) + except ValueError: + return web.Response(status=400, text="Invalid line length header value") + + if PYTHON_VARIANT_HEADER in request.headers: + value = request.headers[PYTHON_VARIANT_HEADER] + try: + pyi, versions = parse_python_variant_header(value) + except InvalidVariantHeader as e: + return web.Response( + status=400, + text=f"Invalid value for {PYTHON_VARIANT_HEADER}: {e.args[0]}", + ) + else: + pyi = False + versions = set() + + skip_string_normalization = bool( + request.headers.get(SKIP_STRING_NORMALIZATION_HEADER, False) + ) + skip_magic_trailing_comma = bool( + request.headers.get(SKIP_MAGIC_TRAILING_COMMA, False) + ) + skip_source_first_line = bool( + request.headers.get(SKIP_SOURCE_FIRST_LINE, False) + ) + preview = bool(request.headers.get(PREVIEW, False)) + fast = False + if request.headers.get(FAST_OR_SAFE_HEADER, "safe") == "fast": + fast = True + mode = black.FileMode( + target_versions=versions, + is_pyi=pyi, + line_length=line_length, + skip_source_first_line=skip_source_first_line, + string_normalization=not skip_string_normalization, + magic_trailing_comma=not skip_magic_trailing_comma, + preview=preview, + ) 
+ req_bytes = await request.content.read() + charset = request.charset if request.charset is not None else "utf8" + req_str = req_bytes.decode(charset) + then = datetime.utcnow() + + header = "" + if skip_source_first_line: + first_newline_position: int = req_str.find("\n") + 1 + header = req_str[:first_newline_position] + req_str = req_str[first_newline_position:] + + loop = asyncio.get_event_loop() + formatted_str = await loop.run_in_executor( + executor, partial(black.format_file_contents, req_str, fast=fast, mode=mode) + ) + + # Preserve CRLF line endings + if req_str[req_str.find("\n") - 1] == "\r": + formatted_str = formatted_str.replace("\n", "\r\n") + # If, after swapping line endings, nothing changed, then say so + if formatted_str == req_str: + raise black.NothingChanged + + # Put the source first line back + req_str = header + req_str + formatted_str = header + formatted_str + + # Only output the diff in the HTTP response + only_diff = bool(request.headers.get(DIFF_HEADER, False)) + if only_diff: + now = datetime.utcnow() + src_name = f"In\t{then} +0000" + dst_name = f"Out\t{now} +0000" + loop = asyncio.get_event_loop() + formatted_str = await loop.run_in_executor( + executor, + partial(black.diff, req_str, formatted_str, src_name, dst_name), + ) + + return web.Response( + content_type=request.content_type, + charset=charset, + headers=headers, + text=formatted_str, + ) + except black.NothingChanged: + return web.Response(status=204, headers=headers) + except black.InvalidInput as e: + return web.Response(status=400, headers=headers, text=str(e)) + except Exception as e: + logging.exception("Exception during handling a request") + return web.Response(status=500, headers=headers, text=str(e)) + + +def parse_python_variant_header(value: str) -> Tuple[bool, Set[black.TargetVersion]]: + if value == "pyi": + return True, set() + else: + versions = set() + for version in value.split(","): + if version.startswith("py"): + version = version[len("py") :] + if "." in version: + major_str, *rest = version.split(".") + else: + major_str = version[0] + rest = [version[1:]] if len(version) > 1 else [] + try: + major = int(major_str) + if major not in (2, 3): + raise InvalidVariantHeader("major version must be 2 or 3") + if len(rest) > 0: + minor = int(rest[0]) + if major == 2: + raise InvalidVariantHeader("Python 2 is not supported") + else: + # Default to lowest supported minor version. + minor = 7 if major == 2 else 3 + version_str = f"PY{major}{minor}" + if major == 3 and not hasattr(black.TargetVersion, version_str): + raise InvalidVariantHeader(f"3.{minor} is not supported") + versions.add(black.TargetVersion[version_str]) + except (KeyError, ValueError): + raise InvalidVariantHeader("expected e.g. 
'3.7', 'py3.5'") from None + return False, versions + + +def patched_main() -> None: + maybe_install_uvloop() + freeze_support() + black.patch_click() + main() + + +if __name__ == "__main__": + patched_main() diff --git a/.venv/lib/python3.11/site-packages/blackd/__main__.py b/.venv/lib/python3.11/site-packages/blackd/__main__.py new file mode 100644 index 0000000..b5a4b13 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/blackd/__main__.py @@ -0,0 +1,3 @@ +import blackd + +blackd.patched_main() diff --git a/.venv/lib/python3.11/site-packages/blackd/middlewares.py b/.venv/lib/python3.11/site-packages/blackd/middlewares.py new file mode 100644 index 0000000..370e0ae --- /dev/null +++ b/.venv/lib/python3.11/site-packages/blackd/middlewares.py @@ -0,0 +1,45 @@ +from typing import TYPE_CHECKING, Any, Awaitable, Callable, Iterable, TypeVar + +from aiohttp.web_request import Request +from aiohttp.web_response import StreamResponse + +if TYPE_CHECKING: + F = TypeVar("F", bound=Callable[..., Any]) + middleware: Callable[[F], F] +else: + try: + from aiohttp.web_middlewares import middleware + except ImportError: + # @middleware is deprecated and its behaviour is the default since aiohttp 4.0 + # so if it doesn't exist anymore, define a no-op for forward compatibility. + middleware = lambda x: x # noqa: E731 + +Handler = Callable[[Request], Awaitable[StreamResponse]] +Middleware = Callable[[Request, Handler], Awaitable[StreamResponse]] + + +def cors(allow_headers: Iterable[str]) -> Middleware: + @middleware + async def impl(request: Request, handler: Handler) -> StreamResponse: + is_options = request.method == "OPTIONS" + is_preflight = is_options and "Access-Control-Request-Method" in request.headers + if is_preflight: + resp = StreamResponse() + else: + resp = await handler(request) + + origin = request.headers.get("Origin") + if not origin: + return resp + + resp.headers["Access-Control-Allow-Origin"] = "*" + resp.headers["Access-Control-Expose-Headers"] = "*" + if is_options: + resp.headers["Access-Control-Allow-Headers"] = ", ".join(allow_headers) + resp.headers["Access-Control-Allow-Methods"] = ", ".join( + ("OPTIONS", "POST") + ) + + return resp + + return impl diff --git a/.venv/lib/python3.11/site-packages/bleach-6.0.0.dist-info/INSTALLER b/.venv/lib/python3.11/site-packages/bleach-6.0.0.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/bleach-6.0.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/lib/python3.11/site-packages/bleach-6.0.0.dist-info/LICENSE b/.venv/lib/python3.11/site-packages/bleach-6.0.0.dist-info/LICENSE new file mode 100644 index 0000000..467c38e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/bleach-6.0.0.dist-info/LICENSE @@ -0,0 +1,13 @@ +Copyright (c) 2014-2017, Mozilla Foundation + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/.venv/lib/python3.11/site-packages/bleach-6.0.0.dist-info/METADATA b/.venv/lib/python3.11/site-packages/bleach-6.0.0.dist-info/METADATA new file mode 100644 index 0000000..cf72d93 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/bleach-6.0.0.dist-info/METADATA @@ -0,0 +1,1222 @@ +Metadata-Version: 2.1 +Name: bleach +Version: 6.0.0 +Summary: An easy safelist-based HTML-sanitizing tool. +Home-page: https://github.com/mozilla/bleach +Maintainer: Will Kahn-Greene +Maintainer-email: willkg@mozilla.com +License: Apache Software License +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Web Environment +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst +License-File: LICENSE +Requires-Dist: six (>=1.9.0) +Requires-Dist: webencodings +Provides-Extra: css +Requires-Dist: tinycss2 (<1.2,>=1.1.0) ; extra == 'css' + +====== +Bleach +====== + +.. image:: https://github.com/mozilla/bleach/workflows/Test/badge.svg + :target: https://github.com/mozilla/bleach/actions?query=workflow%3ATest + +.. image:: https://github.com/mozilla/bleach/workflows/Lint/badge.svg + :target: https://github.com/mozilla/bleach/actions?query=workflow%3ALint + +.. image:: https://badge.fury.io/py/bleach.svg + :target: http://badge.fury.io/py/bleach + +Bleach is an allowed-list-based HTML sanitizing library that escapes or strips +markup and attributes. + +Bleach can also linkify text safely, applying filters that Django's ``urlize`` +filter cannot, and optionally setting ``rel`` attributes, even on links already +in the text. + +Bleach is intended for sanitizing text from *untrusted* sources. If you find +yourself jumping through hoops to allow your site administrators to do lots of +things, you're probably outside the use cases. Either trust those users, or +don't. + +Because it relies on html5lib_, Bleach is as good as modern browsers at dealing +with weird, quirky HTML fragments. And *any* of Bleach's methods will fix +unbalanced or mis-nested tags. + +The version on GitHub_ is the most up-to-date and contains the latest bug +fixes. You can find full documentation on `ReadTheDocs`_. + +:Code: https://github.com/mozilla/bleach +:Documentation: https://bleach.readthedocs.io/ +:Issue tracker: https://github.com/mozilla/bleach/issues +:License: Apache License v2; see LICENSE file + + +Reporting Bugs +============== + +For regular bugs, please report them `in our issue tracker +`_. + +If you believe that you've found a security vulnerability, please `file a secure +bug report in our bug tracker +`_ +or send an email to *security AT mozilla DOT org*. 
+
+For more information on security-related bug disclosure and the PGP key to use
+for sending encrypted mail or to verify responses received from that address,
+please read our wiki page at
+``_.
+
+
+Security
+========
+
+Bleach is a security-focused library.
+
+We have a responsible security vulnerability reporting process. Please use
+that if you're reporting a security issue.
+
+Security issues are fixed in private. After we land such a fix, we'll do a
+release.
+
+For every release, we mark security issues we've fixed in the ``CHANGES`` in
+the **Security issues** section. We include any relevant CVE links.
+
+
+Installing Bleach
+=================
+
+Bleach is available on PyPI_, so you can install it with ``pip``::
+
+    $ pip install bleach
+
+
+Upgrading Bleach
+================
+
+.. warning::
+
+   Before doing any upgrades, read through `Bleach Changes
+   `_ for backwards
+   incompatible changes, newer versions, etc.
+
+   Bleach follows `semver 2`_ versioning. Vendored libraries will not
+   be changed in patch releases.
+
+
+Basic use
+=========
+
+The simplest way to use Bleach is:
+
+.. code-block:: python
+
+    >>> import bleach
+
+    >>> bleach.clean('an <script>evil()</script> example')
+    u'an &lt;script&gt;evil()&lt;/script&gt; example'
+
+    >>> bleach.linkify('an http://example.com url')
+    u'an <a href="http://example.com" rel="nofollow">http://example.com</a> url'
+
+
+Code of Conduct
+===============
+
+This project and repository is governed by Mozilla's code of conduct and
+etiquette guidelines. For more details please see the `CODE_OF_CONDUCT.md
+`_
+
+
+.. _html5lib: https://github.com/html5lib/html5lib-python
+.. _GitHub: https://github.com/mozilla/bleach
+.. _ReadTheDocs: https://bleach.readthedocs.io/
+.. _PyPI: https://pypi.org/project/bleach/
+.. _semver 2: https://semver.org/
+
+
+Bleach changes
+==============
+
+Version 6.0.0 (January 23rd, 2023)
+----------------------------------
+
+**Backwards incompatible changes**
+
+* ``bleach.clean``, ``bleach.sanitizer.Cleaner``,
+  ``bleach.html5lib_shim.BleachHTMLParser``: the ``tags`` and ``protocols``
+  arguments were changed from lists to sets.
+
+  Old pre-6.0.0:
+
+  .. code-block:: python
+
+     bleach.clean(
+         "some text",
+         tags=["a", "p", "img"],
+         #    ^              ^ list
+         protocols=["http", "https"],
+         #         ^                ^ list
+     )
+
+
+  New 6.0.0 and later:
+
+  .. code-block:: python
+
+     bleach.clean(
+         "some text",
+         tags={"a", "p", "img"},
+         #    ^              ^ set
+         protocols={"http", "https"},
+         #         ^                ^ set
+     )
+
+* ``bleach.linkify``, ``bleach.linkifier.Linker``: the ``skip_tags`` and
+  ``recognized_tags`` arguments were changed from lists to sets.
+
+  Old pre-6.0.0:
+
+  .. code-block:: python
+
+     bleach.linkify(
+         "some text",
+         skip_tags=["pre"],
+         #         ^      ^ list
+     )
+
+     linker = Linker(
+         skip_tags=["pre"],
+         #         ^      ^ list
+         recognized_tags=html5lib_shim.HTML_TAGS + ["custom-element"],
+         #                                       ^ ^                ^ list
+         #                                       |
+         #                                       | list concatenation
+     )
+
+  New 6.0.0 and later:
+
+  .. code-block:: python
+
+     bleach.linkify(
+         "some text",
+         skip_tags={"pre"},
+         #         ^      ^ set
+     )
+
+     linker = Linker(
+         skip_tags={"pre"},
+         #         ^      ^ set
+         recognized_tags=html5lib_shim.HTML_TAGS | {"custom-element"},
+         #                                       ^ ^                ^ set
+         #                                       |
+         #                                       | union operator
+     )
+
+* ``bleach.sanitizer.BleachSanitizerFilter``: ``strip_allowed_elements`` is now
+  ``strip_allowed_tags``. We now use "tags" everywhere rather than a mishmash
+  of "tags" in some places and "elements" in others.
+
+
+**Security fixes**
+
+None
+
+
+**Bug fixes**
+
+* Add support for Python 3.11. (#675)
+
+* Fix API weirdness in ``BleachSanitizerFilter``. 
(#649) + + We're using "tags" instead of "elements" everywhere--no more weird + overloading of "elements" anymore. + + Also, it no longer calls the superclass constructor. + +* Add warning when ``css_sanitizer`` isn't set, but the ``style`` + attribute is allowed. (#676) + +* Fix linkify handling of character entities. (#501) + +* Rework dev dependencies to use ``requirements-dev.txt`` and + ``requirements-flake8.txt`` instead of extras. + +* Fix project infrastructure to be tox-based so it's easier to have CI + run the same things we're running in development and with flake8 + in an isolated environment. + +* Update action versions in CI. + +* Switch to f-strings where possible. Make tests parametrized to be + easier to read/maintain. + + +Version 5.0.1 (June 27th, 2022) +------------------------------- + +**Security fixes** + +None + + +**Bug fixes** + +* Add missing comma to tinycss2 require. Thank you, @shadchin! + +* Add url parse tests based on wpt url tests. (#688) + +* Support scheme-less urls if "https" is in allow list. (#662) + +* Handle escaping ``<`` in edge cases where it doesn't start a tag. (#544) + +* Fix reference warnings in docs. (#660) + +* Correctly urlencode email address parts. Thank you, @larseggert! (#659) + + +Version 5.0.0 (April 7th, 2022) +------------------------------- + +**Backwards incompatible changes** + +* ``clean`` and ``linkify`` now preserve the order of HTML attributes. Thank + you, @askoretskly! (#566) + +* Drop support for Python 3.6. Thank you, @hugovk! (#629) + +* CSS sanitization in style tags is completely different now. If you're using + Bleach ``clean`` to sanitize css in style tags, you'll need to update your + code and you'll need to install the ``css`` extras:: + + pip install 'bleach[css]' + + See `the documentation on sanitizing CSS for how to do it + `_. (#633) + +**Security fixes** + +None + +**Bug fixes** + +* Rework dev dependencies. We no longer have + ``requirements-dev.in``/``requirements-dev.txt``. Instead, we're using + ``dev`` extras. + + See `development docs `_ + for more details. (#620) + +* Add newline when dropping block-level tags. Thank you, @jvanasco! (#369) + + +Version 4.1.0 (August 25th, 2021) +--------------------------------- + +**Features** + +* Python 3.9 support + +**Security fixes** + +None + +**Bug fixes** + +* Update sanitizer clean to use vendored 3.6.14 stdlib urllib.parse to + fix test failures on Python 3.9. (#536) + + +Version 4.0.0 (August 3rd, 2021) +-------------------------------- + +**Backwards incompatible changes** + +* Drop support for unsupported Python versions <3.6. (#520) + +**Security fixes** + +None + +**Features** + +* fix attribute name in the linkify docs (thanks @CheesyFeet!) + + +Version 3.3.1 (July 14th, 2021) +------------------------------- + +**Security fixes** + +None + +**Features** + +* add more tests for CVE-2021-23980 / GHSA-vv2x-vrpj-qqpq +* bump python version to 3.8 for tox doc, vendorverify, and lint targets +* update bug report template tag +* update vendorverify script to detect and fail when extra files are vendored +* update release process docs to check vendorverify passes locally + +**Bug fixes** + +* remove extra vendored django present in the v3.3.0 whl (#595) +* duplicate h1 header doc fix (thanks Nguyễn Gia Phong / @McSinyx!) 
+ + +Version 3.3.0 (February 1st, 2021) +---------------------------------- + +**Backwards incompatible changes** + +* clean escapes HTML comments even when strip_comments=False + +**Security fixes** + +* Fix bug 1621692 / GHSA-m6xf-fq7q-8743. See the advisory for details. + +**Features** + +None + +**Bug fixes** + +None + + +Version 3.2.3 (January 26th, 2021) +---------------------------------- + +**Security fixes** + +None + +**Features** + +None + +**Bug fixes** + +* fix clean and linkify raising ValueErrors for certain inputs. Thank you @Google-Autofuzz. + + +Version 3.2.2 (January 20th, 2021) +---------------------------------- + +**Security fixes** + +None + +**Features** + +* Migrate CI to Github Actions. Thank you @hugovk. + +**Bug fixes** + +* fix linkify raising an IndexError on certain inputs. Thank you @Google-Autofuzz. + + +Version 3.2.1 (September 18th, 2020) +------------------------------------ + +**Security fixes** + +None + +**Features** + +None + +**Bug fixes** + +* change linkifier to add rel="nofollow" as documented. Thank you @mitar. +* suppress html5lib sanitizer DeprecationWarnings (#557) + + +Version 3.2.0 (September 16th, 2020) +------------------------------------ + +**Security fixes** + +None + +**Features** + +None + +**Bug fixes** + +* ``html5lib`` dependency to version 1.1.0. Thank you Sam Sneddon. +* update tests_website terminology. Thank you Thomas Grainger. + + +Version 3.1.5 (April 29th, 2020) +-------------------------------- + +**Security fixes** + +None + +**Features** + +None + +**Bug fixes** + +* replace missing ``setuptools`` dependency with ``packaging``. Thank you Benjamin Peterson. + + +Version 3.1.4 (March 24th, 2020) +-------------------------------- + +**Security fixes** + +* ``bleach.clean`` behavior parsing style attributes could result in a + regular expression denial of service (ReDoS). + + Calls to ``bleach.clean`` with an allowed tag with an allowed + ``style`` attribute were vulnerable to ReDoS. For example, + ``bleach.clean(..., attributes={'a': ['style']})``. + + This issue was confirmed in Bleach versions v3.1.3, v3.1.2, v3.1.1, + v3.1.0, v3.0.0, v2.1.4, and v2.1.3. Earlier versions used a similar + regular expression and should be considered vulnerable too. + + Anyone using Bleach <=v3.1.3 is encouraged to upgrade. + + https://bugzilla.mozilla.org/show_bug.cgi?id=1623633 + +**Backwards incompatible changes** + +* Style attributes with dashes, or single or double quoted values are + cleaned instead of passed through. + +**Features** + +None + +**Bug fixes** + +None + + +Version 3.1.3 (March 17th, 2020) +-------------------------------- + +**Security fixes** + +None + +**Backwards incompatible changes** + +* Drop support for Python 3.4. Thank you, @hugovk! + +* Drop deprecated ``setup.py test`` support. Thank you, @jdufresne! (#507) + +**Features** + +* Add support for Python 3.8. Thank you, @jdufresne! + +* Add support for PyPy 7. Thank you, @hugovk! + +* Add pypy3 testing to tox and travis. Thank you, @jdufresne! + +**Bug fixes** + +* Add relative link to code of conduct. (#442) + +* Fix typo: curren -> current in tests/test_clean.py Thank you, timgates42! (#504) + +* Fix handling of non-ascii style attributes. Thank you, @sekineh! (#426) + +* Simplify tox configuration. Thank you, @jdufresne! + +* Make documentation reproducible. Thank you, @lamby! + +* Fix typos in code comments. Thank you, @zborboa-g! + +* Fix exception value testing. Thank you, @mastizada! + +* Fix parser-tags NoneType exception. Thank you, @bope! 
+ +* Improve TLD support in linkify. Thank you, @pc-coholic! + + +Version 3.1.2 (March 11th, 2020) +-------------------------------- + +**Security fixes** + +* ``bleach.clean`` behavior parsing embedded MathML and SVG content + with RCDATA tags did not match browser behavior and could result in + a mutation XSS. + + Calls to ``bleach.clean`` with ``strip=False`` and ``math`` or + ``svg`` tags and one or more of the RCDATA tags ``script``, + ``noscript``, ``style``, ``noframes``, ``iframe``, ``noembed``, or + ``xmp`` in the allowed tags whitelist were vulnerable to a mutation + XSS. + + This security issue was confirmed in Bleach version v3.1.1. Earlier + versions are likely affected too. + + Anyone using Bleach <=v3.1.1 is encouraged to upgrade. + + https://bugzilla.mozilla.org/show_bug.cgi?id=1621692 + +**Backwards incompatible changes** + +None + +**Features** + +None + +**Bug fixes** + +None + + +Version 3.1.1 (February 13th, 2020) +----------------------------------- + +**Security fixes** + +* ``bleach.clean`` behavior parsing ``noscript`` tags did not match + browser behavior. + + Calls to ``bleach.clean`` allowing ``noscript`` and one or more of + the raw text tags (``title``, ``textarea``, ``script``, ``style``, + ``noembed``, ``noframes``, ``iframe``, and ``xmp``) were vulnerable + to a mutation XSS. + + This security issue was confirmed in Bleach versions v2.1.4, v3.0.2, + and v3.1.0. Earlier versions are probably affected too. + + Anyone using Bleach <=v3.1.0 is highly encouraged to upgrade. + + https://bugzilla.mozilla.org/show_bug.cgi?id=1615315 + +**Backwards incompatible changes** + +None + +**Features** + +None + +**Bug fixes** + +None + + +Version 3.1.0 (January 9th, 2019) +--------------------------------- + +**Security fixes** + +None + +**Backwards incompatible changes** + +None + +**Features** + +* Add ``recognized_tags`` argument to the linkify ``Linker`` class. This + fixes issues when linkifying on its own and having some tags get escaped. + It defaults to a list of HTML5 tags. Thank you, Chad Birch! (#409) + +**Bug fixes** + +* Add ``six>=1.9`` to requirements. Thank you, Dave Shawley (#416) + +* Fix cases where attribute names could have invalid characters in them. + (#419) + +* Fix problems with ``LinkifyFilter`` not being able to match links + across ``&``. (#422) + +* Fix ``InputStreamWithMemory`` when the ``BleachHTMLParser`` is + parsing ``meta`` tags. (#431) + +* Fix doctests. (#357) + + +Version 3.0.2 (October 11th, 2018) +---------------------------------- + +**Security fixes** + +None + +**Backwards incompatible changes** + +None + +**Features** + +None + +**Bug fixes** + +* Merge ``Characters`` tokens after sanitizing them. This fixes issues in the + ``LinkifyFilter`` where it was only linkifying parts of urls. (#374) + + +Version 3.0.1 (October 9th, 2018) +--------------------------------- + +**Security fixes** + +None + +**Backwards incompatible changes** + +None + +**Features** + +* Support Python 3.7. It supported Python 3.7 just fine, but we added 3.7 to + the list of Python environments we test so this is now officially supported. + (#377) + +**Bug fixes** + +* Fix ``list`` object has no attribute ``lower`` in ``clean``. (#398) +* Fix ``abbr`` getting escaped in ``linkify``. (#400) + + +Version 3.0.0 (October 3rd, 2018) +--------------------------------- + +**Security fixes** + +None + +**Backwards incompatible changes** + +* A bunch of functions were moved from one module to another. 
+ + These were moved from ``bleach.sanitizer`` to ``bleach.html5lib_shim``: + + * ``convert_entity`` + * ``convert_entities`` + * ``match_entity`` + * ``next_possible_entity`` + * ``BleachHTMLSerializer`` + * ``BleachHTMLTokenizer`` + * ``BleachHTMLParser`` + + These functions and classes weren't documented and aren't part of the + public API, but people read code and might be using them so we're + considering it an incompatible API change. + + If you're using them, you'll need to update your code. + +**Features** + +* Bleach no longer depends on html5lib. html5lib==1.0.1 is now vendored into + Bleach. You can remove it from your requirements file if none of your other + requirements require html5lib. + + This means Bleach will now work fine with other libraries that depend on + html5lib regardless of what version of html5lib they require. (#386) + +**Bug fixes** + +* Fixed tags getting added when using clean or linkify. This was a + long-standing regression from the Bleach 2.0 rewrite. (#280, #392) + +* Fixed ```` getting replaced with a string. Now it gets escaped or + stripped depending on whether it's in the allowed tags or not. (#279) + + +Version 2.1.4 (August 16th, 2018) +--------------------------------- + +**Security fixes** + +None + +**Backwards incompatible changes** + +* Dropped support for Python 3.3. (#328) + +**Features** + +None + +**Bug fixes** + +* Handle ambiguous ampersands in correctly. (#359) + + +Version 2.1.3 (March 5th, 2018) +------------------------------- + +**Security fixes** + +* Attributes that have URI values weren't properly sanitized if the + values contained character entities. Using character entities, it + was possible to construct a URI value with a scheme that was not + allowed that would slide through unsanitized. + + This security issue was introduced in Bleach 2.1. Anyone using + Bleach 2.1 is highly encouraged to upgrade. + + https://bugzilla.mozilla.org/show_bug.cgi?id=1442745 + +**Backwards incompatible changes** + +None + +**Features** + +None + +**Bug fixes** + +* Fixed some other edge cases for attribute URI value sanitizing and + improved testing of this code. + + +Version 2.1.2 (December 7th, 2017) +---------------------------------- + +**Security fixes** + +None + +**Backwards incompatible changes** + +None + +**Features** + +None + +**Bug fixes** + +* Support html5lib-python 1.0.1. (#337) + +* Add deprecation warning for supporting html5lib-python < 1.0. + +* Switch to semver. + + +Version 2.1.1 (October 2nd, 2017) +--------------------------------- + +**Security fixes** + +None + +**Backwards incompatible changes** + +None + +**Features** + +None + +**Bug fixes** + +* Fix ``setup.py`` opening files when ``LANG=``. (#324) + + +Version 2.1 (September 28th, 2017) +---------------------------------- + +**Security fixes** + +* Convert control characters (backspace particularly) to "?" preventing + malicious copy-and-paste situations. (#298) + + See ``_ for more details. + + This affects all previous versions of Bleach. Check the comments on that + issue for ways to alleviate the issue if you can't upgrade to Bleach 2.1. + + +**Backwards incompatible changes** + +* Redid versioning. ``bleach.VERSION`` is no longer available. Use the string + version at ``bleach.__version__`` and parse it with + ``pkg_resources.parse_version``. (#307) + +* clean, linkify: linkify and clean should only accept text types; thank you, + Janusz! 
(#292) + +* clean, linkify: accept only unicode or utf-8-encoded str (#176) + + +**Features** + + +**Bug fixes** + +* ``bleach.clean()`` no longer unescapes entities including ones that are missing + a ``;`` at the end which can happen in urls and other places. (#143) + +* linkify: fix http links inside of mailto links; thank you, sedrubal! (#300) + +* clarify security policy in docs (#303) + +* fix dependency specification for html5lib 1.0b8, 1.0b9, and 1.0b10; thank you, + Zoltán! (#268) + +* add Bleach vs. html5lib comparison to README; thank you, Stu Cox! (#278) + +* fix KeyError exceptions on tags without href attr; thank you, Alex Defsen! + (#273) + +* add test website and scripts to test ``bleach.clean()`` output in browser; + thank you, Greg Guthe! + + +Version 2.0 (March 8th, 2017) +----------------------------- + +**Security fixes** + +* None + + +**Backwards incompatible changes** + +* Removed support for Python 2.6. (#206) + +* Removed support for Python 3.2. (#224) + +* Bleach no longer supports html5lib < 0.99999999 (8 9s). + + This version is a rewrite to use the new sanitizing API since the old + one was dropped in html5lib 0.99999999 (8 9s). + + If you're using 0.9999999 (7 9s) upgrade to 0.99999999 (8 9s) or higher. + + If you're using 1.0b8 (equivalent to 0.9999999 (7 9s)), upgrade to 1.0b9 + (equivalent to 0.99999999 (8 9s)) or higher. + +* ``bleach.clean`` and friends were rewritten + + ``clean`` was reimplemented as an html5lib filter and happens at a different + step in the HTML parsing -> traversing -> serializing process. Because of + that, there are some differences in clean's output as compared with previous + versions. + + Amongst other things, this version will add end tags even if the tag in + question is to be escaped. + +* ``bleach.clean`` and friends attribute callables now take three arguments: + tag, attribute name and attribute value. Previously they only took attribute + name and attribute value. + + All attribute callables will need to be updated. + +* ``bleach.linkify`` was rewritten + + ``linkify`` was reimplemented as an html5lib Filter. As such, it no longer + accepts a ``tokenizer`` argument. + + The callback functions for adjusting link attributes now takes a namespaced + attribute. + + Previously you'd do something like this:: + + def check_protocol(attrs, is_new): + if not attrs.get('href', '').startswith('http:', 'https:')): + return None + return attrs + + Now it's more like this:: + + def check_protocol(attrs, is_new): + if not attrs.get((None, u'href'), u'').startswith(('http:', 'https:')): + # ^^^^^^^^^^^^^^^ + return None + return attrs + + Further, you need to make sure you're always using unicode values. If you + don't then html5lib will raise an assertion error that the value is not + unicode. + + All linkify filters will need to be updated. + +* ``bleach.linkify`` and friends had a ``skip_pre`` argument--that's been + replaced with a more general ``skip_tags`` argument. + + Before, you might do:: + + bleach.linkify(some_text, skip_pre=True) + + The equivalent with Bleach 2.0 is:: + + bleach.linkify(some_text, skip_tags=['pre']) + + You can skip other tags, too, like ``style`` or ``script`` or other places + where you don't want linkification happening. + + All uses of linkify that use ``skip_pre`` will need to be updated. + + +**Changes** + +* Supports Python 3.6. + +* Supports html5lib >= 0.99999999 (8 9s). + +* There's a ``bleach.sanitizer.Cleaner`` class that you can instantiate with your + favorite clean settings for easy reuse. 
+ +* There's a ``bleach.linkifier.Linker`` class that you can instantiate with your + favorite linkify settings for easy reuse. + +* There's a ``bleach.linkifier.LinkifyFilter`` which is an htm5lib filter that + you can pass as a filter to ``bleach.sanitizer.Cleaner`` allowing you to clean + and linkify in one pass. + +* ``bleach.clean`` and friends can now take a callable as an attributes arg value. + +* Tons of bug fixes. + +* Cleaned up tests. + +* Documentation fixes. + + +Version 1.5 (November 4th, 2016) +-------------------------------- + +**Security fixes** + +* None + +**Backwards incompatible changes** + +* clean: The list of ``ALLOWED_PROTOCOLS`` now defaults to http, https and + mailto. + + Previously it was a long list of protocols something like ed2k, ftp, http, + https, irc, mailto, news, gopher, nntp, telnet, webcal, xmpp, callto, feed, + urn, aim, rsync, tag, ssh, sftp, rtsp, afs, data. (#149) + +**Changes** + +* clean: Added ``protocols`` to arguments list to let you override the list of + allowed protocols. Thank you, Andreas Malecki! (#149) + +* linkify: Fix a bug involving periods at the end of an email address. Thank you, + Lorenz Schori! (#219) + +* linkify: Fix linkification of non-ascii ports. Thank you Alexandre, Macabies! + (#207) + +* linkify: Fix linkify inappropriately removing node tails when dropping nodes. + (#132) + +* Fixed a test that failed periodically. (#161) + +* Switched from nose to py.test. (#204) + +* Add test matrix for all supported Python and html5lib versions. (#230) + +* Limit to html5lib ``>=0.999,!=0.9999,!=0.99999,<0.99999999`` because 0.9999 + and 0.99999 are busted. + +* Add support for ``python setup.py test``. (#97) + + +Version 1.4.3 (May 23rd, 2016) +------------------------------ + +**Security fixes** + +* None + +**Changes** + +* Limit to html5lib ``>=0.999,<0.99999999`` because of impending change to + sanitizer api. #195 + + +Version 1.4.2 (September 11, 2015) +---------------------------------- + +**Changes** + +* linkify: Fix hang in linkify with ``parse_email=True``. (#124) + +* linkify: Fix crash in linkify when removing a link that is a first-child. (#136) + +* Updated TLDs. + +* linkify: Don't remove exterior brackets when linkifying. (#146) + + +Version 1.4.1 (December 15, 2014) +--------------------------------- + +**Changes** + +* Consistent order of attributes in output. + +* Python 3.4 support. + + +Version 1.4 (January 12, 2014) +------------------------------ + +**Changes** + +* linkify: Update linkify to use etree type Treewalker instead of simpletree. + +* Updated html5lib to version ``>=0.999``. + +* Update all code to be compatible with Python 3 and 2 using six. + +* Switch to Apache License. + + +Version 1.3 +----------- + +* Used by Python 3-only fork. + + +Version 1.2.2 (May 18, 2013) +---------------------------- + +* Pin html5lib to version 0.95 for now due to major API break. + + +Version 1.2.1 (February 19, 2013) +--------------------------------- + +* ``clean()`` no longer considers ``feed:`` an acceptable protocol due to + inconsistencies in browser behavior. + + +Version 1.2 (January 28, 2013) +------------------------------ + +* ``linkify()`` has changed considerably. Many keyword arguments have been + replaced with a single callbacks list. Please see the documentation for more + information. + +* Bleach will no longer consider unacceptable protocols when linkifying. + +* ``linkify()`` now takes a tokenizer argument that allows it to skip + sanitization. + +* ``delinkify()`` is gone. 
+ +* Removed exception handling from ``_render``. ``clean()`` and ``linkify()`` may + now throw. + +* ``linkify()`` correctly ignores case for protocols and domain names. + +* ``linkify()`` correctly handles markup within an ``<a>`` tag. + + +Version 1.1.5 +------------- + + +Version 1.1.4 +------------- + + +Version 1.1.3 (July 10, 2012) +----------------------------- + +* Fix parsing bare URLs when ``parse_email=True``. + + +Version 1.1.2 (June 1, 2012) +---------------------------- + +* Fix hang in style attribute sanitizer. (#61) + +* Allow ``/`` in style attribute values. + + +Version 1.1.1 (February 17, 2012) +--------------------------------- + +* Fix tokenizer for html5lib 0.9.5. + + +Version 1.1.0 (October 24, 2011) +-------------------------------- + +* ``linkify()`` now understands port numbers. (#38) + +* Documented character encoding behavior. (#41) + +* Add an optional target argument to ``linkify()``. + +* Add ``delinkify()`` method. (#45) + +* Support subdomain whitelist for ``delinkify()``. (#47, #48) + + +Version 1.0.4 (September 2, 2011) +--------------------------------- + +* Switch to SemVer git tags. + +* Make ``linkify()`` smarter about trailing punctuation. (#30) + +* Pass ``exc_info`` to logger during rendering issues. + +* Add wildcard key for attributes. (#19) + +* Make ``linkify()`` use the ``HTMLSanitizer`` tokenizer. (#36) + +* Fix URLs wrapped in parentheses. (#23) + +* Make ``linkify()`` UTF-8 safe. (#33) + + +Version 1.0.3 (June 14, 2011) +----------------------------- + +* ``linkify()`` works with 3rd level domains. (#24) + +* ``clean()`` supports vendor prefixes in style values. (#31, #32) + +* Fix ``linkify()`` email escaping. + + +Version 1.0.2 (June 6, 2011) +---------------------------- + +* ``linkify()`` supports email addresses. + +* ``clean()`` supports callables in attributes filter. + + +Version 1.0.1 (April 12, 2011) +------------------------------ + +* ``linkify()`` doesn't drop trailing slashes. (#21) +* ``linkify()`` won't linkify 'libgl.so.1'.
(#22) diff --git a/.venv/lib/python3.11/site-packages/bleach-6.0.0.dist-info/RECORD b/.venv/lib/python3.11/site-packages/bleach-6.0.0.dist-info/RECORD new file mode 100644 index 0000000..3b93630 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/bleach-6.0.0.dist-info/RECORD @@ -0,0 +1,103 @@ +bleach-6.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +bleach-6.0.0.dist-info/LICENSE,sha256=vsIjjBSaYyuPsmgT9oes6rq4AyfzJwdpwsFhV4g9MTA,569 +bleach-6.0.0.dist-info/METADATA,sha256=9rTe871FE18onbZcdlZFtTfPai2HySOuYtDZYd3f09o,29799 +bleach-6.0.0.dist-info/RECORD,, +bleach-6.0.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +bleach-6.0.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92 +bleach-6.0.0.dist-info/top_level.txt,sha256=dcv0wKIySB0zMjAEXLwY4V0-3IN9UZQGAT1wDmfQICY,7 +bleach/__init__.py,sha256=4BQh_aKiLYDvtix_nDA4r66fn2HQOeRirhNqxYaZIL0,3649 +bleach/__pycache__/__init__.cpython-311.pyc,, +bleach/__pycache__/callbacks.cpython-311.pyc,, +bleach/__pycache__/css_sanitizer.cpython-311.pyc,, +bleach/__pycache__/html5lib_shim.cpython-311.pyc,, +bleach/__pycache__/linkifier.cpython-311.pyc,, +bleach/__pycache__/parse_shim.cpython-311.pyc,, +bleach/__pycache__/sanitizer.cpython-311.pyc,, +bleach/_vendor/README.rst,sha256=eXeKT2JdZB4WX1kuhTa8W9Jp9VXtwIKFxo5RUL5exmM,2160 +bleach/_vendor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +bleach/_vendor/__pycache__/__init__.cpython-311.pyc,, +bleach/_vendor/__pycache__/parse.cpython-311.pyc,, +bleach/_vendor/html5lib-1.1.dist-info/AUTHORS.rst,sha256=DrNAMifoDpuQyJn-KW-H6K8Tt2a5rKnV2UF4-DRrGUI,983 +bleach/_vendor/html5lib-1.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +bleach/_vendor/html5lib-1.1.dist-info/LICENSE,sha256=FqOZkWGekvGGgJMtoqkZn999ld8-yu3FLqBiGKq6_W8,1084 +bleach/_vendor/html5lib-1.1.dist-info/METADATA,sha256=Y3w-nd_22HQnQRy3yypVsV_ke2FF94uUD4-vGpc2DnI,16076 +bleach/_vendor/html5lib-1.1.dist-info/RECORD,sha256=u-y_W5lhdsHC1OSMnA4bCi3-11IgQ_FAIW6viMu8_LA,3486 +bleach/_vendor/html5lib-1.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +bleach/_vendor/html5lib-1.1.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110 +bleach/_vendor/html5lib-1.1.dist-info/top_level.txt,sha256=XEX6CHpskSmvjJB4tP6m4Q5NYXhIf_0ceMc0PNbzJPQ,9 +bleach/_vendor/html5lib/__init__.py,sha256=pWnYcfZ69wNLrdQL7bpr49FUi8O8w0KhKCOHsyRgYGQ,1143 +bleach/_vendor/html5lib/__pycache__/__init__.cpython-311.pyc,, +bleach/_vendor/html5lib/__pycache__/_ihatexml.cpython-311.pyc,, +bleach/_vendor/html5lib/__pycache__/_inputstream.cpython-311.pyc,, +bleach/_vendor/html5lib/__pycache__/_tokenizer.cpython-311.pyc,, +bleach/_vendor/html5lib/__pycache__/_utils.cpython-311.pyc,, +bleach/_vendor/html5lib/__pycache__/constants.cpython-311.pyc,, +bleach/_vendor/html5lib/__pycache__/html5parser.cpython-311.pyc,, +bleach/_vendor/html5lib/__pycache__/serializer.cpython-311.pyc,, +bleach/_vendor/html5lib/_ihatexml.py,sha256=ifOwF7pXqmyThIXc3boWc96s4MDezqRrRVp7FwDYUFs,16728 +bleach/_vendor/html5lib/_inputstream.py,sha256=IKuMiY8rzb7pqIGCpbvTqsxysLEpgEHWYvYEFu4LUAI,32300 +bleach/_vendor/html5lib/_tokenizer.py,sha256=WvJQa2Mli4NtTmhLXkX8Jy5FcWttqCaiDTiKyaw8D-k,77028 +bleach/_vendor/html5lib/_trie/__init__.py,sha256=nqfgO910329BEVJ5T4psVwQtjd2iJyEXQ2-X8c1YxwU,109 +bleach/_vendor/html5lib/_trie/__pycache__/__init__.cpython-311.pyc,, +bleach/_vendor/html5lib/_trie/__pycache__/_base.cpython-311.pyc,, 
+bleach/_vendor/html5lib/_trie/__pycache__/py.cpython-311.pyc,, +bleach/_vendor/html5lib/_trie/_base.py,sha256=CaybYyMro8uERQYjby2tTeSUatnWDfWroUN9N7ety5w,1013 +bleach/_vendor/html5lib/_trie/py.py,sha256=zg7RZSHxJ8mLmuI_7VEIV8AomISrgkvqCP477AgXaG0,1763 +bleach/_vendor/html5lib/_utils.py,sha256=AxAJSG15eyarCgKMnlUwzs1X6jFHXqEvhlYEOxAFmis,4919 +bleach/_vendor/html5lib/constants.py,sha256=Ll-yzLU_jcjyAI_h57zkqZ7aQWE5t5xA4y_jQgoUUhw,83464 +bleach/_vendor/html5lib/filters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +bleach/_vendor/html5lib/filters/__pycache__/__init__.cpython-311.pyc,, +bleach/_vendor/html5lib/filters/__pycache__/alphabeticalattributes.cpython-311.pyc,, +bleach/_vendor/html5lib/filters/__pycache__/base.cpython-311.pyc,, +bleach/_vendor/html5lib/filters/__pycache__/inject_meta_charset.cpython-311.pyc,, +bleach/_vendor/html5lib/filters/__pycache__/lint.cpython-311.pyc,, +bleach/_vendor/html5lib/filters/__pycache__/optionaltags.cpython-311.pyc,, +bleach/_vendor/html5lib/filters/__pycache__/sanitizer.cpython-311.pyc,, +bleach/_vendor/html5lib/filters/__pycache__/whitespace.cpython-311.pyc,, +bleach/_vendor/html5lib/filters/alphabeticalattributes.py,sha256=lViZc2JMCclXi_5gduvmdzrRxtO5Xo9ONnbHBVCsykU,919 +bleach/_vendor/html5lib/filters/base.py,sha256=z-IU9ZAYjpsVsqmVt7kuWC63jR11hDMr6CVrvuao8W0,286 +bleach/_vendor/html5lib/filters/inject_meta_charset.py,sha256=egDXUEHXmAG9504xz0K6ALDgYkvUrC2q15YUVeNlVQg,2945 +bleach/_vendor/html5lib/filters/lint.py,sha256=upXATs6By7cot7o0bnNqR15sPq2Fn6Vnjvoy3gyO_rY,3631 +bleach/_vendor/html5lib/filters/optionaltags.py,sha256=8lWT75J0aBOHmPgfmqTHSfPpPMp01T84NKu0CRedxcE,10588 +bleach/_vendor/html5lib/filters/sanitizer.py,sha256=XGNSdzIqDTaHot1V-rRj1V_XOolApJ7n95tHP9JcgNU,26885 +bleach/_vendor/html5lib/filters/whitespace.py,sha256=8eWqZxd4UC4zlFGW6iyY6f-2uuT8pOCSALc3IZt7_t4,1214 +bleach/_vendor/html5lib/html5parser.py,sha256=w5hZJh0cvD3g4CS196DiTmuGpSKCMYe1GS46-yf_WZQ,117174 +bleach/_vendor/html5lib/serializer.py,sha256=K2kfoLyMPMFPfdusfR30SrxNkf0mJB92-P5_RntyaaI,15747 +bleach/_vendor/html5lib/treeadapters/__init__.py,sha256=18hyI-at2aBsdKzpwRwa5lGF1ipgctaTYXoU9En2ZQg,650 +bleach/_vendor/html5lib/treeadapters/__pycache__/__init__.cpython-311.pyc,, +bleach/_vendor/html5lib/treeadapters/__pycache__/genshi.cpython-311.pyc,, +bleach/_vendor/html5lib/treeadapters/__pycache__/sax.cpython-311.pyc,, +bleach/_vendor/html5lib/treeadapters/genshi.py,sha256=CH27pAsDKmu4ZGkAUrwty7u0KauGLCZRLPMzaO3M5vo,1715 +bleach/_vendor/html5lib/treeadapters/sax.py,sha256=BKS8woQTnKiqeffHsxChUqL4q2ZR_wb5fc9MJ3zQC8s,1776 +bleach/_vendor/html5lib/treebuilders/__init__.py,sha256=AysSJyvPfikCMMsTVvaxwkgDieELD5dfR8FJIAuq7hY,3592 +bleach/_vendor/html5lib/treebuilders/__pycache__/__init__.cpython-311.pyc,, +bleach/_vendor/html5lib/treebuilders/__pycache__/base.cpython-311.pyc,, +bleach/_vendor/html5lib/treebuilders/__pycache__/dom.cpython-311.pyc,, +bleach/_vendor/html5lib/treebuilders/__pycache__/etree.cpython-311.pyc,, +bleach/_vendor/html5lib/treebuilders/__pycache__/etree_lxml.cpython-311.pyc,, +bleach/_vendor/html5lib/treebuilders/base.py,sha256=oeZNGEB-kt90YJGVH05gb5a8E7ids2AbYwGRsVCieWk,14553 +bleach/_vendor/html5lib/treebuilders/dom.py,sha256=22whb0C71zXIsai5mamg6qzBEiigcBIvaDy4Asw3at0,8925 +bleach/_vendor/html5lib/treebuilders/etree.py,sha256=EbmHx-wQ-11MVucTPtF7Ul92-mQGN3Udu_KfDn-Ifhk,12824 +bleach/_vendor/html5lib/treebuilders/etree_lxml.py,sha256=OazDHZGO_q4FnVs4Dhs4hzzn2JwGAOs-rfV8LAlUGW4,14754 
+bleach/_vendor/html5lib/treewalkers/__init__.py,sha256=OBPtc1TU5mGyy18QDMxKEyYEz0wxFUUNj5v0-XgmYhY,5719 +bleach/_vendor/html5lib/treewalkers/__pycache__/__init__.cpython-311.pyc,, +bleach/_vendor/html5lib/treewalkers/__pycache__/base.cpython-311.pyc,, +bleach/_vendor/html5lib/treewalkers/__pycache__/dom.cpython-311.pyc,, +bleach/_vendor/html5lib/treewalkers/__pycache__/etree.cpython-311.pyc,, +bleach/_vendor/html5lib/treewalkers/__pycache__/etree_lxml.cpython-311.pyc,, +bleach/_vendor/html5lib/treewalkers/__pycache__/genshi.cpython-311.pyc,, +bleach/_vendor/html5lib/treewalkers/base.py,sha256=ouiOsuSzvI0KgzdWP8PlxIaSNs9falhbiinAEc_UIJY,7476 +bleach/_vendor/html5lib/treewalkers/dom.py,sha256=EHyFR8D8lYNnyDU9lx_IKigVJRyecUGua0mOi7HBukc,1413 +bleach/_vendor/html5lib/treewalkers/etree.py,sha256=gkD4tfEfRWPsEGvgHHJxZmKZXUvBzVVGz3v5C_MIiOE,4539 +bleach/_vendor/html5lib/treewalkers/etree_lxml.py,sha256=eLedbn6nPjlpebibsWVijey7WEpzDwxU3ubwUoudBuA,6345 +bleach/_vendor/html5lib/treewalkers/genshi.py,sha256=4D2PECZ5n3ZN3qu3jMl9yY7B81jnQApBQSVlfaIuYbA,2309 +bleach/_vendor/parse.py,sha256=Rq-WbjO2JHrh1X2UWRFaPrRs2p-AnJ8U4FKrwv6NrLI,39023 +bleach/_vendor/parse.py.SHA256SUM,sha256=-AaiqN-9otw_X0vFjKkbKWFvkp68iLME92_wI-8-vm0,75 +bleach/_vendor/vendor.txt,sha256=6FFZyenumgWqnhLgbCa4yzL4HVNaSUDC2DHNyR5Fy6w,184 +bleach/_vendor/vendor_install.sh,sha256=x_Pn4dkfzPMJCZKwHHFxp0EAL5RsIfz-HSdTWHuI4yA,453 +bleach/callbacks.py,sha256=JNTGiM5_3bKsGltpR9ZYEz_C_b7-vfDlTTdQCirbdyc,752 +bleach/css_sanitizer.py,sha256=QFMxRKBUMSuNvYkVpB2WRBQO609eFbU-p9P_LhU6jtM,2526 +bleach/html5lib_shim.py,sha256=LRockXiQam4HK-Df-X7fjrW1OF63UIHgTNo7-tSrFeY,22779 +bleach/linkifier.py,sha256=k7sOk4WILQNoLVhDye_GAOzhe0ZiWXLIKKAk2UB4zvY,22350 +bleach/parse_shim.py,sha256=VDPOdBOKbuDEceKVvfoggcr6A332bkcq4Z8jMtOJlAQ,50 +bleach/sanitizer.py,sha256=JqDuTINOybpc_eHBzG_H7cnkHdFskZGbfsaBc-hDPH8,21934 diff --git a/.venv/lib/python3.11/site-packages/bleach-6.0.0.dist-info/REQUESTED b/.venv/lib/python3.11/site-packages/bleach-6.0.0.dist-info/REQUESTED new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.11/site-packages/bleach-6.0.0.dist-info/WHEEL b/.venv/lib/python3.11/site-packages/bleach-6.0.0.dist-info/WHEEL new file mode 100644 index 0000000..57e3d84 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/bleach-6.0.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.38.4) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/.venv/lib/python3.11/site-packages/bleach-6.0.0.dist-info/top_level.txt b/.venv/lib/python3.11/site-packages/bleach-6.0.0.dist-info/top_level.txt new file mode 100644 index 0000000..a02d600 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/bleach-6.0.0.dist-info/top_level.txt @@ -0,0 +1 @@ +bleach diff --git a/.venv/lib/python3.11/site-packages/bleach/__init__.py b/.venv/lib/python3.11/site-packages/bleach/__init__.py new file mode 100644 index 0000000..4e87eb8 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/bleach/__init__.py @@ -0,0 +1,125 @@ +from bleach.linkifier import ( + DEFAULT_CALLBACKS, + Linker, +) +from bleach.sanitizer import ( + ALLOWED_ATTRIBUTES, + ALLOWED_PROTOCOLS, + ALLOWED_TAGS, + Cleaner, +) + + +# yyyymmdd +__releasedate__ = "20230123" +# x.y.z or x.y.z.dev0 -- semver +__version__ = "6.0.0" + + +__all__ = ["clean", "linkify"] + + +def clean( + text, + tags=ALLOWED_TAGS, + attributes=ALLOWED_ATTRIBUTES, + protocols=ALLOWED_PROTOCOLS, + strip=False, + strip_comments=True, + css_sanitizer=None, +): + """Clean an HTML fragment of malicious 
 content and return it + + This function is a security-focused function whose sole purpose is to + remove malicious content from a string such that it can be displayed as + content in a web page. + + This function is not designed for transforming content for use in + non-web-page contexts. + + Example:: + + import bleach + + better_text = bleach.clean(yucky_text) + + + .. Note:: + + If you're cleaning a lot of text and passing the same argument values or + you want more configurability, consider using a + :py:class:`bleach.sanitizer.Cleaner` instance. + + :arg str text: the text to clean + + :arg set tags: set of allowed tags; defaults to + ``bleach.sanitizer.ALLOWED_TAGS`` + + :arg dict attributes: allowed attributes; can be a callable, list or dict; + defaults to ``bleach.sanitizer.ALLOWED_ATTRIBUTES`` + + :arg list protocols: allowed list of protocols for links; defaults + to ``bleach.sanitizer.ALLOWED_PROTOCOLS`` + + :arg bool strip: whether or not to strip disallowed elements + + :arg bool strip_comments: whether or not to strip HTML comments + + :arg CSSSanitizer css_sanitizer: instance with a "sanitize_css" method for + sanitizing style attribute values and style text; defaults to None + + :returns: cleaned text as unicode + + """ + cleaner = Cleaner( + tags=tags, + attributes=attributes, + protocols=protocols, + strip=strip, + strip_comments=strip_comments, + css_sanitizer=css_sanitizer, + ) + return cleaner.clean(text) + + +def linkify(text, callbacks=DEFAULT_CALLBACKS, skip_tags=None, parse_email=False): + """Convert URL-like strings in an HTML fragment to links + + This function converts strings that look like URLs, domain names and email + addresses in text that may be an HTML fragment to links, while preserving: + + 1. links already in the string + 2. urls found in attributes + 3. email addresses + + linkify takes a best-effort approach and tries to recover from bad + situations caused by malformed text. + + .. Note:: + + If you're linking a lot of text and passing the same argument values or + you want more configurability, consider using a + :py:class:`bleach.linkifier.Linker` instance. + + .. Note:: + + If you have text that you want to clean and then linkify, consider using + the :py:class:`bleach.linkifier.LinkifyFilter` as a filter in the clean + pass. That way you're not parsing the HTML twice. + + :arg str text: the text to linkify + + :arg list callbacks: list of callbacks to run when adjusting tag attributes; + defaults to ``bleach.linkifier.DEFAULT_CALLBACKS`` + + :arg list skip_tags: list of tags that you don't want to linkify the + contents of; for example, you could set this to ``['pre']`` to skip + linkifying contents of ``pre`` tags + + :arg bool parse_email: whether or not to linkify email addresses + + :returns: linkified text as unicode + + """ + linker = Linker(callbacks=callbacks, skip_tags=skip_tags, parse_email=parse_email) + return linker.linkify(text) diff --git a/.venv/lib/python3.11/site-packages/bleach/_vendor/README.rst b/.venv/lib/python3.11/site-packages/bleach/_vendor/README.rst new file mode 100644 index 0000000..e53aede --- /dev/null +++ b/.venv/lib/python3.11/site-packages/bleach/_vendor/README.rst @@ -0,0 +1,61 @@ +======================= +Vendored library policy +======================= + +To simplify Bleach development, we're now vendoring certain libraries that +we use. + +Vendored libraries must follow these rules: + +1. Vendored libraries must be pure Python--no compiling. +2. Source code for the library is included in this directory. +3. 
License must be included in this repo and in the Bleach distribution. +4. Requirements of the library become requirements of Bleach. +5. No modifications to the library may be made. + + +Adding/Updating a vendored library +================================== + +To vendor a library or update a version: + +1. Update ``vendor.txt`` with the library, version, and hash. You can use + `hashin `_. +2. Remove all old files and directories of the old version. +3. Run ``pip_install_vendor.sh`` and check everything it produced, including + the ``.dist-info`` directory and contents. +4. Update the bleach minor version in the next release. + + +Reviewing a change involving a vendored library +=============================================== + +To verify a vendored library addition/update: + +1. Pull down the branch. +2. Delete all the old files and directories of the old version. +3. Run ``pip_install_vendor.sh``. +4. Run ``git diff`` and verify there are no changes. + + +NB: the current ``vendor.txt`` was generated with pip 20.2.3; the same pip version may be needed to reproduce the ``.dist-info`` contents. + + +Removing/Unvendoring a vendored library +======================================= + +A vendored library might be removed for any of the following reasons: + +* it violates the vendoring policy (e.g. an incompatible license + change) +* a suitable replacement is found +* bleach has the resources to test and QA new bleach releases against + multiple versions of the previously vendored library + +To unvendor a library: + +1. Remove the library and its hashes from ``vendor.txt``. +2. Remove library files and directories from this directory. +3. Run ``install_vendor.sh`` and check that the previously vendored library, + including the ``.dist-info`` directory and contents, is not installed. +4. Update the bleach minor version in the next release. diff --git a/.venv/lib/python3.11/site-packages/bleach/_vendor/__init__.py b/.venv/lib/python3.11/site-packages/bleach/_vendor/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.11/site-packages/bleach/_vendor/html5lib-1.1.dist-info/AUTHORS.rst b/.venv/lib/python3.11/site-packages/bleach/_vendor/html5lib-1.1.dist-info/AUTHORS.rst new file mode 100644 index 0000000..9040139 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/bleach/_vendor/html5lib-1.1.dist-info/AUTHORS.rst @@ -0,0 +1,66 @@ +Credits +======= + +``html5lib`` is written and maintained by: + +- James Graham +- Sam Sneddon +- Łukasz Langa +- Will Kahn-Greene + + +Patches and suggestions +----------------------- +(In chronological order, by first commit:) + +- Anne van Kesteren +- Lachlan Hunt +- lantis63 +- Sam Ruby +- Thomas Broyer +- Tim Fletcher +- Mark Pilgrim +- Ryan King +- Philip Taylor +- Edward Z. 
Yang +- fantasai +- Philip Jägenstedt +- Ms2ger +- Mohammad Taha Jahangir +- Andy Wingo +- Andreas Madsack +- Karim Valiev +- Juan Carlos Garcia Segovia +- Mike West +- Marc DM +- Simon Sapin +- Michael[tm] Smith +- Ritwik Gupta +- Marc Abramowitz +- Tony Lopes +- lilbludevil +- Kevin +- Drew Hubl +- Austin Kumbera +- Jim Baker +- Jon Dufresne +- Donald Stufft +- Alex Gaynor +- Nik Nyby +- Jakub Wilk +- Sigmund Cherem +- Gabi Davar +- Florian Mounier +- neumond +- Vitalik Verhovodov +- Kovid Goyal +- Adam Chainz +- John Vandenberg +- Eric Amorde +- Benedikt Morbach +- Jonathan Vanasco +- Tom Most +- Ville Skyttä +- Hugo van Kemenade +- Mark Vasilkov + diff --git a/.venv/lib/python3.11/site-packages/bleach/_vendor/html5lib-1.1.dist-info/INSTALLER b/.venv/lib/python3.11/site-packages/bleach/_vendor/html5lib-1.1.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/bleach/_vendor/html5lib-1.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/lib/python3.11/site-packages/bleach/_vendor/html5lib-1.1.dist-info/LICENSE b/.venv/lib/python3.11/site-packages/bleach/_vendor/html5lib-1.1.dist-info/LICENSE new file mode 100644 index 0000000..c87fa7a --- /dev/null +++ b/.venv/lib/python3.11/site-packages/bleach/_vendor/html5lib-1.1.dist-info/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2006-2013 James Graham and other contributors + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/.venv/lib/python3.11/site-packages/bleach/_vendor/html5lib-1.1.dist-info/METADATA b/.venv/lib/python3.11/site-packages/bleach/_vendor/html5lib-1.1.dist-info/METADATA new file mode 100644 index 0000000..ee83c1f --- /dev/null +++ b/.venv/lib/python3.11/site-packages/bleach/_vendor/html5lib-1.1.dist-info/METADATA @@ -0,0 +1,552 @@ +Metadata-Version: 2.1 +Name: html5lib +Version: 1.1 +Summary: HTML parser based on the WHATWG HTML specification +Home-page: https://github.com/html5lib/html5lib-python +Maintainer: James Graham +Maintainer-email: james@hoppipolla.co.uk +License: MIT License +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Text Processing :: Markup :: HTML +Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.* +Requires-Dist: six (>=1.9) +Requires-Dist: webencodings +Provides-Extra: all +Requires-Dist: genshi ; extra == 'all' +Requires-Dist: chardet (>=2.2) ; extra == 'all' +Requires-Dist: lxml ; (platform_python_implementation == 'CPython') and extra == 'all' +Provides-Extra: chardet +Requires-Dist: chardet (>=2.2) ; extra == 'chardet' +Provides-Extra: genshi +Requires-Dist: genshi ; extra == 'genshi' +Provides-Extra: lxml +Requires-Dist: lxml ; (platform_python_implementation == 'CPython') and extra == 'lxml' + +html5lib +======== + +.. image:: https://travis-ci.org/html5lib/html5lib-python.svg?branch=master + :target: https://travis-ci.org/html5lib/html5lib-python + + +html5lib is a pure-python library for parsing HTML. It is designed to +conform to the WHATWG HTML specification, as is implemented by all major +web browsers. + + +Usage +----- + +Simple usage follows this pattern: + +.. code-block:: python + + import html5lib + with open("mydocument.html", "rb") as f: + document = html5lib.parse(f) + +or: + +.. code-block:: python + + import html5lib + document = html5lib.parse("
<p>Hello World!") + +By default, the ``document`` will be an ``xml.etree`` element instance. +Whenever possible, html5lib chooses the accelerated ``ElementTree`` +implementation (i.e. ``xml.etree.cElementTree`` on Python 2.x). + +Two other tree types are supported: ``xml.dom.minidom`` and +``lxml.etree``. To use an alternative format, specify the name of +a treebuilder: + +.. code-block:: python + + import html5lib + with open("mydocument.html", "rb") as f: + lxml_etree_document = html5lib.parse(f, treebuilder="lxml") + +When using with ``urllib2`` (Python 2), the charset from HTTP should be +passed into html5lib as follows: + +.. code-block:: python + + from contextlib import closing + from urllib2 import urlopen + import html5lib + + with closing(urlopen("http://example.com/")) as f: + document = html5lib.parse(f, transport_encoding=f.info().getparam("charset")) + +When using with ``urllib.request`` (Python 3), the charset from HTTP +should be passed into html5lib as follows: + +.. code-block:: python + + from urllib.request import urlopen + import html5lib + + with urlopen("http://example.com/") as f: + document = html5lib.parse(f, transport_encoding=f.info().get_content_charset()) + +To have more control over the parser, create a parser object explicitly. +For instance, to make the parser raise exceptions on parse errors, use: + +.. code-block:: python + + import html5lib + with open("mydocument.html", "rb") as f: + parser = html5lib.HTMLParser(strict=True) + document = parser.parse(f) + +When you're instantiating parser objects explicitly, pass a treebuilder +class as the ``tree`` keyword argument to use an alternative document +format: + +.. code-block:: python + + import html5lib + parser = html5lib.HTMLParser(tree=html5lib.getTreeBuilder("dom")) + minidom_document = parser.parse("
<p>Hello World!") + +More documentation is available at https://html5lib.readthedocs.io/. + + +Installation +------------ + +html5lib works on CPython 2.7+, CPython 3.5+ and PyPy. To install: + +.. code-block:: bash + + $ pip install html5lib + +The goal is to support a (non-strict) superset of the versions that `pip +supports +`_. + +Optional Dependencies +--------------------- + +The following third-party libraries may be used for additional +functionality: + +- ``lxml`` is supported as a tree format (for both building and + walking) under CPython (but *not* PyPy where it is known to cause + segfaults); + +- ``genshi`` has a treewalker (but not builder); and + +- ``chardet`` can be used as a fallback when character encoding cannot + be determined. + + +Bugs +---- + +Please report any bugs on the `issue tracker +`_. + + +Tests +----- + +Unit tests require the ``pytest`` and ``mock`` libraries and can be +run using the ``py.test`` command in the root directory. + +Test data are contained in a separate `html5lib-tests +`_ repository and included +as a submodule, thus for git checkouts they must be initialized:: + + $ git submodule init + $ git submodule update + +If you have all compatible Python implementations available on your +system, you can run tests on all of them using the ``tox`` utility, +which can be found on PyPI. + + +Questions? +---------- + +There's a mailing list available for support on Google Groups, +`html5lib-discuss `_, +though you may get a quicker response asking on IRC in `#whatwg on +irc.freenode.net `_. + +Change Log +---------- + +1.1 +~~~ + +UNRELEASED + +Breaking changes: + +* Drop support for Python 3.3. (#358) +* Drop support for Python 3.4. (#421) + +Deprecations: + +* Deprecate the ``html5lib`` sanitizer (``html5lib.serialize(sanitize=True)`` and + ``html5lib.filters.sanitizer``). We recommend users migrate to `Bleach + `. Please let us know if Bleach doesn't suffice for your + use. (#443) + +Other changes: + +* Try to import from ``collections.abc`` to remove DeprecationWarning and ensure + ``html5lib`` keeps working in future Python versions. (#403) +* Drop optional ``datrie`` dependency. (#442) + + +1.0.1 +~~~~~ + +Released on December 7, 2017 + +Breaking changes: + +* Drop support for Python 2.6. (#330) (Thank you, Hugo, Will Kahn-Greene!) +* Remove ``utils/spider.py`` (#353) (Thank you, Jon Dufresne!) + +Features: + +* Improve documentation. (#300, #307) (Thank you, Jon Dufresne, Tom Most, + Will Kahn-Greene!) +* Add iframe seamless boolean attribute. (Thank you, Ritwik Gupta!) +* Add itemscope as a boolean attribute. (#194) (Thank you, Jonathan Vanasco!) +* Support Python 3.6. (#333) (Thank you, Jon Dufresne!) +* Add CI support for Windows using AppVeyor. (Thank you, John Vandenberg!) +* Improve testing and CI and add code coverage. (#323, #334) (Thank you, Jon + Dufresne, John Vandenberg, Sam Sneddon, Will Kahn-Greene!) +* Semver-compliant version number. + +Bug fixes: + +* Add support for setuptools < 18.5 to support environment markers. (Thank you, + John Vandenberg!) +* Add explicit dependency for six >= 1.9. (Thank you, Eric Amorde!) +* Fix regexes to work with Python 3.7 regex adjustments. (#318, #379) (Thank + you, Benedikt Morbach, Ville Skyttä, Mark Vasilkov!) +* Fix alphabeticalattributes filter namespace bug. (#324) (Thank you, Will + Kahn-Greene!) +* Include license file in generated wheel package. (#350) (Thank you, Jon + Dufresne!) +* Fix annotation-xml typo. (#339) (Thank you, Will Kahn-Greene!)
+* Allow uppercase hex characters in CSS colour check. (#377) (Thank you, + Komal Dembla, Hugo!) + + +1.0 +~~~ + +Released and unreleased on December 7, 2017. Badly packaged release. + + +0.999999999/1.0b10 +~~~~~~~~~~~~~~~~~~ + +Released on July 15, 2016 + +* Fix attribute order going to the tree builder to be document order + instead of reverse document order(!). + + +0.99999999/1.0b9 +~~~~~~~~~~~~~~~~ + +Released on July 14, 2016 + +* **Added ordereddict as a mandatory dependency on Python 2.6.** + +* Added ``lxml``, ``genshi``, ``datrie``, ``charade``, and ``all`` + extras that will do the right thing based on the specific + interpreter implementation. + +* Now requires the ``mock`` package for the testsuite. + +* Cease supporting DATrie under PyPy. + +* **Remove PullDOM support, as this hasn't ever been properly + tested, doesn't entirely work, and as far as I can tell is + completely unused by anyone.** + +* Move testsuite to ``py.test``. + +* **Fix #124: move to webencodings for decoding the input byte stream; + this makes html5lib compliant with the Encoding Standard, and + introduces a required dependency on webencodings.** + +* **Cease supporting Python 3.2 (in both CPython and PyPy forms).** + +* **Fix comments containing double-dash with lxml 3.5 and above.** + +* **Use scripting disabled by default (as we don't implement + scripting).** + +* **Fix #11, avoiding the XSS bug potentially caused by serializer + allowing attribute values to be escaped out of in old browser versions, + changing the quote_attr_values option on serializer to take one of + three values, "always" (the old True value), "legacy" (the new option, + and the new default), and "spec" (the old False value, and the old + default).** + +* **Fix #72 by rewriting the sanitizer to apply only to treewalkers + (instead of the tokenizer); as such, this will require amending all + callers of it to use it via the treewalker API.** + +* **Drop support of charade, now that chardet is supported once more.** + +* **Replace the charset keyword argument on parse and related methods + with a set of keyword arguments: override_encoding, transport_encoding, + same_origin_parent_encoding, likely_encoding, and default_encoding.** + +* **Move filters._base, treebuilder._base, and treewalkers._base to .base + to clarify their status as public.** + +* **Get rid of the sanitizer package. Merge sanitizer.sanitize into the + sanitizer.htmlsanitizer module and move that to sanitizer. This means + anyone who used sanitizer.sanitize or sanitizer.HTMLSanitizer needs no + code changes.** + +* **Rename treewalkers.lxmletree to .etree_lxml and + treewalkers.genshistream to .genshi to have a consistent API.** + +* Move a whole load of stuff (inputstream, ihatexml, trie, tokenizer, + utils) to be underscore prefixed to clarify their status as private. + + +0.9999999/1.0b8 +~~~~~~~~~~~~~~~ + +Released on September 10, 2015 + +* Fix #195: fix the sanitizer to drop broken URLs (it threw an + exception between 0.9999 and 0.999999). + + +0.999999/1.0b7 +~~~~~~~~~~~~~~ + +Released on July 7, 2015 + +* Fix #189: fix the sanitizer to allow relative URLs again (as it did + prior to 0.9999/1.0b5). + + +0.99999/1.0b6 +~~~~~~~~~~~~~ + +Released on April 30, 2015 + +* Fix #188: fix the sanitizer to not throw an exception when sanitizing + bogus data URLs. + + +0.9999/1.0b5 +~~~~~~~~~~~~ + +Released on April 29, 2015 + +* Fix #153: Sanitizer fails to treat some attributes as URLs. Despite how + this sounds, this has no known security implications. 
No known version + of IE (5.5 to current), Firefox (3 to current), Safari (6 to current), + Chrome (1 to current), or Opera (12 to current) will run any script + provided in these attributes. + +* Pass error message to the ParseError exception in strict parsing mode. + +* Allow data URIs in the sanitizer, with a whitelist of content-types. + +* Add support for Python implementations that don't support lone + surrogates (read: Jython). Fixes #2. + +* Remove localization of error messages. This functionality was totally + unused (and untested that everything was localizable), so we may as + well follow numerous browsers in not supporting translating technical + strings. + +* Expose treewalkers.pprint as a public API. + +* Add a documentEncoding property to HTML5Parser, fix #121. + + +0.999 +~~~~~ + +Released on December 23, 2013 + +* Fix #127: add work-around for CPython issue #20007: .read(0) on + http.client.HTTPResponse drops the rest of the content. + +* Fix #115: lxml treewalker can now deal with fragments containing, at + their root level, text nodes with non-ASCII characters on Python 2. + + +0.99 +~~~~ + +Released on September 10, 2013 + +* No library changes from 1.0b3; released as 0.99 as pip has changed + behaviour from 1.4 to avoid installing pre-release versions per + PEP 440. + + +1.0b3 +~~~~~ + +Released on July 24, 2013 + +* Removed ``RecursiveTreeWalker`` from ``treewalkers._base``. Any + implementation using it should be moved to + ``NonRecursiveTreeWalker``, as everything bundled with html5lib has + for years. + +* Fix #67 so that ``BufferedStream`` correctly returns a bytes + object, thereby fixing any case where html5lib is passed a + non-seekable RawIOBase-like object. + + +1.0b2 +~~~~~ + +Released on June 27, 2013 + +* Removed reordering of attributes within the serializer. There is now + an ``alphabetical_attributes`` option which preserves the previous + behaviour through a new filter. This allows attribute order to be + preserved through html5lib if the tree builder preserves order. + +* Removed ``dom2sax`` from DOM treebuilders. It has been replaced by + ``treeadapters.sax.to_sax`` which is generic and supports any + treewalker; it also resolves all known bugs with ``dom2sax``. + +* Fix treewalker assertions on hitting bytes strings on + Python 2. Previous to 1.0b1, treewalkers coped with mixed + bytes/unicode data on Python 2; this reintroduces this prior + behaviour on Python 2. Behaviour is unchanged on Python 3. + + +1.0b1 +~~~~~ + +Released on May 17, 2013 + +* Implementation updated to implement the `HTML specification + `_ as of 5th May + 2013 (`SVN `_ revision r7867). + +* Python 3.2+ supported in a single codebase using the ``six`` library. + +* Removed support for Python 2.5 and older. + +* Removed the deprecated Beautiful Soup 3 treebuilder. + ``beautifulsoup4`` can use ``html5lib`` as a parser instead. Note that + since it doesn't support namespaces, foreign content like SVG and + MathML is parsed incorrectly. + +* Removed ``simpletree`` from the package. The default tree builder is + now ``etree`` (using the ``xml.etree.cElementTree`` implementation if + available, and ``xml.etree.ElementTree`` otherwise). + +* Removed the ``XHTMLSerializer`` as it never actually guaranteed its + output was well-formed XML, and hence provided little of use. + +* Removed default DOM treebuilder, so ``html5lib.treebuilders.dom`` is no + longer supported. 
``html5lib.treebuilders.getTreeBuilder("dom")`` will + return the default DOM treebuilder, which uses ``xml.dom.minidom``. + +* Optional heuristic character encoding detection now based on + ``charade`` for Python 2.6 - 3.3 compatibility. + +* Optional ``Genshi`` treewalker support fixed. + +* Many bugfixes, including: + + * #33: null in attribute value breaks XML AttValue; + + * #4: nested, indirect descendant,