Mirror of https://github.com/Balshgit/public.git (synced 2025-09-11 18:00:42 +03:00)

Merge branch 'master' of github.com:Balshgit/public
Commit d3d862eaef

1 examples/celery-rabbit-example/.gitignore (vendored, Normal file)
@@ -0,0 +1 @@
!.env

34 examples/celery-rabbit-example/Dockerfile (Normal file)
@@ -0,0 +1,34 @@
FROM python:3.8.6-buster

ENV PYTHONFAULTHANDLER=1 \
    PYTHONUNBUFFERED=1 \
    PYTHONHASHSEED=random \
    PYTHONDONTWRITEBYTECODE=1 \
    # pip:
    PIP_NO_CACHE_DIR=off \
    PIP_DISABLE_PIP_VERSION_CHECK=on \
    PIP_DEFAULT_TIMEOUT=100

RUN apt-get update \
    && apt-get install --no-install-recommends -y \
        bash \
        build-essential \
        curl \
        gettext \
        git \
        libpq-dev \
        nano

WORKDIR /code

# Copy and install dependencies:
COPY requirements.txt /code/
RUN python -m pip install --upgrade pip
RUN pip install --no-cache-dir -r /code/requirements.txt

# Copy source files:
COPY . /code/
# COPY app.py /code/


6 examples/celery-rabbit-example/README.md (Normal file)
@@ -0,0 +1,6 @@
# celery first example

Steps:
1. Run `docker-compose up`
2. Show logs
3. In a new terminal run `docker-compose exec worker python`
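
For step 3, a quick smoke test from that Python shell (a minimal sketch, assuming the stack from step 1 is still running; `my_app` is importable because the image's WORKDIR is /code):

    # inside `docker-compose exec worker python`
    from my_app import add, div

    result = add.delay(2, 3)       # publish the task to RabbitMQ
    print(result.get(timeout=10))  # 5, resolvable because backend='rpc://' is configured

    div.delay(1, 0)                # raises ZeroDivisionError inside the worker; watch the logs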

3 examples/celery-rabbit-example/celery_config/__init__.py (Normal file)
@@ -0,0 +1,3 @@
# from app_celery import app as my_celery_app
#
# __all__ = ('my_celery_app', )

25 examples/celery-rabbit-example/celery_config/app_celery.py (Normal file)
@@ -0,0 +1,25 @@
from celery import Celery
from pathlib import Path
from decouple import AutoConfig

BASE_DIR = Path.cwd().parent
config = AutoConfig(search_path=BASE_DIR.joinpath('config'))


RABBITMQ_DEFAULT_USER = config('RABBITMQ_DEFAULT_USER')
RABBITMQ_DEFAULT_PASS = config('RABBITMQ_DEFAULT_PASS')
RABBITMQ_PORT = config('RABBITMQ_PORT', cast=int, default=5672)
RABBITMQ_HOST = config('RABBITMQ_HOST')


app_celery_instance = Celery(
    'tasks',
    broker='amqp://{login}:{password}@{host}:{port}'.format(
        login=RABBITMQ_DEFAULT_USER,
        password=RABBITMQ_DEFAULT_PASS,
        host=RABBITMQ_HOST,
        port=RABBITMQ_PORT,
    ),
    # TODO: try to get async results with and without backend configured
    backend='rpc://',
)
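
On the backend TODO above: with backend='rpc://' the worker sends return values back over AMQP, so AsyncResult.get() resolves. With no backend configured at all, Celery raises as soon as a result is requested. A hypothetical session illustrating the difference (uses the `add` task from my_app.py below):

    from my_app import add

    res = add.delay(4, 4)
    res.get(timeout=10)   # 8, because backend='rpc://' is configured

    # With the backend argument removed entirely, the same .get() raises
    # NotImplementedError ("No result backend is configured...") and
    # res.state stays 'PENDING' forever.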

6 examples/celery-rabbit-example/config/.env (Normal file)
@@ -0,0 +1,6 @@
# RabbitMQ settings:

RABBITMQ_DEFAULT_USER=rabbit_admin
RABBITMQ_DEFAULT_PASS=mypass
RABBITMQ_PORT=5672
RABBITMQ_HOST=rabbitmq_host
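
A note on how these values reach the code: docker-compose injects this file into both containers via env_file, and python-decouple checks os.environ before any .env file it discovers, so the lookups in app_celery.py succeed inside the container even if its search_path does not resolve. A minimal sketch of that precedence (hypothetical path):

    import os
    from decouple import AutoConfig

    config = AutoConfig(search_path='/nonexistent')  # no settings file found here
    os.environ['RABBITMQ_HOST'] = 'rabbitmq_host'    # what env_file effectively does
    print(config('RABBITMQ_HOST'))                   # 'rabbitmq_host', taken from os.environ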

28 examples/celery-rabbit-example/docker-compose.yml (Normal file)
@@ -0,0 +1,28 @@
version: '3.7'

services:

  rabbitmq:
    hostname: rabbitmq_host
    image: 'rabbitmq:3.8.18-management-alpine'
    container_name: first_rabbit
    env_file: config/.env
    restart: unless-stopped
    ports:
      - 8080:15672
      - 5672:5672

  worker:
    container_name: first_celery
    build: .
    command: celery --app=my_app:app_celery_instance worker --loglevel=INFO
    env_file: config/.env
    depends_on:
      - rabbitmq
    restart: unless-stopped

networks:
  default:
    name: celery_network
    driver: bridge
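
Port 15672 is RabbitMQ's management UI, so once the stack is up it is reachable at http://localhost:8080 (log in with the credentials from config/.env). To confirm the worker actually connected to the broker, one option (a sketch, not part of the repo) is Celery's ping control command:

    from celery_config.app_celery import app_celery_instance

    # Returns e.g. [{'celery@<hostname>': {'ok': 'pong'}}] when a worker is up.
    print(app_celery_instance.control.ping(timeout=2))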

14 examples/celery-rabbit-example/my_app.py (Normal file)
@@ -0,0 +1,14 @@
from celery_config.app_celery import app_celery_instance


@app_celery_instance.task
def add(first: int, second: int) -> int:
    print(first + second)
    return first + second


# TODO: try with `@app.task(throws=(ZeroDivisionError,))`
@app_celery_instance.task
def div(first: int, second: int) -> float:
    # TODO: show how errors work
    return first / second
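
On the `throws` TODO: exception types listed in `throws` are treated by Celery as expected, documented failures; the worker logs them at INFO level without a traceback, while the task state is still FAILURE and .get() still re-raises. A sketch of that variant (same task, only the decorator argument added):

    @app_celery_instance.task(throws=(ZeroDivisionError,))
    def div(first: int, second: int) -> float:
        # ZeroDivisionError is now logged quietly (INFO, no traceback),
        # but callers that .get() the result still see the exception.
        return first / second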

2 examples/celery-rabbit-example/requirements.txt (Normal file)
@@ -0,0 +1,2 @@
celery==5.0.2
python-decouple==3.3

241 examples/celery_progress_demo/.venv/bin/Activate.ps1 (Normal file)
@@ -0,0 +1,241 @@
<#
.Synopsis
Activate a Python virtual environment for the current PowerShell session.

.Description
Pushes the python executable for a virtual environment to the front of the
$Env:PATH environment variable and sets the prompt to signify that you are
in a Python virtual environment. Makes use of the command line switches as
well as the `pyvenv.cfg` file values present in the virtual environment.

.Parameter VenvDir
Path to the directory that contains the virtual environment to activate. The
default value for this is the parent of the directory that the Activate.ps1
script is located within.

.Parameter Prompt
The prompt prefix to display when this virtual environment is activated. By
default, this prompt is the name of the virtual environment folder (VenvDir)
surrounded by parentheses and followed by a single space (ie. '(.venv) ').

.Example
Activate.ps1
Activates the Python virtual environment that contains the Activate.ps1 script.

.Example
Activate.ps1 -Verbose
Activates the Python virtual environment that contains the Activate.ps1 script,
and shows extra information about the activation as it executes.

.Example
Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv
Activates the Python virtual environment located in the specified location.

.Example
Activate.ps1 -Prompt "MyPython"
Activates the Python virtual environment that contains the Activate.ps1 script,
and prefixes the current prompt with the specified string (surrounded in
parentheses) while the virtual environment is active.

.Notes
On Windows, it may be required to enable this Activate.ps1 script by setting the
execution policy for the user. You can do this by issuing the following PowerShell
command:

PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser

For more information on Execution Policies:
https://go.microsoft.com/fwlink/?LinkID=135170

#>
Param(
    [Parameter(Mandatory = $false)]
    [String]
    $VenvDir,
    [Parameter(Mandatory = $false)]
    [String]
    $Prompt
)

<# Function declarations --------------------------------------------------- #>

<#
.Synopsis
Remove all shell session elements added by the Activate script, including the
addition of the virtual environment's Python executable from the beginning of
the PATH variable.

.Parameter NonDestructive
If present, do not remove this function from the global namespace for the
session.

#>
function global:deactivate ([switch]$NonDestructive) {
    # Revert to original values

    # The prior prompt:
    if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) {
        Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt
        Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT
    }

    # The prior PYTHONHOME:
    if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) {
        Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME
        Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME
    }

    # The prior PATH:
    if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) {
        Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH
        Remove-Item -Path Env:_OLD_VIRTUAL_PATH
    }

    # Just remove the VIRTUAL_ENV altogether:
    if (Test-Path -Path Env:VIRTUAL_ENV) {
        Remove-Item -Path env:VIRTUAL_ENV
    }

    # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether:
    if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) {
        Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force
    }

    # Leave deactivate function in the global namespace if requested:
    if (-not $NonDestructive) {
        Remove-Item -Path function:deactivate
    }
}

<#
.Description
Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the
given folder, and returns them in a map.

For each line in the pyvenv.cfg file, if that line can be parsed into exactly
two strings separated by `=` (with any amount of whitespace surrounding the =)
then it is considered a `key = value` line. The left hand string is the key,
the right hand is the value.

If the value starts with a `'` or a `"` then the first and last character is
stripped from the value before being captured.

.Parameter ConfigDir
Path to the directory that contains the `pyvenv.cfg` file.
#>
function Get-PyVenvConfig(
    [String]
    $ConfigDir
) {
    Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg"

    # Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue).
    $pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue

    # An empty map will be returned if no config file is found.
    $pyvenvConfig = @{ }

    if ($pyvenvConfigPath) {

        Write-Verbose "File exists, parse `key = value` lines"
        $pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath

        $pyvenvConfigContent | ForEach-Object {
            $keyval = $PSItem -split "\s*=\s*", 2
            if ($keyval[0] -and $keyval[1]) {
                $val = $keyval[1]

                # Remove extraneous quotations around a string value.
                if ("'""".Contains($val.Substring(0, 1))) {
                    $val = $val.Substring(1, $val.Length - 2)
                }

                $pyvenvConfig[$keyval[0]] = $val
                Write-Verbose "Adding Key: '$($keyval[0])'='$val'"
            }
        }
    }
    return $pyvenvConfig
}


<# Begin Activate script --------------------------------------------------- #>

# Determine the containing directory of this script
$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition
$VenvExecDir = Get-Item -Path $VenvExecPath

Write-Verbose "Activation script is located in path: '$VenvExecPath'"
Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)"
Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)"

# Set values required in priority: CmdLine, ConfigFile, Default
# First, get the location of the virtual environment, it might not be
# VenvExecDir if specified on the command line.
if ($VenvDir) {
    Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values"
}
else {
    Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir."
    $VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/")
    Write-Verbose "VenvDir=$VenvDir"
}

# Next, read the `pyvenv.cfg` file to determine any required value such
# as `prompt`.
$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir

# Next, set the prompt from the command line, or the config file, or
# just use the name of the virtual environment folder.
if ($Prompt) {
    Write-Verbose "Prompt specified as argument, using '$Prompt'"
}
else {
    Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value"
    if ($pyvenvCfg -and $pyvenvCfg['prompt']) {
        Write-Verbose "  Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'"
        $Prompt = $pyvenvCfg['prompt'];
    }
    else {
        Write-Verbose "  Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virtual environment)"
        Write-Verbose "  Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'"
        $Prompt = Split-Path -Path $venvDir -Leaf
    }
}

Write-Verbose "Prompt = '$Prompt'"
Write-Verbose "VenvDir='$VenvDir'"

# Deactivate any currently active virtual environment, but leave the
# deactivate function in place.
deactivate -nondestructive

# Now set the environment variable VIRTUAL_ENV, used by many tools to determine
# that there is an activated venv.
$env:VIRTUAL_ENV = $VenvDir

if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) {

    Write-Verbose "Setting prompt to '$Prompt'"

    # Set the prompt to include the env name
    # Make sure _OLD_VIRTUAL_PROMPT is global
    function global:_OLD_VIRTUAL_PROMPT { "" }
    Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT
    New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt

    function global:prompt {
        Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) "
        _OLD_VIRTUAL_PROMPT
    }
}

# Clear PYTHONHOME
if (Test-Path -Path Env:PYTHONHOME) {
    Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME
    Remove-Item -Path Env:PYTHONHOME
}

# Add the venv to the PATH
Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH
$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH"

76 examples/celery_progress_demo/.venv/bin/activate (Normal file)
@@ -0,0 +1,76 @@
# This file must be used with "source bin/activate" *from bash*
# you cannot run it directly

deactivate () {
    # reset old environment variables
    if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then
        PATH="${_OLD_VIRTUAL_PATH:-}"
        export PATH
        unset _OLD_VIRTUAL_PATH
    fi
    if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then
        PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}"
        export PYTHONHOME
        unset _OLD_VIRTUAL_PYTHONHOME
    fi

    # This should detect bash and zsh, which have a hash command that must
    # be called to get it to forget past commands.  Without forgetting
    # past commands the $PATH changes we made may not be respected
    if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
        hash -r
    fi

    if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then
        PS1="${_OLD_VIRTUAL_PS1:-}"
        export PS1
        unset _OLD_VIRTUAL_PS1
    fi

    unset VIRTUAL_ENV
    if [ ! "${1:-}" = "nondestructive" ] ; then
        # Self destruct!
        unset -f deactivate
    fi
}

# unset irrelevant variables
deactivate nondestructive

VIRTUAL_ENV="/home/balsh/Techschool/Python-20/celery_progress_demo/.venv"
export VIRTUAL_ENV

_OLD_VIRTUAL_PATH="$PATH"
PATH="$VIRTUAL_ENV/bin:$PATH"
export PATH

# unset PYTHONHOME if set
# this will fail if PYTHONHOME is set to the empty string (which is bad anyway)
# could use `if (set -u; : $PYTHONHOME) ;` in bash
if [ -n "${PYTHONHOME:-}" ] ; then
    _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}"
    unset PYTHONHOME
fi

if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then
    _OLD_VIRTUAL_PS1="${PS1:-}"
    if [ "x(.venv) " != x ] ; then
        PS1="(.venv) ${PS1:-}"
    else
        if [ "`basename \"$VIRTUAL_ENV\"`" = "__" ] ; then
            # special case for Aspen magic directories
            # see https://aspen.io/
            PS1="[`basename \`dirname \"$VIRTUAL_ENV\"\``] $PS1"
        else
            PS1="(`basename \"$VIRTUAL_ENV\"`)$PS1"
        fi
    fi
    export PS1
fi

# This should detect bash and zsh, which have a hash command that must
# be called to get it to forget past commands.  Without forgetting
# past commands the $PATH changes we made may not be respected
if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
    hash -r
fi

37 examples/celery_progress_demo/.venv/bin/activate.csh (Normal file)
@@ -0,0 +1,37 @@
# This file must be used with "source bin/activate.csh" *from csh*.
# You cannot run it directly.
# Created by Davide Di Blasi <davidedb@gmail.com>.
# Ported to Python 3.3 venv by Andrew Svetlov <andrew.svetlov@gmail.com>

alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate'

# Unset irrelevant variables.
deactivate nondestructive

setenv VIRTUAL_ENV "/home/balsh/Techschool/Python-20/celery_progress_demo/.venv"

set _OLD_VIRTUAL_PATH="$PATH"
setenv PATH "$VIRTUAL_ENV/bin:$PATH"


set _OLD_VIRTUAL_PROMPT="$prompt"

if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then
    if (".venv" != "") then
        set env_name = ".venv"
    else
        if (`basename "VIRTUAL_ENV"` == "__") then
            # special case for Aspen magic directories
            # see https://aspen.io/
            set env_name = `basename \`dirname "$VIRTUAL_ENV"\``
        else
            set env_name = `basename "$VIRTUAL_ENV"`
        endif
    endif
    set prompt = "[$env_name] $prompt"
    unset env_name
endif

alias pydoc python -m pydoc

rehash

75 examples/celery_progress_demo/.venv/bin/activate.fish (Normal file)
@@ -0,0 +1,75 @@
# This file must be used with ". bin/activate.fish" *from fish* (http://fishshell.org)
# you cannot run it directly

function deactivate -d "Exit virtualenv and return to normal shell environment"
    # reset old environment variables
    if test -n "$_OLD_VIRTUAL_PATH"
        set -gx PATH $_OLD_VIRTUAL_PATH
        set -e _OLD_VIRTUAL_PATH
    end
    if test -n "$_OLD_VIRTUAL_PYTHONHOME"
        set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME
        set -e _OLD_VIRTUAL_PYTHONHOME
    end

    if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
        functions -e fish_prompt
        set -e _OLD_FISH_PROMPT_OVERRIDE
        functions -c _old_fish_prompt fish_prompt
        functions -e _old_fish_prompt
    end

    set -e VIRTUAL_ENV
    if test "$argv[1]" != "nondestructive"
        # Self destruct!
        functions -e deactivate
    end
end

# unset irrelevant variables
deactivate nondestructive

set -gx VIRTUAL_ENV "/home/balsh/Techschool/Python-20/celery_progress_demo/.venv"

set -gx _OLD_VIRTUAL_PATH $PATH
set -gx PATH "$VIRTUAL_ENV/bin" $PATH

# unset PYTHONHOME if set
if set -q PYTHONHOME
    set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
    set -e PYTHONHOME
end

if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
    # fish uses a function instead of an env var to generate the prompt.

    # save the current fish_prompt function as the function _old_fish_prompt
    functions -c fish_prompt _old_fish_prompt

    # with the original prompt function renamed, we can override with our own.
    function fish_prompt
        # Save the return status of the last command
        set -l old_status $status

        # Prompt override?
        if test -n "(.venv) "
            printf "%s%s" "(.venv) " (set_color normal)
        else
            # ...Otherwise, prepend env
            set -l _checkbase (basename "$VIRTUAL_ENV")
            if test $_checkbase = "__"
                # special case for Aspen magic directories
                # see https://aspen.io/
                printf "%s[%s]%s " (set_color -b blue white) (basename (dirname "$VIRTUAL_ENV")) (set_color normal)
            else
                printf "%s(%s)%s" (set_color -b blue white) (basename "$VIRTUAL_ENV") (set_color normal)
            end
        end

        # Restore the return status of the previous command.
        echo "exit $old_status" | .
        _old_fish_prompt
    end

    set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
end

8 examples/celery_progress_demo/.venv/bin/celery (Executable file)
@@ -0,0 +1,8 @@
#!/home/balsh/Techschool/Python-20/celery_progress_demo/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from celery.__main__ import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

8 examples/celery_progress_demo/.venv/bin/confusable_homoglyphs (Executable file)
@@ -0,0 +1,8 @@
#!/home/balsh/Techschool/Python-20/celery_progress_demo/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from confusable_homoglyphs.cli import cli
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(cli())

8 examples/celery_progress_demo/.venv/bin/django-admin (Executable file)
@@ -0,0 +1,8 @@
#!/home/balsh/Techschool/Python-20/celery_progress_demo/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(execute_from_command_line())

21 examples/celery_progress_demo/.venv/bin/django-admin.py (Executable file)
@@ -0,0 +1,21 @@
#!/home/balsh/Techschool/Python-20/celery_progress_demo/.venv/bin/python3
# When the django-admin.py deprecation ends, remove this script.
import warnings

from django.core import management

try:
    from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
    raise ImportError(
        'django-admin.py was deprecated in Django 3.1 and removed in Django '
        '4.0. Please manually remove this script from your virtual environment '
        'and use django-admin instead.'
    )

if __name__ == "__main__":
    warnings.warn(
        'django-admin.py is deprecated in favor of django-admin.',
        RemovedInDjango40Warning,
    )
    management.execute_from_command_line()

8 examples/celery_progress_demo/.venv/bin/easy_install (Executable file)
@@ -0,0 +1,8 @@
#!/home/balsh/Techschool/Python-20/celery_progress_demo/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

8 examples/celery_progress_demo/.venv/bin/easy_install-3.8 (Executable file)
@@ -0,0 +1,8 @@
#!/home/balsh/Techschool/Python-20/celery_progress_demo/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

8 examples/celery_progress_demo/.venv/bin/gunicorn (Executable file)
@@ -0,0 +1,8 @@
#!/home/balsh/Techschool/Python-20/celery_progress_demo/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.wsgiapp import run
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(run())

8 examples/celery_progress_demo/.venv/bin/pip (Executable file)
@@ -0,0 +1,8 @@
#!/home/balsh/Techschool/Python-20/celery_progress_demo/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

8 examples/celery_progress_demo/.venv/bin/pip3 (Executable file)
@@ -0,0 +1,8 @@
#!/home/balsh/Techschool/Python-20/celery_progress_demo/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

8 examples/celery_progress_demo/.venv/bin/pip3.8 (Executable file)
@@ -0,0 +1,8 @@
#!/home/balsh/Techschool/Python-20/celery_progress_demo/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

1 examples/celery_progress_demo/.venv/bin/python (Symbolic link)
@@ -0,0 +1 @@
python3

1 examples/celery_progress_demo/.venv/bin/python3 (Symbolic link)
@@ -0,0 +1 @@
/usr/local/bin/python3

8 examples/celery_progress_demo/.venv/bin/sqlformat (Executable file)
@@ -0,0 +1,8 @@
#!/home/balsh/Techschool/Python-20/celery_progress_demo/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from sqlparse.__main__ import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

File diff suppressed because it is too large

@@ -0,0 +1 @@
pip

@@ -0,0 +1,27 @@
Copyright (c) Django Software Foundation and individual contributors.
All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

    1. Redistributions of source code must retain the above copyright notice,
       this list of conditions and the following disclaimer.

    2. Redistributions in binary form must reproduce the above copyright
       notice, this list of conditions and the following disclaimer in the
       documentation and/or other materials provided with the distribution.

    3. Neither the name of Django nor the names of its contributors may be used
       to endorse or promote products derived from this software without
       specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@@ -0,0 +1,265 @@
Django is licensed under the three-clause BSD license; see the file
LICENSE for details.

Django includes code from the Python standard library, which is licensed under
the Python license, a permissive open source license. The copyright and license
is included below for compliance with Python's terms.

----------------------------------------------------------------------

Copyright (c) 2001-present Python Software Foundation; All Rights Reserved

A. HISTORY OF THE SOFTWARE
==========================

Python was created in the early 1990s by Guido van Rossum at Stichting
Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands
as a successor of a language called ABC.  Guido remains Python's
principal author, although it includes many contributions from others.

In 1995, Guido continued his work on Python at the Corporation for
National Research Initiatives (CNRI, see http://www.cnri.reston.va.us)
in Reston, Virginia where he released several versions of the
software.

In May 2000, Guido and the Python core development team moved to
BeOpen.com to form the BeOpen PythonLabs team.  In October of the same
year, the PythonLabs team moved to Digital Creations, which became
Zope Corporation.  In 2001, the Python Software Foundation (PSF, see
https://www.python.org/psf/) was formed, a non-profit organization
created specifically to own Python-related Intellectual Property.
Zope Corporation was a sponsoring member of the PSF.

All Python releases are Open Source (see http://www.opensource.org for
the Open Source Definition).  Historically, most, but not all, Python
releases have also been GPL-compatible; the table below summarizes
the various releases.

    Release         Derived     Year        Owner       GPL-
                    from                                compatible? (1)

    0.9.0 thru 1.2              1991-1995   CWI         yes
    1.3 thru 1.5.2  1.2         1995-1999   CNRI        yes
    1.6             1.5.2       2000        CNRI        no
    2.0             1.6         2000        BeOpen.com  no
    1.6.1           1.6         2001        CNRI        yes (2)
    2.1             2.0+1.6.1   2001        PSF         no
    2.0.1           2.0+1.6.1   2001        PSF         yes
    2.1.1           2.1+2.0.1   2001        PSF         yes
    2.1.2           2.1.1       2002        PSF         yes
    2.1.3           2.1.2       2002        PSF         yes
    2.2 and above   2.1.1       2001-now    PSF         yes

Footnotes:

(1) GPL-compatible doesn't mean that we're distributing Python under
    the GPL.  All Python licenses, unlike the GPL, let you distribute
    a modified version without making your changes open source.  The
    GPL-compatible licenses make it possible to combine Python with
    other software that is released under the GPL; the others don't.

(2) According to Richard Stallman, 1.6.1 is not GPL-compatible,
    because its license has a choice of law clause.  According to
    CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1
    is "not incompatible" with the GPL.

Thanks to the many outside volunteers who have worked under Guido's
direction to make these releases possible.


B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON
===============================================================

PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
--------------------------------------------

1. This LICENSE AGREEMENT is between the Python Software Foundation
("PSF"), and the Individual or Organization ("Licensee") accessing and
otherwise using this software ("Python") in source or binary form and
its associated documentation.

2. Subject to the terms and conditions of this License Agreement, PSF hereby
grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
analyze, test, perform and/or display publicly, prepare derivative works,
distribute, and otherwise use Python alone or in any derivative version,
provided, however, that PSF's License Agreement and PSF's notice of copyright,
i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020 Python Software Foundation;
All Rights Reserved" are retained in Python alone or in any derivative version
prepared by Licensee.

3. In the event Licensee prepares a derivative work that is based on
or incorporates Python or any part thereof, and wants to make
the derivative work available to others as provided herein, then
Licensee hereby agrees to include in any such work a brief summary of
the changes made to Python.

4. PSF is making Python available to Licensee on an "AS IS"
basis.  PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
INFRINGE ANY THIRD PARTY RIGHTS.

5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.

6. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.

7. Nothing in this License Agreement shall be deemed to create any
relationship of agency, partnership, or joint venture between PSF and
Licensee.  This License Agreement does not grant permission to use PSF
trademarks or trade name in a trademark sense to endorse or promote
products or services of Licensee, or any third party.

8. By copying, installing or otherwise using Python, Licensee
agrees to be bound by the terms and conditions of this License
Agreement.


BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0
-------------------------------------------

BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1

1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an
office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the
Individual or Organization ("Licensee") accessing and otherwise using
this software in source or binary form and its associated
documentation ("the Software").

2. Subject to the terms and conditions of this BeOpen Python License
Agreement, BeOpen hereby grants Licensee a non-exclusive,
royalty-free, world-wide license to reproduce, analyze, test, perform
and/or display publicly, prepare derivative works, distribute, and
otherwise use the Software alone or in any derivative version,
provided, however, that the BeOpen Python License is retained in the
Software, alone or in any derivative version prepared by Licensee.

3. BeOpen is making the Software available to Licensee on an "AS IS"
basis.  BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT
INFRINGE ANY THIRD PARTY RIGHTS.

4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE
SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS
AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY
DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.

5. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.

6. This License Agreement shall be governed by and interpreted in all
respects by the law of the State of California, excluding conflict of
law provisions.  Nothing in this License Agreement shall be deemed to
create any relationship of agency, partnership, or joint venture
between BeOpen and Licensee.  This License Agreement does not grant
permission to use BeOpen trademarks or trade names in a trademark
sense to endorse or promote products or services of Licensee, or any
third party.  As an exception, the "BeOpen Python" logos available at
http://www.pythonlabs.com/logos.html may be used according to the
permissions granted on that web page.

7. By copying, installing or otherwise using the software, Licensee
agrees to be bound by the terms and conditions of this License
Agreement.


CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1
---------------------------------------

1. This LICENSE AGREEMENT is between the Corporation for National
Research Initiatives, having an office at 1895 Preston White Drive,
Reston, VA 20191 ("CNRI"), and the Individual or Organization
("Licensee") accessing and otherwise using Python 1.6.1 software in
source or binary form and its associated documentation.

2. Subject to the terms and conditions of this License Agreement, CNRI
hereby grants Licensee a nonexclusive, royalty-free, world-wide
license to reproduce, analyze, test, perform and/or display publicly,
prepare derivative works, distribute, and otherwise use Python 1.6.1
alone or in any derivative version, provided, however, that CNRI's
License Agreement and CNRI's notice of copyright, i.e., "Copyright (c)
1995-2001 Corporation for National Research Initiatives; All Rights
Reserved" are retained in Python 1.6.1 alone or in any derivative
version prepared by Licensee.  Alternately, in lieu of CNRI's License
Agreement, Licensee may substitute the following text (omitting the
quotes): "Python 1.6.1 is made available subject to the terms and
conditions in CNRI's License Agreement.  This Agreement together with
Python 1.6.1 may be located on the Internet using the following
unique, persistent identifier (known as a handle): 1895.22/1013.  This
Agreement may also be obtained from a proxy server on the Internet
using the following URL: http://hdl.handle.net/1895.22/1013".

3. In the event Licensee prepares a derivative work that is based on
or incorporates Python 1.6.1 or any part thereof, and wants to make
the derivative work available to others as provided herein, then
Licensee hereby agrees to include in any such work a brief summary of
the changes made to Python 1.6.1.

4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS"
basis.  CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT
INFRINGE ANY THIRD PARTY RIGHTS.

5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,
OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.

6. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.

7. This License Agreement shall be governed by the federal
intellectual property law of the United States, including without
limitation the federal copyright law, and, to the extent such
U.S. federal law does not apply, by the law of the Commonwealth of
Virginia, excluding Virginia's conflict of law provisions.
Notwithstanding the foregoing, with regard to derivative works based
on Python 1.6.1 that incorporate non-separable material that was
previously distributed under the GNU General Public License (GPL), the
law of the Commonwealth of Virginia shall govern this License
Agreement only as to issues arising under or with respect to
Paragraphs 4, 5, and 7 of this License Agreement.  Nothing in this
License Agreement shall be deemed to create any relationship of
agency, partnership, or joint venture between CNRI and Licensee.  This
License Agreement does not grant permission to use CNRI trademarks or
trade name in a trademark sense to endorse or promote products or
services of Licensee, or any third party.

8. By clicking on the "ACCEPT" button where indicated, or by copying,
installing or otherwise using Python 1.6.1, Licensee agrees to be
bound by the terms and conditions of this License Agreement.

        ACCEPT


CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2
--------------------------------------------------

Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,
The Netherlands.  All rights reserved.

Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee is hereby granted,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation, and that the name of Stichting Mathematisch
Centrum or CWI not be used in advertising or publicity pertaining to
distribution of the software without specific, written prior
permission.

STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE
FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

@@ -0,0 +1,97 @@
Metadata-Version: 2.1
Name: Django
Version: 3.2.5
Summary: A high-level Python Web framework that encourages rapid development and clean, pragmatic design.
Home-page: https://www.djangoproject.com/
Author: Django Software Foundation
Author-email: foundation@djangoproject.com
License: BSD-3-Clause
Project-URL: Documentation, https://docs.djangoproject.com/
Project-URL: Release notes, https://docs.djangoproject.com/en/stable/releases/
Project-URL: Funding, https://www.djangoproject.com/fundraising/
Project-URL: Source, https://github.com/django/django
Project-URL: Tracker, https://code.djangoproject.com/
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: Web Environment
Classifier: Framework :: Django
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Topic :: Internet :: WWW/HTTP
Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
Classifier: Topic :: Internet :: WWW/HTTP :: WSGI
Classifier: Topic :: Software Development :: Libraries :: Application Frameworks
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Requires-Python: >=3.6
Requires-Dist: asgiref (<4,>=3.3.2)
Requires-Dist: pytz
Requires-Dist: sqlparse (>=0.2.2)
Provides-Extra: argon2
Requires-Dist: argon2-cffi (>=19.1.0) ; extra == 'argon2'
Provides-Extra: bcrypt
Requires-Dist: bcrypt ; extra == 'bcrypt'

======
Django
======

Django is a high-level Python Web framework that encourages rapid development
and clean, pragmatic design. Thanks for checking it out.

All documentation is in the "``docs``" directory and online at
https://docs.djangoproject.com/en/stable/. If you're just getting started,
here's how we recommend you read the docs:

* First, read ``docs/intro/install.txt`` for instructions on installing Django.

* Next, work through the tutorials in order (``docs/intro/tutorial01.txt``,
  ``docs/intro/tutorial02.txt``, etc.).

* If you want to set up an actual deployment server, read
  ``docs/howto/deployment/index.txt`` for instructions.

* You'll probably want to read through the topical guides (in ``docs/topics``)
  next; from there you can jump to the HOWTOs (in ``docs/howto``) for specific
  problems, and check out the reference (``docs/ref``) for gory details.

* See ``docs/README`` for instructions on building an HTML version of the docs.

Docs are updated rigorously. If you find any problems in the docs, or think
they should be clarified in any way, please take 30 seconds to fill out a
ticket here: https://code.djangoproject.com/newticket

To get more help:

* Join the ``#django`` channel on ``irc.libera.chat``. Lots of helpful people
  hang out there. See https://web.libera.chat if you're new to IRC.

* Join the django-users mailing list, or read the archives, at
  https://groups.google.com/group/django-users.

To contribute to Django:

* Check out https://docs.djangoproject.com/en/dev/internals/contributing/ for
  information about getting involved.

To run Django's test suite:

* Follow the instructions in the "Unit tests" section of
  ``docs/internals/contributing/writing-code/unit-tests.txt``, published online at
  https://docs.djangoproject.com/en/dev/internals/contributing/writing-code/unit-tests/#running-the-unit-tests

Supporting the Development of Django
====================================

Django's development depends on your contributions.

If you depend on Django, remember to support the Django Software Foundation: https://www.djangoproject.com/fundraising/

File diff suppressed because it is too large

@@ -0,0 +1,5 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.34.2)
Root-Is-Purelib: true
Tag: py3-none-any

@@ -0,0 +1,3 @@
[console_scripts]
django-admin = django.core.management:execute_from_command_line

@@ -0,0 +1 @@
django
Binary file not shown.

@@ -0,0 +1 @@
pip

@@ -0,0 +1,47 @@
Copyright (c) 2015-2016 Ask Solem & contributors.  All rights reserved.
Copyright (c) 2012-2014 GoPivotal, Inc.  All rights reserved.
Copyright (c) 2009, 2010, 2011, 2012 Ask Solem, and individual contributors.  All rights reserved.
Copyright (C) 2007-2008 Barry Pederson <bp@barryp.org>.  All rights reserved.

py-amqp is licensed under The BSD License (3 Clause, also known as
the new BSD license).  The license is an OSI approved Open Source
license and is GPL-compatible(1).

The license text can also be found here:
http://www.opensource.org/licenses/BSD-3-Clause

License
=======

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
    * Neither the name of Ask Solem, nor the
      names of its contributors may be used to endorse or promote products
      derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Ask Solem OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.


Footnotes
=========
(1) A GPL-compatible license makes it possible to
    combine Celery with other software that is released
    under the GPL, it does not mean that we're distributing
    Celery under the GPL license.  The BSD license, unlike the GPL,
    let you distribute a modified version without making your
    changes open source.
@ -0,0 +1,239 @@
|
||||
Metadata-Version: 2.1
|
||||
Name: amqp
|
||||
Version: 5.0.6
|
||||
Summary: Low-level AMQP client for Python (fork of amqplib).
|
||||
Home-page: http://github.com/celery/py-amqp
|
||||
Author: Barry Pederson
|
||||
Author-email: pyamqp@celeryproject.org
|
||||
Maintainer: Asif Saif Uddin, Matus Valo
|
||||
License: BSD
|
||||
Keywords: amqp rabbitmq cloudamqp messaging
|
||||
Platform: any
|
||||
Classifier: Development Status :: 5 - Production/Stable
|
||||
Classifier: Programming Language :: Python
|
||||
Classifier: Programming Language :: Python :: 3 :: Only
|
||||
Classifier: Programming Language :: Python :: 3
|
||||
Classifier: Programming Language :: Python :: 3.6
|
||||
Classifier: Programming Language :: Python :: 3.7
|
||||
Classifier: Programming Language :: Python :: 3.8
|
||||
Classifier: Programming Language :: Python :: Implementation :: CPython
|
||||
Classifier: Programming Language :: Python :: Implementation :: PyPy
|
||||
Classifier: License :: OSI Approved :: BSD License
|
||||
Classifier: Intended Audience :: Developers
|
||||
Classifier: Operating System :: OS Independent
|
||||
Requires-Python: >=3.6
|
||||
Description-Content-Type: text/x-rst
|
||||
Requires-Dist: vine (==5.0.0)
|
||||
|
||||
=====================================================================
|
||||
Python AMQP 0.9.1 client library
|
||||
=====================================================================
|
||||
|
||||
|build-status| |coverage| |license| |wheel| |pyversion| |pyimp|
|
||||
|
||||
:Version: 5.0.6
|
||||
:Web: https://amqp.readthedocs.io/
|
||||
:Download: https://pypi.org/project/amqp/
|
||||
:Source: http://github.com/celery/py-amqp/
|
||||
:Keywords: amqp, rabbitmq
|
||||
|
||||
About
|
||||
=====
|
||||
|
||||
This is a fork of amqplib_ which was originally written by Barry Pederson.
|
||||
It is maintained by the Celery_ project, and used by `kombu`_ as a pure python
|
||||
alternative when `librabbitmq`_ is not available.
|
||||
|
||||
This library should be API compatible with `librabbitmq`_.
|
||||
|
||||
.. _amqplib: https://pypi.org/project/amqplib/
|
||||
.. _Celery: http://celeryproject.org/
|
||||
.. _kombu: https://kombu.readthedocs.io/
|
||||
.. _librabbitmq: https://pypi.org/project/librabbitmq/
|
||||
|
Differences from `amqplib`_
===========================

- Supports draining events from multiple channels (``Connection.drain_events``)
- Support for timeouts
- Channels are restored after channel error, instead of having to close the
  connection.
- Support for heartbeats (see the sketch after this list)

    - ``Connection.heartbeat_tick(rate=2)`` must be called at regular intervals
      (half of the heartbeat value if rate is 2).
    - Or some other scheme by using ``Connection.send_heartbeat``.
- Supports RabbitMQ extensions:

    - Consumer Cancel Notifications

        - by default a cancel results in ``ChannelError`` being raised
        - but not if an ``on_cancel`` callback is passed to ``basic_consume``.
    - Publisher confirms

        - ``Channel.confirm_select()`` enables publisher confirms.
        - ``Channel.events['basic_ack'].append(my_callback)`` adds a callback
          to be called when a message is confirmed. This callback is then
          called with the signature ``(delivery_tag, multiple)``.
    - Exchange-to-exchange bindings: ``exchange_bind`` / ``exchange_unbind``.
    - Authentication Failure Notifications

      Instead of just closing the connection abruptly on invalid
      credentials, py-amqp will raise an ``AccessRefused`` error
      when connected to rabbitmq-server 3.2.0 or greater.
    - Support for ``basic_return``
- Uses AMQP 0-9-1 instead of 0-8.

    - ``Channel.access_request`` and ``ticket`` arguments to methods
      **removed**.
    - Supports the ``arguments`` argument to ``basic_consume``.
    - ``internal`` argument to ``exchange_declare`` removed.
    - ``auto_delete`` argument to ``exchange_declare`` deprecated.
    - ``insist`` argument to ``Connection`` removed.
    - ``Channel.alerts`` has been removed.
    - Support for ``Channel.basic_recover_async``.
    - ``Channel.basic_recover`` deprecated.
- Exceptions renamed to have idiomatic names:

    - ``AMQPException`` -> ``AMQPError``
    - ``AMQPConnectionException`` -> ``ConnectionError``
    - ``AMQPChannelException`` -> ``ChannelError``
- ``Connection.known_hosts`` removed.
- ``Connection`` no longer supports redirects.
- ``exchange`` argument to ``queue_bind`` can now be empty
  to use the "default exchange".
- Adds ``Connection.is_alive`` that tries to detect
  whether the connection can still be used.
- Adds ``Connection.connection_errors`` and ``.channel_errors``,
  a list of recoverable errors.
- Exposes the underlying socket as ``Connection.sock``.
- Adds ``Channel.no_ack_consumers`` to keep track of consumer tags
  that set the no_ack flag.
- Slightly better at error recovery.
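
To make the heartbeat requirement concrete, here is a minimal sketch of a
client loop that drives both event draining and ``Connection.heartbeat_tick``.
The broker hostname, the 10-second heartbeat and the one-second poll cadence
are illustrative assumptions, not values fixed by the library:

.. code:: python

    import socket

    import amqp

    conn = amqp.Connection('broker.example.com', heartbeat=10)
    conn.connect()
    try:
        while True:
            try:
                # Wait up to one second for an inbound frame; timing out
                # here is the normal idle case, not an error.
                conn.drain_events(timeout=1)
            except socket.timeout:
                pass
            # Must run at regular intervals so outgoing heartbeats are
            # sent and a dead peer is detected (raises ConnectionForced).
            conn.heartbeat_tick()
    finally:
        conn.close()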

Quick overview
==============

Simple producer publishing messages to ``test`` queue using default exchange:

.. code:: python

    import amqp

    with amqp.Connection('broker.example.com') as c:
        ch = c.channel()
        ch.basic_publish(amqp.Message('Hello World'), routing_key='test')

Producer publishing to ``test_exchange`` exchange with publisher confirms enabled and using virtual_host ``test_vhost``:

.. code:: python

    import amqp

    with amqp.Connection(
        'broker.example.com', exchange='test_exchange',
        confirm_publish=True, virtual_host='test_vhost'
    ) as c:
        ch = c.channel()
        ch.basic_publish(amqp.Message('Hello World'), routing_key='test')

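The confirm-mode producer above does not return from ``basic_publish`` until
the broker acknowledges the message. As a minimal sketch of handling a
negative acknowledgment (the broker hostname is an illustrative assumption),
a nack surfaces as ``MessageNacked``:

.. code:: python

    import amqp
    from amqp.exceptions import MessageNacked

    with amqp.Connection('broker.example.com', confirm_publish=True) as c:
        ch = c.channel()
        try:
            # Blocks until the broker confirms (or nacks) this message.
            ch.basic_publish(amqp.Message('Hello World'), routing_key='test')
        except MessageNacked:
            print('broker refused the message')
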
Consumer with acknowledgments enabled:

.. code:: python

    import amqp

    with amqp.Connection('broker.example.com') as c:
        ch = c.channel()
        def on_message(message):
            print('Received message (delivery tag: {}): {}'.format(message.delivery_tag, message.body))
            ch.basic_ack(message.delivery_tag)
        ch.basic_consume(queue='test', callback=on_message)
        while True:
            c.drain_events()

Consumer with acknowledgments disabled:

.. code:: python

    import amqp

    with amqp.Connection('broker.example.com') as c:
        ch = c.channel()
        def on_message(message):
            print('Received message (delivery tag: {}): {}'.format(message.delivery_tag, message.body))
        ch.basic_consume(queue='test', callback=on_message, no_ack=True)
        while True:
            c.drain_events()

Speedups
========

This library has **experimental** support for speedups. Speedups are implemented using Cython. To enable speedups, the ``CELERY_ENABLE_SPEEDUPS`` environment variable must be set during building/installation.
Currently speedups can be installed:

1. using the source package (using the ``--no-binary`` switch):

.. code:: shell

    CELERY_ENABLE_SPEEDUPS=true pip install --no-binary :all: amqp


2. by building directly from source:

.. code:: shell

    CELERY_ENABLE_SPEEDUPS=true python setup.py install

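One rough way to verify that the speedups are actually in use is to check
where one of the compiled modules was loaded from. This is only a sketch: it
assumes ``amqp.serialization`` is among the Cython-compiled modules, which
may vary between versions:

.. code:: python

    import amqp.serialization

    # A compiled extension ends in '.so' (or '.pyd' on Windows); a plain
    # '.py' path means the pure-Python implementation was loaded instead.
    print(amqp.serialization.__file__)
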
Further
=======

- Differences between AMQP 0.8 and 0.9.1

  http://www.rabbitmq.com/amqp-0-8-to-0-9-1.html

- AMQP 0.9.1 Quick Reference

  http://www.rabbitmq.com/amqp-0-9-1-quickref.html

- RabbitMQ Extensions

  http://www.rabbitmq.com/extensions.html

- For more information about AMQP, visit

  http://www.amqp.org

- For other Python client libraries see:

  http://www.rabbitmq.com/devtools.html#python-dev

.. |build-status| image:: https://api.travis-ci.com/celery/py-amqp.png?branch=master
    :alt: Build status
    :target: https://travis-ci.com/celery/py-amqp

.. |coverage| image:: https://codecov.io/github/celery/py-amqp/coverage.svg?branch=master
    :target: https://codecov.io/github/celery/py-amqp?branch=master

.. |license| image:: https://img.shields.io/pypi/l/amqp.svg
    :alt: BSD License
    :target: https://opensource.org/licenses/BSD-3-Clause

.. |wheel| image:: https://img.shields.io/pypi/wheel/amqp.svg
    :alt: Python AMQP can be installed via wheel
    :target: https://pypi.org/project/amqp/

.. |pyversion| image:: https://img.shields.io/pypi/pyversions/amqp.svg
    :alt: Supported Python versions.
    :target: https://pypi.org/project/amqp/

.. |pyimp| image:: https://img.shields.io/pypi/implementation/amqp.svg
    :alt: Supported Python implementations.
    :target: https://pypi.org/project/amqp/

py-amqp as part of the Tidelift Subscription
============================================

The maintainers of py-amqp and thousands of other packages are working with Tidelift to deliver commercial support and maintenance for the open source dependencies you use to build your applications. Save time, reduce risk, and improve code health, while paying the maintainers of the exact dependencies you use. `Learn more <https://tidelift.com/subscription/pkg/pypi-amqp?utm_source=pypi-amqp&utm_medium=referral&utm_campaign=readme&utm_term=repo>`_.

@ -0,0 +1,39 @@
amqp-5.0.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
amqp-5.0.6.dist-info/LICENSE,sha256=9e9fEoLq4ZMcdGRfhxm2xps9aizyd7_aJJqCcM1HOvM,2372
amqp-5.0.6.dist-info/METADATA,sha256=uZooClF-L109Esb-3gjeqt-SR69fRJaVaiGP_H610QQ,8790
amqp-5.0.6.dist-info/RECORD,,
amqp-5.0.6.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
amqp-5.0.6.dist-info/WHEEL,sha256=g4nMs7d-Xl9-xC9XovUrsDHGXt-FT0E17Yqo92DEfvY,92
amqp-5.0.6.dist-info/top_level.txt,sha256=tWQNmFVhU4UtDgB6Yy2lKqRz7LtOrRcN8_bPFVcVVR8,5
amqp/__init__.py,sha256=bkKeN2xNBGYhX34ibhVrUMl1BEWMylpF-RfvuhBAUnY,2365
amqp/__pycache__/__init__.cpython-38.pyc,,
amqp/__pycache__/abstract_channel.cpython-38.pyc,,
amqp/__pycache__/basic_message.cpython-38.pyc,,
amqp/__pycache__/channel.cpython-38.pyc,,
amqp/__pycache__/connection.cpython-38.pyc,,
amqp/__pycache__/exceptions.cpython-38.pyc,,
amqp/__pycache__/five.cpython-38.pyc,,
amqp/__pycache__/method_framing.cpython-38.pyc,,
amqp/__pycache__/platform.cpython-38.pyc,,
amqp/__pycache__/protocol.cpython-38.pyc,,
amqp/__pycache__/sasl.cpython-38.pyc,,
amqp/__pycache__/serialization.cpython-38.pyc,,
amqp/__pycache__/spec.cpython-38.pyc,,
amqp/__pycache__/transport.cpython-38.pyc,,
amqp/__pycache__/types.cpython-38.pyc,,
amqp/__pycache__/utils.cpython-38.pyc,,
amqp/abstract_channel.py,sha256=qJ-btd8_37svD9xwTSAfC0nfGBmlHFIfOslDtrKoko4,4659
amqp/basic_message.py,sha256=A-84F6w9dtbujtzdkXnm3tATkZyXH14GkusJ2gNcsTM,3268
amqp/channel.py,sha256=KqbqEi7yj8zK4eBkc95NlEi5lSR_4bqO4G95KRsnOBQ,74115
amqp/connection.py,sha256=FTX8elrF0T0fR6pWdBJ2MJUNg3MVuJKZZ7zoQ5qH_zQ,27249
amqp/exceptions.py,sha256=yqjoFIRue2rvK7kMdvkKsGOD6dMOzzzT3ZzBwoGWAe4,7166
amqp/five.py,sha256=N2j8UuGkMEjFmBM3dbEpN2HnDeka6K_Gq29Fsz2VNcM,178
amqp/method_framing.py,sha256=mkM0k9Vv7r9X5VBOjs1M_lrBU6egV1LTwp0FUAeFk4k,6516
amqp/platform.py,sha256=cyLevv6E15P9zhMo_fV84p67Q_A8fdsTq9amjvlUwqE,2379
amqp/protocol.py,sha256=Di3y6qqhnOV4QtkeYKO-zryfWqwl3F1zUxDOmVSsAp0,291
amqp/sasl.py,sha256=yBcLmrDYQniL_fAXCT1mXD6bTYcW3vUm3izIs0XKj2c,5693
amqp/serialization.py,sha256=-5QLtS7kIACWx8Y26AK-iSHq0bDME0dd0KP018-D8Ww,16878
amqp/spec.py,sha256=2ZjbL4FR4Fv67HA7HUI9hLUIvAv3A4ZH6GRPzrMRyWg,2121
amqp/transport.py,sha256=4UxpYEmflIGZW-_aL05AWcUy7Zkr3K4SLSd2nCulk2Q,24048
amqp/types.py,sha256=Zcpcl56TTiv-IAdXJsiF3a_3CDW1MvHP5QgImH5EgJ8,14042
amqp/utils.py,sha256=JjjY040LwsDUc1zmKP2VTzXBioVXy48DUZtWB8PaPy0,1456
@ -0,0 +1,5 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.34.2)
Root-Is-Purelib: true
Tag: py3-none-any

@ -0,0 +1 @@
amqp
@ -0,0 +1,75 @@
"""Low-level AMQP client for Python (fork of amqplib)."""
# Copyright (C) 2007-2008 Barry Pederson <bp@barryp.org>

import re
from collections import namedtuple

__version__ = '5.0.6'
__author__ = 'Barry Pederson'
__maintainer__ = 'Asif Saif Uddin, Matus Valo'
__contact__ = 'pyamqp@celeryproject.org'
__homepage__ = 'http://github.com/celery/py-amqp'
__docformat__ = 'restructuredtext'

# -eof meta-

version_info_t = namedtuple('version_info_t', (
    'major', 'minor', 'micro', 'releaselevel', 'serial',
))

# bumpversion can only search for {current_version}
# so we have to parse the version here.
_temp = re.match(
    r'(\d+)\.(\d+).(\d+)(.+)?', __version__).groups()
VERSION = version_info = version_info_t(
    int(_temp[0]), int(_temp[1]), int(_temp[2]), _temp[3] or '', '')
del(_temp)
del(re)

from .basic_message import Message  # noqa
from .channel import Channel  # noqa
from .connection import Connection  # noqa
from .exceptions import (AccessRefused, AMQPError,  # noqa
                         AMQPNotImplementedError, ChannelError, ChannelNotOpen,
                         ConnectionError, ConnectionForced, ConsumerCancelled,
                         ContentTooLarge, FrameError, FrameSyntaxError,
                         InternalError, InvalidCommand, InvalidPath,
                         IrrecoverableChannelError,
                         IrrecoverableConnectionError, NoConsumers, NotAllowed,
                         NotFound, PreconditionFailed, RecoverableChannelError,
                         RecoverableConnectionError, ResourceError,
                         ResourceLocked, UnexpectedFrame, error_for_code)
from .utils import promise  # noqa

__all__ = (
    'Connection',
    'Channel',
    'Message',
    'promise',
    'AMQPError',
    'ConnectionError',
    'RecoverableConnectionError',
    'IrrecoverableConnectionError',
    'ChannelError',
    'RecoverableChannelError',
    'IrrecoverableChannelError',
    'ConsumerCancelled',
    'ContentTooLarge',
    'NoConsumers',
    'ConnectionForced',
    'InvalidPath',
    'AccessRefused',
    'NotFound',
    'ResourceLocked',
    'PreconditionFailed',
    'FrameError',
    'FrameSyntaxError',
    'InvalidCommand',
    'ChannelNotOpen',
    'UnexpectedFrame',
    'ResourceError',
    'NotAllowed',
    'AMQPNotImplementedError',
    'InternalError',
    'error_for_code',
)
@ -0,0 +1,150 @@
"""Code common to Connection and Channel objects."""
# Copyright (C) 2007-2008 Barry Pederson <bp@barryp.org>

import logging

from vine import ensure_promise, promise

from .exceptions import AMQPNotImplementedError, RecoverableConnectionError
from .serialization import dumps, loads

__all__ = ('AbstractChannel',)

AMQP_LOGGER = logging.getLogger('amqp')

IGNORED_METHOD_DURING_CHANNEL_CLOSE = """\
Received method %s during closing channel %s. This method will be ignored\
"""


class AbstractChannel:
    """Superclass for Connection and Channel.

    The connection is treated as channel 0, then comes
    user-created channel objects.

    The subclasses must have a _METHOD_MAP class property, mapping
    between AMQP method signatures and Python methods.
    """

    def __init__(self, connection, channel_id):
        self.is_closing = False
        self.connection = connection
        self.channel_id = channel_id
        connection.channels[channel_id] = self
        self.method_queue = []  # Higher level queue for methods
        self.auto_decode = False
        self._pending = {}
        self._callbacks = {}

        self._setup_listeners()

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.close()

    def send_method(self, sig,
                    format=None, args=None, content=None,
                    wait=None, callback=None, returns_tuple=False):
        p = promise()
        conn = self.connection
        if conn is None:
            raise RecoverableConnectionError('connection already closed')
        args = dumps(format, args) if format else ''
        try:
            conn.frame_writer(1, self.channel_id, sig, args, content)
        except StopIteration:
            raise RecoverableConnectionError('connection already closed')

        # TODO temp: callback should be after write_method ... ;)
        if callback:
            p.then(callback)
        p()
        if wait:
            return self.wait(wait, returns_tuple=returns_tuple)
        return p

    def close(self):
        """Close this Channel or Connection."""
        raise NotImplementedError('Must be overridden in subclass')

    def wait(self, method, callback=None, timeout=None, returns_tuple=False):
        p = ensure_promise(callback)
        pending = self._pending
        prev_p = []
        if not isinstance(method, list):
            method = [method]

        for m in method:
            prev_p.append(pending.get(m))
            pending[m] = p

        try:
            while not p.ready:
                self.connection.drain_events(timeout=timeout)

            if p.value:
                args, kwargs = p.value
                args = args[1:]  # We are not returning method back
                return args if returns_tuple else (args and args[0])
        finally:
            for i, m in enumerate(method):
                if prev_p[i] is not None:
                    pending[m] = prev_p[i]
                else:
                    pending.pop(m, None)

    def dispatch_method(self, method_sig, payload, content):
        if self.is_closing and method_sig not in (
                self._ALLOWEDTHODS_WHEN_CLOSING := self._ALLOWED_METHODS_WHEN_CLOSING
        ):
            # When channel.close() was called we must ignore all methods except
            # Channel.close and Channel.CloseOk
            AMQP_LOGGER.warning(
                IGNORED_METHOD_DURING_CHANNEL_CLOSE,
                method_sig, self.channel_id
            )
            return

        if content and \
                self.auto_decode and \
                hasattr(content, 'content_encoding'):
            try:
                content.body = content.body.decode(content.content_encoding)
            except Exception:
                pass

        try:
            amqp_method = self._METHODS[method_sig]
        except KeyError:
            raise AMQPNotImplementedError(
                f'Unknown AMQP method {method_sig!r}')

        try:
            listeners = [self._callbacks[method_sig]]
        except KeyError:
            listeners = []
        one_shot = None
        try:
            one_shot = self._pending.pop(method_sig)
        except KeyError:
            if not listeners:
                return

        args = []
        if amqp_method.args:
            args, _ = loads(amqp_method.args, payload, 4)
        if amqp_method.content:
            args.append(content)

        for listener in listeners:
            listener(*args)

        if one_shot:
            one_shot(method_sig, *args)

    #: Placeholder, the concrete implementations will have to
    #: supply their own versions of _METHOD_MAP
    _METHODS = {}
@ -0,0 +1,116 @@
"""AMQP Messages."""
# Copyright (C) 2007-2008 Barry Pederson <bp@barryp.org>
from .serialization import GenericContent
# Intended to fix #85: ImportError: cannot import name spec
# Encountered on python 2.7.3
# "The submodules often need to refer to each other. For example, the
# surround [sic] module might use the echo module. In fact, such
# references are so common that the import statement first looks in
# the containing package before looking in the standard module search
# path."
# Source:
# http://stackoverflow.com/a/14216937/4982251
from .spec import Basic

__all__ = ('Message',)


class Message(GenericContent):
    """A Message for use with the Channel.basic_* methods.

    Expected arg types

        body: string
        children: (not supported)

    Keyword properties may include:

        content_type: shortstr
            MIME content type

        content_encoding: shortstr
            MIME content encoding

        application_headers: table
            Message header field table, a dict with string keys,
            and string | int | Decimal | datetime | dict values.

        delivery_mode: octet
            Non-persistent (1) or persistent (2)

        priority: octet
            The message priority, 0 to 9

        correlation_id: shortstr
            The application correlation identifier

        reply_to: shortstr
            The destination to reply to

        expiration: shortstr
            Message expiration specification

        message_id: shortstr
            The application message identifier

        timestamp: unsigned long
            The message timestamp

        type: shortstr
            The message type name

        user_id: shortstr
            The creating user id

        app_id: shortstr
            The creating application id

        cluster_id: shortstr
            Intra-cluster routing identifier

    Unicode bodies are encoded according to the 'content_encoding'
    argument. If that's None, it's set to 'UTF-8' automatically.

    Example::

        msg = Message('hello world',
                      content_type='text/plain',
                      application_headers={'foo': 7})
    """

    CLASS_ID = Basic.CLASS_ID

    #: Instances of this class have these attributes, which
    #: are passed back and forth as message properties between
    #: client and server
    PROPERTIES = [
        ('content_type', 's'),
        ('content_encoding', 's'),
        ('application_headers', 'F'),
        ('delivery_mode', 'o'),
        ('priority', 'o'),
        ('correlation_id', 's'),
        ('reply_to', 's'),
        ('expiration', 's'),
        ('message_id', 's'),
        ('timestamp', 'L'),
        ('type', 's'),
        ('user_id', 's'),
        ('app_id', 's'),
        ('cluster_id', 's')
    ]

    def __init__(self, body='', children=None, channel=None, **properties):
        super().__init__(**properties)
        #: set by basic_consume/basic_get
        self.delivery_info = None
        self.body = body
        self.channel = channel

    @property
    def headers(self):
        return self.properties.get('application_headers')

    @property
    def delivery_tag(self):
        return self.delivery_info.get('delivery_tag')
File diff suppressed because it is too large
@ -0,0 +1,776 @@
"""AMQP Connections."""
# Copyright (C) 2007-2008 Barry Pederson <bp@barryp.org>

import logging
import socket
import uuid
import warnings
from array import array
from time import monotonic

from vine import ensure_promise

from . import __version__, sasl, spec
from .abstract_channel import AbstractChannel
from .channel import Channel
from .exceptions import (AMQPDeprecationWarning, ChannelError, ConnectionError,
                         ConnectionForced, RecoverableChannelError,
                         RecoverableConnectionError, ResourceError,
                         error_for_code)
from .method_framing import frame_handler, frame_writer
from .transport import Transport

try:
    from ssl import SSLError
except ImportError:  # pragma: no cover
    class SSLError(Exception):  # noqa
        pass

W_FORCE_CONNECT = """\
The .{attr} attribute on the connection was accessed before
the connection was established. This is supported for now, but will
be deprecated in amqp 2.2.0.

Since amqp 2.0 you have to explicitly call Connection.connect()
before using the connection.
"""

START_DEBUG_FMT = """
Start from server, version: %d.%d, properties: %s, mechanisms: %s, locales: %s
""".strip()

__all__ = ('Connection',)

AMQP_LOGGER = logging.getLogger('amqp')
AMQP_HEARTBEAT_LOGGER = logging.getLogger(
    'amqp.connection.Connection.heartbeat_tick'
)

#: Default map for :attr:`Connection.library_properties`
LIBRARY_PROPERTIES = {
    'product': 'py-amqp',
    'product_version': __version__,
}

#: Default map for :attr:`Connection.negotiate_capabilities`
NEGOTIATE_CAPABILITIES = {
    'consumer_cancel_notify': True,
    'connection.blocked': True,
    'authentication_failure_close': True,
}


class Connection(AbstractChannel):
    """AMQP Connection.

    The connection class provides methods for a client to establish a
    network connection to a server, and for both peers to operate the
    connection thereafter.

    GRAMMAR::

        connection          = open-connection *use-connection close-connection
        open-connection     = C:protocol-header
                              S:START C:START-OK
                              *challenge
                              S:TUNE C:TUNE-OK
                              C:OPEN S:OPEN-OK
        challenge           = S:SECURE C:SECURE-OK
        use-connection      = *channel
        close-connection    = C:CLOSE S:CLOSE-OK
                            / S:CLOSE C:CLOSE-OK
    Create a connection to the specified host, which should be
    a 'host[:port]', such as 'localhost', or '1.2.3.4:5672'
    (defaults to 'localhost', if a port is not specified then
    5672 is used)

    Authentication can be controlled by passing one or more
    `amqp.sasl.SASL` instances as the `authentication` parameter, or
    setting the `login_method` string to one of the supported methods:
    'GSSAPI', 'EXTERNAL', 'AMQPLAIN', or 'PLAIN'.
    Otherwise authentication will be performed using any supported method
    preferred by the server. Userid and passwords apply to AMQPLAIN and
    PLAIN authentication, whereas on GSSAPI only userid will be used as the
    client name. For EXTERNAL authentication both userid and password are
    ignored.

    The 'ssl' parameter may be simply True/False, or
    a dictionary of options to pass to :class:`ssl.SSLContext` such as
    requiring certain certificates. For details, refer to the ``ssl``
    parameter of :class:`~amqp.transport.SSLTransport`.

    The "socket_settings" parameter is a dictionary defining TCP
    settings which will be applied as socket options.

    When "confirm_publish" is set to True, the channel is put into
    confirm mode. In this mode, each published message is
    confirmed using the Publisher Confirms RabbitMQ extension.
    """

    Channel = Channel

    #: Mapping of protocol extensions to enable.
    #: The server will report these in server_properties[capabilities],
    #: and if a key in this map is present the client will tell the
    #: server to either enable or disable the capability depending
    #: on the value set in this map.
    #: For example with:
    #:     negotiate_capabilities = {
    #:         'consumer_cancel_notify': True,
    #:     }
    #: The client will enable this capability if the server reports
    #: support for it, but if the value is False the client will
    #: disable the capability.
    negotiate_capabilities = NEGOTIATE_CAPABILITIES

    #: These are sent to the server to announce what features
    #: we support, type of client etc.
    library_properties = LIBRARY_PROPERTIES

    #: Final heartbeat interval value (in float seconds) after negotiation
    heartbeat = None

    #: Original heartbeat interval value proposed by client.
    client_heartbeat = None

    #: Original heartbeat interval proposed by server.
    server_heartbeat = None

    #: Time of last heartbeat sent (in monotonic time, if available).
    last_heartbeat_sent = 0

    #: Time of last heartbeat received (in monotonic time, if available).
    last_heartbeat_received = 0

    #: Number of successful writes to socket.
    bytes_sent = 0

    #: Number of successful reads from socket.
    bytes_recv = 0

    #: Number of bytes sent to socket at the last heartbeat check.
    prev_sent = None

    #: Number of bytes received from socket at the last heartbeat check.
    prev_recv = None

    _METHODS = {
        spec.method(spec.Connection.Start, 'ooFSS'),
        spec.method(spec.Connection.OpenOk),
        spec.method(spec.Connection.Secure, 's'),
        spec.method(spec.Connection.Tune, 'BlB'),
        spec.method(spec.Connection.Close, 'BsBB'),
        spec.method(spec.Connection.Blocked),
        spec.method(spec.Connection.Unblocked),
        spec.method(spec.Connection.CloseOk),
    }
    _METHODS = {m.method_sig: m for m in _METHODS}

    _ALLOWED_METHODS_WHEN_CLOSING = (
        spec.Connection.Close, spec.Connection.CloseOk
    )

    connection_errors = (
        ConnectionError,
        socket.error,
        IOError,
        OSError,
    )
    channel_errors = (ChannelError,)
    recoverable_connection_errors = (
        RecoverableConnectionError,
        socket.error,
        IOError,
        OSError,
    )
    recoverable_channel_errors = (
        RecoverableChannelError,
    )

    def __init__(self, host='localhost:5672', userid='guest', password='guest',
                 login_method=None, login_response=None,
                 authentication=(),
                 virtual_host='/', locale='en_US', client_properties=None,
                 ssl=False, connect_timeout=None, channel_max=None,
                 frame_max=None, heartbeat=0, on_open=None, on_blocked=None,
                 on_unblocked=None, confirm_publish=False,
                 on_tune_ok=None, read_timeout=None, write_timeout=None,
                 socket_settings=None, frame_handler=frame_handler,
                 frame_writer=frame_writer, **kwargs):
        self._connection_id = uuid.uuid4().hex
        channel_max = channel_max or 65535
        frame_max = frame_max or 131072
        if authentication:
            if isinstance(authentication, sasl.SASL):
                authentication = (authentication,)
            self.authentication = authentication
        elif login_method is not None:
            if login_method == 'GSSAPI':
                auth = sasl.GSSAPI(userid)
            elif login_method == 'EXTERNAL':
                auth = sasl.EXTERNAL()
            elif login_method == 'AMQPLAIN':
                if userid is None or password is None:
                    raise ValueError(
                        "Must supply authentication or userid/password")
                auth = sasl.AMQPLAIN(userid, password)
            elif login_method == 'PLAIN':
                if userid is None or password is None:
                    raise ValueError(
                        "Must supply authentication or userid/password")
                auth = sasl.PLAIN(userid, password)
            elif login_response is not None:
                auth = sasl.RAW(login_method, login_response)
            else:
                raise ValueError("Invalid login method", login_method)
            self.authentication = (auth,)
        else:
            self.authentication = (sasl.GSSAPI(userid, fail_soft=True),
                                   sasl.EXTERNAL(),
                                   sasl.AMQPLAIN(userid, password),
                                   sasl.PLAIN(userid, password))

        self.client_properties = dict(
            self.library_properties, **client_properties or {}
        )
        self.locale = locale
        self.host = host
        self.virtual_host = virtual_host
        self.on_tune_ok = ensure_promise(on_tune_ok)

        self.frame_handler_cls = frame_handler
        self.frame_writer_cls = frame_writer

        self._handshake_complete = False

        self.channels = {}
        # The connection object itself is treated as channel 0
        super().__init__(self, 0)

        self._frame_writer = None
        self._on_inbound_frame = None
        self._transport = None

        # Properties set in the Tune method
        self.channel_max = channel_max
        self.frame_max = frame_max
        self.client_heartbeat = heartbeat

        self.confirm_publish = confirm_publish
        self.ssl = ssl
        self.read_timeout = read_timeout
        self.write_timeout = write_timeout
        self.socket_settings = socket_settings

        # Callbacks
        self.on_blocked = on_blocked
        self.on_unblocked = on_unblocked
        self.on_open = ensure_promise(on_open)

        self._avail_channel_ids = array('H', range(self.channel_max, 0, -1))

        # Properties set in the Start method
        self.version_major = 0
        self.version_minor = 0
        self.server_properties = {}
        self.mechanisms = []
        self.locales = []

        self.connect_timeout = connect_timeout

    def __repr__(self):
        if self._transport:
            return f'<AMQP Connection: {self.host}/{self.virtual_host} '\
                f'using {self._transport} at {id(self):#x}>'
        else:
            return f'<AMQP Connection: {self.host}/{self.virtual_host} '\
                f'(disconnected) at {id(self):#x}>'

    def __enter__(self):
        self.connect()
        return self

    def __exit__(self, *eargs):
        self.close()

    def then(self, on_success, on_error=None):
        return self.on_open.then(on_success, on_error)

    def _setup_listeners(self):
        self._callbacks.update({
            spec.Connection.Start: self._on_start,
            spec.Connection.OpenOk: self._on_open_ok,
            spec.Connection.Secure: self._on_secure,
            spec.Connection.Tune: self._on_tune,
            spec.Connection.Close: self._on_close,
            spec.Connection.Blocked: self._on_blocked,
            spec.Connection.Unblocked: self._on_unblocked,
            spec.Connection.CloseOk: self._on_close_ok,
        })

    def connect(self, callback=None):
        # Let the transport.py module setup the actual
        # socket connection to the broker.
        #
        if self.connected:
            return callback() if callback else None
        try:
            self.transport = self.Transport(
                self.host, self.connect_timeout, self.ssl,
                self.read_timeout, self.write_timeout,
                socket_settings=self.socket_settings,
            )
            self.transport.connect()
            self.on_inbound_frame = self.frame_handler_cls(
                self, self.on_inbound_method)
            self.frame_writer = self.frame_writer_cls(self, self.transport)

            while not self._handshake_complete:
                self.drain_events(timeout=self.connect_timeout)

        except (OSError, SSLError):
            self.collect()
            raise

    def _warn_force_connect(self, attr):
        warnings.warn(AMQPDeprecationWarning(
            W_FORCE_CONNECT.format(attr=attr)))

    @property
    def transport(self):
        if self._transport is None:
            self._warn_force_connect('transport')
            self.connect()
        return self._transport

    @transport.setter
    def transport(self, transport):
        self._transport = transport

    @property
    def on_inbound_frame(self):
        if self._on_inbound_frame is None:
            self._warn_force_connect('on_inbound_frame')
            self.connect()
        return self._on_inbound_frame

    @on_inbound_frame.setter
    def on_inbound_frame(self, on_inbound_frame):
        self._on_inbound_frame = on_inbound_frame

    @property
    def frame_writer(self):
        if self._frame_writer is None:
            self._warn_force_connect('frame_writer')
            self.connect()
        return self._frame_writer

    @frame_writer.setter
    def frame_writer(self, frame_writer):
        self._frame_writer = frame_writer

    def _on_start(self, version_major, version_minor, server_properties,
                  mechanisms, locales, argsig='FsSs'):
        client_properties = self.client_properties
        self.version_major = version_major
        self.version_minor = version_minor
        self.server_properties = server_properties
        if isinstance(mechanisms, str):
            mechanisms = mechanisms.encode('utf-8')
        self.mechanisms = mechanisms.split(b' ')
        self.locales = locales.split(' ')
        AMQP_LOGGER.debug(
            START_DEBUG_FMT,
            self.version_major, self.version_minor,
            self.server_properties, self.mechanisms, self.locales,
        )

        # Negotiate protocol extensions (capabilities)
        scap = server_properties.get('capabilities') or {}
        cap = client_properties.setdefault('capabilities', {})
        cap.update({
            wanted_cap: enable_cap
            for wanted_cap, enable_cap in self.negotiate_capabilities.items()
            if scap.get(wanted_cap)
        })
        if not cap:
            # no capabilities, server may not react well to having
            # this key present in client_properties, so we remove it.
            client_properties.pop('capabilities', None)

        for authentication in self.authentication:
            if authentication.mechanism in self.mechanisms:
                login_response = authentication.start(self)
                if login_response is not NotImplemented:
                    break
        else:
            raise ConnectionError(
                "Couldn't find appropriate auth mechanism "
                "(can offer: {}; available: {})".format(
                    b", ".join(m.mechanism
                               for m in self.authentication
                               if m.mechanism).decode(),
                    b", ".join(self.mechanisms).decode()))

        self.send_method(
            spec.Connection.StartOk, argsig,
            (client_properties, authentication.mechanism,
             login_response, self.locale),
        )

    def _on_secure(self, challenge):
        pass

    def _on_tune(self, channel_max, frame_max, server_heartbeat, argsig='BlB'):
        client_heartbeat = self.client_heartbeat or 0
        self.channel_max = channel_max or self.channel_max
        self.frame_max = frame_max or self.frame_max
        self.server_heartbeat = server_heartbeat or 0

        # negotiate the heartbeat interval to the smaller of the
        # specified values
        if self.server_heartbeat == 0 or client_heartbeat == 0:
            self.heartbeat = max(self.server_heartbeat, client_heartbeat)
        else:
            self.heartbeat = min(self.server_heartbeat, client_heartbeat)

        # Ignore server heartbeat if client_heartbeat is disabled
        if not self.client_heartbeat:
            self.heartbeat = 0

        self.send_method(
            spec.Connection.TuneOk, argsig,
            (self.channel_max, self.frame_max, self.heartbeat),
            callback=self._on_tune_sent,
        )

    def _on_tune_sent(self, argsig='ssb'):
        self.send_method(
            spec.Connection.Open, argsig, (self.virtual_host, '', False),
        )

    def _on_open_ok(self):
        self._handshake_complete = True
        self.on_open(self)

    def Transport(self, host, connect_timeout,
                  ssl=False, read_timeout=None, write_timeout=None,
                  socket_settings=None, **kwargs):
        return Transport(
            host, connect_timeout=connect_timeout, ssl=ssl,
            read_timeout=read_timeout, write_timeout=write_timeout,
            socket_settings=socket_settings, **kwargs)

    @property
    def connected(self):
        return self._transport and self._transport.connected

    def collect(self):
        try:
            if self._transport:
                self._transport.close()

            if self.channels:
                # Copy all the channels except self since the channels
                # dictionary changes during the collection process.
                channels = [
                    ch for ch in self.channels.values()
                    if ch is not self
                ]

                for ch in channels:
                    ch.collect()
        except OSError:
            pass  # connection already closed on the other end
        finally:
            self._transport = self.connection = self.channels = None

    def _get_free_channel_id(self):
        try:
            return self._avail_channel_ids.pop()
        except IndexError:
            raise ResourceError(
                'No free channel ids, current={}, channel_max={}'.format(
                    len(self.channels), self.channel_max), spec.Channel.Open)

    def _claim_channel_id(self, channel_id):
        try:
            return self._avail_channel_ids.remove(channel_id)
        except ValueError:
            raise ConnectionError(f'Channel {channel_id!r} already open')

    def channel(self, channel_id=None, callback=None):
        """Create new channel.

        Fetch a Channel object identified by the numeric channel_id, or
        create that object if it doesn't already exist.
        """
        if self.channels is None:
            raise RecoverableConnectionError('Connection already closed.')

        try:
            return self.channels[channel_id]
        except KeyError:
            channel = self.Channel(self, channel_id, on_open=callback)
            channel.open()
            return channel

    def is_alive(self):
        raise NotImplementedError('Use AMQP heartbeats')

    def drain_events(self, timeout=None):
        # read until message is ready
        while not self.blocking_read(timeout):
            pass

    def blocking_read(self, timeout=None):
        with self.transport.having_timeout(timeout):
            frame = self.transport.read_frame()
        return self.on_inbound_frame(frame)

    def on_inbound_method(self, channel_id, method_sig, payload, content):
        if self.channels is None:
            raise RecoverableConnectionError('Connection already closed')

        return self.channels[channel_id].dispatch_method(
            method_sig, payload, content,
        )

    def close(self, reply_code=0, reply_text='', method_sig=(0, 0),
              argsig='BsBB'):
        """Request a connection close.

        This method indicates that the sender wants to close the
        connection. This may be due to internal conditions (e.g. a
        forced shut-down) or due to an error handling a specific
        method, i.e. an exception. When a close is due to an
        exception, the sender provides the class and method id of the
        method which caused the exception.

        RULE:

            After sending this method any received method except the
            Close-OK method MUST be discarded.

        RULE:

            The peer sending this method MAY use a counter or timeout
            to detect failure of the other peer to respond correctly
            with the Close-OK method.

        RULE:

            When a server receives the Close method from a client it
            MUST delete all server-side resources associated with the
            client's context. A client CANNOT reconnect to a context
            after sending or receiving a Close method.

        PARAMETERS:
            reply_code: short

                The reply code. The AMQ reply codes are defined in AMQ
                RFC 011.

            reply_text: shortstr

                The localised reply text. This text can be logged as an
                aid to resolving issues.

            class_id: short

                failing method class

                When the close is provoked by a method exception, this
                is the class of the method.

            method_id: short

                failing method ID

                When the close is provoked by a method exception, this
                is the ID of the method.
        """
        if self._transport is None:
            # already closed
            return

        try:
            self.is_closing = True
            return self.send_method(
                spec.Connection.Close, argsig,
                (reply_code, reply_text, method_sig[0], method_sig[1]),
                wait=spec.Connection.CloseOk,
            )
        except (OSError, SSLError):
            # close connection
            self.collect()
            raise
        finally:
            self.is_closing = False

    def _on_close(self, reply_code, reply_text, class_id, method_id):
        """Request a connection close.

        This method indicates that the sender wants to close the
        connection. This may be due to internal conditions (e.g. a
        forced shut-down) or due to an error handling a specific
        method, i.e. an exception. When a close is due to an
        exception, the sender provides the class and method id of the
        method which caused the exception.

        RULE:

            After sending this method any received method except the
            Close-OK method MUST be discarded.

        RULE:

            The peer sending this method MAY use a counter or timeout
            to detect failure of the other peer to respond correctly
            with the Close-OK method.

        RULE:

            When a server receives the Close method from a client it
            MUST delete all server-side resources associated with the
            client's context. A client CANNOT reconnect to a context
            after sending or receiving a Close method.

        PARAMETERS:
            reply_code: short

                The reply code. The AMQ reply codes are defined in AMQ
                RFC 011.

            reply_text: shortstr

                The localised reply text. This text can be logged as an
                aid to resolving issues.

            class_id: short

                failing method class

                When the close is provoked by a method exception, this
                is the class of the method.

            method_id: short

                failing method ID

                When the close is provoked by a method exception, this
                is the ID of the method.
        """
        self._x_close_ok()
        raise error_for_code(reply_code, reply_text,
                             (class_id, method_id), ConnectionError)

    def _x_close_ok(self):
        """Confirm a connection close.

        This method confirms a Connection.Close method and tells the
        recipient that it is safe to release resources for the
        connection and close the socket.

        RULE:
            A peer that detects a socket closure without having
            received a Close-Ok handshake method SHOULD log the error.
        """
        self.send_method(spec.Connection.CloseOk, callback=self._on_close_ok)

    def _on_close_ok(self):
        """Confirm a connection close.

        This method confirms a Connection.Close method and tells the
        recipient that it is safe to release resources for the
        connection and close the socket.

        RULE:

            A peer that detects a socket closure without having
            received a Close-Ok handshake method SHOULD log the error.
        """
        self.collect()

    def _on_blocked(self):
        """Callback called when connection blocked.

        Notes:
            This is a RabbitMQ extension.
        """
        reason = 'connection blocked, see broker logs'
        if self.on_blocked:
            return self.on_blocked(reason)

    def _on_unblocked(self):
        if self.on_unblocked:
            return self.on_unblocked()

    def send_heartbeat(self):
        self.frame_writer(8, 0, None, None, None)

    def heartbeat_tick(self, rate=2):
        """Send heartbeat packets if necessary.

        Raises:
            ~amqp.exceptions.ConnectionForced: if none have been
                received recently.

        Note:
            This should be called frequently, on the order of
            once per second.

        Keyword Arguments:
            rate (int): Previously used, but ignored now.
        """
        AMQP_HEARTBEAT_LOGGER.debug('heartbeat_tick : for connection %s',
                                    self._connection_id)
        if not self.heartbeat:
            return

        # treat actual data exchange in either direction as a heartbeat
        sent_now = self.bytes_sent
        recv_now = self.bytes_recv
        if self.prev_sent is None or self.prev_sent != sent_now:
            self.last_heartbeat_sent = monotonic()
        if self.prev_recv is None or self.prev_recv != recv_now:
            self.last_heartbeat_received = monotonic()

        now = monotonic()
        AMQP_HEARTBEAT_LOGGER.debug(
            'heartbeat_tick : Prev sent/recv: %s/%s, '
            'now - %s/%s, monotonic - %s, '
            'last_heartbeat_sent - %s, heartbeat int. - %s '
            'for connection %s',
            self.prev_sent, self.prev_recv,
            sent_now, recv_now, now,
            self.last_heartbeat_sent,
            self.heartbeat,
            self._connection_id,
        )

        self.prev_sent, self.prev_recv = sent_now, recv_now

        # send a heartbeat if it's time to do so
        if now > self.last_heartbeat_sent + self.heartbeat:
            AMQP_HEARTBEAT_LOGGER.debug(
                'heartbeat_tick: sending heartbeat for connection %s',
                self._connection_id)
            self.send_heartbeat()
            self.last_heartbeat_sent = monotonic()

        # if we've missed two intervals' heartbeats, fail; this gives the
        # server enough time to send heartbeats a little late
        two_heartbeats = 2 * self.heartbeat
        two_heartbeats_interval = self.last_heartbeat_received + two_heartbeats
        heartbeats_missed = two_heartbeats_interval < monotonic()
        if self.last_heartbeat_received and heartbeats_missed:
            raise ConnectionForced('Too many heartbeats missed')

    @property
    def sock(self):
        return self.transport.sock

    @property
    def server_capabilities(self):
        return self.server_properties.get('capabilities') or {}
@ -0,0 +1,288 @@
"""Exceptions used by amqp."""
# Copyright (C) 2007-2008 Barry Pederson <bp@barryp.org>

from struct import pack, unpack

__all__ = (
    'AMQPError',
    'ConnectionError', 'ChannelError',
    'RecoverableConnectionError', 'IrrecoverableConnectionError',
    'RecoverableChannelError', 'IrrecoverableChannelError',
    'ConsumerCancelled', 'ContentTooLarge', 'NoConsumers',
    'ConnectionForced', 'InvalidPath', 'AccessRefused', 'NotFound',
    'ResourceLocked', 'PreconditionFailed', 'FrameError', 'FrameSyntaxError',
    'InvalidCommand', 'ChannelNotOpen', 'UnexpectedFrame', 'ResourceError',
    'NotAllowed', 'AMQPNotImplementedError', 'InternalError',
    'MessageNacked',
    'AMQPDeprecationWarning',
)


class AMQPDeprecationWarning(UserWarning):
    """Warning for deprecated things."""


class MessageNacked(Exception):
    """Message was nacked by broker."""


class AMQPError(Exception):
    """Base class for all AMQP exceptions."""

    code = 0

    def __init__(self, reply_text=None, method_sig=None,
                 method_name=None, reply_code=None):
        self.message = reply_text
        self.reply_code = reply_code or self.code
        self.reply_text = reply_text
        self.method_sig = method_sig
        self.method_name = method_name or ''
        if method_sig and not self.method_name:
            self.method_name = METHOD_NAME_MAP.get(method_sig, '')
        Exception.__init__(self, reply_code,
                           reply_text, method_sig, self.method_name)

    def __str__(self):
        if self.method:
            return '{0.method}: ({0.reply_code}) {0.reply_text}'.format(self)
        return self.reply_text or '<{}: unknown error>'.format(
            type(self).__name__
        )

    @property
    def method(self):
        return self.method_name or self.method_sig


class ConnectionError(AMQPError):
    """AMQP Connection Error."""


class ChannelError(AMQPError):
    """AMQP Channel Error."""


class RecoverableChannelError(ChannelError):
    """Exception class for recoverable channel errors."""


class IrrecoverableChannelError(ChannelError):
    """Exception class for irrecoverable channel errors."""


class RecoverableConnectionError(ConnectionError):
    """Exception class for recoverable connection errors."""


class IrrecoverableConnectionError(ConnectionError):
    """Exception class for irrecoverable connection errors."""


class Blocked(RecoverableConnectionError):
    """AMQP Connection Blocked Predicate."""


class ConsumerCancelled(RecoverableConnectionError):
    """AMQP Consumer Cancelled Predicate."""


class ContentTooLarge(RecoverableChannelError):
    """AMQP Content Too Large Error."""

    code = 311


class NoConsumers(RecoverableChannelError):
    """AMQP No Consumers Error."""

    code = 313


class ConnectionForced(RecoverableConnectionError):
    """AMQP Connection Forced Error."""

    code = 320


class InvalidPath(IrrecoverableConnectionError):
    """AMQP Invalid Path Error."""

    code = 402


class AccessRefused(IrrecoverableChannelError):
    """AMQP Access Refused Error."""

    code = 403


class NotFound(IrrecoverableChannelError):
    """AMQP Not Found Error."""

    code = 404


class ResourceLocked(RecoverableChannelError):
    """AMQP Resource Locked Error."""

    code = 405


class PreconditionFailed(IrrecoverableChannelError):
    """AMQP Precondition Failed Error."""

    code = 406


class FrameError(IrrecoverableConnectionError):
    """AMQP Frame Error."""

    code = 501


class FrameSyntaxError(IrrecoverableConnectionError):
    """AMQP Frame Syntax Error."""

    code = 502


class InvalidCommand(IrrecoverableConnectionError):
    """AMQP Invalid Command Error."""

    code = 503


class ChannelNotOpen(IrrecoverableConnectionError):
    """AMQP Channel Not Open Error."""

    code = 504


class UnexpectedFrame(IrrecoverableConnectionError):
    """AMQP Unexpected Frame."""

    code = 505


class ResourceError(RecoverableConnectionError):
    """AMQP Resource Error."""

    code = 506


class NotAllowed(IrrecoverableConnectionError):
    """AMQP Not Allowed Error."""

    code = 530


class AMQPNotImplementedError(IrrecoverableConnectionError):
    """AMQP Not Implemented Error."""

    code = 540


class InternalError(IrrecoverableConnectionError):
    """AMQP Internal Error."""

    code = 541


ERROR_MAP = {
    311: ContentTooLarge,
    313: NoConsumers,
    320: ConnectionForced,
    402: InvalidPath,
    403: AccessRefused,
    404: NotFound,
    405: ResourceLocked,
    406: PreconditionFailed,
    501: FrameError,
    502: FrameSyntaxError,
    503: InvalidCommand,
    504: ChannelNotOpen,
    505: UnexpectedFrame,
    506: ResourceError,
    530: NotAllowed,
    540: AMQPNotImplementedError,
    541: InternalError,
}


def error_for_code(code, text, method, default):
    try:
        return ERROR_MAP[code](text, method, reply_code=code)
    except KeyError:
        return default(text, method, reply_code=code)


METHOD_NAME_MAP = {
    (10, 10): 'Connection.start',
    (10, 11): 'Connection.start_ok',
    (10, 20): 'Connection.secure',
    (10, 21): 'Connection.secure_ok',
    (10, 30): 'Connection.tune',
    (10, 31): 'Connection.tune_ok',
    (10, 40): 'Connection.open',
    (10, 41): 'Connection.open_ok',
    (10, 50): 'Connection.close',
    (10, 51): 'Connection.close_ok',
    (20, 10): 'Channel.open',
    (20, 11): 'Channel.open_ok',
    (20, 20): 'Channel.flow',
    (20, 21): 'Channel.flow_ok',
    (20, 40): 'Channel.close',
    (20, 41): 'Channel.close_ok',
    (30, 10): 'Access.request',
    (30, 11): 'Access.request_ok',
    (40, 10): 'Exchange.declare',
    (40, 11): 'Exchange.declare_ok',
    (40, 20): 'Exchange.delete',
    (40, 21): 'Exchange.delete_ok',
    (40, 30): 'Exchange.bind',
    (40, 31): 'Exchange.bind_ok',
    (40, 40): 'Exchange.unbind',
    (40, 41): 'Exchange.unbind_ok',
    (50, 10): 'Queue.declare',
    (50, 11): 'Queue.declare_ok',
    (50, 20): 'Queue.bind',
    (50, 21): 'Queue.bind_ok',
    (50, 30): 'Queue.purge',
    (50, 31): 'Queue.purge_ok',
    (50, 40): 'Queue.delete',
    (50, 41): 'Queue.delete_ok',
    (50, 50): 'Queue.unbind',
    (50, 51): 'Queue.unbind_ok',
    (60, 10): 'Basic.qos',
    (60, 11): 'Basic.qos_ok',
    (60, 20): 'Basic.consume',
    (60, 21): 'Basic.consume_ok',
    (60, 30): 'Basic.cancel',
    (60, 31): 'Basic.cancel_ok',
    (60, 40): 'Basic.publish',
    (60, 50): 'Basic.return',
    (60, 60): 'Basic.deliver',
    (60, 70): 'Basic.get',
    (60, 71): 'Basic.get_ok',
    (60, 72): 'Basic.get_empty',
    (60, 80): 'Basic.ack',
    (60, 90): 'Basic.reject',
    (60, 100): 'Basic.recover_async',
    (60, 110): 'Basic.recover',
    (60, 111): 'Basic.recover_ok',
    (60, 120): 'Basic.nack',
    (90, 10): 'Tx.select',
    (90, 11): 'Tx.select_ok',
    (90, 20): 'Tx.commit',
    (90, 21): 'Tx.commit_ok',
    (90, 30): 'Tx.rollback',
    (90, 31): 'Tx.rollback_ok',
    (85, 10): 'Confirm.select',
    (85, 11): 'Confirm.select_ok',
}


for _method_id, _method_name in list(METHOD_NAME_MAP.items()):
    METHOD_NAME_MAP[unpack('>I', pack('>HH', *_method_id))[0]] = \
        _method_name
@ -0,0 +1,9 @@
# -*- coding: utf-8 -*-
"""Python 2/3 compatibility."""
from __future__ import absolute_import, unicode_literals

import sys

import vine.five

sys.modules[__name__] = vine.five
@ -0,0 +1,186 @@
"""Convert between frames and higher-level AMQP methods."""
# Copyright (C) 2007-2008 Barry Pederson <bp@barryp.org>

from collections import defaultdict
from struct import pack, pack_into, unpack_from

from . import spec
from .basic_message import Message
from .exceptions import UnexpectedFrame
from .utils import str_to_bytes

__all__ = ('frame_handler', 'frame_writer')

#: Set of methods that require both a content frame and a body frame.
_CONTENT_METHODS = frozenset([
    spec.Basic.Return,
    spec.Basic.Deliver,
    spec.Basic.GetOk,
])


#: Number of bytes reserved for protocol in a content frame.
#: We use this to calculate when a frame exceeds the max frame size,
#: and if it does not, the message will fit into the preallocated buffer.
FRAME_OVERHEAD = 40


def frame_handler(connection, callback,
                  unpack_from=unpack_from, content_methods=_CONTENT_METHODS):
    """Create closure that reads frames."""
    expected_types = defaultdict(lambda: 1)
    partial_messages = {}

    def on_frame(frame):
        frame_type, channel, buf = frame
        connection.bytes_recv += 1
        if frame_type not in (expected_types[channel], 8):
            raise UnexpectedFrame(
                'Received frame {} while expecting type: {}'.format(
                    frame_type, expected_types[channel]),
            )
        elif frame_type == 1:
            method_sig = unpack_from('>HH', buf, 0)

            if method_sig in content_methods:
                # Save what we've got so far and wait for the content-header
                partial_messages[channel] = Message(
                    frame_method=method_sig, frame_args=buf,
                )
                expected_types[channel] = 2
                return False

            callback(channel, method_sig, buf, None)

        elif frame_type == 2:
            msg = partial_messages[channel]
            msg.inbound_header(buf)

            if not msg.ready:
                # wait for the content-body
                expected_types[channel] = 3
                return False

            # bodyless message, we're done
            expected_types[channel] = 1
            partial_messages.pop(channel, None)
            callback(channel, msg.frame_method, msg.frame_args, msg)

        elif frame_type == 3:
            msg = partial_messages[channel]
            msg.inbound_body(buf)
            if not msg.ready:
                # wait for the rest of the content-body
                return False
            expected_types[channel] = 1
            partial_messages.pop(channel, None)
            callback(channel, msg.frame_method, msg.frame_args, msg)
        elif frame_type == 8:
            # bytes_recv already updated
            return False
        return True

    return on_frame


class Buffer:

    def __init__(self, buf):
        self.buf = buf

    @property
    def buf(self):
        return self._buf

    @buf.setter
    def buf(self, buf):
        self._buf = buf
        self.view = memoryview(buf)


def frame_writer(connection, transport,
                 pack=pack, pack_into=pack_into, range=range, len=len,
                 bytes=bytes, str_to_bytes=str_to_bytes, text_t=str):
    """Create closure that writes frames."""
    write = transport.write

    buffer_store = Buffer(bytearray(connection.frame_max - 8))

    def write_frame(type_, channel, method_sig, args, content):
        chunk_size = connection.frame_max - 8
        # frame_max can be updated via connection._on_tune. If
        # it became larger, then we need to resize the buffer
        # to prevent overflow.
        if chunk_size > len(buffer_store.buf):
            buffer_store.buf = bytearray(chunk_size)
        buf = buffer_store.buf
        view = buffer_store.view
        offset = 0
        properties = None
        args = str_to_bytes(args)
        if content:
            body = content.body
            if isinstance(body, str):
                encoding = content.properties.setdefault(
                    'content_encoding', 'utf-8')
                body = body.encode(encoding)
            properties = content._serialize_properties()
            bodylen = len(body)
            properties_len = len(properties) or 0
            framelen = len(args) + properties_len + bodylen + FRAME_OVERHEAD
            bigbody = framelen > chunk_size
        else:
            body, bodylen, bigbody = None, 0, 0

        if bigbody:
            # ## SLOW: string copy and write for every frame
            frame = (b''.join([pack('>HH', *method_sig), args])
                     if type_ == 1 else b'')  # encode method frame
            framelen = len(frame)
            write(pack('>BHI%dsB' % framelen,
                       type_, channel, framelen, frame, 0xce))
            if body:
                frame = b''.join([
                    pack('>HHQ', method_sig[0], 0, len(body)),
                    properties,
                ])
                framelen = len(frame)
                write(pack('>BHI%dsB' % framelen,
                           2, channel, framelen, frame, 0xce))

                for i in range(0, bodylen, chunk_size):
                    frame = body[i:i + chunk_size]
                    framelen = len(frame)
                    write(pack('>BHI%dsB' % framelen,
                               3, channel, framelen,
                               frame, 0xce))

        else:
            # ## FAST: pack into buffer and single write
            frame = (b''.join([pack('>HH', *method_sig), args])
                     if type_ == 1 else b'')
            framelen = len(frame)
            pack_into('>BHI%dsB' % framelen, buf, offset,
                      type_, channel, framelen, frame, 0xce)
            offset += 8 + framelen
            if body is not None:
                frame = b''.join([
                    pack('>HHQ', method_sig[0], 0, len(body)),
                    properties,
                ])
                framelen = len(frame)

                pack_into('>BHI%dsB' % framelen, buf, offset,
                          2, channel, framelen, frame, 0xce)
                offset += 8 + framelen

                bodylen = len(body)
                if bodylen > 0:
                    framelen = bodylen
                    pack_into('>BHI%dsB' % framelen, buf, offset,
                              3, channel, framelen, body, 0xce)
                    offset += 8 + framelen

            write(view[:offset])

        connection.bytes_sent += 1
    return write_frame
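A hand-driven sketch of the reader closure above, assuming py-amqp is importable as ``amqp`` and using a ``SimpleNamespace`` stub for the connection: a heartbeat frame (type 8) is swallowed, while a plain method frame such as basic.ack (60, 80) is dispatched to the callback immediately.

from struct import pack
from types import SimpleNamespace

from amqp.method_framing import frame_handler

conn = SimpleNamespace(bytes_recv=0)
received = []
on_frame = frame_handler(conn, lambda *args: received.append(args))

on_frame((8, 0, b''))                    # heartbeat: consumed, not dispatched
on_frame((1, 1, pack('>HH', 60, 80)))    # basic.ack method frame: dispatched
print(conn.bytes_recv, received[0][:2])  # -> 2 (1, (60, 80))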
@ -0,0 +1,79 @@
"""Platform compatibility."""

import platform
import re
import sys
# Jython does not have this attribute
import typing

try:
    from socket import SOL_TCP
except ImportError:  # pragma: no cover
    from socket import IPPROTO_TCP as SOL_TCP  # noqa


RE_NUM = re.compile(r'(\d+).+')


def _linux_version_to_tuple(s: str) -> typing.Tuple[int, int, int]:
    return tuple(map(_versionatom, s.split('.')[:3]))


def _versionatom(s: str) -> int:
    if s.isdigit():
        return int(s)
    match = RE_NUM.match(s)
    return int(match.groups()[0]) if match else 0


# available socket options for TCP level
KNOWN_TCP_OPTS = {
    'TCP_CORK', 'TCP_DEFER_ACCEPT', 'TCP_KEEPCNT',
    'TCP_KEEPIDLE', 'TCP_KEEPINTVL', 'TCP_LINGER2',
    'TCP_MAXSEG', 'TCP_NODELAY', 'TCP_QUICKACK',
    'TCP_SYNCNT', 'TCP_USER_TIMEOUT', 'TCP_WINDOW_CLAMP',
}

LINUX_VERSION = None
if sys.platform.startswith('linux'):
    LINUX_VERSION = _linux_version_to_tuple(platform.release())
    if LINUX_VERSION < (2, 6, 37):
        KNOWN_TCP_OPTS.remove('TCP_USER_TIMEOUT')

    # Windows Subsystem for Linux is an edge-case: the Python socket library
    # returns most TCP_* enums, but they aren't actually supported
    if platform.release().endswith("Microsoft"):
        KNOWN_TCP_OPTS = {'TCP_NODELAY', 'TCP_KEEPIDLE', 'TCP_KEEPINTVL',
                          'TCP_KEEPCNT'}

elif sys.platform.startswith('darwin'):
    KNOWN_TCP_OPTS.remove('TCP_USER_TIMEOUT')

elif 'bsd' in sys.platform:
    KNOWN_TCP_OPTS.remove('TCP_USER_TIMEOUT')

# According to MSDN, Windows platforms support getsockopt(TCP_MAXSEG) but not
# setsockopt(TCP_MAXSEG) on IPPROTO_TCP sockets.
elif sys.platform.startswith('win'):
    KNOWN_TCP_OPTS = {'TCP_NODELAY'}

elif sys.platform.startswith('cygwin'):
    KNOWN_TCP_OPTS = {'TCP_NODELAY'}

# illumos does not allow setting the TCP_MAXSEG socket option,
# even if the Oracle documentation says otherwise.
# TCP_USER_TIMEOUT does not exist on Solaris 11.4
elif sys.platform.startswith('sunos'):
    KNOWN_TCP_OPTS.remove('TCP_MAXSEG')
    KNOWN_TCP_OPTS.remove('TCP_USER_TIMEOUT')

# AIX does not allow setting the TCP_MAXSEG
# or the TCP_USER_TIMEOUT socket options.
elif sys.platform.startswith('aix'):
    KNOWN_TCP_OPTS.remove('TCP_MAXSEG')
    KNOWN_TCP_OPTS.remove('TCP_USER_TIMEOUT')

__all__ = (
    'LINUX_VERSION',
    'SOL_TCP',
    'KNOWN_TCP_OPTS',
)
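A small illustrative sketch (an assumption, not part of the library): resolve each surviving option name in KNOWN_TCP_OPTS against the running interpreter's socket module and read its current value. Some kernels may still reject individual options even when the constant exists.

import socket

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
for opt in sorted(KNOWN_TCP_OPTS):
    num = getattr(socket, opt, None)    # skip names this build doesn't expose
    if num is not None:
        print(opt, s.getsockopt(SOL_TCP, num))
s.close()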
@ -0,0 +1,12 @@
"""Protocol data."""

from collections import namedtuple

queue_declare_ok_t = namedtuple(
    'queue_declare_ok_t', ('queue', 'message_count', 'consumer_count'),
)

basic_return_t = namedtuple(
    'basic_return_t',
    ('reply_code', 'reply_text', 'exchange', 'routing_key', 'message'),
)
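Usage sketch: these named tuples are plain data carriers, so a queue.declare-ok reply can be read by field name or unpacked positionally (the queue name and counts below are made up).

ok = queue_declare_ok_t('tasks', 10, 2)
print(ok.queue, ok.message_count, ok.consumer_count)  # tasks 10 2
queue, messages, consumers = ok  # positional unpacking also works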
@ -0,0 +1,174 @@
"""SASL mechanisms for AMQP authentication."""

import socket
import warnings
from io import BytesIO

from amqp.serialization import _write_table


class SASL:
    """The base class for all amqp SASL authentication mechanisms.

    You should sub-class this if you're implementing your own authentication.
    """

    @property
    def mechanism(self):
        """Return a bytes object containing the SASL mechanism name."""
        raise NotImplementedError

    def start(self, connection):
        """Return the first response to a SASL challenge as a bytes object."""
        raise NotImplementedError


class PLAIN(SASL):
    """PLAIN SASL authentication mechanism.

    See https://tools.ietf.org/html/rfc4616 for details.
    """

    mechanism = b'PLAIN'

    def __init__(self, username, password):
        self.username, self.password = username, password

    def start(self, connection):
        if self.username is None or self.password is None:
            return NotImplemented
        login_response = BytesIO()
        login_response.write(b'\0')
        login_response.write(self.username.encode('utf-8'))
        login_response.write(b'\0')
        login_response.write(self.password.encode('utf-8'))
        return login_response.getvalue()


class AMQPLAIN(SASL):
    """AMQPLAIN SASL authentication mechanism.

    This is a non-standard mechanism used by AMQP servers.
    """

    mechanism = b'AMQPLAIN'

    def __init__(self, username, password):
        self.username, self.password = username, password

    def start(self, connection):
        if self.username is None or self.password is None:
            return NotImplemented
        login_response = BytesIO()
        _write_table({b'LOGIN': self.username, b'PASSWORD': self.password},
                     login_response.write, [])
        # Skip the length at the beginning
        return login_response.getvalue()[4:]


def _get_gssapi_mechanism():
    try:
        import gssapi
        import gssapi.raw.misc  # Fail if the old python-gssapi is installed
    except ImportError:
        class FakeGSSAPI(SASL):
            """A no-op SASL mechanism for when gssapi isn't available."""

            mechanism = None

            def __init__(self, client_name=None, service=b'amqp',
                         rdns=False, fail_soft=False):
                if not fail_soft:
                    raise NotImplementedError(
                        "You need to install the `gssapi` module for GSSAPI "
                        "SASL support")

            def start(self):  # pragma: no cover
                return NotImplemented
        return FakeGSSAPI
    else:
        class GSSAPI(SASL):
            """GSSAPI SASL authentication mechanism.

            See https://tools.ietf.org/html/rfc4752 for details.
            """

            mechanism = b'GSSAPI'

            def __init__(self, client_name=None, service=b'amqp',
                         rdns=False, fail_soft=False):
                if client_name and not isinstance(client_name, bytes):
                    client_name = client_name.encode('ascii')
                self.client_name = client_name
                self.fail_soft = fail_soft
                self.service = service
                self.rdns = rdns

            def get_hostname(self, connection):
                sock = connection.transport.sock
                if self.rdns and sock.family in (socket.AF_INET,
                                                 socket.AF_INET6):
                    peer = sock.getpeername()
                    hostname, _, _ = socket.gethostbyaddr(peer[0])
                else:
                    hostname = connection.transport.host
                if not isinstance(hostname, bytes):
                    hostname = hostname.encode('ascii')
                return hostname

            def start(self, connection):
                try:
                    if self.client_name:
                        creds = gssapi.Credentials(
                            name=gssapi.Name(self.client_name))
                    else:
                        creds = None
                    hostname = self.get_hostname(connection)
                    name = gssapi.Name(b'@'.join([self.service, hostname]),
                                       gssapi.NameType.hostbased_service)
                    context = gssapi.SecurityContext(name=name, creds=creds)
                    return context.step(None)
                except gssapi.raw.misc.GSSError:
                    if self.fail_soft:
                        return NotImplemented
                    else:
                        raise
        return GSSAPI


GSSAPI = _get_gssapi_mechanism()


class EXTERNAL(SASL):
    """EXTERNAL SASL mechanism.

    Enables external authentication, i.e. not handled through this protocol.
    Only passes 'EXTERNAL' as authentication mechanism, but no further
    authentication data.
    """

    mechanism = b'EXTERNAL'

    def start(self, connection):
        return b''


class RAW(SASL):
    """A generic custom SASL mechanism.

    This mechanism takes a mechanism name and response to send to the server,
    so it can be used for simple custom authentication schemes.
    """

    mechanism = None

    def __init__(self, mechanism, response):
        assert isinstance(mechanism, bytes)
        assert isinstance(response, bytes)
        self.mechanism, self.response = mechanism, response
        warnings.warn("Passing login_method and login_response to Connection "
                      "is deprecated. Please implement a SASL subclass "
                      "instead.", DeprecationWarning)

    def start(self, connection):
        return self.response
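A minimal sketch of what PLAIN.start() actually puts on the wire: NUL + username + NUL + password, per RFC 4616. PLAIN ignores the connection argument, so None stands in here; the credentials are made up.

auth = PLAIN('guest', 'secret')   # hypothetical credentials
print(auth.mechanism)             # b'PLAIN'
print(auth.start(None))           # b'\x00guest\x00secret'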
@ -0,0 +1,569 @@
"""Convert between bytestreams and higher-level AMQP types.

2007-11-05 Barry Pederson <bp@barryp.org>

"""
# Copyright (C) 2007 Barry Pederson <bp@barryp.org>

import calendar
from datetime import datetime
from decimal import Decimal
from io import BytesIO
from struct import pack, unpack_from

from .exceptions import FrameSyntaxError
from .spec import Basic
from .utils import bytes_to_str as pstr_t
from .utils import str_to_bytes

ILLEGAL_TABLE_TYPE = """\
Table type {0!r} not handled by amqp.
"""

ILLEGAL_TABLE_TYPE_WITH_KEY = """\
Table type {0!r} for key {1!r} not handled by amqp. [value: {2!r}]
"""

ILLEGAL_TABLE_TYPE_WITH_VALUE = """\
Table type {0!r} not handled by amqp. [value: {1!r}]
"""


def _read_item(buf, offset):
    ftype = chr(buf[offset])
    offset += 1

    # 'S': long string
    if ftype == 'S':
        slen, = unpack_from('>I', buf, offset)
        offset += 4
        try:
            val = pstr_t(buf[offset:offset + slen])
        except UnicodeDecodeError:
            val = buf[offset:offset + slen]

        offset += slen
    # 's': short string
    elif ftype == 's':
        slen, = unpack_from('>B', buf, offset)
        offset += 1
        val = pstr_t(buf[offset:offset + slen])
        offset += slen
    # 'x': bytes array
    elif ftype == 'x':
        blen, = unpack_from('>I', buf, offset)
        offset += 4
        val = buf[offset:offset + blen]
        offset += blen
    # 'b': short-short int
    elif ftype == 'b':
        val, = unpack_from('>B', buf, offset)
        offset += 1
    # 'B': short-short unsigned int
    elif ftype == 'B':
        val, = unpack_from('>b', buf, offset)
        offset += 1
    # 'U': short int
    elif ftype == 'U':
        val, = unpack_from('>h', buf, offset)
        offset += 2
    # 'u': short unsigned int
    elif ftype == 'u':
        val, = unpack_from('>H', buf, offset)
        offset += 2
    # 'I': long int
    elif ftype == 'I':
        val, = unpack_from('>i', buf, offset)
        offset += 4
    # 'i': long unsigned int
    elif ftype == 'i':
        val, = unpack_from('>I', buf, offset)
        offset += 4
    # 'L': long long int
    elif ftype == 'L':
        val, = unpack_from('>q', buf, offset)
        offset += 8
    # 'l': long long unsigned int
    elif ftype == 'l':
        val, = unpack_from('>Q', buf, offset)
        offset += 8
    # 'f': float
    elif ftype == 'f':
        val, = unpack_from('>f', buf, offset)
        offset += 4
    # 'd': double
    elif ftype == 'd':
        val, = unpack_from('>d', buf, offset)
        offset += 8
    # 'D': decimal
    elif ftype == 'D':
        d, = unpack_from('>B', buf, offset)
        offset += 1
        n, = unpack_from('>i', buf, offset)
        offset += 4
        val = Decimal(n) / Decimal(10 ** d)
    # 'F': table
    elif ftype == 'F':
        tlen, = unpack_from('>I', buf, offset)
        offset += 4
        limit = offset + tlen
        val = {}
        while offset < limit:
            keylen, = unpack_from('>B', buf, offset)
            offset += 1
            key = pstr_t(buf[offset:offset + keylen])
            offset += keylen
            val[key], offset = _read_item(buf, offset)
    # 'A': array
    elif ftype == 'A':
        alen, = unpack_from('>I', buf, offset)
        offset += 4
        limit = offset + alen
        val = []
        while offset < limit:
            v, offset = _read_item(buf, offset)
            val.append(v)
    # 't': bool
    elif ftype == 't':
        val, = unpack_from('>B', buf, offset)
        val = bool(val)
        offset += 1
    # 'T': timestamp
    elif ftype == 'T':
        val, = unpack_from('>Q', buf, offset)
        offset += 8
        val = datetime.utcfromtimestamp(val)
    # 'V': void
    elif ftype == 'V':
        val = None
    else:
        raise FrameSyntaxError(
            'Unknown value in table: {!r} ({!r})'.format(
                ftype, type(ftype)))
    return val, offset


def loads(format, buf, offset):
    """Deserialize amqp format.

    bit = b
    octet = o
    short = B
    long = l
    long long = L
    float = f
    shortstr = s
    longstr = S
    byte array = x
    table = F
    array = A
    timestamp = T
    """
    bitcount = bits = 0

    values = []
    append = values.append
    format = pstr_t(format)

    for p in format:
        if p == 'b':
            if not bitcount:
                bits = ord(buf[offset:offset + 1])
                offset += 1
                bitcount = 8
            val = (bits & 1) == 1
            bits >>= 1
            bitcount -= 1
        elif p == 'o':
            bitcount = bits = 0
            val, = unpack_from('>B', buf, offset)
            offset += 1
        elif p == 'B':
            bitcount = bits = 0
            val, = unpack_from('>H', buf, offset)
            offset += 2
        elif p == 'l':
            bitcount = bits = 0
            val, = unpack_from('>I', buf, offset)
            offset += 4
        elif p == 'L':
            bitcount = bits = 0
            val, = unpack_from('>Q', buf, offset)
            offset += 8
        elif p == 'f':
            bitcount = bits = 0
            val, = unpack_from('>f', buf, offset)
            offset += 4
        elif p == 's':
            bitcount = bits = 0
            slen, = unpack_from('B', buf, offset)
            offset += 1
            val = buf[offset:offset + slen].decode('utf-8', 'surrogatepass')
            offset += slen
        elif p == 'S':
            bitcount = bits = 0
            slen, = unpack_from('>I', buf, offset)
            offset += 4
            val = buf[offset:offset + slen].decode('utf-8', 'surrogatepass')
            offset += slen
        elif p == 'x':
            blen, = unpack_from('>I', buf, offset)
            offset += 4
            val = buf[offset:offset + blen]
            offset += blen
        elif p == 'F':
            bitcount = bits = 0
            tlen, = unpack_from('>I', buf, offset)
            offset += 4
            limit = offset + tlen
            val = {}
            while offset < limit:
                keylen, = unpack_from('>B', buf, offset)
                offset += 1
                key = pstr_t(buf[offset:offset + keylen])
                offset += keylen
                val[key], offset = _read_item(buf, offset)
        elif p == 'A':
            bitcount = bits = 0
            alen, = unpack_from('>I', buf, offset)
            offset += 4
            limit = offset + alen
            val = []
            while offset < limit:
                aval, offset = _read_item(buf, offset)
                val.append(aval)
        elif p == 'T':
            bitcount = bits = 0
            val, = unpack_from('>Q', buf, offset)
            offset += 8
            val = datetime.utcfromtimestamp(val)
        else:
            raise FrameSyntaxError(ILLEGAL_TABLE_TYPE.format(p))
        append(val)
    return values, offset


def _flushbits(bits, write):
    if bits:
        write(pack('B' * len(bits), *bits))
        bits[:] = []
    return 0


def dumps(format, values):
    """Serialize AMQP arguments.

    Notes:
        bit = b
        octet = o
        short = B
        long = l
        long long = L
        shortstr = s
        longstr = S
        byte array = x
        table = F
        array = A
    """
    bitcount = 0
    bits = []
    out = BytesIO()
    write = out.write

    format = pstr_t(format)

    for i, val in enumerate(values):
        p = format[i]
        if p == 'b':
            val = 1 if val else 0
            shift = bitcount % 8
            if shift == 0:
                bits.append(0)
            bits[-1] |= (val << shift)
            bitcount += 1
        elif p == 'o':
            bitcount = _flushbits(bits, write)
            write(pack('B', val))
        elif p == 'B':
            bitcount = _flushbits(bits, write)
            write(pack('>H', int(val)))
        elif p == 'l':
            bitcount = _flushbits(bits, write)
            write(pack('>I', val))
        elif p == 'L':
            bitcount = _flushbits(bits, write)
            write(pack('>Q', val))
        elif p == 'f':
            bitcount = _flushbits(bits, write)
            write(pack('>f', val))
        elif p == 's':
            val = val or ''
            bitcount = _flushbits(bits, write)
            if isinstance(val, str):
                val = val.encode('utf-8', 'surrogatepass')
            write(pack('B', len(val)))
            write(val)
        elif p == 'S' or p == 'x':
            val = val or ''
            bitcount = _flushbits(bits, write)
            if isinstance(val, str):
                val = val.encode('utf-8', 'surrogatepass')
            write(pack('>I', len(val)))
            write(val)
        elif p == 'F':
            bitcount = _flushbits(bits, write)
            _write_table(val or {}, write, bits)
        elif p == 'A':
            bitcount = _flushbits(bits, write)
            _write_array(val or [], write, bits)
        elif p == 'T':
            write(pack('>Q', int(calendar.timegm(val.utctimetuple()))))
    _flushbits(bits, write)

    return out.getvalue()
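A round-trip sketch of the two functions above, kept to scalar codes so it does not depend on the table writers defined further down: serialize a shortstr, an octet, and a longstr with dumps(), then read them back with loads(). The values are made up.

payload = dumps('soS', ['tasks', 1, 'hello world'])
values, offset = loads('soS', payload, 0)
print(values)                  # ['tasks', 1, 'hello world']
print(offset == len(payload))  # True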


def _write_table(d, write, bits):
    out = BytesIO()
    twrite = out.write
    for k, v in d.items():
        if isinstance(k, str):
            k = k.encode('utf-8', 'surrogatepass')
        twrite(pack('B', len(k)))
        twrite(k)
        try:
            _write_item(v, twrite, bits)
        except ValueError:
            raise FrameSyntaxError(
                ILLEGAL_TABLE_TYPE_WITH_KEY.format(type(v), k, v))
    table_data = out.getvalue()
    write(pack('>I', len(table_data)))
    write(table_data)


def _write_array(list_, write, bits):
    out = BytesIO()
    awrite = out.write
    for v in list_:
        try:
            _write_item(v, awrite, bits)
        except ValueError:
            raise FrameSyntaxError(
                ILLEGAL_TABLE_TYPE_WITH_VALUE.format(type(v), v))
    array_data = out.getvalue()
    write(pack('>I', len(array_data)))
    write(array_data)


def _write_item(v, write, bits):
    if isinstance(v, (str, bytes)):
        if isinstance(v, str):
            v = v.encode('utf-8', 'surrogatepass')
        write(pack('>cI', b'S', len(v)))
        write(v)
    elif isinstance(v, bool):
        write(pack('>cB', b't', int(v)))
    elif isinstance(v, float):
        write(pack('>cd', b'd', v))
    elif isinstance(v, int):
        if v > 2147483647 or v < -2147483647:
            write(pack('>cq', b'L', v))
        else:
            write(pack('>ci', b'I', v))
    elif isinstance(v, Decimal):
        sign, digits, exponent = v.as_tuple()
        v = 0
        for d in digits:
            v = (v * 10) + d
        if sign:
            v = -v
        write(pack('>cBi', b'D', -exponent, v))
    elif isinstance(v, datetime):
        write(
            pack('>cQ', b'T', int(calendar.timegm(v.utctimetuple()))))
    elif isinstance(v, dict):
        write(b'F')
        _write_table(v, write, bits)
    elif isinstance(v, (list, tuple)):
        write(b'A')
        _write_array(v, write, bits)
    elif v is None:
        write(b'V')
    else:
        raise ValueError()


def decode_properties_basic(buf, offset):
    """Decode basic properties."""
    properties = {}

    flags, = unpack_from('>H', buf, offset)
    offset += 2

    if flags & 0x8000:
        slen, = unpack_from('>B', buf, offset)
        offset += 1
        properties['content_type'] = pstr_t(buf[offset:offset + slen])
        offset += slen
    if flags & 0x4000:
        slen, = unpack_from('>B', buf, offset)
        offset += 1
        properties['content_encoding'] = pstr_t(buf[offset:offset + slen])
        offset += slen
    if flags & 0x2000:
        _f, offset = loads('F', buf, offset)
        properties['application_headers'], = _f
    if flags & 0x1000:
        properties['delivery_mode'], = unpack_from('>B', buf, offset)
        offset += 1
    if flags & 0x0800:
        properties['priority'], = unpack_from('>B', buf, offset)
        offset += 1
    if flags & 0x0400:
        slen, = unpack_from('>B', buf, offset)
        offset += 1
        properties['correlation_id'] = pstr_t(buf[offset:offset + slen])
        offset += slen
    if flags & 0x0200:
        slen, = unpack_from('>B', buf, offset)
        offset += 1
        properties['reply_to'] = pstr_t(buf[offset:offset + slen])
        offset += slen
    if flags & 0x0100:
        slen, = unpack_from('>B', buf, offset)
        offset += 1
        properties['expiration'] = pstr_t(buf[offset:offset + slen])
        offset += slen
    if flags & 0x0080:
        slen, = unpack_from('>B', buf, offset)
        offset += 1
        properties['message_id'] = pstr_t(buf[offset:offset + slen])
        offset += slen
    if flags & 0x0040:
        properties['timestamp'], = unpack_from('>Q', buf, offset)
        offset += 8
    if flags & 0x0020:
        slen, = unpack_from('>B', buf, offset)
        offset += 1
        properties['type'] = pstr_t(buf[offset:offset + slen])
        offset += slen
    if flags & 0x0010:
        slen, = unpack_from('>B', buf, offset)
        offset += 1
        properties['user_id'] = pstr_t(buf[offset:offset + slen])
        offset += slen
    if flags & 0x0008:
        slen, = unpack_from('>B', buf, offset)
        offset += 1
        properties['app_id'] = pstr_t(buf[offset:offset + slen])
        offset += slen
    if flags & 0x0004:
        slen, = unpack_from('>B', buf, offset)
        offset += 1
        properties['cluster_id'] = pstr_t(buf[offset:offset + slen])
        offset += slen
    return properties, offset


PROPERTY_CLASSES = {
    Basic.CLASS_ID: decode_properties_basic,
}


class GenericContent:
    """Abstract base class for AMQP content.

    Subclasses should override the PROPERTIES attribute.
    """

    CLASS_ID = None
    PROPERTIES = [('dummy', 's')]

    def __init__(self, frame_method=None, frame_args=None, **props):
        self.frame_method = frame_method
        self.frame_args = frame_args

        self.properties = props
        self._pending_chunks = []
        self.body_received = 0
        self.body_size = 0
        self.ready = False

    def __getattr__(self, name):
        # Look for additional properties in the 'properties'
        # dictionary, and if present - the 'delivery_info' dictionary.
        if name == '__setstate__':
            # Allows pickling/unpickling to work
            raise AttributeError('__setstate__')

        if name in self.properties:
            return self.properties[name]
        raise AttributeError(name)

    def _load_properties(self, class_id, buf, offset):
        """Load AMQP properties.

        Given the raw bytes containing the property-flags and property-list
        from a content-frame-header, parse and insert into a dictionary
        stored in this object as an attribute named 'properties'.
        """
        # Read 16-bit shorts until we get one with a low bit set to zero
        props, offset = PROPERTY_CLASSES[class_id](buf, offset)
        self.properties = props
        return offset

    def _serialize_properties(self):
        """Serialize AMQP properties.

        Serialize the 'properties' attribute (a dictionary) into
        the raw bytes making up a set of property flags and a
        property list, suitable for putting into a content frame header.
        """
        shift = 15
        flag_bits = 0
        flags = []
        sformat, svalues = [], []
        props = self.properties
        for key, proptype in self.PROPERTIES:
            val = props.get(key, None)
            if val is not None:
                if shift == 0:
                    flags.append(flag_bits)
                    flag_bits = 0
                    shift = 15

                flag_bits |= (1 << shift)
                if proptype != 'bit':
                    sformat.append(str_to_bytes(proptype))
                    svalues.append(val)

            shift -= 1
        flags.append(flag_bits)
        result = BytesIO()
        write = result.write
        for flag_bits in flags:
            write(pack('>H', flag_bits))
        write(dumps(b''.join(sformat), svalues))

        return result.getvalue()

    def inbound_header(self, buf, offset=0):
        class_id, self.body_size = unpack_from('>HxxQ', buf, offset)
        offset += 12
        self._load_properties(class_id, buf, offset)
        if not self.body_size:
            self.ready = True
        return offset

    def inbound_body(self, buf):
        chunks = self._pending_chunks
        self.body_received += len(buf)
        if self.body_received >= self.body_size:
            if chunks:
                chunks.append(buf)
                self.body = bytes().join(chunks)
                chunks[:] = []
            else:
                self.body = buf
            self.ready = True
        else:
            chunks.append(buf)
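A sketch of the property-flag round trip using a hypothetical GenericContent subclass: each PROPERTIES entry consumes one flag bit in spec order, so the list below mirrors the one declared by amqp.basic_message.Message (an assumption about that class, not code from this file).

class DemoContent(GenericContent):
    CLASS_ID = Basic.CLASS_ID
    PROPERTIES = [
        ('content_type', 's'), ('content_encoding', 's'),
        ('application_headers', 'F'), ('delivery_mode', 'o'),
        ('priority', 'o'), ('correlation_id', 's'), ('reply_to', 's'),
        ('expiration', 's'), ('message_id', 's'), ('timestamp', 'L'),
        ('type', 's'), ('user_id', 's'), ('app_id', 's'),
        ('cluster_id', 's'),
    ]

msg = DemoContent(content_type='application/json', delivery_mode=2)
props, _ = decode_properties_basic(msg._serialize_properties(), 0)
print(props)  # {'content_type': 'application/json', 'delivery_mode': 2}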
@ -0,0 +1,121 @@
"""AMQP Spec."""

from collections import namedtuple

method_t = namedtuple('method_t', ('method_sig', 'args', 'content'))


def method(method_sig, args=None, content=False):
    """Create amqp method specification tuple."""
    return method_t(method_sig, args, content)


class Connection:
    """AMQ Connection class."""

    CLASS_ID = 10

    Start = (10, 10)
    StartOk = (10, 11)
    Secure = (10, 20)
    SecureOk = (10, 21)
    Tune = (10, 30)
    TuneOk = (10, 31)
    Open = (10, 40)
    OpenOk = (10, 41)
    Close = (10, 50)
    CloseOk = (10, 51)
    Blocked = (10, 60)
    Unblocked = (10, 61)


class Channel:
    """AMQ Channel class."""

    CLASS_ID = 20

    Open = (20, 10)
    OpenOk = (20, 11)
    Flow = (20, 20)
    FlowOk = (20, 21)
    Close = (20, 40)
    CloseOk = (20, 41)


class Exchange:
    """AMQ Exchange class."""

    CLASS_ID = 40

    Declare = (40, 10)
    DeclareOk = (40, 11)
    Delete = (40, 20)
    DeleteOk = (40, 21)
    Bind = (40, 30)
    BindOk = (40, 31)
    Unbind = (40, 40)
    UnbindOk = (40, 51)


class Queue:
    """AMQ Queue class."""

    CLASS_ID = 50

    Declare = (50, 10)
    DeclareOk = (50, 11)
    Bind = (50, 20)
    BindOk = (50, 21)
    Purge = (50, 30)
    PurgeOk = (50, 31)
    Delete = (50, 40)
    DeleteOk = (50, 41)
    Unbind = (50, 50)
    UnbindOk = (50, 51)


class Basic:
    """AMQ Basic class."""

    CLASS_ID = 60

    Qos = (60, 10)
    QosOk = (60, 11)
    Consume = (60, 20)
    ConsumeOk = (60, 21)
    Cancel = (60, 30)
    CancelOk = (60, 31)
    Publish = (60, 40)
    Return = (60, 50)
    Deliver = (60, 60)
    Get = (60, 70)
    GetOk = (60, 71)
    GetEmpty = (60, 72)
    Ack = (60, 80)
    Nack = (60, 120)
    Reject = (60, 90)
    RecoverAsync = (60, 100)
    Recover = (60, 110)
    RecoverOk = (60, 111)


class Confirm:
    """AMQ Confirm class."""

    CLASS_ID = 85

    Select = (85, 10)
    SelectOk = (85, 11)


class Tx:
    """AMQ Tx class."""

    CLASS_ID = 90

    Select = (90, 10)
    SelectOk = (90, 11)
    Commit = (90, 20)
    CommitOk = (90, 21)
    Rollback = (90, 30)
    RollbackOk = (90, 31)
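Usage sketch: the (class_id, method_id) pairs above are what travel in the first four bytes of a method frame payload, so dispatch tables key on them. The argument signature passed to method() below is illustrative only.

print(Basic.Deliver)                 # (60, 60)
qos = method(Basic.Qos, args='BlB')  # a method_t spec tuple
print(qos.method_sig, qos.content)   # (60, 10) False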
@ -0,0 +1,675 @@
"""Transport implementation."""
# Copyright (C) 2009 Barry Pederson <bp@barryp.org>

import errno
import os
import re
import socket
import ssl
from contextlib import contextmanager
from ssl import SSLError
from struct import pack, unpack

from .exceptions import UnexpectedFrame
from .platform import KNOWN_TCP_OPTS, SOL_TCP
from .utils import set_cloexec

_UNAVAIL = {errno.EAGAIN, errno.EINTR, errno.ENOENT, errno.EWOULDBLOCK}

AMQP_PORT = 5672

EMPTY_BUFFER = bytes()

SIGNED_INT_MAX = 0x7FFFFFFF

# Yes, Advanced Message Queuing Protocol Protocol is redundant
AMQP_PROTOCOL_HEADER = b'AMQP\x00\x00\x09\x01'

# Match things like: [fe80::1]:5432, from RFC 2732
IPV6_LITERAL = re.compile(r'\[([\.0-9a-f:]+)\](?::(\d+))?')

DEFAULT_SOCKET_SETTINGS = {
    'TCP_NODELAY': 1,
    'TCP_USER_TIMEOUT': 1000,
    'TCP_KEEPIDLE': 60,
    'TCP_KEEPINTVL': 10,
    'TCP_KEEPCNT': 9,
}


def to_host_port(host, default=AMQP_PORT):
    """Convert hostname:port string to host, port tuple."""
    port = default
    m = IPV6_LITERAL.match(host)
    if m:
        host = m.group(1)
        if m.group(2):
            port = int(m.group(2))
    else:
        if ':' in host:
            host, port = host.rsplit(':', 1)
            port = int(port)
    return host, port
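A quick sketch of the parsing rules above, including the RFC 2732 bracket form for IPv6 literals (hostnames are made up):

print(to_host_port('rabbit.example.com'))       # ('rabbit.example.com', 5672)
print(to_host_port('rabbit.example.com:5671'))  # ('rabbit.example.com', 5671)
print(to_host_port('[fe80::1]:5432'))           # ('fe80::1', 5432)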


class _AbstractTransport:
    """Common superclass for TCP and SSL transports.

    PARAMETERS:
        host: str

            Broker address in format ``HOSTNAME:PORT``.

        connect_timeout: int

            Timeout for creating a new connection.

        read_timeout: int

            Sets the ``SO_RCVTIMEO`` parameter of the socket.

        write_timeout: int

            Sets the ``SO_SNDTIMEO`` parameter of the socket.

        socket_settings: dict

            Dictionary containing ``optname`` and ``optval`` passed to
            ``setsockopt(2)``.

        raise_on_initial_eintr: bool

            When True, ``socket.timeout`` is raised when an exception is
            received during the first read. See ``_read()`` for details.
    """

    def __init__(self, host, connect_timeout=None,
                 read_timeout=None, write_timeout=None,
                 socket_settings=None, raise_on_initial_eintr=True, **kwargs):
        self.connected = False
        self.sock = None
        self.raise_on_initial_eintr = raise_on_initial_eintr
        self._read_buffer = EMPTY_BUFFER
        self.host, self.port = to_host_port(host)
        self.connect_timeout = connect_timeout
        self.read_timeout = read_timeout
        self.write_timeout = write_timeout
        self.socket_settings = socket_settings

    def __repr__(self):
        if self.sock:
            src = f'{self.sock.getsockname()[0]}:{self.sock.getsockname()[1]}'
            dst = f'{self.sock.getpeername()[0]}:{self.sock.getpeername()[1]}'
            return f'<{type(self).__name__}: {src} -> {dst} at {id(self):#x}>'
        else:
            return f'<{type(self).__name__}: (disconnected) at {id(self):#x}>'

    def connect(self):
        try:
            # are we already connected?
            if self.connected:
                return
            self._connect(self.host, self.port, self.connect_timeout)
            self._init_socket(
                self.socket_settings, self.read_timeout, self.write_timeout,
            )
            # we've sent the banner; signal connect
            # EINTR, EAGAIN, EWOULDBLOCK would signal that the banner
            # has _not_ been sent
            self.connected = True
        except (OSError, SSLError):
            # if not fully connected, close socket, and reraise error
            if self.sock and not self.connected:
                self.sock.close()
                self.sock = None
            raise

    @contextmanager
    def having_timeout(self, timeout):
        if timeout is None:
            yield self.sock
        else:
            sock = self.sock
            prev = sock.gettimeout()
            if prev != timeout:
                sock.settimeout(timeout)
            try:
                yield self.sock
            except SSLError as exc:
                if 'timed out' in str(exc):
                    # http://bugs.python.org/issue10272
                    raise socket.timeout()
                elif 'The operation did not complete' in str(exc):
                    # Non-blocking SSL sockets can throw SSLError
                    raise socket.timeout()
                raise
            except OSError as exc:
                if exc.errno == errno.EWOULDBLOCK:
                    raise socket.timeout()
                raise
            finally:
                if timeout != prev:
                    sock.settimeout(prev)

    def _connect(self, host, port, timeout):
        e = None

        # Below we are trying to avoid additional DNS requests for AAAA if A
        # succeeds. This helps a lot in case when a hostname has an IPv4 entry
        # in /etc/hosts but not IPv6. Without the (arguably somewhat twisted)
        # logic below, getaddrinfo would attempt to resolve the hostname for
        # both IP versions, which would make the resolver talk to configured
        # DNS servers. If those servers are for some reason not available
        # during a resolution attempt (either because of system
        # misconfiguration, or a network connectivity problem), the
        # resolution process locks the _connect call for an extended time.
        addr_types = (socket.AF_INET, socket.AF_INET6)
        addr_types_num = len(addr_types)
        for n, family in enumerate(addr_types):
            # first, resolve the address for a single address family
            try:
                entries = socket.getaddrinfo(
                    host, port, family, socket.SOCK_STREAM, SOL_TCP)
                entries_num = len(entries)
            except socket.gaierror:
                # we may have depleted all our options
                if n + 1 >= addr_types_num:
                    # if getaddrinfo succeeded before for another address
                    # family, reraise the previous socket.error since it's
                    # more relevant to users
                    raise (e
                           if e is not None
                           else socket.error(
                               "failed to resolve broker hostname"))
                continue  # pragma: no cover

            # now that we have address(es) for the hostname, connect to broker
            for i, res in enumerate(entries):
                af, socktype, proto, _, sa = res
                try:
                    self.sock = socket.socket(af, socktype, proto)
                    try:
                        set_cloexec(self.sock, True)
                    except NotImplementedError:
                        pass
                    self.sock.settimeout(timeout)
                    self.sock.connect(sa)
                except OSError as ex:
                    e = ex
                    if self.sock is not None:
                        self.sock.close()
                        self.sock = None
                    # we may have depleted all our options
                    if i + 1 >= entries_num and n + 1 >= addr_types_num:
                        raise
                else:
                    # hurray, we established connection
                    return

    def _init_socket(self, socket_settings, read_timeout, write_timeout):
        self.sock.settimeout(None)  # set socket back to blocking mode
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
        self._set_socket_options(socket_settings)

        # set socket timeouts
        for timeout, interval in ((socket.SO_SNDTIMEO, write_timeout),
                                  (socket.SO_RCVTIMEO, read_timeout)):
            if interval is not None:
                sec = int(interval)
                usec = int((interval - sec) * 1000000)
                self.sock.setsockopt(
                    socket.SOL_SOCKET, timeout,
                    pack('ll', sec, usec),
                )
        self._setup_transport()

        self._write(AMQP_PROTOCOL_HEADER)

    def _get_tcp_socket_defaults(self, sock):
        tcp_opts = {}
        for opt in KNOWN_TCP_OPTS:
            enum = None
            if opt == 'TCP_USER_TIMEOUT':
                try:
                    from socket import TCP_USER_TIMEOUT as enum
                except ImportError:
                    # should be in Python 3.6+ on Linux.
                    enum = 18
            elif hasattr(socket, opt):
                enum = getattr(socket, opt)

            if enum:
                if opt in DEFAULT_SOCKET_SETTINGS:
                    tcp_opts[enum] = DEFAULT_SOCKET_SETTINGS[opt]
                elif hasattr(socket, opt):
                    tcp_opts[enum] = sock.getsockopt(
                        SOL_TCP, getattr(socket, opt))
        return tcp_opts

    def _set_socket_options(self, socket_settings):
        tcp_opts = self._get_tcp_socket_defaults(self.sock)
        if socket_settings:
            tcp_opts.update(socket_settings)
        for opt, val in tcp_opts.items():
            self.sock.setsockopt(SOL_TCP, opt, val)

    def _read(self, n, initial=False):
        """Read exactly n bytes from the peer."""
        raise NotImplementedError('Must be overridden in subclass')

    def _setup_transport(self):
        """Do any additional initialization of the class."""
        pass

    def _shutdown_transport(self):
        """Do any preliminary work in shutting down the connection."""
        pass

    def _write(self, s):
        """Completely write a string to the peer."""
        raise NotImplementedError('Must be overridden in subclass')

    def close(self):
        if self.sock is not None:
            self._shutdown_transport()
            # Call shutdown first to make sure that pending messages
            # reach the AMQP broker if the program exits after
            # calling this method.
            self.sock.shutdown(socket.SHUT_RDWR)
            self.sock.close()
            self.sock = None
        self.connected = False

    def read_frame(self, unpack=unpack):
        """Parse AMQP frame.

        Frame has following format::

            0      1         3         7                   size+7     size+8
            +------+---------+---------+   +-------------+   +-----------+
            | type | channel |   size  |   |   payload   |   | frame-end |
            +------+---------+---------+   +-------------+   +-----------+
             octet    short      long       'size' octets        octet

        """
        read = self._read
        read_frame_buffer = EMPTY_BUFFER
        try:
            frame_header = read(7, True)
            read_frame_buffer += frame_header
            frame_type, channel, size = unpack('>BHI', frame_header)
            # >I is an unsigned int, but the argument to sock.recv is signed,
            # so we know the size can be at most 2 * SIGNED_INT_MAX
            if size > SIGNED_INT_MAX:
                part1 = read(SIGNED_INT_MAX)

                try:
                    part2 = read(size - SIGNED_INT_MAX)
                except (socket.timeout, OSError, SSLError):
                    # In case this read times out, we need to make sure to not
                    # lose part1 when we retry the read
                    read_frame_buffer += part1
                    raise

                payload = b''.join([part1, part2])
            else:
                payload = read(size)
                read_frame_buffer += payload
            frame_end = ord(read(1))
        except socket.timeout:
            self._read_buffer = read_frame_buffer + self._read_buffer
            raise
        except (OSError, SSLError) as exc:
            if (
                isinstance(exc, socket.error) and os.name == 'nt'
                and exc.errno == errno.EWOULDBLOCK  # noqa
            ):
                # On windows we can get a read timeout with a winsock error
                # code instead of a proper socket.timeout() error, see
                # https://github.com/celery/py-amqp/issues/320
                self._read_buffer = read_frame_buffer + self._read_buffer
                raise socket.timeout()

            if isinstance(exc, SSLError) and 'timed out' in str(exc):
                # Don't disconnect for ssl read time outs
                # http://bugs.python.org/issue10272
                self._read_buffer = read_frame_buffer + self._read_buffer
                raise socket.timeout()

            if exc.errno not in _UNAVAIL:
                self.connected = False
            raise
        # frame-end octet must contain '\xce' value
        if frame_end == 206:
            return frame_type, channel, payload
        else:
            raise UnexpectedFrame(
                f'Received frame_end {frame_end:#04x} while expecting 0xce')

    def write(self, s):
        try:
            self._write(s)
        except socket.timeout:
            raise
        except OSError as exc:
            if exc.errno not in _UNAVAIL:
                self.connected = False
            raise


class SSLTransport(_AbstractTransport):
    """Transport that works over SSL.

    PARAMETERS:
        host: str

            Broker address in format ``HOSTNAME:PORT``.

        connect_timeout: int

            Timeout for creating a new connection.

        ssl: bool|dict

            parameters of the TLS subsystem.
                - when ``ssl`` is not a dictionary, TLS defaults are used
                - otherwise:
                    - if the ``ssl`` dictionary contains a ``context`` key,
                      :attr:`~SSLTransport._wrap_context` is used for wrapping
                      the socket. ``context`` is a dictionary passed to
                      :attr:`~SSLTransport._wrap_context` as the context
                      parameter. All other items from the ``ssl`` argument
                      are passed as ``sslopts``.
                    - if the ``ssl`` dictionary does not contain a
                      ``context`` key, :attr:`~SSLTransport._wrap_socket_sni`
                      is used for wrapping the socket. All items in the
                      ``ssl`` argument are passed to
                      :attr:`~SSLTransport._wrap_socket_sni` as parameters.

        kwargs:

            additional arguments of the
            :class:`~amqp.transport._AbstractTransport` class
    """

    def __init__(self, host, connect_timeout=None, ssl=None, **kwargs):
        self.sslopts = ssl if isinstance(ssl, dict) else {}
        self._read_buffer = EMPTY_BUFFER
        super().__init__(
            host, connect_timeout=connect_timeout, **kwargs)

    def _setup_transport(self):
        """Wrap the socket in an SSL object."""
        self.sock = self._wrap_socket(self.sock, **self.sslopts)
        self.sock.do_handshake()
        self._quick_recv = self.sock.read

    def _wrap_socket(self, sock, context=None, **sslopts):
        if context:
            return self._wrap_context(sock, sslopts, **context)
        return self._wrap_socket_sni(sock, **sslopts)

    def _wrap_context(self, sock, sslopts, check_hostname=None, **ctx_options):
        """Wrap socket without SNI headers.

        PARAMETERS:
            sock: socket.socket

                Socket to be wrapped.

            sslopts: dict

                Parameters of :attr:`ssl.SSLContext.wrap_socket`.

            check_hostname

                Whether to match the peer cert’s hostname. See
                :attr:`ssl.SSLContext.check_hostname` for details.

            ctx_options

                Parameters of :attr:`ssl.create_default_context`.
        """
        ctx = ssl.create_default_context(**ctx_options)
        ctx.check_hostname = check_hostname
        return ctx.wrap_socket(sock, **sslopts)

    def _wrap_socket_sni(self, sock, keyfile=None, certfile=None,
                         server_side=False, cert_reqs=None,
                         ca_certs=None, do_handshake_on_connect=False,
                         suppress_ragged_eofs=True, server_hostname=None,
                         ciphers=None, ssl_version=None):
        """Socket wrap with SNI headers.

        The stdlib :attr:`ssl.SSLContext.wrap_socket` method augmented with
        support for setting the ``server_hostname`` field required for the
        SNI hostname header.

        PARAMETERS:
            sock: socket.socket

                Socket to be wrapped.

            keyfile: str

                Path to the private key.

            certfile: str

                Path to the certificate.

            server_side: bool

                Identifies whether server-side or client-side
                behavior is desired from this socket. See
                :attr:`~ssl.SSLContext.wrap_socket` for details.

            cert_reqs: ssl.VerifyMode

                When set to other than :attr:`ssl.CERT_NONE`, the peer's
                certificate is checked. Possible values are
                :attr:`ssl.CERT_NONE`, :attr:`ssl.CERT_OPTIONAL` and
                :attr:`ssl.CERT_REQUIRED`.

            ca_certs: str

                Path to “certification authority” (CA) certificates
                used to validate other peers’ certificates when ``cert_reqs``
                is other than :attr:`ssl.CERT_NONE`.

            do_handshake_on_connect: bool

                Specifies whether to do the SSL
                handshake automatically. See
                :attr:`~ssl.SSLContext.wrap_socket` for details.

            suppress_ragged_eofs: bool

                See :attr:`~ssl.SSLContext.wrap_socket` for details.

            server_hostname: str

                Specifies the hostname of the service which
                we are connecting to. See :attr:`~ssl.SSLContext.wrap_socket`
                for details.

            ciphers: str

                Available ciphers for sockets created with this
                context. See :attr:`ssl.SSLContext.set_ciphers`.

            ssl_version:

                Protocol of the SSL Context. The value is one of the
                ``ssl.PROTOCOL_*`` constants.
        """
        opts = {
            'sock': sock,
            'server_side': server_side,
            'do_handshake_on_connect': do_handshake_on_connect,
            'suppress_ragged_eofs': suppress_ragged_eofs,
            'server_hostname': server_hostname,
        }

        if ssl_version is None:
            ssl_version = (
                ssl.PROTOCOL_TLS_SERVER
                if server_side
                else ssl.PROTOCOL_TLS_CLIENT
            )

        context = ssl.SSLContext(ssl_version)

        if certfile is not None:
            context.load_cert_chain(certfile, keyfile)
        if ca_certs is not None:
            context.load_verify_locations(ca_certs)
        if ciphers is not None:
            context.set_ciphers(ciphers)
        # Set SNI headers if supported.
        # Must set context.check_hostname before setting context.verify_mode
        # to avoid setting context.verify_mode=ssl.CERT_NONE while
        # context.check_hostname is still True (the default value in context
        # if client-side) which results in the following exception:
        # ValueError: Cannot set verify_mode to CERT_NONE when check_hostname
        # is enabled.
        try:
            context.check_hostname = (
                ssl.HAS_SNI and server_hostname is not None
            )
        except AttributeError:
            pass  # ask forgiveness not permission

        # See note above re: ordering for context.check_hostname and
        # context.verify_mode assignments.
        if cert_reqs is not None:
            context.verify_mode = cert_reqs

        if ca_certs is None and context.verify_mode != ssl.CERT_NONE:
            purpose = (
                ssl.Purpose.CLIENT_AUTH
                if server_side
                else ssl.Purpose.SERVER_AUTH
            )
            context.load_default_certs(purpose)

        sock = context.wrap_socket(**opts)
        return sock

    def _shutdown_transport(self):
        """Unwrap an SSL socket, so we can call shutdown()."""
        if self.sock is not None:
            self.sock = self.sock.unwrap()

    def _read(self, n, initial=False,
              _errnos=(errno.ENOENT, errno.EAGAIN, errno.EINTR)):
        # According to SSL_read(3), it can at most return 16kb of data.
        # Thus, we use an internal read buffer like TCPTransport._read
        # to get the exact number of bytes wanted.
        recv = self._quick_recv
        rbuf = self._read_buffer
        try:
            while len(rbuf) < n:
                try:
                    s = recv(n - len(rbuf))  # see note above
                except OSError as exc:
                    # ssl.sock.read may cause ENOENT if the
                    # operation couldn't be performed (Issue celery#1414).
                    if exc.errno in _errnos:
                        if initial and self.raise_on_initial_eintr:
                            raise socket.timeout()
                        continue
                    raise
                if not s:
                    raise OSError('Server unexpectedly closed connection')
                rbuf += s
        except:  # noqa
            self._read_buffer = rbuf
            raise
        result, self._read_buffer = rbuf[:n], rbuf[n:]
        return result

    def _write(self, s):
        """Write a string out to the SSL socket fully."""
        write = self.sock.write
        while s:
            try:
                n = write(s)
            except ValueError:
                # AG: sock._sslobj might become null in the meantime if the
                # remote connection has hung up.
                # In Python 3.4, a ValueError is raised if self._sslobj is
                # None.
                n = 0
            if not n:
                raise OSError('Socket closed')
            s = s[n:]


class TCPTransport(_AbstractTransport):
    """Transport that deals directly with the TCP socket.

    All parameters are the same as for
    :class:`~amqp.transport._AbstractTransport`.
    """

    def _setup_transport(self):
        # Setup to _write() directly to the socket, and
        # do our own buffered reads.
        self._write = self.sock.sendall
        self._read_buffer = EMPTY_BUFFER
        self._quick_recv = self.sock.recv

    def _read(self, n, initial=False, _errnos=(errno.EAGAIN, errno.EINTR)):
        """Read exactly n bytes from the socket."""
        recv = self._quick_recv
        rbuf = self._read_buffer
        try:
            while len(rbuf) < n:
                try:
                    s = recv(n - len(rbuf))
                except OSError as exc:
                    if exc.errno in _errnos:
                        if initial and self.raise_on_initial_eintr:
                            raise socket.timeout()
                        continue
                    raise
                if not s:
                    raise OSError('Server unexpectedly closed connection')
                rbuf += s
        except:  # noqa
            self._read_buffer = rbuf
            raise

        result, self._read_buffer = rbuf[:n], rbuf[n:]
        return result


def Transport(host, connect_timeout=None, ssl=False, **kwargs):
    """Create transport.

    Given a few parameters from the Connection constructor,
    select and create a subclass of
    :class:`~amqp.transport._AbstractTransport`.

    PARAMETERS:

        host: str

            Broker address in format ``HOSTNAME:PORT``.

        connect_timeout: int

            Timeout for creating a new connection.

        ssl: bool|dict

            If set, :class:`~amqp.transport.SSLTransport` is used
            and the ``ssl`` parameter is passed to it. Otherwise
            :class:`~amqp.transport.TCPTransport` is used.

        kwargs:

            additional arguments of the
            :class:`~amqp.transport._AbstractTransport` class
    """
    transport = SSLTransport if ssl else TCPTransport
    return transport(host, connect_timeout=connect_timeout, ssl=ssl, **kwargs)
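A usage sketch of the factory, assuming a RabbitMQ broker reachable on localhost:5672 (for example the one started by this repository's docker-compose file): connect() sends the AMQP protocol header, and the broker should answer with a connection.start method frame (type 1 on channel 0).

t = Transport('localhost:5672', connect_timeout=5)
t.connect()                                    # sends AMQP_PROTOCOL_HEADER
frame_type, channel, payload = t.read_frame()
print(frame_type, channel)                     # -> 1 0 (connection.start)
t.close()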
@ -0,0 +1,566 @@
"""Abstract types."""
import abc
import asyncio
import socket
from array import array
from datetime import datetime
from typing import (
    Any, Awaitable, Callable, IO, List, Mapping,
    MutableMapping, NamedTuple, Optional, Sequence, SupportsInt,
    TypeVar, Tuple, Union,
)
from .protocol import queue_declare_ok_t
from .spec import method_sig_t
from vine import Thenable

Fd = TypeVar('Fd', int, IO)
Int = TypeVar('Int', SupportsInt, str)


class Frame(NamedTuple):
    type: int
    channel: int
    data: bytes


ConnectionBlockedCallbackT = Callable[[str], Optional[Awaitable]]
ConnectionUnblockedCallbackT = Callable[[], Optional[Awaitable]]
ConnectionFrameHandlerT = Callable[[Frame], Awaitable]
ConnectionFrameWriterT = Callable[
    [int,
     int,
     Optional[method_sig_t],
     Optional[bytes],
     Optional['MessageT'],
     Optional[float]],
    Awaitable,
]

WaitMethodT = Union[method_sig_t, Sequence[method_sig_t]]


class TransportT(metaclass=abc.ABCMeta):
    """Transport type."""

    rstream: asyncio.StreamReader
    wstream: asyncio.StreamWriter

    connected: bool = False
    host: str
    port: int
    ssl: Any
    connect_timeout: float
    read_timeout: float
    write_timeout: float
    socket_settings: Mapping
    sock: socket.socket

    @abc.abstractmethod
    async def connect(self) -> None:
        ...

    @abc.abstractmethod
    def close(self) -> None:
        ...

    @abc.abstractmethod
    async def read_frame(self, timeout: float = None) -> Frame:
        ...

    @abc.abstractmethod
    async def write(self, s: bytes, timeout: float = None) -> None:
        ...


class ContentT(metaclass=abc.ABCMeta):
    """Generic content type."""

    CLASS_ID: int
    PROPERTIES: Sequence[Tuple[str, str]]

    properties: MutableMapping
    body_received: int = 0
    body_size: int = 0
    ready: bool = False

    frame_method: method_sig_t
    frame_args: str

    @abc.abstractmethod
    def _load_properties(
            self,
            class_id: int,
            buf: bytes,
            offset: int = 0,
            classes: Mapping = None,
            unpack_from: Callable = None) -> int:
        ...

    @abc.abstractmethod
    def _serialize_properties(self) -> bytes:
        ...

    @abc.abstractmethod
    def inbound_header(self, buf: bytes, offset: int = 0) -> int:
        ...

    @abc.abstractmethod
    def inbound_body(self, buf: bytes):
        ...


class MessageT(ContentT, metaclass=abc.ABCMeta):
    """Basic message type."""

    body: bytes
    children: Any
    channel: 'ChannelT'
    delivery_info: Mapping[str, Any]

    content_type: str
    content_encoding: str
    application_headers: MutableMapping
    delivery_mode: int
    priority: int
    correlation_id: str
    reply_to: str
    expiration: str
    message_id: str
    timestamp: datetime
    type: str
    user_id: str
    app_id: str
    cluster_id: str

    @abc.abstractmethod
    def __init__(self,
                 body: bytes = b'',
                 *,
                 children: Any = None,
                 channel: 'ChannelT' = None) -> None:
        ...

    @property
    @abc.abstractmethod
    def headers(self) -> MutableMapping:
        ...

    @property
    @abc.abstractmethod
    def delivery_tag(self) -> str:
        ...


class AbstractChannelT(metaclass=abc.ABCMeta):
    """Abstract channel type."""

    connection: 'ConnectionT'
    channel_id: int
    auto_decode: bool = False
    is_open: bool = False

    @abc.abstractmethod
    def __enter__(self) -> 'AbstractChannelT':
        ...

    @abc.abstractmethod
    def __exit__(self, *exc_info) -> None:
        ...

    @abc.abstractmethod
    async def __aenter__(self) -> 'AbstractChannelT':
        ...

    @abc.abstractmethod
    async def __aexit__(self, *exc_info) -> None:
        ...

    @abc.abstractmethod
    def _setup_listeners(self):
        ...

    @abc.abstractmethod
    async def send_method(
            self, sig: method_sig_t,
            format: str = None,
            args: Sequence = None,
            *,
            content: MessageT = None,
            wait: WaitMethodT = None,
            callback: Callable = None,
            returns_tuple: bool = False) -> Thenable:
        ...

    @abc.abstractmethod
    async def close(
            self,
            *,
            reply_code: int = 0,
            reply_text: str = '',
            method_sig: method_sig_t = method_sig_t(0, 0),
            argsig: str = 'BsBB') -> None:
        ...

    @abc.abstractmethod
    def collect(self) -> None:
        ...

    @abc.abstractmethod
    async def wait(
            self,
            method: WaitMethodT,
            *,
            callback: Callable = None,
            timeout: float = None,
            returns_tuple: bool = False) -> Any:
        ...

    async def dispatch_method(
            self,
            method_sig: method_sig_t,
            payload: bytes,
            content: MessageT) -> None:
        ...


class ConnectionT(AbstractChannelT):
    """Connection channel type."""

    Channel: type
    Transport: type

    host: str
    userid: str
    password: str
    login_method: str
    login_response: Any
    virtual_host: str
    locale: str
    client_properties: MutableMapping
    ssl: Any
    channel_max: int
    frame_max: int
    on_open: Thenable
    on_tune_ok: Thenable
    confirm_publish: bool
    connect_timeout: float
    read_timeout: float
    write_timeout: float
    socket_settings: Mapping

    negotiate_capabilities: Mapping[str, bool]
    library_properties: Mapping[str, Any]
    heartbeat: float
    client_heartbeat: float
    server_heartbeat: float
    last_heartbeat_sent: float
    last_heartbeat_received: float
    bytes_sent: int = 0
    bytes_recv: int = 0
    prev_sent: int
    prev_recv: int

    connection_errors: Tuple[type, ...]
    channel_errors: Tuple[type, ...]
    recoverable_connection_errors: Tuple[type, ...]
    recoverable_channel_errors: Tuple[type, ...]

    transport: TransportT
    channels: MutableMapping[int, AbstractChannelT]
    loop: asyncio.AbstractEventLoop

    mechanisms: List[str]
    locales: List[str]

    _avail_channel_ids: array

    def __init__(
            self,
            host: str = 'localhost:5672',
            userid: str = 'guest',
            password: str = 'guest',
            *,
            login_method: str = 'AMQPLAIN',
            login_response: Any = None,
            virtual_host: str = '/',
            locale: str = 'en_US',
            client_properties: Mapping = None,
            ssl: Any = False,
            connect_timeout: float = None,
            channel_max: int = None,
            frame_max: int = None,
            heartbeat: float = 0.0,
            on_open: Thenable = None,
            on_blocked: ConnectionBlockedCallbackT = None,
            on_unblocked: ConnectionUnblockedCallbackT = None,
            confirm_publish: bool = False,
            on_tune_ok: Callable = None,
            read_timeout: float = None,
            write_timeout: float = None,
|
||||
socket_settings: Mapping = None,
|
||||
frame_handler: ConnectionFrameHandlerT = None,
|
||||
frame_writer: ConnectionFrameWriterT = None,
|
||||
loop: asyncio.AbstractEventLoop = None,
|
||||
transport: TransportT = None,
|
||||
**kwargs) -> None:
|
||||
self.frame_writer = frame_writer
|
||||
self.frame_handler = frame_handler
|
||||
|
||||
@property
|
||||
@abc.abstractmethod
|
||||
def server_capabilities(self) -> Mapping:
|
||||
...
|
||||
|
||||
@property
|
||||
@abc.abstractmethod
|
||||
def sock(self) -> socket.socket:
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
async def connect(self, callback: Callable[[], None] = None) -> None:
|
||||
...
|
||||
|
||||
@property
|
||||
@abc.abstractmethod
|
||||
def connected(self) -> bool:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
def channel(self, channel_id: int,
|
||||
callback: Callable = None) -> 'AbstractChannelT':
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
def is_alive(self) -> bool:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
async def drain_events(self, timeout: float = None) -> None:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
async def on_inbound_method(
|
||||
self,
|
||||
channel_id: int,
|
||||
method_sig: method_sig_t,
|
||||
payload: bytes,
|
||||
content: MessageT) -> None:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
async def send_heartbeat(self) -> None:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
async def heartbeat_tick(self, rate: int = 2) -> None:
|
||||
...
|
||||
|
||||
def _get_free_channel_id(self) -> int:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
def _claim_channel_id(self, channel_id: int) -> None:
|
||||
...
|
||||
|
||||
|
||||
class ChannelT(AbstractChannelT, metaclass=abc.ABCMeta):
|
||||
"""Channel type."""
|
||||
|
||||
@abc.abstractmethod
|
||||
async def flow(self, active: bool) -> None:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
async def open(self) -> None:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
async def exchange_declare(
|
||||
self, exchange: str, type: str,
|
||||
*,
|
||||
passive: bool = False,
|
||||
durable: bool = False,
|
||||
auto_delete: bool = True,
|
||||
nowait: bool = False,
|
||||
arguments: Mapping[str, Any] = None,
|
||||
argsig: str = 'BssbbbbbF') -> None:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
async def exchange_delete(
|
||||
self, exchange: str,
|
||||
*,
|
||||
if_unused: bool = False,
|
||||
nowait: bool = False,
|
||||
argsig: str = 'Bsbb') -> None:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
async def exchange_bind(
|
||||
self, destination: str,
|
||||
source: str = '',
|
||||
routing_key: str = '',
|
||||
*,
|
||||
nowait: bool = False,
|
||||
arguments: Mapping[str, Any] = None,
|
||||
argsig: str = 'BsssbF') -> None:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
async def exchange_unbind(
|
||||
self, destination: str,
|
||||
source: str = '',
|
||||
routing_key: str = '',
|
||||
*,
|
||||
nowait: bool = False,
|
||||
arguments: Mapping[str, Any] = None,
|
||||
argsig: str = 'BsssbF') -> None:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
async def queue_bind(
|
||||
self, queue: str,
|
||||
exchange: str = '',
|
||||
routing_key: str = '',
|
||||
*,
|
||||
nowait: bool = False,
|
||||
arguments: Mapping[str, Any] = None,
|
||||
argsig: str = 'BsssbF') -> None:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
async def queue_unbind(
|
||||
self, queue: str, exchange: str,
|
||||
routing_key: str = '',
|
||||
*,
|
||||
nowait: bool = False,
|
||||
arguments: Mapping[str, Any] = None,
|
||||
argsig: str = 'BsssF') -> None:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
async def queue_declare(
|
||||
self,
|
||||
queue: str = '',
|
||||
*,
|
||||
passive: bool = False,
|
||||
durable: bool = False,
|
||||
exclusive: bool = False,
|
||||
auto_delete: bool = True,
|
||||
nowait: bool = False,
|
||||
arguments: Mapping[str, Any] = None,
|
||||
argsig: str = 'BsbbbbbF') -> queue_declare_ok_t:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
async def queue_delete(
|
||||
self,
|
||||
queue: str = '',
|
||||
*,
|
||||
if_unused: bool = False,
|
||||
if_empty: bool = False,
|
||||
nowait: bool = False,
|
||||
argsig: str = 'Bsbbb') -> None:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
async def queue_purge(
|
||||
self,
|
||||
queue: str = '',
|
||||
*,
|
||||
nowait: bool = False,
|
||||
argsig: str = 'Bsb') -> Optional[int]:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
async def basic_ack(
|
||||
self, delivery_tag: str,
|
||||
*,
|
||||
multiple: bool = False,
|
||||
argsig: str = 'Lb') -> None:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
async def basic_cancel(
|
||||
self, consumer_tag: str,
|
||||
*,
|
||||
nowait: bool = False,
|
||||
argsig: str = 'sb') -> None:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
async def basic_consume(
|
||||
self,
|
||||
queue: str = '',
|
||||
consumer_tag: str = '',
|
||||
*,
|
||||
no_local: bool = False,
|
||||
no_ack: bool = False,
|
||||
exclusive: bool = False,
|
||||
nowait: bool = False,
|
||||
callback: Callable = None,
|
||||
arguments: Mapping[str, Any] = None,
|
||||
on_cancel: Callable = None,
|
||||
argsig: str = 'BssbbbbF') -> None:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
async def basic_get(
|
||||
self,
|
||||
queue: str = '',
|
||||
*,
|
||||
no_ack: bool = False,
|
||||
argsig: str = 'Bsb') -> Optional[MessageT]:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
async def basic_publish(
|
||||
self, msg: MessageT,
|
||||
exchange: str = '',
|
||||
routing_key: str = '',
|
||||
*,
|
||||
mandatory: bool = False,
|
||||
immediate: bool = False,
|
||||
timeout: float = None,
|
||||
argsig: str = 'Bssbb') -> None:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
async def basic_qos(
|
||||
self,
|
||||
prefetch_size: int,
|
||||
prefetch_count: int,
|
||||
a_global: bool,
|
||||
argsig: str = 'lBb') -> None:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
async def basic_recover(self, *, requeue: bool = False) -> None:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
async def basic_recover_async(self, *, requeue: bool = False) -> None:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
async def basic_reject(self, delivery_tag: str,
|
||||
*,
|
||||
requeue: bool = False,
|
||||
argsig: str = 'Lb') -> None:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
async def tx_commit(self) -> None:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
async def tx_rollback(self) -> None:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
async def tx_select(self) -> None:
|
||||
...
|
||||
|
||||
@abc.abstractmethod
|
||||
async def confirm_select(self, *, nowait: bool = False) -> None:
|
||||
...
|
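Taken together, these stubs spell out the async AMQP client surface. A rough usage sketch against the abstract API above (illustrative only: the concrete `connection` and `msg` objects would come from an implementing library, and the queue name is made up):

async def publish_task(connection: ConnectionT, msg: MessageT) -> None:
    # Claim a channel by id, declare a durable queue, publish, and close.
    channel = connection.channel(1)
    await channel.queue_declare(queue='tasks', durable=True, auto_delete=False)
    await channel.basic_publish(msg, routing_key='tasks')
    await channel.close()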
@ -0,0 +1,64 @@
"""Compatibility utilities."""
import logging
from logging import NullHandler

# enables celery 3.1.23 to start again
from vine import promise  # noqa
from vine.utils import wraps

try:
    import fcntl
except ImportError:  # pragma: no cover
    fcntl = None  # noqa


def set_cloexec(fd, cloexec):
    """Set flag to close fd after exec."""
    if fcntl is None:
        return
    try:
        FD_CLOEXEC = fcntl.FD_CLOEXEC
    except AttributeError:
        raise NotImplementedError(
            'close-on-exec flag not supported on this platform',
        )
    flags = fcntl.fcntl(fd, fcntl.F_GETFD)
    if cloexec:
        flags |= FD_CLOEXEC
    else:
        flags &= ~FD_CLOEXEC
    return fcntl.fcntl(fd, fcntl.F_SETFD, flags)


def coro(gen):
    """Decorator to mark generator as a co-routine."""
    @wraps(gen)
    def _boot(*args, **kwargs):
        co = gen(*args, **kwargs)
        next(co)
        return co

    return _boot


def str_to_bytes(s):
    """Convert str to bytes."""
    if isinstance(s, str):
        return s.encode('utf-8', 'surrogatepass')
    return s


def bytes_to_str(s):
    """Convert bytes to str."""
    if isinstance(s, bytes):
        return s.decode('utf-8', 'surrogatepass')
    return s


def get_logger(logger):
    """Get logger by name."""
    if isinstance(logger, str):
        logger = logging.getLogger(logger)
    if not logger.handlers:
        logger.addHandler(NullHandler())
    return logger
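As a small illustration (not part of the vendored file), the `coro` decorator primes a generator so callers can `send()` into it immediately:

@coro
def averager():
    # next() has already been called by @coro, so execution is paused at
    # the first yield and the generator is ready to receive values.
    total, count, average = 0.0, 0, None
    while True:
        value = yield average
        total += value
        count += 1
        average = total / count

avg = averager()
print(avg.send(10))  # 10.0
print(avg.send(20))  # 15.0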
@ -0,0 +1 @@
pip
@ -0,0 +1,27 @@
Copyright (c) Django Software Foundation and individual contributors.
All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

    1. Redistributions of source code must retain the above copyright notice,
       this list of conditions and the following disclaimer.

    2. Redistributions in binary form must reproduce the above copyright
       notice, this list of conditions and the following disclaimer in the
       documentation and/or other materials provided with the distribution.

    3. Neither the name of Django nor the names of its contributors may be used
       to endorse or promote products derived from this software without
       specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@ -0,0 +1,248 @@
Metadata-Version: 2.1
Name: asgiref
Version: 3.4.1
Summary: ASGI specs, helper code, and adapters
Home-page: https://github.com/django/asgiref/
Author: Django Software Foundation
Author-email: foundation@djangoproject.com
License: BSD
Project-URL: Documentation, https://asgi.readthedocs.io/
Project-URL: Further Documentation, https://docs.djangoproject.com/en/stable/topics/async/#async-adapter-functions
Project-URL: Changelog, https://github.com/django/asgiref/blob/master/CHANGELOG.txt
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: Web Environment
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Topic :: Internet :: WWW/HTTP
Requires-Python: >=3.6
License-File: LICENSE
Requires-Dist: typing-extensions ; python_version < "3.8"
Provides-Extra: tests
Requires-Dist: pytest ; extra == 'tests'
Requires-Dist: pytest-asyncio ; extra == 'tests'
Requires-Dist: mypy (>=0.800) ; extra == 'tests'

asgiref
=======

.. image:: https://api.travis-ci.org/django/asgiref.svg
    :target: https://travis-ci.org/django/asgiref

.. image:: https://img.shields.io/pypi/v/asgiref.svg
    :target: https://pypi.python.org/pypi/asgiref

ASGI is a standard for Python asynchronous web apps and servers to communicate
with each other, and positioned as an asynchronous successor to WSGI. You can
read more at https://asgi.readthedocs.io/en/latest/

This package includes ASGI base libraries, such as:

* Sync-to-async and async-to-sync function wrappers, ``asgiref.sync``
* Server base classes, ``asgiref.server``
* A WSGI-to-ASGI adapter, in ``asgiref.wsgi``


Function wrappers
-----------------

These allow you to wrap or decorate async or sync functions to call them from
the other style (so you can call async functions from a synchronous thread,
or vice-versa).

In particular:

* AsyncToSync lets a synchronous subthread stop and wait while the async
  function is called on the main thread's event loop, and then control is
  returned to the thread when the async function is finished.

* SyncToAsync lets async code call a synchronous function, which is run in
  a threadpool and control returned to the async coroutine when the synchronous
  function completes.

The idea is to make it easier to call synchronous APIs from async code and
asynchronous APIs from synchronous code so it's easier to transition code from
one style to the other. In the case of Channels, we wrap the (synchronous)
Django view system with SyncToAsync to allow it to run inside the (asynchronous)
ASGI server.

Note that exactly what threads things run in is very specific, and aimed to
keep maximum compatibility with old synchronous code. See
"Synchronous code & Threads" below for a full explanation. By default,
``sync_to_async`` will run all synchronous code in the program in the same
thread for safety reasons; you can disable this for more performance with
``@sync_to_async(thread_sensitive=False)``, but make sure that your code does
not rely on anything bound to threads (like database connections) when you do.
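
For example, a minimal sketch using the public helpers from ``asgiref.sync``
(the helper names are the real API; the wrapped functions are invented for
illustration)::

    import time
    from asgiref.sync import async_to_sync, sync_to_async

    async def fetch():
        return 42

    # Call the async function from synchronous code:
    result = async_to_sync(fetch)()  # == 42

    async def handler():
        # Await a blocking callable from async code; by default it runs on
        # the shared thread-sensitive thread.
        await sync_to_async(time.sleep)(0.1)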


Threadlocal replacement
-----------------------

This is a drop-in replacement for ``threading.local`` that works with both
threads and asyncio Tasks. Even better, it will proxy values through from a
task-local context to a thread-local context when you use ``sync_to_async``
to run things in a threadpool, and vice-versa for ``async_to_sync``.

If you instead want true thread- and task-safety, you can set
``thread_critical`` on the Local object to ensure this instead.
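
For example (``Local`` is the real class from ``asgiref.local``; the attribute
name is arbitrary)::

    from asgiref.local import Local
    from asgiref.sync import sync_to_async

    state = Local()

    def report():
        # Sees the value set in the coroutine, because Local proxies
        # task-local values into threads entered via sync_to_async.
        print(state.user)

    async def handler():
        state.user = "andrew"
        await sync_to_async(report)()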


Server base classes
-------------------

Includes a ``StatelessServer`` class which provides all the hard work of
writing a stateless server (as in, does not handle direct incoming sockets
but instead consumes external streams or sockets to work out what is happening).

An example of such a server would be a chatbot server that connects out to
a central chat server and provides a "connection scope" per user chatting to
it. There's only one actual connection, but the server has to separate things
into several scopes for easier writing of the code.

You can see an example of this being used in `frequensgi <https://github.com/andrewgodwin/frequensgi>`_.
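
A minimal subclass sketch (``StatelessServer``, ``handle`` and
``get_or_create_application_instance`` are the real API; the event source is
hypothetical)::

    from asgiref.server import StatelessServer

    class ChatServer(StatelessServer):
        async def handle(self):
            # Route events from some external feed to per-user instances.
            async for user_id, message in external_events():  # hypothetical
                queue = self.get_or_create_application_instance(
                    f"user-{user_id}",
                    {"type": "chat", "user_id": user_id},
                )
                queue.put_nowait(message)

        async def application_send(self, scope, message):
            ...  # deliver outbound messages back to the chat service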


WSGI-to-ASGI adapter
--------------------

Allows you to wrap a WSGI application so it appears as a valid ASGI application.

Simply wrap it around your WSGI application like so::

    asgi_application = WsgiToAsgi(wsgi_application)

The WSGI application will be run in a synchronous threadpool, and the wrapped
ASGI application will be one that accepts ``http`` class messages.

Please note that not all extended features of WSGI may be supported (such as
file handles for incoming POST bodies).
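
A slightly fuller sketch (``WsgiToAsgi`` is importable from ``asgiref.wsgi``;
the WSGI app here is a stand-in)::

    from asgiref.wsgi import WsgiToAsgi

    def wsgi_application(environ, start_response):
        start_response("200 OK", [("Content-Type", "text/plain")])
        return [b"Hello from WSGI"]

    # Serve with any ASGI server, e.g. uvicorn module:asgi_application
    asgi_application = WsgiToAsgi(wsgi_application)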


Dependencies
------------

``asgiref`` requires Python 3.6 or higher.


Contributing
------------

Please refer to the
`main Channels contributing docs <https://github.com/django/channels/blob/master/CONTRIBUTING.rst>`_.


Testing
'''''''

To run tests, make sure you have installed the ``tests`` extra with the package::

    cd asgiref/
    pip install -e .[tests]
    pytest


Building the documentation
''''''''''''''''''''''''''

The documentation uses `Sphinx <http://www.sphinx-doc.org>`_::

    cd asgiref/docs/
    pip install sphinx

To build the docs, you can use the default tools::

    sphinx-build -b html . _build/html  # or `make html`, if you've got make set up
    cd _build/html
    python -m http.server

...or you can use ``sphinx-autobuild`` to run a server and rebuild/reload
your documentation changes automatically::

    pip install sphinx-autobuild
    sphinx-autobuild . _build/html


Releasing
'''''''''

To release, first add details to CHANGELOG.txt and update the version number in ``asgiref/__init__.py``.

Then, build and push the packages::

    python -m build
    twine upload dist/*
    rm -r build/ dist/


Implementation Details
----------------------

Synchronous code & threads
''''''''''''''''''''''''''

The ``asgiref.sync`` module provides two wrappers that let you go between
asynchronous and synchronous code at will, while taking care of the rough edges
for you.

Unfortunately, the rough edges are numerous, and the code has to work especially
hard to keep things in the same thread as much as possible. Notably, the
restrictions we are working with are:

* All synchronous code called through ``SyncToAsync`` and marked with
  ``thread_sensitive`` should run in the same thread as each other (and if the
  outer layer of the program is synchronous, the main thread)

* If a thread already has a running async loop, ``AsyncToSync`` can't run things
  on that loop if it's blocked on synchronous code that is above you in the
  call stack.

The first compromise you get to might be that ``thread_sensitive`` code should
just run in the same thread and not spawn in a sub-thread, fulfilling the first
restriction, but that immediately runs you into the second restriction.

The only real solution is to essentially have a variant of ThreadPoolExecutor
that executes any ``thread_sensitive`` code on the outermost synchronous
thread - either the main thread, or a single spawned subthread.

This means you now have two basic states:

* If the outermost layer of your program is synchronous, then all async code
  run through ``AsyncToSync`` will run in a per-call event loop in arbitrary
  sub-threads, while all ``thread_sensitive`` code will run in the main thread.

* If the outermost layer of your program is asynchronous, then all async code
  runs on the main thread's event loop, and all ``thread_sensitive`` synchronous
  code will run in a single shared sub-thread.

Crucially, this means that in both cases there is a thread which is a shared
resource that all ``thread_sensitive`` code must run on, and there is a chance
that this thread is currently blocked on its own ``AsyncToSync`` call. Thus,
``AsyncToSync`` needs to act as an executor for thread code while it's blocking.

The ``CurrentThreadExecutor`` class provides this functionality; rather than
simply waiting on a Future, you can call its ``run_until_future`` method and
it will run submitted code until that Future is done. This means that code
inside the call can then run code on your thread.
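
The core pattern looks like this (``CurrentThreadExecutor`` is the real, if
internal, class; the surrounding scaffolding is illustrative)::

    import threading
    from concurrent.futures import Future
    from asgiref.current_thread_executor import CurrentThreadExecutor

    executor = CurrentThreadExecutor()
    done = Future()

    def other_thread():
        # Another thread pushes work back onto the creating thread.
        executor.submit(print, "runs on the original thread").result()
        done.set_result(None)

    threading.Thread(target=other_thread).start()
    # Blocks, but keeps servicing submitted work items until `done` resolves.
    executor.run_until_future(done)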


Maintenance and Security
------------------------

To report security issues, please contact security@djangoproject.com. For GPG
signatures and more security process information, see
https://docs.djangoproject.com/en/dev/internals/security/.

To report bugs or request new features, please open a new GitHub issue.

This repository is part of the Channels project. For the shepherd and maintenance team, please see the
`main Channels readme <https://github.com/django/channels/blob/master/README.rst>`_.
@ -0,0 +1,30 @@
asgiref-3.4.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
asgiref-3.4.1.dist-info/LICENSE,sha256=uEZBXRtRTpwd_xSiLeuQbXlLxUbKYSn5UKGM0JHipmk,1552
asgiref-3.4.1.dist-info/METADATA,sha256=TZrVDUz2BP8ewHAkOyGQ8izCsiaq6YHMvj_TW5W7i2E,9162
asgiref-3.4.1.dist-info/RECORD,,
asgiref-3.4.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
asgiref-3.4.1.dist-info/WHEEL,sha256=OqRkF0eY5GHssMorFjlbTIq072vpHpF60fIQA6lS9xA,92
asgiref-3.4.1.dist-info/top_level.txt,sha256=bokQjCzwwERhdBiPdvYEZa4cHxT4NCeAffQNUqJ8ssg,8
asgiref/__init__.py,sha256=z3MJNttjzZJkd4Yv_Ut_X2qO_gIKi4TijrHVpefXuRM,22
asgiref/__pycache__/__init__.cpython-38.pyc,,
asgiref/__pycache__/_pep562.cpython-38.pyc,,
asgiref/__pycache__/compatibility.cpython-38.pyc,,
asgiref/__pycache__/current_thread_executor.cpython-38.pyc,,
asgiref/__pycache__/local.cpython-38.pyc,,
asgiref/__pycache__/server.cpython-38.pyc,,
asgiref/__pycache__/sync.cpython-38.pyc,,
asgiref/__pycache__/testing.cpython-38.pyc,,
asgiref/__pycache__/timeout.cpython-38.pyc,,
asgiref/__pycache__/typing.cpython-38.pyc,,
asgiref/__pycache__/wsgi.cpython-38.pyc,,
asgiref/_pep562.py,sha256=fyD3JhfLtViIGeXBtvhhbnbQ-R_8-nmwzbXHhncY6ow,2684
asgiref/compatibility.py,sha256=4Plx8PT3wlDzZeuCN2cATfaXq6rya1OuSdJ262I5S6Y,2022
asgiref/current_thread_executor.py,sha256=oeH8zv2tTmcbpxdUmOSMzbEXzeY5nJzIMFvzprE95gA,2801
asgiref/local.py,sha256=D9kRIDARSUixNbxK8HL2O8vFhRCx_fc3fFB9uv0vG-g,4892
asgiref/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
asgiref/server.py,sha256=lAxZxOxkdvxB073ZtYOAzN1JZ8aV-DOiFpVQJZ0X2FI,6018
asgiref/sync.py,sha256=LCEHMPNiuoVtKrmI0kksoHeFjoSS5zLEG-n95UNd91Q,20310
asgiref/testing.py,sha256=3byNRV7Oto_Fg8Z-fErQJ3yGf7OQlcUexbN_cDQugzQ,3119
asgiref/timeout.py,sha256=UUYuUSY30dsqBsVzVAS7z9raQ9ntZGktScJw_Y_9iSU,3889
asgiref/typing.py,sha256=-2wmtHqkhzV52rbMfipGTJmo8jUoU0i5AQECFH6y7aY,6722
asgiref/wsgi.py,sha256=-L0eo_uK_dq7EPjv1meW1BRGytURaO9NPESxnJc9CtA,6575
@ -0,0 +1,5 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.36.2)
Root-Is-Purelib: true
Tag: py3-none-any

@ -0,0 +1 @@
asgiref
@ -0,0 +1 @@
__version__ = "3.4.1"
@ -0,0 +1,61 @@
"""
Backport of PEP 562.
https://pypi.org/search/?q=pep562
Licensed under MIT
Copyright (c) 2018 Isaac Muse <isaacmuse@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
import sys
from typing import Any, Callable, List, Optional


class Pep562:
    """
    Backport of PEP 562 <https://pypi.org/search/?q=pep562>.
    Wraps the module in a class that exposes the mechanics to override `__dir__` and `__getattr__`.
    The given module will be searched for overrides of `__dir__` and `__getattr__` and use them when needed.
    """

    def __init__(self, name: str) -> None:
        """Acquire `__getattr__` and `__dir__`, but only replace module for versions less than Python 3.7."""

        self._module = sys.modules[name]
        self._get_attr = getattr(self._module, "__getattr__", None)
        self._get_dir: Optional[Callable[..., List[str]]] = getattr(
            self._module, "__dir__", None
        )
        sys.modules[name] = self  # type: ignore[assignment]

    def __dir__(self) -> List[str]:
        """Return the overridden `dir` if one was provided, else apply `dir` to the module."""

        return self._get_dir() if self._get_dir else dir(self._module)

    def __getattr__(self, name: str) -> Any:
        """
        Attempt to retrieve the attribute from the module, and if missing, use the overridden function if present.
        """

        try:
            return getattr(self._module, name)
        except AttributeError:
            if self._get_attr:
                return self._get_attr(name)
            raise


def pep562(module_name: str) -> None:
    """Helper function to apply PEP 562."""

    if sys.version_info < (3, 7):
        Pep562(module_name)
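To show how a module opts in, a hypothetical module using this backport (the rename table and names are invented for illustration):

# mymodule.py
from asgiref._pep562 import pep562

_RENAMED = {"old_name": "new_name"}

def __getattr__(name):
    # PEP 562 semantics: called only when normal attribute lookup fails.
    if name in _RENAMED:
        return _RENAMED[name]
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

# On Python < 3.7 this wraps the module so __getattr__ above takes effect.
pep562(__name__)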
@ -0,0 +1,61 @@
import asyncio
import inspect
import sys


def is_double_callable(application):
    """
    Tests to see if an application is a legacy-style (double-callable) application.
    """
    # Look for a hint on the object first
    if getattr(application, "_asgi_single_callable", False):
        return False
    if getattr(application, "_asgi_double_callable", False):
        return True
    # Uninstantiated classes are double-callable
    if inspect.isclass(application):
        return True
    # Instantiated classes depend on their __call__
    if hasattr(application, "__call__"):
        # We only check to see if its __call__ is a coroutine function -
        # if it's not, it still might be a coroutine function itself.
        if asyncio.iscoroutinefunction(application.__call__):
            return False
    # Non-classes we just check directly
    return not asyncio.iscoroutinefunction(application)


def double_to_single_callable(application):
    """
    Transforms a double-callable ASGI application into a single-callable one.
    """

    async def new_application(scope, receive, send):
        instance = application(scope)
        return await instance(receive, send)

    return new_application


def guarantee_single_callable(application):
    """
    Takes either a single- or double-callable application and always returns it
    in single-callable style. Use this to add backwards compatibility for ASGI
    2.0 applications to your server/test harness/etc.
    """
    if is_double_callable(application):
        application = double_to_single_callable(application)
    return application


if sys.version_info >= (3, 7):
    # these were introduced in 3.7
    get_running_loop = asyncio.get_running_loop
    run_future = asyncio.run
    create_task = asyncio.create_task
else:
    # marked as deprecated in 3.10, did not exist before 3.7
    get_running_loop = asyncio.get_event_loop
    run_future = asyncio.ensure_future
    # does nothing, this is fine for <3.7
    create_task = lambda task: task
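For instance, adapting a legacy double-callable ASGI 2.0 application (the class below is illustrative, not part of the vendored file):

class LegacyApp:
    # ASGI 2.0 style: instantiated with the scope, then awaited.
    def __init__(self, scope):
        self.scope = scope

    async def __call__(self, receive, send):
        await send({"type": "http.response.start", "status": 200, "headers": []})
        await send({"type": "http.response.body", "body": b"ok"})

app = guarantee_single_callable(LegacyApp)
# `app` can now be called in ASGI 3.0 style: await app(scope, receive, send)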
@ -0,0 +1,81 @@
import queue
import threading
from concurrent.futures import Executor, Future


class _WorkItem:
    """
    Represents an item needing to be run in the executor.
    Copied from ThreadPoolExecutor (but it's private, so we're not going to rely on importing it)
    """

    def __init__(self, future, fn, args, kwargs):
        self.future = future
        self.fn = fn
        self.args = args
        self.kwargs = kwargs

    def run(self):
        if not self.future.set_running_or_notify_cancel():
            return
        try:
            result = self.fn(*self.args, **self.kwargs)
        except BaseException as exc:
            self.future.set_exception(exc)
            # Break a reference cycle with the exception 'exc'
            self = None
        else:
            self.future.set_result(result)


class CurrentThreadExecutor(Executor):
    """
    An Executor that actually runs code in the thread it is instantiated in.
    Passed to other threads running async code, so they can run sync code in
    the thread they came from.
    """

    def __init__(self):
        self._work_thread = threading.current_thread()
        self._work_queue = queue.Queue()
        self._broken = False

    def run_until_future(self, future):
        """
        Runs the code in the work queue until a result is available from the future.
        Should be run from the thread the executor is initialised in.
        """
        # Check we're in the right thread
        if threading.current_thread() != self._work_thread:
            raise RuntimeError(
                "You cannot run CurrentThreadExecutor from a different thread"
            )
        future.add_done_callback(self._work_queue.put)
        # Keep getting and running work items until we get the future we're waiting for
        # back via the future's done callback.
        try:
            while True:
                # Get a work item and run it
                work_item = self._work_queue.get()
                if work_item is future:
                    return
                work_item.run()
                del work_item
        finally:
            self._broken = True

    def submit(self, fn, *args, **kwargs):
        # Check they're not submitting from the same thread
        if threading.current_thread() == self._work_thread:
            raise RuntimeError(
                "You cannot submit onto CurrentThreadExecutor from its own thread"
            )
        # Check they're not too late or the executor errored
        if self._broken:
            raise RuntimeError("CurrentThreadExecutor already quit or is broken")
        # Add to work queue
        f = Future()
        work_item = _WorkItem(f, fn, args, kwargs)
        self._work_queue.put(work_item)
        # Return the future
        return f
@ -0,0 +1,122 @@
import random
import string
import sys
import threading
import weakref


class Local:
    """
    A drop-in replacement for threading.locals that also works with asyncio
    Tasks (via the current_task asyncio method), and passes locals through
    sync_to_async and async_to_sync.

    Specifically:
     - Locals work per-coroutine on any thread not spawned using asgiref
     - Locals work per-thread on any thread not spawned using asgiref
     - Locals are shared with the parent coroutine when using sync_to_async
     - Locals are shared with the parent thread when using async_to_sync
       (and if that thread was launched using sync_to_async, with its parent
       coroutine as well, with this working for indefinite levels of nesting)

    Set thread_critical to True to not allow locals to pass from an async Task
    to a thread it spawns. This is needed for code that truly needs
    thread-safety, as opposed to things used for helpful context (e.g. sqlite
    does not like being called from a different thread to the one it is from).
    Thread-critical code will still be differentiated per-Task within a thread
    as it is expected it does not like concurrent access.

    This doesn't use contextvars as it needs to support 3.6. Once it can support
    3.7 only, we can then reimplement the storage more nicely.
    """

    CLEANUP_INTERVAL = 60  # seconds

    def __init__(self, thread_critical: bool = False) -> None:
        self._thread_critical = thread_critical
        self._thread_lock = threading.RLock()
        self._context_refs: "weakref.WeakSet[object]" = weakref.WeakSet()
        # Random suffixes stop accidental reuse between different Locals,
        # though we try to force deletion as well.
        self._attr_name = "_asgiref_local_impl_{}_{}".format(
            id(self),
            "".join(random.choice(string.ascii_letters) for i in range(8)),
        )

    def _get_context_id(self):
        """
        Get the ID we should use for looking up variables
        """
        # Prevent a circular reference
        from .sync import AsyncToSync, SyncToAsync

        # First, pull the current task if we can
        context_id = SyncToAsync.get_current_task()
        context_is_async = True
        # OK, let's try for a thread ID
        if context_id is None:
            context_id = threading.current_thread()
            context_is_async = False
        # If we're thread-critical, we stop here, as we can't share contexts.
        if self._thread_critical:
            return context_id
        # Now, take those and see if we can resolve them through the launch maps
        for i in range(sys.getrecursionlimit()):
            try:
                if context_is_async:
                    # Tasks have a source thread in AsyncToSync
                    context_id = AsyncToSync.launch_map[context_id]
                    context_is_async = False
                else:
                    # Threads have a source task in SyncToAsync
                    context_id = SyncToAsync.launch_map[context_id]
                    context_is_async = True
            except KeyError:
                break
        else:
            # Catch infinite loops (they happen if you are screwing around
            # with AsyncToSync implementations)
            raise RuntimeError("Infinite launch_map loops")
        return context_id

    def _get_storage(self):
        context_obj = self._get_context_id()
        if not hasattr(context_obj, self._attr_name):
            setattr(context_obj, self._attr_name, {})
            self._context_refs.add(context_obj)
        return getattr(context_obj, self._attr_name)

    def __del__(self):
        try:
            for context_obj in self._context_refs:
                try:
                    delattr(context_obj, self._attr_name)
                except AttributeError:
                    pass
        except TypeError:
            # WeakSet.__iter__ can crash when interpreter is shutting down due
            # to _IterationGuard being None.
            pass

    def __getattr__(self, key):
        with self._thread_lock:
            storage = self._get_storage()
            if key in storage:
                return storage[key]
            else:
                raise AttributeError(f"{self!r} object has no attribute {key!r}")

    def __setattr__(self, key, value):
        if key in ("_context_refs", "_thread_critical", "_thread_lock", "_attr_name"):
            return super().__setattr__(key, value)
        with self._thread_lock:
            storage = self._get_storage()
            storage[key] = value

    def __delattr__(self, key):
        with self._thread_lock:
            storage = self._get_storage()
            if key in storage:
                del storage[key]
            else:
                raise AttributeError(f"{self!r} object has no attribute {key!r}")
@ -0,0 +1,157 @@
import asyncio
import logging
import time
import traceback

from .compatibility import get_running_loop, guarantee_single_callable, run_future

logger = logging.getLogger(__name__)


class StatelessServer:
    """
    Base server class that handles basic concepts like application instance
    creation/pooling, exception handling, and similar, for stateless protocols
    (i.e. ones without actual incoming connections to the process)

    Your code should override the handle() method, doing whatever it needs to,
    and calling get_or_create_application_instance with a unique `scope_id`
    and `scope` for the scope it wants to get.

    If an application instance is found with the same `scope_id`, you are
    given its input queue, otherwise one is made for you with the scope provided
    and you are given that fresh new input queue. Either way, you should do
    something like:

    input_queue = self.get_or_create_application_instance(
        "user-123456",
        {"type": "testprotocol", "user_id": "123456", "username": "andrew"},
    )
    input_queue.put_nowait(message)

    If you try and create an application instance and there are already
    `max_application` instances, the oldest/least recently used one will be
    reclaimed and shut down to make space.

    Application coroutines that error will be found periodically (every 100ms
    by default) and have their exceptions printed to the console. Override
    application_exception() if you want to do more when this happens.

    If you override run(), make sure you handle things like launching the
    application checker.
    """

    application_checker_interval = 0.1

    def __init__(self, application, max_applications=1000):
        # Parameters
        self.application = application
        self.max_applications = max_applications
        # Initialisation
        self.application_instances = {}

    ### Mainloop and handling

    def run(self):
        """
        Runs the asyncio event loop with our handler loop.
        """
        event_loop = get_running_loop()
        asyncio.ensure_future(self.application_checker())
        try:
            event_loop.run_until_complete(self.handle())
        except KeyboardInterrupt:
            logger.info("Exiting due to Ctrl-C/interrupt")

    async def handle(self):
        raise NotImplementedError("You must implement handle()")

    async def application_send(self, scope, message):
        """
        Receives outbound sends from applications and handles them.
        """
        raise NotImplementedError("You must implement application_send()")

    ### Application instance management

    def get_or_create_application_instance(self, scope_id, scope):
        """
        Creates an application instance and returns its queue.
        """
        if scope_id in self.application_instances:
            self.application_instances[scope_id]["last_used"] = time.time()
            return self.application_instances[scope_id]["input_queue"]
        # See if we need to delete an old one
        while len(self.application_instances) > self.max_applications:
            self.delete_oldest_application_instance()
        # Make an instance of the application
        input_queue = asyncio.Queue()
        application_instance = guarantee_single_callable(self.application)
        # Run it, and stash the future for later checking
        future = run_future(
            application_instance(
                scope=scope,
                receive=input_queue.get,
                send=lambda message: self.application_send(scope, message),
            ),
        )
        self.application_instances[scope_id] = {
            "input_queue": input_queue,
            "future": future,
            "scope": scope,
            "last_used": time.time(),
        }
        return input_queue

    def delete_oldest_application_instance(self):
        """
        Finds and deletes the oldest application instance
        """
        oldest_time = min(
            details["last_used"] for details in self.application_instances.values()
        )
        for scope_id, details in self.application_instances.items():
            if details["last_used"] == oldest_time:
                self.delete_application_instance(scope_id)
                # Return to make sure we only delete one in case two have
                # the same oldest time
                return

    def delete_application_instance(self, scope_id):
        """
        Removes an application instance (makes sure its task is stopped,
        then removes it from the current set)
        """
        details = self.application_instances[scope_id]
        del self.application_instances[scope_id]
        if not details["future"].done():
            details["future"].cancel()

    async def application_checker(self):
        """
        Goes through the set of current application instance Futures and cleans up
        any that are done/prints exceptions for any that errored.
        """
        while True:
            await asyncio.sleep(self.application_checker_interval)
            for scope_id, details in list(self.application_instances.items()):
                if details["future"].done():
                    exception = details["future"].exception()
                    if exception:
                        await self.application_exception(exception, details)
                    try:
                        del self.application_instances[scope_id]
                    except KeyError:
                        # Exception handling might have already got here before us. That's fine.
                        pass

    async def application_exception(self, exception, application_details):
        """
        Called whenever an application coroutine has an exception.
        """
        logging.error(
            "Exception inside application: %s\n%s%s",
            exception,
            "".join(traceback.format_tb(exception.__traceback__)),
            f"  {exception}",
        )
@ -0,0 +1,548 @@
import asyncio.coroutines
import functools
import inspect
import os
import sys
import threading
import warnings
import weakref
from concurrent.futures import Future, ThreadPoolExecutor
from typing import Any, Callable, Dict, Optional, overload

from .compatibility import get_running_loop
from .current_thread_executor import CurrentThreadExecutor
from .local import Local

if sys.version_info >= (3, 7):
    import contextvars
else:
    contextvars = None


def _restore_context(context):
    # Check for changes in contextvars, and set them to the current
    # context for downstream consumers
    for cvar in context:
        try:
            if cvar.get() != context.get(cvar):
                cvar.set(context.get(cvar))
        except LookupError:
            cvar.set(context.get(cvar))


def _iscoroutinefunction_or_partial(func: Any) -> bool:
    # Python < 3.8 does not correctly determine partially wrapped
    # coroutine functions are coroutine functions, hence the need for
    # this to exist. Code taken from CPython.
    if sys.version_info >= (3, 8):
        return asyncio.iscoroutinefunction(func)
    else:
        while inspect.ismethod(func):
            func = func.__func__
        while isinstance(func, functools.partial):
            func = func.func

        return asyncio.iscoroutinefunction(func)


class ThreadSensitiveContext:
    """Async context manager to manage context for thread sensitive mode

    This context manager controls which thread pool executor is used when in
    thread sensitive mode. By default, a single thread pool executor is shared
    within a process.

    In Python 3.7+, the ThreadSensitiveContext() context manager may be used to
    specify a thread pool per context.

    In Python 3.6, usage of this context manager has no effect.

    This context manager is re-entrant, so only the outer-most call to
    ThreadSensitiveContext will set the context.

    Usage:

    >>> import time
    >>> async with ThreadSensitiveContext():
    ...     await sync_to_async(time.sleep, 1)()
    """

    def __init__(self):
        self.token = None

    if contextvars:

        async def __aenter__(self):
            try:
                SyncToAsync.thread_sensitive_context.get()
            except LookupError:
                self.token = SyncToAsync.thread_sensitive_context.set(self)

            return self

        async def __aexit__(self, exc, value, tb):
            if not self.token:
                return

            executor = SyncToAsync.context_to_thread_executor.pop(self, None)
            if executor:
                executor.shutdown()
            SyncToAsync.thread_sensitive_context.reset(self.token)

    else:

        async def __aenter__(self):
            return self

        async def __aexit__(self, exc, value, tb):
            pass


class AsyncToSync:
    """
    Utility class which turns an awaitable that only works on the thread with
    the event loop into a synchronous callable that works in a subthread.

    If the call stack contains an async loop, the code runs there.
    Otherwise, the code runs in a new loop in a new thread.

    Either way, this thread then pauses and waits to run any thread_sensitive
    code called from further down the call stack using SyncToAsync, before
    finally exiting once the async task returns.
    """

    # Maps launched Tasks to the threads that launched them (for locals impl)
    launch_map: "Dict[asyncio.Task[object], threading.Thread]" = {}

    # Keeps track of which CurrentThreadExecutor to use. This uses an asgiref
    # Local, not a threadlocal, so that tasks can work out what their parent used.
    executors = Local()

    def __init__(self, awaitable, force_new_loop=False):
        if not callable(awaitable) or not _iscoroutinefunction_or_partial(awaitable):
            # Python does not have very reliable detection of async functions
            # (lots of false negatives) so this is just a warning.
            warnings.warn("async_to_sync was passed a non-async-marked callable")
        self.awaitable = awaitable
        try:
            self.__self__ = self.awaitable.__self__
        except AttributeError:
            pass
        if force_new_loop:
            # They have asked that we always run in a new sub-loop.
            self.main_event_loop = None
        else:
            try:
                self.main_event_loop = get_running_loop()
            except RuntimeError:
                # There's no event loop in this thread. Look for the threadlocal if
                # we're inside SyncToAsync
                main_event_loop_pid = getattr(
                    SyncToAsync.threadlocal, "main_event_loop_pid", None
                )
                # We make sure the parent loop is from the same process - if
                # they've forked, this is not going to be valid any more (#194)
                if main_event_loop_pid and main_event_loop_pid == os.getpid():
                    self.main_event_loop = getattr(
                        SyncToAsync.threadlocal, "main_event_loop", None
                    )
                else:
                    self.main_event_loop = None

    def __call__(self, *args, **kwargs):
        # You can't call AsyncToSync from a thread with a running event loop
        try:
            event_loop = get_running_loop()
        except RuntimeError:
            pass
        else:
            if event_loop.is_running():
                raise RuntimeError(
                    "You cannot use AsyncToSync in the same thread as an async event loop - "
                    "just await the async function directly."
                )

        if contextvars is not None:
            # Wrapping context in list so it can be reassigned from within
            # `main_wrap`.
            context = [contextvars.copy_context()]
        else:
            context = None

        # Make a future for the return information
        call_result = Future()
        # Get the source thread
        source_thread = threading.current_thread()
        # Make a CurrentThreadExecutor we'll use to idle in this thread - we
        # need one for every sync frame, even if there's one above us in the
        # same thread.
        if hasattr(self.executors, "current"):
            old_current_executor = self.executors.current
        else:
            old_current_executor = None
        current_executor = CurrentThreadExecutor()
        self.executors.current = current_executor
        # Use call_soon_threadsafe to schedule a synchronous callback on the
        # main event loop's thread if it's there, otherwise make a new loop
        # in this thread.
        try:
            awaitable = self.main_wrap(
                args, kwargs, call_result, source_thread, sys.exc_info(), context
            )

            if not (self.main_event_loop and self.main_event_loop.is_running()):
                # Make our own event loop - in a new thread - and run inside that.
                loop = asyncio.new_event_loop()
                loop_executor = ThreadPoolExecutor(max_workers=1)
                loop_future = loop_executor.submit(
                    self._run_event_loop, loop, awaitable
                )
                if current_executor:
                    # Run the CurrentThreadExecutor until the future is done
                    current_executor.run_until_future(loop_future)
                # Wait for future and/or allow for exception propagation
                loop_future.result()
            else:
                # Call it inside the existing loop
                self.main_event_loop.call_soon_threadsafe(
                    self.main_event_loop.create_task, awaitable
                )
                if current_executor:
                    # Run the CurrentThreadExecutor until the future is done
                    current_executor.run_until_future(call_result)
        finally:
            # Clean up any executor we were running
            if hasattr(self.executors, "current"):
                del self.executors.current
            if old_current_executor:
                self.executors.current = old_current_executor
            if contextvars is not None:
                _restore_context(context[0])

        # Wait for results from the future.
        return call_result.result()

    def _run_event_loop(self, loop, coro):
        """
        Runs the given event loop (designed to be called in a thread).
        """
        asyncio.set_event_loop(loop)
        try:
            loop.run_until_complete(coro)
        finally:
            try:
                # mimic asyncio.run() behavior
                # cancel unexhausted async generators
                if sys.version_info >= (3, 7, 0):
                    tasks = asyncio.all_tasks(loop)
                else:
                    tasks = asyncio.Task.all_tasks(loop)
                for task in tasks:
                    task.cancel()

                async def gather():
                    await asyncio.gather(*tasks, return_exceptions=True)

                loop.run_until_complete(gather())
                for task in tasks:
                    if task.cancelled():
                        continue
                    if task.exception() is not None:
                        loop.call_exception_handler(
                            {
                                "message": "unhandled exception during loop shutdown",
                                "exception": task.exception(),
                                "task": task,
                            }
                        )
                if hasattr(loop, "shutdown_asyncgens"):
                    loop.run_until_complete(loop.shutdown_asyncgens())
            finally:
                loop.close()
                asyncio.set_event_loop(self.main_event_loop)

    def __get__(self, parent, objtype):
        """
        Include self for methods
        """
        func = functools.partial(self.__call__, parent)
        return functools.update_wrapper(func, self.awaitable)

    async def main_wrap(
        self, args, kwargs, call_result, source_thread, exc_info, context
    ):
        """
        Wraps the awaitable with something that puts the result into the
        result/exception future.
        """
        if context is not None:
            _restore_context(context[0])

        current_task = SyncToAsync.get_current_task()
        self.launch_map[current_task] = source_thread
        try:
            # If we have an exception, run the function inside the except block
            # after raising it so exc_info is correctly populated.
            if exc_info[1]:
                try:
                    raise exc_info[1]
                except BaseException:
                    result = await self.awaitable(*args, **kwargs)
            else:
                result = await self.awaitable(*args, **kwargs)
        except BaseException as e:
            call_result.set_exception(e)
        else:
            call_result.set_result(result)
        finally:
            del self.launch_map[current_task]

            if context is not None:
                context[0] = contextvars.copy_context()


class SyncToAsync:
    """
    Utility class which turns a synchronous callable into an awaitable that
    runs in a threadpool. It also sets a threadlocal inside the thread so
    calls to AsyncToSync can escape it.

    If thread_sensitive is passed, the code will run in the same thread as any
    outer code. This is needed for underlying Python code that is not
    threadsafe (for example, code which handles SQLite database connections).

    If the outermost program is async (i.e. SyncToAsync is outermost), then
    this will be a dedicated single sub-thread that all sync code runs in,
    one after the other. If the outermost program is sync (i.e. AsyncToSync is
    outermost), this will just be the main thread. This is achieved by idling
    with a CurrentThreadExecutor while AsyncToSync is blocking its sync parent,
    rather than just blocking.

    If executor is passed in, that will be used instead of the loop's default executor.
    In order to pass in an executor, thread_sensitive must be set to False, otherwise
    a TypeError will be raised.
    """

    # If they've set ASGI_THREADS, update the default asyncio executor for now
    if "ASGI_THREADS" in os.environ:
        loop = get_running_loop()
        loop.set_default_executor(
            ThreadPoolExecutor(max_workers=int(os.environ["ASGI_THREADS"]))
        )

    # Maps launched threads to the coroutines that spawned them
    launch_map: "Dict[threading.Thread, asyncio.Task[object]]" = {}

    # Storage for main event loop references
    threadlocal = threading.local()

    # Single-thread executor for thread-sensitive code
    single_thread_executor = ThreadPoolExecutor(max_workers=1)

    # Maintain a contextvar for the current execution context. Optionally used
    # for thread sensitive mode.
    if sys.version_info >= (3, 7):
        thread_sensitive_context: "contextvars.ContextVar[str]" = (
            contextvars.ContextVar("thread_sensitive_context")
        )
    else:
        thread_sensitive_context: None = None

    # Contextvar that is used to detect if the single thread executor
    # would be awaited on while already being used in the same context
    if sys.version_info >= (3, 7):
        deadlock_context: "contextvars.ContextVar[bool]" = contextvars.ContextVar(
            "deadlock_context"
        )
    else:
        deadlock_context: None = None

    # Maintaining a weak reference to the context ensures that thread pools are
    # erased once the context goes out of scope. This terminates the thread pool.
    context_to_thread_executor: "weakref.WeakKeyDictionary[object, ThreadPoolExecutor]" = (
        weakref.WeakKeyDictionary()
    )

    def __init__(
        self,
        func: Callable[..., Any],
        thread_sensitive: bool = True,
        executor: Optional["ThreadPoolExecutor"] = None,
    ) -> None:
        if not callable(func) or _iscoroutinefunction_or_partial(func):
            raise TypeError("sync_to_async can only be applied to sync functions.")
        self.func = func
        functools.update_wrapper(self, func)
        self._thread_sensitive = thread_sensitive
        self._is_coroutine = asyncio.coroutines._is_coroutine  # type: ignore
        if thread_sensitive and executor is not None:
            raise TypeError("executor must not be set when thread_sensitive is True")
        self._executor = executor
        try:
            self.__self__ = func.__self__  # type: ignore
        except AttributeError:
            pass

    async def __call__(self, *args, **kwargs):
        loop = get_running_loop()

        # Work out what thread to run the code in
        if self._thread_sensitive:
            if hasattr(AsyncToSync.executors, "current"):
                # If we have a parent sync thread above somewhere, use that
                executor = AsyncToSync.executors.current
            elif self.thread_sensitive_context and self.thread_sensitive_context.get(
                None
            ):
                # If we have a way of retrieving the current context, attempt
                # to use a per-context thread pool executor
                thread_sensitive_context = self.thread_sensitive_context.get()
|
||||
if thread_sensitive_context in self.context_to_thread_executor:
|
||||
# Re-use thread executor in current context
|
||||
executor = self.context_to_thread_executor[thread_sensitive_context]
|
||||
else:
|
||||
# Create new thread executor in current context
|
||||
executor = ThreadPoolExecutor(max_workers=1)
|
||||
self.context_to_thread_executor[thread_sensitive_context] = executor
|
||||
elif self.deadlock_context and self.deadlock_context.get(False):
|
||||
raise RuntimeError(
|
||||
"Single thread executor already being used, would deadlock"
|
||||
)
|
||||
else:
|
||||
# Otherwise, we run it in a fixed single thread
|
||||
executor = self.single_thread_executor
|
||||
if self.deadlock_context:
|
||||
self.deadlock_context.set(True)
|
||||
else:
|
||||
# Use the passed in executor, or the loop's default if it is None
|
||||
executor = self._executor
|
||||
|
||||
if contextvars is not None:
|
||||
context = contextvars.copy_context()
|
||||
child = functools.partial(self.func, *args, **kwargs)
|
||||
func = context.run
|
||||
args = (child,)
|
||||
kwargs = {}
|
||||
else:
|
||||
func = self.func
|
||||
|
||||
try:
|
||||
# Run the code in the right thread
|
||||
future = loop.run_in_executor(
|
||||
executor,
|
||||
functools.partial(
|
||||
self.thread_handler,
|
||||
loop,
|
||||
self.get_current_task(),
|
||||
sys.exc_info(),
|
||||
func,
|
||||
*args,
|
||||
**kwargs,
|
||||
),
|
||||
)
|
||||
ret = await asyncio.wait_for(future, timeout=None)
|
||||
|
||||
finally:
|
||||
if contextvars is not None:
|
||||
_restore_context(context)
|
||||
if self.deadlock_context:
|
||||
self.deadlock_context.set(False)
|
||||
|
||||
return ret
|
||||
|
||||
def __get__(self, parent, objtype):
|
||||
"""
|
||||
Include self for methods
|
||||
"""
|
||||
return functools.partial(self.__call__, parent)
|
||||
|
||||
def thread_handler(self, loop, source_task, exc_info, func, *args, **kwargs):
|
||||
"""
|
||||
Wraps the sync application with exception handling.
|
||||
"""
|
||||
# Set the threadlocal for AsyncToSync
|
||||
self.threadlocal.main_event_loop = loop
|
||||
self.threadlocal.main_event_loop_pid = os.getpid()
|
||||
# Set the task mapping (used for the locals module)
|
||||
current_thread = threading.current_thread()
|
||||
if AsyncToSync.launch_map.get(source_task) == current_thread:
|
||||
# Our parent task was launched from this same thread, so don't make
|
||||
# a launch map entry - let it shortcut over us! (and stop infinite loops)
|
||||
parent_set = False
|
||||
else:
|
||||
self.launch_map[current_thread] = source_task
|
||||
parent_set = True
|
||||
# Run the function
|
||||
try:
|
||||
# If we have an exception, run the function inside the except block
|
||||
# after raising it so exc_info is correctly populated.
|
||||
if exc_info[1]:
|
||||
try:
|
||||
raise exc_info[1]
|
||||
except BaseException:
|
||||
return func(*args, **kwargs)
|
||||
else:
|
||||
return func(*args, **kwargs)
|
||||
finally:
|
||||
# Only delete the launch_map parent if we set it, otherwise it is
|
||||
# from someone else.
|
||||
if parent_set:
|
||||
del self.launch_map[current_thread]
|
||||
|
||||
@staticmethod
|
||||
def get_current_task():
|
||||
"""
|
||||
Cross-version implementation of asyncio.current_task()
|
||||
|
||||
Returns None if there is no task.
|
||||
"""
|
||||
try:
|
||||
if hasattr(asyncio, "current_task"):
|
||||
# Python 3.7 and up
|
||||
return asyncio.current_task()
|
||||
else:
|
||||
# Python 3.6
|
||||
return asyncio.Task.current_task()
|
||||
except RuntimeError:
|
||||
return None
|
||||
|
||||
|
||||
# Lowercase aliases (and decorator friendliness)
|
||||
async_to_sync = AsyncToSync
|
||||
|
||||
|
||||
@overload
|
||||
def sync_to_async(
|
||||
func: None = None,
|
||||
thread_sensitive: bool = True,
|
||||
executor: Optional["ThreadPoolExecutor"] = None,
|
||||
) -> Callable[[Callable[..., Any]], SyncToAsync]:
|
||||
...
|
||||
|
||||
|
||||
@overload
|
||||
def sync_to_async(
|
||||
func: Callable[..., Any],
|
||||
thread_sensitive: bool = True,
|
||||
executor: Optional["ThreadPoolExecutor"] = None,
|
||||
) -> SyncToAsync:
|
||||
...
|
||||
|
||||
|
||||
def sync_to_async(
|
||||
func=None,
|
||||
thread_sensitive=True,
|
||||
executor=None,
|
||||
):
|
||||
if func is None:
|
||||
return lambda f: SyncToAsync(
|
||||
f,
|
||||
thread_sensitive=thread_sensitive,
|
||||
executor=executor,
|
||||
)
|
||||
return SyncToAsync(
|
||||
func,
|
||||
thread_sensitive=thread_sensitive,
|
||||
executor=executor,
|
||||
)
|
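# Illustrative usage sketch (not part of the vendored file): wrapping a
# blocking function so it can be awaited from async code. The function name
# below is a hypothetical example, not something defined in this module.
#
# import time
#
# @sync_to_async
# def blocking_lookup(key):
#     time.sleep(1)  # stands in for blocking I/O
#     return key.upper()
#
# async def handler():
#     # Runs in a worker thread; the event loop stays responsive meanwhile.
#     return await blocking_lookup("value")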
@ -0,0 +1,97 @@
import asyncio
import time

from .compatibility import guarantee_single_callable
from .timeout import timeout as async_timeout


class ApplicationCommunicator:
    """
    Runs an ASGI application in a test mode, allowing sending of
    messages to it and retrieval of messages it sends.
    """

    def __init__(self, application, scope):
        self.application = guarantee_single_callable(application)
        self.scope = scope
        self.input_queue = asyncio.Queue()
        self.output_queue = asyncio.Queue()
        self.future = asyncio.ensure_future(
            self.application(scope, self.input_queue.get, self.output_queue.put)
        )

    async def wait(self, timeout=1):
        """
        Waits for the application to stop itself and returns any exceptions.
        """
        try:
            async with async_timeout(timeout):
                try:
                    await self.future
                    self.future.result()
                except asyncio.CancelledError:
                    pass
        finally:
            if not self.future.done():
                self.future.cancel()
                try:
                    await self.future
                except asyncio.CancelledError:
                    pass

    def stop(self, exceptions=True):
        if not self.future.done():
            self.future.cancel()
        elif exceptions:
            # Give a chance to raise any exceptions
            self.future.result()

    def __del__(self):
        # Clean up on deletion
        try:
            self.stop(exceptions=False)
        except RuntimeError:
            # Event loop already stopped
            pass

    async def send_input(self, message):
        """
        Sends a single message to the application
        """
        # Give it the message
        await self.input_queue.put(message)

    async def receive_output(self, timeout=1):
        """
        Receives a single message from the application, with optional timeout.
        """
        # Make sure there's not an exception to raise from the task
        if self.future.done():
            self.future.result()
        # Wait and receive the message
        try:
            async with async_timeout(timeout):
                return await self.output_queue.get()
        except asyncio.TimeoutError as e:
            # See if we have another error to raise inside
            if self.future.done():
                self.future.result()
            else:
                self.future.cancel()
                try:
                    await self.future
                except asyncio.CancelledError:
                    pass
            raise e

    async def receive_nothing(self, timeout=0.1, interval=0.01):
        """
        Checks that there is no message to receive in the given time.
        """
        # `interval` has precedence over `timeout`
        start = time.monotonic()
        while time.monotonic() - start < timeout:
            if not self.output_queue.empty():
                return False
            await asyncio.sleep(interval)
        return self.output_queue.empty()
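# Illustrative usage sketch (not part of the vendored file): driving a
# minimal ASGI app through ApplicationCommunicator. The `app` coroutine and
# the scope contents below are hypothetical test fixtures, not library code.
#
# async def app(scope, receive, send):
#     await receive()
#     await send({"type": "http.response.start", "status": 200, "headers": []})
#
# async def exercise_app():
#     communicator = ApplicationCommunicator(
#         app, {"type": "http", "method": "GET", "path": "/"}
#     )
#     await communicator.send_input(
#         {"type": "http.request", "body": b"", "more_body": False}
#     )
#     event = await communicator.receive_output(timeout=1)
#     assert event["type"] == "http.response.start"
#     await communicator.wait()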
@ -0,0 +1,127 @@
# This code is originally sourced from the aio-libs project "async_timeout",
# under the Apache 2.0 license. You may see the original project at
# https://github.com/aio-libs/async-timeout

# It is vendored here to reduce chain-dependencies on this library, and
# modified slightly to remove some features we don't use.


import asyncio
import sys
from types import TracebackType
from typing import Any, Optional, Type


class timeout:
    """timeout context manager.

    Useful in cases when you want to apply timeout logic around block
    of code or in cases when asyncio.wait_for is not suitable. For example:

    >>> with timeout(0.001):
    ...     async with aiohttp.get('https://github.com') as r:
    ...         await r.text()


    timeout - value in seconds or None to disable timeout logic
    loop - asyncio compatible event loop
    """

    def __init__(
        self,
        timeout: Optional[float],
        *,
        loop: Optional[asyncio.AbstractEventLoop] = None,
    ) -> None:
        self._timeout = timeout
        if loop is None:
            loop = asyncio.get_event_loop()
        self._loop = loop
        self._task = None  # type: Optional[asyncio.Task[Any]]
        self._cancelled = False
        self._cancel_handler = None  # type: Optional[asyncio.Handle]
        self._cancel_at = None  # type: Optional[float]

    def __enter__(self) -> "timeout":
        return self._do_enter()

    def __exit__(
        self,
        exc_type: Type[BaseException],
        exc_val: BaseException,
        exc_tb: TracebackType,
    ) -> Optional[bool]:
        self._do_exit(exc_type)
        return None

    async def __aenter__(self) -> "timeout":
        return self._do_enter()

    async def __aexit__(
        self,
        exc_type: Type[BaseException],
        exc_val: BaseException,
        exc_tb: TracebackType,
    ) -> None:
        self._do_exit(exc_type)

    @property
    def expired(self) -> bool:
        return self._cancelled

    @property
    def remaining(self) -> Optional[float]:
        if self._cancel_at is not None:
            return max(self._cancel_at - self._loop.time(), 0.0)
        else:
            return None

    def _do_enter(self) -> "timeout":
        # Support Tornado 5- without timeout
        # Details: https://github.com/python/asyncio/issues/392
        if self._timeout is None:
            return self

        self._task = current_task(self._loop)
        if self._task is None:
            raise RuntimeError(
                "Timeout context manager should be used " "inside a task"
            )

        if self._timeout <= 0:
            self._loop.call_soon(self._cancel_task)
            return self

        self._cancel_at = self._loop.time() + self._timeout
        self._cancel_handler = self._loop.call_at(self._cancel_at, self._cancel_task)
        return self

    def _do_exit(self, exc_type: Type[BaseException]) -> None:
        if exc_type is asyncio.CancelledError and self._cancelled:
            self._cancel_handler = None
            self._task = None
            raise asyncio.TimeoutError
        if self._timeout is not None and self._cancel_handler is not None:
            self._cancel_handler.cancel()
            self._cancel_handler = None
        self._task = None
        return None

    def _cancel_task(self) -> None:
        if self._task is not None:
            self._task.cancel()
            self._cancelled = True


def current_task(loop: asyncio.AbstractEventLoop) -> "Optional[asyncio.Task[Any]]":
    if sys.version_info >= (3, 7):
        task = asyncio.current_task(loop=loop)
    else:
        task = asyncio.Task.current_task(loop=loop)
    if task is None:
        # this should be removed, tokio must use register_task and family API
        fn = getattr(loop, "current_task", None)
        if fn is not None:
            task = fn()

    return task
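# Illustrative usage sketch (not part of the vendored file): applying a
# deadline to an awaited operation from inside a running task. On expiry the
# wrapped task is cancelled and asyncio.TimeoutError is raised on exit.
#
# async def get_with_deadline(queue: asyncio.Queue):
#     try:
#         async with timeout(0.5):
#             return await queue.get()
#     except asyncio.TimeoutError:
#         return None  # deadline hit before an item arrived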
@ -0,0 +1,287 @@
import sys
import warnings
from typing import (
    Any,
    Awaitable,
    Callable,
    Dict,
    Iterable,
    List,
    Optional,
    Tuple,
    Type,
    Union,
)

from asgiref._pep562 import pep562

if sys.version_info >= (3, 8):
    from typing import Literal, Protocol, TypedDict
else:
    from typing_extensions import Literal, Protocol, TypedDict

__all__ = (
    "ASGIVersions",
    "HTTPScope",
    "WebSocketScope",
    "LifespanScope",
    "WWWScope",
    "Scope",
    "HTTPRequestEvent",
    "HTTPResponseStartEvent",
    "HTTPResponseBodyEvent",
    "HTTPServerPushEvent",
    "HTTPDisconnectEvent",
    "WebSocketConnectEvent",
    "WebSocketAcceptEvent",
    "WebSocketReceiveEvent",
    "WebSocketSendEvent",
    "WebSocketResponseStartEvent",
    "WebSocketResponseBodyEvent",
    "WebSocketDisconnectEvent",
    "WebSocketCloseEvent",
    "LifespanStartupEvent",
    "LifespanShutdownEvent",
    "LifespanStartupCompleteEvent",
    "LifespanStartupFailedEvent",
    "LifespanShutdownCompleteEvent",
    "LifespanShutdownFailedEvent",
    "ASGIReceiveEvent",
    "ASGISendEvent",
    "ASGIReceiveCallable",
    "ASGISendCallable",
    "ASGI2Protocol",
    "ASGI2Application",
    "ASGI3Application",
    "ASGIApplication",
)


class ASGIVersions(TypedDict):
    spec_version: str
    version: Union[Literal["2.0"], Literal["3.0"]]


class HTTPScope(TypedDict):
    type: Literal["http"]
    asgi: ASGIVersions
    http_version: str
    method: str
    scheme: str
    path: str
    raw_path: bytes
    query_string: bytes
    root_path: str
    headers: Iterable[Tuple[bytes, bytes]]
    client: Optional[Tuple[str, int]]
    server: Optional[Tuple[str, Optional[int]]]
    extensions: Optional[Dict[str, Dict[object, object]]]


class WebSocketScope(TypedDict):
    type: Literal["websocket"]
    asgi: ASGIVersions
    http_version: str
    scheme: str
    path: str
    raw_path: bytes
    query_string: bytes
    root_path: str
    headers: Iterable[Tuple[bytes, bytes]]
    client: Optional[Tuple[str, int]]
    server: Optional[Tuple[str, Optional[int]]]
    subprotocols: Iterable[str]
    extensions: Optional[Dict[str, Dict[object, object]]]


class LifespanScope(TypedDict):
    type: Literal["lifespan"]
    asgi: ASGIVersions


WWWScope = Union[HTTPScope, WebSocketScope]
Scope = Union[HTTPScope, WebSocketScope, LifespanScope]


class HTTPRequestEvent(TypedDict):
    type: Literal["http.request"]
    body: bytes
    more_body: bool


class HTTPResponseStartEvent(TypedDict):
    type: Literal["http.response.start"]
    status: int
    headers: Iterable[Tuple[bytes, bytes]]


class HTTPResponseBodyEvent(TypedDict):
    type: Literal["http.response.body"]
    body: bytes
    more_body: bool


class HTTPServerPushEvent(TypedDict):
    type: Literal["http.response.push"]
    path: str
    headers: Iterable[Tuple[bytes, bytes]]


class HTTPDisconnectEvent(TypedDict):
    type: Literal["http.disconnect"]


class WebSocketConnectEvent(TypedDict):
    type: Literal["websocket.connect"]


class WebSocketAcceptEvent(TypedDict):
    type: Literal["websocket.accept"]
    subprotocol: Optional[str]
    headers: Iterable[Tuple[bytes, bytes]]


class WebSocketReceiveEvent(TypedDict):
    type: Literal["websocket.receive"]
    bytes: Optional[bytes]
    text: Optional[str]


class WebSocketSendEvent(TypedDict):
    type: Literal["websocket.send"]
    bytes: Optional[bytes]
    text: Optional[str]


class WebSocketResponseStartEvent(TypedDict):
    type: Literal["websocket.http.response.start"]
    status: int
    headers: Iterable[Tuple[bytes, bytes]]


class WebSocketResponseBodyEvent(TypedDict):
    type: Literal["websocket.http.response.body"]
    body: bytes
    more_body: bool


class WebSocketDisconnectEvent(TypedDict):
    type: Literal["websocket.disconnect"]
    code: int


class WebSocketCloseEvent(TypedDict):
    type: Literal["websocket.close"]
    code: int
    reason: Optional[str]


class LifespanStartupEvent(TypedDict):
    type: Literal["lifespan.startup"]


class LifespanShutdownEvent(TypedDict):
    type: Literal["lifespan.shutdown"]


class LifespanStartupCompleteEvent(TypedDict):
    type: Literal["lifespan.startup.complete"]


class LifespanStartupFailedEvent(TypedDict):
    type: Literal["lifespan.startup.failed"]
    message: str


class LifespanShutdownCompleteEvent(TypedDict):
    type: Literal["lifespan.shutdown.complete"]


class LifespanShutdownFailedEvent(TypedDict):
    type: Literal["lifespan.shutdown.failed"]
    message: str


ASGIReceiveEvent = Union[
    HTTPRequestEvent,
    HTTPDisconnectEvent,
    WebSocketConnectEvent,
    WebSocketReceiveEvent,
    WebSocketDisconnectEvent,
    LifespanStartupEvent,
    LifespanShutdownEvent,
]


ASGISendEvent = Union[
    HTTPResponseStartEvent,
    HTTPResponseBodyEvent,
    HTTPServerPushEvent,
    HTTPDisconnectEvent,
    WebSocketAcceptEvent,
    WebSocketSendEvent,
    WebSocketResponseStartEvent,
    WebSocketResponseBodyEvent,
    WebSocketCloseEvent,
    LifespanStartupCompleteEvent,
    LifespanStartupFailedEvent,
    LifespanShutdownCompleteEvent,
    LifespanShutdownFailedEvent,
]


ASGIReceiveCallable = Callable[[], Awaitable[ASGIReceiveEvent]]
ASGISendCallable = Callable[[ASGISendEvent], Awaitable[None]]


class ASGI2Protocol(Protocol):
    def __init__(self, scope: Scope) -> None:
        ...

    async def __call__(
        self, receive: ASGIReceiveCallable, send: ASGISendCallable
    ) -> None:
        ...


ASGI2Application = Type[ASGI2Protocol]
ASGI3Application = Callable[
    [
        Scope,
        ASGIReceiveCallable,
        ASGISendCallable,
    ],
    Awaitable[None],
]
ASGIApplication = Union[ASGI2Application, ASGI3Application]

__deprecated__ = {
    "WebsocketConnectEvent": WebSocketConnectEvent,
    "WebsocketAcceptEvent": WebSocketAcceptEvent,
    "WebsocketReceiveEvent": WebSocketReceiveEvent,
    "WebsocketSendEvent": WebSocketSendEvent,
    "WebsocketResponseStartEvent": WebSocketResponseStartEvent,
    "WebsocketResponseBodyEvent": WebSocketResponseBodyEvent,
    "WebsocketDisconnectEvent": WebSocketDisconnectEvent,
    "WebsocketCloseEvent": WebSocketCloseEvent,
}


def __getattr__(name: str) -> Any:
    deprecated = __deprecated__.get(name)
    if deprecated:
        stacklevel = 3 if sys.version_info >= (3, 7) else 4
        warnings.warn(
            f"'{name}' is deprecated. Use '{deprecated.__name__}' instead.",
            category=DeprecationWarning,
            stacklevel=stacklevel,
        )
        return deprecated
    raise AttributeError(f"module '{__name__}' has no attribute '{name}'")


def __dir__() -> List[str]:
    return sorted(list(__all__) + list(__deprecated__.keys()))


pep562(__name__)
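# Illustrative usage sketch (not part of the vendored file): annotating a
# hypothetical ASGI 3 application with the types defined above, so a static
# checker such as mypy can verify the scope/receive/send signature.
#
# async def app(
#     scope: Scope, receive: ASGIReceiveCallable, send: ASGISendCallable
# ) -> None:
#     if scope["type"] == "http":
#         await send(
#             {"type": "http.response.start", "status": 200, "headers": []}
#         )
#         await send(
#             {"type": "http.response.body", "body": b"OK", "more_body": False}
#         )
#
# typed_app: ASGI3Application = app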
@ -0,0 +1,162 @@
from io import BytesIO
from tempfile import SpooledTemporaryFile

from asgiref.sync import AsyncToSync, sync_to_async


class WsgiToAsgi:
    """
    Wraps a WSGI application to make it into an ASGI application.
    """

    def __init__(self, wsgi_application):
        self.wsgi_application = wsgi_application

    async def __call__(self, scope, receive, send):
        """
        ASGI application instantiation point.
        We return a new WsgiToAsgiInstance here with the WSGI app
        and the scope, ready to respond when it is __call__ed.
        """
        await WsgiToAsgiInstance(self.wsgi_application)(scope, receive, send)


class WsgiToAsgiInstance:
    """
    Per-socket instance of a wrapped WSGI application
    """

    def __init__(self, wsgi_application):
        self.wsgi_application = wsgi_application
        self.response_started = False
        self.response_content_length = None

    async def __call__(self, scope, receive, send):
        if scope["type"] != "http":
            raise ValueError("WSGI wrapper received a non-HTTP scope")
        self.scope = scope
        with SpooledTemporaryFile(max_size=65536) as body:
            # Alright, wait for the http.request messages
            while True:
                message = await receive()
                if message["type"] != "http.request":
                    raise ValueError("WSGI wrapper received a non-HTTP-request message")
                body.write(message.get("body", b""))
                if not message.get("more_body"):
                    break
            body.seek(0)
            # Wrap send so it can be called from the subthread
            self.sync_send = AsyncToSync(send)
            # Call the WSGI app
            await self.run_wsgi_app(body)

    def build_environ(self, scope, body):
        """
        Builds a scope and request body into a WSGI environ object.
        """
        environ = {
            "REQUEST_METHOD": scope["method"],
            "SCRIPT_NAME": scope.get("root_path", "").encode("utf8").decode("latin1"),
            "PATH_INFO": scope["path"].encode("utf8").decode("latin1"),
            "QUERY_STRING": scope["query_string"].decode("ascii"),
            "SERVER_PROTOCOL": "HTTP/%s" % scope["http_version"],
            "wsgi.version": (1, 0),
            "wsgi.url_scheme": scope.get("scheme", "http"),
            "wsgi.input": body,
            "wsgi.errors": BytesIO(),
            "wsgi.multithread": True,
            "wsgi.multiprocess": True,
            "wsgi.run_once": False,
        }
        # Get server name and port - required in WSGI, not in ASGI
        if "server" in scope:
            environ["SERVER_NAME"] = scope["server"][0]
            environ["SERVER_PORT"] = str(scope["server"][1])
        else:
            environ["SERVER_NAME"] = "localhost"
            environ["SERVER_PORT"] = "80"

        if "client" in scope:
            environ["REMOTE_ADDR"] = scope["client"][0]

        # Go through headers and make them into environ entries
        for name, value in self.scope.get("headers", []):
            name = name.decode("latin1")
            if name == "content-length":
                corrected_name = "CONTENT_LENGTH"
            elif name == "content-type":
                corrected_name = "CONTENT_TYPE"
            else:
                corrected_name = "HTTP_%s" % name.upper().replace("-", "_")
            # HTTPbis says only ASCII chars are allowed in headers, but we decode latin1 just in case
            value = value.decode("latin1")
            if corrected_name in environ:
                value = environ[corrected_name] + "," + value
            environ[corrected_name] = value
        return environ

    def start_response(self, status, response_headers, exc_info=None):
        """
        WSGI start_response callable.
        """
        # Don't allow re-calling once response has begun
        if self.response_started:
            raise exc_info[1].with_traceback(exc_info[2])
        # Don't allow re-calling without exc_info
        if hasattr(self, "response_start") and exc_info is None:
            raise ValueError(
                "You cannot call start_response a second time without exc_info"
            )
        # Extract status code
        status_code, _ = status.split(" ", 1)
        status_code = int(status_code)
        # Extract headers
        headers = [
            (name.lower().encode("ascii"), value.encode("ascii"))
            for name, value in response_headers
        ]
        # Extract content-length
        self.response_content_length = None
        for name, value in response_headers:
            if name.lower() == "content-length":
                self.response_content_length = int(value)
        # Build and send response start message.
        self.response_start = {
            "type": "http.response.start",
            "status": status_code,
            "headers": headers,
        }

    @sync_to_async
    def run_wsgi_app(self, body):
        """
        Called in a subthread to run the WSGI app. We encapsulate like
        this so that the start_response callable is called in the same thread.
        """
        # Translate the scope and incoming request body into a WSGI environ
        environ = self.build_environ(self.scope, body)
        # Run the WSGI app
        bytes_sent = 0
        for output in self.wsgi_application(environ, self.start_response):
            # If this is the first response, include the response headers
            if not self.response_started:
                self.response_started = True
                self.sync_send(self.response_start)
            # If the application supplies a Content-Length header
            if self.response_content_length is not None:
                # The server should not transmit more bytes to the client than the header allows
                bytes_allowed = self.response_content_length - bytes_sent
                if len(output) > bytes_allowed:
                    output = output[:bytes_allowed]
            self.sync_send(
                {"type": "http.response.body", "body": output, "more_body": True}
            )
            bytes_sent += len(output)
            # The server should stop iterating over the response when enough data has been sent
            if bytes_sent == self.response_content_length:
                break
        # Close connection
        if not self.response_started:
            self.response_started = True
            self.sync_send(self.response_start)
        self.sync_send({"type": "http.response.body"})
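# Illustrative usage sketch (not part of the vendored file): serving an
# existing WSGI app through an ASGI server. The Django project here is a
# hypothetical example.
#
# from django.core.wsgi import get_wsgi_application
#
# wsgi_app = get_wsgi_application()
# asgi_app = WsgiToAsgi(wsgi_app)
# # then point an ASGI server such as uvicorn or daphne at `asgi_app`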
@ -0,0 +1,8 @@
from pkg_resources import get_distribution

import django

if django.VERSION < (3, 2):
    default_app_config = "axes.apps.AppConfig"

__version__ = get_distribution("django-axes").version
@ -0,0 +1,83 @@
from django.contrib import admin
from django.utils.translation import gettext_lazy as _

from axes.conf import settings
from axes.models import AccessAttempt, AccessLog


class AccessAttemptAdmin(admin.ModelAdmin):
    list_display = (
        "attempt_time",
        "ip_address",
        "user_agent",
        "username",
        "path_info",
        "failures_since_start",
    )

    list_filter = ["attempt_time", "path_info"]

    search_fields = ["ip_address", "username", "user_agent", "path_info"]

    date_hierarchy = "attempt_time"

    fieldsets = (
        (None, {"fields": ("path_info", "failures_since_start")}),
        (_("Form Data"), {"fields": ("get_data", "post_data")}),
        (_("Meta Data"), {"fields": ("user_agent", "ip_address", "http_accept")}),
    )

    readonly_fields = [
        "user_agent",
        "ip_address",
        "username",
        "http_accept",
        "path_info",
        "attempt_time",
        "get_data",
        "post_data",
        "failures_since_start",
    ]

    def has_add_permission(self, request):
        return False


class AccessLogAdmin(admin.ModelAdmin):
    list_display = (
        "attempt_time",
        "logout_time",
        "ip_address",
        "username",
        "user_agent",
        "path_info",
    )

    list_filter = ["attempt_time", "logout_time", "path_info"]

    search_fields = ["ip_address", "user_agent", "username", "path_info"]

    date_hierarchy = "attempt_time"

    fieldsets = (
        (None, {"fields": ("path_info",)}),
        (_("Meta Data"), {"fields": ("user_agent", "ip_address", "http_accept")}),
    )

    readonly_fields = [
        "user_agent",
        "ip_address",
        "username",
        "http_accept",
        "path_info",
        "attempt_time",
        "logout_time",
    ]

    def has_add_permission(self, request):
        return False


if settings.AXES_ENABLE_ADMIN:
    admin.site.register(AccessAttempt, AccessAttemptAdmin)
    admin.site.register(AccessLog, AccessLogAdmin)
@ -0,0 +1,49 @@
from logging import getLogger

from django import apps
from pkg_resources import get_distribution

log = getLogger(__name__)


class AppConfig(apps.AppConfig):
    default_auto_field = "django.db.models.AutoField"
    name = "axes"
    initialized = False

    @classmethod
    def initialize(cls):
        """
        Initialize Axes logging and show version information.

        This method is re-entrant and can be called multiple times.
        It displays version information exactly once at application startup.
        """

        if cls.initialized:
            return
        cls.initialized = True

        # Only import settings, checks, and signals one time after Django has been initialized
        from axes.conf import settings  # noqa
        from axes import checks, signals  # noqa

        # Skip startup log messages if Axes is not set to verbose
        if settings.AXES_VERBOSE:
            log.info("AXES: BEGIN LOG")
            log.info(
                "AXES: Using django-axes version %s",
                get_distribution("django-axes").version,
            )

            if settings.AXES_ONLY_USER_FAILURES:
                log.info("AXES: blocking by username only.")
            elif settings.AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP:
                log.info("AXES: blocking by combination of username and IP.")
            elif settings.AXES_LOCK_OUT_BY_USER_OR_IP:
                log.info("AXES: blocking by username or IP.")
            else:
                log.info("AXES: blocking by IP only.")

    def ready(self):
        self.initialize()
@ -0,0 +1,99 @@
from logging import getLogger
from typing import List

from django.db.models import QuerySet
from django.utils.timezone import datetime, now

from axes.conf import settings
from axes.helpers import get_client_username, get_client_parameters, get_cool_off
from axes.models import AccessAttempt

log = getLogger(__name__)


def get_cool_off_threshold(attempt_time: datetime = None) -> datetime:
    """
    Get threshold for fetching access attempts from the database.
    """

    cool_off = get_cool_off()
    if cool_off is None:
        raise TypeError(
            "Cool off threshold can not be calculated with settings.AXES_COOLOFF_TIME set to None"
        )

    if attempt_time is None:
        return now() - cool_off
    return attempt_time - cool_off


def filter_user_attempts(request, credentials: dict = None) -> List[QuerySet]:
    """
    Return a list of querysets of AccessAttempts that match the given request and credentials.
    """

    username = get_client_username(request, credentials)

    filter_kwargs_list = get_client_parameters(
        username, request.axes_ip_address, request.axes_user_agent
    )
    attempts_list = [
        AccessAttempt.objects.filter(**filter_kwargs)
        for filter_kwargs in filter_kwargs_list
    ]
    return attempts_list


def get_user_attempts(request, credentials: dict = None) -> List[QuerySet]:
    """
    Get a list of querysets with valid user attempts that match the given request and credentials.
    """

    attempts_list = filter_user_attempts(request, credentials)

    if settings.AXES_COOLOFF_TIME is None:
        log.debug(
            "AXES: Getting all access attempts from database because no AXES_COOLOFF_TIME is configured"
        )
        return attempts_list

    threshold = get_cool_off_threshold(request.axes_attempt_time)
    log.debug("AXES: Getting access attempts that are newer than %s", threshold)
    return [attempts.filter(attempt_time__gte=threshold) for attempts in attempts_list]


def clean_expired_user_attempts(attempt_time: datetime = None) -> int:
    """
    Clean expired user attempts from the database.
    """

    if settings.AXES_COOLOFF_TIME is None:
        log.debug(
            "AXES: Skipping clean for expired access attempts because no AXES_COOLOFF_TIME is configured"
        )
        return 0

    threshold = get_cool_off_threshold(attempt_time)
    count, _ = AccessAttempt.objects.filter(attempt_time__lt=threshold).delete()
    log.info(
        "AXES: Cleaned up %s expired access attempts from database that were older than %s",
        count,
        threshold,
    )
    return count


def reset_user_attempts(request, credentials: dict = None) -> int:
    """
    Reset all user attempts that match the given request and credentials.
    """

    attempts_list = filter_user_attempts(request, credentials)

    count = 0
    for attempts in attempts_list:
        _count, _ = attempts.delete()
        count += _count
    log.info("AXES: Reset %s access attempts from database.", count)

    return count
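# Illustrative sketch (not part of the vendored file) of the cool-off
# arithmetic above, assuming AXES_COOLOFF_TIME is configured as a timedelta:
#
# from datetime import timedelta
# from django.utils.timezone import now
#
# # With AXES_COOLOFF_TIME = timedelta(hours=1), only attempts newer than
# # this threshold count towards a lockout; older rows are eligible for
# # clean_expired_user_attempts():
# threshold = now() - timedelta(hours=1)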
@ -0,0 +1,62 @@
from django.contrib.auth.backends import ModelBackend

from axes.exceptions import (
    AxesBackendPermissionDenied,
    AxesBackendRequestParameterRequired,
)
from axes.handlers.proxy import AxesProxyHandler
from axes.helpers import get_credentials, get_lockout_message, toggleable


class AxesBackend(ModelBackend):
    """
    Authentication backend class that forbids login attempts for locked out users.

    Use this class as the first item of ``AUTHENTICATION_BACKENDS`` to
    prevent locked out users from being logged in by the Django authentication flow.

    .. note:: This backend does not log your user in. It monitors login attempts.
       Authentication is handled by the following backends that are configured in ``AUTHENTICATION_BACKENDS``.
    """

    @toggleable
    def authenticate(
        self, request, username: str = None, password: str = None, **kwargs: dict
    ):
        """
        Checks user lockout status and raises an exception if the user is not allowed to log in.

        This method interrupts the login flow and inserts an error message directly into the
        ``response_context`` attribute that is supplied as a keyword argument.

        :keyword response_context: kwarg that will have its ``error`` attribute updated with context.
        :raises AxesBackendRequestParameterRequired: if request parameter is not passed.
        :raises AxesBackendPermissionDenied: if user is already locked out.
        """

        if request is None:
            raise AxesBackendRequestParameterRequired(
                "AxesBackend requires a request as an argument to authenticate"
            )

        credentials = get_credentials(username=username, password=password, **kwargs)

        if AxesProxyHandler.is_allowed(request, credentials):
            return

        # Locked out, don't try to authenticate, just update response_context and return.
        # It's a bit weird to pass a context and expect a response value, but it's nice to get a "why" back.

        error_msg = get_lockout_message()
        response_context = kwargs.get("response_context", {})
        response_context["error"] = error_msg

        # Raise an error that stops the authentication flows at django.contrib.auth.authenticate.
        # This error stops bubbling up at the authenticate call, which catches backend PermissionDenied errors.
        # After this error is caught by authenticate it emits a signal indicating user login failed,
        # which is processed by axes.signals.log_user_login_failed, which logs and flags the failed request.
        # The axes.middleware.AxesMiddleware further processes the flagged request into a readable response.

        raise AxesBackendPermissionDenied(
            "AxesBackend detected that the given user is locked out"
        )
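# Illustrative configuration sketch (not part of the vendored file): the
# docstring above asks for AxesBackend to be the first entry, e.g. in a
# project's settings.py:
#
# AUTHENTICATION_BACKENDS = [
#     "axes.backends.AxesBackend",  # checks lockout state first
#     "django.contrib.auth.backends.ModelBackend",  # then actually authenticates
# ]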
@ -0,0 +1,144 @@
from django.core.checks import (  # pylint: disable=redefined-builtin
    Tags,
    Warning,
    register,
)
from django.utils.module_loading import import_string

from axes.backends import AxesBackend
from axes.conf import settings


class Messages:
    CACHE_INVALID = (
        "You are using the django-axes cache handler for login attempt tracking."
        " Your cache configuration is however invalid and will not work correctly with django-axes."
        " This can leave security holes in your login systems as attempts are not tracked correctly."
        " Reconfigure settings.AXES_CACHE and settings.CACHES per django-axes configuration documentation."
    )
    MIDDLEWARE_INVALID = (
        "You do not have 'axes.middleware.AxesMiddleware' in your settings.MIDDLEWARE."
    )
    BACKEND_INVALID = "You do not have 'axes.backends.AxesBackend' or a subclass in your settings.AUTHENTICATION_BACKENDS."
    SETTING_DEPRECATED = "You have a deprecated setting {deprecated_setting} configured in your project settings"


class Hints:
    CACHE_INVALID = None
    MIDDLEWARE_INVALID = None
    BACKEND_INVALID = (
        "AxesModelBackend was renamed to AxesBackend in django-axes version 5.0."
    )
    SETTING_DEPRECATED = None


class Codes:
    CACHE_INVALID = "axes.W001"
    MIDDLEWARE_INVALID = "axes.W002"
    BACKEND_INVALID = "axes.W003"
    SETTING_DEPRECATED = "axes.W004"


@register(Tags.security, Tags.caches, Tags.compatibility)
def axes_cache_check(app_configs, **kwargs):  # pylint: disable=unused-argument
    axes_handler = getattr(settings, "AXES_HANDLER", "")

    axes_cache_key = getattr(settings, "AXES_CACHE", "default")
    axes_cache_config = settings.CACHES.get(axes_cache_key, {})
    axes_cache_backend = axes_cache_config.get("BACKEND", "")

    axes_cache_backend_incompatible = [
        "django.core.cache.backends.dummy.DummyCache",
        "django.core.cache.backends.locmem.LocMemCache",
        "django.core.cache.backends.filebased.FileBasedCache",
    ]

    warnings = []

    if axes_handler == "axes.handlers.cache.AxesCacheHandler":
        if axes_cache_backend in axes_cache_backend_incompatible:
            warnings.append(
                Warning(
                    msg=Messages.CACHE_INVALID,
                    hint=Hints.CACHE_INVALID,
                    id=Codes.CACHE_INVALID,
                )
            )

    return warnings


@register(Tags.security, Tags.compatibility)
def axes_middleware_check(app_configs, **kwargs):  # pylint: disable=unused-argument
    warnings = []

    if "axes.middleware.AxesMiddleware" not in settings.MIDDLEWARE:
        warnings.append(
            Warning(
                msg=Messages.MIDDLEWARE_INVALID,
                hint=Hints.MIDDLEWARE_INVALID,
                id=Codes.MIDDLEWARE_INVALID,
            )
        )

    return warnings


@register(Tags.security, Tags.compatibility)
def axes_backend_check(app_configs, **kwargs):  # pylint: disable=unused-argument
    warnings = []

    found = False
    for name in settings.AUTHENTICATION_BACKENDS:
        try:
            backend = import_string(name)
        except ModuleNotFoundError as e:
            raise ModuleNotFoundError(
                "Can not find module path defined in settings.AUTHENTICATION_BACKENDS"
            ) from e
        except ImportError as e:
            raise ImportError(
                "Can not import backend class defined in settings.AUTHENTICATION_BACKENDS"
            ) from e

        if issubclass(backend, AxesBackend):
            found = True
            break

    if not found:
        warnings.append(
            Warning(
                msg=Messages.BACKEND_INVALID,
                hint=Hints.BACKEND_INVALID,
                id=Codes.BACKEND_INVALID,
            )
        )

    return warnings


@register(Tags.compatibility)
def axes_deprecation_check(app_configs, **kwargs):  # pylint: disable=unused-argument
    warnings = []

    deprecated_settings = [
        "AXES_DISABLE_SUCCESS_ACCESS_LOG",
        "AXES_LOGGER",
    ]

    for deprecated_setting in deprecated_settings:
        try:
            getattr(settings, deprecated_setting)
            warnings.append(
                Warning(
                    msg=Messages.SETTING_DEPRECATED.format(
                        deprecated_setting=deprecated_setting
                    ),
                    hint=None,
                    id=Codes.SETTING_DEPRECATED,
                )
            )
        except AttributeError:
            pass

    return warnings
@ -0,0 +1,131 @@
from django.conf import settings
from django.utils.translation import gettext_lazy as _


# disable plugin when set to False
settings.AXES_ENABLED = getattr(settings, "AXES_ENABLED", True)

# see if the user has overridden the failure limit
settings.AXES_FAILURE_LIMIT = getattr(settings, "AXES_FAILURE_LIMIT", 3)

# see if the user has set axes to lock out logins after failure limit
settings.AXES_LOCK_OUT_AT_FAILURE = getattr(settings, "AXES_LOCK_OUT_AT_FAILURE", True)

# lock out with the combination of username and IP address
settings.AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP = getattr(
    settings, "AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP", False
)

# lock out with the username or IP address
settings.AXES_LOCK_OUT_BY_USER_OR_IP = getattr(
    settings, "AXES_LOCK_OUT_BY_USER_OR_IP", False
)

# lock out with username and never the IP or user agent
settings.AXES_ONLY_USER_FAILURES = getattr(settings, "AXES_ONLY_USER_FAILURES", False)

# lock out just for admin site
settings.AXES_ONLY_ADMIN_SITE = getattr(settings, "AXES_ONLY_ADMIN_SITE", False)

# show Axes logs in admin
settings.AXES_ENABLE_ADMIN = getattr(settings, "AXES_ENABLE_ADMIN", True)

# lock out with the user agent, has no effect when ONLY_USER_FAILURES is set
settings.AXES_USE_USER_AGENT = getattr(settings, "AXES_USE_USER_AGENT", False)

# use a specific username field to retrieve from login POST data
settings.AXES_USERNAME_FORM_FIELD = getattr(
    settings, "AXES_USERNAME_FORM_FIELD", "username"
)

# use a specific password field to retrieve from login POST data
settings.AXES_PASSWORD_FORM_FIELD = getattr(
    settings, "AXES_PASSWORD_FORM_FIELD", "password"
)  # noqa

# use a provided callable to transform the POSTed username into the one used in credentials
settings.AXES_USERNAME_CALLABLE = getattr(settings, "AXES_USERNAME_CALLABLE", None)

# determine if given user should be always allowed to attempt authentication
settings.AXES_WHITELIST_CALLABLE = getattr(settings, "AXES_WHITELIST_CALLABLE", None)

# return custom lockout response if configured
settings.AXES_LOCKOUT_CALLABLE = getattr(settings, "AXES_LOCKOUT_CALLABLE", None)

# reset the number of failed attempts after one successful attempt
settings.AXES_RESET_ON_SUCCESS = getattr(settings, "AXES_RESET_ON_SUCCESS", False)

settings.AXES_DISABLE_ACCESS_LOG = getattr(settings, "AXES_DISABLE_ACCESS_LOG", False)

settings.AXES_HANDLER = getattr(
    settings, "AXES_HANDLER", "axes.handlers.database.AxesDatabaseHandler"
)

settings.AXES_LOCKOUT_TEMPLATE = getattr(settings, "AXES_LOCKOUT_TEMPLATE", None)

settings.AXES_LOCKOUT_URL = getattr(settings, "AXES_LOCKOUT_URL", None)

settings.AXES_COOLOFF_TIME = getattr(settings, "AXES_COOLOFF_TIME", None)

settings.AXES_VERBOSE = getattr(settings, "AXES_VERBOSE", settings.AXES_ENABLED)

# whitelist and blacklist
settings.AXES_NEVER_LOCKOUT_WHITELIST = getattr(
    settings, "AXES_NEVER_LOCKOUT_WHITELIST", False
)

settings.AXES_NEVER_LOCKOUT_GET = getattr(settings, "AXES_NEVER_LOCKOUT_GET", False)

settings.AXES_ONLY_WHITELIST = getattr(settings, "AXES_ONLY_WHITELIST", False)

settings.AXES_IP_WHITELIST = getattr(settings, "AXES_IP_WHITELIST", None)

settings.AXES_IP_BLACKLIST = getattr(settings, "AXES_IP_BLACKLIST", None)

# message to show when locked out and have cooloff enabled
settings.AXES_COOLOFF_MESSAGE = getattr(
    settings,
    "AXES_COOLOFF_MESSAGE",
    _("Account locked: too many login attempts. Please try again later."),
)

# message to show when locked out and have cooloff disabled
settings.AXES_PERMALOCK_MESSAGE = getattr(
    settings,
    "AXES_PERMALOCK_MESSAGE",
    _(
        "Account locked: too many login attempts. Contact an admin to unlock your account."
    ),
)

# if your deployment is using reverse proxies, set this value to 'left-most' or 'right-most' per your configuration
settings.AXES_PROXY_ORDER = getattr(settings, "AXES_PROXY_ORDER", "left-most")

# if your deployment is using reverse proxies, set this value to the number of proxies in front of Django
settings.AXES_PROXY_COUNT = getattr(settings, "AXES_PROXY_COUNT", None)

# if your deployment is using reverse proxies, set to your trusted proxy IP addresses prefixes if needed
settings.AXES_PROXY_TRUSTED_IPS = getattr(settings, "AXES_PROXY_TRUSTED_IPS", None)

# set to the names of request.META attributes that should be checked for the IP address of the client
# if your deployment is using reverse proxies, ensure that the header attributes are securely set by the proxy
# ensure that the client can not spoof the headers by setting them and sending them through the proxy
settings.AXES_META_PRECEDENCE_ORDER = getattr(
    settings,
    "AXES_META_PRECEDENCE_ORDER",
    getattr(settings, "IPWARE_META_PRECEDENCE_ORDER", ("REMOTE_ADDR",)),
)

# set CORS allowed origins when calling authentication over ajax
settings.AXES_ALLOWED_CORS_ORIGINS = getattr(settings, "AXES_ALLOWED_CORS_ORIGINS", "*")

# set the list of sensitive parameters to cleanse from get/post data before logging
settings.AXES_SENSITIVE_PARAMETERS = getattr(
    settings,
    "AXES_SENSITIVE_PARAMETERS",
    [],
)

# set the callable for the readable string that can be used in
# e.g. logging to distinguish client requests
settings.AXES_CLIENT_STR_CALLABLE = getattr(settings, "AXES_CLIENT_STR_CALLABLE", None)
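# Illustrative configuration sketch (not part of the vendored file): any of
# the defaults above can be overridden in a project's settings.py, e.g.:
#
# from datetime import timedelta
#
# AXES_FAILURE_LIMIT = 5                  # lock after five failed attempts
# AXES_COOLOFF_TIME = timedelta(hours=1)  # unlock automatically after an hour
# AXES_RESET_ON_SUCCESS = True            # clear the counter on a successful login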
@ -0,0 +1,26 @@
from functools import wraps

from axes.handlers.proxy import AxesProxyHandler
from axes.helpers import get_lockout_response


def axes_dispatch(func):
    @wraps(func)
    def inner(request, *args, **kwargs):
        if AxesProxyHandler.is_allowed(request):
            return func(request, *args, **kwargs)

        return get_lockout_response(request)

    return inner


def axes_form_invalid(func):
    @wraps(func)
    def inner(self, *args, **kwargs):
        if AxesProxyHandler.is_allowed(self.request):
            return func(self, *args, **kwargs)

        return get_lockout_response(self.request)

    return inner
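# Illustrative usage sketch (not part of the vendored file): guarding a
# hypothetical login view with axes_dispatch, so locked-out clients receive
# the lockout response instead of reaching the view body.
#
# from django.http import HttpResponse
#
# @axes_dispatch
# def login_view(request):
#     return HttpResponse("login form")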
@ -0,0 +1,13 @@
from django.core.exceptions import PermissionDenied


class AxesBackendPermissionDenied(PermissionDenied):
    """
    Raised by authentication backend on locked out requests to stop the Django authentication flow.
    """


class AxesBackendRequestParameterRequired(ValueError):
    """
    Raised by authentication backend on invalid or missing request parameter value.
    """
@ -0,0 +1,193 @@
|
||||
import re
|
||||
from abc import ABC, abstractmethod
|
||||
|
||||
from django.urls import reverse
|
||||
from django.urls.exceptions import NoReverseMatch
|
||||
|
||||
from axes.conf import settings
|
||||
from axes.helpers import (
|
||||
get_failure_limit,
|
||||
is_client_ip_address_blacklisted,
|
||||
is_client_ip_address_whitelisted,
|
||||
is_client_method_whitelisted,
|
||||
is_user_attempt_whitelisted,
|
||||
)
|
||||
|
||||
|
||||
class AbstractAxesHandler(ABC):
|
||||
"""
|
||||
Contract that all handlers need to follow
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def user_login_failed(self, sender, credentials: dict, request=None, **kwargs):
|
||||
"""
|
||||
Handles the Django ``django.contrib.auth.signals.user_login_failed`` authentication signal.
|
||||
"""
|
||||
raise NotImplementedError("user_login_failed should be implemented")
|
||||
|
||||
@abstractmethod
|
||||
def user_logged_in(self, sender, request, user, **kwargs):
|
||||
"""
|
||||
Handles the Django ``django.contrib.auth.signals.user_logged_in`` authentication signal.
|
||||
"""
|
||||
raise NotImplementedError("user_logged_in should be implemented")
|
||||
|
||||
@abstractmethod
|
||||
def user_logged_out(self, sender, request, user, **kwargs):
|
||||
"""
|
||||
Handles the Django ``django.contrib.auth.signals.user_logged_out`` authentication signal.
|
||||
"""
|
||||
raise NotImplementedError("user_logged_out should be implemented")
|
||||
|
||||
@abstractmethod
|
||||
def get_failures(self, request, credentials: dict = None) -> int:
|
||||
"""
|
||||
        Checks the number of failures associated to the given request and credentials.

        This is a virtual method that needs an implementation in the handler subclass
        if the ``settings.AXES_LOCK_OUT_AT_FAILURE`` flag is set to ``True``.
        """

        raise NotImplementedError("get_failures should be implemented")


class AxesBaseHandler:  # pylint: disable=unused-argument
    """
    Handler API definition for implementations that are used by the ``AxesProxyHandler``.

    If you wish to specialize your own handler class, override the necessary methods
    and configure the class for use by setting ``settings.AXES_HANDLER = 'module.path.to.YourClass'``.
    Make sure that the new handler is compliant with AbstractAxesHandler and make sure it extends from this mixin.
    Refer to `AxesHandler` for an example.

    The default implementation that is actually used by Axes is ``axes.handlers.database.AxesDatabaseHandler``.

    .. note:: This is a virtual class and **can not be used without specialization**.
    """

    def is_allowed(self, request, credentials: dict = None) -> bool:
        """
        Checks if the user is allowed to access or use given functionality such as a login view or authentication.

        This method is abstract and other backends can specialize it as needed, but the default implementation
        checks if the user has attempted to authenticate into the site too many times through the
        Django authentication backends and returns ``False`` if the user exceeds the configured Axes thresholds.

        This checker can implement arbitrary checks such as IP whitelisting or blacklisting,
        request frequency checking, failed attempt monitoring or similar functions.

        Please refer to the ``axes.handlers.database.AxesDatabaseHandler`` for the default implementation
        and inspiration on some common checks and access restrictions before writing your own implementation.
        """

        if self.is_admin_site(request):
            return True

        if self.is_blacklisted(request, credentials):
            return False

        if self.is_whitelisted(request, credentials):
            return True

        if self.is_locked(request, credentials):
            return False

        return True

    def is_blacklisted(self, request, credentials: dict = None) -> bool:
        """
        Checks if the request or given credentials are blacklisted from access.
        """

        if is_client_ip_address_blacklisted(request):
            return True

        return False

    def is_whitelisted(self, request, credentials: dict = None) -> bool:
        """
        Checks if the request or given credentials are whitelisted for access.
        """

        if is_user_attempt_whitelisted(request, credentials):
            return True

        if is_client_ip_address_whitelisted(request):
            return True

        if is_client_method_whitelisted(request):
            return True

        return False

    def is_locked(self, request, credentials: dict = None) -> bool:
        """
        Checks if the request or given credentials are locked.
        """

        if settings.AXES_LOCK_OUT_AT_FAILURE:
            # get_failures will have to be implemented by each specialized handler
            return self.get_failures(  # type: ignore
                request, credentials
            ) >= get_failure_limit(request, credentials)

        return False

    def is_admin_site(self, request) -> bool:
        """
        Checks if the request is for the admin site.
        """
        if settings.AXES_ONLY_ADMIN_SITE and hasattr(request, "path"):
            try:
                admin_url = reverse("admin:index")
            except NoReverseMatch:
                return True
            return not re.match("^%s" % admin_url, request.path)

        return False

    def reset_attempts(
        self,
        *,
        ip_address: str = None,
        username: str = None,
        ip_or_username: bool = False,
    ) -> int:
        """
        Resets access attempts that match the given IP address or username.

        This method makes more sense for the DB backend, but as it is used by the ProxyHandler
        (via inheritance), it needs to be defined here so that all proxy methods stay compliant.

        Please overwrite it on each specialized handler as needed.
        """
        return 0

    def reset_logs(self, *, age_days: int = None) -> int:
        """
        Resets access logs that are older than the given number of days.

        This method makes more sense for the DB backend, but as it is used by the ProxyHandler
        (via inheritance), it needs to be defined here so that all proxy methods stay compliant.

        Please overwrite it on each specialized handler as needed.
        """
        return 0


class AxesHandler(AbstractAxesHandler, AxesBaseHandler):
    """
    Signal bare handler implementation without any storage backend.
    """

    def user_login_failed(self, sender, credentials: dict, request=None, **kwargs):
        pass

    def user_logged_in(self, sender, request, user, **kwargs):
        pass

    def user_logged_out(self, sender, request, user, **kwargs):
        pass

    def get_failures(self, request, credentials: dict = None) -> int:
        return 0
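# Editor's example (not part of the upstream file): a minimal sketch of a
# custom handler built on the classes above. The module `myproject` and the
# `BLOCKED_USERNAMES` set are hypothetical; a custom handler only has to
# implement the AbstractAxesHandler interface, extend the AxesBaseHandler
# mixin, and be activated via settings.AXES_HANDLER.
#
#     # myproject/handlers.py
#     from axes.handlers.base import AbstractAxesHandler, AxesBaseHandler
#
#     BLOCKED_USERNAMES = {"root", "admin"}  # hypothetical deny list
#
#     class BlocklistHandler(AbstractAxesHandler, AxesBaseHandler):
#         """Reports a failure for blocked usernames, nothing else."""
#
#         def get_failures(self, request, credentials: dict = None) -> int:
#             username = (credentials or {}).get("username")
#             return 1 if username in BLOCKED_USERNAMES else 0
#
#         def user_login_failed(self, sender, credentials: dict, request=None, **kwargs):
#             pass
#
#         def user_logged_in(self, sender, request, user, **kwargs):
#             pass
#
#         def user_logged_out(self, sender, request, user, **kwargs):
#             pass
#
#     # settings.py
#     AXES_HANDLER = "myproject.handlers.BlocklistHandler"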
@ -0,0 +1,174 @@
from logging import getLogger

from axes.conf import settings
from axes.handlers.base import AxesBaseHandler, AbstractAxesHandler
from axes.helpers import (
    get_cache,
    get_cache_timeout,
    get_client_cache_key,
    get_client_str,
    get_client_username,
    get_credentials,
    get_failure_limit,
)
from axes.models import AccessAttempt
from axes.signals import user_locked_out

log = getLogger(__name__)


class AxesCacheHandler(AbstractAxesHandler, AxesBaseHandler):
    """
    Signal handler implementation that records user login attempts to cache and locks users out if necessary.
    """

    def __init__(self):
        self.cache = get_cache()
        self.cache_timeout = get_cache_timeout()

    def reset_attempts(
        self,
        *,
        ip_address: str = None,
        username: str = None,
        ip_or_username: bool = False,
    ) -> int:
        cache_keys: list = []
        count = 0

        if ip_address is None and username is None:
            raise NotImplementedError("Cannot clear all entries from cache")
        if ip_or_username:
            raise NotImplementedError(
                "Due to the cache key ip_or_username=True is not supported"
            )

        cache_keys.extend(
            get_client_cache_key(
                AccessAttempt(username=username, ip_address=ip_address)
            )
        )

        for cache_key in cache_keys:
            deleted = self.cache.delete(cache_key)
            count += int(deleted) if deleted is not None else 1

        log.info("AXES: Reset %d access attempts from cache.", count)

        return count

    def get_failures(self, request, credentials: dict = None) -> int:
        cache_keys = get_client_cache_key(request, credentials)
        failure_count = max(
            self.cache.get(cache_key, default=0) for cache_key in cache_keys
        )
        return failure_count

    def user_login_failed(
        self, sender, credentials: dict, request=None, **kwargs
    ):  # pylint: disable=too-many-locals
        """
        When user login fails, save attempt record in cache and lock user out if necessary.

        :raises AxesSignalPermissionDenied: if user should be locked out.
        """

        if request is None:
            log.error(
                "AXES: AxesCacheHandler.user_login_failed does not function without a request."
            )
            return

        username = get_client_username(request, credentials)
        if settings.AXES_ONLY_USER_FAILURES and username is None:
            log.warning(
                "AXES: Username is None and AXES_ONLY_USER_FAILURES is enabled, new record will NOT be created."
            )
            return

        client_str = get_client_str(
            username,
            request.axes_ip_address,
            request.axes_user_agent,
            request.axes_path_info,
        )

        if self.is_whitelisted(request, credentials):
            log.info("AXES: Login failed from whitelisted client %s.", client_str)
            return

        failures_since_start = 1 + self.get_failures(request, credentials)

        if failures_since_start > 1:
            log.warning(
                "AXES: Repeated login failure by %s. Count = %d of %d. Updating existing record in the cache.",
                client_str,
                failures_since_start,
                get_failure_limit(request, credentials),
            )
        else:
            log.warning(
                "AXES: New login failure by %s. Creating new record in the cache.",
                client_str,
            )

        cache_keys = get_client_cache_key(request, credentials)
        for cache_key in cache_keys:
            failures = self.cache.get(cache_key, default=0)
            self.cache.set(cache_key, failures + 1, self.cache_timeout)

        if (
            settings.AXES_LOCK_OUT_AT_FAILURE
            and failures_since_start >= get_failure_limit(request, credentials)
        ):
            log.warning(
                "AXES: Locking out %s after repeated login failures.", client_str
            )

            request.axes_locked_out = True
            user_locked_out.send(
                "axes",
                request=request,
                username=username,
                ip_address=request.axes_ip_address,
            )

    def user_logged_in(
        self, sender, request, user, **kwargs
    ):  # pylint: disable=unused-argument
        """
        When user logs in, reset the failed attempt records from the cache if configured.
        """

        username = user.get_username()
        credentials = get_credentials(username)
        client_str = get_client_str(
            username,
            request.axes_ip_address,
            request.axes_user_agent,
            request.axes_path_info,
        )

        log.info("AXES: Successful login by %s.", client_str)

        if settings.AXES_RESET_ON_SUCCESS:
            cache_keys = get_client_cache_key(request, credentials)
            for cache_key in cache_keys:
                failures_since_start = self.cache.get(cache_key, default=0)
                self.cache.delete(cache_key)
                log.info(
                    "AXES: Deleted %d failed login attempts by %s from cache.",
                    failures_since_start,
                    client_str,
                )

    def user_logged_out(self, sender, request, user, **kwargs):
        username = user.get_username() if user else None
        client_str = get_client_str(
            username,
            request.axes_ip_address,
            request.axes_user_agent,
            request.axes_path_info,
        )

        log.info("AXES: Successful logout by %s.", client_str)
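# Editor's example (not part of the upstream file): a minimal settings sketch
# for enabling the cache handler above. The cache alias and Memcached backend
# are assumptions for illustration; any cache backend shared across processes
# with atomic get/set semantics should work, while a per-process local-memory
# cache would give each worker its own failure counts.
#
#     # settings.py
#     CACHES = {
#         "default": {"BACKEND": "django.core.cache.backends.locmem.LocMemCache"},
#         "axes": {
#             "BACKEND": "django.core.cache.backends.memcached.PyMemcacheCache",
#             "LOCATION": "127.0.0.1:11211",
#         },
#     }
#     AXES_HANDLER = "axes.handlers.cache.AxesCacheHandler"
#     AXES_CACHE = "axes"  # cache alias read by get_cache() above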
@ -0,0 +1,270 @@
from logging import getLogger

from django.db.models import Sum, Value, Q
from django.db.models.functions import Concat
from django.utils import timezone

from axes.attempts import (
    clean_expired_user_attempts,
    get_user_attempts,
    reset_user_attempts,
)
from axes.conf import settings
from axes.handlers.base import AxesBaseHandler, AbstractAxesHandler
from axes.helpers import (
    get_client_str,
    get_client_username,
    get_credentials,
    get_failure_limit,
    get_query_str,
)
from axes.models import AccessLog, AccessAttempt
from axes.signals import user_locked_out

log = getLogger(__name__)


class AxesDatabaseHandler(AbstractAxesHandler, AxesBaseHandler):
    """
    Signal handler implementation that records user login attempts to database and locks users out if necessary.

    .. note:: The get_user_attempts function is called several times during the authentication and lockout
              process, so caching its output can be dangerous.
    """

    def reset_attempts(
        self,
        *,
        ip_address: str = None,
        username: str = None,
        ip_or_username: bool = False,
    ) -> int:
        attempts = AccessAttempt.objects.all()

        if ip_or_username:
            attempts = attempts.filter(Q(ip_address=ip_address) | Q(username=username))
        else:
            if ip_address:
                attempts = attempts.filter(ip_address=ip_address)
            if username:
                attempts = attempts.filter(username=username)

        count, _ = attempts.delete()
        log.info("AXES: Reset %d access attempts from database.", count)

        return count

    def reset_logs(self, *, age_days: int = None) -> int:
        if age_days is None:
            count, _ = AccessLog.objects.all().delete()
            log.info("AXES: Reset all %d access logs from database.", count)
        else:
            limit = timezone.now() - timezone.timedelta(days=age_days)
            count, _ = AccessLog.objects.filter(attempt_time__lte=limit).delete()
            log.info(
                "AXES: Reset %d access logs older than %d days from database.",
                count,
                age_days,
            )

        return count

    def get_failures(self, request, credentials: dict = None) -> int:
        attempts_list = get_user_attempts(request, credentials)
        attempt_count = max(
            (
                attempts.aggregate(Sum("failures_since_start"))[
                    "failures_since_start__sum"
                ]
                or 0
            )
            for attempts in attempts_list
        )
        return attempt_count

    def user_login_failed(
        self, sender, credentials: dict, request=None, **kwargs
    ):  # pylint: disable=too-many-locals
        """
        When user login fails, save AccessAttempt record in database, mark request with lockout attribute and emit lockout signal.
        """

        log.info("AXES: User login failed, running database handler for failure.")

        if request is None:
            log.error(
                "AXES: AxesDatabaseHandler.user_login_failed does not function without a request."
            )
            return

        # 1. database query: Clean up expired user attempts from the database before logging new attempts
        clean_expired_user_attempts(request.axes_attempt_time)

        username = get_client_username(request, credentials)
        client_str = get_client_str(
            username,
            request.axes_ip_address,
            request.axes_user_agent,
            request.axes_path_info,
        )

        # This replaces null byte chars that crash saving failures.
        get_data = get_query_str(request.GET).replace("\0", "0x00")
        post_data = get_query_str(request.POST).replace("\0", "0x00")

        if self.is_whitelisted(request, credentials):
            log.info("AXES: Login failed from whitelisted client %s.", client_str)
            return

        # 2. database query: Get or create access record with the new failure data
        if settings.AXES_ONLY_USER_FAILURES and username is None:
            log.warning(
                "AXES: Username is None and AXES_ONLY_USER_FAILURES is enabled, new record will NOT be created."
            )
        else:
            attempt, created = AccessAttempt.objects.get_or_create(
                username=username,
                ip_address=request.axes_ip_address,
                user_agent=request.axes_user_agent,
                defaults={
                    "get_data": get_data,
                    "post_data": post_data,
                    "http_accept": request.axes_http_accept,
                    "path_info": request.axes_path_info,
                    "failures_since_start": 1,
                    "attempt_time": request.axes_attempt_time,
                },
            )

            # Record failed attempt with all the relevant information.
            # Filtering based on username, IP address and user agent is handled elsewhere,
            # and this handler just records the available information for further use.
            if created:
                log.warning(
                    "AXES: New login failure by %s. Created new record in the database.",
                    client_str,
                )

            # 3. database query if there were previous attempts in the database
            # Update failed attempt information but do not touch the username, IP address, or user agent fields,
            # because attackers can request the site with multiple different configurations
            # in order to bypass the defense mechanisms that are used by the site.
            else:
                separator = "\n---------\n"

                attempt.get_data = Concat("get_data", Value(separator + get_data))
                attempt.post_data = Concat("post_data", Value(separator + post_data))
                attempt.http_accept = request.axes_http_accept
                attempt.path_info = request.axes_path_info
                attempt.failures_since_start += 1
                attempt.attempt_time = request.axes_attempt_time
                attempt.save()

                log.warning(
                    "AXES: Repeated login failure by %s. Count = %d of %d. Updated existing record in the database.",
                    client_str,
                    attempt.failures_since_start,
                    get_failure_limit(request, credentials),
                )

        # 3. or 4. database query: Calculate the current maximum failure number from the existing attempts
        failures_since_start = self.get_failures(request, credentials)

        if (
            settings.AXES_LOCK_OUT_AT_FAILURE
            and failures_since_start >= get_failure_limit(request, credentials)
        ):
            log.warning(
                "AXES: Locking out %s after repeated login failures.", client_str
            )

            request.axes_locked_out = True
            user_locked_out.send(
                "axes",
                request=request,
                username=username,
                ip_address=request.axes_ip_address,
            )

    def user_logged_in(
        self, sender, request, user, **kwargs
    ):  # pylint: disable=unused-argument
        """
        When user logs in, update the AccessLog related to the user.
        """

        # 1. database query: Clean up expired user attempts from the database
        clean_expired_user_attempts(request.axes_attempt_time)

        username = user.get_username()
        credentials = get_credentials(username)
        client_str = get_client_str(
            username,
            request.axes_ip_address,
            request.axes_user_agent,
            request.axes_path_info,
        )

        log.info("AXES: Successful login by %s.", client_str)

        if not settings.AXES_DISABLE_ACCESS_LOG:
            # 2. database query: Insert new access logs with login time
            AccessLog.objects.create(
                username=username,
                ip_address=request.axes_ip_address,
                user_agent=request.axes_user_agent,
                http_accept=request.axes_http_accept,
                path_info=request.axes_path_info,
                attempt_time=request.axes_attempt_time,
            )

        if settings.AXES_RESET_ON_SUCCESS:
            # 3. database query: Reset failed attempts for the logging in user
            count = reset_user_attempts(request, credentials)
            log.info(
                "AXES: Deleted %d failed login attempts by %s from database.",
                count,
                client_str,
            )

    def user_logged_out(
        self, sender, request, user, **kwargs
    ):  # pylint: disable=unused-argument
        """
        When user logs out, update the AccessLog related to the user.
        """

        # 1. database query: Clean up expired user attempts from the database
        clean_expired_user_attempts(request.axes_attempt_time)

        username = user.get_username() if user else None
        client_str = get_client_str(
            username,
            request.axes_ip_address,
            request.axes_user_agent,
            request.axes_path_info,
        )

        log.info("AXES: Successful logout by %s.", client_str)

        if username and not settings.AXES_DISABLE_ACCESS_LOG:
            # 2. database query: Update existing attempt logs with logout time
            AccessLog.objects.filter(
                username=username, logout_time__isnull=True
            ).update(logout_time=request.axes_attempt_time)

    def post_save_access_attempt(self, instance, **kwargs):
        """
        Handles the ``axes.models.AccessAttempt`` object post save signal.

        When needed, all post_save actions for this backend should be located
        here.
        """

    def post_delete_access_attempt(self, instance, **kwargs):
        """
        Handles the ``axes.models.AccessAttempt`` object post delete signal.

        When needed, all post_delete actions for this backend should be located
        here.
        """
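# Editor's example (not part of the upstream file): resetting recorded attempts
# through the handler API above, e.g. from a Django shell. The call is routed
# through AxesProxyHandler to whichever handler settings.AXES_HANDLER selects;
# the IP address and the 30-day cutoff are only illustrations.
#
#     from axes.handlers.proxy import AxesProxyHandler
#
#     # Delete all AccessAttempt rows recorded for one client IP:
#     deleted = AxesProxyHandler.reset_attempts(ip_address="127.0.0.1")
#     print(f"removed {deleted} attempt records")
#
#     # Drop AccessLog rows older than 30 days:
#     AxesProxyHandler.reset_logs(age_days=30)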
@ -0,0 +1,22 @@
from axes.handlers.base import AxesBaseHandler, AbstractAxesHandler


class AxesDummyHandler(AbstractAxesHandler, AxesBaseHandler):
    """
    Signal handler implementation that does nothing and can be used to disable signal processing.
    """

    def is_allowed(self, request, credentials: dict = None) -> bool:
        return True

    def user_login_failed(self, sender, credentials: dict, request=None, **kwargs):
        pass

    def user_logged_in(self, sender, request, user, **kwargs):
        pass

    def user_logged_out(self, sender, request, user, **kwargs):
        pass

    def get_failures(self, request, credentials: dict = None) -> int:
        return 0
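# Editor's note (not part of the upstream file): a sketch of when the dummy
# handler is useful. Pointing AXES_HANDLER at it keeps the Axes middleware and
# signal wiring in place while every check short-circuits; this is a softer
# switch than AXES_ENABLED = False, which skips the signal receivers entirely.
#
#     # settings.py
#     AXES_HANDLER = "axes.handlers.dummy.AxesDummyHandler"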
@ -0,0 +1,124 @@
from logging import getLogger

from django.utils.module_loading import import_string
from django.utils.timezone import now

from axes.conf import settings
from axes.handlers.base import AxesBaseHandler, AbstractAxesHandler, AxesHandler
from axes.helpers import (
    get_client_ip_address,
    get_client_user_agent,
    get_client_path_info,
    get_client_http_accept,
    toggleable,
)

log = getLogger(__name__)


class AxesProxyHandler(AbstractAxesHandler, AxesBaseHandler):
    """
    Proxy interface for the configurable Axes signal handler class.

    If you wish to implement a custom version of this handler,
    you can override the settings.AXES_HANDLER configuration string
    with a class that implements a compatible interface and methods.

    Defaults to using axes.handlers.proxy.AxesProxyHandler if not overridden.
    Refer to axes.handlers.proxy.AxesProxyHandler for the default implementation.
    """

    implementation = None  # type: AxesHandler

    @classmethod
    def get_implementation(cls, force: bool = False) -> AxesHandler:
        """
        Fetch and initialize the configured handler implementation and memoize it to avoid reinitialization.

        This method is re-entrant and can be called multiple times from e.g. the Django application loader.
        """

        if force or not cls.implementation:
            cls.implementation = import_string(settings.AXES_HANDLER)()
        return cls.implementation

    @classmethod
    def reset_attempts(
        cls,
        *,
        ip_address: str = None,
        username: str = None,
        ip_or_username: bool = False,
    ) -> int:
        return cls.get_implementation().reset_attempts(
            ip_address=ip_address, username=username, ip_or_username=ip_or_username
        )

    @classmethod
    def reset_logs(cls, *, age_days: int = None) -> int:
        return cls.get_implementation().reset_logs(age_days=age_days)

    @staticmethod
    def update_request(request):
        """
        Update request attributes before passing them into the selected handler class.
        """

        if request is None:
            log.error(
                "AXES: AxesProxyHandler.update_request can not set request attributes to a None request"
            )
            return

        if not hasattr(request, "axes_updated"):
            request.axes_locked_out = False
            request.axes_attempt_time = now()
            request.axes_ip_address = get_client_ip_address(request)
            request.axes_user_agent = get_client_user_agent(request)
            request.axes_path_info = get_client_path_info(request)
            request.axes_http_accept = get_client_http_accept(request)
            request.axes_updated = True

    @classmethod
    def is_locked(cls, request, credentials: dict = None) -> bool:
        cls.update_request(request)
        return cls.get_implementation().is_locked(request, credentials)

    @classmethod
    def is_allowed(cls, request, credentials: dict = None) -> bool:
        cls.update_request(request)
        return cls.get_implementation().is_allowed(request, credentials)

    @classmethod
    def get_failures(cls, request, credentials: dict = None) -> int:
        cls.update_request(request)
        return cls.get_implementation().get_failures(request, credentials)

    @classmethod
    @toggleable
    def user_login_failed(cls, sender, credentials: dict, request=None, **kwargs):
        cls.update_request(request)
        return cls.get_implementation().user_login_failed(
            sender, credentials, request, **kwargs
        )

    @classmethod
    @toggleable
    def user_logged_in(cls, sender, request, user, **kwargs):
        cls.update_request(request)
        return cls.get_implementation().user_logged_in(sender, request, user, **kwargs)

    @classmethod
    @toggleable
    def user_logged_out(cls, sender, request, user, **kwargs):
        cls.update_request(request)
        return cls.get_implementation().user_logged_out(sender, request, user, **kwargs)

    @classmethod
    @toggleable
    def post_save_access_attempt(cls, instance, **kwargs):
        return cls.get_implementation().post_save_access_attempt(instance, **kwargs)

    @classmethod
    @toggleable
    def post_delete_access_attempt(cls, instance, **kwargs):
        return cls.get_implementation().post_delete_access_attempt(instance, **kwargs)
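# Editor's example (not part of the upstream file): how the memoized proxy is
# typically exercised, e.g. from a view or a test. update_request() decorates
# the request with the axes_* attributes on first use, and get_implementation()
# instantiates the class named by settings.AXES_HANDLER exactly once.
#
#     from axes.handlers.proxy import AxesProxyHandler
#
#     def login_view(request):
#         if not AxesProxyHandler.is_allowed(request):
#             ...  # return the lockout response here
#         ...  # normal login handling
#
#     # After changing AXES_HANDLER at runtime (e.g. in tests), force a reload:
#     AxesProxyHandler.get_implementation(force=True)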
@ -0,0 +1,25 @@
from axes.handlers.base import AxesHandler


class AxesTestHandler(AxesHandler):  # pylint: disable=unused-argument
    """
    Signal handler implementation that does nothing, ideal for a test suite.
    """

    def reset_attempts(
        self,
        *,
        ip_address: str = None,
        username: str = None,
        ip_or_username: bool = False,
    ) -> int:
        return 0

    def reset_logs(self, *, age_days: int = None) -> int:
        return 0

    def is_allowed(self, request, credentials: dict = None) -> bool:
        return True

    def get_failures(self, request, credentials: dict = None) -> int:
        return 0
@ -0,0 +1,517 @@
from datetime import timedelta
from hashlib import md5
from logging import getLogger
from string import Template
from typing import Callable, Optional, Type, Union
from urllib.parse import urlencode

import ipware.ip
from django.core.cache import caches, BaseCache
from django.http import HttpRequest, HttpResponse, JsonResponse, QueryDict
from django.shortcuts import render, redirect
from django.utils.module_loading import import_string

from axes.conf import settings
from axes.models import AccessBase

log = getLogger(__name__)


def get_cache() -> BaseCache:
    """
    Get the cache instance Axes is configured to use with ``settings.AXES_CACHE`` and use ``'default'`` if not set.
    """

    return caches[getattr(settings, "AXES_CACHE", "default")]


def get_cache_timeout() -> Optional[int]:
    """
    Return the cache timeout interpreted from settings.AXES_COOLOFF_TIME.

    The cache timeout can be either None if not configured or an integer of seconds if configured.

    Notice that settings.AXES_COOLOFF_TIME can be None, timedelta, integer, callable, or str path,
    and this function offers a unified _integer or None_ representation of that configuration
    for use with the Django cache backends.
    """

    cool_off = get_cool_off()
    if cool_off is None:
        return None
    return int(cool_off.total_seconds())


def get_cool_off() -> Optional[timedelta]:
    """
    Return the login cool off time interpreted from settings.AXES_COOLOFF_TIME.

    The return value is either None or timedelta.

    Notice that settings.AXES_COOLOFF_TIME is either None, timedelta, or an integer of hours,
    and this function offers a unified _timedelta or None_ representation of that configuration
    for use with the Axes internal implementations.

    :exception TypeError: if settings.AXES_COOLOFF_TIME is of the wrong type.
    """

    cool_off = settings.AXES_COOLOFF_TIME

    if isinstance(cool_off, int):
        return timedelta(hours=cool_off)
    if isinstance(cool_off, str):
        return import_string(cool_off)()
    if callable(cool_off):
        return cool_off()

    return cool_off


def get_cool_off_iso8601(delta: timedelta) -> str:
    """
    Return datetime.timedelta translated to an ISO 8601 formatted duration for use in e.g. cool offs.
    """

    seconds = delta.total_seconds()
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)

    days_str = f"{days:.0f}D" if days else ""

    time_str = "".join(
        f"{value:.0f}{designator}"
        for value, designator in [[hours, "H"], [minutes, "M"], [seconds, "S"]]
        if value
    )

    if time_str:
        return f"P{days_str}T{time_str}"
    return f"P{days_str}"
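# Editor's example (not part of the upstream file): expected outputs of
# get_cool_off_iso8601 for a few inputs, traced from the divmod logic above.
#
#     from datetime import timedelta
#
#     get_cool_off_iso8601(timedelta(minutes=30))          # "PT30M"
#     get_cool_off_iso8601(timedelta(hours=2, seconds=5))  # "PT2H5S"
#     get_cool_off_iso8601(timedelta(days=1, hours=12))    # "P1DT12H"
#     get_cool_off_iso8601(timedelta(days=2))              # "P2D"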
def get_credentials(username: str = None, **kwargs) -> dict:
    """
    Calculate credentials for Axes to use internally from given username and kwargs.

    Axes will set the username value into the key defined with ``settings.AXES_USERNAME_FORM_FIELD``
    and update the credentials dictionary with the kwargs given on top of that.
    """

    credentials = {settings.AXES_USERNAME_FORM_FIELD: username}
    credentials.update(kwargs)
    return credentials


def get_client_username(request, credentials: dict = None) -> str:
    """
    Resolve client username from the given request or credentials if supplied.

    The order of preference for fetching the username is as follows:

    1. If configured, use ``AXES_USERNAME_CALLABLE``, and supply ``request, credentials`` as arguments
    2. If given, use ``credentials`` and fetch username from ``AXES_USERNAME_FORM_FIELD`` (defaults to ``username``)
    3. Use request.POST and fetch username from ``AXES_USERNAME_FORM_FIELD`` (defaults to ``username``)

    :param request: incoming Django ``HttpRequest`` or similar object from authentication backend or other source
    :param credentials: incoming credentials ``dict`` or similar object from authentication backend or other source
    """

    if settings.AXES_USERNAME_CALLABLE:
        log.debug("Using settings.AXES_USERNAME_CALLABLE to get username")

        if callable(settings.AXES_USERNAME_CALLABLE):
            return settings.AXES_USERNAME_CALLABLE(request, credentials)
        if isinstance(settings.AXES_USERNAME_CALLABLE, str):
            return import_string(settings.AXES_USERNAME_CALLABLE)(request, credentials)
        raise TypeError(
            "settings.AXES_USERNAME_CALLABLE needs to be a string, callable, or None."
        )

    if credentials:
        log.debug(
            "Using parameter credentials to get username with key settings.AXES_USERNAME_FORM_FIELD"
        )
        return credentials.get(settings.AXES_USERNAME_FORM_FIELD, None)

    log.debug(
        "Using parameter request.POST to get username with key settings.AXES_USERNAME_FORM_FIELD"
    )

    request_data = getattr(request, "data", request.POST)
    return request_data.get(settings.AXES_USERNAME_FORM_FIELD, None)
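# Editor's example (not part of the upstream file): a sketch of an
# AXES_USERNAME_CALLABLE that normalizes usernames before lookup, so that
# "User@Example.com" and "user@example.com" count against the same record.
# The module path and function name are hypothetical.
#
#     # myproject/axes_callables.py
#     def normalized_username(request, credentials):
#         username = (credentials or {}).get("username") or ""
#         return username.strip().lower() or None
#
#     # settings.py
#     AXES_USERNAME_CALLABLE = "myproject.axes_callables.normalized_username"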
def get_client_ip_address(request) -> str:
    """
    Get client IP address as configured by the user.

    The django-ipware package is used for address resolution
    and parameters can be configured in the Axes package.
    """

    client_ip_address, _ = ipware.ip.get_client_ip(
        request,
        proxy_order=settings.AXES_PROXY_ORDER,
        proxy_count=settings.AXES_PROXY_COUNT,
        proxy_trusted_ips=settings.AXES_PROXY_TRUSTED_IPS,
        request_header_order=settings.AXES_META_PRECEDENCE_ORDER,
    )

    return client_ip_address


def get_client_user_agent(request) -> str:
    return request.META.get("HTTP_USER_AGENT", "<unknown>")[:255]


def get_client_path_info(request) -> str:
    return request.META.get("PATH_INFO", "<unknown>")[:255]


def get_client_http_accept(request) -> str:
    return request.META.get("HTTP_ACCEPT", "<unknown>")[:1025]


def get_client_parameters(username: str, ip_address: str, user_agent: str) -> list:
    """
    Get query parameters for filtering the AccessAttempt queryset.

    This method returns a dict that guarantees iteration order for keys and values,
    and can so be used in e.g. the generation of hash keys or other deterministic functions.

    Returns a list of dicts; every item of the list is a separate set of filter parameters.
    """

    if settings.AXES_ONLY_USER_FAILURES:
        # 1. Only individual usernames can be tracked with parametrization
        filter_query = [{"username": username}]
    else:
        if settings.AXES_LOCK_OUT_BY_USER_OR_IP:
            # One of `username` or `IP address` is used
            filter_query = [{"username": username}, {"ip_address": ip_address}]
        elif settings.AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP:
            # 2. A combination of username and IP address can be used as well
            filter_query = [{"username": username, "ip_address": ip_address}]
        else:
            # 3. Default case is to track the IP address only, which is the most secure option
            filter_query = [{"ip_address": ip_address}]

    if settings.AXES_USE_USER_AGENT:
        # 4. The HTTP User-Agent can be used to track e.g. one browser
        filter_query.append({"user_agent": user_agent})

    return filter_query


def make_cache_key_list(filter_kwargs_list):
    cache_keys = []
    for filter_kwargs in filter_kwargs_list:
        cache_key_components = "".join(
            value for value in filter_kwargs.values() if value
        )
        cache_key_digest = md5(cache_key_components.encode()).hexdigest()
        cache_keys.append(f"axes-{cache_key_digest}")
    return cache_keys


def get_client_cache_key(
    request_or_attempt: Union[HttpRequest, AccessBase], credentials: dict = None
) -> list:
    """
    Build cache key names from a request or AccessAttempt object.

    :param request_or_attempt: HttpRequest or AccessAttempt object
    :param credentials: credentials containing user information
    :return cache_keys: list of hash keys that are usable for the Django cache backends
    """

    if isinstance(request_or_attempt, AccessBase):
        username = request_or_attempt.username
        ip_address = request_or_attempt.ip_address
        user_agent = request_or_attempt.user_agent
    else:
        username = get_client_username(request_or_attempt, credentials)
        ip_address = get_client_ip_address(request_or_attempt)
        user_agent = get_client_user_agent(request_or_attempt)

    filter_kwargs_list = get_client_parameters(username, ip_address, user_agent)

    return make_cache_key_list(filter_kwargs_list)
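# Editor's example (not part of the upstream file): how the parametrization and
# hashing above combine. With the default lockout settings (track by IP address
# only), a single MD5-derived key is produced per client; the values below are
# illustrative.
#
#     params = get_client_parameters("jane", "10.0.0.7", "curl/7.79")
#     # -> [{"ip_address": "10.0.0.7"}] with the default lockout settings
#
#     make_cache_key_list(params)
#     # -> ["axes-<md5 hexdigest of '10.0.0.7'>"], one key per parameter dict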
def get_client_str(
    username: str, ip_address: str, user_agent: str, path_info: str
) -> str:
    """
    Get a readable string that can be used in e.g. logging to distinguish client requests.

    An example log format would be
    ``{username: "example", ip_address: "127.0.0.1", path_info: "/example/"}``
    """

    if settings.AXES_CLIENT_STR_CALLABLE:
        log.debug("Using settings.AXES_CLIENT_STR_CALLABLE to get client string.")

        if callable(settings.AXES_CLIENT_STR_CALLABLE):
            return settings.AXES_CLIENT_STR_CALLABLE(
                username, ip_address, user_agent, path_info
            )
        if isinstance(settings.AXES_CLIENT_STR_CALLABLE, str):
            return import_string(settings.AXES_CLIENT_STR_CALLABLE)(
                username, ip_address, user_agent, path_info
            )
        raise TypeError(
            "settings.AXES_CLIENT_STR_CALLABLE needs to be a string, callable or None."
        )

    client_dict = dict()

    if settings.AXES_VERBOSE:
        # Verbose mode logs every attribute that is available
        client_dict["username"] = username
        client_dict["ip_address"] = ip_address
        client_dict["user_agent"] = user_agent
    else:
        # Other modes initialize the attributes that are used for the actual lockouts
        client_list = get_client_parameters(username, ip_address, user_agent)
        client_dict = {}
        for client in client_list:
            client_dict.update(client)

    # Path info is always included as the last component in the client string for traceability purposes
    if path_info and isinstance(path_info, (tuple, list)):
        path_info = path_info[0]
    client_dict["path_info"] = path_info

    # Template the internal dictionary representation into a readable and concatenated {key: "value"} format
    template = Template('$key: "$value"')
    items = [{"key": k, "value": v} for k, v in client_dict.items()]
    client_str = ", ".join(template.substitute(item) for item in items)
    client_str = "{" + client_str + "}"
    return client_str


def cleanse_parameters(params: dict) -> dict:
    """
    Replace sensitive parameter values in a parameter dict with
    a safe placeholder value.

    Parameters named ``'password'`` will always be cleansed. Additionally,
    parameters named in ``settings.AXES_SENSITIVE_PARAMETERS`` and
    ``settings.AXES_PASSWORD_FORM_FIELD`` will be cleansed.

    This is used to prevent passwords and similar values from
    being logged in cleartext.
    """
    sensitive_parameters = ["password"] + settings.AXES_SENSITIVE_PARAMETERS
    if settings.AXES_PASSWORD_FORM_FIELD:
        sensitive_parameters.append(settings.AXES_PASSWORD_FORM_FIELD)

    if sensitive_parameters:
        cleansed = params.copy()
        for param in sensitive_parameters:
            if param in cleansed:
                cleansed[param] = "********************"
        return cleansed
    return params


def get_query_str(query: Type[QueryDict], max_length: int = 1024) -> str:
    """
    Turns a query dictionary into an easy-to-read list of key-value pairs.

    If a field is called either ``'password'`` or ``settings.AXES_PASSWORD_FORM_FIELD``, or if the field name is included
    in ``settings.AXES_SENSITIVE_PARAMETERS``, its value will be masked.

    The length of the output is limited to max_length to avoid a DoS attack via excessively large payloads.
    """

    query_dict = cleanse_parameters(query.copy())

    template = Template("$key=$value")
    items = [{"key": k, "value": v} for k, v in query_dict.items()]
    query_str = "\n".join(template.substitute(item) for item in items)

    return query_str[:max_length]
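# Editor's example (not part of the upstream file): what the cleansing above
# produces for a login POST, assuming the default sensitive parameter names.
#
#     cleanse_parameters({"username": "jane", "password": "hunter2"})
#     # -> {"username": "jane", "password": "********************"}
#
#     # get_query_str renders the cleansed pairs one per line:
#     # username=jane
#     # password=********************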
def get_failure_limit(request, credentials) -> int:
    if callable(settings.AXES_FAILURE_LIMIT):
        return settings.AXES_FAILURE_LIMIT(request, credentials)
    if isinstance(settings.AXES_FAILURE_LIMIT, str):
        return import_string(settings.AXES_FAILURE_LIMIT)(request, credentials)
    if isinstance(settings.AXES_FAILURE_LIMIT, int):
        return settings.AXES_FAILURE_LIMIT
    raise TypeError("settings.AXES_FAILURE_LIMIT needs to be a callable or an integer")


def get_lockout_message() -> str:
    if settings.AXES_COOLOFF_TIME:
        return settings.AXES_COOLOFF_MESSAGE
    return settings.AXES_PERMALOCK_MESSAGE


def get_lockout_response(request, credentials: dict = None) -> HttpResponse:
    if settings.AXES_LOCKOUT_CALLABLE:
        if callable(settings.AXES_LOCKOUT_CALLABLE):
            return settings.AXES_LOCKOUT_CALLABLE(request, credentials)
        if isinstance(settings.AXES_LOCKOUT_CALLABLE, str):
            return import_string(settings.AXES_LOCKOUT_CALLABLE)(request, credentials)
        raise TypeError(
            "settings.AXES_LOCKOUT_CALLABLE needs to be a string, callable, or None."
        )

    status = 403
    context = {
        "failure_limit": get_failure_limit(request, credentials),
        "username": get_client_username(request, credentials) or "",
    }

    cool_off = get_cool_off()
    if cool_off:
        context.update(
            {
                "cooloff_time": get_cool_off_iso8601(
                    cool_off
                ),  # differing old name is kept for backwards compatibility
                "cooloff_timedelta": cool_off,
            }
        )

    if request.META.get("HTTP_X_REQUESTED_WITH") == "XMLHttpRequest":
        json_response = JsonResponse(context, status=status)
        json_response[
            "Access-Control-Allow-Origin"
        ] = settings.AXES_ALLOWED_CORS_ORIGINS
        json_response["Access-Control-Allow-Methods"] = "POST, OPTIONS"
        json_response[
            "Access-Control-Allow-Headers"
        ] = "Origin, Content-Type, Accept, Authorization, x-requested-with"
        return json_response

    if settings.AXES_LOCKOUT_TEMPLATE:
        return render(request, settings.AXES_LOCKOUT_TEMPLATE, context, status=status)

    if settings.AXES_LOCKOUT_URL:
        lockout_url = settings.AXES_LOCKOUT_URL
        query_string = urlencode({"username": context["username"]})
        url = "{}?{}".format(lockout_url, query_string)
        return redirect(url)

    return HttpResponse(get_lockout_message(), status=status)
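# Editor's example (not part of the upstream file): a sketch of an
# AXES_LOCKOUT_CALLABLE returning a custom JSON payload instead of the default
# responses built above. The module path is hypothetical.
#
#     # myproject/axes_callables.py
#     from django.http import JsonResponse
#
#     def lockout_response(request, credentials):
#         return JsonResponse({"detail": "Too many login attempts."}, status=429)
#
#     # settings.py
#     AXES_LOCKOUT_CALLABLE = "myproject.axes_callables.lockout_response"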
def is_ip_address_in_whitelist(ip_address: str) -> bool:
    if not settings.AXES_IP_WHITELIST:
        return False

    return ip_address in settings.AXES_IP_WHITELIST


def is_ip_address_in_blacklist(ip_address: str) -> bool:
    if not settings.AXES_IP_BLACKLIST:
        return False

    return ip_address in settings.AXES_IP_BLACKLIST


def is_client_ip_address_whitelisted(request):
    """
    Check if the given request refers to a whitelisted IP.
    """

    if settings.AXES_NEVER_LOCKOUT_WHITELIST and is_ip_address_in_whitelist(
        request.axes_ip_address
    ):
        return True

    if settings.AXES_ONLY_WHITELIST and is_ip_address_in_whitelist(
        request.axes_ip_address
    ):
        return True

    return False


def is_client_ip_address_blacklisted(request) -> bool:
    """
    Check if the given request refers to a blacklisted IP.
    """

    if is_ip_address_in_blacklist(request.axes_ip_address):
        return True

    if settings.AXES_ONLY_WHITELIST and not is_ip_address_in_whitelist(
        request.axes_ip_address
    ):
        return True

    return False


def is_client_method_whitelisted(request) -> bool:
    """
    Check if the given request uses a whitelisted method.
    """

    if settings.AXES_NEVER_LOCKOUT_GET and request.method == "GET":
        return True

    return False


def is_user_attempt_whitelisted(request, credentials: dict = None) -> bool:
    """
    Check if the given request or credentials refer to a whitelisted username.

    This method invokes the ``settings.AXES_WHITELIST_CALLABLE``
    with ``request`` and ``credentials`` arguments.

    This function could use the following implementation for checking
    the lockout flags from a specific property in the user object:

    .. code-block:: python

        username_value = get_client_username(request, credentials)
        username_field = getattr(
            get_user_model(),
            "USERNAME_FIELD",
            "username"
        )
        kwargs = {username_field: username_value}

        user_model = get_user_model()
        user = user_model.objects.get(**kwargs)
        return user.nolockout
    """

    whitelist_callable = settings.AXES_WHITELIST_CALLABLE
    if whitelist_callable is None:
        return False
    if callable(whitelist_callable):
        return whitelist_callable(request, credentials)
    if isinstance(whitelist_callable, str):
        return import_string(whitelist_callable)(request, credentials)

    raise TypeError(
        "settings.AXES_WHITELIST_CALLABLE needs to be a string, callable, or None."
    )


def toggleable(func) -> Callable:
    """
    Decorator that toggles function execution based on settings.

    If the ``settings.AXES_ENABLED`` flag is set to ``False``
    the decorated function never runs and None is returned.

    This decorator is only suitable for functions that do not
    require return values to be passed back to callers.
    """

    def inner(*args, **kwargs):  # pylint: disable=inconsistent-return-statements
        if settings.AXES_ENABLED:
            return func(*args, **kwargs)

    return inner
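# Editor's example (not part of the upstream file): the toggleable decorator in
# isolation. With AXES_ENABLED = False the wrapped function is skipped and the
# call evaluates to None, which is why it only suits fire-and-forget receivers.
#
#     @toggleable
#     def on_failure(sender, **kwargs):
#         ...  # record the failure
#
#     on_failure(None)  # no-op returning None when settings.AXES_ENABLED is False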
Binary file not shown.
@ -0,0 +1,97 @@
# SOME DESCRIPTIVE TITLE.
# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
# This file is distributed under the same license as the PACKAGE package.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2018-07-17 15:56+0200\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"Language: \n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"

#: axes/admin.py:38
msgid "Form Data"
msgstr "Form-Daten"

#: axes/admin.py:41 axes/admin.py:95
msgid "Meta Data"
msgstr "Meta-Daten"

#: axes/conf.py:58
msgid "Account locked: too many login attempts. Please try again later."
msgstr ""
"Zugang gesperrt: zu viele fehlgeschlagene Anmeldeversuche. Bitte versuchen "
"Sie es später erneut."

#: axes/conf.py:61
msgid ""
"Account locked: too many login attempts. Contact an admin to unlock your "
"account."
msgstr ""
"Zugang gesperrt: zu viele fehlgeschlagene Anmeldeversuche. Kontaktieren Sie "
"einen Administrator, um Ihren Zugang zu entsperren."

#: axes/models.py:9
msgid "User Agent"
msgstr "Browserkennung"

#: axes/models.py:15
msgid "IP Address"
msgstr "IP-Adresse"

#: axes/models.py:21
msgid "Username"
msgstr "Benutzername"

#: axes/models.py:35
msgid "HTTP Accept"
msgstr ""

#: axes/models.py:40
msgid "Path"
msgstr "Pfad"

#: axes/models.py:45
msgid "Attempt Time"
msgstr "Zugriffszeitpunkt"

#: axes/models.py:57
msgid "GET Data"
msgstr "GET-Daten"

#: axes/models.py:61
msgid "POST Data"
msgstr "POST-Daten"

#: axes/models.py:65
msgid "Failed Logins"
msgstr "Fehlgeschlagene Anmeldeversuche"

#: axes/models.py:76
msgid "access attempt"
msgstr "Zugriffsversuch"

#: axes/models.py:77
msgid "access attempts"
msgstr "Zugriffsversuche"

#: axes/models.py:81
msgid "Logout Time"
msgstr "Abmeldezeitpunkt"

#: axes/models.py:90
msgid "access log"
msgstr "Zugriffslog"

#: axes/models.py:91
msgid "access logs"
msgstr "Zugriffslogs"
Binary file not shown.
@ -0,0 +1,100 @@
# SOME DESCRIPTIVE TITLE.
# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
# This file is distributed under the same license as the PACKAGE package.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#
msgid ""
msgstr ""
"Project-Id-Version: \n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2021-06-11 23:36+0200\n"
"PO-Revision-Date: 2021-06-16 10:51+0300\n"
"Language: pl\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=4; plural=(n==1 ? 0 : (n%10>=2 && n%10<=4) && (n"
"%100<12 || n%100>14) ? 1 : n!=1 && (n%10>=0 && n%10<=1) || (n%10>=5 && n"
"%10<=9) || (n%100>=12 && n%100<=14) ? 2 : 3);\n"
"Last-Translator: \n"
"Language-Team: \n"
"X-Generator: Poedit 3.0\n"

#: .\axes\admin.py:26
msgid "Form Data"
msgstr "Dane formularza"

#: .\axes\admin.py:27 .\axes\admin.py:64
msgid "Meta Data"
msgstr "Metadane"

#: .\axes\conf.py:89
msgid "Account locked: too many login attempts. Please try again later."
msgstr ""
"Konto zablokowane: zbyt wiele prób logowania. Spróbuj ponownie później."

#: .\axes\conf.py:97
msgid ""
"Account locked: too many login attempts. Contact an admin to unlock your "
"account."
msgstr ""
"Konto zablokowane: zbyt wiele prób logowania. Skontaktuj się z "
"administratorem, aby odblokować swoje konto."

#: .\axes\models.py:6
#, fuzzy
msgid "User Agent"
msgstr "User Agent"

#: .\axes\models.py:8
msgid "IP Address"
msgstr "Adres IP"

#: .\axes\models.py:10
msgid "Username"
msgstr "Nazwa Użytkownika"

#: .\axes\models.py:12
#, fuzzy
msgid "HTTP Accept"
msgstr "HTTP Accept"

#: .\axes\models.py:14
msgid "Path"
msgstr "Ścieżka"

#: .\axes\models.py:16
msgid "Attempt Time"
msgstr "Czas wystąpienia"

#: .\axes\models.py:25
msgid "GET Data"
msgstr "Dane GET"

#: .\axes\models.py:27
msgid "POST Data"
msgstr "Dane POST"

#: .\axes\models.py:29
msgid "Failed Logins"
msgstr "Nieudane logowania"

#: .\axes\models.py:35
msgid "access attempt"
msgstr "próba dostępu"

#: .\axes\models.py:36
msgid "access attempts"
msgstr "próby dostępu"

#: .\axes\models.py:40
msgid "Logout Time"
msgstr "Czas wylogowania"

#: .\axes\models.py:46
msgid "access log"
msgstr "dziennik logowania"

#: .\axes\models.py:47
msgid "access logs"
msgstr "dzienniki logowania"
Some files were not shown because too many files have changed in this diff