add graylog config (#23)

* add additional chat gpt request error

* add graylog config
This commit is contained in:
Dmitry Afanasyev 2023-10-03 09:19:33 +03:00 committed by GitHub
parent ae6cdb896a
commit e465d71320
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
12 changed files with 103 additions and 53 deletions

View File

@ -65,9 +65,9 @@ methods:
## Chat:
```shell
cd bot_microservice
python3 run.py
docker run -p 8858:8858 -it --name freegpt --rm -e CHAT_PATH=/chat balshdocker/freegpt:latest
```
Open http://localhost:8858/chat/
```bash
@ -85,11 +85,13 @@ gunicorn main:create_app --workers 10 --bind 0.0.0.0:8083 --worker-class uvicorn
### Run local tests:
```bash
poetry run pytest
cd bot_microservice
STAGE=runtests poetry run pytest
```
### Run tests in docker compose:
```bash
cd bot_microservice
STAGE=runtests docker compose run bot bash -c "coverage run -m pytest -vv --exitfirst && poetry run coverage report"
```
@ -101,14 +103,17 @@ Docs can be found at
on local start can be found at http://localhost/gpt/api/docs
prod docs https://bot.mywistr.ru/gpt/api/docs/
## Help article
[Пишем асинхронного Телеграм-бота](https://habr.com/ru/company/kts/blog/598575/)
[Следить за обновлениями этого репозитория](https://github.com/fantasy-peak/cpp-freegpt-webui)
## TODO
- [x] Добавить очередь сообщений
- [x] Исправить запуск локально
- [x] Добавить тестов
- [x] Close connection
- [ ] Добавить базу данных с моделями
- [ ] Добавить миграции через alembic
- [ ] Добавить веса моделей и их смену
- [ ] Добавить тестов
- [ ] Добавить сентри

View File

@ -2,8 +2,6 @@ from fastapi import APIRouter, Request
from starlette import status
from starlette.responses import Response
from constants import INVALID_GPT_MODEL_MESSAGE
from core.utils import ChatGptService
from settings.config import settings
router = APIRouter()
@ -19,27 +17,3 @@ router = APIRouter()
)
async def process_bot_updates(request: Request) -> None:
    """Hand the raw Telegram webhook request to the app's background update queue."""
    updates_queue = request.app.state.queue
    await updates_queue.put_updates_on_queue(request)
@router.get(
    "/bot-healthcheck",
    name="bot:gpt_healthcheck",
    response_class=Response,
    summary="bot healthcheck",
    responses={
        status.HTTP_500_INTERNAL_SERVER_ERROR: {"description": "Request to chat gpt not success"},
        status.HTTP_200_OK: {"description": "Successful Response"},
    },
)
async def gpt_healthcheck(response: Response) -> Response:
    """Probe the ChatGPT backend with a test prompt.

    Reports HTTP 200 when the backend answers normally, HTTP 500 when the
    request raises, or when the reply body equals the invalid-model marker.
    """
    service = ChatGptService(chat_gpt_model=settings.GPT_MODEL)
    request_data = service.build_request_data("Привет!")
    response.status_code = status.HTTP_200_OK
    try:
        backend_reply = await service.do_request(request_data)
    except Exception:
        # Transport-level failure talking to the backend: report unhealthy.
        response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
    else:
        bad_status = backend_reply.status_code != status.HTTP_200_OK
        bad_model = backend_reply.text == INVALID_GPT_MODEL_MESSAGE
        if bad_status or bad_model:
            response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
    return Response(status_code=response.status_code, content=None)

View File

@ -1,6 +1,11 @@
from fastapi import APIRouter
from fastapi.responses import ORJSONResponse
from starlette import status
from starlette.responses import Response
from constants import INVALID_GPT_REQUEST_MESSAGES
from core.utils import ChatGptService
from settings.config import settings
router = APIRouter()
@ -13,3 +18,30 @@ router = APIRouter()
)
async def healthcheck() -> ORJSONResponse:
    """Liveness probe: unconditionally reports the service as up (HTTP 200, empty body)."""
    return ORJSONResponse(status_code=status.HTTP_200_OK, content=None)
@router.get(
    "/bot-healthcheck",
    name="system:gpt_healthcheck",
    response_class=Response,
    summary="Проверяет доступность моделей и если они недоступны, то возвращает код ответа 500",
    responses={
        status.HTTP_500_INTERNAL_SERVER_ERROR: {"description": "Request to chat gpt not success"},
        status.HTTP_200_OK: {"description": "Successful Response"},
    },
)
async def gpt_healthcheck(response: Response) -> Response:
    """Probe the ChatGPT backend with a test prompt.

    Returns HTTP 200 when the backend answers with a valid reply, and
    HTTP 500 when the request fails, the backend responds with a non-200
    status, or the reply body contains any known error marker.
    """
    chatgpt_service = ChatGptService(chat_gpt_model=settings.GPT_MODEL)
    data = chatgpt_service.build_request_data("Привет!")
    response.status_code = status.HTTP_200_OK
    try:
        chatgpt_response = await chatgpt_service.do_request(data)
        # An error marker in the body means the backend is unhealthy even if it
        # answered HTTP 200. any() short-circuits on the first marker found,
        # unlike the previous loop which kept scanning (and rebinding its loop
        # variable) after the failure was already detected.
        if chatgpt_response.status_code != status.HTTP_200_OK or any(
            error_marker in chatgpt_response.text for error_marker in INVALID_GPT_REQUEST_MESSAGES
        ):
            response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
    except Exception:
        # Deliberate best-effort: any failure reaching the backend is reported
        # as an unhealthy probe rather than a traceback to the caller.
        response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
    return Response(status_code=response.status_code, content=None)

View File

@ -4,7 +4,7 @@ AUDIO_SEGMENT_DURATION = 120 * 1000
# URL prefix shared by the HTTP API routes.
API_PREFIX = "/api"
# Path on the freegpt backend that accepts conversation requests.
CHAT_GPT_BASE_URI = "/backend-api/v2/conversation"
# Exact reply body the backend returns when the configured model name is unknown.
INVALID_GPT_MODEL_MESSAGE = "Invalid request model"
# Substrings in a backend reply that mark a failed request even with HTTP 200.
INVALID_GPT_REQUEST_MESSAGES = ("Invalid request model", "return unexpected http status code")
class BotStagesEnum(StrEnum):

View File

@ -3,6 +3,7 @@ import sys
from types import FrameType
from typing import TYPE_CHECKING, Any, cast
import graypy
from loguru import logger
from sentry_sdk.integrations.logging import EventHandler
@ -40,20 +41,35 @@ def configure_logging(*, level: LogLevelEnum, enable_json_logs: bool, enable_sen
intercept_handler = InterceptHandler()
logging.basicConfig(handlers=[intercept_handler], level=logging_level)
formatter = _json_formatter if enable_json_logs else _text_formatter
logger.configure(
handlers=[
base_config_handlers = [intercept_handler]
loguru_handlers = [
{
"sink": sys.stdout,
"level": logging_level,
"serialize": enable_json_logs,
"format": formatter,
"colorize": True,
}
]
if settings.GRAYLOG_HOST:
graylog_handler = graypy.GELFTCPHandler(settings.GRAYLOG_HOST, 12201)
base_config_handlers.append(graylog_handler)
loguru_handlers.append(
{
"sink": sys.stdout,
"sink": graylog_handler,
"level": logging_level,
"serialize": enable_json_logs,
"format": formatter,
"colorize": True,
"colorize": False,
}
],
)
)
logging.basicConfig(handlers=base_config_handlers, level=logging_level)
logger.configure(handlers=loguru_handlers)
# sentry sdk не умеет из коробки работать с loguru, нужно добавлять хандлер
# https://github.com/getsentry/sentry-python/issues/653#issuecomment-788854865

View File

@ -20,7 +20,7 @@ from speech_recognition import (
from constants import (
AUDIO_SEGMENT_DURATION,
CHAT_GPT_BASE_URI,
INVALID_GPT_MODEL_MESSAGE,
INVALID_GPT_REQUEST_MESSAGES,
)
from settings.config import settings
@ -124,10 +124,11 @@ class ChatGptService:
try:
response = await self.do_request(chat_gpt_request)
status = response.status_code
if response.text == INVALID_GPT_MODEL_MESSAGE:
message = f"{INVALID_GPT_MODEL_MESSAGE}: {settings.GPT_MODEL}"
logger.info(message, data=chat_gpt_request)
return message
for message in INVALID_GPT_REQUEST_MESSAGES:
if message in response.text:
message = f"{message}: {settings.GPT_MODEL}"
logger.info(message, data=chat_gpt_request)
return message
if status != httpx.codes.OK:
logger.info(f"got response status: {status} from chat api", data=chat_gpt_request)
return "Что-то пошло не так, попробуйте еще раз или обратитесь к администратору"

View File

@ -6,6 +6,8 @@ APP_PORT="8000"
# SENTRY_DSN=
SENTRY_TRACES_SAMPLE_RATE="0.95"
# GRAYLOG_HOST=
USER="web"
TZ="Europe/Moscow"

View File

@ -48,8 +48,11 @@ class AppSettings(SentrySettings, BaseSettings):
DOMAIN: str = "https://localhost"
URL_PREFIX: str = ""
GRAYLOG_HOST: str | None = None
GPT_MODEL: str = "gpt-3.5-turbo-stream-AItianhuSpace"
GPT_BASE_HOST: str = "http://chat_service:8858"
# quantity of workers for uvicorn
WORKERS_COUNT: int = 1
# Enable uvicorn reloading

View File

@ -32,13 +32,13 @@ async def test_bot_healthcheck_is_ok(
assert response.status_code == httpx.codes.OK
@pytest.mark.parametrize("text", ["Invalid request model", "return unexpected http status code"])
async def test_bot_healthcheck_invalid_request_model(
rest_client: AsyncClient,
test_settings: AppSettings,
rest_client: AsyncClient, test_settings: AppSettings, text: str
) -> None:
with mocked_ask_question_api(
host=test_settings.GPT_BASE_HOST,
return_value=Response(status_code=httpx.codes.OK, text="Invalid request model"),
return_value=Response(status_code=httpx.codes.OK, text=text),
):
response = await rest_client.get("/api/bot-healthcheck")
assert response.status_code == httpx.codes.INTERNAL_SERVER_ERROR

View File

@ -19,11 +19,12 @@ services:
dockerfile: deploy/Dockerfile
target: bot-service
args:
STAGE: ${STAGE}
STAGE: ${STAGE:-production}
restart: unless-stopped
env_file:
- bot_microservice/settings/.env
volumes:
- ./bot_microservice/settings/.env:/app/settings/.env:ro
- /etc/localtime:/etc/localtime:ro
networks:
chat-gpt-network:

17
poetry.lock generated
View File

@ -999,6 +999,21 @@ gitdb = ">=4.0.1,<5"
[package.extras]
test = ["black", "coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mypy", "pre-commit", "pytest", "pytest-cov", "pytest-sugar"]
[[package]]
name = "graypy"
version = "2.1.0"
description = "Python logging handlers that send messages in the Graylog Extended Log Format (GELF)."
optional = false
python-versions = "*"
files = [
{file = "graypy-2.1.0-py2.py3-none-any.whl", hash = "sha256:5df0102ed52fdaa24dd579bc1e4904480c2c9bbb98917a0b3241ecf510c94207"},
{file = "graypy-2.1.0.tar.gz", hash = "sha256:fd8dc4a721de1278576d92db10ac015e99b4e480cf1b18892e79429fd9236e16"},
]
[package.extras]
amqp = ["amqplib (==1.0.2)"]
docs = ["sphinx (>=2.1.2,<3.0.0)", "sphinx-autodoc-typehints (>=1.6.0,<2.0.0)", "sphinx-rtd-theme (>=0.4.3,<1.0.0)"]
[[package]]
name = "greenlet"
version = "2.0.2"
@ -3110,4 +3125,4 @@ dev = ["doc8", "flake8", "flake8-import-order", "rstcheck[sphinx]", "sphinx"]
[metadata]
lock-version = "2.0"
python-versions = "^3.11"
content-hash = "f8faa71d22eb911772b7607eb35d2feb1e5dbe0b0bf2c602373b1e31bffaf820"
content-hash = "ab644b9882ee200392911afc6b71bf87fdb413e4fdd9f06a460ce33da98687d7"

View File

@ -22,6 +22,7 @@ sentry-sdk = "^1.31.0"
SpeechRecognition = "^3.8"
pydub = "^0.25"
greenlet = "^2.0.2"
graypy = "^2.1.0"
[tool.poetry.dev-dependencies]