mirror of
https://github.com/Balshgit/gpt_chat_bot.git
synced 2025-12-15 16:10:39 +03:00
add graylog config (#23)
* add additional chat gpt request error
* add graylog config
This commit is contained in:
@@ -2,8 +2,6 @@ from fastapi import APIRouter, Request
|
||||
from starlette import status
|
||||
from starlette.responses import Response
|
||||
|
||||
from constants import INVALID_GPT_MODEL_MESSAGE
|
||||
from core.utils import ChatGptService
|
||||
from settings.config import settings
|
||||
|
||||
router = APIRouter()
|
||||
@@ -19,27 +17,3 @@ router = APIRouter()
|
||||
)
|
||||
async def process_bot_updates(request: Request) -> None:
    """Hand the incoming webhook request off to the application's update queue.

    The queue consumer processes the update asynchronously, so this endpoint
    returns as soon as the request has been enqueued.
    """
    updates_queue = request.app.state.queue
    await updates_queue.put_updates_on_queue(request)
|
||||
|
||||
|
||||
@router.get(
    "/bot-healthcheck",
    name="bot:gpt_healthcheck",
    response_class=Response,
    summary="bot healthcheck",
    responses={
        status.HTTP_500_INTERNAL_SERVER_ERROR: {"description": "Request to chat gpt not success"},
        status.HTTP_200_OK: {"description": "Successful Response"},
    },
)
async def gpt_healthcheck(response: Response) -> Response:
    """Probe the ChatGPT backend with a test question.

    Reports HTTP 200 when the backend answers with status 200 and the reply is
    not the invalid-model message; HTTP 500 otherwise, including any exception
    raised while contacting the backend.
    """
    service = ChatGptService(chat_gpt_model=settings.GPT_MODEL)
    request_data = service.build_request_data("Привет!")
    status_code = status.HTTP_200_OK
    try:
        gpt_reply = await service.do_request(request_data)
    except Exception:
        # Healthcheck must never raise: any transport failure means "unhealthy".
        status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
    else:
        bad_status = gpt_reply.status_code != status.HTTP_200_OK
        bad_model = gpt_reply.text == INVALID_GPT_MODEL_MESSAGE
        if bad_status or bad_model:
            status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
    response.status_code = status_code
    return Response(status_code=status_code, content=None)
|
||||
|
||||
@@ -1,6 +1,11 @@
|
||||
from fastapi import APIRouter
|
||||
from fastapi.responses import ORJSONResponse
|
||||
from starlette import status
|
||||
from starlette.responses import Response
|
||||
|
||||
from constants import INVALID_GPT_REQUEST_MESSAGES
|
||||
from core.utils import ChatGptService
|
||||
from settings.config import settings
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
@@ -13,3 +18,30 @@ router = APIRouter()
|
||||
)
|
||||
async def healthcheck() -> ORJSONResponse:
    """Liveness probe: always answer HTTP 200 with an empty JSON body."""
    empty_ok = ORJSONResponse(content=None, status_code=status.HTTP_200_OK)
    return empty_ok
|
||||
|
||||
|
||||
@router.get(
    "/bot-healthcheck",
    name="system:gpt_healthcheck",
    response_class=Response,
    summary="Проверяет доступность моделей и если они недоступны, то возвращает код ответа 500",
    responses={
        status.HTTP_500_INTERNAL_SERVER_ERROR: {"description": "Request to chat gpt not success"},
        status.HTTP_200_OK: {"description": "Successful Response"},
    },
)
async def gpt_healthcheck(response: Response) -> Response:
    """Check that the ChatGPT backend answers a test question with a valid reply.

    Returns HTTP 200 when the backend responds with status 200 and the reply
    contains none of the known error markers from INVALID_GPT_REQUEST_MESSAGES;
    HTTP 500 otherwise, including any exception raised while contacting the
    backend.
    """
    chatgpt_service = ChatGptService(chat_gpt_model=settings.GPT_MODEL)
    data = chatgpt_service.build_request_data("Привет!")
    response.status_code = status.HTTP_200_OK
    try:
        chatgpt_response = await chatgpt_service.do_request(data)
        # One combined check replaces the original loop that kept re-assigning
        # 500 after the verdict was already known; any() short-circuits on the
        # first matching error marker.
        if chatgpt_response.status_code != status.HTTP_200_OK or any(
            message in chatgpt_response.text for message in INVALID_GPT_REQUEST_MESSAGES
        ):
            response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
    except Exception:
        # Healthcheck must never raise: any transport failure means "unhealthy".
        response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
    return Response(status_code=response.status_code, content=None)
|
||||
|
||||
@@ -4,7 +4,7 @@ AUDIO_SEGMENT_DURATION = 120 * 1000
|
||||
|
||||
# Common prefix for all HTTP API routes served by the app.
API_PREFIX = "/api"
# Path on the GPT backend host that accepts conversation requests.
CHAT_GPT_BASE_URI = "/backend-api/v2/conversation"
# Exact reply text the GPT backend sends when the requested model is unknown.
INVALID_GPT_MODEL_MESSAGE = "Invalid request model"
# Reply substrings that mark a GPT request as failed (matched with `in`).
INVALID_GPT_REQUEST_MESSAGES = ("Invalid request model", "return unexpected http status code")
|
||||
|
||||
|
||||
class BotStagesEnum(StrEnum):
|
||||
|
||||
@@ -3,6 +3,7 @@ import sys
|
||||
from types import FrameType
|
||||
from typing import TYPE_CHECKING, Any, cast
|
||||
|
||||
import graypy
|
||||
from loguru import logger
|
||||
from sentry_sdk.integrations.logging import EventHandler
|
||||
|
||||
@@ -40,20 +41,35 @@ def configure_logging(*, level: LogLevelEnum, enable_json_logs: bool, enable_sen
|
||||
|
||||
intercept_handler = InterceptHandler()
|
||||
|
||||
logging.basicConfig(handlers=[intercept_handler], level=logging_level)
|
||||
|
||||
formatter = _json_formatter if enable_json_logs else _text_formatter
|
||||
logger.configure(
|
||||
handlers=[
|
||||
|
||||
base_config_handlers = [intercept_handler]
|
||||
|
||||
loguru_handlers = [
|
||||
{
|
||||
"sink": sys.stdout,
|
||||
"level": logging_level,
|
||||
"serialize": enable_json_logs,
|
||||
"format": formatter,
|
||||
"colorize": True,
|
||||
}
|
||||
]
|
||||
|
||||
if settings.GRAYLOG_HOST:
|
||||
graylog_handler = graypy.GELFTCPHandler(settings.GRAYLOG_HOST, 12201)
|
||||
base_config_handlers.append(graylog_handler)
|
||||
loguru_handlers.append(
|
||||
{
|
||||
"sink": sys.stdout,
|
||||
"sink": graylog_handler,
|
||||
"level": logging_level,
|
||||
"serialize": enable_json_logs,
|
||||
"format": formatter,
|
||||
"colorize": True,
|
||||
"colorize": False,
|
||||
}
|
||||
],
|
||||
)
|
||||
)
|
||||
|
||||
logging.basicConfig(handlers=base_config_handlers, level=logging_level)
|
||||
logger.configure(handlers=loguru_handlers)
|
||||
|
||||
# sentry sdk не умеет из коробки работать с loguru, нужно добавлять хандлер
|
||||
# https://github.com/getsentry/sentry-python/issues/653#issuecomment-788854865
|
||||
|
||||
@@ -20,7 +20,7 @@ from speech_recognition import (
|
||||
from constants import (
|
||||
AUDIO_SEGMENT_DURATION,
|
||||
CHAT_GPT_BASE_URI,
|
||||
INVALID_GPT_MODEL_MESSAGE,
|
||||
INVALID_GPT_REQUEST_MESSAGES,
|
||||
)
|
||||
from settings.config import settings
|
||||
|
||||
@@ -124,10 +124,11 @@ class ChatGptService:
|
||||
try:
|
||||
response = await self.do_request(chat_gpt_request)
|
||||
status = response.status_code
|
||||
if response.text == INVALID_GPT_MODEL_MESSAGE:
|
||||
message = f"{INVALID_GPT_MODEL_MESSAGE}: {settings.GPT_MODEL}"
|
||||
logger.info(message, data=chat_gpt_request)
|
||||
return message
|
||||
for message in INVALID_GPT_REQUEST_MESSAGES:
|
||||
if message in response.text:
|
||||
message = f"{message}: {settings.GPT_MODEL}"
|
||||
logger.info(message, data=chat_gpt_request)
|
||||
return message
|
||||
if status != httpx.codes.OK:
|
||||
logger.info(f"got response status: {status} from chat api", data=chat_gpt_request)
|
||||
return "Что-то пошло не так, попробуйте еще раз или обратитесь к администратору"
|
||||
|
||||
@@ -6,6 +6,8 @@ APP_PORT="8000"
|
||||
# SENTRY_DSN=
|
||||
SENTRY_TRACES_SAMPLE_RATE="0.95"
|
||||
|
||||
# GRAYLOG_HOST=
|
||||
|
||||
USER="web"
|
||||
TZ="Europe/Moscow"
|
||||
|
||||
|
||||
@@ -48,8 +48,11 @@ class AppSettings(SentrySettings, BaseSettings):
|
||||
DOMAIN: str = "https://localhost"
|
||||
URL_PREFIX: str = ""
|
||||
|
||||
GRAYLOG_HOST: str | None = None
|
||||
|
||||
GPT_MODEL: str = "gpt-3.5-turbo-stream-AItianhuSpace"
|
||||
GPT_BASE_HOST: str = "http://chat_service:8858"
|
||||
|
||||
# quantity of workers for uvicorn
|
||||
WORKERS_COUNT: int = 1
|
||||
# Enable uvicorn reloading
|
||||
|
||||
@@ -32,13 +32,13 @@ async def test_bot_healthcheck_is_ok(
|
||||
assert response.status_code == httpx.codes.OK
|
||||
|
||||
|
||||
@pytest.mark.parametrize("text", ["Invalid request model", "return unexpected http status code"])
async def test_bot_healthcheck_invalid_request_model(
    rest_client: AsyncClient, test_settings: AppSettings, text: str
) -> None:
    """Healthcheck must report 500 when the GPT backend answers HTTP 200
    but the body carries a known error marker from INVALID_GPT_REQUEST_MESSAGES.
    """
    with mocked_ask_question_api(
        host=test_settings.GPT_BASE_HOST,
        return_value=Response(status_code=httpx.codes.OK, text=text),
    ):
        response = await rest_client.get("/api/bot-healthcheck")
        assert response.status_code == httpx.codes.INTERNAL_SERVER_ERROR
|
||||
|
||||
Reference in New Issue
Block a user