Mirror of https://github.com/Balshgit/gpt_chat_bot.git
Synced 2025-09-11 22:30:41 +03:00
add graylog config (#23)

* add additional chat gpt request error
* add graylog config

parent: ae6cdb896a
commit: e465d71320
README.md (21 lines changed)
@@ -65,9 +65,9 @@ methods:
 ## Chat:

 ```shell
-cd bot_microservice
-python3 run.py
+docker run -p 8858:8858 -it --name freegpt --rm -e CHAT_PATH=/chat balshdocker/freegpt:latest
 ```
+Open http://localhost:8858/chat/


 ```bash
@@ -85,11 +85,13 @@ gunicorn main:create_app --workers 10 --bind 0.0.0.0:8083 --worker-class uvicorn

 ### Run local tests:
 ```bash
-poetry run pytest
+cd bot_microservice
+STAGE=runtests poetry run pytest
 ```

 ### Run tests in docker compose:
 ```bash
+cd bot_microservice
 STAGE=runtests docker compose run bot bash -c "coverage run -m pytest -vv --exitfirst && poetry run coverage report"
 ```

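Note: `STAGE=runtests` appears to select the test configuration; the same `STAGE` variable is forwarded as a Docker build arg in the docker-compose hunk further down, where this commit gives it a `production` default.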
@@ -101,14 +103,17 @@ Docs can be found at

 on local start can be found at http://localhost/gpt/api/docs

+prod docs https://bot.mywistr.ru/gpt/api/docs/

 ## Help article

-[Writing an asynchronous Telegram bot](https://habr.com/ru/company/kts/blog/598575/)
+[Follow updates of this repository](https://github.com/fantasy-peak/cpp-freegpt-webui)


 ## TODO

-- [x] Add a message queue
-- [x] Fix local startup
-- [x] Add tests
-- [x] Close connection
+- [ ] Add a database with models
+- [ ] Add migrations via alembic
+- [ ] Add model weights and switching between them
+- [ ] Add tests
+- [ ] Add Sentry
@@ -2,8 +2,6 @@ from fastapi import APIRouter, Request
 from starlette import status
 from starlette.responses import Response

-from constants import INVALID_GPT_MODEL_MESSAGE
-from core.utils import ChatGptService
 from settings.config import settings

 router = APIRouter()
@@ -19,27 +17,3 @@ router = APIRouter()
 )
 async def process_bot_updates(request: Request) -> None:
     await request.app.state.queue.put_updates_on_queue(request)
-
-
-@router.get(
-    "/bot-healthcheck",
-    name="bot:gpt_healthcheck",
-    response_class=Response,
-    summary="bot healthcheck",
-    responses={
-        status.HTTP_500_INTERNAL_SERVER_ERROR: {"description": "Request to chat gpt not success"},
-        status.HTTP_200_OK: {"description": "Successful Response"},
-    },
-)
-async def gpt_healthcheck(response: Response) -> Response:
-    chatgpt_service = ChatGptService(chat_gpt_model=settings.GPT_MODEL)
-    data = chatgpt_service.build_request_data("Привет!")
-    response.status_code = status.HTTP_200_OK
-    try:
-        chatgpt_response = await chatgpt_service.do_request(data)
-        if chatgpt_response.status_code != status.HTTP_200_OK or chatgpt_response.text == INVALID_GPT_MODEL_MESSAGE:
-            response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
-    except Exception:
-        response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
-
-    return Response(status_code=response.status_code, content=None)
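The `gpt_healthcheck` endpoint deleted here is not dropped: the next two hunks re-add it on the system router, renamed from `bot:gpt_healthcheck` to `system:gpt_healthcheck` and extended to match any of the `INVALID_GPT_REQUEST_MESSAGES` markers instead of the single `INVALID_GPT_MODEL_MESSAGE`.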
@@ -1,6 +1,11 @@
 from fastapi import APIRouter
 from fastapi.responses import ORJSONResponse
 from starlette import status
+from starlette.responses import Response

+from constants import INVALID_GPT_REQUEST_MESSAGES
+from core.utils import ChatGptService
+from settings.config import settings
+
 router = APIRouter()

@@ -13,3 +18,30 @@ router = APIRouter()
 )
 async def healthcheck() -> ORJSONResponse:
     return ORJSONResponse(content=None, status_code=status.HTTP_200_OK)
+
+
+@router.get(
+    "/bot-healthcheck",
+    name="system:gpt_healthcheck",
+    response_class=Response,
+    summary="Проверяет доступность моделей и если они недоступны, то возвращает код ответа 500",
+    responses={
+        status.HTTP_500_INTERNAL_SERVER_ERROR: {"description": "Request to chat gpt not success"},
+        status.HTTP_200_OK: {"description": "Successful Response"},
+    },
+)
+async def gpt_healthcheck(response: Response) -> Response:
+    chatgpt_service = ChatGptService(chat_gpt_model=settings.GPT_MODEL)
+    data = chatgpt_service.build_request_data("Привет!")
+    response.status_code = status.HTTP_200_OK
+    try:
+        chatgpt_response = await chatgpt_service.do_request(data)
+        if chatgpt_response.status_code != status.HTTP_200_OK:
+            response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
+        for message in INVALID_GPT_REQUEST_MESSAGES:
+            if message in chatgpt_response.text:
+                response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
+    except Exception:
+        response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
+
+    return Response(status_code=response.status_code, content=None)
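For reference, a minimal sketch of probing the re-added healthcheck with httpx; the base URL is an assumption built from `API_PREFIX = "/api"` and the `APP_PORT="8000"` default visible later in this diff:

```python
import httpx

# Hypothetical local URL: API_PREFIX ("/api") + route path; APP_PORT defaults to 8000.
resp = httpx.get("http://localhost:8000/api/bot-healthcheck")

# 200: the GPT backend answered normally.
# 500: the request raised, returned a non-200 status, or the body contained
#      one of the INVALID_GPT_REQUEST_MESSAGES markers.
print("healthy" if resp.status_code == httpx.codes.OK else "unhealthy")
```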
@@ -4,7 +4,7 @@ AUDIO_SEGMENT_DURATION = 120 * 1000

 API_PREFIX = "/api"
 CHAT_GPT_BASE_URI = "/backend-api/v2/conversation"
-INVALID_GPT_MODEL_MESSAGE = "Invalid request model"
+INVALID_GPT_REQUEST_MESSAGES = ("Invalid request model", "return unexpected http status code")


 class BotStagesEnum(StrEnum):
@@ -3,6 +3,7 @@ import sys
 from types import FrameType
 from typing import TYPE_CHECKING, Any, cast

+import graypy
 from loguru import logger
 from sentry_sdk.integrations.logging import EventHandler

@@ -40,20 +41,35 @@ def configure_logging(*, level: LogLevelEnum, enable_json_logs: bool, enable_sen

     intercept_handler = InterceptHandler()

-    logging.basicConfig(handlers=[intercept_handler], level=logging_level)

     formatter = _json_formatter if enable_json_logs else _text_formatter
-    logger.configure(
-        handlers=[
-            {
-                "sink": sys.stdout,
-                "level": logging_level,
-                "serialize": enable_json_logs,
-                "format": formatter,
-                "colorize": True,
-            }
-        ],
-    )
+
+    base_config_handlers = [intercept_handler]
+
+    loguru_handlers = [
+        {
+            "sink": sys.stdout,
+            "level": logging_level,
+            "serialize": enable_json_logs,
+            "format": formatter,
+            "colorize": True,
+        }
+    ]
+
+    if settings.GRAYLOG_HOST:
+        graylog_handler = graypy.GELFTCPHandler(settings.GRAYLOG_HOST, 12201)
+        base_config_handlers.append(graylog_handler)
+        loguru_handlers.append(
+            {
+                "sink": graylog_handler,
+                "level": logging_level,
+                "serialize": enable_json_logs,
+                "format": formatter,
+                "colorize": False,
+            }
+        )
+
+    logging.basicConfig(handlers=base_config_handlers, level=logging_level)
+    logger.configure(handlers=loguru_handlers)

     # sentry sdk can't work with loguru out of the box, a handler has to be added
     # https://github.com/getsentry/sentry-python/issues/653#issuecomment-788854865
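For context, `graypy.GELFTCPHandler` is a regular `logging.Handler`, which is why the diff can register one instance both with the stdlib config and as a loguru sink. A minimal standalone sketch, with a hypothetical Graylog host (12201 is the conventional GELF port the diff also hard-codes):

```python
import logging

import graypy

logger = logging.getLogger("gpt_chat_bot")
logger.setLevel(logging.INFO)

# Ships each record to Graylog over TCP as a GELF message.
handler = graypy.GELFTCPHandler("graylog.example.com", 12201)  # hypothetical host
logger.addHandler(handler)

logger.info("bot started")  # appears in Graylog
```

Note the diff sets `"colorize": False` for the Graylog sink, so ANSI color codes from the console formatter don't leak into stored messages.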
@@ -20,7 +20,7 @@ from speech_recognition import (
 from constants import (
     AUDIO_SEGMENT_DURATION,
     CHAT_GPT_BASE_URI,
-    INVALID_GPT_MODEL_MESSAGE,
+    INVALID_GPT_REQUEST_MESSAGES,
 )
 from settings.config import settings

@@ -124,10 +124,11 @@ class ChatGptService:
         try:
             response = await self.do_request(chat_gpt_request)
             status = response.status_code
-            if response.text == INVALID_GPT_MODEL_MESSAGE:
-                message = f"{INVALID_GPT_MODEL_MESSAGE}: {settings.GPT_MODEL}"
-                logger.info(message, data=chat_gpt_request)
-                return message
+            for message in INVALID_GPT_REQUEST_MESSAGES:
+                if message in response.text:
+                    message = f"{message}: {settings.GPT_MODEL}"
+                    logger.info(message, data=chat_gpt_request)
+                    return message
             if status != httpx.codes.OK:
                 logger.info(f"got response status: {status} from chat api", data=chat_gpt_request)
                 return "Что-то пошло не так, попробуйте еще раз или обратитесь к администратору"
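The replaced check generalizes from one exact-match constant to a substring scan over a tuple of known error markers. The same logic as a standalone sketch (the names mirror the diff; everything else is illustrative):

```python
INVALID_GPT_REQUEST_MESSAGES = ("Invalid request model", "return unexpected http status code")


def is_failed_gpt_response(text: str) -> bool:
    # A response is treated as failed if any known marker occurs anywhere in its body.
    return any(message in text for message in INVALID_GPT_REQUEST_MESSAGES)


assert is_failed_gpt_response("Invalid request model")
assert not is_failed_gpt_response("normal answer")
```

In the diff itself the loop variable `message` is rebound to the formatted string before being returned; that is safe only because the function returns immediately on the first match.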
@@ -6,6 +6,8 @@ APP_PORT="8000"
 # SENTRY_DSN=
 SENTRY_TRACES_SAMPLE_RATE="0.95"

+# GRAYLOG_HOST=
+
 USER="web"
 TZ="Europe/Moscow"

@@ -48,8 +48,11 @@ class AppSettings(SentrySettings, BaseSettings):
     DOMAIN: str = "https://localhost"
     URL_PREFIX: str = ""

+    GRAYLOG_HOST: str | None = None
+
     GPT_MODEL: str = "gpt-3.5-turbo-stream-AItianhuSpace"
     GPT_BASE_HOST: str = "http://chat_service:8858"

     # quantity of workers for uvicorn
     WORKERS_COUNT: int = 1
     # Enable uvicorn reloading
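`GRAYLOG_HOST` defaults to `None`, so the graypy handler block in `configure_logging` is skipped unless the variable is set. A simplified sketch of how such a settings field picks up the environment (assuming pydantic v1-style `BaseSettings`, and omitting the real class's `SentrySettings` mixin and `.env` file wiring):

```python
import os

from pydantic import BaseSettings  # assumption: pydantic v1-style settings


class AppSettings(BaseSettings):
    # None by default: Graylog shipping stays disabled.
    GRAYLOG_HOST: str | None = None


os.environ["GRAYLOG_HOST"] = "graylog.example.com"  # hypothetical host
assert AppSettings().GRAYLOG_HOST == "graylog.example.com"
```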
@@ -32,13 +32,13 @@ async def test_bot_healthcheck_is_ok(
     assert response.status_code == httpx.codes.OK


+@pytest.mark.parametrize("text", ["Invalid request model", "return unexpected http status code"])
 async def test_bot_healthcheck_invalid_request_model(
-    rest_client: AsyncClient,
-    test_settings: AppSettings,
+    rest_client: AsyncClient, test_settings: AppSettings, text: str
 ) -> None:
     with mocked_ask_question_api(
         host=test_settings.GPT_BASE_HOST,
-        return_value=Response(status_code=httpx.codes.OK, text="Invalid request model"),
+        return_value=Response(status_code=httpx.codes.OK, text=text),
     ):
         response = await rest_client.get("/api/bot-healthcheck")
     assert response.status_code == httpx.codes.INTERNAL_SERVER_ERROR
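With `@pytest.mark.parametrize`, this single test now runs once per marker, so both entries of `INVALID_GPT_REQUEST_MESSAGES` are exercised against the mocked 200-with-error-body response.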
@@ -19,11 +19,12 @@ services:
       dockerfile: deploy/Dockerfile
       target: bot-service
       args:
-        STAGE: ${STAGE}
+        STAGE: ${STAGE:-production}
     restart: unless-stopped
     env_file:
       - bot_microservice/settings/.env
     volumes:
+      - ./bot_microservice/settings/.env:/app/settings/.env:ro
       - /etc/localtime:/etc/localtime:ro
     networks:
       chat-gpt-network:
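`${STAGE:-production}` uses Compose's default-value interpolation: when `STAGE` is unset or empty, `production` is substituted, so a plain `docker compose up` builds the production target while `STAGE=runtests docker compose run ...` from the README still overrides it.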
poetry.lock (generated, 17 lines changed)
@@ -999,6 +999,21 @@ gitdb = ">=4.0.1,<5"
 [package.extras]
 test = ["black", "coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mypy", "pre-commit", "pytest", "pytest-cov", "pytest-sugar"]

+[[package]]
+name = "graypy"
+version = "2.1.0"
+description = "Python logging handlers that send messages in the Graylog Extended Log Format (GELF)."
+optional = false
+python-versions = "*"
+files = [
+    {file = "graypy-2.1.0-py2.py3-none-any.whl", hash = "sha256:5df0102ed52fdaa24dd579bc1e4904480c2c9bbb98917a0b3241ecf510c94207"},
+    {file = "graypy-2.1.0.tar.gz", hash = "sha256:fd8dc4a721de1278576d92db10ac015e99b4e480cf1b18892e79429fd9236e16"},
+]
+
+[package.extras]
+amqp = ["amqplib (==1.0.2)"]
+docs = ["sphinx (>=2.1.2,<3.0.0)", "sphinx-autodoc-typehints (>=1.6.0,<2.0.0)", "sphinx-rtd-theme (>=0.4.3,<1.0.0)"]
+
 [[package]]
 name = "greenlet"
 version = "2.0.2"
@@ -3110,4 +3125,4 @@ dev = ["doc8", "flake8", "flake8-import-order", "rstcheck[sphinx]", "sphinx"]
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.11"
-content-hash = "f8faa71d22eb911772b7607eb35d2feb1e5dbe0b0bf2c602373b1e31bffaf820"
+content-hash = "ab644b9882ee200392911afc6b71bf87fdb413e4fdd9f06a460ce33da98687d7"
@@ -22,6 +22,7 @@ sentry-sdk = "^1.31.0"
 SpeechRecognition = "^3.8"
 pydub = "^0.25"
 greenlet = "^2.0.2"
+graypy = "^2.1.0"


 [tool.poetry.dev-dependencies]