diff --git a/bot_microservice/api/bot/controllers.py b/bot_microservice/api/bot/controllers.py index afc501e..8b090ee 100644 --- a/bot_microservice/api/bot/controllers.py +++ b/bot_microservice/api/bot/controllers.py @@ -2,6 +2,8 @@ from fastapi import APIRouter, Request from starlette import status from starlette.responses import Response +from constants import INVALID_GPT_MODEL_MESSAGE +from core.utils import ChatGptService from settings.config import settings router = APIRouter() @@ -9,7 +11,7 @@ router = APIRouter() @router.post( f"/{settings.TELEGRAM_API_TOKEN}", - name="system:process_bot_updates", + name="bot:process_bot_updates", response_class=Response, status_code=status.HTTP_202_ACCEPTED, summary="process bot updates", @@ -17,3 +19,26 @@ router = APIRouter() ) async def process_bot_updates(request: Request) -> None: await request.app.state.queue.put_updates_on_queue(request) + + +@router.get( + "/bot-healthcheck", + name="bot:gpt_healthcheck", + response_class=Response, + summary="bot healthcheck", + responses={ + status.HTTP_500_INTERNAL_SERVER_ERROR: {"description": "Request to chat gpt not success"}, + status.HTTP_200_OK: {"description": "Successful Response"}, + }, +) +async def gpt_healthcheck(response: Response) -> Response: + chat_gpt_service = ChatGptService(chat_gpt_model=settings.GPT_MODEL) + data = chat_gpt_service.build_request_data('Привет!') + try: + gpt_response = await chat_gpt_service.do_request(data) + if gpt_response.text == INVALID_GPT_MODEL_MESSAGE or gpt_response.status_code != status.HTTP_200_OK: + response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR + except Exception: + response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR + + return Response(status_code=response.status_code, content=None) diff --git a/bot_microservice/constants.py b/bot_microservice/constants.py index 5f34d23..5483a06 100644 --- a/bot_microservice/constants.py +++ b/bot_microservice/constants.py @@ -4,6 +4,7 @@ AUDIO_SEGMENT_DURATION = 120 * 1000 
API_PREFIX = "/api" CHAT_GPT_BASE_URI = "/backend-api/v2/conversation" +INVALID_GPT_MODEL_MESSAGE = "Invalid request model" class BotStagesEnum(StrEnum): diff --git a/bot_microservice/core/commands.py b/bot_microservice/core/commands.py index 9f24fda..a218158 100644 --- a/bot_microservice/core/commands.py +++ b/bot_microservice/core/commands.py @@ -1,18 +1,14 @@ import asyncio -import random import tempfile from urllib.parse import urljoin -from uuid import uuid4 -import httpx -from httpx import AsyncClient, AsyncHTTPTransport from loguru import logger from telegram import InlineKeyboardMarkup, Update from telegram.ext import ContextTypes -from constants import CHAT_GPT_BASE_URI, BotEntryPoints +from constants import BotEntryPoints from core.keyboards import main_keyboard -from core.utils import SpeechToTextService +from core.utils import ChatGptService, SpeechToTextService from settings.config import settings @@ -38,7 +34,7 @@ async def about_bot(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: return None await update.effective_message.reply_text( f"Бот использует бесплатную модель {settings.GPT_MODEL} для ответов на вопросы. " - f"Принимает запросы на разных языках.\n\nБот так же умеет переводить русские голосовые сообщения в текст. " + f"\nПринимает запросы на разных языках.\n\nБот так же умеет переводить русские голосовые сообщения в текст. 
" f"Просто пришлите голосовуху и получите поток сознания в виде текста, но без знаков препинания", parse_mode="Markdown", ) @@ -71,38 +67,9 @@ async def ask_question(update: Update, context: ContextTypes.DEFAULT_TYPE) -> No await update.message.reply_text("Пожалуйста подождите, ответ в среднем занимает 10-15 секунд") - chat_gpt_request = { - "conversation_id": str(uuid4()), - "action": "_ask", - "model": settings.GPT_MODEL, - "jailbreak": "default", - "meta": { - "id": random.randint(10**18, 10**19 - 1), # noqa: S311 - "content": { - "conversation": [], - "internet_access": False, - "content_type": "text", - "parts": [{"content": update.message.text, "role": "user"}], - }, - }, - } - - transport = AsyncHTTPTransport(retries=3) - async with AsyncClient(base_url=settings.GPT_BASE_HOST, transport=transport, timeout=50) as client: - try: - response = await client.post(CHAT_GPT_BASE_URI, json=chat_gpt_request, timeout=50) - status = response.status_code - if status != httpx.codes.OK: - logger.info(f"got response status: {status} from chat api", data=chat_gpt_request) - await update.message.reply_text( - "Что-то пошло не так, попробуйте еще раз или обратитесь к администратору" - ) - return - - await update.message.reply_text(response.text) - except Exception as error: - logger.error("error get data from chat api", error=error) - await update.message.reply_text("Вообще всё сломалось :(") + chat_gpt_service = ChatGptService(chat_gpt_model=settings.GPT_MODEL) + answer = await chat_gpt_service.request_to_chatgpt(question=update.message.text) + await update.message.reply_text(answer) async def voice_recognize(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: diff --git a/bot_microservice/core/utils.py b/bot_microservice/core/utils.py index 1709c29..21d11a7 100644 --- a/bot_microservice/core/utils.py +++ b/bot_microservice/core/utils.py @@ -1,10 +1,14 @@ import os +import random import subprocess # noqa from concurrent.futures.thread import ThreadPoolExecutor from 
datetime import datetime, timedelta from functools import lru_cache, wraps from typing import Any +from uuid import uuid4 +import httpx +from httpx import AsyncClient, AsyncHTTPTransport, Response from loguru import logger from pydub import AudioSegment from speech_recognition import ( @@ -13,7 +17,12 @@ from speech_recognition import ( UnknownValueError as SpeechRecognizerError, ) -from constants import AUDIO_SEGMENT_DURATION +from constants import ( + AUDIO_SEGMENT_DURATION, + CHAT_GPT_BASE_URI, + INVALID_GPT_MODEL_MESSAGE, +) +from settings.config import settings def timed_cache(**timedelta_kwargs: Any) -> Any: @@ -103,3 +112,49 @@ class SpeechToTextService: os.remove(tmp_filename) logger.error("error recognizing text with google", error=error) raise error + + +class ChatGptService: + def __init__(self, chat_gpt_model: str) -> None: + self.chat_gpt_model = chat_gpt_model + + async def request_to_chatgpt(self, question: str | None) -> str: + question = question or "Привет!" + chat_gpt_request = self.build_request_data(question) + try: + response = await self.do_request(chat_gpt_request) + status = response.status_code + if response.text == INVALID_GPT_MODEL_MESSAGE: + message = f"{INVALID_GPT_MODEL_MESSAGE}: {settings.GPT_MODEL}" + logger.info(message, data=chat_gpt_request) + return message + if status != httpx.codes.OK: + logger.info(f"got response status: {status} from chat api", data=chat_gpt_request) + return "Что-то пошло не так, попробуйте еще раз или обратитесь к администратору" + return response.text + except Exception as error: + logger.error("error get data from chat api", error=error) + return "Вообще всё сломалось :(" + + @staticmethod + async def do_request(data: dict[str, Any]) -> Response: + transport = AsyncHTTPTransport(retries=3) + async with AsyncClient(base_url=settings.GPT_BASE_HOST, transport=transport, timeout=50) as client: + return await client.post(CHAT_GPT_BASE_URI, json=data, timeout=50) + + def build_request_data(self, question: str) 
-> dict[str, Any]: + return { + "conversation_id": str(uuid4()), + "action": "_ask", + "model": self.chat_gpt_model, + "jailbreak": "default", + "meta": { + "id": random.randint(10**18, 10**19 - 1), # noqa: S311 + "content": { + "conversation": [], + "internet_access": False, + "content_type": "text", + "parts": [{"content": question, "role": "user"}], + }, + }, + } diff --git a/bot_microservice/tests/integration/bot/test_bot_updates.py b/bot_microservice/tests/integration/bot/test_bot_updates.py index cdcb7fe..75a9a4f 100644 --- a/bot_microservice/tests/integration/bot/test_bot_updates.py +++ b/bot_microservice/tests/integration/bot/test_bot_updates.py @@ -2,18 +2,18 @@ import asyncio from asyncio import AbstractEventLoop from unittest import mock +import httpx import pytest import telegram from assertpy import assert_that from faker import Faker -from httpx import AsyncClient +from httpx import AsyncClient, Response from telegram import InlineKeyboardButton, InlineKeyboardMarkup, Update from constants import BotStagesEnum from core.bot import BotApplication, BotQueue from main import Application from settings.config import AppSettings, settings -from tests.integration.bot.conftest import mocked_ask_question_api from tests.integration.bot.networking import MockedRequest from tests.integration.factories.bot import ( BotCallBackQueryFactory, @@ -21,6 +21,7 @@ from tests.integration.factories.bot import ( BotUpdateFactory, CallBackFactory, ) +from tests.integration.utils import mocked_ask_question_api pytestmark = [ pytest.mark.asyncio, @@ -31,11 +32,6 @@ pytestmark = [ faker = Faker() -async def test_bot_updates(rest_client: AsyncClient) -> None: - response = await rest_client.get("/api/healthcheck") - assert response.status_code == 200 - - async def test_bot_webhook_endpoint( rest_client: AsyncClient, main_application: Application, @@ -169,8 +165,8 @@ async def test_about_bot_callback_action( assert mocked_reply_text.call_args.args == ( f"Бот использует бесплатную 
модель {settings.GPT_MODEL} для ответов на вопросы. " - f"Принимает запросы на разных языках.\n\nБот так же умеет переводить русские голосовые сообщения в текст. " - f"Просто пришлите голосовуху и получите поток сознания в виде текста, но без знаков препинания", + f"\nПринимает запросы на разных языках.\n\nБот так же умеет переводить русские голосовые сообщения " + f"в текст. Просто пришлите голосовуху и получите поток сознания в виде текста, но без знаков препинания", ) assert mocked_reply_text.call_args.kwargs == {"parse_mode": "Markdown"} @@ -198,7 +194,10 @@ async def test_ask_question_action( ) -> None: with mock.patch.object( telegram._bot.Bot, "send_message", return_value=lambda *args, **kwargs: (args, kwargs) - ) as mocked_send_message, mocked_ask_question_api(host=test_settings.GPT_BASE_HOST): + ) as mocked_send_message, mocked_ask_question_api( + host=test_settings.GPT_BASE_HOST, + return_value=Response(status_code=httpx.codes.OK, text="Привет! Как я могу помочь вам сегодня?"), + ): bot_update = BotUpdateFactory(message=BotMessageFactory.create_instance(text="Привет!")) bot_update["message"].pop("entities") @@ -214,6 +213,55 @@ async def test_ask_question_action( ) +async def test_ask_question_action_not_success( + main_application: Application, + test_settings: AppSettings, +) -> None: + with mock.patch.object( + telegram._bot.Bot, "send_message", return_value=lambda *args, **kwargs: (args, kwargs) + ) as mocked_send_message, mocked_ask_question_api( + host=test_settings.GPT_BASE_HOST, return_value=Response(status_code=httpx.codes.INTERNAL_SERVER_ERROR) + ): + bot_update = BotUpdateFactory(message=BotMessageFactory.create_instance(text="Привет!")) + bot_update["message"].pop("entities") + + await main_application.bot_app.application.process_update( + update=Update.de_json(data=bot_update, bot=main_application.bot_app.bot) + ) + assert_that(mocked_send_message.call_args.kwargs).is_equal_to( + { + "text": "Что-то пошло не так, попробуйте еще раз или 
обратитесь к администратору", + "chat_id": bot_update["message"]["chat"]["id"], + }, + include=["text", "chat_id"], + ) + + +async def test_ask_question_action_critical_error( + main_application: Application, + test_settings: AppSettings, +) -> None: + with mock.patch.object( + telegram._bot.Bot, "send_message", return_value=lambda *args, **kwargs: (args, kwargs) + ) as mocked_send_message, mocked_ask_question_api( + host=test_settings.GPT_BASE_HOST, + side_effect=Exception(), + ): + bot_update = BotUpdateFactory(message=BotMessageFactory.create_instance(text="Привет!")) + bot_update["message"].pop("entities") + + await main_application.bot_app.application.process_update( + update=Update.de_json(data=bot_update, bot=main_application.bot_app.bot) + ) + assert_that(mocked_send_message.call_args.kwargs).is_equal_to( + { + "text": "Вообще всё сломалось :(", + "chat_id": bot_update["message"]["chat"]["id"], + }, + include=["text", "chat_id"], + ) + + async def test_no_update_message( main_application: Application, test_settings: AppSettings, diff --git a/bot_microservice/tests/integration/bot/conftest.py b/bot_microservice/tests/integration/conftest.py similarity index 93% rename from bot_microservice/tests/integration/bot/conftest.py rename to bot_microservice/tests/integration/conftest.py index dc68e7e..7f29288 100644 --- a/bot_microservice/tests/integration/bot/conftest.py +++ b/bot_microservice/tests/integration/conftest.py @@ -4,19 +4,16 @@ pytest framework. 
A common change is to allow monkeypatching of the class member enforcing slots in the subclasses.""" import asyncio from asyncio import AbstractEventLoop -from contextlib import contextmanager from datetime import tzinfo -from typing import Any, AsyncGenerator, Iterator +from typing import Any, AsyncGenerator import pytest import pytest_asyncio -import respx -from httpx import AsyncClient, Response +from httpx import AsyncClient from pytest_asyncio.plugin import SubRequest from telegram import Bot, User from telegram.ext import Application, ApplicationBuilder, Defaults, ExtBot -from constants import CHAT_GPT_BASE_URI from core.bot import BotApplication from core.handlers import bot_event_handlers from main import Application as AppApplication @@ -255,15 +252,3 @@ async def rest_client( headers={"Content-Type": "application/json"}, ) as client: yield client - - -@contextmanager -def mocked_ask_question_api(host: str) -> Iterator[respx.MockRouter]: - with respx.mock( - assert_all_mocked=True, - assert_all_called=True, - base_url=host, - ) as respx_mock: - ask_question_route = respx_mock.post(url=CHAT_GPT_BASE_URI, name="ask_question") - ask_question_route.return_value = Response(status_code=200, text="Привет! 
Как я могу помочь вам сегодня?") - yield respx_mock diff --git a/bot_microservice/tests/integration/test_system.py b/bot_microservice/tests/integration/test_system.py new file mode 100644 index 0000000..e9e1e61 --- /dev/null +++ b/bot_microservice/tests/integration/test_system.py @@ -0,0 +1,56 @@ +import httpx +import pytest +from faker import Faker +from httpx import AsyncClient, Response + +from settings.config import AppSettings +from tests.integration.utils import mocked_ask_question_api + +pytestmark = [ + pytest.mark.asyncio, + pytest.mark.enable_socket, +] + + +faker = Faker() + + +async def test_bot_updates(rest_client: AsyncClient) -> None: + response = await rest_client.get("/api/healthcheck") + assert response.status_code == 200 + + +async def test_bot_healthcheck_is_ok( + rest_client: AsyncClient, + test_settings: AppSettings, +) -> None: + with mocked_ask_question_api( + host=test_settings.GPT_BASE_HOST, + return_value=Response(status_code=httpx.codes.OK, text="Привет! Как я могу помочь вам сегодня?"), + ): + response = await rest_client.get("/api/bot-healthcheck") + assert response.status_code == httpx.codes.OK + + +async def test_bot_healthcheck_invalid_request_model( + rest_client: AsyncClient, + test_settings: AppSettings, +) -> None: + with mocked_ask_question_api( + host=test_settings.GPT_BASE_HOST, + return_value=Response(status_code=httpx.codes.OK, text="Invalid request model"), + ): + response = await rest_client.get("/api/bot-healthcheck") + assert response.status_code == httpx.codes.INTERNAL_SERVER_ERROR + + +async def test_bot_healthcheck_not_ok( + rest_client: AsyncClient, + test_settings: AppSettings, +) -> None: + with mocked_ask_question_api( + host=test_settings.GPT_BASE_HOST, + side_effect=Exception(), + ): + response = await rest_client.get("/api/bot-healthcheck") + assert response.status_code == httpx.codes.INTERNAL_SERVER_ERROR diff --git a/bot_microservice/tests/integration/utils.py b/bot_microservice/tests/integration/utils.py 
new file mode 100644 index 0000000..808ba9e --- /dev/null +++ b/bot_microservice/tests/integration/utils.py @@ -0,0 +1,22 @@ +from contextlib import contextmanager +from typing import Any, Iterator + +import respx +from httpx import Response + +from constants import CHAT_GPT_BASE_URI + + +@contextmanager +def mocked_ask_question_api( + host: str, return_value: Response | None = None, side_effect: Any | None = None +) -> Iterator[respx.MockRouter]: + with respx.mock( + assert_all_mocked=True, + assert_all_called=True, + base_url=host, + ) as respx_mock: + ask_question_route = respx_mock.post(url=CHAT_GPT_BASE_URI, name="ask_question") + ask_question_route.return_value = return_value + ask_question_route.side_effect = side_effect + yield respx_mock diff --git a/pyproject.toml b/pyproject.toml index 90ad5ef..a4ab99a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -113,7 +113,7 @@ ignore = [ per-file-ignores = [ # too complex queries "bot_microservice/tests/*: S101", - "bot_microservice/tests/integration/bot/conftest.py: NEW100", + "bot_microservice/tests/integration/conftest.py: NEW100", "bot_microservice/settings/config.py: S104" ]