diff --git a/README.md b/README.md index aa98618..1cf443a 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,7 @@ git pull balshgit main sudo rsync -a --delete --progress /home/balsh/Pycharmprojects/gpt_chat_bot/* /opt/gpt_chat_bot/ --exclude .git cd /opt/gpt_chat_bot/ docker pull balshdocker/freegpt -STAGE=production docker compose build +docker compose build sudo systemctl stop gptchatbot.service sudo systemctl start gptchatbot.service ``` diff --git a/bot_microservice/core/commands.py b/bot_microservice/core/commands.py index a218158..a5adbf3 100644 --- a/bot_microservice/core/commands.py +++ b/bot_microservice/core/commands.py @@ -68,6 +68,7 @@ async def ask_question(update: Update, context: ContextTypes.DEFAULT_TYPE) -> No await update.message.reply_text("Пожалуйста подождите, ответ в среднем занимает 10-15 секунд") chat_gpt_service = ChatGptService(chat_gpt_model=settings.GPT_MODEL) + logger.warning("question asked", user=update.message.from_user, question=update.message.text) answer = await chat_gpt_service.request_to_chatgpt(question=update.message.text) await update.message.reply_text(answer) diff --git a/bot_microservice/core/logging.py b/bot_microservice/core/logging.py index cc3a810..a43b3d5 100644 --- a/bot_microservice/core/logging.py +++ b/bot_microservice/core/logging.py @@ -1,4 +1,5 @@ import logging +import os import sys from types import FrameType from typing import TYPE_CHECKING, Any, cast @@ -8,7 +9,7 @@ from loguru import logger from sentry_sdk.integrations.logging import EventHandler from constants import LogLevelEnum -from settings.config import settings +from settings.config import DIR_LOGS, settings if TYPE_CHECKING: from loguru import Record @@ -36,39 +37,37 @@ class InterceptHandler(logging.Handler): ) -def configure_logging(*, level: LogLevelEnum, enable_json_logs: bool, enable_sentry_logs: bool) -> None: - logging_level = level.name - +def configure_logging( + *, level: LogLevelEnum, enable_json_logs: bool, enable_sentry_logs: bool, 
log_to_file: str | None = None +) -> None: intercept_handler = InterceptHandler() formatter = _json_formatter if enable_json_logs else _text_formatter base_config_handlers = [intercept_handler] + base_loguru_handler = { + "level": level.name, + "serialize": enable_json_logs, + "format": formatter, + "colorize": False, + } loguru_handlers = [ - { - "sink": sys.stdout, - "level": logging_level, - "serialize": enable_json_logs, - "format": formatter, - "colorize": True, - } + {**base_loguru_handler, "colorize": True, "sink": sys.stdout}, ] - if settings.GRAYLOG_HOST: - graylog_handler = graypy.GELFTCPHandler(settings.GRAYLOG_HOST, 12201) + if settings.GRAYLOG_HOST and settings.GRAYLOG_PORT: + graylog_handler = graypy.GELFUDPHandler(settings.GRAYLOG_HOST, settings.GRAYLOG_PORT) base_config_handlers.append(graylog_handler) - loguru_handlers.append( - { - "sink": graylog_handler, - "level": logging_level, - "serialize": enable_json_logs, - "format": formatter, - "colorize": False, - } - ) + loguru_handlers.append({**base_loguru_handler, "sink": graylog_handler}) + if log_to_file: + file_path = os.path.join(DIR_LOGS, log_to_file) + if not os.path.exists(file_path): + with open(file_path, 'w') as f: + f.write('') + loguru_handlers.append({**base_loguru_handler, "sink": file_path}) - logging.basicConfig(handlers=base_config_handlers, level=logging_level) + logging.basicConfig(handlers=base_config_handlers, level=level.name) logger.configure(handlers=loguru_handlers) # sentry sdk не умеет из коробки работать с loguru, нужно добавлять хандлер diff --git a/bot_microservice/main.py b/bot_microservice/main.py index 3ec940a..41d5ad7 100644 --- a/bot_microservice/main.py +++ b/bot_microservice/main.py @@ -30,11 +30,16 @@ class Application: self.app.include_router(api_router) self.configure_hooks() - configure_logging(level=LogLevelEnum.INFO, enable_json_logs=True, enable_sentry_logs=True) + configure_logging( + level=LogLevelEnum.INFO, + 
enable_json_logs=settings.ENABLE_JSON_LOGS, + enable_sentry_logs=settings.ENABLE_SENTRY_LOGS, + log_to_file=settings.LOG_TO_FILE, + ) if settings.SENTRY_DSN is not None: sentry_sdk.init( - dsn=settings.SENTRY_DSN, # type: ignore[arg-type] + dsn=settings.SENTRY_DSN, environment=settings.DEPLOY_ENVIRONMENT, traces_sample_rate=settings.SENTRY_TRACES_SAMPLE_RATE, send_client_reports=False, diff --git a/bot_microservice/settings/.env.ci.runtests b/bot_microservice/settings/.env.ci.runtests index c0218ec..bccd02a 100644 --- a/bot_microservice/settings/.env.ci.runtests +++ b/bot_microservice/settings/.env.ci.runtests @@ -1,24 +1,20 @@ STAGE="runtests" +# ==== start app settings ==== APP_HOST="0.0.0.0" APP_PORT="8000" -USER="web" -TZ="Europe/Moscow" - +# ==== telegram settings ==== TELEGRAM_API_TOKEN="123456789:AABBCCDDEEFFaabbccddeeff-1234567890" - -# webhook settings -DOMAIN="http://localhost" -URL_PREFIX= - -GPT_BASE_HOST="http://localhost" - # set to true to start with webhook. Else bot will start on polling method START_WITH_WEBHOOK="false" -# quantity of workers for uvicorn -WORKERS_COUNT=1 -# Enable uvicorn reloading -RELOAD="true" -DEBUG="true" +# ==== domain settings ==== +DOMAIN="http://localhost" +URL_PREFIX= + +# ==== gpt settings ==== +GPT_BASE_HOST="http://localhost" + +# ==== other settings ==== +USER="web" \ No newline at end of file diff --git a/bot_microservice/settings/.env.local.runtests b/bot_microservice/settings/.env.local.runtests index c0218ec..bccd02a 100644 --- a/bot_microservice/settings/.env.local.runtests +++ b/bot_microservice/settings/.env.local.runtests @@ -1,24 +1,20 @@ STAGE="runtests" +# ==== start app settings ==== APP_HOST="0.0.0.0" APP_PORT="8000" -USER="web" -TZ="Europe/Moscow" - +# ==== telegram settings ==== TELEGRAM_API_TOKEN="123456789:AABBCCDDEEFFaabbccddeeff-1234567890" - -# webhook settings -DOMAIN="http://localhost" -URL_PREFIX= - -GPT_BASE_HOST="http://localhost" - # set to true to start with webhook. 
Else bot will start on polling method START_WITH_WEBHOOK="false" -# quantity of workers for uvicorn -WORKERS_COUNT=1 -# Enable uvicorn reloading -RELOAD="true" -DEBUG="true" +# ==== domain settings ==== +DOMAIN="http://localhost" +URL_PREFIX= + +# ==== gpt settings ==== +GPT_BASE_HOST="http://localhost" + +# ==== other settings ==== +USER="web" \ No newline at end of file diff --git a/bot_microservice/settings/.env.template b/bot_microservice/settings/.env.template index c025a26..c106850 100644 --- a/bot_microservice/settings/.env.template +++ b/bot_microservice/settings/.env.template @@ -1,35 +1,43 @@ STAGE="dev" +# ==== start app settings ==== APP_HOST="0.0.0.0" APP_PORT="8000" - -# SENTRY_DSN= -SENTRY_TRACES_SAMPLE_RATE="0.95" - -# GRAYLOG_HOST= - -USER="web" -TZ="Europe/Moscow" - -TELEGRAM_API_TOKEN="123456789:AABBCCDDEEFFaabbccddeeff-1234567890" - -# webhook settings -DOMAIN="https://mydomain.com" -URL_PREFIX="/gpt" - -GPT_BASE_HOST="http://chat_service:8858" - -# set to true to start with webhook. Else bot will start on polling method -START_WITH_WEBHOOK="false" - # quantity of workers for uvicorn WORKERS_COUNT=1 # Enable uvicorn reloading RELOAD="true" DEBUG="true" +# ==== sentry ==== +SENTRY_DSN= +SENTRY_TRACES_SAMPLE_RATE="0.95" +DEPLOY_ENVIRONMENT="stage" + +# ==== logs ====: +ENABLE_JSON_LOGS="true" +ENABLE_SENTRY_LOGS="false" +GRAYLOG_HOST= +GRAYLOG_PORT= +LOG_TO_FILE="example.log" + +# ==== telegram settings ==== +TELEGRAM_API_TOKEN="123456789:AABBCCDDEEFFaabbccddeeff-1234567890" +# set to true to start with webhook. 
Else bot will start on polling method +START_WITH_WEBHOOK="false" + +# ==== domain settings ==== +DOMAIN="https://mydomain.com" +URL_PREFIX="/gpt" + +# ==== gpt settings ==== +GPT_BASE_HOST="http://chat_service:8858" GPT_MODEL="gpt-3.5-turbo-stream-DeepAi" +# ==== other settings ==== +USER="web" +TZ="Europe/Moscow" + # "gpt-3.5-turbo-stream-openai" # "gpt-3.5-turbo-Aichat" # "gpt-4-ChatgptAi" diff --git a/bot_microservice/settings/config.py b/bot_microservice/settings/config.py index 2b13707..68804d5 100644 --- a/bot_microservice/settings/config.py +++ b/bot_microservice/settings/config.py @@ -4,7 +4,7 @@ from pathlib import Path from typing import Any from dotenv import load_dotenv -from pydantic import HttpUrl, ValidationInfo, field_validator +from pydantic import model_validator from pydantic_settings import BaseSettings from constants import API_PREFIX @@ -28,7 +28,7 @@ load_dotenv(env_path, override=True) class SentrySettings(BaseSettings): - SENTRY_DSN: HttpUrl | None = None + SENTRY_DSN: str | None = None DEPLOY_ENVIRONMENT: str | None = None SENTRY_TRACES_SAMPLE_RATE: float = 0.95 @@ -41,6 +41,10 @@ class AppSettings(SentrySettings, BaseSettings): APP_PORT: int = 8000 STAGE: str = "dev" DEBUG: bool = False + # quantity of workers for uvicorn + WORKERS_COUNT: int = 1 + # Enable uvicorn reloading + RELOAD: bool = False TELEGRAM_API_TOKEN: str = "123456789:AABBCCDDEEFFaabbccddeeff-1234567890" # webhook settings @@ -48,21 +52,28 @@ class AppSettings(SentrySettings, BaseSettings): DOMAIN: str = "https://localhost" URL_PREFIX: str = "" - GRAYLOG_HOST: str | None = None - - GPT_MODEL: str = "gpt-3.5-turbo-stream-AItianhuSpace" + # ==== gpt settings ==== + GPT_MODEL: str = "gpt-3.5-turbo-stream-DeepAi" GPT_BASE_HOST: str = "http://chat_service:8858" - # quantity of workers for uvicorn - WORKERS_COUNT: int = 1 - # Enable uvicorn reloading - RELOAD: bool = False + ENABLE_JSON_LOGS: bool = True + ENABLE_SENTRY_LOGS: bool = False + GRAYLOG_HOST: str | None = None + 
GRAYLOG_PORT: int | None = None + LOG_TO_FILE: str | None = None - @field_validator("START_WITH_WEBHOOK") - def star_with_webhook_validator(cls, field_value: Any, info: ValidationInfo) -> Any: - if field_value == "false": - return False - return field_value + @model_validator(mode="before") # type: ignore[arg-type] + def validate_boolean_fields(self) -> Any: + for value in ( + "ENABLE_JSON_LOGS", + "ENABLE_SENTRY_LOGS", + "START_WITH_WEBHOOK", + "RELOAD", + "DEBUG", + ): + if str(self.get(value, "")).lower() == "false": + self[value] = False # type: ignore[index] + return self @cached_property def api_prefix(self) -> str: