microservices are able to run (#5)
.github/workflows/check-lint.yml (vendored, 6 changed lines)
@@ -62,10 +62,10 @@ jobs:
       - name: Analysing the code with mypy
        run: |
          source .venv/bin/activate
-         poetry run mypy app
+         poetry run mypy bot_microservice
       - name: Analysing the code with flake8
        run: |
-         poetry run flake8 app
+         poetry run flake8 bot_microservice
       - name: Analysing code with isort
        run: |
-         poetry run isort --check-only app
+         poetry run isort --check-only bot_microservice
.github/workflows/poetry-test.yml (vendored, 4 changed lines)
@@ -66,10 +66,10 @@ jobs:
       - name: Run tests
        run: |
          source .venv/bin/activate
-         poetry run pytest -vv --exitfirst
+         poetry run pytest bot_microservice/ -vv --exitfirst
       - name: Coverage report
        run: |
-         poetry run coverage run -m pytest
+         poetry run coverage run -m pytest bot_microservice/
          poetry run coverage report
       - name: Extended checks
        run: |
Makefile (4 changed lines)
@@ -7,12 +7,12 @@ RESET := $(shell tput -Txterm sgr0)
 .DEFAULT_GOAL := help
 .PHONY: help app format lint check-style check-import-sorting lint-typing lint-imports lint-complexity lint-deps
 
-PY_TARGET_DIRS=app settings tests
+PY_TARGET_DIRS=bot_microservice
 PORT=8000
 
 ## Run the application
 app:
-	poetry run uvicorn --host 0.0.0.0 --factory app.main:create_app --port $(PORT) --reload --reload-dir=app --reload-dir=settings
+	poetry run uvicorn --host 0.0.0.0 --factory bot_microservice.main:create_app --port $(PORT) --reload --reload-dir=bot_microservice --reload-dir=settings
 
 ## Format the code
 format:
README.md (26 changed lines)
@@ -18,17 +18,23 @@ sudo systemctl start chat_gpt_bot.service
 ```
 
 ## Local start
 
+### Bot:
+
+```bash
+cd bot_microservice
+python main.py
+```
+
 ```shell
-poetry run uvicorn --host 0.0.0.0 --factory app.main:create_app --port 8000 --reload --reload-dir=app --reload-dir=settings
+cd bot_microservice
+poetry run uvicorn --host 0.0.0.0 --factory main:create_app --port 8000 --reload
 ```
 
-- set `START_WITH_WEBHOOK` to blank
+To start in polling mode set `START_WITH_WEBHOOK` to blank
 
-## Delete or set webhook manually
+### Delete or set webhook manually
 
 url: https://api.telegram.org/bot{TELEGRAM_TOKEN}/{method}Webhook?url={WEBHOOK_URL}
 
@@ -37,6 +43,20 @@ methods:
 - set
 
 
+## Chat:
+
+```shell
+cd chat_gpt_microservice
+python3 run.py
+```
+
+
+```bash
+cd chat_gpt_microservice
+poetry run uvicorn --host 0.0.0.0 --factory run:create_app --port 1338 --reload
+```
+
 ## Tests
 
 ```bash
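For the manual webhook step above, the `{method}Webhook` URL template expands to two concrete calls. A minimal sketch using httpx; the token and public URL are placeholders you substitute by hand:

```python
import httpx

TELEGRAM_TOKEN = "123456789:AABBCCDDEEFFaabbccddeeff-1234567890"  # placeholder
WEBHOOK_URL = "https://example.com/api/" + TELEGRAM_TOKEN  # placeholder

# {method} = "set": register the webhook with Telegram
print(httpx.get(f"https://api.telegram.org/bot{TELEGRAM_TOKEN}/setWebhook", params={"url": WEBHOOK_URL}).json())

# {method} = "delete": remove it again (the bot can then run in polling mode)
print(httpx.get(f"https://api.telegram.org/bot{TELEGRAM_TOKEN}/deleteWebhook").json())
```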
(deleted workflow file)
@@ -1,33 +0,0 @@
-name: Docker Build and Push
-on:
-  push:
-    branches:
-      - main
-jobs:
-  build-and-push:
-    runs-on: ubuntu-latest
-    steps:
-
-      - name: Checkout code
-        uses: actions/checkout@v2
-
-      - name: Set up QEMU - Support for more platforms
-        uses: docker/setup-qemu-action@v2
-
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
-
-      - name: Login to Docker Hub
-        uses: docker/login-action@v1
-        with:
-          username: ${{ secrets.DOCKER_USERNAME }}
-          password: ${{ secrets.DOCKER_PASSWORD }}
-
-      - name: Build and push Docker image
-        uses: docker/build-push-action@v2
-        with:
-          context: .
-          file: Dockerfile
-          platforms: linux/amd64,linux/arm64
-          push: true
-          tags: ${{ secrets.DOCKER_USERNAME }}/freegpt-webui:latest
app/chat-gpt/.github/workflows/sync-hugging.yml (vendored, deleted, 19 lines)
@@ -1,19 +0,0 @@
-name: Sync with Hugging Face Hub
-
-on:
-  push:
-    branches:
-      - main
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Sync with Hugging Face
-        uses: ramonvc/huggingface-sync-action@v0.0.1
-        with:
-          github_repo_id: ramonvc/freegpt-webui
-          huggingface_repo_id: monra/freegpt-webui
-          repo_type: space
-          space_sdk: docker
-          hf_token: ${{ secrets.HF_TOKEN }}
(deleted Caddyfile)
@@ -1,11 +0,0 @@
-:8080 {
-
-    # Serving dynamic requests:
-    reverse_proxy chat_gpt:1338
-
-    # Logs:
-    log {
-        output stdout
-    }
-
-}
(deleted Dockerfile)
@@ -1,18 +0,0 @@
-FROM python:3.11-slim
-
-WORKDIR /app
-
-COPY requirements.txt requirements.txt
-
-RUN python -m venv venv
-ENV PATH="/app/venv/bin:$PATH"
-
-RUN apt-get update \
-    && apt-get install -y --no-install-recommends build-essential libffi-dev cmake libcurl4-openssl-dev \
-    && pip install --upgrade pip && pip3 install --no-cache-dir -r requirements.txt
-
-COPY . .
-
-RUN chmod -R 777 translations
-
-CMD ["python3", "./run.py"]
(deleted file)
@@ -1 +0,0 @@
-API_PREFIX = "/api"
(deleted file)
@@ -1,14 +0,0 @@
-from telegram import Update
-from telegram.ext import ContextTypes
-
-
-async def help_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
-    """Send a message when the command /help is issued."""
-
-    if update.message:
-        await update.message.reply_text(
-            "Help!",
-            disable_notification=True,
-            api_kwargs={"text": "Hello World"},
-        )
-    return None
(modified file)
@@ -1,23 +1,12 @@
 from fastapi import APIRouter, Request
 from fastapi.responses import ORJSONResponse
-from starlette import status
-
 from settings.config import get_settings
+from starlette import status
 
 router = APIRouter()
 settings = get_settings()
 
 
-@router.get(
-    "/healthcheck",
-    name="system:healthcheck",
-    status_code=status.HTTP_200_OK,
-    summary="Healthcheck service",
-)
-async def healthcheck() -> ORJSONResponse:
-    return ORJSONResponse(content=None, status_code=status.HTTP_200_OK)
-
-
 @router.post(
     f"/{settings.TELEGRAM_API_TOKEN}",
     name="system:process_bot_updates",
bot_microservice/api/system/controllers.py (new file, 15 lines)
@@ -0,0 +1,15 @@
+from fastapi import APIRouter
+from fastapi.responses import ORJSONResponse
+from starlette import status
+
+router = APIRouter()
+
+
+@router.get(
+    "/healthcheck",
+    name="system:healthcheck",
+    status_code=status.HTTP_200_OK,
+    summary="Healthcheck service",
+)
+async def healthcheck() -> ORJSONResponse:
+    return ORJSONResponse(content=None, status_code=status.HTTP_200_OK)
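A quick way to verify the relocated route; this sketch assumes the bot service is listening on localhost:8000 (per the Makefile and README) and that api_router mounts the routers under the API_PREFIX "/api" from constants.py:

```python
import httpx

resp = httpx.get("http://localhost:8000/api/healthcheck")
assert resp.status_code == 200  # body is null: ORJSONResponse with content=None
```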
bot_microservice/constants.py (new file, 13 lines)
@@ -0,0 +1,13 @@
+from enum import StrEnum
+
+API_PREFIX = "/api"
+CHAT_GPT_BASE_URL = "http://chat_service:1338/backend-api/v2/conversation"
+
+
+class LogLevelEnum(StrEnum):
+    CRITICAL = "critical"
+    ERROR = "error"
+    WARNING = "warning"
+    INFO = "info"
+    DEBUG = "debug"
+    NOTSET = ""
(modified file)
@@ -7,12 +7,11 @@ from http import HTTPStatus
 from typing import Any
 
 from fastapi import Request, Response
+from loguru import logger
+from settings.config import AppSettings
 from telegram import Update
 from telegram.ext import Application
-
-from app.core.utils import logger
-from settings.config import AppSettings
 
 
 class BotApplication:
     def __init__(
bot_microservice/core/commands.py (new file, 74 lines)
@@ -0,0 +1,74 @@
+import random
+import tempfile
+from uuid import uuid4
+
+import httpx
+from constants import CHAT_GPT_BASE_URL
+from core.utils import convert_file_to_wav
+from httpx import AsyncClient, AsyncHTTPTransport
+from loguru import logger
+from telegram import Update
+from telegram.ext import ContextTypes
+
+
+async def help_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
+    """Send a message when the command /help is issued."""
+
+    if update.message:
+        await update.message.reply_text(
+            "Help!",
+            disable_notification=True,
+            api_kwargs={"text": "Hello World"},
+        )
+    return None
+
+
+async def ask_question(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
+    await update.message.reply_text(  # type: ignore[union-attr]
+        "Пожалуйста подождите, ответ в среднем занимает 10-15 секунд"
+    )
+
+    chat_gpt_request = {
+        "conversation_id": str(uuid4()),
+        "action": "_ask",
+        "model": "gpt-3.5-turbo",
+        "jailbreak": "default",
+        "meta": {
+            "id": random.randint(10**18, 10**19 - 1),  # noqa: S311
+            "content": {
+                "conversation": [],
+                "internet_access": False,
+                "content_type": "text",
+                "parts": [{"content": update.message.text, "role": "user"}],  # type: ignore[union-attr]
+            },
+        },
+    }
+
+    transport = AsyncHTTPTransport(retries=1)
+    async with AsyncClient(transport=transport) as client:
+        try:
+            response = await client.post(CHAT_GPT_BASE_URL, json=chat_gpt_request)
+            status = response.status_code
+            if status != httpx.codes.OK:
+                logger.info(f'got response status: {status} from chat api', data=chat_gpt_request)
+                await update.message.reply_text(  # type: ignore[union-attr]
+                    "Что-то пошло не так, попробуйте еще раз или обратитесь к администратору"
+                )
+                return
+
+            data = response.json()
+            await update.message.reply_text(data)  # type: ignore[union-attr]
+        except Exception as error:
+            logger.error("error get data from chat api", error=error)
+            await update.message.reply_text("Вообще всё сломалось :(")  # type: ignore[union-attr]
+
+
+async def voice_recognize(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
+    await update.message.reply_text(  # type: ignore[union-attr]
+        "Пожалуйста, ожидайте :)\nТрехминутная запись обрабатывается примерно 30 секунд"
+    )
+    sound_bytes = await update.message.voice.get_file()  # type: ignore[union-attr]
+    sound_bytes = await sound_bytes.download_as_bytearray()
+    with tempfile.NamedTemporaryFile(delete=False) as tmpfile:
+        tmpfile.write(sound_bytes)
+    convert_file_to_wav(tmpfile.name)
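The ask_question handler above can be exercised against the chat service directly. A sketch reusing the chat_gpt_request payload shape; it assumes the service is reachable on localhost:1338 rather than the docker-compose hostname baked into CHAT_GPT_BASE_URL:

```python
import httpx
from uuid import uuid4

payload = {
    "conversation_id": str(uuid4()),
    "action": "_ask",
    "model": "gpt-3.5-turbo",
    "jailbreak": "default",
    "meta": {
        "id": 123456789012345678,  # the handler generates a random 19-digit int here
        "content": {
            "conversation": [],
            "internet_access": False,
            "content_type": "text",
            "parts": [{"content": "Hello!", "role": "user"}],
        },
    },
}
print(httpx.post("http://localhost:1338/backend-api/v2/conversation", json=payload).text)
```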
(modified file)
@@ -1,9 +1,8 @@
 from dataclasses import dataclass, field
 from typing import Any
 
-from telegram.ext import CommandHandler
-
-from app.core.commands import help_command
+from core.commands import ask_question, help_command, voice_recognize
+from telegram.ext import CommandHandler, MessageHandler, filters
 
 
 @dataclass
@@ -16,5 +15,6 @@ class CommandHandlers:
 
 command_handlers = CommandHandlers()
 
-
 command_handlers.add_handler(CommandHandler("help", help_command))
+command_handlers.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, ask_question))
+command_handlers.add_handler(MessageHandler(filters.VOICE | filters.AUDIO, voice_recognize))
bot_microservice/core/logging.py (new file, 102 lines)
@@ -0,0 +1,102 @@
+import logging
+import sys
+from types import FrameType
+from typing import TYPE_CHECKING, Any, cast
+
+from constants import LogLevelEnum
+from loguru import logger
+from sentry_sdk.integrations.logging import EventHandler
+
+if TYPE_CHECKING:
+    from loguru import Record
+else:
+    Record = dict[str, Any]
+
+
+class InterceptHandler(logging.Handler):
+    def emit(self, record: logging.LogRecord) -> None:
+        # Get corresponding Loguru level if it exists
+        try:
+            level = logger.level(record.levelname).name
+        except ValueError:
+            level = str(record.levelno)
+
+        # Find caller from where originated the logged message
+        frame, depth = logging.currentframe(), 2
+        while frame.f_code.co_filename == logging.__file__:
+            frame = cast(FrameType, frame.f_back)
+            depth += 1
+
+        logger.opt(depth=depth, exception=record.exc_info).log(
+            level,
+            record.getMessage(),
+        )
+
+
+def configure_logging(*, level: LogLevelEnum, enable_json_logs: bool, enable_sentry_logs: bool) -> None:
+    logging_level = level.name
+
+    intercept_handler = InterceptHandler()
+
+    logging.basicConfig(handlers=[intercept_handler], level=logging_level)
+
+    formatter = _json_formatter if enable_json_logs else _text_formatter
+    logger.configure(
+        handlers=[
+            {
+                "sink": sys.stdout,
+                "level": logging_level,
+                "serialize": enable_json_logs,
+                "format": formatter,
+                "colorize": True,
+            }
+        ],
+    )
+
+    # sentry_sdk cannot work with loguru out of the box; an extra handler is needed
+    # https://github.com/getsentry/sentry-python/issues/653#issuecomment-788854865
+    # https://forum.sentry.io/t/changing-issue-title-when-logging-with-traceback/446
+    if enable_sentry_logs:
+        handler = EventHandler(level=logging.WARNING)
+        logger.add(handler, diagnose=True, level=logging.WARNING, format=_sentry_formatter)
+
+
+def _json_formatter(record: Record) -> str:
+    # Trim the trailing `\n`: line breaks are not needed in the JSON format
+    return record.get("message", "").strip()
+
+
+def _sentry_formatter(record: Record) -> str:
+    return "{name}:{function} {message}"
+
+
+def _text_formatter(record: Record) -> str:
+    # WARNING !!!
+    # This function must return a string that contains only formatting templates.
+    # If values from record (or anywhere else) are interpolated into the string,
+    # loguru may treat them as f-strings and try to process them, which raises an error.
+    # For example, instead of interpolating a value from the extra field into the
+    # format string, pass a template of the form {extra[key]}.
+
+    # Standard loguru format; can also be set via the LOGURU_FORMAT env variable
+    format_ = (
+        "<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | "
+        "<level>{level: <8}</level> | "
+        "<cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>"
+    )
+
+    # Append meta parameters such as user_id or art_id that are passed via logger.bind(...)
+    extra = record["extra"]
+    if extra:
+        formatted = ", ".join(f"{key}" + "={extra[" + str(key) + "]}" for key, value in extra.items())
+        format_ += f" - <cyan>{formatted}</cyan>"
+
+    format_ += "\n"
+
+    if record["exception"] is not None:
+        format_ += "{exception}\n"
+
+    return format_
+
+
+configure_logging(level=LogLevelEnum.DEBUG, enable_json_logs=True, enable_sentry_logs=True)
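To illustrate the warning in _text_formatter: values attached via logger.bind() land in record["extra"] and are rendered through the {extra[key]} templates the formatter builds, never interpolated into the format string itself. A minimal sketch:

```python
from loguru import logger

# _text_formatter appends " - user_id={extra[user_id]}, request_id={extra[request_id]}"
# to the template, and loguru substitutes the bound values safely at render time.
logger.bind(user_id=42, request_id="abc").info("update processed")
```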
(modified file)
@@ -1,19 +1,9 @@
-import sys
+import subprocess  # noqa
 from datetime import datetime, timedelta
 from functools import lru_cache, wraps
 from typing import Any
 
-from loguru import logger as loguru_logger
-
-logger = loguru_logger
-
-logger.remove()
-logger.add(
-    sink=sys.stdout,
-    colorize=True,
-    level='DEBUG',
-    format="<cyan>{time:DD.MM.YYYY HH:mm:ss}</cyan> | <level>{level}</level> | <magenta>{message}</magenta>",
-)
+from loguru import logger
 
 
 def timed_cache(**timedelta_kwargs: Any) -> Any:
@@ -35,3 +25,15 @@ def timed_cache(**timedelta_kwargs: Any) -> Any:
         return _wrapped
 
     return _wrapper
+
+
+def convert_file_to_wav(filename: str) -> str:
+    new_filename = filename + '.wav'
+
+    cmd = ['ffmpeg', '-loglevel', 'quiet', '-i', filename, '-vn', new_filename]
+
+    try:
+        subprocess.run(args=cmd)  # noqa: S603
+    except Exception as error:
+        logger.error("cant convert voice: reason", error=error)
+    return new_filename
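A small usage sketch for the new helper; the input path is a placeholder (in this PR the real caller is voice_recognize, which passes a temp file holding the downloaded Telegram voice message):

```python
from core.utils import convert_file_to_wav

# Equivalent to running: ffmpeg -loglevel quiet -i /tmp/voice.oga -vn /tmp/voice.oga.wav
wav_path = convert_file_to_wav("/tmp/voice.oga")
print(wav_path)  # "/tmp/voice.oga.wav"; ffmpeg must be on PATH
```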
(modified file)
@@ -1,12 +1,12 @@
 import asyncio
 from functools import cached_property
 
+import sentry_sdk
+from core.bot import BotApplication, BotQueue
+from core.handlers import command_handlers
 from fastapi import FastAPI
 from fastapi.responses import UJSONResponse
 
-from app.core.bot import BotApplication, BotQueue
-from app.core.handlers import command_handlers
-from app.routers import api_router
+from routers import api_router
 from settings.config import AppSettings, get_settings
@@ -28,6 +28,14 @@ class Application:
         self.app.include_router(api_router)
         self.configure_hooks()
 
+        if settings.SENTRY_DSN is not None:
+            sentry_sdk.init(
+                dsn=settings.SENTRY_DSN,  # type: ignore[arg-type]
+                environment=settings.DEPLOY_ENVIRONMENT,
+                traces_sample_rate=settings.SENTRY_TRACES_SAMPLE_RATE,
+                send_client_reports=False,
+            )
+
     @cached_property
     def fastapi_app(self) -> FastAPI:
         return self.app
@@ -63,7 +71,7 @@ def main() -> None:
 
     """Entrypoint of the application."""
     uvicorn.run(
-        "app.main:create_app",
+        "main:create_app",
         workers=app.state.settings.WORKERS_COUNT,
         host=app.state.settings.APP_HOST,
         port=app.state.settings.APP_PORT,
(modified file)
@@ -1,7 +1,7 @@
+from api.bot.controllers import router as bot_router
+from api.system.controllers import router as system_router
 from fastapi import APIRouter
 from fastapi.responses import ORJSONResponse
-
-from app.api.system.controllers import router as system_router
 from settings.config import get_settings
 
 settings = get_settings()
@@ -13,3 +13,4 @@ api_router = APIRouter(
 
 
 api_router.include_router(system_router, tags=["system"])
+api_router.include_router(bot_router, tags=["bot"])
(modified file)
@@ -4,6 +4,7 @@ APP_HOST="0.0.0.0"
 APP_PORT="8000"
 
+USER="web"
 TZ="Europe/Moscow"
 
 TELEGRAM_API_TOKEN="123456789:AABBCCDDEEFFaabbccddeeff-1234567890"
 
(modified file)
@@ -4,6 +4,7 @@ APP_HOST="0.0.0.0"
 APP_PORT="8000"
 
+USER="web"
 TZ="Europe/Moscow"
 
 TELEGRAM_API_TOKEN="123456789:AABBCCDDEEFFaabbccddeeff-1234567890"
 
(modified file)
@@ -3,7 +3,11 @@ STAGE="dev"
 APP_HOST="0.0.0.0"
 APP_PORT="8000"
 
+# SENTRY_DSN=
+SENTRY_TRACES_SAMPLE_RATE="0.95"
+
+USER="web"
 TZ="Europe/Moscow"
 
 TELEGRAM_API_TOKEN="123456789:AABBCCDDEEFFaabbccddeeff-1234567890"
 
(modified file)
@@ -2,11 +2,11 @@ from functools import cached_property
 from os import environ
 from pathlib import Path
 
+from constants import API_PREFIX
 from dotenv import load_dotenv
+from pydantic import HttpUrl
 from pydantic_settings import BaseSettings
-
-from app.constants import API_PREFIX
 
 BASE_DIR = Path(__file__).parent.parent
 SHARED_DIR = BASE_DIR.resolve().joinpath("shared")
 SHARED_DIR.mkdir(exist_ok=True)
@@ -25,7 +25,13 @@ if environ.get("STAGE") == "runtests":
     load_dotenv(env_path, override=True)
 
 
-class AppSettings(BaseSettings):
+class SentrySettings(BaseSettings):
+    SENTRY_DSN: HttpUrl | None = None
+    DEPLOY_ENVIRONMENT: str | None = None
+    SENTRY_TRACES_SAMPLE_RATE: float = 0.95
+
+
+class AppSettings(SentrySettings, BaseSettings):
     """Application settings."""
 
     PROJECT_NAME: str = "chat gpt bot"
(modified file)
@@ -9,16 +9,15 @@ from typing import Any, AsyncGenerator
 
 import pytest
 import pytest_asyncio
+from core.bot import BotApplication
+from core.handlers import command_handlers
 from fastapi import FastAPI
 from httpx import AsyncClient
+from main import Application as AppApplication
 from pytest_asyncio.plugin import SubRequest
+from settings.config import AppSettings, get_settings
 from telegram import Bot, User
 from telegram.ext import Application, ApplicationBuilder, Defaults, ExtBot
 
-from app.core.bot import BotApplication
-from app.core.handlers import command_handlers
-from app.main import Application as AppApplication
-from settings.config import AppSettings, get_settings
 from tests.integration.bot.networking import NonchalantHttpxRequest
 from tests.integration.factories.bot import BotInfoFactory
(modified file)
@@ -5,11 +5,10 @@ from typing import Any
 
 import pytest
 from assertpy import assert_that
+from core.bot import BotApplication, BotQueue
 from faker import Faker
 from httpx import AsyncClient
-
-from app.core.bot import BotApplication, BotQueue
-from app.main import Application
+from main import Application
 from tests.integration.bot.networking import MockedRequest
 from tests.integration.factories.bot import (
     BotChatFactory,
(modified file)
@@ -2,7 +2,6 @@ import string
 
 import factory
 from faker import Faker
-
 from tests.integration.factories.models import Chat, User
 
 faker = Faker("ru_RU")
(8 binary image files moved without content changes; sizes 9.3 KiB, 35 KiB, 8.6 KiB, 536 B, 1.2 KiB, 9.4 KiB, 2.0 KiB, 1.1 KiB)
(modified file)
@@ -4,5 +4,5 @@
     "port": 1338,
     "debug": false
   },
-  "url_prefix": ""
+  "url_prefix": "/gpt"
 }
(modified file)
@@ -1,6 +1,5 @@
 import os
-
-from ..typing import get_type_hints
+from typing import get_type_hints
 
 url = None
 model = None
(modified file)
@@ -1,9 +1,8 @@
 import os
+from typing import get_type_hints
 
 import requests
 
-from ...typing import get_type_hints
-
 url = "https://aiservice.vercel.app/api/chat/answer"
 model = ["gpt-3.5-turbo"]
 supports_stream = False
(modified file)
@@ -1,10 +1,9 @@
 import json
 import os
+from typing import get_type_hints
 
 import requests
 
-from ...typing import get_type_hints
-
 url = "https://hteyun.com"
 model = [
     "gpt-3.5-turbo",
(modified file)
@@ -4,10 +4,10 @@ import os
 import time
 import uuid
 from datetime import datetime
+from typing import Dict, get_type_hints
 
 import requests
 
-from ...typing import Dict, get_type_hints, sha256
+from g4f.typing import sha256
 
 url: str = "https://ai.ls"
 model: str = "gpt-3.5-turbo"
(modified file)
@@ -2,12 +2,11 @@ import json
 import os
 import random
 import re
+from typing import get_type_hints
 
 import browser_cookie3
 import requests
 
-from ...typing import get_type_hints
-
 url = "https://bard.google.com"
 model = ["Palm2"]
 supports_stream = False
(modified file)
@@ -4,13 +4,12 @@
 import random
 import ssl
 import uuid
+from typing import get_type_hints
 
 import aiohttp
 import certifi
 import requests
 
-from ...typing import get_type_hints
-
 url = "https://bing.com/chat"
 model = ["gpt-4"]
 supports_stream = True
(modified file)
@@ -1,10 +1,9 @@
 import json
 import os
+from typing import get_type_hints
 
 import requests
 
-from ...typing import get_type_hints
-
 url = "https://v.chatfree.cc"
 model = ["gpt-3.5-turbo", "gpt-3.5-turbo-16k"]
 supports_stream = False
(modified file)
@@ -1,10 +1,9 @@
 import os
 import re
+from typing import get_type_hints
 
 import requests
 
-from ...typing import get_type_hints
-
 url = "https://chatgpt.ai/gpt-4/"
 model = ["gpt-4"]
 supports_stream = True
(modified file)
@@ -1,11 +1,10 @@
 import base64
 import os
 import re
+from typing import get_type_hints
 
 import requests
 
-from ...typing import get_type_hints
-
 url = "https://chatgptlogin.ac"
 model = ["gpt-3.5-turbo"]
 supports_stream = False
(modified file)
@@ -2,11 +2,10 @@ import hashlib
 import json
 import os
 import random
+from typing import get_type_hints
 
 import requests
 
-from ...typing import get_type_hints
-
 url = "https://deepai.org"
 model = ["gpt-3.5-turbo"]
 supports_stream = True
(modified file)
@@ -1,10 +1,9 @@
 import json
 import os
+from typing import get_type_hints
 
 import requests
 
-from ...typing import get_type_hints
-
 url = "https://free.easychat.work"
 model = [
     "gpt-3.5-turbo",
(modified file)
@@ -1,10 +1,9 @@
 import json
 import os
+from typing import get_type_hints
 
 import requests
 
-from ...typing import get_type_hints
-
 url = "https://gpt4.ezchat.top"
 model = [
     "gpt-3.5-turbo",
(modified file)
@@ -1,10 +1,9 @@
 import json
 import os
+from typing import get_type_hints
 
 import requests
 
-from ...typing import get_type_hints
-
 url = "https://forefront.com"
 model = ["gpt-3.5-turbo"]
 supports_stream = True
(modified file)
@@ -1,12 +1,11 @@
 import json
 import os
 import uuid
+from typing import get_type_hints
 
 import requests
 from Crypto.Cipher import AES
 
-from ...typing import get_type_hints
-
 url = "https://chat.getgpt.world/"
 model = ["gpt-3.5-turbo"]
 supports_stream = True
(modified file)
@@ -1,9 +1,8 @@
 import os
+from typing import get_type_hints
 
 import requests
 
-from ...typing import get_type_hints
-
 url = "https://gpt4.xunika.uk/"
 model = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo-0613"]
 supports_stream = True
(modified file)
@@ -1,11 +1,10 @@
 import os
 from json import loads
+from typing import get_type_hints
 from uuid import uuid4
 
 from requests import Session
 
-from ...typing import get_type_hints
-
 url = "https://gpt-gm.h2o.ai"
 model = ["falcon-40b", "falcon-7b", "llama-13b"]
 supports_stream = True
(modified file)
@@ -1,9 +1,8 @@
 import os
+from typing import get_type_hints
 
 import requests
 
-from ...typing import get_type_hints
-
 url = "https://liaobots.com"
 model = ["gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4"]
 supports_stream = True
(modified file)
@@ -1,10 +1,9 @@
 import json
 import os
+from typing import get_type_hints
 
 import requests
 
-from ...typing import get_type_hints
-
 url = "http://supertest.lockchat.app"
 model = ["gpt-4", "gpt-3.5-turbo"]
 supports_stream = True
(modified file)
@@ -1,9 +1,8 @@
 import os
+from typing import get_type_hints
 
 import requests
 
-from ...typing import get_type_hints
-
 url = "https://mishalsgpt.vercel.app"
 model = ["gpt-3.5-turbo-16k-0613", "gpt-3.5-turbo"]
 supports_stream = True