mirror of https://github.com/Balshgit/gpt_chat_bot.git
synced 2025-12-15 16:10:39 +03:00
remove unused providers (#93)
@@ -58,13 +58,9 @@ class ChatGptModelsEnum(StrEnum):
    Llama_2_70b_chat_hf_stream_DeepInfra = "Llama-2-70b-chat-hf-stream-DeepInfra"
    gpt_4_stream_aivvm = "gpt-4-stream-aivvm"
    llama2_70B = "llama2-70B"
    gpt_3_5_turbo_gptChatly = "gpt-3.5-turbo-gptChatly"
    gpt_3_5_turbo_stream_Berlin = "gpt-3.5-turbo-stream-Berlin"
    gpt_3_5_turbo_stream_GeekGpt = "gpt-3.5-turbo-stream-GeekGpt"
    gpt_3_5_turbo_stream_gptforlove = "gpt-3.5-turbo-stream-gptforlove"
    gpt_3_5_turbo_stream_fakeGpt = "gpt-3.5-turbo-stream-fakeGpt"
    gpt_3_5_turbo_stream_aura = "gpt-3.5-turbo-stream-aura"
    gpt_3_5_turbo_stream_geminiProChat = "gpt-3.5-turbo-stream-geminiProChat"
    gpt_3_5_turbo_stream_flowgpt = "gpt-3.5-turbo-stream-flowgpt"

    @classmethod

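Since enum member names must be valid Python identifiers, the hyphens and dots of the provider-facing model names survive only in the string values; StrEnum (Python 3.11+) makes each member compare equal to its value. A minimal sketch of the pattern, using one member from the enum above as a stand-in:

from enum import StrEnum  # available from Python 3.11

class Models(StrEnum):  # hypothetical stand-in for ChatGptModelsEnum
    # identifier uses underscores; the value keeps the real model name
    gpt_3_5_turbo_stream_Berlin = "gpt-3.5-turbo-stream-Berlin"

# StrEnum members are str instances, so they compare equal to their values
assert Models.gpt_3_5_turbo_stream_Berlin == "gpt-3.5-turbo-stream-Berlin"
assert str(Models.gpt_3_5_turbo_stream_Berlin) == "gpt-3.5-turbo-stream-Berlin"
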
@@ -83,8 +79,6 @@ class ChatGptModelsEnum(StrEnum):
                    priority = 3
                case "gpt-3.5-turbo-stream-GeekGpt":
                    priority = 2
                case "gpt-3.5-turbo-stream-fakeGpt":
                    priority = 2
            fields = {"model": model, "priority": priority}
            models.append(fields)
        return models

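The priority assignment uses structural pattern matching on each model's string value. A self-contained sketch of how such a builder plausibly fits together; only the match arms mirror the diff, the method name, default priority, and loop are assumptions:

from enum import StrEnum
from typing import Any

class Models(StrEnum):  # hypothetical stand-in for ChatGptModelsEnum
    gpt_3_5_turbo_stream_GeekGpt = "gpt-3.5-turbo-stream-GeekGpt"
    gpt_3_5_turbo_stream_aura = "gpt-3.5-turbo-stream-aura"

    @classmethod
    def build_models(cls) -> list[dict[str, Any]]:  # hypothetical name
        models: list[dict[str, Any]] = []
        for model in cls:
            priority = 0  # assumed default when no case matches
            match model.value:
                case "gpt-3.5-turbo-stream-GeekGpt":
                    priority = 2
            fields = {"model": model, "priority": priority}
            models.append(fields)
        return models

models = Models.build_models()
assert models[0]["priority"] == 2 and models[1]["priority"] == 0
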
@@ -92,9 +86,9 @@ class ChatGptModelsEnum(StrEnum):
    @staticmethod
    def _deprecated() -> set[str]:
        return {
            "gpt-3.5-turbo-stream-gptforlove",
            "gpt-3.5-turbo-stream-gptalk",
            "gpt-3.5-turbo-stream-ChatForAi",
            "gpt-4-ChatGpt4Online",
            "gpt-3.5-turbo--stream-gptTalkRu",
            "gpt-3.5-turbo-stream-GeekGpt",
            "gpt-3.5-turbo-gptChatly",
            "gpt-3.5-turbo-stream-fakeGpt",
            "gpt-3.5-turbo-stream-aura",
            "gpt-3.5-turbo-stream-geminiProChat",
        }

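_deprecated() returns plain strings rather than enum members, which suggests the set is checked against member values. A self-contained sketch of that filtering pattern; the list comprehension is an assumption about usage, since only _deprecated itself appears in the diff:

from enum import StrEnum

class Models(StrEnum):  # hypothetical two-member stand-in
    gpt_3_5_turbo_stream_Berlin = "gpt-3.5-turbo-stream-Berlin"
    gpt_3_5_turbo_stream_aura = "gpt-3.5-turbo-stream-aura"

    @staticmethod
    def _deprecated() -> set[str]:
        return {"gpt-3.5-turbo-stream-aura"}

# skip deprecated models when assembling the active list (assumed usage)
active = [model for model in Models if model.value not in Models._deprecated()]
assert active == [Models.gpt_3_5_turbo_stream_Berlin]
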
@@ -80,11 +80,11 @@ class BotQueue:
         self.queue.put_nowait(tg_update)
         return Response(status_code=HTTPStatus.ACCEPTED)

-    async def get_updates_from_queue(self) -> None:
+    async def get_updates_from_queue(self, wait_on_each_update: int = 0) -> None:
         while True:
             update = await self.queue.get()
-            await asyncio.create_task(self.bot_app.application.process_update(update))
-            await sleep(0)
+            asyncio.create_task(self.bot_app.application.process_update(update))
+            await sleep(wait_on_each_update)


 @asynccontextmanager

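This hunk is a behavioral change, not a cleanup: awaiting asyncio.create_task(...) blocks on the freshly created task, so updates were effectively processed one at a time; dropping the await fires the handler in the background and lets the loop pull the next update immediately, throttled only by the new wait_on_each_update pause. One caveat worth knowing: the event loop holds only weak references to tasks, so a fire-and-forget task should normally be anchored somewhere to protect it from garbage collection. A minimal standalone sketch of the pattern, not the project's code:

import asyncio

background_tasks: set[asyncio.Task] = set()

async def process_update(update: int) -> None:
    await asyncio.sleep(0.1)  # simulate handler work
    print(f"processed update {update}")

async def consume(queue: asyncio.Queue, wait_on_each_update: float = 0) -> None:
    while True:
        update = await queue.get()
        # fire-and-forget: schedule the handler without awaiting it
        task = asyncio.create_task(process_update(update))
        # keep a strong reference so the pending task cannot be collected
        background_tasks.add(task)
        task.add_done_callback(background_tasks.discard)
        await asyncio.sleep(wait_on_each_update)

async def main() -> None:
    queue: asyncio.Queue = asyncio.Queue()
    consumer = asyncio.create_task(consume(queue))
    for i in range(3):
        queue.put_nowait(i)
    await asyncio.sleep(0.5)  # give the background handlers time to finish
    consumer.cancel()
    try:
        await consumer
    except asyncio.CancelledError:
        pass

asyncio.run(main())
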
@@ -82,5 +82,5 @@ class MockedRequest:
     def __init__(self, data: dict[str, Any]) -> None:
         self.data = data

-    async def json(self) -> dict[str, Any]:
+    def json(self) -> dict[str, Any]:
         return self.data

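With json() now synchronous, the test double hands back its payload directly and comparisons need no await. The class in full, plus an illustrative usage line that is not part of the diff:

from typing import Any

class MockedRequest:
    """Minimal test double standing in for an incoming HTTP request."""

    def __init__(self, data: dict[str, Any]) -> None:
        self.data = data

    def json(self) -> dict[str, Any]:
        return self.data

request = MockedRequest({"update_id": 1})
assert request.json() == {"update_id": 1}  # plain call, no await
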
@@ -65,6 +65,19 @@ async def test_bot_queue(
     assert bot_queue.queue.empty()


+async def test_get_update_from_bot_queue(
+    bot: BotApplication,
+) -> None:
+    bot_queue = BotQueue(bot_app=bot)
+    asyncio.create_task(bot_queue.get_updates_from_queue())
+
+    bot_update = BotUpdateFactory(message=BotMessageFactory.create_instance(text="/help"))
+    mocked_request = MockedRequest(bot_update)
+    await bot_queue.put_updates_on_queue(mocked_request)  # type: ignore
+    update = await bot_queue.queue.get()
+    assert update.json() == mocked_request.json()
+
+
 async def test_no_update_message(
     main_application: Application,
     test_settings: AppSettings,

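The new test needs an async-capable pytest runner; assuming pytest-asyncio (the diff does not show the project's test configuration), the queue round trip it exercises boils down to:

import asyncio

import pytest

@pytest.mark.asyncio  # assumes pytest-asyncio; the project's runner may differ
async def test_queue_round_trip() -> None:
    queue: asyncio.Queue[dict] = asyncio.Queue()
    payload = {"message": {"text": "/help"}}
    queue.put_nowait(payload)  # what put_updates_on_queue does with the parsed update
    assert await queue.get() == payload
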