update chat service (#31)

* rename chatgpt service

* add zeus tool for new provider

* update chat service

* update README.md
Dmitry Afanasyev 2023-10-10 23:22:41 +03:00, committed by GitHub
parent f6f3865fb6
commit e9f76d0ea9
30 changed files with 8095 additions and 969 deletions


@@ -57,7 +57,8 @@ methods:
 ## Chat:
 ```shell
-docker run -p 8858:8858 -it --name freegpt --rm -e CHAT_PATH=/chat balshdocker/freegpt:latest
+docker run --rm --net=host --name freegpt -e CHAT_PATH=/chat balshdocker/freegpt:latest
+docker run --rm --net=host --name zeus balshdocker/freegpt-zeus:latest
 ```
 Open http://localhost:8858/chat/
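
For orientation, here is a rough sketch of what a request to the chat service looks like once both containers are up. The endpoint and top-level payload fields come from `_build_request_data` in this commit; the nested `meta.content.parts` shape is inferred from the C++ provider code further down, so treat it as an assumption rather than a documented contract:

```python
# Sketch only: field values are illustrative, not taken from the repo's tests.
import random
import uuid

import httpx

payload = {
    "conversation_id": str(uuid.uuid4()),
    "action": "_ask",
    "model": "gpt-3.5-turbo-stream-DeepAi",  # any member of ChatGptModelsEnum
    "jailbreak": "default",
    "meta": {
        "id": random.randint(10**18, 10**19 - 1),
        # assumption: the C++ providers read meta.content.parts[0].content
        "content": {"parts": [{"content": "Hello!"}]},
    },
}
response = httpx.post("http://localhost:8858/backend-api/v2/conversation", json=payload, timeout=50)
print(response.text)
```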


@@ -30,7 +30,7 @@ def get_database(settings: AppSettings = Depends(get_settings)) -> Database:
     return Database(settings=settings)


-def get_chat_gpt_repository(
+def get_chatgpt_repository(
     db: Database = Depends(get_database), settings: AppSettings = Depends(get_settings)
 ) -> ChatGPTRepository:
     return ChatGPTRepository(settings=settings, db=db)
@@ -41,6 +41,6 @@ def new_bot_queue(bot_app: BotApplication = Depends(get_bot_app)) -> BotQueue:
 def get_chatgpt_service(
-    chat_gpt_repository: ChatGPTRepository = Depends(get_chat_gpt_repository),
+    chatgpt_repository: ChatGPTRepository = Depends(get_chatgpt_repository),
 ) -> ChatGptService:
-    return ChatGptService(repository=chat_gpt_repository)
+    return ChatGptService(repository=chatgpt_repository)
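
A minimal sketch of how the renamed dependency chain would be consumed from a route. Only the dependency and service names come from the diff; the import paths and the route itself are illustrative:

```python
from fastapi import APIRouter, Depends

# hypothetical import paths; the commit does not show the real module layout
from core.bot.services import ChatGptService
from deps import get_chatgpt_service

router = APIRouter()


@router.get("/chatgpt/current-model")  # hypothetical route, not part of this commit
async def current_model(
    chatgpt_service: ChatGptService = Depends(get_chatgpt_service),
) -> dict[str, str]:
    # FastAPI resolves the chain get_settings -> get_database
    # -> get_chatgpt_repository -> get_chatgpt_service per request.
    return {"model": await chatgpt_service.get_current_chatgpt_model()}
```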


@@ -3,7 +3,7 @@ from enum import StrEnum, unique
 AUDIO_SEGMENT_DURATION = 120 * 1000

 API_PREFIX = "/api"
-CHAT_GPT_BASE_URI = "/backend-api/v2/conversation"
+CHATGPT_BASE_URI = "/backend-api/v2/conversation"
 INVALID_GPT_REQUEST_MESSAGES = ("Invalid request model", "return unexpected http status code")
@@ -31,16 +31,12 @@ class LogLevelEnum(StrEnum):
 @unique
 class ChatGptModelsEnum(StrEnum):
     gpt_3_5_turbo_stream_openai = "gpt-3.5-turbo-stream-openai"
-    gpt_3_5_turbo_Aichat = "gpt-3.5-turbo-Aichat"
     gpt_4_ChatgptAi = "gpt-4-ChatgptAi"
     gpt_3_5_turbo_weWordle = "gpt-3.5-turbo-weWordle"
-    gpt_3_5_turbo_acytoo = "gpt-3.5-turbo-acytoo"
     gpt_3_5_turbo_stream_DeepAi = "gpt-3.5-turbo-stream-DeepAi"
-    gpt_3_5_turbo_stream_H2o = "gpt-3.5-turbo-stream-H2o"
     gpt_3_5_turbo_stream_yqcloud = "gpt-3.5-turbo-stream-yqcloud"
     gpt_OpenAssistant_stream_HuggingChat = "gpt-OpenAssistant-stream-HuggingChat"
     gpt_4_turbo_stream_you = "gpt-4-turbo-stream-you"
-    gpt_3_5_turbo_AItianhu = "gpt-3.5-turbo-AItianhu"
     gpt_3_stream_binjie = "gpt-3-stream-binjie"
     gpt_3_5_turbo_stream_CodeLinkAva = "gpt-3.5-turbo-stream-CodeLinkAva"
     gpt_4_stream_ChatBase = "gpt-4-stream-ChatBase"
@@ -48,14 +44,15 @@ class ChatGptModelsEnum(StrEnum):
     gpt_3_5_turbo_16k_stream_Ylokh = "gpt-3.5-turbo-16k-stream-Ylokh"
     gpt_3_5_turbo_stream_Vitalentum = "gpt-3.5-turbo-stream-Vitalentum"
     gpt_3_5_turbo_stream_GptGo = "gpt-3.5-turbo-stream-GptGo"
-    gpt_3_5_turbo_stream_AItianhuSpace = "gpt-3.5-turbo-stream-AItianhuSpace"
     gpt_3_5_turbo_stream_Aibn = "gpt-3.5-turbo-stream-Aibn"
     gpt_3_5_turbo_ChatgptDuo = "gpt-3.5-turbo-ChatgptDuo"
     gpt_3_5_turbo_stream_FreeGpt = "gpt-3.5-turbo-stream-FreeGpt"
-    gpt_3_5_turbo_stream_ChatForAi = "gpt-3.5-turbo-stream-ChatForAi"
     gpt_3_5_turbo_stream_Cromicle = "gpt-3.5-turbo-stream-Cromicle"
     gpt_4_stream_Chatgpt4Online = "gpt-4-stream-Chatgpt4Online"
     gpt_3_5_turbo_stream_gptalk = "gpt-3.5-turbo-stream-gptalk"
+    gpt_3_5_turbo_stream_ChatgptDemo = "gpt-3.5-turbo-stream-ChatgptDemo"
+    gpt_3_5_turbo_stream_H2o = "gpt-3.5-turbo-stream-H2o"
+    gpt_3_5_turbo_stream_gptforlove = "gpt-3.5-turbo-stream-gptforlove"

     @classmethod
     def values(cls) -> set[str]:
@@ -64,9 +61,6 @@ class ChatGptModelsEnum(StrEnum):
     @staticmethod
     def _deprecated() -> set[str]:
         return {
-            "gpt-3.5-turbo-Aichat",
-            "gpt-3.5-turbo-stream-ChatForAi",
-            "gpt-3.5-turbo-stream-AItianhuSpace",
-            "gpt-3.5-turbo-AItianhu",
-            "gpt-3.5-turbo-acytoo",
+            "gpt-3.5-turbo-stream-H2o",
+            "gpt-3.5-turbo-stream-gptforlove",
         }
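
The body of `values()` is elided by the hunk; given `_deprecated()`, it presumably filters the retired provider names out of the enum. A self-contained sketch of that assumption, with a hypothetical stand-in enum:

```python
from enum import StrEnum, unique


@unique
class _ModelsSketch(StrEnum):  # hypothetical stand-in for ChatGptModelsEnum
    gpt_3_5_turbo_stream_H2o = "gpt-3.5-turbo-stream-H2o"
    gpt_4_turbo_stream_you = "gpt-4-turbo-stream-you"

    @classmethod
    def values(cls) -> set[str]:
        # assumption: expose every member value except the deprecated ones
        return {model.value for model in cls} - cls._deprecated()

    @staticmethod
    def _deprecated() -> set[str]:
        return {"gpt-3.5-turbo-stream-H2o"}


assert _ModelsSketch.values() == {"gpt-4-turbo-stream-you"}
```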


@@ -32,8 +32,8 @@ async def about_me(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
 async def about_bot(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
     if not update.effective_message:
         return None
-    chat_gpt_service = ChatGptService.build()
-    model = await chat_gpt_service.get_current_chatgpt_model()
+    chatgpt_service = ChatGptService.build()
+    model = await chatgpt_service.get_current_chatgpt_model()
     await update.effective_message.reply_text(
         f"Бот использует бесплатную модель {model} для ответов на вопросы. "
         f"\nПринимает запросы на разных языках.\n\nБот так же умеет переводить русские голосовые сообщения в текст. "
@@ -69,9 +69,9 @@ async def ask_question(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
     await update.message.reply_text("Пожалуйста подождите, ответ в среднем занимает 10-15 секунд")

-    chat_gpt_service = ChatGptService.build()
+    chatgpt_service = ChatGptService.build()
     logger.warning("question asked", user=update.message.from_user, question=update.message.text)
-    answer = await chat_gpt_service.request_to_chatgpt(question=update.message.text)
+    answer = await chatgpt_service.request_to_chatgpt(question=update.message.text)
     await update.message.reply_text(answer)
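
For context, these are python-telegram-bot callbacks. A hedged sketch of how such handlers are typically registered; the command name and token are placeholders, and the registration code is not part of this diff:

```python
from telegram.ext import Application, CommandHandler, MessageHandler, filters

# about_bot and ask_question are the handlers from the hunk above.
application = Application.builder().token("<TELEGRAM_TOKEN>").build()  # placeholder token
application.add_handler(CommandHandler("about", about_bot))  # hypothetical command name
application.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, ask_question))
application.run_polling()
```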


@@ -9,7 +9,7 @@ from loguru import logger
 from sqlalchemy import delete, desc, select, update
 from sqlalchemy.dialects.sqlite import insert

-from constants import CHAT_GPT_BASE_URI, INVALID_GPT_REQUEST_MESSAGES
+from constants import CHATGPT_BASE_URI, INVALID_GPT_REQUEST_MESSAGES
 from core.bot.models.chat_gpt import ChatGpt
 from infra.database.db_adapter import Database
 from settings.config import AppSettings
@@ -64,14 +64,14 @@ class ChatGPTRepository:
         result = await session.execute(query)
         return result.scalar_one()

-    async def ask_question(self, question: str, chat_gpt_model: str) -> str:
+    async def ask_question(self, question: str, chatgpt_model: str) -> str:
         try:
-            response = await self.request_to_chatgpt_microservice(question=question, chat_gpt_model=chat_gpt_model)
+            response = await self.request_to_chatgpt_microservice(question=question, chatgpt_model=chatgpt_model)
             status = response.status_code
             for message in INVALID_GPT_REQUEST_MESSAGES:
                 if message in response.text:
-                    message = f"{message}: {chat_gpt_model}"
-                    logger.info(message, question=question, chat_gpt_model=chat_gpt_model)
+                    message = f"{message}: {chatgpt_model}"
+                    logger.info(message, question=question, chatgpt_model=chatgpt_model)
                     return message
             if status != httpx.codes.OK:
                 logger.info(f"got response status: {status} from chat api", response.text)
@@ -81,19 +81,19 @@ class ChatGPTRepository:
             logger.error("error get data from chat api", error=error)
             return "Вообще всё сломалось :("

-    async def request_to_chatgpt_microservice(self, question: str, chat_gpt_model: str) -> Response:
-        data = self._build_request_data(question=question, chat_gpt_model=chat_gpt_model)
+    async def request_to_chatgpt_microservice(self, question: str, chatgpt_model: str) -> Response:
+        data = self._build_request_data(question=question, chatgpt_model=chatgpt_model)
         transport = AsyncHTTPTransport(retries=3)
         async with AsyncClient(base_url=self.settings.GPT_BASE_HOST, transport=transport, timeout=50) as client:
-            return await client.post(CHAT_GPT_BASE_URI, json=data, timeout=50)
+            return await client.post(CHATGPT_BASE_URI, json=data, timeout=50)

     @staticmethod
-    def _build_request_data(*, question: str, chat_gpt_model: str) -> dict[str, Any]:
+    def _build_request_data(*, question: str, chatgpt_model: str) -> dict[str, Any]:
         return {
             "conversation_id": str(uuid4()),
             "action": "_ask",
-            "model": chat_gpt_model,
+            "model": chatgpt_model,
             "jailbreak": "default",
             "meta": {
                 "id": random.randint(10**18, 10**19 - 1),  # noqa: S311


@@ -96,12 +96,12 @@ class ChatGptService:
     async def request_to_chatgpt(self, question: str | None) -> str:
         question = question or "Привет!"
-        chat_gpt_model = await self.get_current_chatgpt_model()
-        return await self.repository.ask_question(question=question, chat_gpt_model=chat_gpt_model)
+        chatgpt_model = await self.get_current_chatgpt_model()
+        return await self.repository.ask_question(question=question, chatgpt_model=chatgpt_model)

     async def request_to_chatgpt_microservice(self, question: str) -> Response:
-        chat_gpt_model = await self.get_current_chatgpt_model()
-        return await self.repository.request_to_chatgpt_microservice(question=question, chat_gpt_model=chat_gpt_model)
+        chatgpt_model = await self.get_current_chatgpt_model()
+        return await self.repository.request_to_chatgpt_microservice(question=question, chatgpt_model=chatgpt_model)

     async def get_current_chatgpt_model(self) -> str:
         return await self.repository.get_current_chatgpt_model()


@@ -1,18 +1,26 @@
 from datetime import datetime, timedelta
-from functools import lru_cache, wraps
+from functools import cache, wraps
 from inspect import cleandoc
-from typing import Any
+from typing import Any, Callable

-def timed_cache(**timedelta_kwargs: Any) -> Any:
-    def _wrapper(func: Any) -> Any:
-        update_delta = timedelta(**timedelta_kwargs)
+def timed_lru_cache(
+    microseconds: int = 0,
+    milliseconds: int = 0,
+    seconds: int = 0,
+    minutes: int = 0,
+    hours: int = 0,
+) -> Any:
+    def _wrapper(func: Any) -> Callable[[Any], Any]:
+        update_delta = timedelta(
+            microseconds=microseconds, milliseconds=milliseconds, seconds=seconds, minutes=minutes, hours=hours
+        )
         next_update = datetime.utcnow() + update_delta
-        # Apply @lru_cache to f with no cache size limit
-        cached_func = lru_cache(None)(func)
+        cached_func = cache(func)

         @wraps(func)
-        def _wrapped(*args: Any, **kwargs: Any) -> Any:
+        def _wrapped(*args: Any, **kwargs: Any) -> Callable[[Any], Any]:
             nonlocal next_update
             now = datetime.utcnow()
             if now >= next_update:
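
The hunk cuts off inside `_wrapped`. A self-contained sketch of how a timed cache of this shape usually finishes, assuming the truncated tail flushes the cache once the deadline passes (fewer time knobs for brevity):

```python
from datetime import datetime, timedelta
from functools import cache, wraps
from typing import Any, Callable


def timed_lru_cache_sketch(minutes: int = 0, seconds: int = 0) -> Callable[..., Any]:
    """Assumed completion of the truncated decorator above, not the committed code."""

    def _wrapper(func: Callable[..., Any]) -> Callable[..., Any]:
        update_delta = timedelta(minutes=minutes, seconds=seconds)
        next_update = datetime.utcnow() + update_delta
        cached_func = cache(func)

        @wraps(func)
        def _wrapped(*args: Any, **kwargs: Any) -> Any:
            nonlocal next_update
            now = datetime.utcnow()
            if now >= next_update:
                # assumption: drop all cached entries and roll the deadline forward
                cached_func.cache_clear()
                next_update = now + update_delta
            return cached_func(*args, **kwargs)

        return _wrapped

    return _wrapper
```

Typical usage would then be `@timed_lru_cache_sketch(minutes=5)` on a function with hashable arguments.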


@@ -105,10 +105,11 @@ def configure_logging(
         {**base_loguru_handler, "colorize": True, "sink": sys.stdout},
     ]

-    if settings.GRAYLOG_HOST and settings.GRAYLOG_PORT:
+    if settings.ENABLE_GRAYLOG:
         graylog_handler = graypy.GELFUDPHandler(settings.GRAYLOG_HOST, settings.GRAYLOG_PORT)
         base_config_handlers.append(graylog_handler)
         loguru_handlers.append({**base_loguru_handler, "sink": graylog_handler})

     if log_to_file:
         file_path = DIR_LOGS / log_to_file
         if not os.path.exists(log_to_file):
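
A minimal sketch of what the `ENABLE_GRAYLOG` gate wires up at runtime, with placeholder host and port:

```python
import graypy
from loguru import logger

# placeholder endpoint; GELFUDPHandler is a stdlib logging.Handler,
# which loguru accepts directly as a sink.
graylog_handler = graypy.GELFUDPHandler("graylog.example.com", 12201)
logger.add(graylog_handler)
logger.info("this record is shipped to Graylog as GELF over UDP")
```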


@@ -42,7 +42,7 @@ class Application:
             log_to_file=settings.LOG_TO_FILE,
         )
-        if settings.SENTRY_DSN is not None:
+        if settings.ENABLE_SENTRY:
             sentry_sdk.init(
                 dsn=settings.SENTRY_DSN,
                 environment=settings.DEPLOY_ENVIRONMENT,


@@ -10,6 +10,7 @@ RELOAD="true"
 DEBUG="true"

 # ==== sentry ====
+ENABLE_SENTRY="false"
 SENTRY_DSN=
 SENTRY_TRACES_SAMPLE_RATE="0.95"
 DEPLOY_ENVIRONMENT="stage"
@@ -17,8 +18,11 @@ DEPLOY_ENVIRONMENT="stage"
 # ==== logs ====:
 ENABLE_JSON_LOGS="true"
 ENABLE_SENTRY_LOGS="false"
+ENABLE_GRAYLOG="false"
 GRAYLOG_HOST=
 GRAYLOG_PORT=
 LOG_TO_FILE="example.log"

 # ==== telegram settings ====
@@ -31,7 +35,7 @@ DOMAIN="https://mydomain.com"
 URL_PREFIX="/gpt"

 # ==== gpt settings ====
-GPT_BASE_HOST="http://chat_service:8858"
+GPT_BASE_HOST="http://chatgpt_chat_service:8858"

 # ==== other settings ====
 USER="web"


@@ -29,12 +29,36 @@ load_dotenv(env_path, override=True)

 class SentrySettings(BaseSettings):
+    ENABLE_SENTRY: bool = False
     SENTRY_DSN: str | None = None
     DEPLOY_ENVIRONMENT: str | None = None
     SENTRY_TRACES_SAMPLE_RATE: float = 0.95

+    @model_validator(mode="after")
+    def validate_sentry_enabled(self) -> "SentrySettings":
+        if self.ENABLE_SENTRY and not self.SENTRY_DSN:
+            raise RuntimeError("sentry dsn must be set")
+        return self
+

-class AppSettings(SentrySettings, BaseSettings):
+class LoggingSettings(BaseSettings):
+    ENABLE_JSON_LOGS: bool = True
+    ENABLE_SENTRY_LOGS: bool = False
+    ENABLE_GRAYLOG: bool = False
+    GRAYLOG_HOST: str | None = None
+    GRAYLOG_PORT: int | None = None
+    LOG_TO_FILE: str | None = None
+
+    @model_validator(mode="after")
+    def validate_graylog_enabled(self) -> "LoggingSettings":
+        if self.ENABLE_GRAYLOG and not all([self.GRAYLOG_HOST, self.GRAYLOG_PORT]):
+            raise RuntimeError("graylog host and port must be set")
+        return self
+
+
+class AppSettings(SentrySettings, LoggingSettings, BaseSettings):
     """Application settings."""

     PROJECT_NAME: str = "chat gpt bot"
@@ -58,13 +82,7 @@ class AppSettings(SentrySettings, BaseSettings):
     # ==== gpt settings ====
     GPT_MODEL: str = "gpt-3.5-turbo-stream-DeepAi"
-    GPT_BASE_HOST: str = "http://chat_service:8858"
+    GPT_BASE_HOST: str = "http://chatgpt_chat_service:8858"
-    ENABLE_JSON_LOGS: bool = True
-    ENABLE_SENTRY_LOGS: bool = False
-
-    GRAYLOG_HOST: str | None = None
-    GRAYLOG_PORT: int | None = None
-    LOG_TO_FILE: str | None = None

     @model_validator(mode="before")  # type: ignore[arg-type]
     def validate_boolean_fields(self) -> Any:
@@ -75,6 +93,8 @@ class AppSettings(SentrySettings, LoggingSettings, BaseSettings):
             "START_WITH_WEBHOOK",
             "RELOAD",
             "DEBUG",
+            "ENABLE_GRAYLOG",
+            "ENABLE_SENTRY",
         ):
             setting_value: str | None = values_dict.get(value)
             if setting_value and setting_value.lower() == "false":
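
These `mode="after"` validators make misconfiguration fail at startup rather than at first use. A quick sketch of the behaviour, assuming pydantic v2 semantics in which exceptions other than `ValueError` raised inside a validator propagate unchanged:

```python
import os

from settings.config import LoggingSettings  # module path as imported elsewhere in this commit

os.environ["ENABLE_GRAYLOG"] = "true"  # but leave GRAYLOG_HOST / GRAYLOG_PORT unset

try:
    LoggingSettings()
except RuntimeError as error:
    print(error)  # -> graylog host and port must be set
```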


@@ -47,7 +47,7 @@ async def test_get_chatgpt_models(
 )
-async def test_change_chagpt_model_priority(
+async def test_change_chatgpt_model_priority(
     dbsession: Session,
     rest_client: AsyncClient,
     faker: Faker,
@@ -61,10 +61,9 @@ async def test_change_chagpt_model_priority(
     upd_model1, upd_model2 = dbsession.query(ChatGpt).order_by(ChatGpt.priority).all()

     assert model1.model == upd_model1.model
-    assert model1.priority == upd_model1.priority
     assert model2.model == upd_model2.model
-    assert upd_model2.priority == priority

+    updated_from_db_model = dbsession.get(ChatGpt, model2.id)
+    assert updated_from_db_model.priority == priority  # type: ignore[union-attr]

 async def test_reset_chatgpt_models_priority(


@@ -4,7 +4,7 @@ from typing import Any, Iterator
 import respx
 from httpx import Response

-from constants import CHAT_GPT_BASE_URI
+from constants import CHATGPT_BASE_URI

 @contextmanager
@@ -16,7 +16,7 @@ def mocked_ask_question_api(
         assert_all_called=True,
         base_url=host,
     ) as respx_mock:
-        ask_question_route = respx_mock.post(url=CHAT_GPT_BASE_URI, name="ask_question")
+        ask_question_route = respx_mock.post(url=CHATGPT_BASE_URI, name="ask_question")
         ask_question_route.return_value = return_value
         ask_question_route.side_effect = side_effect
         yield respx_mock
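
A hedged usage sketch for the helper above. The host is illustrative, and the truncated signature is assumed to take `host`, `return_value`, and `side_effect`:

```python
import httpx
from httpx import Response

from constants import CHATGPT_BASE_URI

# mocked_ask_question_api is the context manager defined above.
with mocked_ask_question_api(
    host="http://chatgpt_chat_service:8858",  # illustrative host
    return_value=Response(status_code=200, text="the answer"),
    side_effect=None,
) as respx_mock:
    reply = httpx.post("http://chatgpt_chat_service:8858" + CHATGPT_BASE_URI, json={})
    assert reply.text == "the answer"
    assert respx_mock["ask_question"].called
```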


@@ -51,14 +51,15 @@ jobs:
           password: ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Build the Docker image to main
-        if: github.ref_name == 'main'
+        if: github.ref_name == 'main' && github.repository_owner == 'balshgit'
         run: |
           docker build . -t ${{ secrets.DOCKERHUB_USERNAME }}/freegpt:latest
+          cd tools
+          docker build . -t ${{ secrets.DOCKERHUB_USERNAME }}/freegpt-zeus:latest
+          cd ..

-      - name: Docker image push to dev
-        if: github.ref_name == 'dev'
-        run: docker push ${{ secrets.DOCKERHUB_USERNAME }}/freegpt:dev
-
       - name: Docker image push main
-        if: github.ref_name == 'main'
-        run: docker push ${{ secrets.DOCKERHUB_USERNAME }}/freegpt:latest
+        if: github.ref_name == 'main' && github.repository_owner == 'balshgit'
+        run: |
+          docker push ${{ secrets.DOCKERHUB_USERNAME }}/freegpt:latest
+          docker push ${{ secrets.DOCKERHUB_USERNAME }}/freegpt-zeus:latest


@@ -7,6 +7,11 @@
 This project features a WebUI utilizing the [G4F API](https://github.com/xtekky/gpt4free). <br>
 Experience the power of ChatGPT with a user-friendly interface, enhanced jailbreaks, and completely free.

+## Support this repository:
+
+- ⭐ **Star the project:** Star this. It means a lot to me! 💕
+
 ## Getting Started :white_check_mark:

 To get started with this project, you'll need to clone the repository and have g++ >= 13.1 installed on your system.
@@ -70,6 +75,16 @@ docker run -p 8858:8858 -it --name freegpt -e CHAT_PATH=/chat -e PROVIDERS="[\"g
 docker run -p 8858:8858 -it --name freegpt -e IP_WHITE_LIST="[\"127.0.0.1\",\"192.168.1.1\"]" fantasypeak/freegpt:latest
 ```

+### Start the Zeus Service
+Zeus is an auxiliary service for cpp-freegpt-webui: some providers need it for specific operations such as getting cookies and refreshing web pages. If you want to use those providers, start the Zeus Docker container first:
+```
+docker pull fantasypeak/freegpt-zeus:latest
+docker run --rm --net=host -it --name zeus fantasypeak/freegpt-zeus:latest
+docker pull fantasypeak/freegpt:latest
+docker run --rm --net=host -it --name freegpt fantasypeak/freegpt:latest
+```
+
 ### Call OpenAi Api
 ```
 // It supports calling OpenAI's API, but need set API_KEY
@@ -84,6 +99,10 @@ The application interface was incorporated from the [chatgpt-clone](https://gith
 ### API G4F
 The free GPT-4 API was incorporated from the [GPT4Free](https://github.com/xtekky/gpt4free) repository.

+## Star History Chart:
+[![Star History Chart](https://api.star-history.com/svg?repos=fantasy-peak/cpp-freegpt-webui&theme=light)](https://github.com/fantasy-peak/cpp-freegpt-webui/stargazers)
+
 ## Legal Notice
 This repository is _not_ associated with or endorsed by providers of the APIs contained in this GitHub repository. This
 project is intended **for educational purposes only**. This is just a little personal project. Sites may contact me to


@@ -1,5 +1,36 @@
 @import url("https://fonts.googleapis.com/css2?family=Inter:wght@100;200;300;400;500;600;700;800;900&display=swap");

+.adsbox {
+  backdrop-filter: blur(20px);
+  -webkit-backdrop-filter: blur(20px);
+  background-color: var(--blur-bg);
+  height: 100%;
+  width: 100%;
+  border-radius: var(--border-radius-1);
+  border: 1px solid var(--blur-border);
+}
+
+.ads {
+  align-items: center;
+  margin: auto;
+  display: flex;
+  flex-direction: column;
+  gap: 16px;
+  max-width: 200px;
+  padding: var(--section-gap);
+  overflow: none;
+  flex-shrink: 0;
+  display: flex;
+  flex-direction: column;
+  justify-content: space-between;
+}
+
+@media screen and (max-width: 728px) {
+  .ads {
+    display: none;
+  }
+}
+
 /* :root {
 --colour-1: #ffffff;
 --colour-2: #000000;
@@ -28,6 +59,7 @@
 --blur-border: #84719040;
 --user-input: #ac87bb;
 --conversations: #c7a2ff;
+--conversations-hover: #c7a2ff4d;
 }

 :root {
@@ -54,7 +86,7 @@ body {
   padding: var(--section-gap);
   background: var(--colour-1);
   color: var(--colour-3);
-  min-height: 100vh;
+  height: 100vh;
 }

 .row {
@@ -85,10 +117,6 @@ body {
 .conversation {
   width: 100%;
-  min-height: 50%;
-  height: 100vh;
-  overflow-y: scroll;
-  overflow-x: hidden;
   display: flex;
   flex-direction: column;
   gap: 15px;
@@ -96,16 +124,16 @@ body {
 .conversation #messages {
   width: 100%;
-  height: 100%;
   display: flex;
   flex-direction: column;
-  overflow: auto;
   overflow-wrap: break-word;
+  overflow-y: inherit;
+  overflow-x: hidden;
   padding-bottom: 50px;
 }

 .conversation .user-input {
-  max-height: 10vh;
+  max-height: 200px;
 }

 .conversation .user-input input {
@@ -150,8 +178,6 @@ body {
   display: flex;
   flex-direction: column;
   gap: 16px;
-  flex: auto;
-  min-width: 0;
 }

 .conversations .title {
@@ -162,10 +188,12 @@ body {
 .conversations .convo {
   padding: 8px 12px;
   display: flex;
-  gap: 18px;
+  gap: 10px;
   align-items: center;
   user-select: none;
   justify-content: space-between;
+  border: 1px dashed var(--conversations);
+  border-radius: var(--border-radius-1);
 }

 .conversations .convo .left {
@@ -173,8 +201,6 @@ body {
   display: flex;
   align-items: center;
   gap: 10px;
-  flex: auto;
-  min-width: 0;
 }

 .conversations i {
@@ -185,8 +211,6 @@ body {
 .convo-title {
   color: var(--colour-3);
   font-size: 14px;
-  overflow: hidden;
-  text-overflow: ellipsis;
 }

 .message {
@@ -240,7 +264,6 @@ body {
   display: flex;
   flex-direction: column;
   gap: 18px;
-  min-width: 0;
 }

 .message .content p,
@@ -265,8 +288,13 @@ body {
   cursor: pointer;
   user-select: none;
   background: transparent;
-  border: 1px dashed var(--conversations);
+  border: 1px solid var(--conversations);
   border-radius: var(--border-radius-1);
+  transition: all 0.2s ease;
+}
+
+.new_convo:hover {
+  box-shadow: inset 0px 0px 20px var(--conversations-hover);
 }

 .new_convo span {
@@ -274,9 +302,6 @@ body {
   font-size: 14px;
 }

-.new_convo:hover {
-  border-style: solid;
-}
-
 .stop_generating {
   position: absolute;
@@ -388,9 +413,8 @@ input:checked+label:after {
 }

 .buttons {
-  min-height: 10vh;
   display: flex;
-  align-items: start;
+  align-items: center;
   justify-content: left;
   width: 100%;
 }
@@ -408,15 +432,6 @@ input:checked+label:after {
   color: var(--colour-3);
 }

-.disable-scrollbars::-webkit-scrollbar {
-  background: transparent; /* Chrome/Safari/Webkit */
-  width: 0px;
-}
-
-.disable-scrollbars {
-  scrollbar-width: none; /* Firefox */
-  -ms-overflow-style: none; /* IE 10+ */
-}
-
 select {
   -webkit-border-radius: 8px;
@@ -474,7 +489,7 @@ select {
   cursor: pointer;
   user-select: none;
   background: transparent;
-  border: 1px solid #c7a2ff;
+  border: 1px solid var(--conversations);
   border-radius: var(--border-radius-1);
   width: 100%;
 }
@@ -491,6 +506,7 @@ select {
   overflow: auto;
 }

+
 #cursor {
   line-height: 17px;
   margin-left: 3px;
@@ -597,16 +613,14 @@ ul {
 }

 .buttons {
-  flex-wrap: wrap;
-  gap: 5px;
-  padding-bottom: 10vh;
-  margin-bottom: 10vh;
+  align-items: flex-start;
+  flex-wrap: wrap;
+  gap: 15px;
 }

 .field {
-  min-height: 5%;
-  width: fit-content;
+  width: fit-content;
 }

 .mobile-sidebar {
   display: flex !important;
@@ -743,7 +757,7 @@ a:-webkit-any-link {
 }

 .color-picker input[type="radio"]#pink {
-  --radio-color: pink;
+  --radio-color: white;
 }

 .color-picker input[type="radio"]#blue {
@@ -759,10 +773,18 @@ a:-webkit-any-link {
 }

 .pink {
-  --colour-1: hsl(310 50% 90%);
-  --clr-card-bg: hsl(310 50% 100%);
-  --colour-3: hsl(310 50% 15%);
-  --conversations: hsl(310 50% 25%);
+  --colour-1: #ffffff;
+  --colour-2: #000000;
+  --colour-3: #000000;
+  --colour-4: #000000;
+  --colour-5: #000000;
+  --colour-6: #000000;
+  --accent: #ffffff;
+  --blur-bg: #98989866;
+  --blur-border: #00000040;
+  --user-input: #000000;
+  --conversations: #000000;
 }

 .blue {
@@ -787,10 +809,18 @@ a:-webkit-any-link {
 }

 :root:has(#pink:checked) {
-  --colour-1: hsl(310 50% 90%);
-  --clr-card-bg: hsl(310 50% 100%);
-  --colour-3: hsl(310 50% 15%);
-  --conversations: hsl(310 50% 25%);
+  --colour-1: #ffffff;
+  --colour-2: #000000;
+  --colour-3: #000000;
+  --colour-4: #000000;
+  --colour-5: #000000;
+  --colour-6: #000000;
+  --accent: #ffffff;
+  --blur-bg: #98989866;
+  --blur-border: #00000040;
+  --user-input: #000000;
+  --conversations: #000000;
 }

 :root:has(#blue:checked) {
@@ -814,8 +844,18 @@ a:-webkit-any-link {
   --conversations: hsl(209 50% 80%);
 }

-.trash-icon {
-  position: absolute;
-  top: 20px;
-  right: 20px;
+#send-button {
+  border: 1px dashed #e4d4ffa6;
+  border-radius: 4px;
+  cursor: pointer;
+  padding-left: 8px;
+  padding-right: 5px;
+  padding-top: 2px;
+  padding-bottom: 2px;
+  top: 20px;
+  left: 8px;
+}
+
+#send-button:hover {
+  border: 1px solid #e4d4ffc9;
 }


@@ -77,10 +77,9 @@
             <span>Clear Conversations</span>
         </button>
         <div class="info">
-            <i class="fa-regular fa-circle-info"></i>
-            <span class="convo-title">By: Balsh<br>
-                Version: 0.0.7 <br>
-                Release: 2023-09-28<br>
+            <i class="fa-brands fa-github"></i>
+            <span class="convo-title">github: <a href="https://github.com/Balshgit/gpt_chat_bot">Balshgit</a><br>
+                leave a star :)
             </span>
         </div>
     </div>


@@ -153,3 +153,777 @@ create_client:
     });
     co_return;
 }
boost::asio::awaitable<void> FreeGpt::gptgod(std::shared_ptr<Channel> ch, nlohmann::json json) {
co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
boost::system::error_code err{};
auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
auto generate_token_hex = [](int32_t length) {
std::random_device rd;
std::stringstream ss;
std::mt19937 gen(rd());
std::uniform_int_distribution<> dis(0, 15);
for (int i = 0; i < length; ++i)
ss << std::hex << dis(gen);
std::string token = ss.str();
token = std::string(length * 2 - token.length(), '0') + token;
return token;
};
CURLcode res;
int32_t response_code;
struct Input {
std::shared_ptr<Channel> ch;
std::string recv;
};
Input input{ch};
CURL* curl = curl_easy_init();
if (!curl) {
auto error_info = std::format("curl_easy_init() failed:{}", curl_easy_strerror(res));
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, error_info);
co_return;
}
auto url = std::format("https://gptgod.site/api/session/free/gpt3p5?content={}&id={}", urlEncode(prompt),
generate_token_hex(16));
curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
if (!m_cfg.http_proxy.empty())
curl_easy_setopt(curl, CURLOPT_PROXY, m_cfg.http_proxy.c_str());
curlEasySetopt(curl);
auto action_cb = [](void* contents, size_t size, size_t nmemb, void* userp) -> size_t {
auto input_ptr = static_cast<Input*>(userp);
std::string data{(char*)contents, size * nmemb};
auto& [ch, recv] = *input_ptr;
recv.append(data);
auto remove_quotes = [](const std::string& str) {
std::string result = str;
if (result.size() >= 2 && result.front() == '"' && result.back() == '"') {
result.erase(0, 1);
result.erase(result.size() - 1);
}
return result;
};
while (true) {
auto position = recv.find("\n");
if (position == std::string::npos)
break;
auto msg = recv.substr(0, position + 1);
recv.erase(0, position + 1);
msg.pop_back();
if (msg.empty() || !msg.starts_with("data: "))
continue;
msg.erase(0, 6);
boost::system::error_code err{};
msg = remove_quotes(msg);
if (msg.empty())
continue;
boost::asio::post(ch->get_executor(), [=, content = std::move(msg)] { ch->try_send(err, content); });
}
return size * nmemb;
};
size_t (*action_fn)(void* contents, size_t size, size_t nmemb, void* userp) = action_cb;
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, action_fn);
curl_easy_setopt(curl, CURLOPT_WRITEDATA, &input);
struct curl_slist* headers = nullptr;
headers = curl_slist_append(headers, "Content-Type: application/json");
headers = curl_slist_append(headers, "Referer: https://gptgod.site/");
headers = curl_slist_append(headers, "Alt-Used: gptgod.site");
headers = curl_slist_append(headers, "Accept: text/event-stream");
uint64_t timestamp =
std::chrono::duration_cast<std::chrono::seconds>(std::chrono::system_clock::now().time_since_epoch()).count();
auto auth_timestamp = std::format("x-auth-timestamp: {}", timestamp);
headers = curl_slist_append(headers, auth_timestamp.c_str());
curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
ScopeExit auto_exit{[=] {
curl_slist_free_all(headers);
curl_easy_cleanup(curl);
}};
res = curl_easy_perform(curl);
if (res != CURLE_OK) {
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
auto error_info = std::format("curl_easy_perform() failed:{}", curl_easy_strerror(res));
ch->try_send(err, error_info);
co_return;
}
curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response_code);
if (response_code != 200) {
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, std::format("gptgod http code:{}", response_code));
co_return;
}
}
boost::asio::awaitable<void> FreeGpt::aiChat(std::shared_ptr<Channel> ch, nlohmann::json json) {
ScopeExit auto_exit{[&] { ch->close(); }};
boost::system::error_code err{};
auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
constexpr std::string_view host = "chat-gpt.org";
constexpr std::string_view port = "443";
boost::beast::http::request<boost::beast::http::string_body> req{boost::beast::http::verb::post, "/api/text", 11};
req.set(boost::beast::http::field::host, host);
req.set("authority", "chat-gpt.org");
req.set("accept", "*/*");
req.set("cache-control", "no-cache");
req.set(boost::beast::http::field::content_type, "application/json");
req.set(boost::beast::http::field::origin, "https://chat-gpt.org");
req.set("pragma", "no-cache");
req.set(boost::beast::http::field::referer, "https://chat-gpt.org/chat");
req.set("sec-ch-ua-mobile", "?0");
req.set("sec-ch-ua-platform", R"("macOS")");
req.set("sec-fetch-dest", "empty");
req.set("sec-fetch-mode", "cors");
req.set("sec-fetch-site", "same-origin");
req.set(
boost::beast::http::field::user_agent,
R"(Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36)");
nlohmann::json data{
{"message", std::format("user: {}\nassistant:", prompt)},
{"temperature", 0.5},
{"presence_penalty", 0},
{"top_p", 1},
{"frequency_penalty", 0},
};
req.body() = data.dump();
req.prepare_payload();
auto ret = co_await sendRequestRecvResponse(req, host, port, std::bind_front(&FreeGpt::createHttpClient, *this));
if (!ret.has_value()) {
co_await ch->async_send(err, ret.error(), use_nothrow_awaitable);
co_return;
}
auto& [res, ctx, stream_] = ret.value();
if (boost::beast::http::status::ok != res.result()) {
SPDLOG_ERROR("http status code: {}", res.result_int());
co_await ch->async_send(err, res.reason(), use_nothrow_awaitable);
co_return;
}
nlohmann::json rsp = nlohmann::json::parse(res.body(), nullptr, false);
if (rsp.is_discarded()) {
SPDLOG_ERROR("json parse error");
co_await ch->async_send(err, "json parse error", use_nothrow_awaitable);
co_return;
}
SPDLOG_INFO("rsp: {}", rsp.dump());
co_await ch->async_send(err, rsp.value("message", rsp.dump()), use_nothrow_awaitable);
co_return;
}
boost::asio::awaitable<void> FreeGpt::aiTianhuSpace(std::shared_ptr<Channel> ch, nlohmann::json json) {
co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
boost::system::error_code err{};
ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
CURLcode res;
CURL* curl = curl_easy_init();
if (!curl) {
auto error_info = std::format("curl_easy_init() failed:{}", curl_easy_strerror(res));
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, error_info);
co_return;
}
auto random = [](int len) {
static std::string chars{"abcdefghijklmnopqrstuvwxyz0123456789"};
static std::string letter{"abcdefghijklmnopqrstuvwxyz"};
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<> dis(0, 1000000);
std::string random_string;
random_string += chars[dis(gen) % letter.length()];
len = len - 1;
for (int i = 0; i < len; i++)
random_string += chars[dis(gen) % chars.length()];
return random_string;
};
auto url = std::format("https://{}.aitianhu.space/api/chat-process", random(6));
SPDLOG_INFO("url: [{}]", url);
curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
if (!m_cfg.http_proxy.empty())
curl_easy_setopt(curl, CURLOPT_PROXY, m_cfg.http_proxy.c_str());
struct Input {
std::shared_ptr<Channel> ch;
std::string recv;
};
Input input{ch};
auto action_cb = [](void* contents, size_t size, size_t nmemb, void* userp) -> size_t {
boost::system::error_code err{};
auto input_ptr = static_cast<Input*>(userp);
std::string data{(char*)contents, size * nmemb};
auto& [ch, recv] = *input_ptr;
recv.append(data);
while (true) {
auto position = recv.find("\n");
if (position == std::string::npos)
break;
auto msg = recv.substr(0, position + 1);
recv.erase(0, position + 1);
msg.pop_back();
if (msg.empty() || !msg.contains("content"))
continue;
boost::system::error_code err{};
nlohmann::json line_json = nlohmann::json::parse(msg, nullptr, false);
if (line_json.is_discarded()) {
SPDLOG_ERROR("json parse error: [{}]", msg);
boost::asio::post(ch->get_executor(),
[=] { ch->try_send(err, std::format("json parse error: [{}]", msg)); });
continue;
}
auto str = line_json["detail"]["choices"][0]["delta"]["content"].get<std::string>();
if (!str.empty())
boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, str); });
}
return size * nmemb;
};
size_t (*action_fn)(void* contents, size_t size, size_t nmemb, void* userp) = action_cb;
curlEasySetopt(curl);
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, action_fn);
curl_easy_setopt(curl, CURLOPT_WRITEDATA, &input);
constexpr std::string_view request_str{R"({
"prompt":"hello",
"options":{},
"systemMessage":"You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
"temperature":0.8,
"top_p":1
})"};
nlohmann::json request = nlohmann::json::parse(request_str, nullptr, false);
request["prompt"] = prompt;
auto str = request.dump();
curl_easy_setopt(curl, CURLOPT_POSTFIELDS, str.c_str());
struct curl_slist* headers = nullptr;
headers = curl_slist_append(headers, "Content-Type: application/json");
curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
ScopeExit auto_exit{[=] {
curl_slist_free_all(headers);
curl_easy_cleanup(curl);
}};
res = curl_easy_perform(curl);
if (res != CURLE_OK) {
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
auto error_info = std::format("curl_easy_perform() failed:{}", curl_easy_strerror(res));
ch->try_send(err, error_info);
co_return;
}
int32_t response_code;
curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response_code);
if (response_code != 200) {
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, std::format("you http code:{}", response_code));
co_return;
}
co_return;
}
boost::asio::awaitable<void> FreeGpt::aiTianhu(std::shared_ptr<Channel> ch, nlohmann::json json) {
boost::asio::post(*m_thread_pool_ptr, [=, this] {
ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
boost::system::error_code err{};
auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
CURLcode res;
CURL* curl = curl_easy_init();
if (!curl) {
auto error_info = std::format("curl_easy_init() failed:{}", curl_easy_strerror(res));
boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, error_info); });
return;
}
curl_easy_setopt(curl, CURLOPT_URL, "https://www.aitianhu.com/api/chat-process");
if (!m_cfg.http_proxy.empty())
curl_easy_setopt(curl, CURLOPT_PROXY, m_cfg.http_proxy.c_str());
auto cb = [](void* contents, size_t size, size_t nmemb, void* userp) -> size_t {
auto recv_data_ptr = static_cast<std::string*>(userp);
std::string data{(char*)contents, size * nmemb};
recv_data_ptr->append(data);
return size * nmemb;
};
size_t (*fn)(void* contents, size_t size, size_t nmemb, void* userp) = cb;
std::string recv_data;
curlEasySetopt(curl);
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, fn);
curl_easy_setopt(curl, CURLOPT_WRITEDATA, &recv_data);
constexpr std::string_view json_str = R"({
"prompt":"hello",
"options":{},
"systemMessage":"You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
"temperature":0.8,
"top_p":1
})";
nlohmann::json request = nlohmann::json::parse(json_str, nullptr, false);
request["prompt"] = prompt;
SPDLOG_INFO("{}", request.dump(2));
auto str = request.dump();
curl_easy_setopt(curl, CURLOPT_POSTFIELDS, str.c_str());
struct curl_slist* headers = nullptr;
headers = curl_slist_append(headers, "Content-Type: application/json");
curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
ScopeExit auto_exit{[=] {
curl_slist_free_all(headers);
curl_easy_cleanup(curl);
}};
res = curl_easy_perform(curl);
if (res != CURLE_OK) {
auto error_info = std::format("curl_easy_perform() failed:{}", curl_easy_strerror(res));
boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, error_info); });
return;
}
int32_t response_code;
curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response_code);
if (response_code != 200) {
boost::asio::post(ch->get_executor(),
[=] { ch->try_send(err, std::format("aiTianhu http code:{}", response_code)); });
return;
}
auto lines = recv_data | std::views::split('\n') | std::views::transform([](auto&& rng) {
return std::string_view(&*rng.begin(), std::ranges::distance(rng.begin(), rng.end()));
}) |
to<std::vector<std::string_view>>();
if (lines.empty()) {
SPDLOG_ERROR("lines empty");
return;
}
nlohmann::json rsp = nlohmann::json::parse(lines.back(), nullptr, false);
if (rsp.is_discarded()) {
SPDLOG_ERROR("json parse error");
ch->try_send(err, std::format("json parse error: {}", lines.back()));
return;
}
ch->try_send(err, rsp.value("text", rsp.dump()));
return;
});
co_return;
}
boost::asio::awaitable<void> FreeGpt::acytoo(std::shared_ptr<Channel> ch, nlohmann::json json) {
boost::system::error_code err{};
ScopeExit auto_exit{[&] { ch->close(); }};
auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
constexpr std::string_view host = "chat.acytoo.com";
constexpr std::string_view port = "443";
boost::beast::http::request<boost::beast::http::string_body> req{boost::beast::http::verb::post,
"/api/completions", 11};
req.set(boost::beast::http::field::host, host);
req.set(
boost::beast::http::field::user_agent,
R"(Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36)");
req.set("Accept", "*/*");
req.set("Accept-Encoding", "gzip, deflate");
req.set(boost::beast::http::field::content_type, "application/json");
constexpr std::string_view json_str = R"({
"key":"",
"model":"gpt-3.5-turbo",
"messages":[
{
"role":"user",
"content":"user: hello\nassistant:",
"createdAt":1688518523500
}
],
"temperature":0.5,
"password":""
})";
nlohmann::json request = nlohmann::json::parse(json_str, nullptr, false);
request["messages"][0]["content"] = std::format("user: {}\nassistant:", prompt);
auto time_now = std::chrono::system_clock::now();
auto duration_in_ms = std::chrono::duration_cast<std::chrono::milliseconds>(time_now.time_since_epoch());
request["messages"][0]["createdAt"] = duration_in_ms.count();
SPDLOG_INFO("{}", request.dump(2));
req.body() = request.dump();
req.prepare_payload();
auto ret = co_await sendRequestRecvResponse(req, host, port, std::bind_front(&FreeGpt::createHttpClient, *this));
if (!ret.has_value()) {
co_await ch->async_send(err, ret.error(), use_nothrow_awaitable);
co_return;
}
auto& [res, ctx, stream_] = ret.value();
if (boost::beast::http::status::ok != res.result()) {
SPDLOG_ERROR("http status code: {}", res.result_int());
co_await ch->async_send(err, res.reason(), use_nothrow_awaitable);
co_return;
}
auto decompress_value = decompress(res);
if (!decompress_value.has_value()) {
SPDLOG_ERROR("decompress error");
co_await ch->async_send(err, decompress_value.error(), use_nothrow_awaitable);
co_return;
}
auto& body = decompress_value.value();
co_await ch->async_send(err, std::move(body), use_nothrow_awaitable);
co_return;
}
boost::asio::awaitable<void> FreeGpt::cromicle(std::shared_ptr<Channel> ch, nlohmann::json json) {
co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
boost::system::error_code err{};
ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
CURLcode res;
CURL* curl = curl_easy_init();
if (!curl) {
auto error_info = std::format("curl_easy_init() failed:{}", curl_easy_strerror(res));
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, error_info);
co_return;
}
curl_easy_setopt(curl, CURLOPT_URL, "https://cromicle.top/chat");
if (!m_cfg.http_proxy.empty())
curl_easy_setopt(curl, CURLOPT_PROXY, m_cfg.http_proxy.c_str());
struct Input {
std::shared_ptr<Channel> ch;
std::string recv;
};
Input input{ch};
auto action_cb = [](void* contents, size_t size, size_t nmemb, void* userp) -> size_t {
boost::system::error_code err{};
auto input_ptr = static_cast<Input*>(userp);
std::string data{(char*)contents, size * nmemb};
auto& [ch, recv] = *input_ptr;
boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, data); });
return size * nmemb;
};
size_t (*action_fn)(void* contents, size_t size, size_t nmemb, void* userp) = action_cb;
curlEasySetopt(curl);
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, action_fn);
curl_easy_setopt(curl, CURLOPT_WRITEDATA, &input);
auto generate_signature = [](const std::string& message) {
std::stringstream ss;
ss << "asdap" << message;
std::string data = ss.str();
unsigned char digest[SHA256_DIGEST_LENGTH];
SHA256(reinterpret_cast<const unsigned char*>(data.c_str()), data.length(), digest);
std::stringstream sha_stream;
for (int i = 0; i < SHA256_DIGEST_LENGTH; i++) {
sha_stream << std::setfill('0') << std::setw(2) << std::hex << static_cast<int>(digest[i]);
}
return sha_stream.str();
};
std::string signature = generate_signature(prompt);
constexpr std::string_view request_str{R"({
"message": "hello",
"hash": "dda6ea4e1dc215f198084018b1df20cfeafe9fbdfe31d8a350d6917509158d8a",
"token": "asdap"
})"};
nlohmann::json request = nlohmann::json::parse(request_str, nullptr, false);
request["hash"] = signature;
request["message"] = prompt;
auto str = request.dump();
SPDLOG_INFO("request : [{}]", str);
curl_easy_setopt(curl, CURLOPT_POSTFIELDS, str.c_str());
struct curl_slist* headers = nullptr;
headers = curl_slist_append(headers, "Content-Type: application/json");
curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
ScopeExit auto_exit{[=] {
curl_slist_free_all(headers);
curl_easy_cleanup(curl);
}};
res = curl_easy_perform(curl);
if (res != CURLE_OK) {
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
auto error_info = std::format("curl_easy_perform() failed:{}", curl_easy_strerror(res));
ch->try_send(err, error_info);
co_return;
}
int32_t response_code;
curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response_code);
if (response_code != 200) {
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, std::format("you http code:{}", response_code));
co_return;
}
co_return;
}
boost::asio::awaitable<void> FreeGpt::h2o(std::shared_ptr<Channel> ch, nlohmann::json json) {
boost::system::error_code err{};
ScopeExit auto_exit{[&] { ch->close(); }};
auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
constexpr std::string_view host = "gpt-gm.h2o.ai";
constexpr std::string_view port = "443";
constexpr std::string_view user_agent{
R"(Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0)"};
boost::beast::http::request<boost::beast::http::empty_body> req_init_cookie{boost::beast::http::verb::get, "/",
11};
req_init_cookie.set(boost::beast::http::field::host, host);
req_init_cookie.set(boost::beast::http::field::user_agent, user_agent);
auto ret = co_await sendRequestRecvResponse(req_init_cookie, host, port,
std::bind_front(&FreeGpt::createHttpClient, *this));
if (!ret.has_value()) {
co_await ch->async_send(err, ret.error(), use_nothrow_awaitable);
co_return;
}
auto& [response, ctx, stream_] = ret.value();
if (boost::beast::http::status::ok != response.result()) {
SPDLOG_ERROR("http status code: {}", response.result_int());
co_await ch->async_send(err, response.reason(), use_nothrow_awaitable);
co_return;
}
auto fields = splitString(response["Set-Cookie"], " ");
if (fields.empty()) {
std::stringstream ss;
ss << response.base();
SPDLOG_ERROR("get cookie error: {}", ss.str());
co_await ch->async_send(err, "can't get cookie", use_nothrow_awaitable);
co_return;
}
fields[0].pop_back();
std::string cookie{std::move(fields[0])};
SPDLOG_INFO("cookie: {}", cookie);
{
boost::beast::http::request<boost::beast::http::string_body> req_init_setting{boost::beast::http::verb::post,
"/settings", 11};
req_init_setting.set("Cookie", cookie);
req_init_setting.set(boost::beast::http::field::host, host);
req_init_setting.set(boost::beast::http::field::user_agent, user_agent);
req_init_setting.set("Accept",
"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8");
req_init_setting.set("Accept-Language", "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3");
req_init_setting.set("Content-Type", "application/x-www-form-urlencoded");
req_init_setting.set("Upgrade-Insecure-Requests", "1");
req_init_setting.set("Sec-Fetch-Dest", "document");
req_init_setting.set("Sec-Fetch-Mode", "navigate");
req_init_setting.set("Sec-Fetch-Site", "same-origin");
req_init_setting.set("Sec-Fetch-User", "?1");
req_init_setting.set("Referer", "https://gpt-gm.h2o.ai/r/jGfKSwU");
std::stringstream ss1;
ss1 << "ethicsModalAccepted=true&";
ss1 << "shareConversationsWithModelAuthors=true&";
ss1 << "ethicsModalAcceptedAt="
<< "&";
ss1 << "activeModel=h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1&";
ss1 << "searchEnabled=true";
req_init_setting.body() = ss1.str();
req_init_setting.prepare_payload();
auto [ec, count] = co_await boost::beast::http::async_write(stream_, req_init_setting, use_nothrow_awaitable);
if (ec) {
SPDLOG_ERROR("{}", ec.message());
co_await ch->async_send(err, ec.message(), use_nothrow_awaitable);
co_return;
}
boost::beast::flat_buffer b;
boost::beast::http::response<boost::beast::http::string_body> res;
std::tie(ec, count) = co_await boost::beast::http::async_read(stream_, b, res, use_nothrow_awaitable);
if (ec) {
SPDLOG_ERROR("{}", ec.message());
co_await ch->async_send(err, ec.message(), use_nothrow_awaitable);
co_return;
}
if (res.result_int() != 303) {
std::string reason{res.reason()};
SPDLOG_ERROR("reason: {}", reason);
co_await ch->async_send(
err, std::format("return unexpected http status code: {}({})", res.result_int(), reason),
use_nothrow_awaitable);
co_return;
}
{
boost::beast::http::request<boost::beast::http::empty_body> req_init_cookie{boost::beast::http::verb::get,
"/r/jGfKSwU", 11};
req_init_cookie.set(boost::beast::http::field::host, host);
req_init_cookie.set(boost::beast::http::field::user_agent, user_agent);
auto [ec, count] =
co_await boost::beast::http::async_write(stream_, req_init_cookie, use_nothrow_awaitable);
if (ec) {
SPDLOG_ERROR("{}", ec.message());
co_await ch->async_send(err, ec.message(), use_nothrow_awaitable);
co_return;
}
boost::beast::flat_buffer b;
boost::beast::http::response<boost::beast::http::string_body> res;
std::tie(ec, count) = co_await boost::beast::http::async_read(stream_, b, res, use_nothrow_awaitable);
if (ec) {
SPDLOG_ERROR("{}", ec.message());
co_await ch->async_send(err, ec.message(), use_nothrow_awaitable);
co_return;
}
if (res.result_int() != 200) {
std::string reason{res.reason()};
SPDLOG_ERROR("reason: {}", reason);
co_await ch->async_send(
err, std::format("return unexpected http status code: {}({})", res.result_int(), reason),
use_nothrow_awaitable);
co_return;
}
}
}
std::string conversation_id;
{
boost::beast::http::request<boost::beast::http::string_body> req_init_conversation{
boost::beast::http::verb::post, "/conversation", 11};
req_init_conversation.set("Cookie", cookie);
req_init_conversation.set(boost::beast::http::field::host, host);
req_init_conversation.set(boost::beast::http::field::user_agent, user_agent);
req_init_conversation.set("Accept", "*/*");
req_init_conversation.set("Accept-Encoding", "gzip, deflate");
req_init_conversation.set("Content-Type", "application/json");
req_init_conversation.set("Accept-Language", "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3");
req_init_conversation.set("Sec-Fetch-Dest", "empty");
req_init_conversation.set("Sec-Fetch-Mode", "cors");
req_init_conversation.set("Sec-Fetch-Site", "same-origin");
req_init_conversation.set("Referer", "https://gpt-gm.h2o.ai/");
req_init_conversation.body() = R"({"model": "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1"})";
req_init_conversation.prepare_payload();
auto [ec, count] =
co_await boost::beast::http::async_write(stream_, req_init_conversation, use_nothrow_awaitable);
if (ec) {
SPDLOG_ERROR("{}", ec.message());
co_await ch->async_send(err, ec.message(), use_nothrow_awaitable);
co_return;
}
boost::beast::flat_buffer b;
boost::beast::http::response<boost::beast::http::string_body> res;
std::tie(ec, count) = co_await boost::beast::http::async_read(stream_, b, res, use_nothrow_awaitable);
if (ec) {
SPDLOG_ERROR("{}", ec.message());
co_await ch->async_send(err, ec.message(), use_nothrow_awaitable);
co_return;
}
if (res.result_int() != 200) {
std::string reason{res.reason()};
SPDLOG_ERROR("reason: {}", reason);
co_await ch->async_send(
err, std::format("return unexpected http status code: {}({})", res.result_int(), reason),
use_nothrow_awaitable);
co_return;
}
std::cout << res.body() << std::endl;
nlohmann::json rsp_json = nlohmann::json::parse(res.body(), nullptr, false);
if (rsp_json.is_discarded()) {
SPDLOG_ERROR("json parse error: [{}]", fields.back());
ch->try_send(err, std::format("json parse error: [{}]", fields.back()));
co_return;
}
if (!rsp_json.contains("conversationId")) {
SPDLOG_ERROR("not contains conversationId: {}", res.body());
co_await ch->async_send(err, res.body(), use_nothrow_awaitable);
co_return;
}
conversation_id = rsp_json["conversationId"].get<std::string>();
}
constexpr std::string_view json_str = R"({
"inputs":"user: hello\nassistant: ",
"parameters":{
"temperature":0.4,
"truncate":2048,
"max_new_tokens":1024,
"do_sample":true,
"repetition_penalty":1.2,
"return_full_text":false
},
"stream":true,
"options":{
"id":"64cf9d83-7b0d-4851-82b5-6f9090652494",
"response_id":"f76711da-6761-4055-9a05-84a8afce0198",
"is_retry":false,
"use_cache":false,
"web_search_id":""
}
})";
nlohmann::json request = nlohmann::json::parse(json_str, nullptr, false);
request["inputs"] = std::format("user: {}\nassistant: ", prompt);
request["response_id"] = conversation_id;
request["id"] = createUuidString();
boost::beast::http::request<boost::beast::http::string_body> req{
boost::beast::http::verb::post, std::format("/conversation/{}", conversation_id), 11};
req.set("Cookie", cookie);
req.set(boost::beast::http::field::host, host);
req.set(boost::beast::http::field::user_agent, user_agent);
req.set("Accept", "*/*");
// req.set("Accept-Encoding", "gzip, deflate");
req.set("Content-Type", "application/json");
req.set("Accept-Language", "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3");
req.set("Sec-Fetch-Dest", "empty");
req.set("Sec-Fetch-Mode", "cors");
req.set("Sec-Fetch-Site", "same-origin");
req.set("Referer", "https://gpt-gm.h2o.ai/");
req.body() = request.dump();
req.prepare_payload();
    std::string recv;
    // the endpoint streams newline-delimited "data:{json}" records; buffer the chunks and
    // peel off complete lines as they arrive
    co_await sendRequestRecvChunk(ch, stream_, req, 200, [&ch, &recv](std::string chunk_str) {
        recv.append(chunk_str);
        while (true) {
            auto position = recv.find("\n");
if (position == std::string::npos)
break;
auto msg = recv.substr(0, position + 1);
recv.erase(0, position + 1);
msg.pop_back();
if (msg.empty() || !msg.contains("text"))
continue;
auto fields = splitString(msg, "data:");
boost::system::error_code err{};
nlohmann::json line_json = nlohmann::json::parse(fields.back(), nullptr, false);
if (line_json.is_discarded()) {
SPDLOG_ERROR("json parse error: [{}]", fields.back());
ch->try_send(err, std::format("json parse error: [{}]", fields.back()));
continue;
}
auto str = line_json["token"]["text"].get<std::string>();
if (!str.empty() && str != "<|endoftext|>")
ch->try_send(err, str);
}
});
co_return;
}
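
For reference, the three-step exchange above (a warm-up GET to pick up cookies, a POST to create a conversation, then a streaming POST to `/conversation/{id}`) can be sketched in Python. This is a minimal illustration assuming the endpoints behave exactly as the C++ coroutine expects; `requests` is used purely for the sketch and is not part of the service.

```python
# minimal sketch of the H2o exchange above, assuming the same endpoint behavior;
# "requests" is illustrative only and not part of the C++ service
import json
import uuid

import requests

session = requests.Session()
session.headers["User-Agent"] = "Mozilla/5.0"
session.get("https://gpt-gm.h2o.ai/r/jGfKSwU")  # warm-up GET: the server sets its cookies here

conversation_id = session.post(
    "https://gpt-gm.h2o.ai/conversation",
    json={"model": "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1"},
).json()["conversationId"]

body = {
    "inputs": "user: hello\nassistant: ",
    "parameters": {"temperature": 0.4, "truncate": 2048, "max_new_tokens": 1024,
                   "do_sample": True, "repetition_penalty": 1.2, "return_full_text": False},
    "stream": True,
    "options": {"id": str(uuid.uuid4()), "response_id": conversation_id,
                "is_retry": False, "use_cache": False, "web_search_id": ""},
}
with session.post(f"https://gpt-gm.h2o.ai/conversation/{conversation_id}", json=body, stream=True) as rsp:
    for line in rsp.iter_lines():  # newline-delimited "data:{json}" records
        if not line or b"text" not in line:
            continue
        token = json.loads(line.split(b"data:")[-1])["token"]["text"]
        if token != "<|endoftext|>":
            print(token, end="", flush=True)
```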

View File

@@ -33,9 +33,9 @@ import re
import subprocess
import sys

-usage = 'git clang-format [OPTIONS] [<commit>] [<commit>] [--] [<file>...]'
+usage = "git clang-format [OPTIONS] [<commit>] [<commit>] [--] [<file>...]"

-desc = '''
+desc = """
If zero or one commits are given, run clang-format on all lines that differ
between the working directory and <commit>, which defaults to HEAD. Changes are
only applied to the working directory.

@@ -48,14 +48,14 @@ The following git-config settings set the default of the corresponding option:
  clangFormat.commit
  clangFormat.extension
  clangFormat.style
-'''
+"""

# Name of the temporary index file in which save the output of clang-format.
# This file is created within the .git directory.
-temp_index_basename = 'clang-format-index'
+temp_index_basename = "clang-format-index"

-Range = collections.namedtuple('Range', 'start, count')
+Range = collections.namedtuple("Range", "start, count")

def main():

@@ -66,61 +66,61 @@ def main():
    # nargs=argparse.REMAINDER disallows options after positionals.)
    argv = sys.argv[1:]
    try:
-        idx = argv.index('--')
+        idx = argv.index("--")
    except ValueError:
        dash_dash = []
    else:
        dash_dash = argv[idx:]
        argv = argv[:idx]

-    default_extensions = ','.join(
+    default_extensions = ",".join(
        [
            # From clang/lib/Frontend/FrontendOptions.cpp, all lower case
-            'c',
-            'h',  # C
-            'm',  # ObjC
-            'mm',  # ObjC++
-            'cc',
-            'cp',
-            'cpp',
-            'c++',
-            'cxx',
-            'hh',
-            'hpp',
-            'hxx',  # C++
-            'cu',  # CUDA
+            "c",
+            "h",  # C
+            "m",  # ObjC
+            "mm",  # ObjC++
+            "cc",
+            "cp",
+            "cpp",
+            "c++",
+            "cxx",
+            "hh",
+            "hpp",
+            "hxx",  # C++
+            "cu",  # CUDA
            # Other languages that clang-format supports
-            'proto',
-            'protodevel',  # Protocol Buffers
-            'java',  # Java
-            'js',  # JavaScript
-            'ts',  # TypeScript
-            'cs',  # C Sharp
+            "proto",
+            "protodevel",  # Protocol Buffers
+            "java",  # Java
+            "js",  # JavaScript
+            "ts",  # TypeScript
+            "cs",  # C Sharp
        ]
    )

    p = argparse.ArgumentParser(usage=usage, formatter_class=argparse.RawDescriptionHelpFormatter, description=desc)
-    p.add_argument('--binary', default=config.get('clangformat.binary', 'clang-format'), help='path to clang-format'),
+    p.add_argument("--binary", default=config.get("clangformat.binary", "clang-format"), help="path to clang-format"),
    p.add_argument(
-        '--commit', default=config.get('clangformat.commit', 'HEAD'), help='default commit to use if none is specified'
+        "--commit", default=config.get("clangformat.commit", "HEAD"), help="default commit to use if none is specified"
    ),
-    p.add_argument('--diff', action='store_true', help='print a diff instead of applying the changes')
+    p.add_argument("--diff", action="store_true", help="print a diff instead of applying the changes")
    p.add_argument(
-        '--extensions',
-        default=config.get('clangformat.extensions', default_extensions),
-        help=('comma-separated list of file extensions to format, ' 'excluding the period and case-insensitive'),
+        "--extensions",
+        default=config.get("clangformat.extensions", default_extensions),
+        help=("comma-separated list of file extensions to format, " "excluding the period and case-insensitive"),
    ),
-    p.add_argument('-f', '--force', action='store_true', help='allow changes to unstaged files')
-    p.add_argument('-p', '--patch', action='store_true', help='select hunks interactively')
-    p.add_argument('-q', '--quiet', action='count', default=0, help='print less information')
-    p.add_argument('--style', default=config.get('clangformat.style', None), help='passed to clang-format'),
-    p.add_argument('-v', '--verbose', action='count', default=0, help='print extra information')
+    p.add_argument("-f", "--force", action="store_true", help="allow changes to unstaged files")
+    p.add_argument("-p", "--patch", action="store_true", help="select hunks interactively")
+    p.add_argument("-q", "--quiet", action="count", default=0, help="print less information")
+    p.add_argument("--style", default=config.get("clangformat.style", None), help="passed to clang-format"),
+    p.add_argument("-v", "--verbose", action="count", default=0, help="print extra information")
    # We gather all the remaining positional arguments into 'args' since we need
    # to use some heuristics to determine whether or not <commit> was present.
    # However, to print pretty messages, we make use of metavar and help.
-    p.add_argument('args', nargs='*', metavar='<commit>', help='revision from which to compute the diff')
+    p.add_argument("args", nargs="*", metavar="<commit>", help="revision from which to compute the diff")
    p.add_argument(
-        'ignored', nargs='*', metavar='<file>...', help='if specified, only consider differences in these files'
+        "ignored", nargs="*", metavar="<file>...", help="if specified, only consider differences in these files"
    )
    opts = p.parse_args(argv)

@@ -130,26 +130,26 @@ def main():
    commits, files = interpret_args(opts.args, dash_dash, opts.commit)
    if len(commits) > 1:
        if not opts.diff:
-            die('--diff is required when two commits are given')
+            die("--diff is required when two commits are given")
    else:
        if len(commits) > 2:
-            die('at most two commits allowed; %d given' % len(commits))
+            die("at most two commits allowed; %d given" % len(commits))
    changed_lines = compute_diff_and_extract_lines(commits, files)
    if opts.verbose >= 1:
        ignored_files = set(changed_lines)
-    filter_by_extension(changed_lines, opts.extensions.lower().split(','))
+    filter_by_extension(changed_lines, opts.extensions.lower().split(","))
    if opts.verbose >= 1:
        ignored_files.difference_update(changed_lines)
        if ignored_files:
-            print('Ignoring changes in the following files (wrong extension):')
+            print("Ignoring changes in the following files (wrong extension):")
            for filename in ignored_files:
-                print('    %s' % filename)
+                print("    %s" % filename)
        if changed_lines:
-            print('Running clang-format on the following files:')
+            print("Running clang-format on the following files:")
            for filename in changed_lines:
-                print('    %s' % filename)
+                print("    %s" % filename)
    if not changed_lines:
-        print('no modified files to format')
+        print("no modified files to format")
        return
    # The computed diff outputs absolute paths, so we must cd before accessing
    # those files.

@@ -163,19 +163,19 @@ def main():
    old_tree = create_tree_from_workdir(changed_lines)
    new_tree = run_clang_format_and_save_to_tree(changed_lines, binary=opts.binary, style=opts.style)
    if opts.verbose >= 1:
-        print('old tree: %s' % old_tree)
-        print('new tree: %s' % new_tree)
+        print("old tree: %s" % old_tree)
+        print("new tree: %s" % new_tree)
    if old_tree == new_tree:
        if opts.verbose >= 0:
-            print('clang-format did not modify any files')
+            print("clang-format did not modify any files")
    elif opts.diff:
        print_diff(old_tree, new_tree)
    else:
        changed_files = apply_changes(old_tree, new_tree, force=opts.force, patch_mode=opts.patch)
        if (opts.verbose >= 0 and not opts.patch) or opts.verbose >= 1:
-            print('changed files:')
+            print("changed files:")
            for filename in changed_files:
-                print('    %s' % filename)
+                print("    %s" % filename)

def load_git_config(non_string_options=None):

@@ -187,11 +187,11 @@ def load_git_config(non_string_options=None):
    if non_string_options is None:
        non_string_options = {}
    out = {}
-    for entry in run('git', 'config', '--list', '--null').split('\0'):
+    for entry in run("git", "config", "--list", "--null").split("\0"):
        if entry:
-            name, value = entry.split('\n', 1)
+            name, value = entry.split("\n", 1)
            if name in non_string_options:
-                value = run('git', 'config', non_string_options[name], name)
+                value = run("git", "config", non_string_options[name], name)
            out[name] = value
    return out

@@ -213,7 +213,7 @@ def interpret_args(args, dash_dash, default_commit):
        commits = args
    for commit in commits:
        object_type = get_object_type(commit)
-        if object_type not in ('commit', 'tag'):
+        if object_type not in ("commit", "tag"):
            if object_type is None:
                die("'%s' is not a commit" % commit)
            else:

@@ -238,19 +238,19 @@ def disambiguate_revision(value):
    """Returns True if `value` is a revision, False if it is a file, or dies."""
    # If `value` is ambiguous (neither a commit nor a file), the following
    # command will die with an appropriate error message.
-    run('git', 'rev-parse', value, verbose=False)
+    run("git", "rev-parse", value, verbose=False)
    object_type = get_object_type(value)
    if object_type is None:
        return False
-    if object_type in ('commit', 'tag'):
+    if object_type in ("commit", "tag"):
        return True
-    die('`%s` is a %s, but a commit or filename was expected' % (value, object_type))
+    die("`%s` is a %s, but a commit or filename was expected" % (value, object_type))

def get_object_type(value):
    """Returns a string description of an object's type, or None if it is not
    a valid git object."""
-    cmd = ['git', 'cat-file', '-t', value]
+    cmd = ["git", "cat-file", "-t", value]
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()
    if p.returncode != 0:

@@ -277,10 +277,10 @@ def compute_diff(commits, files):
    differences between the working directory and the first commit if a single
    one was specified, or the difference between both specified commits, filtered
    on `files` (if non-empty). Zero context lines are used in the patch."""
-    git_tool = 'diff-index'
+    git_tool = "diff-index"
    if len(commits) > 1:
-        git_tool = 'diff-tree'
-    cmd = ['git', git_tool, '-p', '-U0'] + commits + ['--']
+        git_tool = "diff-tree"
+    cmd = ["git", git_tool, "-p", "-U0"] + commits + ["--"]
    cmd.extend(files)
    p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    p.stdin.close()

@@ -299,10 +299,10 @@ def extract_lines(patch_file):
    matches = {}
    for line in patch_file:
        line = convert_string(line)
-        match = re.search(r'^\+\+\+\ [^/]+/(.*)', line)
+        match = re.search(r"^\+\+\+\ [^/]+/(.*)", line)
        if match:
-            filename = match.group(1).rstrip('\r\n')
-        match = re.search(r'^@@ -[0-9,]+ \+(\d+)(,(\d+))?', line)
+            filename = match.group(1).rstrip("\r\n")
+        match = re.search(r"^@@ -[0-9,]+ \+(\d+)(,(\d+))?", line)
        if match:
            start_line = int(match.group(1))
            line_count = 1

@@ -320,8 +320,8 @@ def filter_by_extension(dictionary, allowed_extensions):
    excluding the period."""
    allowed_extensions = frozenset(allowed_extensions)
    for filename in list(dictionary.keys()):
-        base_ext = filename.rsplit('.', 1)
-        if len(base_ext) == 1 and '' in allowed_extensions:
+        base_ext = filename.rsplit(".", 1)
+        if len(base_ext) == 1 and "" in allowed_extensions:
            continue
        if len(base_ext) == 1 or base_ext[1].lower() not in allowed_extensions:
            del dictionary[filename]

@@ -329,7 +329,7 @@ def filter_by_extension(dictionary, allowed_extensions):
def cd_to_toplevel():
    """Change to the top level of the git repository."""
-    toplevel = run('git', 'rev-parse', '--show-toplevel')
+    toplevel = run("git", "rev-parse", "--show-toplevel")
    os.chdir(toplevel)

@@ -337,10 +337,10 @@ def create_tree_from_workdir(filenames):
    """Create a new git tree with the given files from the working directory.
    Returns the object ID (SHA-1) of the created tree."""
-    return create_tree(filenames, '--stdin')
+    return create_tree(filenames, "--stdin")

-def run_clang_format_and_save_to_tree(changed_lines, revision=None, binary='clang-format', style=None):
+def run_clang_format_and_save_to_tree(changed_lines, revision=None, binary="clang-format", style=None):
    """Run clang-format on each file and save the result to a git tree.
    Returns the object ID (SHA-1) of the created tree."""

@@ -355,9 +355,9 @@ def run_clang_format_and_save_to_tree(changed_lines, revision=None, binary='clan
    for filename, line_ranges in iteritems(changed_lines):
        if revision:
            git_metadata_cmd = [
-                'git',
-                'ls-tree',
-                '%s:%s' % (revision, os.path.dirname(filename)),
+                "git",
+                "ls-tree",
+                "%s:%s" % (revision, os.path.dirname(filename)),
                os.path.basename(filename),
            ]
            git_metadata = subprocess.Popen(git_metadata_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)

@@ -366,12 +366,12 @@ def run_clang_format_and_save_to_tree(changed_lines, revision=None, binary='clan
        else:
            mode = oct(os.stat(filename).st_mode)
        # Adjust python3 octal format so that it matches what git expects
-        if mode.startswith('0o'):
-            mode = '0' + mode[2:]
+        if mode.startswith("0o"):
+            mode = "0" + mode[2:]
        blob_id = clang_format_to_blob(filename, line_ranges, revision=revision, binary=binary, style=style)
-        yield '%s %s\t%s' % (mode, blob_id, filename)
-    return create_tree(index_info_generator(), '--index-info')
+        yield "%s %s\t%s" % (mode, blob_id, filename)
+    return create_tree(index_info_generator(), "--index-info")

def create_tree(input_lines, mode):

@@ -381,20 +381,20 @@ def create_tree(input_lines, mode):
    '--index-info' is must be a list of values suitable for "git update-index
    --index-info", such as "<mode> <SP> <sha1> <TAB> <filename>". Any other mode
    is invalid."""
-    assert mode in ('--stdin', '--index-info')
-    cmd = ['git', 'update-index', '--add', '-z', mode]
+    assert mode in ("--stdin", "--index-info")
+    cmd = ["git", "update-index", "--add", "-z", mode]
    with temporary_index_file():
        p = subprocess.Popen(cmd, stdin=subprocess.PIPE)
        for line in input_lines:
-            p.stdin.write(to_bytes('%s\0' % line))
+            p.stdin.write(to_bytes("%s\0" % line))
        p.stdin.close()
        if p.wait() != 0:
-            die('`%s` failed' % ' '.join(cmd))
-        tree_id = run('git', 'write-tree')
+            die("`%s` failed" % " ".join(cmd))
+        tree_id = run("git", "write-tree")
        return tree_id

-def clang_format_to_blob(filename, line_ranges, revision=None, binary='clang-format', style=None):
+def clang_format_to_blob(filename, line_ranges, revision=None, binary="clang-format", style=None):
    """Run clang-format on the given file and save the result to a git blob.
    Runs on the file in `revision` if not None, or on the file in the working

@@ -403,13 +403,13 @@ def clang_format_to_blob(filename, line_ranges, revision=None, binary='clang-for
    Returns the object ID (SHA-1) of the created blob."""
    clang_format_cmd = [binary]
    if style:
-        clang_format_cmd.extend(['-style=' + style])
+        clang_format_cmd.extend(["-style=" + style])
    clang_format_cmd.extend(
-        ['-lines=%s:%s' % (start_line, start_line + line_count - 1) for start_line, line_count in line_ranges]
+        ["-lines=%s:%s" % (start_line, start_line + line_count - 1) for start_line, line_count in line_ranges]
    )
    if revision:
-        clang_format_cmd.extend(['-assume-filename=' + filename])
-        git_show_cmd = ['git', 'cat-file', 'blob', '%s:%s' % (revision, filename)]
+        clang_format_cmd.extend(["-assume-filename=" + filename])
+        git_show_cmd = ["git", "cat-file", "blob", "%s:%s" % (revision, filename)]
        git_show = subprocess.Popen(git_show_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        git_show.stdin.close()
        clang_format_stdin = git_show.stdout

@@ -427,17 +427,17 @@ def clang_format_to_blob(filename, line_ranges, revision=None, binary='clang-for
        else:
            raise
    clang_format_stdin.close()
-    hash_object_cmd = ['git', 'hash-object', '-w', '--path=' + filename, '--stdin']
+    hash_object_cmd = ["git", "hash-object", "-w", "--path=" + filename, "--stdin"]
    hash_object = subprocess.Popen(hash_object_cmd, stdin=clang_format.stdout, stdout=subprocess.PIPE)
    clang_format.stdout.close()
    stdout = hash_object.communicate()[0]
    if hash_object.returncode != 0:
-        die('`%s` failed' % ' '.join(hash_object_cmd))
+        die("`%s` failed" % " ".join(hash_object_cmd))
    if clang_format.wait() != 0:
-        die('`%s` failed' % ' '.join(clang_format_cmd))
+        die("`%s` failed" % " ".join(clang_format_cmd))
    if git_show and git_show.wait() != 0:
-        die('`%s` failed' % ' '.join(git_show_cmd))
-    return convert_string(stdout).rstrip('\r\n')
+        die("`%s` failed" % " ".join(git_show_cmd))
+    return convert_string(stdout).rstrip("\r\n")

@contextlib.contextmanager

@@ -445,15 +445,15 @@ def temporary_index_file(tree=None):
    """Context manager for setting GIT_INDEX_FILE to a temporary file and deleting
    the file afterward."""
    index_path = create_temporary_index(tree)
-    old_index_path = os.environ.get('GIT_INDEX_FILE')
-    os.environ['GIT_INDEX_FILE'] = index_path
+    old_index_path = os.environ.get("GIT_INDEX_FILE")
+    os.environ["GIT_INDEX_FILE"] = index_path
    try:
        yield
    finally:
        if old_index_path is None:
-            del os.environ['GIT_INDEX_FILE']
+            del os.environ["GIT_INDEX_FILE"]
        else:
-            os.environ['GIT_INDEX_FILE'] = old_index_path
+            os.environ["GIT_INDEX_FILE"] = old_index_path
        os.remove(index_path)

@@ -462,11 +462,11 @@ def create_temporary_index(tree=None):
    If `tree` is not None, use that as the tree to read in. Otherwise, an
    empty index is created."""
-    gitdir = run('git', 'rev-parse', '--git-dir')
+    gitdir = run("git", "rev-parse", "--git-dir")
    path = os.path.join(gitdir, temp_index_basename)
    if tree is None:
-        tree = '--empty'
-    run('git', 'read-tree', '--index-output=' + path, tree)
+        tree = "--empty"
+    run("git", "read-tree", "--index-output=" + path, tree)
    return path

@@ -479,7 +479,7 @@ def print_diff(old_tree, new_tree):
    # We also only print modified files since `new_tree` only contains the files
    # that were modified, so unmodified files would show as deleted without the
    # filter.
-    subprocess.check_call(['git', 'diff', '--diff-filter=M', old_tree, new_tree, '--'])
+    subprocess.check_call(["git", "diff", "--diff-filter=M", old_tree, new_tree, "--"])

def apply_changes(old_tree, new_tree, force=False, patch_mode=False):

@@ -488,16 +488,16 @@ def apply_changes(old_tree, new_tree, force=False, patch_mode=False):
    Bails if there are local changes in those files and not `force`. If
    `patch_mode`, runs `git checkout --patch` to select hunks interactively."""
    changed_files = (
-        run('git', 'diff-tree', '--diff-filter=M', '-r', '-z', '--name-only', old_tree, new_tree)
-        .rstrip('\0')
-        .split('\0')
+        run("git", "diff-tree", "--diff-filter=M", "-r", "-z", "--name-only", old_tree, new_tree)
+        .rstrip("\0")
+        .split("\0")
    )
    if not force:
-        unstaged_files = run('git', 'diff-files', '--name-status', *changed_files)
+        unstaged_files = run("git", "diff-files", "--name-status", *changed_files)
        if unstaged_files:
-            print('The following files would be modified but ' 'have unstaged changes:', file=sys.stderr)
+            print("The following files would be modified but " "have unstaged changes:", file=sys.stderr)
            print(unstaged_files, file=sys.stderr)
-            print('Please commit, stage, or stash them first.', file=sys.stderr)
+            print("Please commit, stage, or stash them first.", file=sys.stderr)
            sys.exit(2)
    if patch_mode:
        # In patch mode, we could just as well create an index from the new tree

@@ -507,17 +507,17 @@ def apply_changes(old_tree, new_tree, force=False, patch_mode=False):
        # better message, "Apply ... to index and worktree". This is not quite
        # right, since it won't be applied to the user's index, but oh well.
        with temporary_index_file(old_tree):
-            subprocess.check_call(['git', 'checkout', '--patch', new_tree])
+            subprocess.check_call(["git", "checkout", "--patch", new_tree])
    else:
        with temporary_index_file(new_tree):
-            run('git', 'checkout-index', '-a', '-f')
+            run("git", "checkout-index", "-a", "-f")
    return changed_files

def run(*args, **kwargs):
-    stdin = kwargs.pop('stdin', '')
-    verbose = kwargs.pop('verbose', True)
-    strip = kwargs.pop('strip', True)
+    stdin = kwargs.pop("stdin", "")
+    verbose = kwargs.pop("verbose", True)
+    strip = kwargs.pop("strip", True)
    for name in kwargs:
        raise TypeError("run() got an unexpected keyword argument '%s'" % name)
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)

@@ -529,20 +529,20 @@ def run(*args, **kwargs):
    if p.returncode == 0:
        if stderr:
            if verbose:
-                print('`%s` printed to stderr:' % ' '.join(args), file=sys.stderr)
+                print("`%s` printed to stderr:" % " ".join(args), file=sys.stderr)
            print(stderr.rstrip(), file=sys.stderr)
        if strip:
-            stdout = stdout.rstrip('\r\n')
+            stdout = stdout.rstrip("\r\n")
        return stdout
    if verbose:
-        print('`%s` returned %s' % (' '.join(args), p.returncode), file=sys.stderr)
+        print("`%s` returned %s" % (" ".join(args), p.returncode), file=sys.stderr)
    if stderr:
        print(stderr.rstrip(), file=sys.stderr)
    sys.exit(2)

def die(message):
-    print('error:', message, file=sys.stderr)
+    print("error:", message, file=sys.stderr)
    sys.exit(2)

@@ -550,23 +550,23 @@ def to_bytes(str_input):
    # Encode to UTF-8 to get binary data.
    if isinstance(str_input, bytes):
        return str_input
-    return str_input.encode('utf-8')
+    return str_input.encode("utf-8")

def to_string(bytes_input):
    if isinstance(bytes_input, str):
        return bytes_input
-    return bytes_input.encode('utf-8')
+    return bytes_input.encode("utf-8")

def convert_string(bytes_input):
    try:
-        return to_string(bytes_input.decode('utf-8'))
+        return to_string(bytes_input.decode("utf-8"))
    except AttributeError:  # 'str' object has no attribute 'decode'.
        return str(bytes_input)
    except UnicodeError:
        return str(bytes_input)

-if __name__ == '__main__':
+if __name__ == "__main__":
    main()

View File

@@ -14,6 +14,7 @@ struct Config {
    std::string http_proxy;
    std::string api_key;
    std::vector<std::string> ip_white_list;
+   std::string zeus{"http://chatgpt_zeus_service:8860"};
};

YCS_ADD_STRUCT(Config, client_root_path, interval, work_thread_num, host, port, chat_path, providers, enable_proxy,
-              http_proxy, api_key, ip_white_list)
+              http_proxy, api_key, ip_white_list, zeus)

View File

@@ -18,14 +18,9 @@ public:
    FreeGpt(Config&);

-   boost::asio::awaitable<void> aiTianhu(std::shared_ptr<Channel>, nlohmann::json);
-   boost::asio::awaitable<void> aiTianhuSpace(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> deepAi(std::shared_ptr<Channel>, nlohmann::json);
-   boost::asio::awaitable<void> aiChat(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> chatGptAi(std::shared_ptr<Channel>, nlohmann::json);
-   boost::asio::awaitable<void> acytoo(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> openAi(std::shared_ptr<Channel>, nlohmann::json);
-   boost::asio::awaitable<void> h2o(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> yqcloud(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> huggingChat(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> you(std::shared_ptr<Channel>, nlohmann::json);

@@ -39,9 +34,10 @@ public:
    boost::asio::awaitable<void> chatGptDuo(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> chatForAi(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> freeGpt(std::shared_ptr<Channel>, nlohmann::json);
-   boost::asio::awaitable<void> cromicle(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> chatGpt4Online(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> gptalk(std::shared_ptr<Channel>, nlohmann::json);
+   boost::asio::awaitable<void> gptForLove(std::shared_ptr<Channel>, nlohmann::json);
+   boost::asio::awaitable<void> chatGptDemo(std::shared_ptr<Channel>, nlohmann::json);

private:
    boost::asio::awaitable<std::expected<boost::beast::ssl_stream<boost::beast::tcp_stream>, std::string>>

File diff suppressed because it is too large

View File

@@ -65,6 +65,8 @@ void setEnvironment(auto& cfg) {
        if (!ip_white_list.is_discarded())
            cfg.ip_white_list = ip_white_list.get<std::vector<std::string>>();
    }
+   if (auto [zeus] = getEnv("ZEUS"); !zeus.empty())
+       cfg.zeus = std::move(zeus);
}

std::string createIndexHtml(const std::string& file, const Config& cfg) {
@@ -324,10 +326,6 @@ int main(int argc, char** argv) {
    setEnvironment(cfg);
    auto [yaml_cfg_str, _] = yaml_cpp_struct::to_yaml(cfg);
-   SPDLOG_INFO("\n{}", yaml_cpp_struct::yaml_to_json(yaml_cfg_str.value()).dump(2));
-   std::cout << "\033[32m"
-             << "GitHub: https://github.com/fantasy-peak/cpp-freegpt-webui"
-             << "\033[0m" << std::endl;

    FreeGpt app{cfg};
@@ -336,7 +334,6 @@ int main(int argc, char** argv) {
    ADD_METHOD("gpt-4-ChatgptAi", FreeGpt::chatGptAi);
    ADD_METHOD("gpt-3.5-turbo-stream-DeepAi", FreeGpt::deepAi);
-   ADD_METHOD("gpt-3.5-turbo-stream-H2o", FreeGpt::h2o);
    ADD_METHOD("gpt-3.5-turbo-stream-yqcloud", FreeGpt::yqcloud);
    ADD_METHOD("gpt-OpenAssistant-stream-HuggingChat", FreeGpt::huggingChat)
    ADD_METHOD("gpt-4-turbo-stream-you", FreeGpt::you);

@@ -349,14 +346,20 @@ int main(int argc, char** argv) {
    ADD_METHOD("gpt-3.5-turbo-stream-Aibn", FreeGpt::aibn);
    ADD_METHOD("gpt-3.5-turbo-ChatgptDuo", FreeGpt::chatGptDuo);
    ADD_METHOD("gpt-3.5-turbo-stream-FreeGpt", FreeGpt::freeGpt);
-   ADD_METHOD("gpt-3.5-turbo-stream-Cromicle", FreeGpt::cromicle);
    ADD_METHOD("gpt-4-stream-Chatgpt4Online", FreeGpt::chatGpt4Online);
    ADD_METHOD("gpt-3.5-turbo-stream-gptalk", FreeGpt::gptalk);
-   // ADD_METHOD("gpt-3.5-turbo-Aichat", FreeGpt::aiChat);
-   // ADD_METHOD("gpt-3.5-turbo-stream-ChatForAi", FreeGpt::chatForAi);
-   // ADD_METHOD("gpt-3.5-turbo-stream-AItianhuSpace", FreeGpt::aiTianhuSpace);
-   // ADD_METHOD("gpt-3.5-turbo-AItianhu", FreeGpt::aiTianhu);
-   // ADD_METHOD("gpt-3.5-turbo-acytoo", FreeGpt::acytoo);
+   ADD_METHOD("gpt-3.5-turbo-stream-ChatForAi", FreeGpt::chatForAi);
+   ADD_METHOD("gpt-3.5-turbo-stream-gptforlove", FreeGpt::gptForLove);
+   ADD_METHOD("gpt-3.5-turbo-stream-ChatgptDemo", FreeGpt::chatGptDemo);
+
+   SPDLOG_INFO("active provider:");
+   for (auto& [provider, _] : gpt_function)
+       SPDLOG_INFO("    {}", provider);
+   SPDLOG_INFO("\n{}", yaml_cpp_struct::yaml_to_json(yaml_cfg_str.value()).dump(2));
+   std::cout << "\033[32m"
+             << "GitHub: https://github.com/fantasy-peak/cpp-freegpt-webui"
+             << "\033[0m" << std::endl;

    IoContextPool pool{cfg.work_thread_num};
    pool.start();
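
The `zeus` URL added to `Config` (overridable through the `ZEUS` environment variable, as the hunk above shows) points the C++ service at the new Python sidecar. As a hedged smoke test, assuming the sidecar is reachable under its default URL from `Config` (or `localhost:8860` outside the compose network) and exposes the `/gptforlove` route defined in `zeus.py` further below:

```python
# hypothetical smoke test: ask the zeus sidecar for a gptforlove secret;
# the hostname and route are taken from the Config default and zeus.py
import os

import requests

zeus = os.getenv("ZEUS", "http://chatgpt_zeus_service:8860")
print(requests.post(f"{zeus}/gptforlove", json={}, timeout=10).json()["secret"])
```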

View File

@@ -0,0 +1,25 @@
FROM ubuntu:23.04
RUN rm /bin/sh && ln -s /bin/bash /bin/sh
RUN apt-get update -y
RUN apt-get install -y python3/lunar python3.11-venv dbus-x11/lunar curl nodejs/lunar tree
# install Chrome
# https://stackoverflow.com/questions/70955307/how-to-install-google-chrome-in-a-docker-container
RUN curl -LO https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb
RUN apt-get install -y ./google-chrome-stable_current_amd64.deb
RUN rm google-chrome-stable_current_amd64.deb
# Check chrome version
RUN echo "Chrome: " && google-chrome --version
WORKDIR /app
COPY ./npm /app/npm
ADD requirements.txt /app
ADD zeus.py /app
RUN python3 -m venv venv \
&& source venv/bin/activate \
&& pip install -r requirements.txt
ENTRYPOINT ["sh", "-c", "source venv/bin/activate && python3 zeus.py"]

View File

@@ -0,0 +1,261 @@
# crypto-js [![Build Status](https://travis-ci.org/brix/crypto-js.svg?branch=develop)](https://travis-ci.org/brix/crypto-js)
JavaScript library of crypto standards.
## Node.js (Install)
Requirements:
- Node.js
- npm (Node.js package manager)
```bash
npm install crypto-js
```
### Usage
ES6 import for typical API call signing use case:
```javascript
import sha256 from 'crypto-js/sha256';
import hmacSHA512 from 'crypto-js/hmac-sha512';
import Base64 from 'crypto-js/enc-base64';
let message, nonce, path, privateKey; // ...
const hashDigest = sha256(nonce + message);
const hmacDigest = Base64.stringify(hmacSHA512(path + hashDigest, privateKey));
```
Modular include:
```javascript
var AES = require("crypto-js/aes");
var SHA256 = require("crypto-js/sha256");
...
console.log(SHA256("Message"));
```
Including all libraries, for access to extra methods:
```javascript
var CryptoJS = require("crypto-js");
console.log(CryptoJS.HmacSHA1("Message", "Key"));
```
## Client (browser)
Requirements:
- Node.js
- Bower (package manager for frontend)
```bash
bower install crypto-js
```
### Usage
Modular include:
```javascript
require.config({
packages: [
{
name: 'crypto-js',
location: 'path-to/bower_components/crypto-js',
main: 'index'
}
]
});
require(["crypto-js/aes", "crypto-js/sha256"], function (AES, SHA256) {
console.log(SHA256("Message"));
});
```
Including all libraries, for access to extra methods:
```javascript
// The above will work, or use this simpler form
require.config({
paths: {
'crypto-js': 'path-to/bower_components/crypto-js/crypto-js'
}
});
require(["crypto-js"], function (CryptoJS) {
console.log(CryptoJS.HmacSHA1("Message", "Key"));
});
```
### Usage without RequireJS
```html
<script type="text/javascript" src="path-to/bower_components/crypto-js/crypto-js.js"></script>
<script type="text/javascript">
var encrypted = CryptoJS.AES(...);
var encrypted = CryptoJS.SHA256(...);
</script>
```
## API
See: https://cryptojs.gitbook.io/docs/
### AES Encryption
#### Plain text encryption
```javascript
var CryptoJS = require("crypto-js");
// Encrypt
var ciphertext = CryptoJS.AES.encrypt('my message', 'secret key 123').toString();
// Decrypt
var bytes = CryptoJS.AES.decrypt(ciphertext, 'secret key 123');
var originalText = bytes.toString(CryptoJS.enc.Utf8);
console.log(originalText); // 'my message'
```
#### Object encryption
```javascript
var CryptoJS = require("crypto-js");
var data = [{id: 1}, {id: 2}]
// Encrypt
var ciphertext = CryptoJS.AES.encrypt(JSON.stringify(data), 'secret key 123').toString();
// Decrypt
var bytes = CryptoJS.AES.decrypt(ciphertext, 'secret key 123');
var decryptedData = JSON.parse(bytes.toString(CryptoJS.enc.Utf8));
console.log(decryptedData); // [{id: 1}, {id: 2}]
```
### List of modules
- ```crypto-js/core```
- ```crypto-js/x64-core```
- ```crypto-js/lib-typedarrays```
---
- ```crypto-js/md5```
- ```crypto-js/sha1```
- ```crypto-js/sha256```
- ```crypto-js/sha224```
- ```crypto-js/sha512```
- ```crypto-js/sha384```
- ```crypto-js/sha3```
- ```crypto-js/ripemd160```
---
- ```crypto-js/hmac-md5```
- ```crypto-js/hmac-sha1```
- ```crypto-js/hmac-sha256```
- ```crypto-js/hmac-sha224```
- ```crypto-js/hmac-sha512```
- ```crypto-js/hmac-sha384```
- ```crypto-js/hmac-sha3```
- ```crypto-js/hmac-ripemd160```
---
- ```crypto-js/pbkdf2```
---
- ```crypto-js/aes```
- ```crypto-js/tripledes```
- ```crypto-js/rc4```
- ```crypto-js/rabbit```
- ```crypto-js/rabbit-legacy```
- ```crypto-js/evpkdf```
---
- ```crypto-js/format-openssl```
- ```crypto-js/format-hex```
---
- ```crypto-js/enc-latin1```
- ```crypto-js/enc-utf8```
- ```crypto-js/enc-hex```
- ```crypto-js/enc-utf16```
- ```crypto-js/enc-base64```
---
- ```crypto-js/mode-cfb```
- ```crypto-js/mode-ctr```
- ```crypto-js/mode-ctr-gladman```
- ```crypto-js/mode-ofb```
- ```crypto-js/mode-ecb```
---
- ```crypto-js/pad-pkcs7```
- ```crypto-js/pad-ansix923```
- ```crypto-js/pad-iso10126```
- ```crypto-js/pad-iso97971```
- ```crypto-js/pad-zeropadding```
- ```crypto-js/pad-nopadding```
## Release notes
### 4.1.1
Fix module order in bundled release.
Include the browser field in the released package.json.
### 4.1.0
Added url safe variant of base64 encoding. [357](https://github.com/brix/crypto-js/pull/357)
Avoid webpack to add crypto-browser package. [364](https://github.com/brix/crypto-js/pull/364)
### 4.0.0
This is an update including breaking changes for some environments.
In this version `Math.random()` has been replaced by the random methods of the native crypto module.

For this reason CryptoJS might not run in some JavaScript environments without a native crypto module, such as IE 10 or earlier, or React Native.
### 3.3.0
Rollback: `3.3.0` is the same as `3.1.9-1`.

The move to the native secure crypto module has been deferred to a new `4.x.x` version, as such a breaking change has too big an impact for a minor release.
### 3.2.1
The usage of the native crypto module has been fixed. The import and access of the native crypto module has been improved.
### 3.2.0
In this version `Math.random()` has been replaced by the random methods of the native crypto module.

For this reason CryptoJS might not run in some JavaScript environments without a native crypto module, such as IE 10 or earlier.

If it's absolutely required to run CryptoJS in such an environment, stay with the `3.1.x` versions. Encrypting and decrypting stay compatible. But keep in mind that the `3.1.x` versions still use `Math.random()`, which is not cryptographically secure, as it's not random enough.

This version shipped with a `CRITICAL` `BUG`.

DO NOT USE THIS VERSION! Please, go for a newer version!
### 3.1.x
The `3.1.x` are based on the original CryptoJS, wrapped in CommonJS modules.

File diff suppressed because it is too large

View File

@@ -0,0 +1,44 @@
altgraph==0.17.4
attrs==23.1.0
black==23.9.1
blinker==1.6.2
browser-cookie3==0.19.1
certifi==2023.7.22
charset-normalizer==3.3.0
click==8.1.7
docopt==0.6.2
Flask==3.0.0
Flask-Cors==4.0.0
h11==0.14.0
idna==3.4
itsdangerous==2.1.2
jeepney==0.8.0
Jinja2==3.1.2
Js2Py==0.74
lz4==4.3.2
MarkupSafe==2.1.3
mypy-extensions==1.0.0
outcome==1.2.0
packaging==23.2
pathspec==0.11.2
pipreqs==0.4.13
platformdirs==3.11.0
pycryptodomex==3.19.0
PyExecJS==1.5.1
pyinstaller==6.0.0
pyinstaller-hooks-contrib==2023.9
pyjsparser==2.7.1
PySocks==1.7.1
requests==2.31.0
selenium==4.13.0
six==1.16.0
sniffio==1.3.0
sortedcontainers==2.4.0
trio==0.22.2
trio-websocket==0.11.1
typing_extensions==4.8.0
tzlocal==5.1
urllib3==2.0.6
Werkzeug==3.0.0
wsproto==1.2.0
yarg==0.1.9

View File

@@ -0,0 +1,65 @@
import json
import os
import threading
import time
import traceback

import execjs
from flask import Flask
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from werkzeug.serving import ThreadedWSGIServer

app = Flask(__name__)

options = webdriver.ChromeOptions()
options.add_argument("--headless")
options.add_argument("--no-sandbox")
options.add_argument("--disable-extensions")
options.add_argument("--disable-gpu")
options.add_argument("--disable-dev-shm-usage")


def deepai_refresh():
    # periodically open deepai.org in headless Chrome to refresh its cookies
    while True:
        driver = webdriver.Chrome(options=options)
        try:
            driver.get("https://deepai.org")
            # actually wait for the page to load (a bare WebDriverWait(...) never waits)
            WebDriverWait(driver, 15).until(lambda d: d.execute_script("return document.readyState") == "complete")
            cookies = driver.get_cookies()
            print(cookies)
        except Exception:
            traceback.print_exc()
        finally:
            driver.quit()
        time.sleep(600)


# curl -X POST -d '{}' -H "Content-Type: application/json" http://127.0.0.1:8860/gptforlove
@app.route("/gptforlove", methods=["POST"])
def get_gptforlove_secret():
    crypto_dir = os.path.dirname(__file__) + "/npm/node_modules/crypto-js"
    source = """
CryptoJS = require('{dir}/crypto-js')
var k = '14487141bvirvvG'
    , e = Math.floor(new Date().getTime() / 1e3);
var t = CryptoJS.enc.Utf8.parse(e)
    , o = CryptoJS.AES.encrypt(t, k, {
    mode: CryptoJS.mode.ECB,
    padding: CryptoJS.pad.Pkcs7
});
return o.toString()
"""
    source = source.replace("{dir}", crypto_dir)
    return json.dumps({"secret": execjs.compile(source).call("")})


if __name__ == "__main__":
    thread = threading.Thread(target=deepai_refresh)
    thread.start()

    port = int(os.getenv("PORT", "8860"))  # ThreadedWSGIServer expects an int port
    ip = os.getenv("IP", "0.0.0.0")
    print(f"start zeus at {ip}:{port}")
    server = ThreadedWSGIServer(ip, port, app)
    server.serve_forever()
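
The Node round-trip above can also be reproduced in pure Python with `pycryptodomex`, which is already pinned in requirements.txt. This is a sketch assuming CryptoJS's OpenSSL-compatible key derivation for string passphrases (`EVP_BytesToKey` with MD5), which is what produces the `Salted__`-prefixed Base64 that `o.toString()` returns:

```python
# sketch: same secret as the CryptoJS snippet above, assuming CryptoJS's
# OpenSSL-style EVP_BytesToKey (MD5) derivation for string passphrases
import base64
import os
import time
from hashlib import md5

from Cryptodome.Cipher import AES  # pycryptodomex, see requirements.txt
from Cryptodome.Util.Padding import pad


def evp_bytes_to_key(passphrase: bytes, salt: bytes, key_len: int = 32) -> bytes:
    # iterate MD5(prev || passphrase || salt) until enough key material is derived
    derived = b""
    block = b""
    while len(derived) < key_len:
        block = md5(block + passphrase + salt).digest()
        derived += block
    return derived[:key_len]


def gptforlove_secret(k: str = "14487141bvirvvG") -> str:
    timestamp = str(int(time.time()))         # Math.floor(Date.now() / 1e3)
    salt = os.urandom(8)
    key = evp_bytes_to_key(k.encode(), salt)  # 256-bit AES key; ECB ignores the iv
    ciphertext = AES.new(key, AES.MODE_ECB).encrypt(pad(timestamp.encode(), AES.block_size))
    return base64.b64encode(b"Salted__" + salt + ciphertext).decode()
```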

View File

@@ -1,4 +1,4 @@
-# Telegram bot. Redirects to bot-service
+# Redirects to bot-service
:8083 {
    reverse_proxy bot_service:8000
    header Strict-Transport-Security max-age=31536000;

@@ -7,9 +7,9 @@
    header -Server
}

-# Telegram bot. Redirects to test-server ip
+# Redirects to chat service
:8084 {
-   reverse_proxy chat_service:8858
+   reverse_proxy chatgpt_chat_service:8858
    header Strict-Transport-Security max-age=31536000;
    # Removing some headers for improved security:

View File

@@ -1,9 +1,9 @@
version: '3.9'

networks:
-  chat-gpt-network:
+  chatgpt-network:
    name:
-      "chat_gpt_network"
+      "chatgpt_network"
    ipam:
      config:
        - subnet: 200.20.0.0/24

@@ -11,8 +11,8 @@ networks:
services:
  bot:
-    image: "chat_gpt_bot_service:latest"
-    container_name: "chat_gpt_bot_service"
+    image: "chatgpt_bot_service:latest"
+    container_name: "chatgpt_bot_service"
    hostname: "bot_service"
    build:
      context: .

@@ -27,35 +27,35 @@ services:
      - ./bot_microservice/settings:/app/settings:ro
      - /etc/localtime:/etc/localtime:ro
    networks:
-      chat-gpt-network:
+      chatgpt-network:
        ipv4_address: 200.20.0.10
    expose:
      - "8000"
    command: bash start-bot.sh

-  chat-gpt:
+  chatgpt_chat:
    image: "balshdocker/freegpt:latest"
-    container_name: "chat_gpt_chat_service"
-    hostname: "chat_service"
+    container_name: "chatgpt_chat_service"
+    hostname: "chatgpt_chat_service"
    restart: unless-stopped
    environment:
      CHAT_PATH: "/gpt/chat"
    networks:
-      chat-gpt-network:
+      chatgpt-network:
        ipv4_address: 200.20.0.11
    expose:
      - "8858"

  caddy:
    image: "caddy:2.7.4"
-    container_name: "chat_gpt_caddy_service"
+    container_name: "chatgpt_caddy_service"
    hostname: "caddy_service"
    restart: unless-stopped
    ports:
-      - '8083:8083'
-      - '8084:8084'
+      - "8083:8083"
+      - "8084:8084"
    volumes:
      - ./deploy/Caddyfile:/etc/caddy/Caddyfile:ro
    networks:
-      chat-gpt-network:
+      chatgpt-network:
        ipv4_address: 200.20.0.12
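
With the renamed network and services, host port 8084 still fronts the chat UI through caddy. A small hedged check, assuming the compose stack is running locally:

```python
# hypothetical check: caddy on :8084 should proxy to chatgpt_chat_service:8858,
# which serves the UI under CHAT_PATH=/gpt/chat
import requests

rsp = requests.get("http://localhost:8084/gpt/chat/", timeout=10)
print(rsp.status_code)  # expect 200 once the stack is up
```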