From e9f76d0ea9fe7e401f75da168d502b9b09dbb231 Mon Sep 17 00:00:00 2001 From: Dmitry Afanasyev <71835315+Balshgit@users.noreply.github.com> Date: Tue, 10 Oct 2023 23:22:41 +0300 Subject: [PATCH] update chat service (#31) * rename chatgpt service * add zeus tool for new provider * add zeus tool for new provider * update chat service * update README.md --- README.md | 3 +- bot_microservice/api/deps.py | 6 +- bot_microservice/constants.py | 18 +- bot_microservice/core/bot/commands.py | 8 +- bot_microservice/core/bot/repository.py | 20 +- bot_microservice/core/bot/services.py | 8 +- bot_microservice/core/utils.py | 24 +- bot_microservice/infra/logging_conf.py | 3 +- bot_microservice/main.py | 2 +- bot_microservice/settings/.env.template | 6 +- bot_microservice/settings/config.py | 36 +- .../tests/integration/bot/test_bot_api.py | 7 +- bot_microservice/tests/integration/utils.py | 4 +- .../.github/workflows/build.yaml | 15 +- chatgpt_microservice/README.md | 21 +- chatgpt_microservice/client/css/style.css | 144 +- chatgpt_microservice/client/html/index.html | 7 +- chatgpt_microservice/deprecated/free_gpt.cpp | 774 +++ chatgpt_microservice/git-clang-format.py | 240 +- chatgpt_microservice/include/cfg.h | 3 +- chatgpt_microservice/include/free_gpt.h | 8 +- chatgpt_microservice/src/free_gpt.cpp | 1064 +-- chatgpt_microservice/src/main.cpp | 25 +- chatgpt_microservice/tools/Dockerfile | 25 + .../npm/node_modules/crypto-js/README.md | 261 + .../npm/node_modules/crypto-js/crypto-js.js | 6191 +++++++++++++++++ chatgpt_microservice/tools/requirements.txt | 44 + chatgpt_microservice/tools/zeus.py | 65 + deploy/Caddyfile | 6 +- docker-compose.yml | 26 +- 30 files changed, 8095 insertions(+), 969 deletions(-) create mode 100644 chatgpt_microservice/tools/Dockerfile create mode 100644 chatgpt_microservice/tools/npm/node_modules/crypto-js/README.md create mode 100644 chatgpt_microservice/tools/npm/node_modules/crypto-js/crypto-js.js create mode 100644 chatgpt_microservice/tools/requirements.txt create mode 100644 chatgpt_microservice/tools/zeus.py diff --git a/README.md b/README.md index 77e8b0d..e61ec22 100644 --- a/README.md +++ b/README.md @@ -57,7 +57,8 @@ methods: ## Chat: ```shell -docker run -p 8858:8858 -it --name freegpt --rm -e CHAT_PATH=/chat balshdocker/freegpt:latest +docker run --rm --net=host --name freegpt --rm -e CHAT_PATH=/chat balshdocker/freegpt:latest +docker run --rm --net=host --name zeus --rm balshdocker/freegpt-zeus:latest ``` Open http://localhost:8858/chat/ diff --git a/bot_microservice/api/deps.py b/bot_microservice/api/deps.py index 3943dda..90fb35b 100644 --- a/bot_microservice/api/deps.py +++ b/bot_microservice/api/deps.py @@ -30,7 +30,7 @@ def get_database(settings: AppSettings = Depends(get_settings)) -> Database: return Database(settings=settings) -def get_chat_gpt_repository( +def get_chatgpt_repository( db: Database = Depends(get_database), settings: AppSettings = Depends(get_settings) ) -> ChatGPTRepository: return ChatGPTRepository(settings=settings, db=db) @@ -41,6 +41,6 @@ def new_bot_queue(bot_app: BotApplication = Depends(get_bot_app)) -> BotQueue: def get_chatgpt_service( - chat_gpt_repository: ChatGPTRepository = Depends(get_chat_gpt_repository), + chatgpt_repository: ChatGPTRepository = Depends(get_chatgpt_repository), ) -> ChatGptService: - return ChatGptService(repository=chat_gpt_repository) + return ChatGptService(repository=chatgpt_repository) diff --git a/bot_microservice/constants.py b/bot_microservice/constants.py index 9c6c7d4..0f93633 100644 --- 
a/bot_microservice/constants.py +++ b/bot_microservice/constants.py @@ -3,7 +3,7 @@ from enum import StrEnum, unique AUDIO_SEGMENT_DURATION = 120 * 1000 API_PREFIX = "/api" -CHAT_GPT_BASE_URI = "/backend-api/v2/conversation" +CHATGPT_BASE_URI = "/backend-api/v2/conversation" INVALID_GPT_REQUEST_MESSAGES = ("Invalid request model", "return unexpected http status code") @@ -31,16 +31,12 @@ class LogLevelEnum(StrEnum): @unique class ChatGptModelsEnum(StrEnum): gpt_3_5_turbo_stream_openai = "gpt-3.5-turbo-stream-openai" - gpt_3_5_turbo_Aichat = "gpt-3.5-turbo-Aichat" gpt_4_ChatgptAi = "gpt-4-ChatgptAi" gpt_3_5_turbo_weWordle = "gpt-3.5-turbo-weWordle" - gpt_3_5_turbo_acytoo = "gpt-3.5-turbo-acytoo" gpt_3_5_turbo_stream_DeepAi = "gpt-3.5-turbo-stream-DeepAi" - gpt_3_5_turbo_stream_H2o = "gpt-3.5-turbo-stream-H2o" gpt_3_5_turbo_stream_yqcloud = "gpt-3.5-turbo-stream-yqcloud" gpt_OpenAssistant_stream_HuggingChat = "gpt-OpenAssistant-stream-HuggingChat" gpt_4_turbo_stream_you = "gpt-4-turbo-stream-you" - gpt_3_5_turbo_AItianhu = "gpt-3.5-turbo-AItianhu" gpt_3_stream_binjie = "gpt-3-stream-binjie" gpt_3_5_turbo_stream_CodeLinkAva = "gpt-3.5-turbo-stream-CodeLinkAva" gpt_4_stream_ChatBase = "gpt-4-stream-ChatBase" @@ -48,14 +44,15 @@ class ChatGptModelsEnum(StrEnum): gpt_3_5_turbo_16k_stream_Ylokh = "gpt-3.5-turbo-16k-stream-Ylokh" gpt_3_5_turbo_stream_Vitalentum = "gpt-3.5-turbo-stream-Vitalentum" gpt_3_5_turbo_stream_GptGo = "gpt-3.5-turbo-stream-GptGo" - gpt_3_5_turbo_stream_AItianhuSpace = "gpt-3.5-turbo-stream-AItianhuSpace" gpt_3_5_turbo_stream_Aibn = "gpt-3.5-turbo-stream-Aibn" gpt_3_5_turbo_ChatgptDuo = "gpt-3.5-turbo-ChatgptDuo" gpt_3_5_turbo_stream_FreeGpt = "gpt-3.5-turbo-stream-FreeGpt" - gpt_3_5_turbo_stream_ChatForAi = "gpt-3.5-turbo-stream-ChatForAi" gpt_3_5_turbo_stream_Cromicle = "gpt-3.5-turbo-stream-Cromicle" gpt_4_stream_Chatgpt4Online = "gpt-4-stream-Chatgpt4Online" gpt_3_5_turbo_stream_gptalk = "gpt-3.5-turbo-stream-gptalk" + gpt_3_5_turbo_stream_ChatgptDemo = "gpt-3.5-turbo-stream-ChatgptDemo" + gpt_3_5_turbo_stream_H2o = "gpt-3.5-turbo-stream-H2o" + gpt_3_5_turbo_stream_gptforlove = "gpt-3.5-turbo-stream-gptforlove" @classmethod def values(cls) -> set[str]: @@ -64,9 +61,6 @@ class ChatGptModelsEnum(StrEnum): @staticmethod def _deprecated() -> set[str]: return { - "gpt-3.5-turbo-Aichat", - "gpt-3.5-turbo-stream-ChatForAi", - "gpt-3.5-turbo-stream-AItianhuSpace", - "gpt-3.5-turbo-AItianhu", - "gpt-3.5-turbo-acytoo", + "gpt-3.5-turbo-stream-H2o", + "gpt-3.5-turbo-stream-gptforlove", } diff --git a/bot_microservice/core/bot/commands.py b/bot_microservice/core/bot/commands.py index 683ffe9..0384862 100644 --- a/bot_microservice/core/bot/commands.py +++ b/bot_microservice/core/bot/commands.py @@ -32,8 +32,8 @@ async def about_me(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: async def about_bot(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: if not update.effective_message: return None - chat_gpt_service = ChatGptService.build() - model = await chat_gpt_service.get_current_chatgpt_model() + chatgpt_service = ChatGptService.build() + model = await chatgpt_service.get_current_chatgpt_model() await update.effective_message.reply_text( f"Бот использует бесплатную модель {model} для ответов на вопросы. " f"\nПринимает запросы на разных языках.\n\nБот так же умеет переводить русские голосовые сообщения в текст. 
" @@ -69,9 +69,9 @@ async def ask_question(update: Update, context: ContextTypes.DEFAULT_TYPE) -> No await update.message.reply_text("Пожалуйста подождите, ответ в среднем занимает 10-15 секунд") - chat_gpt_service = ChatGptService.build() + chatgpt_service = ChatGptService.build() logger.warning("question asked", user=update.message.from_user, question=update.message.text) - answer = await chat_gpt_service.request_to_chatgpt(question=update.message.text) + answer = await chatgpt_service.request_to_chatgpt(question=update.message.text) await update.message.reply_text(answer) diff --git a/bot_microservice/core/bot/repository.py b/bot_microservice/core/bot/repository.py index c1ba803..589b659 100644 --- a/bot_microservice/core/bot/repository.py +++ b/bot_microservice/core/bot/repository.py @@ -9,7 +9,7 @@ from loguru import logger from sqlalchemy import delete, desc, select, update from sqlalchemy.dialects.sqlite import insert -from constants import CHAT_GPT_BASE_URI, INVALID_GPT_REQUEST_MESSAGES +from constants import CHATGPT_BASE_URI, INVALID_GPT_REQUEST_MESSAGES from core.bot.models.chat_gpt import ChatGpt from infra.database.db_adapter import Database from settings.config import AppSettings @@ -64,14 +64,14 @@ class ChatGPTRepository: result = await session.execute(query) return result.scalar_one() - async def ask_question(self, question: str, chat_gpt_model: str) -> str: + async def ask_question(self, question: str, chatgpt_model: str) -> str: try: - response = await self.request_to_chatgpt_microservice(question=question, chat_gpt_model=chat_gpt_model) + response = await self.request_to_chatgpt_microservice(question=question, chatgpt_model=chatgpt_model) status = response.status_code for message in INVALID_GPT_REQUEST_MESSAGES: if message in response.text: - message = f"{message}: {chat_gpt_model}" - logger.info(message, question=question, chat_gpt_model=chat_gpt_model) + message = f"{message}: {chatgpt_model}" + logger.info(message, question=question, chatgpt_model=chatgpt_model) return message if status != httpx.codes.OK: logger.info(f"got response status: {status} from chat api", response.text) @@ -81,19 +81,19 @@ class ChatGPTRepository: logger.error("error get data from chat api", error=error) return "Вообще всё сломалось :(" - async def request_to_chatgpt_microservice(self, question: str, chat_gpt_model: str) -> Response: - data = self._build_request_data(question=question, chat_gpt_model=chat_gpt_model) + async def request_to_chatgpt_microservice(self, question: str, chatgpt_model: str) -> Response: + data = self._build_request_data(question=question, chatgpt_model=chatgpt_model) transport = AsyncHTTPTransport(retries=3) async with AsyncClient(base_url=self.settings.GPT_BASE_HOST, transport=transport, timeout=50) as client: - return await client.post(CHAT_GPT_BASE_URI, json=data, timeout=50) + return await client.post(CHATGPT_BASE_URI, json=data, timeout=50) @staticmethod - def _build_request_data(*, question: str, chat_gpt_model: str) -> dict[str, Any]: + def _build_request_data(*, question: str, chatgpt_model: str) -> dict[str, Any]: return { "conversation_id": str(uuid4()), "action": "_ask", - "model": chat_gpt_model, + "model": chatgpt_model, "jailbreak": "default", "meta": { "id": random.randint(10**18, 10**19 - 1), # noqa: S311 diff --git a/bot_microservice/core/bot/services.py b/bot_microservice/core/bot/services.py index 2ef4307..edf7f68 100644 --- a/bot_microservice/core/bot/services.py +++ b/bot_microservice/core/bot/services.py @@ -96,12 +96,12 @@ class ChatGptService: 
async def request_to_chatgpt(self, question: str | None) -> str: question = question or "Привет!" - chat_gpt_model = await self.get_current_chatgpt_model() - return await self.repository.ask_question(question=question, chat_gpt_model=chat_gpt_model) + chatgpt_model = await self.get_current_chatgpt_model() + return await self.repository.ask_question(question=question, chatgpt_model=chatgpt_model) async def request_to_chatgpt_microservice(self, question: str) -> Response: - chat_gpt_model = await self.get_current_chatgpt_model() - return await self.repository.request_to_chatgpt_microservice(question=question, chat_gpt_model=chat_gpt_model) + chatgpt_model = await self.get_current_chatgpt_model() + return await self.repository.request_to_chatgpt_microservice(question=question, chatgpt_model=chatgpt_model) async def get_current_chatgpt_model(self) -> str: return await self.repository.get_current_chatgpt_model() diff --git a/bot_microservice/core/utils.py b/bot_microservice/core/utils.py index 548642d..2c97406 100644 --- a/bot_microservice/core/utils.py +++ b/bot_microservice/core/utils.py @@ -1,18 +1,26 @@ from datetime import datetime, timedelta -from functools import lru_cache, wraps +from functools import cache, wraps from inspect import cleandoc -from typing import Any +from typing import Any, Callable -def timed_cache(**timedelta_kwargs: Any) -> Any: - def _wrapper(func: Any) -> Any: - update_delta = timedelta(**timedelta_kwargs) +def timed_lru_cache( + microseconds: int = 0, + milliseconds: int = 0, + seconds: int = 0, + minutes: int = 0, + hours: int = 0, +) -> Any: + def _wrapper(func: Any) -> Callable[[Any], Any]: + update_delta = timedelta( + microseconds=microseconds, milliseconds=milliseconds, seconds=seconds, minutes=minutes, hours=hours + ) next_update = datetime.utcnow() + update_delta - # Apply @lru_cache to f with no cache size limit - cached_func = lru_cache(None)(func) + + cached_func = cache(func) @wraps(func) - def _wrapped(*args: Any, **kwargs: Any) -> Any: + def _wrapped(*args: Any, **kwargs: Any) -> Callable[[Any], Any]: nonlocal next_update now = datetime.utcnow() if now >= next_update: diff --git a/bot_microservice/infra/logging_conf.py b/bot_microservice/infra/logging_conf.py index 190f97d..26511e0 100644 --- a/bot_microservice/infra/logging_conf.py +++ b/bot_microservice/infra/logging_conf.py @@ -105,10 +105,11 @@ def configure_logging( {**base_loguru_handler, "colorize": True, "sink": sys.stdout}, ] - if settings.GRAYLOG_HOST and settings.GRAYLOG_PORT: + if settings.ENABLE_GRAYLOG: graylog_handler = graypy.GELFUDPHandler(settings.GRAYLOG_HOST, settings.GRAYLOG_PORT) base_config_handlers.append(graylog_handler) loguru_handlers.append({**base_loguru_handler, "sink": graylog_handler}) + if log_to_file: file_path = DIR_LOGS / log_to_file if not os.path.exists(log_to_file): diff --git a/bot_microservice/main.py b/bot_microservice/main.py index 981fcce..cd17243 100644 --- a/bot_microservice/main.py +++ b/bot_microservice/main.py @@ -42,7 +42,7 @@ class Application: log_to_file=settings.LOG_TO_FILE, ) - if settings.SENTRY_DSN is not None: + if settings.ENABLE_SENTRY: sentry_sdk.init( dsn=settings.SENTRY_DSN, environment=settings.DEPLOY_ENVIRONMENT, diff --git a/bot_microservice/settings/.env.template b/bot_microservice/settings/.env.template index 0c044fb..ffc4742 100644 --- a/bot_microservice/settings/.env.template +++ b/bot_microservice/settings/.env.template @@ -10,6 +10,7 @@ RELOAD="true" DEBUG="true" # ==== sentry ==== +ENABLE_SENTRY="false" SENTRY_DSN= 
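For reference, the `timed_lru_cache` decorator introduced in `core/utils.py` above can be written as a self-contained sketch. Two details are worth noting: the diff annotates `_wrapped` as returning `Callable[[Any], Any]`, but the wrapper actually returns the cached call's result, so `-> Any` is the accurate annotation; and the diff is cut off right after the expiry check, so the cache-reset step below is an assumption based on the usual pattern.

```python
from datetime import datetime, timedelta
from functools import cache, wraps
from typing import Any, Callable


def timed_lru_cache(
    microseconds: int = 0,
    milliseconds: int = 0,
    seconds: int = 0,
    minutes: int = 0,
    hours: int = 0,
) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
    """Cache a function's results and drop the whole cache after the given interval."""

    def _wrapper(func: Callable[..., Any]) -> Callable[..., Any]:
        update_delta = timedelta(
            microseconds=microseconds, milliseconds=milliseconds, seconds=seconds, minutes=minutes, hours=hours
        )
        next_update = datetime.utcnow() + update_delta

        # functools.cache is an unbounded lru_cache(maxsize=None)
        cached_func = cache(func)

        @wraps(func)
        def _wrapped(*args: Any, **kwargs: Any) -> Any:
            nonlocal next_update
            now = datetime.utcnow()
            if now >= next_update:
                # assumed continuation (the diff is truncated here):
                # clear the cache and schedule the next expiry
                cached_func.cache_clear()
                next_update = now + update_delta
            return cached_func(*args, **kwargs)

        return _wrapped

    return _wrapper


@timed_lru_cache(minutes=5)  # hypothetical usage
def get_current_model() -> str:
    return "gpt-3.5-turbo-stream-DeepAi"
```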
SENTRY_TRACES_SAMPLE_RATE="0.95"
DEPLOY_ENVIRONMENT="stage"
@@ -17,8 +18,11 @@ DEPLOY_ENVIRONMENT="stage"
 # ==== logs ====:
 ENABLE_JSON_LOGS="true"
 ENABLE_SENTRY_LOGS="false"
+
+ENABLE_GRAYLOG="false"
 GRAYLOG_HOST=
 GRAYLOG_PORT=
+
 LOG_TO_FILE="example.log"
 # ==== telegram settings ====
@@ -31,7 +35,7 @@ DOMAIN="https://mydomain.com"
 URL_PREFIX="/gpt"
 # ==== gpt settings ====
-GPT_BASE_HOST="http://chat_service:8858"
+GPT_BASE_HOST="http://chatgpt_chat_service:8858"
 # ==== other settings ====
 USER="web"
diff --git a/bot_microservice/settings/config.py b/bot_microservice/settings/config.py
index 247d314..39be065 100644
--- a/bot_microservice/settings/config.py
+++ b/bot_microservice/settings/config.py
@@ -29,12 +29,36 @@ load_dotenv(env_path, override=True)
 class SentrySettings(BaseSettings):
+    ENABLE_SENTRY: bool = False
     SENTRY_DSN: str | None = None
     DEPLOY_ENVIRONMENT: str | None = None
     SENTRY_TRACES_SAMPLE_RATE: float = 0.95

+    @model_validator(mode="after")
+    def validate_sentry_enabled(self) -> "SentrySettings":
+        if self.ENABLE_SENTRY and not self.SENTRY_DSN:
+            raise RuntimeError("sentry dsn must be set")
+        return self

-class AppSettings(SentrySettings, BaseSettings):
+
+class LoggingSettings(BaseSettings):
+    ENABLE_JSON_LOGS: bool = True
+    ENABLE_SENTRY_LOGS: bool = False
+
+    ENABLE_GRAYLOG: bool = False
+    GRAYLOG_HOST: str | None = None
+    GRAYLOG_PORT: int | None = None
+
+    LOG_TO_FILE: str | None = None
+
+    @model_validator(mode="after")
+    def validate_graylog_enabled(self) -> "LoggingSettings":
+        if self.ENABLE_GRAYLOG and not all([self.GRAYLOG_HOST, self.GRAYLOG_PORT]):
+            raise RuntimeError("graylog host and port must be set")
+        return self
+
+
+class AppSettings(SentrySettings, LoggingSettings, BaseSettings):
     """Application settings."""

     PROJECT_NAME: str = "chat gpt bot"
@@ -58,13 +82,7 @@ class AppSettings(SentrySettings, BaseSettings):

     # ==== gpt settings ====
     GPT_MODEL: str = "gpt-3.5-turbo-stream-DeepAi"
-    GPT_BASE_HOST: str = "http://chat_service:8858"
-
-    ENABLE_JSON_LOGS: bool = True
-    ENABLE_SENTRY_LOGS: bool = False
-    GRAYLOG_HOST: str | None = None
-    GRAYLOG_PORT: int | None = None
-    LOG_TO_FILE: str | None = None
+    GPT_BASE_HOST: str = "http://chatgpt_chat_service:8858"

     @model_validator(mode="before")  # type: ignore[arg-type]
     def validate_boolean_fields(self) -> Any:
@@ -75,6 +93,8 @@ class AppSettings(SentrySettings, BaseSettings):
             "START_WITH_WEBHOOK",
             "RELOAD",
             "DEBUG",
+            "ENABLE_GRAYLOG",
+            "ENABLE_SENTRY",
         ):
             setting_value: str | None = values_dict.get(value)
             if setting_value and setting_value.lower() == "false":
diff --git a/bot_microservice/tests/integration/bot/test_bot_api.py b/bot_microservice/tests/integration/bot/test_bot_api.py
index 9b0706d..39de19f 100644
--- a/bot_microservice/tests/integration/bot/test_bot_api.py
+++ b/bot_microservice/tests/integration/bot/test_bot_api.py
@@ -47,7 +47,7 @@ async def test_get_chatgpt_models(
 )


-async def test_change_chagpt_model_priority(
+async def test_change_chatgpt_model_priority(
     dbsession: Session,
     rest_client: AsyncClient,
     faker: Faker,
@@ -61,10 +61,9 @@
     upd_model1, upd_model2 = dbsession.query(ChatGpt).order_by(ChatGpt.priority).all()

     assert model1.model == upd_model1.model
+    assert model1.priority == upd_model1.priority
     assert model2.model == upd_model2.model
-
-    updated_from_db_model = dbsession.get(ChatGpt, model2.id)
-    assert updated_from_db_model.priority == priority  # type: ignore[union-attr]
+    assert upd_model2.priority == priority


 async def
test_reset_chatgpt_models_priority( diff --git a/bot_microservice/tests/integration/utils.py b/bot_microservice/tests/integration/utils.py index 808ba9e..b952cc0 100644 --- a/bot_microservice/tests/integration/utils.py +++ b/bot_microservice/tests/integration/utils.py @@ -4,7 +4,7 @@ from typing import Any, Iterator import respx from httpx import Response -from constants import CHAT_GPT_BASE_URI +from constants import CHATGPT_BASE_URI @contextmanager @@ -16,7 +16,7 @@ def mocked_ask_question_api( assert_all_called=True, base_url=host, ) as respx_mock: - ask_question_route = respx_mock.post(url=CHAT_GPT_BASE_URI, name="ask_question") + ask_question_route = respx_mock.post(url=CHATGPT_BASE_URI, name="ask_question") ask_question_route.return_value = return_value ask_question_route.side_effect = side_effect yield respx_mock diff --git a/chatgpt_microservice/.github/workflows/build.yaml b/chatgpt_microservice/.github/workflows/build.yaml index 1209042..36e6dea 100644 --- a/chatgpt_microservice/.github/workflows/build.yaml +++ b/chatgpt_microservice/.github/workflows/build.yaml @@ -51,14 +51,15 @@ jobs: password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Build the Docker image to main - if: github.ref_name == 'main' + if: github.ref_name == 'main' && github.repository_owner == 'balshgit' run: | docker build . -t ${{ secrets.DOCKERHUB_USERNAME }}/freegpt:latest - - - name: Docker image push to dev - if: github.ref_name == 'dev' - run: docker push ${{ secrets.DOCKERHUB_USERNAME }}/freegpt:dev + cd tools + docker build . -t ${{ secrets.DOCKERHUB_USERNAME }}/freegpt-zeus:latest + cd .. - name: Docker image push main - if: github.ref_name == 'main' - run: docker push ${{ secrets.DOCKERHUB_USERNAME }}/freegpt:latest + if: github.ref_name == 'main' && github.repository_owner == 'balshgit' + run: | + docker push ${{ secrets.DOCKERHUB_USERNAME }}/freegpt:latest + docker push ${{ secrets.DOCKERHUB_USERNAME }}/freegpt-zeus:latest diff --git a/chatgpt_microservice/README.md b/chatgpt_microservice/README.md index 7b2dcb9..705fbb1 100644 --- a/chatgpt_microservice/README.md +++ b/chatgpt_microservice/README.md @@ -7,9 +7,14 @@ This project features a WebUI utilizing the [G4F API](https://github.com/xtekky/gpt4free).
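The `settings/config.py` diff above moves the logging fields into a dedicated `LoggingSettings` class and guards the Sentry and Graylog integrations behind `ENABLE_*` flags checked by pydantic's `model_validator(mode="after")`. A minimal self-contained sketch of that pattern, with class and field names taken from the diff and the standard pydantic v2 / pydantic-settings imports assumed:

```python
from pydantic import model_validator
from pydantic_settings import BaseSettings


class LoggingSettings(BaseSettings):
    ENABLE_GRAYLOG: bool = False
    GRAYLOG_HOST: str | None = None
    GRAYLOG_PORT: int | None = None

    @model_validator(mode="after")
    def validate_graylog_enabled(self) -> "LoggingSettings":
        # fail fast at startup instead of silently dropping log records later
        if self.ENABLE_GRAYLOG and not all([self.GRAYLOG_HOST, self.GRAYLOG_PORT]):
            raise RuntimeError("graylog host and port must be set")
        return self


settings = LoggingSettings(ENABLE_GRAYLOG=False)  # ok: flag off, host/port not required

try:
    LoggingSettings(ENABLE_GRAYLOG=True)  # flag on without host/port
except RuntimeError as exc:
    print(f"rejected at startup: {exc}")
```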
Experience the power of ChatGPT with a user-friendly interface, enhanced jailbreaks, and completely free.
+
+## Support this repository:
+
+- ⭐ **Star the project:** starring this repository means a lot to me! 💕
+
 ## Getting Started :white_check_mark:
 To get started with this project, you'll need to clone the repository and have g++ >= 13.1 installed on your system.
-
+
 ### Cloning the Repository :inbox_tray:
 Run the following command to clone the repository:
@@ -70,6 +75,16 @@ docker run -p 8858:8858 -it --name freegpt -e CHAT_PATH=/chat -e PROVIDERS="[\"g
 docker run -p 8858:8858 -it --name freegpt -e IP_WHITE_LIST="[\"127.0.0.1\",\"192.168.1.1\"]" fantasypeak/freegpt:latest
 ```
+
+### Start the Zeus Service
+Zeus is an auxiliary service for cpp-freegpt-webui: some providers need extra operations, such as collecting cookies or refreshing web pages, that the main service cannot perform itself.
+If you need any of those providers, start the Zeus container first:
+```
+docker pull fantasypeak/freegpt-zeus:latest
+docker run --rm --net=host -it --name zeus fantasypeak/freegpt-zeus:latest
+docker pull fantasypeak/freegpt:latest
+docker run --rm --net=host -it --name freegpt fantasypeak/freegpt:latest
+```
+
 ### Call OpenAi Api
 ```
 // It supports calling OpenAI's API, but need set API_KEY
@@ -84,6 +99,10 @@ The application interface was incorporated from the [chatgpt-clone](https://gith

 ### API G4F
 The free GPT-4 API was incorporated from the [GPT4Free](https://github.com/xtekky/gpt4free) repository.

+## Star History Chart:
+
+[![Star History Chart](https://api.star-history.com/svg?repos=fantasy-peak/cpp-freegpt-webui&theme=light)](https://github.com/fantasy-peak/cpp-freegpt-webui/stargazers)
+
 ## Legal Notice
 This repository is _not_ associated with or endorsed by providers of the APIs contained in this GitHub repository. This project is intended **for educational purposes only**. This is just a little personal project.
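Outside the WebUI, the chat endpoint that the Telegram bot calls is plain HTTP: `POST /backend-api/v2/conversation` with the JSON payload built by `_build_request_data` in the bot diff above. Below is a hedged standalone sketch of that call; the `conversation_id`, `action`, `model`, `jailbreak`, and `meta.id` fields come straight from the diff, while the `meta.content.parts` shape is inferred from how the C++ providers read the prompt (`json.at("meta").at("content").at("parts").at(0).at("content")`), so treat it as an approximation rather than the documented contract:

```python
import random
from uuid import uuid4

import httpx

BASE_URL = "http://localhost:8858"  # assumes the freegpt container started as shown above


def ask(question: str, model: str = "gpt-3.5-turbo-stream-DeepAi") -> str:
    payload = {
        "conversation_id": str(uuid4()),
        "action": "_ask",
        "model": model,
        "jailbreak": "default",
        "meta": {
            "id": random.randint(10**18, 10**19 - 1),
            # inferred: providers read the prompt from meta.content.parts[0].content
            "content": {"parts": [{"content": question, "role": "user"}]},
        },
    }
    response = httpx.post(f"{BASE_URL}/backend-api/v2/conversation", json=payload, timeout=50)
    response.raise_for_status()
    return response.text  # the service streams the answer as plain text chunks


if __name__ == "__main__":
    print(ask("Hello!"))
```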
Sites may contact me to diff --git a/chatgpt_microservice/client/css/style.css b/chatgpt_microservice/client/css/style.css index a0327cb..e68b909 100644 --- a/chatgpt_microservice/client/css/style.css +++ b/chatgpt_microservice/client/css/style.css @@ -1,5 +1,36 @@ @import url("https://fonts.googleapis.com/css2?family=Inter:wght@100;200;300;400;500;600;700;800;900&display=swap"); +.adsbox { + backdrop-filter: blur(20px); + -webkit-backdrop-filter: blur(20px); + background-color: var(--blur-bg); + height: 100%; + width: 100%; + border-radius: var(--border-radius-1); + border: 1px solid var(--blur-border); +} + +.ads { + align-items: center; + margin: auto; + display: flex; + flex-direction: column; + gap: 16px; + max-width: 200px; + padding: var(--section-gap); + overflow: none; + flex-shrink: 0; + display: flex; + flex-direction: column; + justify-content: space-between; +} + +@media screen and (max-width: 728px) { + .ads { + display: none; + } +} + /* :root { --colour-1: #ffffff; --colour-2: #000000; @@ -28,6 +59,7 @@ --blur-border: #84719040; --user-input: #ac87bb; --conversations: #c7a2ff; + --conversations-hover: #c7a2ff4d; } :root { @@ -54,7 +86,7 @@ body { padding: var(--section-gap); background: var(--colour-1); color: var(--colour-3); - min-height: 100vh; + height: 100vh; } .row { @@ -85,10 +117,6 @@ body { .conversation { width: 100%; - min-height: 50%; - height: 100vh; - overflow-y: scroll; - overflow-x: hidden; display: flex; flex-direction: column; gap: 15px; @@ -96,16 +124,16 @@ body { .conversation #messages { width: 100%; + height: 100%; display: flex; flex-direction: column; + overflow: auto; overflow-wrap: break-word; - overflow-y: inherit; - overflow-x: hidden; padding-bottom: 50px; } .conversation .user-input { - max-height: 10vh; + max-height: 200px; } .conversation .user-input input { @@ -150,8 +178,6 @@ body { display: flex; flex-direction: column; gap: 16px; - flex: auto; - min-width: 0; } .conversations .title { @@ -162,10 +188,12 @@ body { .conversations .convo { padding: 8px 12px; display: flex; - gap: 18px; + gap: 10px; align-items: center; user-select: none; justify-content: space-between; + border: 1px dashed var(--conversations); + border-radius: var(--border-radius-1); } .conversations .convo .left { @@ -173,8 +201,6 @@ body { display: flex; align-items: center; gap: 10px; - flex: auto; - min-width: 0; } .conversations i { @@ -185,8 +211,6 @@ body { .convo-title { color: var(--colour-3); font-size: 14px; - overflow: hidden; - text-overflow: ellipsis; } .message { @@ -240,7 +264,6 @@ body { display: flex; flex-direction: column; gap: 18px; - min-width: 0; } .message .content p, @@ -265,8 +288,13 @@ body { cursor: pointer; user-select: none; background: transparent; - border: 1px dashed var(--conversations); + border: 1px solid var(--conversations); border-radius: var(--border-radius-1); + transition: all 0.2s ease; +} + +.new_convo:hover { + box-shadow: inset 0px 0px 20px var(--conversations-hover); } .new_convo span { @@ -274,9 +302,6 @@ body { font-size: 14px; } -.new_convo:hover { - border-style: solid; -} .stop_generating { position: absolute; @@ -388,9 +413,8 @@ input:checked+label:after { } .buttons { - min-height: 10vh; display: flex; - align-items: start; + align-items: center; justify-content: left; width: 100%; } @@ -408,15 +432,6 @@ input:checked+label:after { color: var(--colour-3); } -.disable-scrollbars::-webkit-scrollbar { - background: transparent; /* Chrome/Safari/Webkit */ - width: 0px; -} - -.disable-scrollbars { - scrollbar-width: none; /* 
Firefox */ - -ms-overflow-style: none; /* IE 10+ */ -} select { -webkit-border-radius: 8px; @@ -474,7 +489,7 @@ select { cursor: pointer; user-select: none; background: transparent; - border: 1px solid #c7a2ff; + border: 1px solid var(--conversations); border-radius: var(--border-radius-1); width: 100%; } @@ -491,6 +506,7 @@ select { overflow: auto; } + #cursor { line-height: 17px; margin-left: 3px; @@ -597,16 +613,14 @@ ul { } .buttons { - flex-wrap: wrap; - gap: 5px; - padding-bottom: 10vh; - margin-bottom: 10vh; -} + align-items: flex-start; + flex-wrap: wrap; + gap: 15px; + } .field { - min-height: 5%; - width: fit-content; -} + width: fit-content; + } .mobile-sidebar { display: flex !important; @@ -743,7 +757,7 @@ a:-webkit-any-link { } .color-picker input[type="radio"]#pink { - --radio-color: pink; + --radio-color: white; } .color-picker input[type="radio"]#blue { @@ -759,10 +773,18 @@ a:-webkit-any-link { } .pink { - --colour-1: hsl(310 50% 90%); - --clr-card-bg: hsl(310 50% 100%); - --colour-3: hsl(310 50% 15%); - --conversations: hsl(310 50% 25%); + --colour-1: #ffffff; + --colour-2: #000000; + --colour-3: #000000; + --colour-4: #000000; + --colour-5: #000000; + --colour-6: #000000; + + --accent: #ffffff; + --blur-bg: #98989866; + --blur-border: #00000040; + --user-input: #000000; + --conversations: #000000; } .blue { @@ -787,10 +809,18 @@ a:-webkit-any-link { } :root:has(#pink:checked) { - --colour-1: hsl(310 50% 90%); - --clr-card-bg: hsl(310 50% 100%); - --colour-3: hsl(310 50% 15%); - --conversations: hsl(310 50% 25%); + --colour-1: #ffffff; + --colour-2: #000000; + --colour-3: #000000; + --colour-4: #000000; + --colour-5: #000000; + --colour-6: #000000; + + --accent: #ffffff; + --blur-bg: #98989866; + --blur-border: #00000040; + --user-input: #000000; + --conversations: #000000; } :root:has(#blue:checked) { @@ -814,8 +844,18 @@ a:-webkit-any-link { --conversations: hsl(209 50% 80%); } -.trash-icon { - position: absolute; +#send-button { + border: 1px dashed #e4d4ffa6; + border-radius: 4px; + cursor: pointer; + padding-left: 8px; + padding-right: 5px; + padding-top: 2px; + padding-bottom: 2px; top: 20px; - right: 20px; + left: 8px; } + +#send-button:hover { + border: 1px solid #e4d4ffc9; +} \ No newline at end of file diff --git a/chatgpt_microservice/client/html/index.html b/chatgpt_microservice/client/html/index.html index 42a9317..7e1641c 100644 --- a/chatgpt_microservice/client/html/index.html +++ b/chatgpt_microservice/client/html/index.html @@ -77,10 +77,9 @@ Clear Conversations
- - By: Balsh
- Version: 0.0.7
- Release: 2023-09-28
+ + github: Balshgit
+ leave a star :)
diff --git a/chatgpt_microservice/deprecated/free_gpt.cpp b/chatgpt_microservice/deprecated/free_gpt.cpp index 5d8656c..6d09082 100644 --- a/chatgpt_microservice/deprecated/free_gpt.cpp +++ b/chatgpt_microservice/deprecated/free_gpt.cpp @@ -153,3 +153,777 @@ create_client: }); co_return; } + +boost::asio::awaitable FreeGpt::gptgod(std::shared_ptr ch, nlohmann::json json) { + co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable)); + ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }}; + boost::system::error_code err{}; + + auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get(); + + auto generate_token_hex = [](int32_t length) { + std::random_device rd; + std::stringstream ss; + std::mt19937 gen(rd()); + std::uniform_int_distribution<> dis(0, 15); + for (int i = 0; i < length; ++i) + ss << std::hex << dis(gen); + std::string token = ss.str(); + token = std::string(length * 2 - token.length(), '0') + token; + return token; + }; + + CURLcode res; + int32_t response_code; + + struct Input { + std::shared_ptr ch; + std::string recv; + }; + Input input{ch}; + + CURL* curl = curl_easy_init(); + if (!curl) { + auto error_info = std::format("curl_easy_init() failed:{}", curl_easy_strerror(res)); + co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable)); + ch->try_send(err, error_info); + co_return; + } + auto url = std::format("https://gptgod.site/api/session/free/gpt3p5?content={}&id={}", urlEncode(prompt), + generate_token_hex(16)); + curl_easy_setopt(curl, CURLOPT_URL, url.c_str()); + + if (!m_cfg.http_proxy.empty()) + curl_easy_setopt(curl, CURLOPT_PROXY, m_cfg.http_proxy.c_str()); + curlEasySetopt(curl); + + auto action_cb = [](void* contents, size_t size, size_t nmemb, void* userp) -> size_t { + auto input_ptr = static_cast(userp); + std::string data{(char*)contents, size * nmemb}; + auto& [ch, recv] = *input_ptr; + recv.append(data); + auto remove_quotes = [](const std::string& str) { + std::string result = str; + if (result.size() >= 2 && result.front() == '"' && result.back() == '"') { + result.erase(0, 1); + result.erase(result.size() - 1); + } + return result; + }; + while (true) { + auto position = recv.find("\n"); + if (position == std::string::npos) + break; + auto msg = recv.substr(0, position + 1); + recv.erase(0, position + 1); + msg.pop_back(); + if (msg.empty() || !msg.starts_with("data: ")) + continue; + msg.erase(0, 6); + boost::system::error_code err{}; + msg = remove_quotes(msg); + if (msg.empty()) + continue; + boost::asio::post(ch->get_executor(), [=, content = std::move(msg)] { ch->try_send(err, content); }); + } + return size * nmemb; + }; + size_t (*action_fn)(void* contents, size_t size, size_t nmemb, void* userp) = action_cb; + curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, action_fn); + curl_easy_setopt(curl, CURLOPT_WRITEDATA, &input); + + struct curl_slist* headers = nullptr; + headers = curl_slist_append(headers, "Content-Type: application/json"); + headers = curl_slist_append(headers, "Referer: https://gptgod.site/"); + headers = curl_slist_append(headers, "Alt-Used: gptgod.site"); + headers = curl_slist_append(headers, "Accept: text/event-stream"); + uint64_t timestamp = + std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count(); + auto auth_timestamp = std::format("x-auth-timestamp: {}", timestamp); + headers = curl_slist_append(headers, auth_timestamp.c_str()); + 
curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); + + ScopeExit auto_exit{[=] { + curl_slist_free_all(headers); + curl_easy_cleanup(curl); + }}; + + res = curl_easy_perform(curl); + if (res != CURLE_OK) { + co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable)); + auto error_info = std::format("curl_easy_perform() failed:{}", curl_easy_strerror(res)); + ch->try_send(err, error_info); + co_return; + } + curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response_code); + if (response_code != 200) { + co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable)); + ch->try_send(err, std::format("gptgod http code:{}", response_code)); + co_return; + } +} + +boost::asio::awaitable FreeGpt::aiChat(std::shared_ptr ch, nlohmann::json json) { + ScopeExit auto_exit{[&] { ch->close(); }}; + boost::system::error_code err{}; + + auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get(); + + constexpr std::string_view host = "chat-gpt.org"; + constexpr std::string_view port = "443"; + + boost::beast::http::request req{boost::beast::http::verb::post, "/api/text", 11}; + req.set(boost::beast::http::field::host, host); + req.set("authority", "chat-gpt.org"); + req.set("accept", "*/*"); + req.set("cache-control", "no-cache"); + req.set(boost::beast::http::field::content_type, "application/json"); + req.set(boost::beast::http::field::origin, "https://chat-gpt.org"); + req.set("pragma", "no-cache"); + req.set(boost::beast::http::field::referer, "https://chat-gpt.org/chat"); + req.set("sec-ch-ua-mobile", "?0"); + req.set("sec-ch-ua-platform", R"("macOS")"); + req.set("sec-fetch-dest", "empty"); + req.set("sec-fetch-mode", "cors"); + req.set("sec-fetch-site", "same-origin"); + req.set( + boost::beast::http::field::user_agent, + R"(Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36)"); + + nlohmann::json data{ + {"message", std::format("user: {}\nassistant:", prompt)}, + {"temperature", 0.5}, + {"presence_penalty", 0}, + {"top_p", 1}, + {"frequency_penalty", 0}, + }; + req.body() = data.dump(); + req.prepare_payload(); + + auto ret = co_await sendRequestRecvResponse(req, host, port, std::bind_front(&FreeGpt::createHttpClient, *this)); + if (!ret.has_value()) { + co_await ch->async_send(err, ret.error(), use_nothrow_awaitable); + co_return; + } + auto& [res, ctx, stream_] = ret.value(); + if (boost::beast::http::status::ok != res.result()) { + SPDLOG_ERROR("http status code: {}", res.result_int()); + co_await ch->async_send(err, res.reason(), use_nothrow_awaitable); + co_return; + } + + nlohmann::json rsp = nlohmann::json::parse(res.body(), nullptr, false); + if (rsp.is_discarded()) { + SPDLOG_ERROR("json parse error"); + co_await ch->async_send(err, "json parse error", use_nothrow_awaitable); + co_return; + } + SPDLOG_INFO("rsp: {}", rsp.dump()); + co_await ch->async_send(err, rsp.value("message", rsp.dump()), use_nothrow_awaitable); + co_return; +} + +boost::asio::awaitable FreeGpt::aiTianhuSpace(std::shared_ptr ch, nlohmann::json json) { + co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable)); + + boost::system::error_code err{}; + ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }}; + auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get(); + + CURLcode res; + CURL* curl = curl_easy_init(); + if (!curl) { + auto error_info 
= std::format("curl_easy_init() failed:{}", curl_easy_strerror(res)); + co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable)); + ch->try_send(err, error_info); + co_return; + } + auto random = [](int len) { + static std::string chars{"abcdefghijklmnopqrstuvwxyz0123456789"}; + static std::string letter{"abcdefghijklmnopqrstuvwxyz"}; + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_int_distribution<> dis(0, 1000000); + std::string random_string; + random_string += chars[dis(gen) % letter.length()]; + len = len - 1; + for (int i = 0; i < len; i++) + random_string += chars[dis(gen) % chars.length()]; + return random_string; + }; + auto url = std::format("https://{}.aitianhu.space/api/chat-process", random(6)); + SPDLOG_INFO("url: [{}]", url); + curl_easy_setopt(curl, CURLOPT_URL, url.c_str()); + if (!m_cfg.http_proxy.empty()) + curl_easy_setopt(curl, CURLOPT_PROXY, m_cfg.http_proxy.c_str()); + + struct Input { + std::shared_ptr ch; + std::string recv; + }; + Input input{ch}; + auto action_cb = [](void* contents, size_t size, size_t nmemb, void* userp) -> size_t { + boost::system::error_code err{}; + auto input_ptr = static_cast(userp); + std::string data{(char*)contents, size * nmemb}; + auto& [ch, recv] = *input_ptr; + recv.append(data); + while (true) { + auto position = recv.find("\n"); + if (position == std::string::npos) + break; + auto msg = recv.substr(0, position + 1); + recv.erase(0, position + 1); + msg.pop_back(); + if (msg.empty() || !msg.contains("content")) + continue; + boost::system::error_code err{}; + nlohmann::json line_json = nlohmann::json::parse(msg, nullptr, false); + if (line_json.is_discarded()) { + SPDLOG_ERROR("json parse error: [{}]", msg); + boost::asio::post(ch->get_executor(), + [=] { ch->try_send(err, std::format("json parse error: [{}]", msg)); }); + continue; + } + auto str = line_json["detail"]["choices"][0]["delta"]["content"].get(); + if (!str.empty()) + boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, str); }); + } + return size * nmemb; + }; + size_t (*action_fn)(void* contents, size_t size, size_t nmemb, void* userp) = action_cb; + curlEasySetopt(curl); + curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, action_fn); + curl_easy_setopt(curl, CURLOPT_WRITEDATA, &input); + + constexpr std::string_view request_str{R"({ + "prompt":"hello", + "options":{}, + "systemMessage":"You are ChatGPT, a large language model trained by OpenAI. 
Follow the user's instructions carefully.", + "temperature":0.8, + "top_p":1 + })"}; + nlohmann::json request = nlohmann::json::parse(request_str, nullptr, false); + request["prompt"] = prompt; + auto str = request.dump(); + curl_easy_setopt(curl, CURLOPT_POSTFIELDS, str.c_str()); + + struct curl_slist* headers = nullptr; + headers = curl_slist_append(headers, "Content-Type: application/json"); + curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); + + ScopeExit auto_exit{[=] { + curl_slist_free_all(headers); + curl_easy_cleanup(curl); + }}; + + res = curl_easy_perform(curl); + + if (res != CURLE_OK) { + co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable)); + auto error_info = std::format("curl_easy_perform() failed:{}", curl_easy_strerror(res)); + ch->try_send(err, error_info); + co_return; + } + int32_t response_code; + curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response_code); + if (response_code != 200) { + co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable)); + ch->try_send(err, std::format("you http code:{}", response_code)); + co_return; + } + co_return; +} + +boost::asio::awaitable FreeGpt::aiTianhu(std::shared_ptr ch, nlohmann::json json) { + boost::asio::post(*m_thread_pool_ptr, [=, this] { + ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }}; + boost::system::error_code err{}; + + auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get(); + + CURLcode res; + CURL* curl = curl_easy_init(); + if (!curl) { + auto error_info = std::format("curl_easy_init() failed:{}", curl_easy_strerror(res)); + boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, error_info); }); + return; + } + curl_easy_setopt(curl, CURLOPT_URL, "https://www.aitianhu.com/api/chat-process"); + + if (!m_cfg.http_proxy.empty()) + curl_easy_setopt(curl, CURLOPT_PROXY, m_cfg.http_proxy.c_str()); + + auto cb = [](void* contents, size_t size, size_t nmemb, void* userp) -> size_t { + auto recv_data_ptr = static_cast(userp); + std::string data{(char*)contents, size * nmemb}; + recv_data_ptr->append(data); + return size * nmemb; + }; + size_t (*fn)(void* contents, size_t size, size_t nmemb, void* userp) = cb; + + std::string recv_data; + curlEasySetopt(curl); + curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, fn); + curl_easy_setopt(curl, CURLOPT_WRITEDATA, &recv_data); + + constexpr std::string_view json_str = R"({ + "prompt":"hello", + "options":{}, + "systemMessage":"You are ChatGPT, a large language model trained by OpenAI. 
Follow the user's instructions carefully.", + "temperature":0.8, + "top_p":1 + })"; + nlohmann::json request = nlohmann::json::parse(json_str, nullptr, false); + + request["prompt"] = prompt; + SPDLOG_INFO("{}", request.dump(2)); + auto str = request.dump(); + curl_easy_setopt(curl, CURLOPT_POSTFIELDS, str.c_str()); + + struct curl_slist* headers = nullptr; + headers = curl_slist_append(headers, "Content-Type: application/json"); + curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); + + ScopeExit auto_exit{[=] { + curl_slist_free_all(headers); + curl_easy_cleanup(curl); + }}; + + res = curl_easy_perform(curl); + + if (res != CURLE_OK) { + auto error_info = std::format("curl_easy_perform() failed:{}", curl_easy_strerror(res)); + boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, error_info); }); + return; + } + int32_t response_code; + curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response_code); + if (response_code != 200) { + boost::asio::post(ch->get_executor(), + [=] { ch->try_send(err, std::format("aiTianhu http code:{}", response_code)); }); + return; + } + auto lines = recv_data | std::views::split('\n') | std::views::transform([](auto&& rng) { + return std::string_view(&*rng.begin(), std::ranges::distance(rng.begin(), rng.end())); + }) | + to>(); + if (lines.empty()) { + SPDLOG_ERROR("lines empty"); + return; + } + nlohmann::json rsp = nlohmann::json::parse(lines.back(), nullptr, false); + if (rsp.is_discarded()) { + SPDLOG_ERROR("json parse error"); + ch->try_send(err, std::format("json parse error: {}", lines.back())); + return; + } + ch->try_send(err, rsp.value("text", rsp.dump())); + return; + }); + co_return; +} + +boost::asio::awaitable FreeGpt::acytoo(std::shared_ptr ch, nlohmann::json json) { + boost::system::error_code err{}; + ScopeExit auto_exit{[&] { ch->close(); }}; + + auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get(); + + constexpr std::string_view host = "chat.acytoo.com"; + constexpr std::string_view port = "443"; + + boost::beast::http::request req{boost::beast::http::verb::post, + "/api/completions", 11}; + req.set(boost::beast::http::field::host, host); + req.set( + boost::beast::http::field::user_agent, + R"(Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36)"); + req.set("Accept", "*/*"); + req.set("Accept-Encoding", "gzip, deflate"); + req.set(boost::beast::http::field::content_type, "application/json"); + + constexpr std::string_view json_str = R"({ + "key":"", + "model":"gpt-3.5-turbo", + "messages":[ + { + "role":"user", + "content":"user: hello\nassistant:", + "createdAt":1688518523500 + } + ], + "temperature":0.5, + "password":"" + })"; + nlohmann::json request = nlohmann::json::parse(json_str, nullptr, false); + + request["messages"][0]["content"] = std::format("user: {}\nassistant:", prompt); + auto time_now = std::chrono::system_clock::now(); + auto duration_in_ms = std::chrono::duration_cast(time_now.time_since_epoch()); + request["messages"][0]["createdAt"] = duration_in_ms.count(); + SPDLOG_INFO("{}", request.dump(2)); + + req.body() = request.dump(); + req.prepare_payload(); + + auto ret = co_await sendRequestRecvResponse(req, host, port, std::bind_front(&FreeGpt::createHttpClient, *this)); + if (!ret.has_value()) { + co_await ch->async_send(err, ret.error(), use_nothrow_awaitable); + co_return; + } + auto& [res, ctx, stream_] = ret.value(); + if (boost::beast::http::status::ok != res.result()) { + SPDLOG_ERROR("http status code: {}", 
res.result_int()); + co_await ch->async_send(err, res.reason(), use_nothrow_awaitable); + co_return; + } + auto decompress_value = decompress(res); + if (!decompress_value.has_value()) { + SPDLOG_ERROR("decompress error"); + co_await ch->async_send(err, decompress_value.error(), use_nothrow_awaitable); + co_return; + } + auto& body = decompress_value.value(); + co_await ch->async_send(err, std::move(body), use_nothrow_awaitable); + co_return; +} + +boost::asio::awaitable FreeGpt::cromicle(std::shared_ptr ch, nlohmann::json json) { + co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable)); + + boost::system::error_code err{}; + ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }}; + auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get(); + + CURLcode res; + CURL* curl = curl_easy_init(); + if (!curl) { + auto error_info = std::format("curl_easy_init() failed:{}", curl_easy_strerror(res)); + co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable)); + ch->try_send(err, error_info); + co_return; + } + curl_easy_setopt(curl, CURLOPT_URL, "https://cromicle.top/chat"); + if (!m_cfg.http_proxy.empty()) + curl_easy_setopt(curl, CURLOPT_PROXY, m_cfg.http_proxy.c_str()); + + struct Input { + std::shared_ptr ch; + std::string recv; + }; + Input input{ch}; + auto action_cb = [](void* contents, size_t size, size_t nmemb, void* userp) -> size_t { + boost::system::error_code err{}; + auto input_ptr = static_cast(userp); + std::string data{(char*)contents, size * nmemb}; + auto& [ch, recv] = *input_ptr; + boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, data); }); + return size * nmemb; + }; + size_t (*action_fn)(void* contents, size_t size, size_t nmemb, void* userp) = action_cb; + curlEasySetopt(curl); + curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, action_fn); + curl_easy_setopt(curl, CURLOPT_WRITEDATA, &input); + + auto generate_signature = [](const std::string& message) { + std::stringstream ss; + ss << "asdap" << message; + std::string data = ss.str(); + + unsigned char digest[SHA256_DIGEST_LENGTH]; + SHA256(reinterpret_cast(data.c_str()), data.length(), digest); + + std::stringstream sha_stream; + for (int i = 0; i < SHA256_DIGEST_LENGTH; i++) { + sha_stream << std::setfill('0') << std::setw(2) << std::hex << static_cast(digest[i]); + } + return sha_stream.str(); + }; + std::string signature = generate_signature(prompt); + + constexpr std::string_view request_str{R"({ + "message": "hello", + "hash": "dda6ea4e1dc215f198084018b1df20cfeafe9fbdfe31d8a350d6917509158d8a", + "token": "asdap" + })"}; + nlohmann::json request = nlohmann::json::parse(request_str, nullptr, false); + + request["hash"] = signature; + request["message"] = prompt; + + auto str = request.dump(); + SPDLOG_INFO("request : [{}]", str); + + curl_easy_setopt(curl, CURLOPT_POSTFIELDS, str.c_str()); + + struct curl_slist* headers = nullptr; + headers = curl_slist_append(headers, "Content-Type: application/json"); + curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); + + ScopeExit auto_exit{[=] { + curl_slist_free_all(headers); + curl_easy_cleanup(curl); + }}; + + res = curl_easy_perform(curl); + + if (res != CURLE_OK) { + co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable)); + auto error_info = std::format("curl_easy_perform() failed:{}", curl_easy_strerror(res)); + ch->try_send(err, error_info); + co_return; + } + int32_t 
response_code; + curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response_code); + if (response_code != 200) { + co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable)); + ch->try_send(err, std::format("you http code:{}", response_code)); + co_return; + } + co_return; +} + +boost::asio::awaitable FreeGpt::h2o(std::shared_ptr ch, nlohmann::json json) { + boost::system::error_code err{}; + ScopeExit auto_exit{[&] { ch->close(); }}; + + auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get(); + + constexpr std::string_view host = "gpt-gm.h2o.ai"; + constexpr std::string_view port = "443"; + + constexpr std::string_view user_agent{ + R"(Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0)"}; + + boost::beast::http::request req_init_cookie{boost::beast::http::verb::get, "/", + 11}; + req_init_cookie.set(boost::beast::http::field::host, host); + req_init_cookie.set(boost::beast::http::field::user_agent, user_agent); + + auto ret = co_await sendRequestRecvResponse(req_init_cookie, host, port, + std::bind_front(&FreeGpt::createHttpClient, *this)); + if (!ret.has_value()) { + co_await ch->async_send(err, ret.error(), use_nothrow_awaitable); + co_return; + } + auto& [response, ctx, stream_] = ret.value(); + if (boost::beast::http::status::ok != response.result()) { + SPDLOG_ERROR("http status code: {}", response.result_int()); + co_await ch->async_send(err, response.reason(), use_nothrow_awaitable); + co_return; + } + auto fields = splitString(response["Set-Cookie"], " "); + if (fields.empty()) { + std::stringstream ss; + ss << response.base(); + SPDLOG_ERROR("get cookie error: {}", ss.str()); + co_await ch->async_send(err, "can't get cookie", use_nothrow_awaitable); + co_return; + } + fields[0].pop_back(); + std::string cookie{std::move(fields[0])}; + SPDLOG_INFO("cookie: {}", cookie); + { + boost::beast::http::request req_init_setting{boost::beast::http::verb::post, + "/settings", 11}; + req_init_setting.set("Cookie", cookie); + req_init_setting.set(boost::beast::http::field::host, host); + req_init_setting.set(boost::beast::http::field::user_agent, user_agent); + req_init_setting.set("Accept", + "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8"); + req_init_setting.set("Accept-Language", "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3"); + req_init_setting.set("Content-Type", "application/x-www-form-urlencoded"); + req_init_setting.set("Upgrade-Insecure-Requests", "1"); + req_init_setting.set("Sec-Fetch-Dest", "document"); + req_init_setting.set("Sec-Fetch-Mode", "navigate"); + req_init_setting.set("Sec-Fetch-Site", "same-origin"); + req_init_setting.set("Sec-Fetch-User", "?1"); + req_init_setting.set("Referer", "https://gpt-gm.h2o.ai/r/jGfKSwU"); + + std::stringstream ss1; + ss1 << "ethicsModalAccepted=true&"; + ss1 << "shareConversationsWithModelAuthors=true&"; + ss1 << "ethicsModalAcceptedAt=" + << "&"; + ss1 << "activeModel=h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1&"; + ss1 << "searchEnabled=true"; + + req_init_setting.body() = ss1.str(); + req_init_setting.prepare_payload(); + + auto [ec, count] = co_await boost::beast::http::async_write(stream_, req_init_setting, use_nothrow_awaitable); + if (ec) { + SPDLOG_ERROR("{}", ec.message()); + co_await ch->async_send(err, ec.message(), use_nothrow_awaitable); + co_return; + } + boost::beast::flat_buffer b; + boost::beast::http::response res; + std::tie(ec, count) = co_await boost::beast::http::async_read(stream_, b, 
res, use_nothrow_awaitable); + if (ec) { + SPDLOG_ERROR("{}", ec.message()); + co_await ch->async_send(err, ec.message(), use_nothrow_awaitable); + co_return; + } + if (res.result_int() != 303) { + std::string reason{res.reason()}; + SPDLOG_ERROR("reason: {}", reason); + co_await ch->async_send( + err, std::format("return unexpected http status code: {}({})", res.result_int(), reason), + use_nothrow_awaitable); + co_return; + } + { + boost::beast::http::request req_init_cookie{boost::beast::http::verb::get, + "/r/jGfKSwU", 11}; + req_init_cookie.set(boost::beast::http::field::host, host); + req_init_cookie.set(boost::beast::http::field::user_agent, user_agent); + auto [ec, count] = + co_await boost::beast::http::async_write(stream_, req_init_cookie, use_nothrow_awaitable); + if (ec) { + SPDLOG_ERROR("{}", ec.message()); + co_await ch->async_send(err, ec.message(), use_nothrow_awaitable); + co_return; + } + boost::beast::flat_buffer b; + boost::beast::http::response res; + std::tie(ec, count) = co_await boost::beast::http::async_read(stream_, b, res, use_nothrow_awaitable); + if (ec) { + SPDLOG_ERROR("{}", ec.message()); + co_await ch->async_send(err, ec.message(), use_nothrow_awaitable); + co_return; + } + if (res.result_int() != 200) { + std::string reason{res.reason()}; + SPDLOG_ERROR("reason: {}", reason); + co_await ch->async_send( + err, std::format("return unexpected http status code: {}({})", res.result_int(), reason), + use_nothrow_awaitable); + co_return; + } + } + } + std::string conversation_id; + { + boost::beast::http::request req_init_conversation{ + boost::beast::http::verb::post, "/conversation", 11}; + req_init_conversation.set("Cookie", cookie); + req_init_conversation.set(boost::beast::http::field::host, host); + req_init_conversation.set(boost::beast::http::field::user_agent, user_agent); + req_init_conversation.set("Accept", "*/*"); + req_init_conversation.set("Accept-Encoding", "gzip, deflate"); + req_init_conversation.set("Content-Type", "application/json"); + req_init_conversation.set("Accept-Language", "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3"); + req_init_conversation.set("Sec-Fetch-Dest", "empty"); + req_init_conversation.set("Sec-Fetch-Mode", "cors"); + req_init_conversation.set("Sec-Fetch-Site", "same-origin"); + req_init_conversation.set("Referer", "https://gpt-gm.h2o.ai/"); + req_init_conversation.body() = R"({"model": "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1"})"; + req_init_conversation.prepare_payload(); + + auto [ec, count] = + co_await boost::beast::http::async_write(stream_, req_init_conversation, use_nothrow_awaitable); + if (ec) { + SPDLOG_ERROR("{}", ec.message()); + co_await ch->async_send(err, ec.message(), use_nothrow_awaitable); + co_return; + } + boost::beast::flat_buffer b; + boost::beast::http::response res; + std::tie(ec, count) = co_await boost::beast::http::async_read(stream_, b, res, use_nothrow_awaitable); + if (ec) { + SPDLOG_ERROR("{}", ec.message()); + co_await ch->async_send(err, ec.message(), use_nothrow_awaitable); + co_return; + } + if (res.result_int() != 200) { + std::string reason{res.reason()}; + SPDLOG_ERROR("reason: {}", reason); + co_await ch->async_send( + err, std::format("return unexpected http status code: {}({})", res.result_int(), reason), + use_nothrow_awaitable); + co_return; + } + std::cout << res.body() << std::endl; + nlohmann::json rsp_json = nlohmann::json::parse(res.body(), nullptr, false); + if (rsp_json.is_discarded()) { + SPDLOG_ERROR("json parse error: [{}]", fields.back()); + ch->try_send(err, 
std::format("json parse error: [{}]", fields.back())); + co_return; + } + if (!rsp_json.contains("conversationId")) { + SPDLOG_ERROR("not contains conversationId: {}", res.body()); + co_await ch->async_send(err, res.body(), use_nothrow_awaitable); + co_return; + } + conversation_id = rsp_json["conversationId"].get(); + } + + constexpr std::string_view json_str = R"({ + "inputs":"user: hello\nassistant: ", + "parameters":{ + "temperature":0.4, + "truncate":2048, + "max_new_tokens":1024, + "do_sample":true, + "repetition_penalty":1.2, + "return_full_text":false + }, + "stream":true, + "options":{ + "id":"64cf9d83-7b0d-4851-82b5-6f9090652494", + "response_id":"f76711da-6761-4055-9a05-84a8afce0198", + "is_retry":false, + "use_cache":false, + "web_search_id":"" + } + })"; + nlohmann::json request = nlohmann::json::parse(json_str, nullptr, false); + request["inputs"] = std::format("user: {}\nassistant: ", prompt); + request["response_id"] = conversation_id; + request["id"] = createUuidString(); + + boost::beast::http::request req{ + boost::beast::http::verb::post, std::format("/conversation/{}", conversation_id), 11}; + req.set("Cookie", cookie); + req.set(boost::beast::http::field::host, host); + req.set(boost::beast::http::field::user_agent, user_agent); + req.set("Accept", "*/*"); + // req.set("Accept-Encoding", "gzip, deflate"); + req.set("Content-Type", "application/json"); + req.set("Accept-Language", "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3"); + req.set("Sec-Fetch-Dest", "empty"); + req.set("Sec-Fetch-Mode", "cors"); + req.set("Sec-Fetch-Site", "same-origin"); + req.set("Referer", "https://gpt-gm.h2o.ai/"); + req.body() = request.dump(); + req.prepare_payload(); + + std::string recv; + co_await sendRequestRecvChunk(ch, stream_, req, 200, [&ch, &recv](std::string chunk_str) { + recv.append(chunk_str); + while (true) { + auto position = recv.find("\n"); + if (position == std::string::npos) + break; + auto msg = recv.substr(0, position + 1); + recv.erase(0, position + 1); + msg.pop_back(); + if (msg.empty() || !msg.contains("text")) + continue; + auto fields = splitString(msg, "data:"); + boost::system::error_code err{}; + nlohmann::json line_json = nlohmann::json::parse(fields.back(), nullptr, false); + if (line_json.is_discarded()) { + SPDLOG_ERROR("json parse error: [{}]", fields.back()); + ch->try_send(err, std::format("json parse error: [{}]", fields.back())); + continue; + } + auto str = line_json["token"]["text"].get(); + if (!str.empty() && str != "<|endoftext|>") + ch->try_send(err, str); + } + }); + co_return; +} diff --git a/chatgpt_microservice/git-clang-format.py b/chatgpt_microservice/git-clang-format.py index d7821ac..dec5e6f 100644 --- a/chatgpt_microservice/git-clang-format.py +++ b/chatgpt_microservice/git-clang-format.py @@ -33,9 +33,9 @@ import re import subprocess import sys -usage = 'git clang-format [OPTIONS] [] [] [--] [...]' +usage = "git clang-format [OPTIONS] [] [] [--] [...]" -desc = ''' +desc = """ If zero or one commits are given, run clang-format on all lines that differ between the working directory and , which defaults to HEAD. Changes are only applied to the working directory. @@ -48,14 +48,14 @@ The following git-config settings set the default of the corresponding option: clangFormat.commit clangFormat.extension clangFormat.style -''' +""" # Name of the temporary index file in which save the output of clang-format. # This file is created within the .git directory. 
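Stepping back from the `free_gpt.cpp` listings above: nearly every streaming provider there follows one pattern — a libcurl write callback appends each received chunk to a string buffer, repeatedly slices complete lines off the front, and forwards the payload of lines that begin with `data: ` to the response channel. The same buffering logic, expressed as a short Python generator for readability (names and the sample chunks are illustrative):

```python
from typing import Iterable, Iterator


def iter_sse_payloads(chunks: Iterable[str]) -> Iterator[str]:
    """Yield the payload of each complete 'data: ...' line from a chunked stream.

    Mirrors the C++ write-callback logic above: accumulate, split on newlines,
    skip anything that is not an SSE data line, strip surrounding quotes.
    """
    buffer = ""
    for chunk in chunks:
        buffer += chunk
        while True:
            position = buffer.find("\n")
            if position == -1:  # no complete line buffered yet
                break
            line, buffer = buffer[:position], buffer[position + 1 :]
            if not line.startswith("data: "):
                continue
            payload = line[len("data: "):].strip('"')
            if payload:
                yield payload


# prints "Hello world": the first chunk ends mid-line and stays buffered
for text in iter_sse_payloads(['data: "Hel', 'lo"\ndata: " world"\n']):
    print(text, end="")
```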
-temp_index_basename = 'clang-format-index' +temp_index_basename = "clang-format-index" -Range = collections.namedtuple('Range', 'start, count') +Range = collections.namedtuple("Range", "start, count") def main(): @@ -66,61 +66,61 @@ def main(): # nargs=argparse.REMAINDER disallows options after positionals.) argv = sys.argv[1:] try: - idx = argv.index('--') + idx = argv.index("--") except ValueError: dash_dash = [] else: dash_dash = argv[idx:] argv = argv[:idx] - default_extensions = ','.join( + default_extensions = ",".join( [ # From clang/lib/Frontend/FrontendOptions.cpp, all lower case - 'c', - 'h', # C - 'm', # ObjC - 'mm', # ObjC++ - 'cc', - 'cp', - 'cpp', - 'c++', - 'cxx', - 'hh', - 'hpp', - 'hxx', # C++ - 'cu', # CUDA + "c", + "h", # C + "m", # ObjC + "mm", # ObjC++ + "cc", + "cp", + "cpp", + "c++", + "cxx", + "hh", + "hpp", + "hxx", # C++ + "cu", # CUDA # Other languages that clang-format supports - 'proto', - 'protodevel', # Protocol Buffers - 'java', # Java - 'js', # JavaScript - 'ts', # TypeScript - 'cs', # C Sharp + "proto", + "protodevel", # Protocol Buffers + "java", # Java + "js", # JavaScript + "ts", # TypeScript + "cs", # C Sharp ] ) p = argparse.ArgumentParser(usage=usage, formatter_class=argparse.RawDescriptionHelpFormatter, description=desc) - p.add_argument('--binary', default=config.get('clangformat.binary', 'clang-format'), help='path to clang-format'), + p.add_argument("--binary", default=config.get("clangformat.binary", "clang-format"), help="path to clang-format"), p.add_argument( - '--commit', default=config.get('clangformat.commit', 'HEAD'), help='default commit to use if none is specified' + "--commit", default=config.get("clangformat.commit", "HEAD"), help="default commit to use if none is specified" ), - p.add_argument('--diff', action='store_true', help='print a diff instead of applying the changes') + p.add_argument("--diff", action="store_true", help="print a diff instead of applying the changes") p.add_argument( - '--extensions', - default=config.get('clangformat.extensions', default_extensions), - help=('comma-separated list of file extensions to format, ' 'excluding the period and case-insensitive'), + "--extensions", + default=config.get("clangformat.extensions", default_extensions), + help=("comma-separated list of file extensions to format, " "excluding the period and case-insensitive"), ), - p.add_argument('-f', '--force', action='store_true', help='allow changes to unstaged files') - p.add_argument('-p', '--patch', action='store_true', help='select hunks interactively') - p.add_argument('-q', '--quiet', action='count', default=0, help='print less information') - p.add_argument('--style', default=config.get('clangformat.style', None), help='passed to clang-format'), - p.add_argument('-v', '--verbose', action='count', default=0, help='print extra information') + p.add_argument("-f", "--force", action="store_true", help="allow changes to unstaged files") + p.add_argument("-p", "--patch", action="store_true", help="select hunks interactively") + p.add_argument("-q", "--quiet", action="count", default=0, help="print less information") + p.add_argument("--style", default=config.get("clangformat.style", None), help="passed to clang-format"), + p.add_argument("-v", "--verbose", action="count", default=0, help="print extra information") # We gather all the remaining positional arguments into 'args' since we need # to use some heuristics to determine whether or not was present. # However, to print pretty messages, we make use of metavar and help. 
- p.add_argument('args', nargs='*', metavar='', help='revision from which to compute the diff') + p.add_argument("args", nargs="*", metavar="", help="revision from which to compute the diff") p.add_argument( - 'ignored', nargs='*', metavar='...', help='if specified, only consider differences in these files' + "ignored", nargs="*", metavar="...", help="if specified, only consider differences in these files" ) opts = p.parse_args(argv) @@ -130,26 +130,26 @@ def main(): commits, files = interpret_args(opts.args, dash_dash, opts.commit) if len(commits) > 1: if not opts.diff: - die('--diff is required when two commits are given') + die("--diff is required when two commits are given") else: if len(commits) > 2: - die('at most two commits allowed; %d given' % len(commits)) + die("at most two commits allowed; %d given" % len(commits)) changed_lines = compute_diff_and_extract_lines(commits, files) if opts.verbose >= 1: ignored_files = set(changed_lines) - filter_by_extension(changed_lines, opts.extensions.lower().split(',')) + filter_by_extension(changed_lines, opts.extensions.lower().split(",")) if opts.verbose >= 1: ignored_files.difference_update(changed_lines) if ignored_files: - print('Ignoring changes in the following files (wrong extension):') + print("Ignoring changes in the following files (wrong extension):") for filename in ignored_files: - print(' %s' % filename) + print(" %s" % filename) if changed_lines: - print('Running clang-format on the following files:') + print("Running clang-format on the following files:") for filename in changed_lines: - print(' %s' % filename) + print(" %s" % filename) if not changed_lines: - print('no modified files to format') + print("no modified files to format") return # The computed diff outputs absolute paths, so we must cd before accessing # those files. 
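
The hunks above and below are almost entirely a quote-style reformat of this vendored script (single to double quotes), so the logic is unchanged. The substantive machinery, turning a unified diff into per-file changed-line ranges, lives in `extract_lines()` further down. A minimal standalone sketch of that parsing, reusing the same two regexes; the sample patch text is invented for illustration:

```python
# Sketch of the hunk parsing done by extract_lines() below.
# Regexes mirror the script's; SAMPLE_PATCH is invented for illustration.
import collections
import re

Range = collections.namedtuple("Range", "start, count")

SAMPLE_PATCH = """\
+++ b/src/free_gpt.cpp
@@ -10,3 +12,4 @@
"""


def extract_changed_lines(patch_text):
    matches = {}
    filename = None
    for line in patch_text.splitlines():
        match = re.search(r"^\+\+\+\ [^/]+/(.*)", line)
        if match:
            filename = match.group(1).rstrip("\r\n")
            continue
        match = re.search(r"^@@ -[0-9,]+ \+(\d+)(,(\d+))?", line)
        if match and filename:
            start_line = int(match.group(1))
            line_count = int(match.group(3)) if match.group(3) else 1
            matches.setdefault(filename, []).append(Range(start_line, line_count))
    return matches


print(extract_changed_lines(SAMPLE_PATCH))  # {'src/free_gpt.cpp': [Range(start=12, count=4)]}
```

Only files whose extension survives `filter_by_extension()` are then handed to clang-format, which is why the hunk below lists the ignored files when run with `-v`.
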
@@ -163,19 +163,19 @@ def main(): old_tree = create_tree_from_workdir(changed_lines) new_tree = run_clang_format_and_save_to_tree(changed_lines, binary=opts.binary, style=opts.style) if opts.verbose >= 1: - print('old tree: %s' % old_tree) - print('new tree: %s' % new_tree) + print("old tree: %s" % old_tree) + print("new tree: %s" % new_tree) if old_tree == new_tree: if opts.verbose >= 0: - print('clang-format did not modify any files') + print("clang-format did not modify any files") elif opts.diff: print_diff(old_tree, new_tree) else: changed_files = apply_changes(old_tree, new_tree, force=opts.force, patch_mode=opts.patch) if (opts.verbose >= 0 and not opts.patch) or opts.verbose >= 1: - print('changed files:') + print("changed files:") for filename in changed_files: - print(' %s' % filename) + print(" %s" % filename) def load_git_config(non_string_options=None): @@ -187,11 +187,11 @@ def load_git_config(non_string_options=None): if non_string_options is None: non_string_options = {} out = {} - for entry in run('git', 'config', '--list', '--null').split('\0'): + for entry in run("git", "config", "--list", "--null").split("\0"): if entry: - name, value = entry.split('\n', 1) + name, value = entry.split("\n", 1) if name in non_string_options: - value = run('git', 'config', non_string_options[name], name) + value = run("git", "config", non_string_options[name], name) out[name] = value return out @@ -213,7 +213,7 @@ def interpret_args(args, dash_dash, default_commit): commits = args for commit in commits: object_type = get_object_type(commit) - if object_type not in ('commit', 'tag'): + if object_type not in ("commit", "tag"): if object_type is None: die("'%s' is not a commit" % commit) else: @@ -238,19 +238,19 @@ def disambiguate_revision(value): """Returns True if `value` is a revision, False if it is a file, or dies.""" # If `value` is ambiguous (neither a commit nor a file), the following # command will die with an appropriate error message. - run('git', 'rev-parse', value, verbose=False) + run("git", "rev-parse", value, verbose=False) object_type = get_object_type(value) if object_type is None: return False - if object_type in ('commit', 'tag'): + if object_type in ("commit", "tag"): return True - die('`%s` is a %s, but a commit or filename was expected' % (value, object_type)) + die("`%s` is a %s, but a commit or filename was expected" % (value, object_type)) def get_object_type(value): """Returns a string description of an object's type, or None if it is not a valid git object.""" - cmd = ['git', 'cat-file', '-t', value] + cmd = ["git", "cat-file", "-t", value] p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() if p.returncode != 0: @@ -277,10 +277,10 @@ def compute_diff(commits, files): differences between the working directory and the first commit if a single one was specified, or the difference between both specified commits, filtered on `files` (if non-empty). 
Zero context lines are used in the patch.""" - git_tool = 'diff-index' + git_tool = "diff-index" if len(commits) > 1: - git_tool = 'diff-tree' - cmd = ['git', git_tool, '-p', '-U0'] + commits + ['--'] + git_tool = "diff-tree" + cmd = ["git", git_tool, "-p", "-U0"] + commits + ["--"] cmd.extend(files) p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE) p.stdin.close() @@ -299,10 +299,10 @@ def extract_lines(patch_file): matches = {} for line in patch_file: line = convert_string(line) - match = re.search(r'^\+\+\+\ [^/]+/(.*)', line) + match = re.search(r"^\+\+\+\ [^/]+/(.*)", line) if match: - filename = match.group(1).rstrip('\r\n') - match = re.search(r'^@@ -[0-9,]+ \+(\d+)(,(\d+))?', line) + filename = match.group(1).rstrip("\r\n") + match = re.search(r"^@@ -[0-9,]+ \+(\d+)(,(\d+))?", line) if match: start_line = int(match.group(1)) line_count = 1 @@ -320,8 +320,8 @@ def filter_by_extension(dictionary, allowed_extensions): excluding the period.""" allowed_extensions = frozenset(allowed_extensions) for filename in list(dictionary.keys()): - base_ext = filename.rsplit('.', 1) - if len(base_ext) == 1 and '' in allowed_extensions: + base_ext = filename.rsplit(".", 1) + if len(base_ext) == 1 and "" in allowed_extensions: continue if len(base_ext) == 1 or base_ext[1].lower() not in allowed_extensions: del dictionary[filename] @@ -329,7 +329,7 @@ def filter_by_extension(dictionary, allowed_extensions): def cd_to_toplevel(): """Change to the top level of the git repository.""" - toplevel = run('git', 'rev-parse', '--show-toplevel') + toplevel = run("git", "rev-parse", "--show-toplevel") os.chdir(toplevel) @@ -337,10 +337,10 @@ def create_tree_from_workdir(filenames): """Create a new git tree with the given files from the working directory. Returns the object ID (SHA-1) of the created tree.""" - return create_tree(filenames, '--stdin') + return create_tree(filenames, "--stdin") -def run_clang_format_and_save_to_tree(changed_lines, revision=None, binary='clang-format', style=None): +def run_clang_format_and_save_to_tree(changed_lines, revision=None, binary="clang-format", style=None): """Run clang-format on each file and save the result to a git tree. Returns the object ID (SHA-1) of the created tree.""" @@ -355,9 +355,9 @@ def run_clang_format_and_save_to_tree(changed_lines, revision=None, binary='clan for filename, line_ranges in iteritems(changed_lines): if revision: git_metadata_cmd = [ - 'git', - 'ls-tree', - '%s:%s' % (revision, os.path.dirname(filename)), + "git", + "ls-tree", + "%s:%s" % (revision, os.path.dirname(filename)), os.path.basename(filename), ] git_metadata = subprocess.Popen(git_metadata_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE) @@ -366,12 +366,12 @@ def run_clang_format_and_save_to_tree(changed_lines, revision=None, binary='clan else: mode = oct(os.stat(filename).st_mode) # Adjust python3 octal format so that it matches what git expects - if mode.startswith('0o'): - mode = '0' + mode[2:] + if mode.startswith("0o"): + mode = "0" + mode[2:] blob_id = clang_format_to_blob(filename, line_ranges, revision=revision, binary=binary, style=style) - yield '%s %s\t%s' % (mode, blob_id, filename) + yield "%s %s\t%s" % (mode, blob_id, filename) - return create_tree(index_info_generator(), '--index-info') + return create_tree(index_info_generator(), "--index-info") def create_tree(input_lines, mode): @@ -381,20 +381,20 @@ def create_tree(input_lines, mode): '--index-info' is must be a list of values suitable for "git update-index --index-info", such as " ". 
Any other mode is invalid.""" - assert mode in ('--stdin', '--index-info') - cmd = ['git', 'update-index', '--add', '-z', mode] + assert mode in ("--stdin", "--index-info") + cmd = ["git", "update-index", "--add", "-z", mode] with temporary_index_file(): p = subprocess.Popen(cmd, stdin=subprocess.PIPE) for line in input_lines: - p.stdin.write(to_bytes('%s\0' % line)) + p.stdin.write(to_bytes("%s\0" % line)) p.stdin.close() if p.wait() != 0: - die('`%s` failed' % ' '.join(cmd)) - tree_id = run('git', 'write-tree') + die("`%s` failed" % " ".join(cmd)) + tree_id = run("git", "write-tree") return tree_id -def clang_format_to_blob(filename, line_ranges, revision=None, binary='clang-format', style=None): +def clang_format_to_blob(filename, line_ranges, revision=None, binary="clang-format", style=None): """Run clang-format on the given file and save the result to a git blob. Runs on the file in `revision` if not None, or on the file in the working @@ -403,13 +403,13 @@ def clang_format_to_blob(filename, line_ranges, revision=None, binary='clang-for Returns the object ID (SHA-1) of the created blob.""" clang_format_cmd = [binary] if style: - clang_format_cmd.extend(['-style=' + style]) + clang_format_cmd.extend(["-style=" + style]) clang_format_cmd.extend( - ['-lines=%s:%s' % (start_line, start_line + line_count - 1) for start_line, line_count in line_ranges] + ["-lines=%s:%s" % (start_line, start_line + line_count - 1) for start_line, line_count in line_ranges] ) if revision: - clang_format_cmd.extend(['-assume-filename=' + filename]) - git_show_cmd = ['git', 'cat-file', 'blob', '%s:%s' % (revision, filename)] + clang_format_cmd.extend(["-assume-filename=" + filename]) + git_show_cmd = ["git", "cat-file", "blob", "%s:%s" % (revision, filename)] git_show = subprocess.Popen(git_show_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE) git_show.stdin.close() clang_format_stdin = git_show.stdout @@ -427,17 +427,17 @@ def clang_format_to_blob(filename, line_ranges, revision=None, binary='clang-for else: raise clang_format_stdin.close() - hash_object_cmd = ['git', 'hash-object', '-w', '--path=' + filename, '--stdin'] + hash_object_cmd = ["git", "hash-object", "-w", "--path=" + filename, "--stdin"] hash_object = subprocess.Popen(hash_object_cmd, stdin=clang_format.stdout, stdout=subprocess.PIPE) clang_format.stdout.close() stdout = hash_object.communicate()[0] if hash_object.returncode != 0: - die('`%s` failed' % ' '.join(hash_object_cmd)) + die("`%s` failed" % " ".join(hash_object_cmd)) if clang_format.wait() != 0: - die('`%s` failed' % ' '.join(clang_format_cmd)) + die("`%s` failed" % " ".join(clang_format_cmd)) if git_show and git_show.wait() != 0: - die('`%s` failed' % ' '.join(git_show_cmd)) - return convert_string(stdout).rstrip('\r\n') + die("`%s` failed" % " ".join(git_show_cmd)) + return convert_string(stdout).rstrip("\r\n") @contextlib.contextmanager @@ -445,15 +445,15 @@ def temporary_index_file(tree=None): """Context manager for setting GIT_INDEX_FILE to a temporary file and deleting the file afterward.""" index_path = create_temporary_index(tree) - old_index_path = os.environ.get('GIT_INDEX_FILE') - os.environ['GIT_INDEX_FILE'] = index_path + old_index_path = os.environ.get("GIT_INDEX_FILE") + os.environ["GIT_INDEX_FILE"] = index_path try: yield finally: if old_index_path is None: - del os.environ['GIT_INDEX_FILE'] + del os.environ["GIT_INDEX_FILE"] else: - os.environ['GIT_INDEX_FILE'] = old_index_path + os.environ["GIT_INDEX_FILE"] = old_index_path os.remove(index_path) @@ -462,11 +462,11 
@@ def create_temporary_index(tree=None): If `tree` is not None, use that as the tree to read in. Otherwise, an empty index is created.""" - gitdir = run('git', 'rev-parse', '--git-dir') + gitdir = run("git", "rev-parse", "--git-dir") path = os.path.join(gitdir, temp_index_basename) if tree is None: - tree = '--empty' - run('git', 'read-tree', '--index-output=' + path, tree) + tree = "--empty" + run("git", "read-tree", "--index-output=" + path, tree) return path @@ -479,7 +479,7 @@ def print_diff(old_tree, new_tree): # We also only print modified files since `new_tree` only contains the files # that were modified, so unmodified files would show as deleted without the # filter. - subprocess.check_call(['git', 'diff', '--diff-filter=M', old_tree, new_tree, '--']) + subprocess.check_call(["git", "diff", "--diff-filter=M", old_tree, new_tree, "--"]) def apply_changes(old_tree, new_tree, force=False, patch_mode=False): @@ -488,16 +488,16 @@ def apply_changes(old_tree, new_tree, force=False, patch_mode=False): Bails if there are local changes in those files and not `force`. If `patch_mode`, runs `git checkout --patch` to select hunks interactively.""" changed_files = ( - run('git', 'diff-tree', '--diff-filter=M', '-r', '-z', '--name-only', old_tree, new_tree) - .rstrip('\0') - .split('\0') + run("git", "diff-tree", "--diff-filter=M", "-r", "-z", "--name-only", old_tree, new_tree) + .rstrip("\0") + .split("\0") ) if not force: - unstaged_files = run('git', 'diff-files', '--name-status', *changed_files) + unstaged_files = run("git", "diff-files", "--name-status", *changed_files) if unstaged_files: - print('The following files would be modified but ' 'have unstaged changes:', file=sys.stderr) + print("The following files would be modified but " "have unstaged changes:", file=sys.stderr) print(unstaged_files, file=sys.stderr) - print('Please commit, stage, or stash them first.', file=sys.stderr) + print("Please commit, stage, or stash them first.", file=sys.stderr) sys.exit(2) if patch_mode: # In patch mode, we could just as well create an index from the new tree @@ -507,17 +507,17 @@ def apply_changes(old_tree, new_tree, force=False, patch_mode=False): # better message, "Apply ... to index and worktree". This is not quite # right, since it won't be applied to the user's index, but oh well. 
        with temporary_index_file(old_tree):
-            subprocess.check_call(['git', 'checkout', '--patch', new_tree])
+            subprocess.check_call(["git", "checkout", "--patch", new_tree])
     else:
         with temporary_index_file(new_tree):
-            run('git', 'checkout-index', '-a', '-f')
+            run("git", "checkout-index", "-a", "-f")
     return changed_files
 
 
 def run(*args, **kwargs):
-    stdin = kwargs.pop('stdin', '')
-    verbose = kwargs.pop('verbose', True)
-    strip = kwargs.pop('strip', True)
+    stdin = kwargs.pop("stdin", "")
+    verbose = kwargs.pop("verbose", True)
+    strip = kwargs.pop("strip", True)
     for name in kwargs:
         raise TypeError("run() got an unexpected keyword argument '%s'" % name)
     p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
@@ -529,20 +529,20 @@ def run(*args, **kwargs):
     if p.returncode == 0:
         if stderr:
             if verbose:
-                print('`%s` printed to stderr:' % ' '.join(args), file=sys.stderr)
+                print("`%s` printed to stderr:" % " ".join(args), file=sys.stderr)
             print(stderr.rstrip(), file=sys.stderr)
         if strip:
-            stdout = stdout.rstrip('\r\n')
+            stdout = stdout.rstrip("\r\n")
         return stdout
     if verbose:
-        print('`%s` returned %s' % (' '.join(args), p.returncode), file=sys.stderr)
+        print("`%s` returned %s" % (" ".join(args), p.returncode), file=sys.stderr)
     if stderr:
         print(stderr.rstrip(), file=sys.stderr)
     sys.exit(2)
 
 
 def die(message):
-    print('error:', message, file=sys.stderr)
+    print("error:", message, file=sys.stderr)
     sys.exit(2)
 
 
@@ -550,23 +550,23 @@ def to_bytes(str_input):
     # Encode to UTF-8 to get binary data.
     if isinstance(str_input, bytes):
         return str_input
-    return str_input.encode('utf-8')
+    return str_input.encode("utf-8")
 
 
 def to_string(bytes_input):
     if isinstance(bytes_input, str):
         return bytes_input
-    return bytes_input.encode('utf-8')
+    return bytes_input.decode("utf-8")  # bytes have no .encode(); decode to produce a str
 
 
 def convert_string(bytes_input):
     try:
-        return to_string(bytes_input.decode('utf-8'))
+        return to_string(bytes_input.decode("utf-8"))
     except AttributeError:  # 'str' object has no attribute 'decode'.
        return str(bytes_input)
    except UnicodeError:
        return str(bytes_input)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/chatgpt_microservice/include/cfg.h b/chatgpt_microservice/include/cfg.h
index 2111993..a3fa2a2 100644
--- a/chatgpt_microservice/include/cfg.h
+++ b/chatgpt_microservice/include/cfg.h
@@ -14,6 +14,7 @@ struct Config {
     std::string http_proxy;
     std::string api_key;
     std::vector<std::string> ip_white_list;
+    std::string zeus{"http://chatgpt_zeus_service:8860"};
 };
 
 YCS_ADD_STRUCT(Config, client_root_path, interval, work_thread_num, host, port, chat_path, providers, enable_proxy,
-               http_proxy, api_key, ip_white_list)
+               http_proxy, api_key, ip_white_list, zeus)
diff --git a/chatgpt_microservice/include/free_gpt.h b/chatgpt_microservice/include/free_gpt.h
index 6fb6a6e..aeccc11 100644
--- a/chatgpt_microservice/include/free_gpt.h
+++ b/chatgpt_microservice/include/free_gpt.h
@@ -18,14 +18,9 @@ public:
 
     FreeGpt(Config&);
 
-    boost::asio::awaitable<void> aiTianhu(std::shared_ptr<Channel>, nlohmann::json);
-    boost::asio::awaitable<void> aiTianhuSpace(std::shared_ptr<Channel>, nlohmann::json);
     boost::asio::awaitable<void> deepAi(std::shared_ptr<Channel>, nlohmann::json);
-    boost::asio::awaitable<void> aiChat(std::shared_ptr<Channel>, nlohmann::json);
     boost::asio::awaitable<void> chatGptAi(std::shared_ptr<Channel>, nlohmann::json);
-    boost::asio::awaitable<void> acytoo(std::shared_ptr<Channel>, nlohmann::json);
     boost::asio::awaitable<void> openAi(std::shared_ptr<Channel>, nlohmann::json);
-    boost::asio::awaitable<void> h2o(std::shared_ptr<Channel>, nlohmann::json);
     boost::asio::awaitable<void> yqcloud(std::shared_ptr<Channel>, nlohmann::json);
     boost::asio::awaitable<void> huggingChat(std::shared_ptr<Channel>, nlohmann::json);
     boost::asio::awaitable<void> you(std::shared_ptr<Channel>, nlohmann::json);
@@ -39,9 +34,10 @@ public:
     boost::asio::awaitable<void> chatGptDuo(std::shared_ptr<Channel>, nlohmann::json);
     boost::asio::awaitable<void> chatForAi(std::shared_ptr<Channel>, nlohmann::json);
     boost::asio::awaitable<void> freeGpt(std::shared_ptr<Channel>, nlohmann::json);
-    boost::asio::awaitable<void> cromicle(std::shared_ptr<Channel>, nlohmann::json);
     boost::asio::awaitable<void> chatGpt4Online(std::shared_ptr<Channel>, nlohmann::json);
     boost::asio::awaitable<void> gptalk(std::shared_ptr<Channel>, nlohmann::json);
+    boost::asio::awaitable<void> gptForLove(std::shared_ptr<Channel>, nlohmann::json);
+    boost::asio::awaitable<void> chatGptDemo(std::shared_ptr<Channel>, nlohmann::json);
 
 private:
     boost::asio::awaitable<std::expected<boost::beast::ssl_stream<boost::beast::tcp_stream>, std::string>>
diff --git a/chatgpt_microservice/src/free_gpt.cpp b/chatgpt_microservice/src/free_gpt.cpp
index c492161..b53fe52 100644
--- a/chatgpt_microservice/src/free_gpt.cpp
+++ b/chatgpt_microservice/src/free_gpt.cpp
@@ -514,6 +514,61 @@ std::optional getCookie(CURL* curl, const std::string& url, const
     return http_response;
 }
 
+std::expected<nlohmann::json, std::string> callZeus(const std::string& host, const std::string& request_body) {
+    CURLcode res = CURLE_FAILED_INIT;  // initialised so the error path below never reads an indeterminate value
+    CURL* curl = curl_easy_init();
+    if (!curl) {
+        auto error_info = std::format("callZeus curl_easy_init() failed:{}", curl_easy_strerror(res));
+        return std::unexpected(error_info);
+    }
+    curl_easy_setopt(curl, CURLOPT_URL, host.data());
+
+    struct Input {
+        std::string recv;
+    };
+    Input input;
+    auto action_cb = [](void* contents, size_t size, size_t nmemb, void* userp) -> size_t {
+        auto input_ptr = static_cast<Input*>(userp);
+        std::string data{(char*)contents, size * nmemb};
+        auto& [recv] = *input_ptr;
+        recv.append(data);
+        return size * nmemb;
+    };
+    size_t (*action_fn)(void* contents, size_t size, size_t nmemb, void* userp) = action_cb;
+    curl_easy_setopt(curl, CURLOPT_TIMEOUT, 120L);
+    curl_easy_setopt(curl, CURLOPT_CONNECTTIMEOUT, 10L);
+    curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, action_fn);
+    curl_easy_setopt(curl, 
CURLOPT_WRITEDATA, &input); + curl_easy_setopt(curl, CURLOPT_POSTFIELDS, request_body.c_str()); + + struct curl_slist* headers = nullptr; + headers = curl_slist_append(headers, "Content-Type: application/json"); + curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); + + ScopeExit auto_exit{[=] { + curl_slist_free_all(headers); + curl_easy_cleanup(curl); + }}; + + res = curl_easy_perform(curl); + + if (res != CURLE_OK) { + auto error_info = std::format("callZeus curl_easy_perform() failed:{}", curl_easy_strerror(res)); + return std::unexpected(error_info); + } + int32_t response_code; + curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response_code); + if (response_code != 200) { + return std::unexpected(std::format("callZeus http code:{}", response_code)); + } + nlohmann::json rsp = nlohmann::json::parse(input.recv, nullptr, false); + if (rsp.is_discarded()) { + SPDLOG_ERROR("json parse error"); + return std::unexpected("parse callZeus error"); + } + return rsp; +} + } // namespace FreeGpt::FreeGpt(Config& cfg) @@ -733,155 +788,6 @@ boost::asio::awaitable FreeGpt::deepAi(std::shared_ptr ch, nlohma co_return; } -boost::asio::awaitable FreeGpt::aiTianhu(std::shared_ptr ch, nlohmann::json json) { - boost::asio::post(*m_thread_pool_ptr, [=, this] { - ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }}; - boost::system::error_code err{}; - - auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get(); - - CURLcode res; - CURL* curl = curl_easy_init(); - if (!curl) { - auto error_info = std::format("curl_easy_init() failed:{}", curl_easy_strerror(res)); - boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, error_info); }); - return; - } - curl_easy_setopt(curl, CURLOPT_URL, "https://www.aitianhu.com/api/chat-process"); - - if (!m_cfg.http_proxy.empty()) - curl_easy_setopt(curl, CURLOPT_PROXY, m_cfg.http_proxy.c_str()); - - auto cb = [](void* contents, size_t size, size_t nmemb, void* userp) -> size_t { - auto recv_data_ptr = static_cast(userp); - std::string data{(char*)contents, size * nmemb}; - recv_data_ptr->append(data); - return size * nmemb; - }; - size_t (*fn)(void* contents, size_t size, size_t nmemb, void* userp) = cb; - - std::string recv_data; - curlEasySetopt(curl); - curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, fn); - curl_easy_setopt(curl, CURLOPT_WRITEDATA, &recv_data); - - constexpr std::string_view json_str = R"({ - "prompt":"hello", - "options":{}, - "systemMessage":"You are ChatGPT, a large language model trained by OpenAI. 
Follow the user's instructions carefully.", - "temperature":0.8, - "top_p":1 - })"; - nlohmann::json request = nlohmann::json::parse(json_str, nullptr, false); - - request["prompt"] = prompt; - SPDLOG_INFO("{}", request.dump(2)); - auto str = request.dump(); - curl_easy_setopt(curl, CURLOPT_POSTFIELDS, str.c_str()); - - struct curl_slist* headers = nullptr; - headers = curl_slist_append(headers, "Content-Type: application/json"); - curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); - - ScopeExit auto_exit{[=] { - curl_slist_free_all(headers); - curl_easy_cleanup(curl); - }}; - - res = curl_easy_perform(curl); - - if (res != CURLE_OK) { - auto error_info = std::format("curl_easy_perform() failed:{}", curl_easy_strerror(res)); - boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, error_info); }); - return; - } - int32_t response_code; - curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response_code); - if (response_code != 200) { - boost::asio::post(ch->get_executor(), - [=] { ch->try_send(err, std::format("aiTianhu http code:{}", response_code)); }); - return; - } - auto lines = recv_data | std::views::split('\n') | std::views::transform([](auto&& rng) { - return std::string_view(&*rng.begin(), std::ranges::distance(rng.begin(), rng.end())); - }) | - to>(); - if (lines.empty()) { - SPDLOG_ERROR("lines empty"); - return; - } - nlohmann::json rsp = nlohmann::json::parse(lines.back(), nullptr, false); - if (rsp.is_discarded()) { - SPDLOG_ERROR("json parse error"); - ch->try_send(err, std::format("json parse error: {}", lines.back())); - return; - } - ch->try_send(err, rsp.value("text", rsp.dump())); - return; - }); - co_return; -} - -boost::asio::awaitable FreeGpt::aiChat(std::shared_ptr ch, nlohmann::json json) { - ScopeExit auto_exit{[&] { ch->close(); }}; - boost::system::error_code err{}; - - auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get(); - - constexpr std::string_view host = "chat-gpt.org"; - constexpr std::string_view port = "443"; - - boost::beast::http::request req{boost::beast::http::verb::post, "/api/text", 11}; - req.set(boost::beast::http::field::host, host); - req.set("authority", "chat-gpt.org"); - req.set("accept", "*/*"); - req.set("cache-control", "no-cache"); - req.set(boost::beast::http::field::content_type, "application/json"); - req.set(boost::beast::http::field::origin, "https://chat-gpt.org"); - req.set("pragma", "no-cache"); - req.set(boost::beast::http::field::referer, "https://chat-gpt.org/chat"); - req.set("sec-ch-ua-mobile", "?0"); - req.set("sec-ch-ua-platform", R"("macOS")"); - req.set("sec-fetch-dest", "empty"); - req.set("sec-fetch-mode", "cors"); - req.set("sec-fetch-site", "same-origin"); - req.set( - boost::beast::http::field::user_agent, - R"(Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36)"); - - nlohmann::json data{ - {"message", std::format("user: {}\nassistant:", prompt)}, - {"temperature", 0.5}, - {"presence_penalty", 0}, - {"top_p", 1}, - {"frequency_penalty", 0}, - }; - req.body() = data.dump(); - req.prepare_payload(); - - auto ret = co_await sendRequestRecvResponse(req, host, port, std::bind_front(&FreeGpt::createHttpClient, *this)); - if (!ret.has_value()) { - co_await ch->async_send(err, ret.error(), use_nothrow_awaitable); - co_return; - } - auto& [res, ctx, stream_] = ret.value(); - if (boost::beast::http::status::ok != res.result()) { - SPDLOG_ERROR("http status code: {}", res.result_int()); - co_await ch->async_send(err, 
res.reason(), use_nothrow_awaitable); - co_return; - } - - nlohmann::json rsp = nlohmann::json::parse(res.body(), nullptr, false); - if (rsp.is_discarded()) { - SPDLOG_ERROR("json parse error"); - co_await ch->async_send(err, "json parse error", use_nothrow_awaitable); - co_return; - } - SPDLOG_INFO("rsp: {}", rsp.dump()); - co_await ch->async_send(err, rsp.value("message", rsp.dump()), use_nothrow_awaitable); - co_return; -} - boost::asio::awaitable FreeGpt::chatGptAi(std::shared_ptr ch, nlohmann::json json) { ScopeExit auto_exit{[&] { ch->close(); }}; boost::system::error_code err{}; @@ -1020,71 +926,6 @@ create_client: co_return; } -boost::asio::awaitable FreeGpt::acytoo(std::shared_ptr ch, nlohmann::json json) { - boost::system::error_code err{}; - ScopeExit auto_exit{[&] { ch->close(); }}; - - auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get(); - - constexpr std::string_view host = "chat.acytoo.com"; - constexpr std::string_view port = "443"; - - boost::beast::http::request req{boost::beast::http::verb::post, - "/api/completions", 11}; - req.set(boost::beast::http::field::host, host); - req.set( - boost::beast::http::field::user_agent, - R"(Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36)"); - req.set("Accept", "*/*"); - req.set("Accept-Encoding", "gzip, deflate"); - req.set(boost::beast::http::field::content_type, "application/json"); - - constexpr std::string_view json_str = R"({ - "key":"", - "model":"gpt-3.5-turbo", - "messages":[ - { - "role":"user", - "content":"user: hello\nassistant:", - "createdAt":1688518523500 - } - ], - "temperature":0.5, - "password":"" - })"; - nlohmann::json request = nlohmann::json::parse(json_str, nullptr, false); - - request["messages"][0]["content"] = std::format("user: {}\nassistant:", prompt); - auto time_now = std::chrono::system_clock::now(); - auto duration_in_ms = std::chrono::duration_cast(time_now.time_since_epoch()); - request["messages"][0]["createdAt"] = duration_in_ms.count(); - SPDLOG_INFO("{}", request.dump(2)); - - req.body() = request.dump(); - req.prepare_payload(); - - auto ret = co_await sendRequestRecvResponse(req, host, port, std::bind_front(&FreeGpt::createHttpClient, *this)); - if (!ret.has_value()) { - co_await ch->async_send(err, ret.error(), use_nothrow_awaitable); - co_return; - } - auto& [res, ctx, stream_] = ret.value(); - if (boost::beast::http::status::ok != res.result()) { - SPDLOG_ERROR("http status code: {}", res.result_int()); - co_await ch->async_send(err, res.reason(), use_nothrow_awaitable); - co_return; - } - auto decompress_value = decompress(res); - if (!decompress_value.has_value()) { - SPDLOG_ERROR("decompress error"); - co_await ch->async_send(err, decompress_value.error(), use_nothrow_awaitable); - co_return; - } - auto& body = decompress_value.value(); - co_await ch->async_send(err, std::move(body), use_nothrow_awaitable); - co_return; -} - boost::asio::awaitable FreeGpt::openAi(std::shared_ptr ch, nlohmann::json json) { boost::system::error_code err{}; ScopeExit auto_exit{[&] { ch->close(); }}; @@ -1159,250 +1000,6 @@ boost::asio::awaitable FreeGpt::openAi(std::shared_ptr ch, nlohma co_return; } -boost::asio::awaitable FreeGpt::h2o(std::shared_ptr ch, nlohmann::json json) { - boost::system::error_code err{}; - ScopeExit auto_exit{[&] { ch->close(); }}; - - auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get(); - - constexpr std::string_view host = "gpt-gm.h2o.ai"; - constexpr 
std::string_view port = "443"; - - constexpr std::string_view user_agent{ - R"(Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0)"}; - - boost::beast::http::request req_init_cookie{boost::beast::http::verb::get, "/", - 11}; - req_init_cookie.set(boost::beast::http::field::host, host); - req_init_cookie.set(boost::beast::http::field::user_agent, user_agent); - - auto ret = co_await sendRequestRecvResponse(req_init_cookie, host, port, - std::bind_front(&FreeGpt::createHttpClient, *this)); - if (!ret.has_value()) { - co_await ch->async_send(err, ret.error(), use_nothrow_awaitable); - co_return; - } - auto& [response, ctx, stream_] = ret.value(); - if (boost::beast::http::status::ok != response.result()) { - SPDLOG_ERROR("http status code: {}", response.result_int()); - co_await ch->async_send(err, response.reason(), use_nothrow_awaitable); - co_return; - } - auto fields = splitString(response["Set-Cookie"], " "); - if (fields.empty()) { - std::stringstream ss; - ss << response.base(); - SPDLOG_ERROR("get cookie error: {}", ss.str()); - co_await ch->async_send(err, "can't get cookie", use_nothrow_awaitable); - co_return; - } - fields[0].pop_back(); - std::string cookie{std::move(fields[0])}; - SPDLOG_INFO("cookie: {}", cookie); - { - boost::beast::http::request req_init_setting{boost::beast::http::verb::post, - "/settings", 11}; - req_init_setting.set("Cookie", cookie); - req_init_setting.set(boost::beast::http::field::host, host); - req_init_setting.set(boost::beast::http::field::user_agent, user_agent); - req_init_setting.set("Accept", - "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8"); - req_init_setting.set("Accept-Language", "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3"); - req_init_setting.set("Content-Type", "application/x-www-form-urlencoded"); - req_init_setting.set("Upgrade-Insecure-Requests", "1"); - req_init_setting.set("Sec-Fetch-Dest", "document"); - req_init_setting.set("Sec-Fetch-Mode", "navigate"); - req_init_setting.set("Sec-Fetch-Site", "same-origin"); - req_init_setting.set("Sec-Fetch-User", "?1"); - req_init_setting.set("Referer", "https://gpt-gm.h2o.ai/r/jGfKSwU"); - - std::stringstream ss1; - ss1 << "ethicsModalAccepted=true&"; - ss1 << "shareConversationsWithModelAuthors=true&"; - ss1 << "ethicsModalAcceptedAt=" - << "&"; - ss1 << "activeModel=h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1&"; - ss1 << "searchEnabled=true"; - - req_init_setting.body() = ss1.str(); - req_init_setting.prepare_payload(); - - auto [ec, count] = co_await boost::beast::http::async_write(stream_, req_init_setting, use_nothrow_awaitable); - if (ec) { - SPDLOG_ERROR("{}", ec.message()); - co_await ch->async_send(err, ec.message(), use_nothrow_awaitable); - co_return; - } - boost::beast::flat_buffer b; - boost::beast::http::response res; - std::tie(ec, count) = co_await boost::beast::http::async_read(stream_, b, res, use_nothrow_awaitable); - if (ec) { - SPDLOG_ERROR("{}", ec.message()); - co_await ch->async_send(err, ec.message(), use_nothrow_awaitable); - co_return; - } - if (res.result_int() != 303) { - std::string reason{res.reason()}; - SPDLOG_ERROR("reason: {}", reason); - co_await ch->async_send( - err, std::format("return unexpected http status code: {}({})", res.result_int(), reason), - use_nothrow_awaitable); - co_return; - } - { - boost::beast::http::request req_init_cookie{boost::beast::http::verb::get, - "/r/jGfKSwU", 11}; - req_init_cookie.set(boost::beast::http::field::host, host); - 
req_init_cookie.set(boost::beast::http::field::user_agent, user_agent); - auto [ec, count] = - co_await boost::beast::http::async_write(stream_, req_init_cookie, use_nothrow_awaitable); - if (ec) { - SPDLOG_ERROR("{}", ec.message()); - co_await ch->async_send(err, ec.message(), use_nothrow_awaitable); - co_return; - } - boost::beast::flat_buffer b; - boost::beast::http::response res; - std::tie(ec, count) = co_await boost::beast::http::async_read(stream_, b, res, use_nothrow_awaitable); - if (ec) { - SPDLOG_ERROR("{}", ec.message()); - co_await ch->async_send(err, ec.message(), use_nothrow_awaitable); - co_return; - } - if (res.result_int() != 200) { - std::string reason{res.reason()}; - SPDLOG_ERROR("reason: {}", reason); - co_await ch->async_send( - err, std::format("return unexpected http status code: {}({})", res.result_int(), reason), - use_nothrow_awaitable); - co_return; - } - } - } - std::string conversation_id; - { - boost::beast::http::request req_init_conversation{ - boost::beast::http::verb::post, "/conversation", 11}; - req_init_conversation.set("Cookie", cookie); - req_init_conversation.set(boost::beast::http::field::host, host); - req_init_conversation.set(boost::beast::http::field::user_agent, user_agent); - req_init_conversation.set("Accept", "*/*"); - req_init_conversation.set("Accept-Encoding", "gzip, deflate"); - req_init_conversation.set("Content-Type", "application/json"); - req_init_conversation.set("Accept-Language", "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3"); - req_init_conversation.set("Sec-Fetch-Dest", "empty"); - req_init_conversation.set("Sec-Fetch-Mode", "cors"); - req_init_conversation.set("Sec-Fetch-Site", "same-origin"); - req_init_conversation.set("Referer", "https://gpt-gm.h2o.ai/"); - req_init_conversation.body() = R"({"model": "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1"})"; - req_init_conversation.prepare_payload(); - - auto [ec, count] = - co_await boost::beast::http::async_write(stream_, req_init_conversation, use_nothrow_awaitable); - if (ec) { - SPDLOG_ERROR("{}", ec.message()); - co_await ch->async_send(err, ec.message(), use_nothrow_awaitable); - co_return; - } - boost::beast::flat_buffer b; - boost::beast::http::response res; - std::tie(ec, count) = co_await boost::beast::http::async_read(stream_, b, res, use_nothrow_awaitable); - if (ec) { - SPDLOG_ERROR("{}", ec.message()); - co_await ch->async_send(err, ec.message(), use_nothrow_awaitable); - co_return; - } - if (res.result_int() != 200) { - std::string reason{res.reason()}; - SPDLOG_ERROR("reason: {}", reason); - co_await ch->async_send( - err, std::format("return unexpected http status code: {}({})", res.result_int(), reason), - use_nothrow_awaitable); - co_return; - } - std::cout << res.body() << std::endl; - nlohmann::json rsp_json = nlohmann::json::parse(res.body(), nullptr, false); - if (rsp_json.is_discarded()) { - SPDLOG_ERROR("json parse error: [{}]", fields.back()); - ch->try_send(err, std::format("json parse error: [{}]", fields.back())); - co_return; - } - if (!rsp_json.contains("conversationId")) { - SPDLOG_ERROR("not contains conversationId: {}", res.body()); - co_await ch->async_send(err, res.body(), use_nothrow_awaitable); - co_return; - } - conversation_id = rsp_json["conversationId"].get(); - } - - constexpr std::string_view json_str = R"({ - "inputs":"user: hello\nassistant: ", - "parameters":{ - "temperature":0.4, - "truncate":2048, - "max_new_tokens":1024, - "do_sample":true, - "repetition_penalty":1.2, - "return_full_text":false - }, - "stream":true, - "options":{ - 
"id":"64cf9d83-7b0d-4851-82b5-6f9090652494", - "response_id":"f76711da-6761-4055-9a05-84a8afce0198", - "is_retry":false, - "use_cache":false, - "web_search_id":"" - } - })"; - nlohmann::json request = nlohmann::json::parse(json_str, nullptr, false); - request["inputs"] = std::format("user: {}\nassistant: ", prompt); - request["response_id"] = conversation_id; - request["id"] = createUuidString(); - - boost::beast::http::request req{ - boost::beast::http::verb::post, std::format("/conversation/{}", conversation_id), 11}; - req.set("Cookie", cookie); - req.set(boost::beast::http::field::host, host); - req.set(boost::beast::http::field::user_agent, user_agent); - req.set("Accept", "*/*"); - // req.set("Accept-Encoding", "gzip, deflate"); - req.set("Content-Type", "application/json"); - req.set("Accept-Language", "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3"); - req.set("Sec-Fetch-Dest", "empty"); - req.set("Sec-Fetch-Mode", "cors"); - req.set("Sec-Fetch-Site", "same-origin"); - req.set("Referer", "https://gpt-gm.h2o.ai/"); - req.body() = request.dump(); - req.prepare_payload(); - - std::string recv; - co_await sendRequestRecvChunk(ch, stream_, req, 200, [&ch, &recv](std::string chunk_str) { - recv.append(chunk_str); - while (true) { - auto position = recv.find("\n"); - if (position == std::string::npos) - break; - auto msg = recv.substr(0, position + 1); - recv.erase(0, position + 1); - msg.pop_back(); - if (msg.empty() || !msg.contains("text")) - continue; - auto fields = splitString(msg, "data:"); - boost::system::error_code err{}; - nlohmann::json line_json = nlohmann::json::parse(fields.back(), nullptr, false); - if (line_json.is_discarded()) { - SPDLOG_ERROR("json parse error: [{}]", fields.back()); - ch->try_send(err, std::format("json parse error: [{}]", fields.back())); - continue; - } - auto str = line_json["token"]["text"].get(); - if (!str.empty() && str != "<|endoftext|>") - ch->try_send(err, str); - } - }); - co_return; -} - boost::asio::awaitable FreeGpt::yqcloud(std::shared_ptr ch, nlohmann::json json) { boost::system::error_code err{}; ScopeExit auto_exit{[&] { ch->close(); }}; @@ -2233,118 +1830,6 @@ boost::asio::awaitable FreeGpt::gptGo(std::shared_ptr ch, nlohman co_return; } -boost::asio::awaitable FreeGpt::aiTianhuSpace(std::shared_ptr ch, nlohmann::json json) { - co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable)); - - boost::system::error_code err{}; - ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }}; - auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get(); - - CURLcode res; - CURL* curl = curl_easy_init(); - if (!curl) { - auto error_info = std::format("curl_easy_init() failed:{}", curl_easy_strerror(res)); - co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable)); - ch->try_send(err, error_info); - co_return; - } - auto random = [](int len) { - static std::string chars{"abcdefghijklmnopqrstuvwxyz0123456789"}; - static std::string letter{"abcdefghijklmnopqrstuvwxyz"}; - std::random_device rd; - std::mt19937 gen(rd()); - std::uniform_int_distribution<> dis(0, 1000000); - std::string random_string; - random_string += chars[dis(gen) % letter.length()]; - len = len - 1; - for (int i = 0; i < len; i++) - random_string += chars[dis(gen) % chars.length()]; - return random_string; - }; - auto url = std::format("https://{}.aitianhu.space/api/chat-process", random(6)); - SPDLOG_INFO("url: [{}]", url); - curl_easy_setopt(curl, 
CURLOPT_URL, url.c_str()); - if (!m_cfg.http_proxy.empty()) - curl_easy_setopt(curl, CURLOPT_PROXY, m_cfg.http_proxy.c_str()); - - struct Input { - std::shared_ptr ch; - std::string recv; - }; - Input input{ch}; - auto action_cb = [](void* contents, size_t size, size_t nmemb, void* userp) -> size_t { - boost::system::error_code err{}; - auto input_ptr = static_cast(userp); - std::string data{(char*)contents, size * nmemb}; - auto& [ch, recv] = *input_ptr; - recv.append(data); - while (true) { - auto position = recv.find("\n"); - if (position == std::string::npos) - break; - auto msg = recv.substr(0, position + 1); - recv.erase(0, position + 1); - msg.pop_back(); - if (msg.empty() || !msg.contains("content")) - continue; - boost::system::error_code err{}; - nlohmann::json line_json = nlohmann::json::parse(msg, nullptr, false); - if (line_json.is_discarded()) { - SPDLOG_ERROR("json parse error: [{}]", msg); - boost::asio::post(ch->get_executor(), - [=] { ch->try_send(err, std::format("json parse error: [{}]", msg)); }); - continue; - } - auto str = line_json["detail"]["choices"][0]["delta"]["content"].get(); - if (!str.empty()) - boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, str); }); - } - return size * nmemb; - }; - size_t (*action_fn)(void* contents, size_t size, size_t nmemb, void* userp) = action_cb; - curlEasySetopt(curl); - curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, action_fn); - curl_easy_setopt(curl, CURLOPT_WRITEDATA, &input); - - constexpr std::string_view request_str{R"({ - "prompt":"hello", - "options":{}, - "systemMessage":"You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.", - "temperature":0.8, - "top_p":1 - })"}; - nlohmann::json request = nlohmann::json::parse(request_str, nullptr, false); - request["prompt"] = prompt; - auto str = request.dump(); - curl_easy_setopt(curl, CURLOPT_POSTFIELDS, str.c_str()); - - struct curl_slist* headers = nullptr; - headers = curl_slist_append(headers, "Content-Type: application/json"); - curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); - - ScopeExit auto_exit{[=] { - curl_slist_free_all(headers); - curl_easy_cleanup(curl); - }}; - - res = curl_easy_perform(curl); - - if (res != CURLE_OK) { - co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable)); - auto error_info = std::format("curl_easy_perform() failed:{}", curl_easy_strerror(res)); - ch->try_send(err, error_info); - co_return; - } - int32_t response_code; - curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response_code); - if (response_code != 200) { - co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable)); - ch->try_send(err, std::format("you http code:{}", response_code)); - co_return; - } - co_return; -} - boost::asio::awaitable FreeGpt::aibn(std::shared_ptr ch, nlohmann::json json) { co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable)); @@ -2564,27 +2049,8 @@ boost::asio::awaitable FreeGpt::chatForAi(std::shared_ptr ch, nlo curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, action_fn); curl_easy_setopt(curl, CURLOPT_WRITEDATA, &input); - auto generate_signature = [](int timestamp, const std::string& conversation_id, const std::string& message) { - std::stringstream ss; - ss << timestamp << ":" << conversation_id << ":" << message << ":6B46K4pt"; - std::string data = ss.str(); - - unsigned char digest[SHA256_DIGEST_LENGTH]; - SHA256(reinterpret_cast(data.c_str()), 
data.length(), digest); - - std::stringstream sha_stream; - for (int i = 0; i < SHA256_DIGEST_LENGTH; i++) { - sha_stream << std::setfill('0') << std::setw(2) << std::hex << static_cast(digest[i]); - } - return sha_stream.str(); - }; - uint64_t timestamp = - std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count(); - auto onversation_id = std::format("id_{}", timestamp); - std::string signature = generate_signature(timestamp, onversation_id, prompt); - constexpr std::string_view request_str{R"({ - "conversationId": "id_1696338587", + "conversationId": "temp", "conversationType": "chat_continuous", "botId": "chat_continuous", "globalSettings": { @@ -2593,23 +2059,19 @@ boost::asio::awaitable FreeGpt::chatForAi(std::shared_ptr ch, nlo "messageHistorySize": 5, "temperature": 0.7, "top_p": 1, - "stream": true + "stream": false }, "botSettings": {}, "prompt": "hello", "messages": [{ "role": "user", "content": "hello" - }], - "sign": "1505ec882d72d5f3175a74ac84d665b1b904e6671b2e7334268f540975929a26", - "timestamp": 1696338587 + }] })"}; nlohmann::json request = nlohmann::json::parse(request_str, nullptr, false); - request["sign"] = signature; - request["conversationId"] = onversation_id; request["messages"] = getConversationJson(json); - request["timestamp"] = timestamp; + request["prompt"] = prompt; auto str = request.dump(); SPDLOG_INFO("request : [{}]", str); @@ -2747,101 +2209,6 @@ boost::asio::awaitable FreeGpt::freeGpt(std::shared_ptr ch, nlohm co_return; } -boost::asio::awaitable FreeGpt::cromicle(std::shared_ptr ch, nlohmann::json json) { - co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable)); - - boost::system::error_code err{}; - ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }}; - auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get(); - - CURLcode res; - CURL* curl = curl_easy_init(); - if (!curl) { - auto error_info = std::format("curl_easy_init() failed:{}", curl_easy_strerror(res)); - co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable)); - ch->try_send(err, error_info); - co_return; - } - curl_easy_setopt(curl, CURLOPT_URL, "https://cromicle.top/chat"); - if (!m_cfg.http_proxy.empty()) - curl_easy_setopt(curl, CURLOPT_PROXY, m_cfg.http_proxy.c_str()); - - struct Input { - std::shared_ptr ch; - std::string recv; - }; - Input input{ch}; - auto action_cb = [](void* contents, size_t size, size_t nmemb, void* userp) -> size_t { - boost::system::error_code err{}; - auto input_ptr = static_cast(userp); - std::string data{(char*)contents, size * nmemb}; - auto& [ch, recv] = *input_ptr; - boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, data); }); - return size * nmemb; - }; - size_t (*action_fn)(void* contents, size_t size, size_t nmemb, void* userp) = action_cb; - curlEasySetopt(curl); - curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, action_fn); - curl_easy_setopt(curl, CURLOPT_WRITEDATA, &input); - - auto generate_signature = [](const std::string& message) { - std::stringstream ss; - ss << "asdap" << message; - std::string data = ss.str(); - - unsigned char digest[SHA256_DIGEST_LENGTH]; - SHA256(reinterpret_cast(data.c_str()), data.length(), digest); - - std::stringstream sha_stream; - for (int i = 0; i < SHA256_DIGEST_LENGTH; i++) { - sha_stream << std::setfill('0') << std::setw(2) << std::hex << static_cast(digest[i]); - } - return sha_stream.str(); - }; - std::string signature = 
generate_signature(prompt); - - constexpr std::string_view request_str{R"({ - "message": "hello", - "hash": "dda6ea4e1dc215f198084018b1df20cfeafe9fbdfe31d8a350d6917509158d8a", - "token": "asdap" - })"}; - nlohmann::json request = nlohmann::json::parse(request_str, nullptr, false); - - request["hash"] = signature; - request["message"] = prompt; - - auto str = request.dump(); - SPDLOG_INFO("request : [{}]", str); - - curl_easy_setopt(curl, CURLOPT_POSTFIELDS, str.c_str()); - - struct curl_slist* headers = nullptr; - headers = curl_slist_append(headers, "Content-Type: application/json"); - curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); - - ScopeExit auto_exit{[=] { - curl_slist_free_all(headers); - curl_easy_cleanup(curl); - }}; - - res = curl_easy_perform(curl); - - if (res != CURLE_OK) { - co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable)); - auto error_info = std::format("curl_easy_perform() failed:{}", curl_easy_strerror(res)); - ch->try_send(err, error_info); - co_return; - } - int32_t response_code; - curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response_code); - if (response_code != 200) { - co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable)); - ch->try_send(err, std::format("you http code:{}", response_code)); - co_return; - } - co_return; -} - boost::asio::awaitable FreeGpt::chatGpt4Online(std::shared_ptr ch, nlohmann::json json) { co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable)); @@ -3185,3 +2552,316 @@ boost::asio::awaitable FreeGpt::gptalk(std::shared_ptr ch, nlohma } co_return; } + +boost::asio::awaitable FreeGpt::gptForLove(std::shared_ptr ch, nlohmann::json json) { + co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable)); + + boost::system::error_code err{}; + ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }}; + auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get(); + + CURLcode res; + CURL* curl = curl_easy_init(); + if (!curl) { + auto error_info = std::format("curl_easy_init() failed:{}", curl_easy_strerror(res)); + co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable)); + ch->try_send(err, error_info); + co_return; + } + curl_easy_setopt(curl, CURLOPT_URL, "https://api.gptplus.one/chat-process"); + if (!m_cfg.http_proxy.empty()) + curl_easy_setopt(curl, CURLOPT_PROXY, m_cfg.http_proxy.c_str()); + struct Input { + std::shared_ptr ch; + std::string recv; + }; + Input input{ch}; + auto action_cb = [](void* contents, size_t size, size_t nmemb, void* userp) -> size_t { + boost::system::error_code err{}; + auto input_ptr = static_cast(userp); + std::string data{(char*)contents, size * nmemb}; + auto& [ch, recv] = *input_ptr; + recv.append(data); + while (true) { + auto position = recv.find("\n"); + if (position == std::string::npos) + break; + auto msg = recv.substr(0, position + 1); + recv.erase(0, position + 1); + msg.pop_back(); + if (msg.contains("10分钟内提问超过了5次")) { + boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, msg); }); + return size * nmemb; + } + if (msg.empty() || !msg.contains("content")) + continue; + boost::system::error_code err{}; + nlohmann::json line_json = nlohmann::json::parse(msg, nullptr, false); + if (line_json.is_discarded()) { + SPDLOG_ERROR("json parse error: [{}]", msg); + boost::asio::post(ch->get_executor(), + [=] { 
ch->try_send(err, std::format("json parse error: [{}]", msg)); }); + continue; + } + auto str = line_json["detail"]["choices"][0]["delta"]["content"].get(); + if (!str.empty()) + boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, str); }); + } + return size * nmemb; + }; + size_t (*action_fn)(void* contents, size_t size, size_t nmemb, void* userp) = action_cb; + curlEasySetopt(curl); + curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, action_fn); + curl_easy_setopt(curl, CURLOPT_WRITEDATA, &input); + + constexpr std::string_view request_str{R"({ + "prompt": "hello", + "options": {}, + "systemMessage": "You are ChatGPT, the version is GPT3.5, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.", + "temperature": 0.8, + "top_p": 1, + "secret": "U2FsdGVkX18vdtlMj0nP1LoUzEqJTP0is+Q2+bQJNMk=", + "stream": false + })"}; + nlohmann::json request = nlohmann::json::parse(request_str, nullptr, false); + + auto secret_rsp = callZeus("http://127.0.0.1:8860/gptforlove", "{}"); + if (!secret_rsp.has_value()) { + SPDLOG_ERROR("callZeus error: {}", secret_rsp.error()); + co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable)); + ch->try_send(err, secret_rsp.error()); + co_return; + } + SPDLOG_INFO("zeus: [{}]", secret_rsp.value().dump()); + request["secret"] = secret_rsp.value()["secret"]; + request["prompt"] = prompt; + + auto str = request.dump(); + SPDLOG_INFO("request : [{}]", str); + + curl_easy_setopt(curl, CURLOPT_POSTFIELDS, str.c_str()); + + struct curl_slist* headers = nullptr; + headers = curl_slist_append(headers, "Content-Type: application/json"); + headers = curl_slist_append(headers, "referer: https://ai18.gptforlove.com/"); + headers = curl_slist_append(headers, "origin: https://ai18.gptforlove.com"); + headers = curl_slist_append(headers, "authority: api.gptplus.one"); + curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); + + ScopeExit auto_exit{[=] { + curl_slist_free_all(headers); + curl_easy_cleanup(curl); + }}; + + res = curl_easy_perform(curl); + + if (res != CURLE_OK) { + co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable)); + auto error_info = std::format("curl_easy_perform() failed:{}", curl_easy_strerror(res)); + ch->try_send(err, error_info); + co_return; + } + int32_t response_code; + curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response_code); + if (response_code != 200) { + co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable)); + ch->try_send(err, std::format("you http code:{}", response_code)); + co_return; + } + co_return; +} + +boost::asio::awaitable FreeGpt::chatGptDemo(std::shared_ptr ch, nlohmann::json json) { + co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable)); + ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }}; + boost::system::error_code err{}; + + auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get(); + + CURLcode res; + int32_t response_code; + + struct Input { + std::shared_ptr ch; + std::string recv; + }; + Input input; + + CURL* curl = curl_easy_init(); + if (!curl) { + auto error_info = std::format("curl_easy_init() failed:{}", curl_easy_strerror(res)); + co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable)); + ch->try_send(err, error_info); + co_return; + } + curl_easy_setopt(curl, 
CURLOPT_URL, "https://chat.chatgptdemo.net/"); + + if (!m_cfg.http_proxy.empty()) + curl_easy_setopt(curl, CURLOPT_PROXY, m_cfg.http_proxy.c_str()); + curlEasySetopt(curl); + + auto action_cb = [](void* contents, size_t size, size_t nmemb, void* userp) -> size_t { + auto input_ptr = static_cast(userp); + std::string data{(char*)contents, size * nmemb}; + auto& [ch, recv] = *input_ptr; + recv.append(data); + return size * nmemb; + }; + size_t (*action_fn)(void* contents, size_t size, size_t nmemb, void* userp) = action_cb; + curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, action_fn); + curl_easy_setopt(curl, CURLOPT_WRITEDATA, &input); + + struct curl_slist* headers = nullptr; + headers = curl_slist_append(headers, "origin: https://chat.chatgptdemo.net"); + headers = curl_slist_append(headers, "referer: https://chat.chatgptdemo.net/"); + curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); + + ScopeExit auto_exit{[=] { + curl_slist_free_all(headers); + curl_easy_cleanup(curl); + }}; + + res = curl_easy_perform(curl); + if (res != CURLE_OK) { + co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable)); + auto error_info = std::format("curl_easy_perform() failed:{}", curl_easy_strerror(res)); + ch->try_send(err, error_info); + co_return; + } + curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response_code); + if (response_code != 200) { + co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable)); + ch->try_send(err, std::format("chatGptDemo http code:{}", response_code)); + co_return; + } + auto ret = findAll(R"(