From fd9d38b5f0ab91b61a58ce81cbb37e9c14114391 Mon Sep 17 00:00:00 2001
From: Dmitry Afanasyev <71835315+Balshgit@users.noreply.github.com>
Date: Thu, 4 Jan 2024 01:12:08 +0300
Subject: [PATCH] add provider GeminiProChat & increase chat workers num (#75)

---
 bot_microservice/constants.py           |  1 +
 chatgpt_microservice/include/free_gpt.h |  1 +
 chatgpt_microservice/src/free_gpt.cpp   | 79 +++++++++++++++++++++++++
 chatgpt_microservice/src/main.cpp       |  1 +
 docker-compose.yml                      |  1 +
 5 files changed, 83 insertions(+)

diff --git a/bot_microservice/constants.py b/bot_microservice/constants.py
index 20fb26a..06a972b 100644
--- a/bot_microservice/constants.py
+++ b/bot_microservice/constants.py
@@ -69,6 +69,7 @@ class ChatGptModelsEnum(StrEnum):
     gpt_3_5_turbo_stream_gptforlove = "gpt-3.5-turbo-stream-gptforlove"
     gpt_3_5_turbo_stream_fakeGpt = "gpt-3.5-turbo-stream-fakeGpt"
     gpt_3_5_turbo_stream_aura = "gpt-3.5-turbo-stream-aura"
+    gpt_3_5_turbo_stream_geminiProChat = "gpt-3.5-turbo-stream-geminiProChat"
 
     @classmethod
     def values(cls) -> set[str]:
diff --git a/chatgpt_microservice/include/free_gpt.h b/chatgpt_microservice/include/free_gpt.h
index b0739df..a28dcf9 100644
--- a/chatgpt_microservice/include/free_gpt.h
+++ b/chatgpt_microservice/include/free_gpt.h
@@ -35,6 +35,7 @@ public:
     boost::asio::awaitable<void> aura(std::shared_ptr<Channel>, nlohmann::json);
     boost::asio::awaitable<void> gpt6(std::shared_ptr<Channel>, nlohmann::json);
     boost::asio::awaitable<void> chatxyz(std::shared_ptr<Channel>, nlohmann::json);
+    boost::asio::awaitable<void> geminiProChat(std::shared_ptr<Channel>, nlohmann::json);
 
 private:
     boost::asio::awaitable<std::expected<boost::beast::ssl_stream<boost::beast::tcp_stream>, std::string>>
diff --git a/chatgpt_microservice/src/free_gpt.cpp b/chatgpt_microservice/src/free_gpt.cpp
index 8407dbd..ad4eaef 100644
--- a/chatgpt_microservice/src/free_gpt.cpp
+++ b/chatgpt_microservice/src/free_gpt.cpp
@@ -2272,3 +2272,82 @@ boost::asio::awaitable<void> FreeGpt::chatxyz(std::shared_ptr<Channel> ch, nlohm
     }
     co_return;
 }
+
+boost::asio::awaitable<void> FreeGpt::geminiProChat(std::shared_ptr<Channel> ch, nlohmann::json json) {
+    co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
+    ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
+
+    auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
+    uint64_t timestamp = getTimestamp();
+
+    auto generate_signature = [](uint64_t timestamp, const std::string& message) {
+        std::string s = std::to_string(timestamp) + ":" + message + ":9C4680FB-A4E1-6BC7-052A-7F68F9F5AD1F";
+        unsigned char hash[SHA256_DIGEST_LENGTH];
+        SHA256_CTX sha256;
+        if (!SHA256_Init(&sha256))
+            throw std::runtime_error("SHA-256 initialization failed");
+        if (!SHA256_Update(&sha256, s.c_str(), s.length()))
+            throw std::runtime_error("SHA-256 update failed");
+        if (!SHA256_Final(hash, &sha256))
+            throw std::runtime_error("SHA-256 finalization failed");
+        std::stringstream ss;
+        for (int i = 0; i < SHA256_DIGEST_LENGTH; i++)
+            ss << std::hex << std::setw(2) << std::setfill('0') << static_cast<int>(hash[i]);
+        return ss.str();
+    };
+    std::string signature = generate_signature(timestamp, prompt);
+
+    boost::system::error_code err{};
+    std::unordered_multimap<std::string, std::string> headers{
+        {"Accept", "*/*"},
+        {"content-type", "application/json"},
+        {"Referer", "https://geminiprochat.com/"},
+        {"Origin", "https://geminiprochat.com"},
+        {"Sec-Fetch-Dest", "empty"},
+        {"Sec-Fetch-Mode", "cors"},
+        {"Sec-Fetch-Site", "same-origin"},
+        {"TE", "trailers"},
+    };
+    std::string recv;
+    auto ret = Curl()
+                   .setUrl("https://geminiprochat.com/api/generate")
+                   .setProxy(m_cfg.http_proxy)
+                   .setRecvHeadersCallback([](std::string) { return; })
+                   .setRecvBodyCallback([&](std::string chunk_str) mutable {
+                       boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, chunk_str); });
+                       return;
+                   })
+                   .setBody([&] {
+                       constexpr std::string_view ask_json_str = R"({
+                            "messages":[
+                                {
+                                    "role":"user",
+                                    "parts":[
+                                        {
+                                            "text":"Hello"
+                                        }
+                                    ]
+                                }
+                            ],
+                            "time":1704256758261,
+                            "pass":null,
+                            "sign":"e5cbb75324af44b4d9e138238335a7f2120bdae2109625883c3dc44884917086"
+                        })";
+                       nlohmann::json ask_request = nlohmann::json::parse(ask_json_str, nullptr, false);
+                       ask_request["messages"][0]["parts"][0]["text"] = prompt;
+                       ask_request["sign"] = signature;
+                       ask_request["time"] = timestamp;
+                       std::string ask_request_str = ask_request.dump();
+                       SPDLOG_INFO("request: [{}]", ask_request_str);
+                       return ask_request_str;
+                   }())
+                   .clearHeaders()
+                   .setHttpHeaders(headers)
+                   .perform();
+    if (ret.has_value()) {
+        SPDLOG_ERROR("{}", ret.value());
+        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
+        ch->try_send(err, ret.value());
+    }
+    co_return;
+}
diff --git a/chatgpt_microservice/src/main.cpp b/chatgpt_microservice/src/main.cpp
index fbc9186..b44abd6 100644
--- a/chatgpt_microservice/src/main.cpp
+++ b/chatgpt_microservice/src/main.cpp
@@ -324,6 +324,7 @@ int main(int, char** argv) {
     ADD_METHOD("gpt-3.5-turbo-stream-aura", FreeGpt::aura);
     ADD_METHOD("gpt6", FreeGpt::gpt6);
     ADD_METHOD("gpt-3.5-turbo-stream-chatxyz", FreeGpt::chatxyz);
+    ADD_METHOD("gpt-3.5-turbo-stream-geminiProChat", FreeGpt::geminiProChat);
 
     SPDLOG_INFO("active provider:");
     for (auto& [provider, _] : gpt_function)
diff --git a/docker-compose.yml b/docker-compose.yml
index 35e9712..9ac8e1b 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -46,6 +46,7 @@ services:
     environment:
       CHAT_PATH: "/gpt/chat"
       API_KEY: "a40f22f2-c1a2-4b1d-a47f-55ae1a7ddbed"
+      WORK_THREAD_NUM: 4
     networks:
      chatgpt-network:
        ipv4_address: 200.20.0.11