diff --git a/bot_microservice/constants.py b/bot_microservice/constants.py index 0738381..c8bb75d 100644 --- a/bot_microservice/constants.py +++ b/bot_microservice/constants.py @@ -65,6 +65,7 @@ class ChatGptModelsEnum(StrEnum): gpt_3_5_turbo_stream_fakeGpt = "gpt-3.5-turbo-stream-fakeGpt" gpt_3_5_turbo_stream_aura = "gpt-3.5-turbo-stream-aura" gpt_3_5_turbo_stream_geminiProChat = "gpt-3.5-turbo-stream-geminiProChat" + gpt_3_5_turbo_stream_flowgpt = "gpt-3.5-turbo-stream-flowgpt" @classmethod def values(cls) -> set[str]: diff --git a/chatgpt_microservice/include/free_gpt.h b/chatgpt_microservice/include/free_gpt.h index fe1c3bb..38dafde 100644 --- a/chatgpt_microservice/include/free_gpt.h +++ b/chatgpt_microservice/include/free_gpt.h @@ -30,6 +30,7 @@ public: boost::asio::awaitable<void> fakeGpt(std::shared_ptr<Channel>, nlohmann::json); boost::asio::awaitable<void> aura(std::shared_ptr<Channel>, nlohmann::json); boost::asio::awaitable<void> geminiProChat(std::shared_ptr<Channel>, nlohmann::json); + boost::asio::awaitable<void> flowGpt(std::shared_ptr<Channel>, nlohmann::json); private: boost::asio::awaitable<std::expected<boost::beast::ssl_stream<boost::beast::tcp_stream>, std::string>> diff --git a/chatgpt_microservice/src/free_gpt.cpp b/chatgpt_microservice/src/free_gpt.cpp index 8882d23..986d1a8 100644 --- a/chatgpt_microservice/src/free_gpt.cpp +++ b/chatgpt_microservice/src/free_gpt.cpp @@ -1684,3 +1684,93 @@ boost::asio::awaitable<void> FreeGpt::geminiProChat(std::shared_ptr<Channel> ch, } co_return; } + +boost::asio::awaitable<void> FreeGpt::flowGpt(std::shared_ptr<Channel> ch, nlohmann::json json) { + co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable)); + ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }}; + + auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>(); + + boost::system::error_code err{}; + std::unordered_multimap<std::string, std::string> headers{ + {"Accept", "*/*"}, + {"content-type", "application/json"}, + {"Referer", "https://flowgpt.com/"}, + {"Origin", "https://flowgpt.com"}, + {"Sec-Fetch-Dest", 
"empty"}, + {"Sec-Fetch-Mode", "cors"}, + {"Sec-Fetch-Site", "same-site"}, + {"Sec-Ch-Ua-Mobile", "?0"}, + {"Authorization", "Bearer null"}, + }; + std::string recv; + auto ret = Curl() + .setUrl("https://backend-k8s.flowgpt.com/v2/chat-anonymous") + .setProxy(m_cfg.http_proxy) + .setRecvHeadersCallback([](std::string) { return; }) + .setRecvBodyCallback([&](std::string str) mutable { + recv.append(str); + while (true) { + auto position = recv.find("\n"); + if (position == std::string::npos) + break; + auto msg = recv.substr(0, position + 1); + recv.erase(0, position + 1); + msg.pop_back(); + if (msg.empty() || !msg.contains("event")) + continue; + boost::system::error_code err{}; + nlohmann::json line_json = nlohmann::json::parse(msg, nullptr, false); + if (line_json.is_discarded()) { + SPDLOG_ERROR("json parse error: [{}]", msg); + boost::asio::post(ch->get_executor(), [=] { + ch->try_send(err, std::format("json parse error: [{}]", msg)); + }); + continue; + } + auto type = line_json["event"].get<std::string>(); + if (type == "text") { + auto new_message = line_json["data"].get<std::string>(); + if (new_message.empty()) + continue; + if (!new_message.empty()) + boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, new_message); }); + } + } + return; + }) + .setBody([&] { + constexpr std::string_view ask_json_str = R"({ + "model": "gpt-3.5-turbo", + "nsfw": false, + "question": "hello", + "history": [ + { + "role": "assistant", + "content": "Hello, how can I help you today?" + } + ], + "system": "You are helpful assistant. 
Follow the user's instructions carefully.", + "temperature": 0.7, + "promptId": "model-gpt-3.5-turbo", + "documentIds": [], + "chatFileDocumentIds": [], + "generateImage": false, + "generateAudio": false + })"; + nlohmann::json ask_request = nlohmann::json::parse(ask_json_str, nullptr, false); + ask_request["question"] = prompt; + std::string ask_request_str = ask_request.dump(); + SPDLOG_INFO("request: [{}]", ask_request_str); + return ask_request_str; + }()) + .clearHeaders() + .setHttpHeaders(headers) + .perform(); + if (ret.has_value()) { + SPDLOG_ERROR("{}", ret.value()); + co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable)); + ch->try_send(err, ret.value()); + } + co_return; +} \ No newline at end of file diff --git a/chatgpt_microservice/src/main.cpp b/chatgpt_microservice/src/main.cpp index 5006238..e725622 100644 --- a/chatgpt_microservice/src/main.cpp +++ b/chatgpt_microservice/src/main.cpp @@ -319,6 +319,7 @@ int main(int, char** argv) { ADD_METHOD("gpt-3.5-turbo-stream-fakeGpt", FreeGpt::fakeGpt); ADD_METHOD("gpt-3.5-turbo-stream-aura", FreeGpt::aura); ADD_METHOD("gpt-3.5-turbo-stream-geminiProChat", FreeGpt::geminiProChat); + ADD_METHOD("gpt-3.5-turbo-stream-flowgpt", FreeGpt::flowGpt); SPDLOG_INFO("active provider:"); for (auto& [provider, _] : gpt_function)