diff --git a/bot_microservice/constants.py b/bot_microservice/constants.py
index 5a7aeee..273445d 100644
--- a/bot_microservice/constants.py
+++ b/bot_microservice/constants.py
@@ -54,6 +54,8 @@ class ChatGptModelsEnum(StrEnum):
     gpt_3_5_turbo_stream_gptalk = "gpt-3.5-turbo-stream-gptalk"
     gpt_3_5_turbo_stream_ChatgptDemo = "gpt-3.5-turbo-stream-ChatgptDemo"
     llama2 = "llama2"
+    gpt_3_5_turbo_stream_Berlin = "gpt-3.5-turbo-stream-Berlin"
+    gpt_4_ChatGpt4Online = "gpt-4-ChatGpt4Online"
     gpt_3_5_turbo_stream_chatGptAi = "gpt-3.5-turbo-stream-chatGptAi"
     gpt_3_5_turbo_stream_FakeGpt = "gpt-3.5-turbo-stream-FakeGpt"
     gpt_3_5_turbo_stream_GeekGpt = "gpt-3.5-turbo-stream-GeekGpt"
diff --git a/chatgpt_microservice/include/free_gpt.h b/chatgpt_microservice/include/free_gpt.h
index 86b2b04..83fda51 100644
--- a/chatgpt_microservice/include/free_gpt.h
+++ b/chatgpt_microservice/include/free_gpt.h
@@ -38,6 +38,8 @@ public:
     boost::asio::awaitable<void> fakeGpt(std::shared_ptr<Channel>, nlohmann::json);
     boost::asio::awaitable<void> vercel(std::shared_ptr<Channel>, nlohmann::json);
     boost::asio::awaitable<void> aivvm(std::shared_ptr<Channel>, nlohmann::json);
+    boost::asio::awaitable<void> berlin(std::shared_ptr<Channel>, nlohmann::json);
+    boost::asio::awaitable<void> chatGpt4Online(std::shared_ptr<Channel>, nlohmann::json);
 
 private:
     boost::asio::awaitable<std::expected<boost::beast::ssl_stream<boost::beast::tcp_stream>, std::string>>
diff --git a/chatgpt_microservice/src/free_gpt.cpp b/chatgpt_microservice/src/free_gpt.cpp
index 599025d..dc0acfc 100644
--- a/chatgpt_microservice/src/free_gpt.cpp
+++ b/chatgpt_microservice/src/free_gpt.cpp
@@ -1041,7 +1041,7 @@ boost::asio::awaitable<void> FreeGpt::binjie(std::shared_ptr<Channel> ch, nlohma
         co_return;
     }
 
-    auto ret = co_await sendRequestRecvChunk(ch, client.value(), req, 200, [&ch](std::string str) {
+    co_await sendRequestRecvChunk(ch, client.value(), req, 200, [&ch](std::string str) {
         boost::system::error_code err{};
         ch->try_send(err, std::move(str));
     });
@@ -2601,3 +2601,200 @@ boost::asio::awaitable<void> FreeGpt::aivvm(std::shared_ptr<Channel> ch, nlohman
     return_flag = false;
     co_return;
 }
+
+boost::asio::awaitable<void> FreeGpt::berlin(std::shared_ptr<Channel> ch, nlohmann::json json) {
+    co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
+    ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
+
+    auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
+
+    boost::system::error_code err{};
+    std::unordered_multimap<std::string, std::string> headers{
+        {"Accept", "*/*"},
+        {"content-type", "application/json"},
+        {"referer", "https://ai.berlin4h.top/"},
+        {"origin", "https://ai.berlin4h.top"},
+        {"Alt-Used", R"(ai.berlin4h.top)"},
+        {"Pragma", R"(no-cache)"},
+    };
+    std::string recv;
+    auto ret = Curl()
+                   .setUrl("https://ai.berlin4h.top/api/login")
+                   .setProxy(m_cfg.http_proxy)
+                   .setRecvHeadersCallback([](std::string) {})
+                   .setRecvBodyCallback([&](std::string str) mutable { recv.append(str); })
+                   .setBody([&] {
+                       constexpr std::string_view ask_json_str = R"({
+                            "account":"免费使用GPT3.5模型@163.com",
+                            "password":"659e945c2d004686bad1a75b708c962f"
+                        })";
+                       nlohmann::json ask_request = nlohmann::json::parse(ask_json_str, nullptr, false);
+                       SPDLOG_INFO("request: [{}]", ask_request.dump());
+                       return ask_request.dump();
+                   }())
+                   .clearHeaders()
+                   .setHttpHeaders(headers)
+                   .perform();
+    if (ret.has_value()) {
+        SPDLOG_ERROR("https://ai.berlin4h.top/api/login: [{}]", ret.value());
+        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
+        ch->try_send(err, ret.value());
+        co_return;  // login failed: no token to continue with, don't fall through to parse an empty body
+    }
+    SPDLOG_INFO("recv: {}", recv);
+    nlohmann::json login_rsp_json = nlohmann::json::parse(recv, nullptr, false);
+    if (login_rsp_json.is_discarded()) {
+        SPDLOG_ERROR("json parse error: [{}]", recv);
+        boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, std::format("json parse error: [{}]", recv)); });
+        co_return;
+    }
+    headers.emplace("token", login_rsp_json["data"]["token"].get<std::string>());
+    recv.clear();
+    ret = Curl()
+              .setUrl("https://ai.berlin4h.top/api/chat/completions")
+              .setProxy(m_cfg.http_proxy)
+              .setRecvHeadersCallback([](std::string) { return; })
+              .setRecvBodyCallback([&](std::string str) mutable {
+                  recv.append(str);
+                  while (true) {
+                      auto position = recv.find("\n");
+                      if (position == std::string::npos)
+                          break;
+                      auto msg = recv.substr(0, position + 1);
+                      recv.erase(0, position + 1);
+                      msg.pop_back();
+                      if (msg.empty())
+                          continue;
+                      boost::system::error_code err{};
+                      nlohmann::json line_json = nlohmann::json::parse(msg, nullptr, false);
+                      if (line_json.is_discarded()) {
+                          SPDLOG_ERROR("json parse error: [{}]", msg);
+                          boost::asio::post(ch->get_executor(),
+                                            [=] { ch->try_send(err, std::format("json parse error: [{}]", msg)); });
+                          continue;
+                      }
+                      auto message = line_json["content"].get<std::string>();
+                      if (message.empty())
+                          continue;
+                      boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, message); });
+                  }
+                  return;
+              })
+              .setBody([&] {
+                  constexpr std::string_view ask_json_str = R"({
+                        "prompt":"hello",
+                        "parentMessageId":"936a47d9-2d29-4569-9906-38e9686048da",
+                        "options":{
+                            "model":"gpt-3.5-turbo",
+                            "temperature":0,
+                            "presence_penalty":0,
+                            "frequency_penalty":0,
+                            "max_tokens":1888,
+                            "stream":false
+                        }
+                    })";
+                  nlohmann::json ask_request = nlohmann::json::parse(ask_json_str, nullptr, false);
+                  ask_request["prompt"] = prompt;
+                  ask_request["parentMessageId"] = createUuidString();
+                  std::string ask_request_str = ask_request.dump();
+                  SPDLOG_INFO("request: [{}]", ask_request_str);
+                  return ask_request_str;
+              }())
+              .clearHeaders()
+              .setHttpHeaders(headers)
+              .perform();
+    if (ret.has_value()) {
+        SPDLOG_ERROR("https://ai.berlin4h.top/api/chat/completions: [{}]", ret.value());
+        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
+        ch->try_send(err, ret.value());
+    }
+    co_return;
+}
+
+boost::asio::awaitable<void> FreeGpt::chatGpt4Online(std::shared_ptr<Channel> ch, nlohmann::json json) {
+    co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
+    ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
+
+    auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
+
+    boost::system::error_code err{};
+    std::unordered_multimap<std::string, std::string> headers{
+        {"Accept", "*/*"},
+        {"content-type", "application/x-www-form-urlencoded"},
+    };
+    std::string recv;
+    auto ret = Curl()
+                   .setUrl("https://chatgpt4online.org")
+                   .setProxy(m_cfg.http_proxy)
+                   .setRecvHeadersCallback([](std::string) {})
+                   .setRecvBodyCallback([&](std::string str) mutable { recv.append(str); })
+                   .perform();
+    if (ret.has_value()) {
+        SPDLOG_ERROR("https://chatgpt4online.org: [{}]", ret.value());
+        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
+        ch->try_send(err, ret.value());
+        co_return;  // fetch failed: recv is empty, nonce extraction below would only emit a second, misleading error
+    }
+    static std::string pattern{R"(data-nonce=".*")"};
+
+    std::vector<std::string> matches = findAll(pattern, recv);
+    if (matches.size() != 1) {
+        SPDLOG_ERROR("parsing login failed");
+        co_await ch->async_send(err, recv, use_nothrow_awaitable);
+        co_return;
+    }
+
+    std::regex reg("\"([^\"]*)\"");
+    std::sregex_iterator iter(matches[0].begin(), matches[0].end(), reg);
+    std::sregex_iterator end;
+    std::vector<std::string> results;
+    while (iter != end) {
+        results.emplace_back(iter->str(1));
+        iter++;
+    }
+    if (results.empty()) {
+        SPDLOG_ERROR("Failed to extract content");
+        co_await ch->async_send(err, "Failed to extract content", use_nothrow_awaitable);
+        co_return;
+    }
+    auto& nonce = results[0];
+    SPDLOG_INFO("data_nonce: {}", nonce);
+    ret = Curl()
+              .setUrl("https://chatgpt4online.org/rizq")
+              .setProxy(m_cfg.http_proxy)
+              .setRecvHeadersCallback([](std::string) { return; })
+              .setRecvBodyCallback([&](std::string str) mutable {
+                  boost::system::error_code err{};
+                  nlohmann::json line_json = nlohmann::json::parse(str, nullptr, false);
+                  if (line_json.is_discarded()) {
+                      SPDLOG_ERROR("json parse error: [{}]", str);
+                      boost::asio::post(ch->get_executor(),
+                                        [=] { ch->try_send(err, std::format("json parse error: [{}]", str)); });
+                      return;
+                  }
+                  auto message = line_json["data"].get<std::string>();
+                  if (message.empty())
+                      return;
+                  boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, message); });
+              })
+              .setBody([&] {
+                  std::multimap<std::string, std::string> params{
+                      {"_wpnonce", nonce},
+                      {"post_id", "58"},
+                      {"url", "https://chatgpt4online.org"},
+                      {"action", "wpaicg_chat_shortcode_message"},
+                      {"message", prompt},
+                      {"bot_id", "3405"},
+                  };
+                  return paramsToQueryStr(params);
+              }())
+              .clearHeaders()
+              .setHttpHeaders(headers)
+              .perform();
+    if (ret.has_value()) {
+        SPDLOG_ERROR("https://chatgpt4online.org/rizq: [{}]", ret.value());
+        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
+        ch->try_send(err, ret.value());
+    }
+    co_return;
+}
diff --git a/chatgpt_microservice/src/main.cpp b/chatgpt_microservice/src/main.cpp
index ec7e853..f59c37c 100644
--- a/chatgpt_microservice/src/main.cpp
+++ b/chatgpt_microservice/src/main.cpp
@@ -355,6 +355,8 @@ int main(int, char** argv) {
     ADD_METHOD("gpt-3.5-turbo-stream-FakeGpt", FreeGpt::fakeGpt);
     ADD_METHOD("gpt-3.5-turbo-stream-Vercel", FreeGpt::vercel);
     ADD_METHOD("gpt-3.5-turbo-stream-aivvm", FreeGpt::aivvm);
+    ADD_METHOD("gpt-3.5-turbo-stream-Berlin", FreeGpt::berlin);
+    ADD_METHOD("gpt-4-ChatGpt4Online", FreeGpt::chatGpt4Online);
 
     SPDLOG_INFO("active provider:");
     for (auto& [provider, _] : gpt_function)