Mirror of https://github.com/Balshgit/gpt_chat_bot.git (synced 2025-09-11 22:30:41 +03:00)

Commit 31ad4e7dcb (parent 18e798a399)
add gpt-4-ChatGpt4Online and gpt-3.5-turbo-stream-Berlin providers (#56)
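In short: the commit adds two new free providers, berlin (ai.berlin4h.top) and chatGpt4Online (chatgpt4online.org). It exposes matching model names in the bot's ChatGptModelsEnum, declares and implements the two coroutines in the FreeGpt backend, and registers them in main() via ADD_METHOD.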
@@ -54,6 +54,8 @@ class ChatGptModelsEnum(StrEnum):
     gpt_3_5_turbo_stream_gptalk = "gpt-3.5-turbo-stream-gptalk"
     gpt_3_5_turbo_stream_ChatgptDemo = "gpt-3.5-turbo-stream-ChatgptDemo"
     llama2 = "llama2"
+    gpt_3_5_turbo_stream_Berlin = "gpt-3.5-turbo-stream-Berlin"
+    gpt_4_ChatGpt4Online = "gpt-4-ChatGpt4Online"
     gpt_3_5_turbo_stream_chatGptAi = "gpt-3.5-turbo-stream-chatGptAi"
     gpt_3_5_turbo_stream_FakeGpt = "gpt-3.5-turbo-stream-FakeGpt"
     gpt_3_5_turbo_stream_GeekGpt = "gpt-3.5-turbo-stream-GeekGpt"
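Note (not part of the diff): the enum values presumably double as the provider names the bot sends to the backend, so they must match the ADD_METHOD strings registered in main() (last hunk) character for character. A minimal sketch of that contract, assuming Python 3.11+ StrEnum semantics:

    from enum import StrEnum  # available since Python 3.11

    class ChatGptModelsEnum(StrEnum):
        # the two members added by this commit
        gpt_3_5_turbo_stream_Berlin = "gpt-3.5-turbo-stream-Berlin"
        gpt_4_ChatGpt4Online = "gpt-4-ChatGpt4Online"

    # StrEnum members compare equal to their string values, so passing the member
    # where a string is expected yields exactly the registered provider name.
    assert ChatGptModelsEnum.gpt_4_ChatGpt4Online == "gpt-4-ChatGpt4Online"
    assert ChatGptModelsEnum.gpt_3_5_turbo_stream_Berlin == "gpt-3.5-turbo-stream-Berlin"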
@@ -38,6 +38,8 @@ public:
     boost::asio::awaitable<void> fakeGpt(std::shared_ptr<Channel>, nlohmann::json);
     boost::asio::awaitable<void> vercel(std::shared_ptr<Channel>, nlohmann::json);
     boost::asio::awaitable<void> aivvm(std::shared_ptr<Channel>, nlohmann::json);
+    boost::asio::awaitable<void> berlin(std::shared_ptr<Channel>, nlohmann::json);
+    boost::asio::awaitable<void> chatGpt4Online(std::shared_ptr<Channel>, nlohmann::json);

 private:
     boost::asio::awaitable<std::expected<boost::beast::ssl_stream<boost::beast::tcp_stream>, std::string>>
@@ -1041,7 +1041,7 @@ boost::asio::awaitable<void> FreeGpt::binjie(std::shared_ptr<Channel> ch, nlohma
         co_return;
     }

-    auto ret = co_await sendRequestRecvChunk(ch, client.value(), req, 200, [&ch](std::string str) {
+    co_await sendRequestRecvChunk(ch, client.value(), req, 200, [&ch](std::string str) {
         boost::system::error_code err{};
         ch->try_send(err, std::move(str));
     });
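Note: the binjie change only drops the unused `auto ret =` binding in front of sendRequestRecvChunk; presumably the returned value was never read, and the call's side effect (streaming chunks into the channel) is unchanged.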
@@ -2601,3 +2601,198 @@ boost::asio::awaitable<void> FreeGpt::aivvm(std::shared_ptr<Channel> ch, nlohman
     return_flag = false;
     co_return;
 }
+
+boost::asio::awaitable<void> FreeGpt::berlin(std::shared_ptr<Channel> ch, nlohmann::json json) {
+    co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
+    ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
+
+    auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
+
+    boost::system::error_code err{};
+    std::unordered_multimap<std::string, std::string> headers{
+        {"Accept", "*/*"},
+        {"content-type", "application/json"},
+        {"referer", "https://ai.berlin4h.top/"},
+        {"origin", "https://ai.berlin4h.top"},
+        {"Alt-Used", R"(ai.berlin4h.top)"},
+        {"Pragma", R"(no-cache)"},
+    };
+    std::string recv;
+    auto ret = Curl()
+                   .setUrl("https://ai.berlin4h.top/api/login")
+                   .setProxy(m_cfg.http_proxy)
+                   .setRecvHeadersCallback([](std::string) {})
+                   .setRecvBodyCallback([&](std::string str) mutable { recv.append(str); })
+                   .setBody([&] {
+                       constexpr std::string_view ask_json_str = R"({
+                           "account":"免费使用GPT3.5模型@163.com",
+                           "password":"659e945c2d004686bad1a75b708c962f"
+                       })";
+                       nlohmann::json ask_request = nlohmann::json::parse(ask_json_str, nullptr, false);
+                       SPDLOG_INFO("request: [{}]", ask_request.dump());
+                       return ask_request.dump();
+                   }())
+                   .clearHeaders()
+                   .setHttpHeaders(headers)
+                   .perform();
+    if (ret.has_value()) {
+        SPDLOG_ERROR("https://ai.berlin4h.top/api/login: [{}]", ret.value());
+        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
+        ch->try_send(err, ret.value());
+    }
+    SPDLOG_INFO("recv: {}", recv);
+    nlohmann::json login_rsp_json = nlohmann::json::parse(recv, nullptr, false);
+    if (login_rsp_json.is_discarded()) {
+        SPDLOG_ERROR("json parse error: [{}]", recv);
+        boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, std::format("json parse error: [{}]", recv)); });
+        co_return;
+    }
+    headers.emplace("token", login_rsp_json["data"]["token"].get<std::string>());
+    recv.clear();
+    ret = Curl()
+              .setUrl("https://ai.berlin4h.top/api/chat/completions")
+              .setProxy(m_cfg.http_proxy)
+              .setRecvHeadersCallback([](std::string) { return; })
+              .setRecvBodyCallback([&](std::string str) mutable {
+                  recv.append(str);
+                  while (true) {
+                      auto position = recv.find("\n");
+                      if (position == std::string::npos)
+                          break;
+                      auto msg = recv.substr(0, position + 1);
+                      recv.erase(0, position + 1);
+                      msg.pop_back();
+                      if (msg.empty())
+                          continue;
+                      boost::system::error_code err{};
+                      nlohmann::json line_json = nlohmann::json::parse(msg, nullptr, false);
+                      if (line_json.is_discarded()) {
+                          SPDLOG_ERROR("json parse error: [{}]", msg);
+                          boost::asio::post(ch->get_executor(),
+                                            [=] { ch->try_send(err, std::format("json parse error: [{}]", msg)); });
+                          continue;
+                      }
+                      auto message = line_json["content"].get<std::string>();
+                      if (message.empty())
+                          continue;
+                      boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, message); });
+                  }
+                  return;
+              })
+              .setBody([&] {
+                  constexpr std::string_view ask_json_str = R"({
+                      "prompt":"hello",
+                      "parentMessageId":"936a47d9-2d29-4569-9906-38e9686048da",
+                      "options":{
+                          "model":"gpt-3.5-turbo",
+                          "temperature":0,
+                          "presence_penalty":0,
+                          "frequency_penalty":0,
+                          "max_tokens":1888,
+                          "stream":false
+                      }
+                  })";
+                  nlohmann::json ask_request = nlohmann::json::parse(ask_json_str, nullptr, false);
+                  ask_request["prompt"] = prompt;
+                  ask_request["parentMessageId"] = createUuidString();
+                  std::string ask_request_str = ask_request.dump();
+                  SPDLOG_INFO("request: [{}]", ask_request_str);
+                  return ask_request_str;
+              }())
+              .clearHeaders()
+              .setHttpHeaders(headers)
+              .perform();
+    if (ret.has_value()) {
+        SPDLOG_ERROR("https://ai.berlin4h.top/api/chat/completions: [{}]", ret.value());
+        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
+        ch->try_send(err, ret.value());
+    }
+    co_return;
+}
+
+boost::asio::awaitable<void> FreeGpt::chatGpt4Online(std::shared_ptr<Channel> ch, nlohmann::json json) {
+    co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
+    ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
+
+    auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
+
+    boost::system::error_code err{};
+    std::unordered_multimap<std::string, std::string> headers{
+        {"Accept", "*/*"},
+        {"content-type", "application/x-www-form-urlencoded"},
+    };
+    std::string recv;
+    auto ret = Curl()
+                   .setUrl("https://chatgpt4online.org")
+                   .setProxy(m_cfg.http_proxy)
+                   .setRecvHeadersCallback([](std::string) {})
+                   .setRecvBodyCallback([&](std::string str) mutable { recv.append(str); })
+                   .perform();
+    if (ret.has_value()) {
+        SPDLOG_ERROR("https://chatgpt4online.org: [{}]", ret.value());
+        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
+        ch->try_send(err, ret.value());
+    }
+    static std::string pattern{R"(data-nonce=".*")"};
+
+    std::vector<std::string> matches = findAll(pattern, recv);
+    if (matches.size() != 1) {
+        SPDLOG_ERROR("parsing login failed");
+        co_await ch->async_send(err, recv, use_nothrow_awaitable);
+        co_return;
+    }
+
+    std::regex reg("\"([^\"]*)\"");
+    std::sregex_iterator iter(matches[0].begin(), matches[0].end(), reg);
+    std::sregex_iterator end;
+    std::vector<std::string> results;
+    while (iter != end) {
+        results.emplace_back(iter->str(1));
+        iter++;
+    }
+    if (results.empty()) {
+        SPDLOG_ERROR("Failed to extract content");
+        co_await ch->async_send(err, "Failed to extract content", use_nothrow_awaitable);
+        co_return;
+    }
+    auto& nonce = results[0];
+    SPDLOG_INFO("data_nonce: {}", nonce);
+    ret = Curl()
+              .setUrl("https://chatgpt4online.org/rizq")
+              .setProxy(m_cfg.http_proxy)
+              .setRecvHeadersCallback([](std::string) { return; })
+              .setRecvBodyCallback([&](std::string str) mutable {
+                  boost::system::error_code err{};
+                  nlohmann::json line_json = nlohmann::json::parse(str, nullptr, false);
+                  if (line_json.is_discarded()) {
+                      SPDLOG_ERROR("json parse error: [{}]", str);
+                      boost::asio::post(ch->get_executor(),
+                                        [=] { ch->try_send(err, std::format("json parse error: [{}]", str)); });
+                      return;
+                  }
+                  auto message = line_json["data"].get<std::string>();
+                  if (message.empty())
+                      return;
+                  boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, message); });
+              })
+              .setBody([&] {
+                  std::multimap<std::string, std::string> params{
+                      {"_wpnonce", nonce},
+                      {"post_id", "58"},
+                      {"url", "https://chatgpt4online.org"},
+                      {"action", "wpaicg_chat_shortcode_message"},
+                      {"message", prompt},
+                      {"bot_id", "3405"},
+                  };
+                  return paramsToQueryStr(params);
+              }())
+              .clearHeaders()
+              .setHttpHeaders(headers)
+              .perform();
+    if (ret.has_value()) {
+        SPDLOG_ERROR("https://chatgpt4online.org/rizq: [{}]", ret.value());
+        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
+        ch->try_send(err, ret.value());
+    }
+    co_return;
+}
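Note on the two new providers: berlin first POSTs hard-coded demo credentials to https://ai.berlin4h.top/api/login (the account string translates to "free use of the GPT-3.5 model"), takes data.token from the response, then calls /api/chat/completions and splits the streamed body on newlines, forwarding each line's "content" field to the channel. chatGpt4Online fetches https://chatgpt4online.org, extracts the data-nonce value with a regex, POSTs a form-encoded wpaicg_chat_shortcode_message request to /rizq, and forwards the "data" field of the JSON response. In both coroutines the error branches after perform() log and forward the error but do not co_return immediately; the subsequent parse/validation steps catch the failure instead.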
@@ -355,6 +355,8 @@ int main(int, char** argv) {
     ADD_METHOD("gpt-3.5-turbo-stream-FakeGpt", FreeGpt::fakeGpt);
     ADD_METHOD("gpt-3.5-turbo-stream-Vercel", FreeGpt::vercel);
     ADD_METHOD("gpt-3.5-turbo-stream-aivvm", FreeGpt::aivvm);
+    ADD_METHOD("gpt-3.5-turbo-stream-Berlin", FreeGpt::berlin);
+    ADD_METHOD("gpt-4-ChatGpt4Online", FreeGpt::chatGpt4Online);

     SPDLOG_INFO("active provider:");
     for (auto& [provider, _] : gpt_function)
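Note: ADD_METHOD appears to bind each provider-name string to the corresponding FreeGpt member coroutine in the gpt_function table that the startup log then iterates, so the two new strings must stay in sync with the ChatGptModelsEnum values added in the first hunk.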