mirror of
https://github.com/Balshgit/gpt_chat_bot.git
synced 2025-09-10 17:20:41 +03:00
remove inactive provider (#68)
This commit is contained in:
parent
4117a2f467
commit
a8276167d8
@ -49,20 +49,16 @@ class ChatGptModelsEnum(StrEnum):
|
||||
gpt_3_5_turbo_stream_GptGo = "gpt-3.5-turbo-stream-GptGo"
|
||||
gpt_3_5_turbo_stream_FreeGpt = "gpt-3.5-turbo-stream-FreeGpt"
|
||||
gpt_3_5_turbo_stream_Cromicle = "gpt-3.5-turbo-stream-Cromicle"
|
||||
gpt_3_5_turbo_stream_gptalk = "gpt-3.5-turbo-stream-gptalk"
|
||||
gpt_3_5_turbo_stream_ChatgptDemo = "gpt-3.5-turbo-stream-ChatgptDemo"
|
||||
gpt_3_5_turbo_stream_gptTalkRu = "gpt-3.5-turbo--stream-gptTalkRu"
|
||||
Llama_2_70b_chat_hf_stream_DeepInfra = "Llama-2-70b-chat-hf-stream-DeepInfra"
|
||||
gpt_4_stream_aivvm = "gpt-4-stream-aivvm"
|
||||
gpt_3_5_turbo_stream_AiChatOnline = "gpt-3.5-turbo-stream-AiChatOnline"
|
||||
llama2_70B = "llama2-70B"
|
||||
gpt_3_5_turbo_gptChatly = "gpt-3.5-turbo-gptChatly"
|
||||
gpt_3_5_turbo_stream_Berlin = "gpt-3.5-turbo-stream-Berlin"
|
||||
gpt_4_ChatGpt4Online = "gpt-4-ChatGpt4Online"
|
||||
gpt_3_5_turbo_stream_chatGptAi = "gpt-3.5-turbo-stream-chatGptAi"
|
||||
gpt_3_5_turbo_stream_GeekGpt = "gpt-3.5-turbo-stream-GeekGpt"
|
||||
gpt_3_5_turbo_stream_gptforlove = "gpt-3.5-turbo-stream-gptforlove"
|
||||
gpt_3_5_turbo_stream_ChatForAi = "gpt-3.5-turbo-stream-ChatForAi"
|
||||
|
||||
@classmethod
|
||||
def values(cls) -> set[str]:
|
||||
@ -80,10 +76,8 @@ class ChatGptModelsEnum(StrEnum):
|
||||
priority = 3
|
||||
case "gpt-3.5-turbo-stream-GeekGpt":
|
||||
priority = 2
|
||||
case "gpt-3.5-turbo-stream-gptalk":
|
||||
priority = 1
|
||||
case "llama2":
|
||||
priority = 1
|
||||
priority = 2
|
||||
fields = {"model": model, "priority": priority}
|
||||
models.append(fields)
|
||||
return models
|
||||
@ -92,4 +86,8 @@ class ChatGptModelsEnum(StrEnum):
|
||||
def _deprecated() -> set[str]:
|
||||
return {
|
||||
"gpt-3.5-turbo-stream-gptforlove",
|
||||
"gpt-3.5-turbo-stream-gptalk",
|
||||
"gpt-3.5-turbo-stream-ChatForAi",
|
||||
"gpt-4-ChatGpt4Online",
|
||||
"gpt-3.5-turbo--stream-gptTalkRu",
|
||||
}
|
||||
|
@ -2559,4 +2559,463 @@ std::string md5(const std::string& str, bool reverse = true) {
|
||||
if (reverse)
|
||||
std::ranges::reverse(md5_str);
|
||||
return md5_str;
|
||||
}
|
||||
}
|
||||
// Provider adapter for gptalk.net. Three HTTP round-trips on one Curl handle:
//   1. anonymous "fingerprint" login  -> bearer auth token
//   2. POST the prompt                -> per-request stream token
//   3. GET the event stream          -> forward content deltas to `ch`
boost::asio::awaitable<void> FreeGpt::gptalk(std::shared_ptr<Channel> ch, nlohmann::json json) {
    // Hop onto the worker pool so blocking curl transfers stay off the channel's executor.
    co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
    // Guarantee the channel is closed on every exit path.
    ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};

    boost::system::error_code err{};
    auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();

    // Random hex string used as the anonymous login fingerprint:
    // `length` hex digits, left-padded with '0' up to length*2 characters.
    auto generate_token_hex = [](int32_t length) {
        std::random_device rd;
        std::stringstream ss;
        std::mt19937 gen(rd());
        std::uniform_int_distribution<> dis(0, 15);
        for (int i = 0; i < length; ++i)
            ss << std::hex << dis(gen);
        std::string token = ss.str();
        token = std::string(length * 2 - token.length(), '0') + token;
        return token;
    };

    uint64_t timestamp = getTimestamp<std::chrono::seconds>();
    std::string recv;
    Curl curl;
    std::unordered_multimap<std::string, std::string> headers{
        {"Accept", "*/*"}, {"Content-Type", "application/json"},
        {"authority", "gptalk.net"}, {"origin", "https://gptalk.net"},
        {"x-auth-appid", "2229"}, {"x-auth-openid", ""},
        {"x-auth-platform", ""}, {"x-auth-timestamp", std::to_string(timestamp)},
    };
    // Step 1: login with a random fingerprint; the response body accumulates in `recv`.
    auto ret = curl.setUrl("https://gptalk.net/api/chatgpt/user/login")
                   .setProxy(m_cfg.http_proxy)
                   .setRecvHeadersCallback([&](std::string) {})
                   .setRecvBodyCallback([&](std::string str) {
                       recv.append(str);
                       return;
                   })
                   .setBody([&] {
                       nlohmann::json login_json;
                       login_json["fingerprint"] = generate_token_hex(16);
                       login_json["platform"] = "fingerprint";
                       std::string request_str = login_json.dump();
                       return request_str;
                   }())
                   .clearHeaders()
                   .setHttpHeaders(headers)
                   .perform();
    if (ret.has_value()) {
        SPDLOG_ERROR("{}", ret.value());
        // Report the failure on the channel's executor, then bail out.
        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
        ch->try_send(err, ret.value());
        co_return;
    }
    SPDLOG_INFO("login rsp: [{}]", recv);
    nlohmann::json auth_rsp = nlohmann::json::parse(recv, nullptr, false);
    auto auth_token = auth_rsp["data"]["token"].get<std::string>();
    SPDLOG_INFO("token: [{}]", auth_token);

    auto auth_str = std::format("Bearer {}", auth_token);
    headers.emplace("authorization", auth_str);

    // Step 2: submit the prompt; the reply carries the one-shot stream token.
    recv.clear();
    ret = curl.setUrl("https://gptalk.net/api/chatgpt/chatapi/text")
              .setProxy(m_cfg.http_proxy)
              .setRecvHeadersCallback([&](std::string) {})
              .setRecvBodyCallback([&](std::string str) {
                  recv.append(str);
                  return;
              })
              .setBody([&] {
                  // Template request; `created_at` and `content` are overwritten below.
                  constexpr std::string_view json_str = R"({
"content":"hello",
"accept":"stream",
"from":1,
"model":"gpt-3.5-turbo",
"is_mobile":0,
"user_agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36",
"is_open_ctx":0,
"prompt":"",
"roid":111,
"temperature":0,
"ctx_msg_count":3,
"created_at":1696655321
})";
                  nlohmann::json request = nlohmann::json::parse(json_str, nullptr, false);
                  request["created_at"] = timestamp;
                  request["content"] = prompt;
                  auto request_str = request.dump();
                  return request_str;
              }())
              .clearHeaders()
              .setHttpHeaders(headers)
              .perform();
    if (ret.has_value()) {
        SPDLOG_ERROR("{}", ret.value());
        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
        ch->try_send(err, ret.value());
        co_return;
    }
    SPDLOG_INFO("input.recv: [{}]", recv);
    nlohmann::json get_text_rsp = nlohmann::json::parse(recv, nullptr, false);
    auto token = get_text_rsp["data"]["token"].get<std::string>();
    SPDLOG_INFO("token: [{}]", token);
    recv.clear();

    // Step 3: consume the "data: {...}" line stream. The server appears to
    // resend the cumulative message each event, so `last_message` tracks what
    // was already delivered and only the new suffix is forwarded.
    std::string last_message;
    auto url = std::format("https://gptalk.net/api/chatgpt/chatapi/stream?token={}", token);
    ret = curl.setUrl(url)
              .setProxy(m_cfg.http_proxy)
              .setRecvHeadersCallback([&](std::string) {})
              .setOpt(CURLOPT_HTTPGET, 1L)
              .setRecvBodyCallback([&](std::string str) {
                  recv.append(str);
                  while (true) {
                      // Process only complete lines; a partial tail stays buffered in `recv`.
                      auto position = recv.find("\n");
                      if (position == std::string::npos)
                          break;
                      auto msg = recv.substr(0, position + 1);
                      recv.erase(0, position + 1);
                      msg.pop_back();  // drop the trailing '\n'
                      if (msg.empty() || !msg.contains("content") || !msg.starts_with("data: "))
                          continue;
                      msg.erase(0, 6);  // strip the "data: " SSE prefix
                      boost::system::error_code err{};
                      nlohmann::json line_json = nlohmann::json::parse(msg, nullptr, false);
                      if (line_json.is_discarded()) {
                          SPDLOG_ERROR("json parse error: [{}]", msg);
                          boost::asio::post(ch->get_executor(),
                                            [=] { ch->try_send(err, std::format("json parse error: [{}]", msg)); });
                          continue;
                      }
                      auto content = line_json["content"].get<std::string>();
                      // Convert the cumulative payload into an incremental delta.
                      if (last_message.empty())
                          last_message = content;
                      else {
                          auto count = last_message.size();
                          last_message = content;
                          content.erase(0, count);
                      }
                      if (content.empty())
                          continue;
                      boost::asio::post(ch->get_executor(),
                                        [=, content = std::move(content)] { ch->try_send(err, content); });
                  }
              })
              .clearHeaders()
              .setHttpHeaders([] -> auto& {
                  // Static header set for the stream call (no timestamp/authorization entries).
                  static std::unordered_multimap<std::string, std::string> headers{
                      {"Accept", "*/*"}, {"Content-Type", "application/json"},
                      {"authority", "gptalk.net"}, {"origin", "https://gptalk.net"},
                      {"x-auth-appid", "2229"}, {"x-auth-openid", ""},
                      {"x-auth-platform", ""},
                  };
                  return headers;
              }())
              .perform();
    if (ret.has_value()) {
        SPDLOG_ERROR("{}", ret.value());
        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
        ch->try_send(err, ret.value());
    }
    co_return;
}
|
||||
|
||||
// Provider adapter for gptforlove (backend api.gptplus.one). The request must
// carry a per-call "secret" obtained from the external zeus service; the reply
// is newline-delimited JSON whose delta contents are forwarded to `ch`.
boost::asio::awaitable<void> FreeGpt::gptForLove(std::shared_ptr<Channel> ch, nlohmann::json json) {
    co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
    // Guarantee the channel is closed on every exit path.
    ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};

    boost::system::error_code err{};
    auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();

    // Fetch the per-request "secret" from the zeus side-service.
    auto secret_rsp = callZeus(std::format("{}/gptforlove", m_cfg.zeus), "{}");
    if (!secret_rsp.has_value()) {
        SPDLOG_ERROR("callZeus error: {}", secret_rsp.error());
        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
        ch->try_send(err, secret_rsp.error());
        co_return;
    }
    SPDLOG_INFO("zeus: [{}]", secret_rsp.value().dump());
    static std::unordered_multimap<std::string, std::string> headers{
        {"Content-Type", "application/json"},
        {"referer", "https://ai18.gptforlove.com/"},
        {"origin", "https://ai18.gptforlove.com"},
        {"authority", "api.gptplus.one"},
    };
    std::string recv;
    auto ret = Curl()
                   .setUrl("https://api.gptplus.one/chat-process")
                   .setProxy(m_cfg.http_proxy)
                   .setRecvBodyCallback([&](std::string str) {
                       recv.append(str);
                       while (true) {
                           // Handle one complete line per iteration; partial data stays buffered.
                           auto position = recv.find("\n");
                           if (position == std::string::npos)
                               break;
                           auto msg = recv.substr(0, position + 1);
                           recv.erase(0, position + 1);
                           msg.pop_back();  // drop the trailing '\n'
                           // Rate-limit notice (Chinese: "more than 5 questions within 10
                           // minutes"): forward it verbatim and stop parsing this chunk.
                           if (msg.contains("10分钟内提问超过了5次")) {
                               boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, msg); });
                               return;
                           }
                           if (msg.empty() || !msg.contains("content"))
                               continue;
                           boost::system::error_code err{};
                           nlohmann::json line_json = nlohmann::json::parse(msg, nullptr, false);
                           if (line_json.is_discarded()) {
                               SPDLOG_ERROR("json parse error: [{}]", msg);
                               boost::asio::post(ch->get_executor(), [=] {
                                   ch->try_send(err, std::format("json parse error: [{}]", msg));
                               });
                               continue;
                           }
                           auto str = line_json["detail"]["choices"][0]["delta"]["content"].get<std::string>();
                           if (!str.empty())
                               boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, str); });
                       }
                   })
                   .setBody([&] {
                       // Template request; `secret` and `prompt` are filled in below.
                       constexpr std::string_view request_str{R"({
"prompt": "hello",
"options": {},
"systemMessage": "You are ChatGPT, the version is GPT3.5, a large language model trained by OpenAI. Follow the user's instructions carefully.",
"temperature": 0.8,
"top_p": 1,
"secret": "U2FsdGVkX18vdtlMj0nP1LoUzEqJTP0is+Q2+bQJNMk=",
"stream": false
})"};
                       nlohmann::json request = nlohmann::json::parse(request_str, nullptr, false);
                       request["secret"] = secret_rsp.value()["secret"];
                       request["prompt"] = prompt;
                       auto str = request.dump();
                       SPDLOG_INFO("request : [{}]", str);
                       return str;
                   }())
                   .setHttpHeaders(headers)
                   .perform();
    if (ret.has_value()) {
        SPDLOG_ERROR("{}", ret.value());
        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
        ch->try_send(err, ret.value());
    }
    co_return;
}
|
||||
// Provider adapter for chatforai.store. The request is authenticated by a
// hex SHA-256 signature over "timestamp:conversationId:prompt:7YN8z6d6";
// the raw response body is streamed to `ch` unmodified.
boost::asio::awaitable<void> FreeGpt::chatForAi(std::shared_ptr<Channel> ch, nlohmann::json json) {
    co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));

    boost::system::error_code err{};
    // Guarantee the channel is closed on every exit path.
    ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
    auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();

    // Hex-encoded SHA-256 of "timestamp:id:message:7YN8z6d6" (site's fixed salt).
    auto generate_signature = [](uint64_t timestamp, const std::string& message, const std::string& id) {
        std::string s = std::to_string(timestamp) + ":" + id + ":" + message + ":7YN8z6d6";
        unsigned char hash[SHA256_DIGEST_LENGTH];
        SHA256_CTX sha256;
        if (!SHA256_Init(&sha256))
            throw std::runtime_error("SHA-256 initialization failed");
        if (!SHA256_Update(&sha256, s.c_str(), s.length()))
            throw std::runtime_error("SHA-256 update failed");
        if (!SHA256_Final(hash, &sha256))
            throw std::runtime_error("SHA-256 finalization failed");
        std::stringstream ss;
        for (int i = 0; i < SHA256_DIGEST_LENGTH; i++)
            ss << std::hex << std::setw(2) << std::setfill('0') << static_cast<int>(hash[i]);
        return ss.str();
    };
    static std::unordered_multimap<std::string, std::string> headers{
        {"Content-Type", "application/json"},
        {"Origin", "https://chatforai.store"},
        {"Referer", "https://chatforai.store/?r=b"},
    };
    auto ret = Curl()
                   .setUrl("https://chatforai.store/api/handle/provider-openai")
                   .setProxy(m_cfg.http_proxy)
                   .setRecvHeadersCallback([](std::string) { return; })
                   .setRecvBodyCallback([&](std::string str) {
                       // Pass chunks straight through on the channel's executor.
                       boost::asio::post(ch->get_executor(), [=, str = std::move(str)] { ch->try_send(err, str); });
                       return;
                   })
                   .setBody([&] {
                       uint64_t timestamp = getTimestamp();
                       // Template request; id, timestamp, sign, messages and prompt
                       // are patched in below before dumping.
                       constexpr std::string_view request_str{R"({
"conversationId":"id_1701845910554",
"conversationType":"chat_continuous",
"botId":"chat_continuous",
"globalSettings":{
"baseUrl":"https://api.openai.com",
"model":"gpt-3.5-turbo",
"messageHistorySize":5,
"temperature":0.7,
"top_p":1,
"stream":false
},
"botSettings":{
},
"prompt":"hello",
"messages":[
{
"role":"user",
"content":"hello"
}
],
"timestamp":1701845910677,
"sign":"fd7fac179ff93e8745b3c7a61075e3d8062a795e7ca1cd3d7cf24e3303de7a95"
})"};
                       nlohmann::json request = nlohmann::json::parse(request_str, nullptr, false);
                       auto conversation_id = std::format("id_{}", timestamp - 123);
                       request["conversationId"] = conversation_id;
                       request["timestamp"] = timestamp;
                       request["sign"] = generate_signature(timestamp, prompt, conversation_id);
                       request["messages"] = getConversationJson(json);
                       request["prompt"] = prompt;
                       auto str = request.dump();
                       SPDLOG_INFO("request : [{}]", str);
                       return str;
                   }())
                   .setHttpHeaders(headers)
                   .perform();
    if (ret.has_value()) {
        // NOTE(review): unlike sibling handlers this path does not SPDLOG the
        // error before forwarding it — confirm whether that is intentional.
        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
        ch->try_send(err, ret.value());
    }
    co_return;
}
|
||||
|
||||
// Provider adapter for chatgpt4online.org (WordPress "wpaicg" chat plugin).
// Flow: GET the landing page, scrape the `data-nonce` attribute out of the
// HTML, then POST the prompt as a form-encoded wpaicg shortcode message and
// forward the JSON "data" field to `ch`.
boost::asio::awaitable<void> FreeGpt::chatGpt4Online(std::shared_ptr<Channel> ch, nlohmann::json json) {
    co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
    // Guarantee the channel is closed on every exit path.
    ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};

    auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();

    boost::system::error_code err{};
    std::unordered_multimap<std::string, std::string> headers{
        {"Accept", "*/*"},
        {"content-type", "application/x-www-form-urlencoded"},
    };
    Curl curl;
    std::string recv;
    // Step 1: fetch the landing page to scrape the anti-CSRF nonce.
    auto ret = curl.setUrl("https://chatgpt4online.org")
                   .setProxy(m_cfg.http_proxy)
                   .setRecvHeadersCallback([](std::string) {})
                   .setRecvBodyCallback([&](std::string str) mutable { recv.append(str); })
                   .perform();
    if (ret.has_value()) {
        SPDLOG_ERROR("{}", ret.value());
        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
        ch->try_send(err, ret.value());
        // Fix: bail out after a failed page fetch. Previously control fell
        // through and attempted to scrape the (empty/partial) page, unlike
        // every sibling handler which co_returns here.
        co_return;
    }
    static std::string pattern{R"(data-nonce=".*")"};

    std::vector<std::string> matches = findAll(pattern, recv);
    if (matches.size() != 1) {
        SPDLOG_ERROR("parsing login failed");
        co_await ch->async_send(err, recv, use_nothrow_awaitable);
        co_return;
    }

    // Pull the quoted substrings out of the matched attribute; the first
    // quoted value is the nonce itself.
    std::regex reg("\"([^\"]*)\"");
    std::sregex_iterator iter(matches[0].begin(), matches[0].end(), reg);
    std::sregex_iterator end;
    std::vector<std::string> results;
    while (iter != end) {
        results.emplace_back(iter->str(1));
        iter++;
    }
    if (results.empty()) {
        SPDLOG_ERROR("Failed to extract content");
        co_await ch->async_send(err, "Failed to extract content", use_nothrow_awaitable);
        co_return;
    }
    auto& nonce = results[0];
    SPDLOG_INFO("data_nonce: {}", nonce);
    // Step 2: submit the prompt through the wpaicg AJAX endpoint.
    ret = curl.setUrl("https://chatgpt4online.org/rizq")
              .setProxy(m_cfg.http_proxy)
              .setRecvHeadersCallback([](std::string) { return; })
              .setRecvBodyCallback([&](std::string str) mutable {
                  boost::system::error_code err{};
                  nlohmann::json line_json = nlohmann::json::parse(str, nullptr, false);
                  if (line_json.is_discarded()) {
                      SPDLOG_ERROR("json parse error: [{}]", str);
                      boost::asio::post(ch->get_executor(),
                                        [=] { ch->try_send(err, std::format("json parse error: [{}]", str)); });
                      return;
                  }
                  auto message = line_json["data"].get<std::string>();
                  if (message.empty())
                      return;
                  boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, message); });
              })
              .setBody([&] {
                  // Form fields expected by the wpaicg chat shortcode endpoint.
                  std::multimap<std::string, std::string> params{
                      {"_wpnonce", nonce},
                      {"post_id", "58"},
                      {"url", "https://chatgpt4online.org"},
                      {"action", "wpaicg_chat_shortcode_message"},
                      {"message", prompt},
                      {"bot_id", "3405"},
                  };
                  return paramsToQueryStr(params);
              }())
              .clearHeaders()
              .setHttpHeaders(headers)
              .perform();
    if (ret.has_value()) {
        SPDLOG_ERROR("{}", ret.value());
        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
        ch->try_send(err, ret.value());
    }
    co_return;
}
|
||||
|
||||
// Provider adapter for gpttalk.ru. Single POST of the whole conversation; the
// streamed response body is forwarded to `ch` chunk by chunk without parsing.
boost::asio::awaitable<void> FreeGpt::gptTalkru(std::shared_ptr<Channel> ch, nlohmann::json json) {
    co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
    // Guarantee the channel is closed on every exit path.
    ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};

    // NOTE(review): `prompt` is not used below (the body sends the whole
    // conversation via getConversationJson); the .at() chain still validates
    // the incoming json shape by throwing on a missing key — confirm intent.
    auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();

    boost::system::error_code err{};
    std::unordered_multimap<std::string, std::string> headers{
        {"Accept", "application/json, text/plain, */*"},
        {"content-type", "application/json"},
        {"Referer", "https://gpttalk.ru/"},
        {"Origin", "https://gpttalk.ru"},
    };
    // NOTE(review): `recv` is declared but never written or read here.
    std::string recv;
    auto ret = Curl()
                   .setUrl("https://gpttalk.ru/gpt2")
                   .setProxy(m_cfg.http_proxy)
                   .setRecvHeadersCallback([](std::string) { return; })
                   .setRecvBodyCallback([&](std::string str) mutable {
                       // Forward each received chunk verbatim on the channel's executor.
                       boost::system::error_code err{};
                       boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, str); });
                   })
                   .setBody([&] {
                       // Template request; "prompt" is replaced with the full conversation.
                       constexpr std::string_view ask_json_str = R"({
"model":"gpt-3.5-turbo",
"modelType":1,
"prompt":[
{
"role":"user",
"content":"我的上一个问题"
}
],
"responseType":"stream"
})";
                       nlohmann::json ask_request = nlohmann::json::parse(ask_json_str, nullptr, false);
                       ask_request["prompt"] = getConversationJson(json);
                       std::string ask_request_str = ask_request.dump();
                       SPDLOG_INFO("request: [{}]", ask_request_str);
                       return ask_request_str;
                   }())
                   .clearHeaders()
                   .setHttpHeaders(headers)
                   .perform();
    if (ret.has_value()) {
        SPDLOG_ERROR("{}", ret.value());
        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
        ch->try_send(err, ret.value());
    }
    co_return;
}
|
||||
|
@ -23,16 +23,11 @@ public:
|
||||
// Per-provider chat handler declarations. Each coroutine takes the output
// Channel and the incoming request json; definitions elsewhere stream the
// provider's reply into the Channel.
boost::asio::awaitable<void> you(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> binjie(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> gptGo(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> chatForAi(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> gptalk(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> gptForLove(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> chatGptDemo(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> llama2(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> geekGpt(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> chatGptAi(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> aivvm(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> chatGpt4Online(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> gptTalkru(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> deepInfra(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> gptChatly(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> aiChatOnline(std::shared_ptr<Channel>, nlohmann::json);
|
||||
|
@ -918,330 +918,6 @@ boost::asio::awaitable<void> FreeGpt::gptGo(std::shared_ptr<Channel> ch, nlohman
|
||||
co_return;
|
||||
}
|
||||
|
||||
// Provider adapter for chatforai.store. The request is authenticated by a
// hex SHA-256 signature over "timestamp:conversationId:prompt:7YN8z6d6";
// the raw response body is streamed to `ch` unmodified.
boost::asio::awaitable<void> FreeGpt::chatForAi(std::shared_ptr<Channel> ch, nlohmann::json json) {
    co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));

    boost::system::error_code err{};
    // Guarantee the channel is closed on every exit path.
    ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
    auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();

    // Hex-encoded SHA-256 of "timestamp:id:message:7YN8z6d6" (site's fixed salt).
    auto generate_signature = [](uint64_t timestamp, const std::string& message, const std::string& id) {
        std::string s = std::to_string(timestamp) + ":" + id + ":" + message + ":7YN8z6d6";
        unsigned char hash[SHA256_DIGEST_LENGTH];
        SHA256_CTX sha256;
        if (!SHA256_Init(&sha256))
            throw std::runtime_error("SHA-256 initialization failed");
        if (!SHA256_Update(&sha256, s.c_str(), s.length()))
            throw std::runtime_error("SHA-256 update failed");
        if (!SHA256_Final(hash, &sha256))
            throw std::runtime_error("SHA-256 finalization failed");
        std::stringstream ss;
        for (int i = 0; i < SHA256_DIGEST_LENGTH; i++)
            ss << std::hex << std::setw(2) << std::setfill('0') << static_cast<int>(hash[i]);
        return ss.str();
    };
    static std::unordered_multimap<std::string, std::string> headers{
        {"Content-Type", "application/json"},
        {"Origin", "https://chatforai.store"},
        {"Referer", "https://chatforai.store/?r=b"},
    };
    auto ret = Curl()
                   .setUrl("https://chatforai.store/api/handle/provider-openai")
                   .setProxy(m_cfg.http_proxy)
                   .setRecvHeadersCallback([](std::string) { return; })
                   .setRecvBodyCallback([&](std::string str) {
                       // Pass chunks straight through on the channel's executor.
                       boost::asio::post(ch->get_executor(), [=, str = std::move(str)] { ch->try_send(err, str); });
                       return;
                   })
                   .setBody([&] {
                       uint64_t timestamp = getTimestamp();
                       // Template request; id, timestamp, sign, messages and prompt
                       // are patched in below before dumping.
                       constexpr std::string_view request_str{R"({
"conversationId":"id_1701845910554",
"conversationType":"chat_continuous",
"botId":"chat_continuous",
"globalSettings":{
"baseUrl":"https://api.openai.com",
"model":"gpt-3.5-turbo",
"messageHistorySize":5,
"temperature":0.7,
"top_p":1,
"stream":false
},
"botSettings":{
},
"prompt":"hello",
"messages":[
{
"role":"user",
"content":"hello"
}
],
"timestamp":1701845910677,
"sign":"fd7fac179ff93e8745b3c7a61075e3d8062a795e7ca1cd3d7cf24e3303de7a95"
})"};
                       nlohmann::json request = nlohmann::json::parse(request_str, nullptr, false);
                       auto conversation_id = std::format("id_{}", timestamp - 123);
                       request["conversationId"] = conversation_id;
                       request["timestamp"] = timestamp;
                       request["sign"] = generate_signature(timestamp, prompt, conversation_id);
                       request["messages"] = getConversationJson(json);
                       request["prompt"] = prompt;
                       auto str = request.dump();
                       SPDLOG_INFO("request : [{}]", str);
                       return str;
                   }())
                   .setHttpHeaders(headers)
                   .perform();
    if (ret.has_value()) {
        // NOTE(review): unlike sibling handlers this path does not SPDLOG the
        // error before forwarding it — confirm whether that is intentional.
        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
        ch->try_send(err, ret.value());
    }
    co_return;
}
|
||||
|
||||
// Provider adapter for gptalk.net. Three HTTP round-trips on one Curl handle:
//   1. anonymous "fingerprint" login  -> bearer auth token
//   2. POST the prompt                -> per-request stream token
//   3. GET the event stream          -> forward content deltas to `ch`
boost::asio::awaitable<void> FreeGpt::gptalk(std::shared_ptr<Channel> ch, nlohmann::json json) {
    // Hop onto the worker pool so blocking curl transfers stay off the channel's executor.
    co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
    // Guarantee the channel is closed on every exit path.
    ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};

    boost::system::error_code err{};
    auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();

    // Random hex string used as the anonymous login fingerprint:
    // `length` hex digits, left-padded with '0' up to length*2 characters.
    auto generate_token_hex = [](int32_t length) {
        std::random_device rd;
        std::stringstream ss;
        std::mt19937 gen(rd());
        std::uniform_int_distribution<> dis(0, 15);
        for (int i = 0; i < length; ++i)
            ss << std::hex << dis(gen);
        std::string token = ss.str();
        token = std::string(length * 2 - token.length(), '0') + token;
        return token;
    };

    uint64_t timestamp = getTimestamp<std::chrono::seconds>();
    std::string recv;
    Curl curl;
    std::unordered_multimap<std::string, std::string> headers{
        {"Accept", "*/*"}, {"Content-Type", "application/json"},
        {"authority", "gptalk.net"}, {"origin", "https://gptalk.net"},
        {"x-auth-appid", "2229"}, {"x-auth-openid", ""},
        {"x-auth-platform", ""}, {"x-auth-timestamp", std::to_string(timestamp)},
    };
    // Step 1: login with a random fingerprint; the response body accumulates in `recv`.
    auto ret = curl.setUrl("https://gptalk.net/api/chatgpt/user/login")
                   .setProxy(m_cfg.http_proxy)
                   .setRecvHeadersCallback([&](std::string) {})
                   .setRecvBodyCallback([&](std::string str) {
                       recv.append(str);
                       return;
                   })
                   .setBody([&] {
                       nlohmann::json login_json;
                       login_json["fingerprint"] = generate_token_hex(16);
                       login_json["platform"] = "fingerprint";
                       std::string request_str = login_json.dump();
                       return request_str;
                   }())
                   .clearHeaders()
                   .setHttpHeaders(headers)
                   .perform();
    if (ret.has_value()) {
        SPDLOG_ERROR("{}", ret.value());
        // Report the failure on the channel's executor, then bail out.
        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
        ch->try_send(err, ret.value());
        co_return;
    }
    SPDLOG_INFO("login rsp: [{}]", recv);
    nlohmann::json auth_rsp = nlohmann::json::parse(recv, nullptr, false);
    auto auth_token = auth_rsp["data"]["token"].get<std::string>();
    SPDLOG_INFO("token: [{}]", auth_token);

    auto auth_str = std::format("Bearer {}", auth_token);
    headers.emplace("authorization", auth_str);

    // Step 2: submit the prompt; the reply carries the one-shot stream token.
    recv.clear();
    ret = curl.setUrl("https://gptalk.net/api/chatgpt/chatapi/text")
              .setProxy(m_cfg.http_proxy)
              .setRecvHeadersCallback([&](std::string) {})
              .setRecvBodyCallback([&](std::string str) {
                  recv.append(str);
                  return;
              })
              .setBody([&] {
                  // Template request; `created_at` and `content` are overwritten below.
                  constexpr std::string_view json_str = R"({
"content":"hello",
"accept":"stream",
"from":1,
"model":"gpt-3.5-turbo",
"is_mobile":0,
"user_agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36",
"is_open_ctx":0,
"prompt":"",
"roid":111,
"temperature":0,
"ctx_msg_count":3,
"created_at":1696655321
})";
                  nlohmann::json request = nlohmann::json::parse(json_str, nullptr, false);
                  request["created_at"] = timestamp;
                  request["content"] = prompt;
                  auto request_str = request.dump();
                  return request_str;
              }())
              .clearHeaders()
              .setHttpHeaders(headers)
              .perform();
    if (ret.has_value()) {
        SPDLOG_ERROR("{}", ret.value());
        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
        ch->try_send(err, ret.value());
        co_return;
    }
    SPDLOG_INFO("input.recv: [{}]", recv);
    nlohmann::json get_text_rsp = nlohmann::json::parse(recv, nullptr, false);
    auto token = get_text_rsp["data"]["token"].get<std::string>();
    SPDLOG_INFO("token: [{}]", token);
    recv.clear();

    // Step 3: consume the "data: {...}" line stream. The server appears to
    // resend the cumulative message each event, so `last_message` tracks what
    // was already delivered and only the new suffix is forwarded.
    std::string last_message;
    auto url = std::format("https://gptalk.net/api/chatgpt/chatapi/stream?token={}", token);
    ret = curl.setUrl(url)
              .setProxy(m_cfg.http_proxy)
              .setRecvHeadersCallback([&](std::string) {})
              .setOpt(CURLOPT_HTTPGET, 1L)
              .setRecvBodyCallback([&](std::string str) {
                  recv.append(str);
                  while (true) {
                      // Process only complete lines; a partial tail stays buffered in `recv`.
                      auto position = recv.find("\n");
                      if (position == std::string::npos)
                          break;
                      auto msg = recv.substr(0, position + 1);
                      recv.erase(0, position + 1);
                      msg.pop_back();  // drop the trailing '\n'
                      if (msg.empty() || !msg.contains("content") || !msg.starts_with("data: "))
                          continue;
                      msg.erase(0, 6);  // strip the "data: " SSE prefix
                      boost::system::error_code err{};
                      nlohmann::json line_json = nlohmann::json::parse(msg, nullptr, false);
                      if (line_json.is_discarded()) {
                          SPDLOG_ERROR("json parse error: [{}]", msg);
                          boost::asio::post(ch->get_executor(),
                                            [=] { ch->try_send(err, std::format("json parse error: [{}]", msg)); });
                          continue;
                      }
                      auto content = line_json["content"].get<std::string>();
                      // Convert the cumulative payload into an incremental delta.
                      if (last_message.empty())
                          last_message = content;
                      else {
                          auto count = last_message.size();
                          last_message = content;
                          content.erase(0, count);
                      }
                      if (content.empty())
                          continue;
                      boost::asio::post(ch->get_executor(),
                                        [=, content = std::move(content)] { ch->try_send(err, content); });
                  }
              })
              .clearHeaders()
              .setHttpHeaders([] -> auto& {
                  // Static header set for the stream call (no timestamp/authorization entries).
                  static std::unordered_multimap<std::string, std::string> headers{
                      {"Accept", "*/*"}, {"Content-Type", "application/json"},
                      {"authority", "gptalk.net"}, {"origin", "https://gptalk.net"},
                      {"x-auth-appid", "2229"}, {"x-auth-openid", ""},
                      {"x-auth-platform", ""},
                  };
                  return headers;
              }())
              .perform();
    if (ret.has_value()) {
        SPDLOG_ERROR("{}", ret.value());
        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
        ch->try_send(err, ret.value());
    }
    co_return;
}
|
||||
|
||||
// Streams a chat reply from the "gptforlove" provider (api.gptplus.one).
// A per-request "secret" is first fetched from the zeus helper service, then
// the prompt is POSTed and each newline-delimited JSON chunk's delta content
// is forwarded to `ch`. The channel is always closed on exit via ScopeExit.
boost::asio::awaitable<void> FreeGpt::gptForLove(std::shared_ptr<Channel> ch, nlohmann::json json) {
    // Hop onto the worker pool so the blocking curl transfer never runs on an io executor.
    co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
    // Close the channel on the channel's own executor when this coroutine finishes.
    ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};

    boost::system::error_code err{};
    auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();

    // The provider validates a freshly generated "secret"; delegate its creation to zeus.
    auto secret_rsp = callZeus(std::format("{}/gptforlove", m_cfg.zeus), "{}");
    if (!secret_rsp.has_value()) {
        SPDLOG_ERROR("callZeus error: {}", secret_rsp.error());
        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
        ch->try_send(err, secret_rsp.error());
        co_return;
    }
    SPDLOG_INFO("zeus: [{}]", secret_rsp.value().dump());
    static std::unordered_multimap<std::string, std::string> headers{
        {"Content-Type", "application/json"},
        {"referer", "https://ai18.gptforlove.com/"},
        {"origin", "https://ai18.gptforlove.com"},
        {"authority", "api.gptplus.one"},
    };
    std::string recv;
    auto ret = Curl()
                   .setUrl("https://api.gptplus.one/chat-process")
                   .setProxy(m_cfg.http_proxy)
                   .setRecvBodyCallback([&](std::string str) {
                       recv.append(str);
                       // The stream is newline-delimited JSON; only consume complete lines.
                       while (true) {
                           auto position = recv.find("\n");
                           if (position == std::string::npos)
                               break;
                           auto msg = recv.substr(0, position + 1);
                           recv.erase(0, position + 1);
                           msg.pop_back();  // strip the trailing '\n'
                           // Provider-side rate limit ("asked more than 5 times within 10
                           // minutes"): forward the raw message and stop parsing this transfer.
                           if (msg.contains("10分钟内提问超过了5次")) {
                               boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, msg); });
                               return;
                           }
                           if (msg.empty() || !msg.contains("content"))
                               continue;
                           boost::system::error_code err{};
                           nlohmann::json line_json = nlohmann::json::parse(msg, nullptr, false);
                           if (line_json.is_discarded()) {
                               SPDLOG_ERROR("json parse error: [{}]", msg);
                               boost::asio::post(ch->get_executor(), [=] {
                                   ch->try_send(err, std::format("json parse error: [{}]", msg));
                               });
                               continue;
                           }
                           // Checked lookup instead of chained operator[]: a chunk that merely
                           // contains the word "content" but lacks this path would have nulls
                           // inserted and get<std::string>() would then throw from inside the
                           // curl write callback.
                           static const nlohmann::json::json_pointer content_ptr{
                               "/detail/choices/0/delta/content"};
                           if (!line_json.contains(content_ptr) || !line_json[content_ptr].is_string()) {
                               SPDLOG_ERROR("unexpected chunk shape: [{}]", msg);
                               continue;
                           }
                           auto str = line_json[content_ptr].get<std::string>();
                           if (!str.empty())
                               boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, str); });
                       }
                   })
                   .setBody([&] {
                       // Template request; secret and prompt are filled in below.
                       constexpr std::string_view request_str{R"({
                           "prompt": "hello",
                           "options": {},
                           "systemMessage": "You are ChatGPT, the version is GPT3.5, a large language model trained by OpenAI. Follow the user's instructions carefully.",
                           "temperature": 0.8,
                           "top_p": 1,
                           "secret": "U2FsdGVkX18vdtlMj0nP1LoUzEqJTP0is+Q2+bQJNMk=",
                           "stream": false
                       })"};
                       nlohmann::json request = nlohmann::json::parse(request_str, nullptr, false);
                       request["secret"] = secret_rsp.value()["secret"];
                       request["prompt"] = prompt;
                       auto str = request.dump();
                       SPDLOG_INFO("request : [{}]", str);
                       return str;
                   }())
                   .setHttpHeaders(headers)
                   .perform();
    if (ret.has_value()) {
        SPDLOG_ERROR("{}", ret.value());
        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
        ch->try_send(err, ret.value());
    }
    co_return;
}
|
||||
|
||||
boost::asio::awaitable<void> FreeGpt::chatGptDemo(std::shared_ptr<Channel> ch, nlohmann::json json) {
|
||||
co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
|
||||
ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
|
||||
@ -1801,143 +1477,6 @@ boost::asio::awaitable<void> FreeGpt::aivvm(std::shared_ptr<Channel> ch, nlohman
|
||||
co_return;
|
||||
}
|
||||
|
||||
// Queries the chatgpt4online.org WordPress AI-chat plugin. Two-step flow:
// (1) GET the landing page and scrape the wpaicg anti-CSRF nonce out of its
// HTML, (2) POST the prompt to the plugin endpoint and forward the JSON
// "data" field to `ch`. The channel is always closed on exit via ScopeExit.
boost::asio::awaitable<void> FreeGpt::chatGpt4Online(std::shared_ptr<Channel> ch, nlohmann::json json) {
    // Hop onto the worker pool so the blocking curl transfers never run on an io executor.
    co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
    ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};

    auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();

    boost::system::error_code err{};
    std::unordered_multimap<std::string, std::string> headers{
        {"Accept", "*/*"},
        {"content-type", "application/x-www-form-urlencoded"},
    };
    Curl curl;
    std::string recv;
    // Step 1: fetch the landing page; the nonce is embedded in its markup.
    auto ret = curl.setUrl("https://chatgpt4online.org")
                   .setProxy(m_cfg.http_proxy)
                   .setRecvHeadersCallback([](std::string) {})
                   .setRecvBodyCallback([&](std::string str) mutable { recv.append(str); })
                   .perform();
    if (ret.has_value()) {
        SPDLOG_ERROR("{}", ret.value());
        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
        ch->try_send(err, ret.value());
        // Bail out: previously execution fell through and tried to scrape a
        // nonce out of an empty/partial page body.
        co_return;
    }
    static std::string pattern{R"(data-nonce=".*")"};

    std::vector<std::string> matches = findAll(pattern, recv);
    if (matches.size() != 1) {
        SPDLOG_ERROR("parsing login failed");
        co_await ch->async_send(err, recv, use_nothrow_awaitable);
        co_return;
    }

    // Pull the quoted value out of data-nonce="...".
    std::regex reg("\"([^\"]*)\"");
    std::sregex_iterator iter(matches[0].begin(), matches[0].end(), reg);
    std::sregex_iterator end;
    std::vector<std::string> results;
    while (iter != end) {
        results.emplace_back(iter->str(1));
        iter++;
    }
    if (results.empty()) {
        SPDLOG_ERROR("Failed to extract content");
        co_await ch->async_send(err, "Failed to extract content", use_nothrow_awaitable);
        co_return;
    }
    auto& nonce = results[0];
    SPDLOG_INFO("data_nonce: {}", nonce);
    // Step 2: submit the prompt, reusing the same curl handle (keeps cookies/session).
    ret = curl.setUrl("https://chatgpt4online.org/rizq")
              .setProxy(m_cfg.http_proxy)
              .setRecvHeadersCallback([](std::string) { return; })
              .setRecvBodyCallback([&](std::string str) mutable {
                  boost::system::error_code err{};
                  nlohmann::json line_json = nlohmann::json::parse(str, nullptr, false);
                  if (line_json.is_discarded()) {
                      SPDLOG_ERROR("json parse error: [{}]", str);
                      boost::asio::post(ch->get_executor(),
                                        [=] { ch->try_send(err, std::format("json parse error: [{}]", str)); });
                      return;
                  }
                  // Checked access: operator[] on a missing "data" key would insert null
                  // and get<std::string>() would throw from inside the curl callback.
                  if (!line_json.contains("data") || !line_json["data"].is_string()) {
                      SPDLOG_ERROR("unexpected response: [{}]", str);
                      boost::asio::post(ch->get_executor(),
                                        [=] { ch->try_send(err, std::format("unexpected response: [{}]", str)); });
                      return;
                  }
                  auto message = line_json["data"].get<std::string>();
                  if (message.empty())
                      return;
                  boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, message); });
              })
              .setBody([&] {
                  // Form fields expected by the wpaicg chat shortcode endpoint.
                  std::multimap<std::string, std::string> params{
                      {"_wpnonce", nonce},
                      {"post_id", "58"},
                      {"url", "https://chatgpt4online.org"},
                      {"action", "wpaicg_chat_shortcode_message"},
                      {"message", prompt},
                      {"bot_id", "3405"},
                  };
                  return paramsToQueryStr(params);
              }())
              .clearHeaders()
              .setHttpHeaders(headers)
              .perform();
    if (ret.has_value()) {
        SPDLOG_ERROR("{}", ret.value());
        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
        ch->try_send(err, ret.value());
    }
    co_return;
}
|
||||
|
||||
// Sends the whole conversation to gpttalk.ru and streams the raw response
// chunks straight to `ch` (the endpoint answers as a plain text stream, so
// there is no per-line framing to parse). The channel is always closed on
// exit via ScopeExit.
boost::asio::awaitable<void> FreeGpt::gptTalkru(std::shared_ptr<Channel> ch, nlohmann::json json) {
    // Hop onto the worker pool so the blocking curl transfer never runs on an io executor.
    co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
    ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};

    // Extracted only to validate the expected request shape (throws on a
    // malformed request); the body below is built from the full conversation.
    [[maybe_unused]] auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();

    boost::system::error_code err{};
    std::unordered_multimap<std::string, std::string> headers{
        {"Accept", "application/json, text/plain, */*"},
        {"content-type", "application/json"},
        {"Referer", "https://gpttalk.ru/"},
        {"Origin", "https://gpttalk.ru"},
    };
    auto ret = Curl()
                   .setUrl("https://gpttalk.ru/gpt2")
                   .setProxy(m_cfg.http_proxy)
                   .setRecvHeadersCallback([](std::string) { return; })
                   .setRecvBodyCallback([&](std::string str) mutable {
                       // Forward each chunk as-is on the channel's executor.
                       boost::system::error_code err{};
                       boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, str); });
                   })
                   .setBody([&] {
                       // Template request; the placeholder prompt is replaced below
                       // with the real conversation history.
                       constexpr std::string_view ask_json_str = R"({
                           "model":"gpt-3.5-turbo",
                           "modelType":1,
                           "prompt":[
                               {
                                   "role":"user",
                                   "content":"我的上一个问题"
                               }
                           ],
                           "responseType":"stream"
                       })";
                       nlohmann::json ask_request = nlohmann::json::parse(ask_json_str, nullptr, false);
                       ask_request["prompt"] = getConversationJson(json);
                       std::string ask_request_str = ask_request.dump();
                       SPDLOG_INFO("request: [{}]", ask_request_str);
                       return ask_request_str;
                   }())
                   .clearHeaders()
                   .setHttpHeaders(headers)
                   .perform();
    if (ret.has_value()) {
        SPDLOG_ERROR("{}", ret.value());
        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
        ch->try_send(err, ret.value());
    }
    co_return;
}
|
||||
|
||||
boost::asio::awaitable<void> FreeGpt::deepInfra(std::shared_ptr<Channel> ch, nlohmann::json json) {
|
||||
co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
|
||||
ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
|
||||
|
@ -340,16 +340,11 @@ int main(int, char** argv) {
|
||||
ADD_METHOD("gpt-4-turbo-stream-you", FreeGpt::you);
|
||||
ADD_METHOD("gpt-3-stream-binjie", FreeGpt::binjie);
|
||||
ADD_METHOD("gpt-3.5-turbo-stream-GptGo", FreeGpt::gptGo);
|
||||
ADD_METHOD("gpt-3.5-turbo-stream-gptalk", FreeGpt::gptalk);
|
||||
ADD_METHOD("gpt-3.5-turbo-stream-ChatForAi", FreeGpt::chatForAi);
|
||||
ADD_METHOD("gpt-3.5-turbo-stream-gptforlove", FreeGpt::gptForLove);
|
||||
ADD_METHOD("gpt-3.5-turbo-stream-ChatgptDemo", FreeGpt::chatGptDemo);
|
||||
ADD_METHOD("gpt-3.5-turbo-stream-GeekGpt", FreeGpt::geekGpt);
|
||||
ADD_METHOD("llama2-70B", FreeGpt::llama2);
|
||||
ADD_METHOD("gpt-3.5-turbo-stream-chatGptAi", FreeGpt::chatGptAi);
|
||||
ADD_METHOD("gpt-3.5-turbo-stream-aivvm", FreeGpt::aivvm);
|
||||
ADD_METHOD("gpt-4-ChatGpt4Online", FreeGpt::chatGpt4Online);
|
||||
ADD_METHOD("gpt-3.5-turbo-stream-gptTalkRu", FreeGpt::gptTalkru);
|
||||
ADD_METHOD("Llama-2-70b-chat-hf-stream-DeepInfra", FreeGpt::deepInfra);
|
||||
ADD_METHOD("gpt-3.5-turbo-gptChatly", FreeGpt::gptChatly);
|
||||
ADD_METHOD("gpt-3.5-turbo-stream-AiChatOnline", FreeGpt::aiChatOnline);
|
||||
|
Loading…
x
Reference in New Issue
Block a user