add provider gpt-3.5-turbo-gptChatly (#63)

Dmitry Afanasyev 2023-12-06 18:34:12 +03:00 committed by GitHub
parent 2359481fb7
commit 2973841137
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 882 additions and 841 deletions

View File

@@ -42,7 +42,6 @@ class ChatGptModelsEnum(StrEnum):
gpt_4_ChatgptAi = "gpt-4-ChatgptAi"
gpt_3_5_turbo_weWordle = "gpt-3.5-turbo-weWordle"
gpt_3_5_turbo_stream_yqcloud = "gpt-3.5-turbo-stream-yqcloud"
gpt_OpenAssistant_stream_HuggingChat = "gpt-OpenAssistant-stream-HuggingChat"
gpt_4_turbo_stream_you = "gpt-4-turbo-stream-you"
gpt_3_stream_binjie = "gpt-3-stream-binjie"
gpt_3_5_turbo_stream_CodeLinkAva = "gpt-3.5-turbo-stream-CodeLinkAva"
@@ -52,19 +51,16 @@ class ChatGptModelsEnum(StrEnum):
gpt_3_5_turbo_stream_Cromicle = "gpt-3.5-turbo-stream-Cromicle"
gpt_3_5_turbo_stream_gptalk = "gpt-3.5-turbo-stream-gptalk"
gpt_3_5_turbo_stream_ChatgptDemo = "gpt-3.5-turbo-stream-ChatgptDemo"
gpt_3_5_turbo_stream_ChatAnywhere = "gpt-3.5-turbo-stream-ChatAnywhere"
gpt_3_5_turbo_ChatgptNext = "gpt-3.5-turbo-ChatgptNext"
gpt_3_5_turbo_stream_gptTalkRu = "gpt-3.5-turbo--stream-gptTalkRu"
Llama_2_70b_chat_hf_stream_DeepInfra = "Llama-2-70b-chat-hf-stream-DeepInfra"
gpt_4_stream_aivvm = "gpt-4-stream-aivvm"
llama2 = "llama2"
llama2_70B = "llama2-70B"
gpt_3_5_turbo_gptChatly = "gpt-3.5-turbo-gptChatly"
gpt_3_5_turbo_stream_Berlin = "gpt-3.5-turbo-stream-Berlin"
gpt_4_ChatGpt4Online = "gpt-4-ChatGpt4Online"
gpt_3_5_turbo_stream_chatGptAi = "gpt-3.5-turbo-stream-chatGptAi"
gpt_3_5_turbo_stream_FakeGpt = "gpt-3.5-turbo-stream-FakeGpt"
gpt_3_5_turbo_stream_GeekGpt = "gpt-3.5-turbo-stream-GeekGpt"
gpt_3_5_turbo_stream_gptforlove = "gpt-3.5-turbo-stream-gptforlove"
gpt_3_5_turbo_stream_Vercel = "gpt-3.5-turbo-stream-Vercel"
gpt_3_5_turbo_stream_aivvm = "gpt-3.5-turbo-stream-aivvm"
gpt_3_5_turbo_stream_ChatForAi = "gpt-3.5-turbo-stream-ChatForAi"
@@ -84,8 +80,6 @@ class ChatGptModelsEnum(StrEnum):
priority = 3
case "gpt-3.5-turbo-stream-GeekGpt":
priority = 2
case "gpt-3.5-turbo-stream-FakeGpt":
priority = 2
case "gpt-3.5-turbo-stream-gptalk":
priority = 1
case "llama2":
@@ -98,5 +92,4 @@ class ChatGptModelsEnum(StrEnum):
def _deprecated() -> set[str]:
return {
"gpt-3.5-turbo-stream-gptforlove",
"gpt-3.5-turbo-stream-Vercel",
}

View File

@@ -1,6 +1,8 @@
# Cpp FreeGPT WebUI
[![](https://github.com/fantasy-peak/cpp-freegpt-webui/workflows/ubuntu-gcc13/badge.svg)](https://github.com/fantasy-peak/cpp-freegpt-webui/actions) [![](https://github.com/fantasy-peak/cpp-freegpt-webui/workflows/ubuntu-clang18/badge.svg)](https://github.com/fantasy-peak/cpp-freegpt-webui/actions)
[![](https://github.com/fantasy-peak/cpp-freegpt-webui/workflows/ubuntu-gcc13/badge.svg)](https://github.com/fantasy-peak/cpp-freegpt-webui/actions) [![](https://github.com/fantasy-peak/cpp-freegpt-webui/workflows/ubuntu-clang18/badge.svg)](https://github.com/fantasy-peak/cpp-freegpt-webui/actions) [![](https://img.shields.io/badge/language-C%2B%2B23-yellow.svg)](https://en.wikipedia.org/wiki/C%2B%2B17) [![Docker Pulls](https://img.shields.io/docker/pulls/fantasypeak/freegpt?logo=docker)](https://hub.docker.com/r/fantasypeak/freegpt/)
## GPT 3.5/4
@@ -113,7 +115,7 @@ docker run -it --rm \
```
### Start the flaresolverr docker [optional]
This is optional: the aivvm provider has Cloudflare challenges enabled, so flaresolverr is needed to solve them.
This is optional: some providers (aivvm, gptChatly) have Cloudflare challenges enabled, so flaresolverr is needed to solve them.
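Under the hood, a provider hands the challenge to flaresolverr by POSTing a small JSON command to it. A minimal sketch in C++ with nlohmann::json, mirroring the payload the new gptChatly provider builds (the endpoint URL itself comes from the flaresolverr entry in the config):
```
#include <nlohmann/json.hpp>
#include <string>

// Sketch: the "solve this page" command sent to the flaresolverr endpoint.
nlohmann::json data{
    {"cmd", "request.get"},           // ask flaresolverr to GET the page
    {"url", "https://gptchatly.com"},
    {"maxTimeout", 60000},            // milliseconds to wait for the challenge
    {"session_ttl_minutes", 60},
};
std::string body = data.dump();       // POSTed with Content-Type: application/json
```
flaresolverr replies with a `solution` object whose `cookies` array contains the `cf_clearance` cookie that the provider then replays on its own requests.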
```
docker run -d \
--name=flaresolverr \
@@ -181,3 +183,4 @@ Please note the following:
By using this repository or any code related to it, you agree to these terms. The author is not responsible for any
copies, forks, or re-uploads made by other users. This is the author's only account and repository. To prevent
impersonation or irresponsible actions, please comply with the GNU GPL license this repository uses.

View File

@@ -909,85 +909,6 @@ std::expected<std::string, std::string> decompress(auto& res) {
}
}
boost::asio::awaitable<void> FreeGpt::gptChatly(std::shared_ptr<Channel> ch, nlohmann::json json) {
co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
boost::system::error_code err{};
auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
struct Input {
std::shared_ptr<Channel> ch;
std::string recv;
};
Input input;
CURL* curl = curl_easy_init();
if (!curl) {
// curl_easy_init() does not yield a CURLcode, so there is no error code to format here
auto error_info = std::string{"curl_easy_init() failed"};
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, error_info);
co_return;
}
ScopeExit auto_exit{[=] { curl_easy_cleanup(curl); }};
auto ret = sendHttpRequest(CurlHttpRequest{
.curl = curl,
.url = "https://gptchatly.com/fetch-response",
.http_proxy = m_cfg.http_proxy,
.cb = [](void* contents, size_t size, size_t nmemb, void* userp) mutable -> size_t {
boost::system::error_code err{};
auto input_ptr = static_cast<Input*>(userp);
std::string data{(char*)contents, size * nmemb};
auto& [ch, recv] = *input_ptr;
nlohmann::json line_json = nlohmann::json::parse(data, nullptr, false);
if (line_json.is_discarded()) {
SPDLOG_ERROR("json parse error: [{}]", data);
boost::asio::post(ch->get_executor(),
[=] { ch->try_send(err, std::format("json parse error: [{}]", data)); });
return size * nmemb;
}
auto str = line_json["chatGPTResponse"].get<std::string>();
boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, str); });
return size * nmemb;
},
.input = [&] -> void* {
input.recv.clear();
input.ch = ch;
return &input;
}(),
.headers = [&] -> auto& {
static std::unordered_map<std::string, std::string> headers{
{"Accept", "*/*"},
{"origin", "https://gptchatly.com"},
{"referer", "https://gptchatly.com/"},
{"Content-Type", "application/json"},
};
return headers;
}(),
.body = [&] -> std::string {
constexpr std::string_view ask_json_str = R"({
"past_conversations": ""
})";
nlohmann::json ask_request = nlohmann::json::parse(ask_json_str, nullptr, false);
ask_request["past_conversations"] = getConversationJson(json);
std::string ask_request_str = ask_request.dump();
SPDLOG_INFO("ask_request_str: [{}]", ask_request_str);
return ask_request_str;
}(),
.response_header_ptr = nullptr,
.expect_response_code = 200,
.ssl_verify = false,
});
if (ret) {
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, ret.value());
co_return;
}
co_return;
}
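// Note: these providers rely on ScopeExit guards to close the channel no
// matter how the coroutine exits. Assuming the helper is a plain scope guard,
// it is equivalent to this sketch:
//
//     template <typename F>
//     struct ScopeExit {
//         F f;
//         ~ScopeExit() { f(); }
//     };
//
// so `ScopeExit _exit{[=] { ... ch->close(); ... }};` runs its lambda when the
// guard goes out of scope, including on every early co_return above.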
boost::asio::awaitable<void> FreeGpt::ylokh(std::shared_ptr<Channel> ch, nlohmann::json json) {
boost::system::error_code err{};
ScopeExit auto_exit{[&] { ch->close(); }};
@@ -1893,3 +1814,639 @@ boost::asio::awaitable<void> FreeGpt::deepAi(std::shared_ptr<Channel> ch, nlohma
}
co_return;
}
boost::asio::awaitable<void> FreeGpt::fakeGpt(std::shared_ptr<Channel> ch, nlohmann::json json) {
co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
boost::system::error_code err{};
auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
std::unordered_multimap<std::string, std::string> headers{
{"Accept", "*/*"},
{"referer", "https://chat-shared2.zhile.io/?v=2"},
};
std::multimap<std::string, std::string> api_load_params{
{"t", std::to_string(getTimestamp<std::chrono::seconds>())},
};
auto api_load_url = std::format("https://chat-shared2.zhile.io/api/loads?{}", paramsToQueryStr(api_load_params));
std::string chunk_body;
Curl curl;
auto ret = curl.setUrl(api_load_url)
.setProxy(m_cfg.http_proxy)
.setRecvHeadersCallback([](std::string) { return; })
.setRecvBodyCallback([&](std::string str) {
chunk_body.append(str);
return;
})
.setHttpHeaders([&] -> auto& { return headers; }())
.perform();
if (ret.has_value()) {
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, ret.value());
co_return;
}
nlohmann::json json_result = nlohmann::json::parse(chunk_body, nullptr, false);
if (json_result.is_discarded()) {
SPDLOG_ERROR("json parse error: [{}]", chunk_body);
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, std::format("json parse error: [{}]", chunk_body));
co_return;
}
std::vector<nlohmann::json> random_j;
for (auto& j : json_result["loads"]) {
if (j["count"].get<int32_t>() == 0)
random_j.emplace_back(std::move(j));
}
if (random_j.empty()) {
SPDLOG_ERROR("random_j is empty!!!");
ch->try_send(err, json_result.dump());
co_return;
}
std::mt19937 g{std::random_device{}()};
std::uniform_int_distribution<std::size_t> d{0, random_j.size() - 1};  // distribution bounds are inclusive
auto token_id = random_j[d(g)];
std::cout << token_id.dump() << std::endl;
headers.emplace("Content-Type", "application/x-www-form-urlencoded");
// send login
std::multimap<std::string, std::string> login_params{
{"token_key", token_id["token_id"].get<std::string>()},
{"session_password",
[](int len) -> std::string {
static std::string chars{"abcdefghijklmnopqrstuvwxyz"};
static std::string letter{"0123456789"};
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<> dis(0, 1000000);
std::string random_string;
random_string += letter[dis(gen) % letter.length()];
len = len - 1;
for (int i = 0; i < len; i++)
random_string += chars[dis(gen) % chars.length()];
return random_string;
}(10)},
};
chunk_body.clear();
headers.erase("Content-Type");
std::string header_str;
auto body = paramsToQueryStr(login_params);
ret = curl.setUrl("https://chat-shared2.zhile.io/auth/login")
.setProxy(m_cfg.http_proxy)
.setRecvHeadersCallback([&](std::string str) {
header_str.append(str);
return;
})
.setRecvBodyCallback([&](std::string str) {
chunk_body.append(str);
return;
})
.setBody(body)
.clearHeaders()
.setHttpHeaders([&] -> auto& { return headers; }())
.perform();
if (ret.has_value()) {
SPDLOG_ERROR("{}", ret.value());
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, ret.value());
co_return;
}
auto response_header = Curl::parseHttpHeaders(header_str);
auto range = response_header.equal_range("set-cookie");
std::string cookie;
for (auto it = range.first; it != range.second; ++it) {
if (!(it->second.contains("credential=")))
continue;
auto view = it->second | std::views::drop_while(isspace) | std::views::reverse |
std::views::drop_while(isspace) | std::views::reverse;
auto fields = splitString(std::string{view.begin(), view.end()}, " ");
if (fields.empty()) {
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, "can't get cookie");
co_return;
}
cookie = std::move(fields[0]);
break;
}
SPDLOG_INFO("cookie: [{}]", cookie);
SPDLOG_INFO("rsp: [{}]", chunk_body);
chunk_body.clear();
headers.emplace("cookie", cookie);
// /api/auth/session
ret = curl.setUrl("https://chat-shared2.zhile.io/api/auth/session")
.setProxy(m_cfg.http_proxy)
.setOpt(CURLOPT_HTTPGET, 1L)
.setRecvHeadersCallback([](std::string str) {
std::cout << str << std::endl;
return;
})
.setRecvBodyCallback([&](std::string str) mutable {
chunk_body.append(str);
return;
})
.clearHeaders()
.setHttpHeaders([&] -> auto& { return headers; }())
.perform();
if (ret.has_value()) {
SPDLOG_ERROR("{}", ret.value());
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, ret.value());
co_return;
}
json_result.clear();
json_result = nlohmann::json::parse(chunk_body, nullptr, false);
if (json_result.is_discarded()) {
SPDLOG_ERROR("/api/auth/session json parse error: [{}]", chunk_body);
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, std::format("/api/auth/session parse error: [{}]", chunk_body));
co_return;
}
auto cache_token = json_result["accessToken"].get<std::string>();
SPDLOG_INFO("accessToken: [{}]", cache_token);
headers.erase("Accept");
headers.emplace("Content-Type", "application/json");
headers.emplace("Accept", "text/event-stream");
auto auth = std::format("Bearer {}", cache_token);
SPDLOG_INFO("auth: [{}]", auth);
headers.emplace("X-Authorization", auth);
std::string recv;
std::string last_message;
ret = curl.setUrl("https://chat-shared2.zhile.io/api/conversation")
.setProxy(m_cfg.http_proxy)
.setRecvHeadersCallback([](std::string) { return; })
.setRecvBodyCallback([&](std::string str) mutable {
recv.append(str);
while (true) {
auto position = recv.find("\n");
if (position == std::string::npos)
break;
auto msg = recv.substr(0, position + 1);
recv.erase(0, position + 1);
msg.pop_back();
if (msg.empty() || !msg.starts_with("data: ") || !msg.contains("content"))
continue;
msg.erase(0, 6);
if (msg == "[DONE]")
break;
boost::system::error_code err{};
nlohmann::json line_json = nlohmann::json::parse(msg, nullptr, false);
if (line_json.is_discarded()) {
SPDLOG_ERROR("json parse error: [{}]", msg);
boost::asio::post(ch->get_executor(),
[=] { ch->try_send(err, std::format("json parse error: [{}]", msg)); });
continue;
}
auto type = line_json["message"]["content"]["content_type"].get<std::string>();
if (type == "text") {
auto new_message = line_json["message"]["content"]["parts"][0].get<std::string>();
if (new_message.empty())
continue;
std::string tmp{new_message};
new_message.erase(0, last_message.size());
last_message = std::move(tmp);
if (!new_message.empty())
boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, new_message); });
}
}
return;
})
.setBody([&] {
constexpr std::string_view json_str = R"({
"action":"next",
"messages":[
{
"id":"a68cd787-c96c-4234-8ec9-00805f73a7b8",
"author":{"role":"user"},
"content":{
"content_type":"text",
"parts":["hello"]
},
"metadata":{}
}
],
"parent_message_id":"fdc171e6-dd0d-4494-93ce-e7d219e6ed05",
"model":"text-davinci-002-render-sha",
"plugin_ids":[],
"timezone_offset_min":-120,
"suggestions":[],
"history_and_training_disabled":true,
"arkose_token":"",
"force_paragen":false
})";
nlohmann::json request = nlohmann::json::parse(json_str, nullptr, false);
request["parent_message_id"] = createUuidString();
request["messages"][0]["id"] = createUuidString();
request["messages"][0]["content"]["parts"][0] = prompt;
SPDLOG_INFO("request: [{}]", request.dump(2));
return request.dump();
}())
.clearHeaders()
.setHttpHeaders(headers)
.perform();
if (ret.has_value()) {
SPDLOG_ERROR("{}", ret.value());
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, ret.value());
co_return;
}
}
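// The streaming callback in fakeGpt() above accumulates raw chunks in `recv`,
// splits off complete lines at '\n', keeps only "data: " events, and forwards
// only the part of the message that has not been delivered yet. The delta step,
// extracted as a standalone sketch (the helper name is illustrative):
static std::string newSuffix(const std::string& full, std::string& last) {
    if (full.size() < last.size()) {  // stream restarted; resynchronize
        last = full;
        return {};
    }
    std::string delta = full.substr(last.size());
    last = full;  // remember the longest text seen so far
    return delta;
}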
boost::asio::awaitable<void> FreeGpt::vercel(std::shared_ptr<Channel> ch, nlohmann::json json) {
co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
boost::system::error_code err{};
auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
auto create_random_number = [] {
std::random_device rd;
std::mt19937 mt(rd());
std::uniform_int_distribution<int> distribution(99, 999);
int random_number = distribution(mt);
return random_number;
};
constexpr std::string_view user_agent_str{
R"(Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.{}.{} Safari/537.36)"};
std::unordered_multimap<std::string, std::string> headers{
{"Accept", "*/*"},
{"authority", "sdk.vercel.ai"},
{"content-type", "application/json"},
{"referer", "https://sdk.vercel.ai/"},
{"origin", "https://sdk.vercel.ai"},
{"sec-ch-ua", R"("Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117")"},
{"sec-ch-ua-mobile", R"(?0)"},
{"sec-ch-ua-platform", R"("macOS")"},
{"cache-control", "no-cache"},
{"pragma", "no-cache"},
};
headers.emplace("user-agent", std::format(user_agent_str, create_random_number(), create_random_number()));
std::string recv;
Curl curl;
auto ret = curl.setUrl("https://sdk.vercel.ai/openai.jpeg")
.setProxy(m_cfg.http_proxy)
.setRecvHeadersCallback([](std::string) { return; })
.setRecvBodyCallback([&](std::string str) mutable {
recv.append(str);
return;
})
.clearHeaders()
.setHttpHeaders(headers)
.perform();
if (ret.has_value()) {
SPDLOG_ERROR("{}", ret.value());
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, ret.value());
co_return;
}
nlohmann::json request;
request["data"] = std::move(recv);
recv.clear();
auto vercel_rsp = callZeus(std::format("{}/vercel", m_cfg.zeus), request.dump());
if (!vercel_rsp.has_value()) {
SPDLOG_ERROR("callZeus vercel error: {}", vercel_rsp.error());
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, vercel_rsp.error());
co_return;
}
headers.erase("custom-encoding");
headers.erase("user-agent");
headers.emplace("custom-encoding", vercel_rsp.value()["data"]);
headers.emplace("user-agent", std::format(user_agent_str, create_random_number(), create_random_number()));
for (int i = 0; i < 20; i++) {
auto ret =
curl.setUrl("https://sdk.vercel.ai/api/generate")
.setProxy(m_cfg.http_proxy)
.setRecvHeadersCallback([](std::string) { return; })
.setRecvBodyCallback([&](std::string str) mutable {
if (str == "Internal Server Error" || str == "Rate limit exceeded") {
SPDLOG_WARN("vercel: [{}]", str);
return;
}
boost::asio::post(ch->get_executor(), [=, str = std::move(str)] { ch->try_send(err, str); });
return;
})
.setBody([&] {
constexpr std::string_view json_str = R"({
"model":"openai:gpt-3.5-turbo",
"messages":[
{
"role":"user",
"content":"hello"
}
],
"playgroundId":"403bce4c-7eb6-47b0-b1b5-0cb6b2469f70",
"chatIndex":0,
"temperature":0.7,
"maximumLength":4096,
"topP":1,
"topK":1,
"presencePenalty":1,
"frequencyPenalty":1,
"stopSequences":[]
})";
nlohmann::json request = nlohmann::json::parse(json_str, nullptr, false);
request["messages"] = getConversationJson(json);
request["playgroundId"] = createUuidString();
SPDLOG_INFO("request: [{}]", request.dump(2));
return request.dump();
}())
.clearHeaders()
.setHttpHeaders(headers)
.perform();
if (ret.has_value()) {
SPDLOG_ERROR("{}", ret.value());
co_await timeout(std::chrono::seconds(2));
continue;
}
co_return;
}
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, "call sdk.vercel.ai error");
co_return;
}
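// Note: vercel() above cannot call sdk.vercel.ai directly: it first fetches an
// anti-bot token from /openai.jpeg, asks the external "zeus" helper service
// (m_cfg.zeus) to turn that token into the custom-encoding header, and then
// retries /api/generate up to 20 times, waiting two seconds after each failure.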
boost::asio::awaitable<void> FreeGpt::chatAnywhere(std::shared_ptr<Channel> ch, nlohmann::json json) {
co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
boost::system::error_code err{};
std::unordered_multimap<std::string, std::string> headers{
{"Accept", "application/json, text/plain, */*"},
{"content-type", "application/json"},
{"Referer", "https://chatanywhere.cn/"},
{"Origin", "https://chatanywhere.cn"},
{"Authorization", ""},
};
std::string recv;
auto ret = Curl()
.setUrl("https://chatanywhere.cn/v1/chat/gpt/")
.setProxy(m_cfg.http_proxy)
.setRecvHeadersCallback([](std::string) { return; })
.setRecvBodyCallback([&](std::string str) mutable {
boost::system::error_code err{};
boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, str); });
})
.setBody([&] {
constexpr std::string_view ask_json_str = R"({
"list":[
{
"role":"user",
"content":"hello"
}
],
"id":"s1_qYuOLXjI3rEpc7WHfQ",
"title":"hello",
"prompt":"",
"temperature":0.5,
"models":"61490748",
"continuous":true
})";
nlohmann::json ask_request = nlohmann::json::parse(ask_json_str, nullptr, false);
ask_request["title"] = prompt;
ask_request["list"] = getConversationJson(json);
std::string ask_request_str = ask_request.dump();
SPDLOG_INFO("request: [{}]", ask_request_str);
return ask_request_str;
}())
.clearHeaders()
.setHttpHeaders(headers)
.perform();
if (ret.has_value()) {
SPDLOG_ERROR("{}", ret.value());
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, ret.value());
}
co_return;
}
boost::asio::awaitable<void> FreeGpt::chatGptNext(std::shared_ptr<Channel> ch, nlohmann::json json) {
co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
boost::system::error_code err{};
ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
// auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
std::string recv;
auto ret = Curl()
.setUrl("https://chat.fstha.com/api/openai/v1/chat/completions")
.setProxy(m_cfg.http_proxy)
.setRecvBodyCallback([&](std::string str) mutable { recv.append(str); })
.setHttpHeaders([&] -> auto& {
static std::unordered_multimap<std::string, std::string> headers{
{"Accept", "text/event-stream"},
{"origin", "https://chat.fstha.com"},
{"referer", "https://chat.fstha.com/"},
{"x-requested-with", "XMLHttpRequest"},
{"Authorization", "Bearer ak-chatgpt-nice"},
{"Alt-Used", "chat.fstha.com"},
};
return headers;
}())
.setBody([&] {
constexpr std::string_view request_str{R"({
"messages":[
{
"role":"user",
"content":"hello"
}
],
"stream":false,
"model":"gpt-3.5-turbo",
"temperature":0.5,
"presence_penalty":0,
"frequency_penalty":0,
"top_p":1
})"};
nlohmann::json request = nlohmann::json::parse(request_str, nullptr, false);
request["messages"] = getConversationJson(json);
auto str = request.dump();
SPDLOG_INFO("request : [{}]", str);
return str;
}())
.perform();
if (ret.has_value()) {
SPDLOG_ERROR("{}", ret.value());
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, ret.value());
co_return;
}
SPDLOG_INFO("{}", recv);
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
nlohmann::json rsp = nlohmann::json::parse(recv, nullptr, false);
if (rsp.is_discarded()) {
SPDLOG_ERROR("json parse error: {}", recv);
ch->try_send(err, recv);
co_return;  // a discarded json value must not be read below
}
auto info = rsp["choices"][0]["message"]["content"].get<std::string>();
ch->try_send(err, info);
co_return;
}
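// Sketch: a more defensive way to pull the assistant text out of the parsed
// chatGptNext response above (illustrative helper; the code takes the happy path):
static std::string extractContent(const nlohmann::json& rsp) {
    if (!rsp.contains("choices") || rsp["choices"].empty())
        return {};
    const auto& choice = rsp["choices"][0];
    if (!choice.contains("message"))
        return {};
    return choice["message"].value("content", std::string{});
}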
boost::asio::awaitable<void> FreeGpt::huggingChat(std::shared_ptr<Channel> ch, nlohmann::json json) {
boost::system::error_code err{};
ScopeExit auto_exit{[&] { ch->close(); }};
auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
constexpr std::string_view host = "huggingface.co";
constexpr std::string_view port = "443";
constexpr std::string_view user_agent{
R"(Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0)"};
boost::beast::http::request<boost::beast::http::empty_body> req_init_cookie{boost::beast::http::verb::get,
"/chat/", 11};
req_init_cookie.set(boost::beast::http::field::host, host);
req_init_cookie.set(boost::beast::http::field::user_agent, user_agent);
auto ret = co_await sendRequestRecvResponse(req_init_cookie, host, port,
std::bind_front(&FreeGpt::createHttpClient, *this));
if (!ret.has_value()) {
co_await ch->async_send(err, ret.error(), use_nothrow_awaitable);
co_return;
}
auto& [response, ctx, stream_] = ret.value();
if (boost::beast::http::status::ok != response.result()) {
SPDLOG_ERROR("http status code: {}", response.result_int());
co_await ch->async_send(err, response.reason(), use_nothrow_awaitable);
co_return;
}
auto fields = splitString(response["Set-Cookie"], " ");
if (fields.empty()) {
std::stringstream ss;
ss << response.base();
SPDLOG_ERROR("get cookie error: {}", ss.str());
co_await ch->async_send(err, "can't get cookie", use_nothrow_awaitable);
co_return;
}
fields[0].pop_back();
std::string cookie{std::move(fields[0])};
SPDLOG_INFO("cookie: {}", cookie);
boost::beast::http::request<boost::beast::http::string_body> req_init_conversation{boost::beast::http::verb::post,
"/chat/conversation", 11};
req_init_conversation.set("Cookie", cookie);
req_init_conversation.set(boost::beast::http::field::host, host);
req_init_conversation.set(boost::beast::http::field::user_agent, user_agent);
req_init_conversation.set("Accept", "*/*");
req_init_conversation.set("Content-Type", "application/json");
req_init_conversation.body() = R"({"model": "meta-llama/Llama-2-70b-chat-hf"})";
req_init_conversation.prepare_payload();
auto [ec, count] = co_await boost::beast::http::async_write(stream_, req_init_conversation, use_nothrow_awaitable);
if (ec) {
SPDLOG_ERROR("{}", ec.message());
co_await ch->async_send(err, ec.message(), use_nothrow_awaitable);
co_return;
}
boost::beast::flat_buffer b;
boost::beast::http::response<boost::beast::http::string_body> res;
std::tie(ec, count) = co_await boost::beast::http::async_read(stream_, b, res, use_nothrow_awaitable);
if (ec) {
SPDLOG_ERROR("{}", ec.message());
co_await ch->async_send(err, ec.message(), use_nothrow_awaitable);
co_return;
}
if (res.result_int() != 200) {
std::string reason{res.reason()};
SPDLOG_ERROR("reason: {}", reason);
co_await ch->async_send(err,
std::format("return unexpected http status code: {}({})", res.result_int(), reason),
use_nothrow_awaitable);
co_return;
}
nlohmann::json rsp_json = nlohmann::json::parse(res.body(), nullptr, false);
if (rsp_json.is_discarded()) {
SPDLOG_ERROR("json parse error: [{}]", fields.back());
ch->try_send(err, std::format("json parse error: [{}]", fields.back()));
co_return;
}
if (!rsp_json.contains("conversationId")) {
SPDLOG_ERROR("not contains conversationId: {}", res.body());
co_await ch->async_send(err, res.body(), use_nothrow_awaitable);
co_return;
}
auto conversation_id = rsp_json["conversationId"].get<std::string>();
SPDLOG_INFO("conversation_id: [{}]", conversation_id);
constexpr std::string_view json_str = R"({
"inputs":"hello",
"parameters":{
"temperature":0.2,
"truncate":1000,
"max_new_tokens":1024,
"stop":[
"</s>"
],
"top_p":0.95,
"repetition_penalty":1.2,
"top_k":50,
"return_full_text":false
},
"stream":true,
"options":{
"id":"9e9b8bc4-6604-40c6-994e-8eb78fa32e37",
"response_id":"04ce2602-3bea-45e8-8efc-cef00680376a",
"is_retry":false,
"use_cache":false,
"web_search_id":""
}
})";
nlohmann::json request = nlohmann::json::parse(json_str, nullptr, false);
request["inputs"] = prompt;
request["options"]["response_id"] = createUuidString();
request["options"]["id"] = createUuidString();
boost::beast::http::request<boost::beast::http::string_body> req{
boost::beast::http::verb::post, std::format("/chat/conversation/{}", conversation_id), 11};
req.set("Cookie", cookie);
req.set(boost::beast::http::field::host, host);
req.set(boost::beast::http::field::user_agent, user_agent);
req.set("Accept", "*/*");
req.set("Content-Type", "application/json");
req.body() = request.dump();
req.prepare_payload();
std::string recv;
co_await sendRequestRecvChunk(ch, stream_, req, 200, [&ch, &recv](std::string chunk_str) {
recv.append(chunk_str);
while (true) {
auto position = recv.find("\n");
if (position == std::string::npos)
break;
auto msg = recv.substr(0, position + 1);
recv.erase(0, position + 1);
msg.pop_back();
if (msg.empty())
continue;
boost::system::error_code err{};
nlohmann::json line_json = nlohmann::json::parse(msg, nullptr, false);
if (line_json.is_discarded()) {
SPDLOG_ERROR("json parse error: [{}]", msg);
ch->try_send(err, std::format("json parse error: [{}]", msg));
continue;
}
if (!line_json.contains("type")) {
SPDLOG_ERROR("invalid json format: [{}]", line_json.dump());
continue;
}
auto type = line_json["type"].get<std::string>();
if (type == "stream") {
if (auto str = line_json["token"].get<std::string>(); !str.empty())
ch->try_send(err, str);
} else if (type == "finalAnswer") {
ch->close();
}
}
return;
});
co_return;
}
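// Flow note: huggingChat() above performs three steps over one TLS stream:
//   1. GET /chat/ and take the session cookie from Set-Cookie,
//   2. POST /chat/conversation to create a conversation and read back its id,
//   3. POST /chat/conversation/{id} and parse the newline-delimited JSON
//      stream, forwarding "stream" tokens until "finalAnswer" closes the channel.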

View File

@@ -20,7 +20,6 @@ public:
boost::asio::awaitable<void> openAi(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> yqcloud(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> huggingChat(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> you(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> binjie(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> gptGo(std::shared_ptr<Channel>, nlohmann::json);
@@ -31,14 +30,11 @@ public:
boost::asio::awaitable<void> llama2(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> geekGpt(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> chatGptAi(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> fakeGpt(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> vercel(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> aivvm(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> chatGpt4Online(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> chatAnywhere(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> chatGptNext(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> gptTalkru(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> deepInfra(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> gptChatly(std::shared_ptr<Channel>, nlohmann::json);
private:
boost::asio::awaitable<std::expected<boost::beast::ssl_stream<boost::beast::tcp_stream>, std::string>>

View File

@@ -674,167 +674,6 @@ create_client:
co_return;
}
boost::asio::awaitable<void> FreeGpt::huggingChat(std::shared_ptr<Channel> ch, nlohmann::json json) {
boost::system::error_code err{};
ScopeExit auto_exit{[&] { ch->close(); }};
auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
constexpr std::string_view host = "huggingface.co";
constexpr std::string_view port = "443";
constexpr std::string_view user_agent{
R"(Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0)"};
boost::beast::http::request<boost::beast::http::empty_body> req_init_cookie{boost::beast::http::verb::get,
"/chat/", 11};
req_init_cookie.set(boost::beast::http::field::host, host);
req_init_cookie.set(boost::beast::http::field::user_agent, user_agent);
auto ret = co_await sendRequestRecvResponse(req_init_cookie, host, port,
std::bind_front(&FreeGpt::createHttpClient, *this));
if (!ret.has_value()) {
co_await ch->async_send(err, ret.error(), use_nothrow_awaitable);
co_return;
}
auto& [response, ctx, stream_] = ret.value();
if (boost::beast::http::status::ok != response.result()) {
SPDLOG_ERROR("http status code: {}", response.result_int());
co_await ch->async_send(err, response.reason(), use_nothrow_awaitable);
co_return;
}
auto fields = splitString(response["Set-Cookie"], " ");
if (fields.empty()) {
std::stringstream ss;
ss << response.base();
SPDLOG_ERROR("get cookie error: {}", ss.str());
co_await ch->async_send(err, "can't get cookie", use_nothrow_awaitable);
co_return;
}
fields[0].pop_back();
std::string cookie{std::move(fields[0])};
SPDLOG_INFO("cookie: {}", cookie);
boost::beast::http::request<boost::beast::http::string_body> req_init_conversation{boost::beast::http::verb::post,
"/chat/conversation", 11};
req_init_conversation.set("Cookie", cookie);
req_init_conversation.set(boost::beast::http::field::host, host);
req_init_conversation.set(boost::beast::http::field::user_agent, user_agent);
req_init_conversation.set("Accept", "*/*");
req_init_conversation.set("Content-Type", "application/json");
req_init_conversation.body() = R"({"model": "meta-llama/Llama-2-70b-chat-hf"})";
req_init_conversation.prepare_payload();
auto [ec, count] = co_await boost::beast::http::async_write(stream_, req_init_conversation, use_nothrow_awaitable);
if (ec) {
SPDLOG_ERROR("{}", ec.message());
co_await ch->async_send(err, ec.message(), use_nothrow_awaitable);
co_return;
}
boost::beast::flat_buffer b;
boost::beast::http::response<boost::beast::http::string_body> res;
std::tie(ec, count) = co_await boost::beast::http::async_read(stream_, b, res, use_nothrow_awaitable);
if (ec) {
SPDLOG_ERROR("{}", ec.message());
co_await ch->async_send(err, ec.message(), use_nothrow_awaitable);
co_return;
}
if (res.result_int() != 200) {
std::string reason{res.reason()};
SPDLOG_ERROR("reason: {}", reason);
co_await ch->async_send(err,
std::format("return unexpected http status code: {}({})", res.result_int(), reason),
use_nothrow_awaitable);
co_return;
}
nlohmann::json rsp_json = nlohmann::json::parse(res.body(), nullptr, false);
if (rsp_json.is_discarded()) {
SPDLOG_ERROR("json parse error: [{}]", fields.back());
ch->try_send(err, std::format("json parse error: [{}]", fields.back()));
co_return;
}
if (!rsp_json.contains("conversationId")) {
SPDLOG_ERROR("not contains conversationId: {}", res.body());
co_await ch->async_send(err, res.body(), use_nothrow_awaitable);
co_return;
}
auto conversation_id = rsp_json["conversationId"].get<std::string>();
SPDLOG_INFO("conversation_id: [{}]", conversation_id);
constexpr std::string_view json_str = R"({
"inputs":"hello",
"parameters":{
"temperature":0.2,
"truncate":1000,
"max_new_tokens":1024,
"stop":[
"</s>"
],
"top_p":0.95,
"repetition_penalty":1.2,
"top_k":50,
"return_full_text":false
},
"stream":true,
"options":{
"id":"9e9b8bc4-6604-40c6-994e-8eb78fa32e37",
"response_id":"04ce2602-3bea-45e8-8efc-cef00680376a",
"is_retry":false,
"use_cache":false,
"web_search_id":""
}
})";
nlohmann::json request = nlohmann::json::parse(json_str, nullptr, false);
request["inputs"] = prompt;
request["options"]["response_id"] = createUuidString();
request["options"]["id"] = createUuidString();
boost::beast::http::request<boost::beast::http::string_body> req{
boost::beast::http::verb::post, std::format("/chat/conversation/{}", conversation_id), 11};
req.set("Cookie", cookie);
req.set(boost::beast::http::field::host, host);
req.set(boost::beast::http::field::user_agent, user_agent);
req.set("Accept", "*/*");
req.set("Content-Type", "application/json");
req.body() = request.dump();
req.prepare_payload();
std::string recv;
co_await sendRequestRecvChunk(ch, stream_, req, 200, [&ch, &recv](std::string chunk_str) {
recv.append(chunk_str);
while (true) {
auto position = recv.find("\n");
if (position == std::string::npos)
break;
auto msg = recv.substr(0, position + 1);
recv.erase(0, position + 1);
msg.pop_back();
if (msg.empty())
continue;
boost::system::error_code err{};
nlohmann::json line_json = nlohmann::json::parse(msg, nullptr, false);
if (line_json.is_discarded()) {
SPDLOG_ERROR("json parse error: [{}]", msg);
ch->try_send(err, std::format("json parse error: [{}]", msg));
continue;
}
if (!line_json.contains("type")) {
SPDLOG_ERROR("invalid json format: [{}]", line_json.dump());
continue;
}
auto type = line_json["type"].get<std::string>();
if (type == "stream") {
if (auto str = line_json["token"].get<std::string>(); !str.empty())
ch->try_send(err, str);
} else if (type == "finalAnswer") {
ch->close();
}
}
return;
});
co_return;
}
boost::asio::awaitable<void> FreeGpt::you(std::shared_ptr<Channel> ch, nlohmann::json json) {
co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
boost::system::error_code err{};
@@ -1121,7 +960,7 @@ boost::asio::awaitable<void> FreeGpt::chatForAi(std::shared_ptr<Channel> ch, nlo
static std::unordered_multimap<std::string, std::string> headers{
{"Content-Type", "application/json"},
{"Origin", "https://chatforai.store"},
{"Referer", "https://chatforai.store/"},
{"Referer", "https://chatforai.store/?r=b"},
};
auto ret = Curl()
.setUrl("https://chatforai.store/api/handle/provider-openai")
@@ -1134,7 +973,7 @@ boost::asio::awaitable<void> FreeGpt::chatForAi(std::shared_ptr<Channel> ch, nlo
.setBody([&] {
uint64_t timestamp = getTimestamp();
constexpr std::string_view request_str{R"({
"conversationId": "id_1696984301982",
"conversationId":"id_1701845910554",
"conversationType":"chat_continuous",
"botId":"chat_continuous",
"globalSettings":{
@@ -1142,19 +981,23 @@ boost::asio::awaitable<void> FreeGpt::chatForAi(std::shared_ptr<Channel> ch, nlo
"model":"gpt-3.5-turbo",
"messageHistorySize":5,
"temperature":0.7,
"top_p": 1
"top_p":1,
"stream":false
},
"botSettings":{
},
"botSettings": {},
"prompt":"hello",
"messages": [{
"messages":[
{
"role":"user",
"content":"hello"
}],
"sign": "15d8e701706743ffa74f8b96c97bd1f79354c7da4a97438c81c6bb259004cd77",
"timestamp": 1696984302017
}
],
"timestamp":1701845910677,
"sign":"fd7fac179ff93e8745b3c7a61075e3d8062a795e7ca1cd3d7cf24e3303de7a95"
})"};
nlohmann::json request = nlohmann::json::parse(request_str, nullptr, false);
auto conversation_id = std::format("id_{}", timestamp - 35);
auto conversation_id = std::format("id_{}", timestamp - 123);
request["conversationId"] = conversation_id;
request["timestamp"] = timestamp;
request["sign"] = generate_signature(timestamp, prompt, conversation_id);
@@ -1564,8 +1407,8 @@ boost::asio::awaitable<void> FreeGpt::llama2(std::shared_ptr<Channel> ch, nlohma
})
.setBody([&] {
constexpr std::string_view ask_json_str = R"({
"prompt":"[INST] hello [/INST]\n[INST] hello [/INST]\n",
"version":"d24902e3fa9b698cc208b5e63136c4e26e828659a9f09827ca6ec5bb83014381",
"prompt":"[INST] hello [/INST]\n",
"version":"02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3",
"systemPrompt":"You are a helpful assistant.",
"temperature":0.75,
"topP":0.9,
@@ -1846,365 +1689,6 @@ create_client:
co_return;
}
boost::asio::awaitable<void> FreeGpt::fakeGpt(std::shared_ptr<Channel> ch, nlohmann::json json) {
co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
boost::system::error_code err{};
auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
std::unordered_multimap<std::string, std::string> headers{
{"Accept", "*/*"},
{"referer", "https://chat-shared2.zhile.io/?v=2"},
};
std::multimap<std::string, std::string> api_load_params{
{"t", std::to_string(getTimestamp<std::chrono::seconds>())},
};
auto api_load_url = std::format("https://chat-shared2.zhile.io/api/loads?{}", paramsToQueryStr(api_load_params));
std::string chunk_body;
Curl curl;
auto ret = curl.setUrl(api_load_url)
.setProxy(m_cfg.http_proxy)
.setRecvHeadersCallback([](std::string) { return; })
.setRecvBodyCallback([&](std::string str) {
chunk_body.append(str);
return;
})
.setHttpHeaders([&] -> auto& { return headers; }())
.perform();
if (ret.has_value()) {
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, ret.value());
co_return;
}
nlohmann::json json_result = nlohmann::json::parse(chunk_body, nullptr, false);
if (json_result.is_discarded()) {
SPDLOG_ERROR("json parse error: [{}]", chunk_body);
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, std::format("json parse error: [{}]", chunk_body));
co_return;
}
std::vector<nlohmann::json> random_j;
for (auto& j : json_result["loads"]) {
if (j["count"].get<int32_t>() == 0)
random_j.emplace_back(std::move(j));
}
if (random_j.empty()) {
SPDLOG_ERROR("random_j is empty!!!");
ch->try_send(err, json_result.dump());
co_return;
}
std::mt19937 g{std::random_device{}()};
std::uniform_int_distribution<std::size_t> d{0, random_j.size() - 1};  // distribution bounds are inclusive
auto token_id = random_j[d(g)];
std::cout << token_id.dump() << std::endl;
headers.emplace("Content-Type", "application/x-www-form-urlencoded");
// send login
std::multimap<std::string, std::string> login_params{
{"token_key", token_id["token_id"].get<std::string>()},
{"session_password",
[](int len) -> std::string {
static std::string chars{"abcdefghijklmnopqrstuvwxyz"};
static std::string letter{"0123456789"};
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<> dis(0, 1000000);
std::string random_string;
random_string += letter[dis(gen) % letter.length()];
len = len - 1;
for (int i = 0; i < len; i++)
random_string += chars[dis(gen) % chars.length()];
return random_string;
}(10)},
};
chunk_body.clear();
headers.erase("Content-Type");
std::string header_str;
auto body = paramsToQueryStr(login_params);
ret = curl.setUrl("https://chat-shared2.zhile.io/auth/login")
.setProxy(m_cfg.http_proxy)
.setRecvHeadersCallback([&](std::string str) {
header_str.append(str);
return;
})
.setRecvBodyCallback([&](std::string str) {
chunk_body.append(str);
return;
})
.setBody(body)
.clearHeaders()
.setHttpHeaders([&] -> auto& { return headers; }())
.perform();
if (ret.has_value()) {
SPDLOG_ERROR("{}", ret.value());
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, ret.value());
co_return;
}
auto response_header = Curl::parseHttpHeaders(header_str);
auto range = response_header.equal_range("set-cookie");
std::string cookie;
for (auto it = range.first; it != range.second; ++it) {
if (!(it->second.contains("credential=")))
continue;
auto view = it->second | std::views::drop_while(isspace) | std::views::reverse |
std::views::drop_while(isspace) | std::views::reverse;
auto fields = splitString(std::string{view.begin(), view.end()}, " ");
if (fields.empty()) {
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, "can't get cookie");
co_return;
}
cookie = std::move(fields[0]);
break;
}
SPDLOG_INFO("cookie: [{}]", cookie);
SPDLOG_INFO("rsp: [{}]", chunk_body);
chunk_body.clear();
headers.emplace("cookie", cookie);
// /api/auth/session
ret = curl.setUrl("https://chat-shared2.zhile.io/api/auth/session")
.setProxy(m_cfg.http_proxy)
.setOpt(CURLOPT_HTTPGET, 1L)
.setRecvHeadersCallback([](std::string str) {
std::cout << str << std::endl;
return;
})
.setRecvBodyCallback([&](std::string str) mutable {
chunk_body.append(str);
return;
})
.clearHeaders()
.setHttpHeaders([&] -> auto& { return headers; }())
.perform();
if (ret.has_value()) {
SPDLOG_ERROR("{}", ret.value());
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, ret.value());
co_return;
}
json_result.clear();
json_result = nlohmann::json::parse(chunk_body, nullptr, false);
if (json_result.is_discarded()) {
SPDLOG_ERROR("/api/auth/session json parse error: [{}]", chunk_body);
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, std::format("/api/auth/session parse error: [{}]", chunk_body));
co_return;
}
auto cache_token = json_result["accessToken"].get<std::string>();
SPDLOG_INFO("accessToken: [{}]", cache_token);
headers.erase("Accept");
headers.emplace("Content-Type", "application/json");
headers.emplace("Accept", "text/event-stream");
auto auth = std::format("Bearer {}", cache_token);
SPDLOG_INFO("auth: [{}]", auth);
headers.emplace("X-Authorization", auth);
std::string recv;
std::string last_message;
ret = curl.setUrl("https://chat-shared2.zhile.io/api/conversation")
.setProxy(m_cfg.http_proxy)
.setRecvHeadersCallback([](std::string) { return; })
.setRecvBodyCallback([&](std::string str) mutable {
recv.append(str);
while (true) {
auto position = recv.find("\n");
if (position == std::string::npos)
break;
auto msg = recv.substr(0, position + 1);
recv.erase(0, position + 1);
msg.pop_back();
if (msg.empty() || !msg.starts_with("data: ") || !msg.contains("content"))
continue;
msg.erase(0, 6);
if (msg == "[DONE]")
break;
boost::system::error_code err{};
nlohmann::json line_json = nlohmann::json::parse(msg, nullptr, false);
if (line_json.is_discarded()) {
SPDLOG_ERROR("json parse error: [{}]", msg);
boost::asio::post(ch->get_executor(),
[=] { ch->try_send(err, std::format("json parse error: [{}]", msg)); });
continue;
}
auto type = line_json["message"]["content"]["content_type"].get<std::string>();
if (type == "text") {
auto new_message = line_json["message"]["content"]["parts"][0].get<std::string>();
if (new_message.empty())
continue;
std::string tmp{new_message};
new_message.erase(0, last_message.size());
last_message = std::move(tmp);
if (!new_message.empty())
boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, new_message); });
}
}
return;
})
.setBody([&] {
constexpr std::string_view json_str = R"({
"action":"next",
"messages":[
{
"id":"a68cd787-c96c-4234-8ec9-00805f73a7b8",
"author":{"role":"user"},
"content":{
"content_type":"text",
"parts":["hello"]
},
"metadata":{}
}
],
"parent_message_id":"fdc171e6-dd0d-4494-93ce-e7d219e6ed05",
"model":"text-davinci-002-render-sha",
"plugin_ids":[],
"timezone_offset_min":-120,
"suggestions":[],
"history_and_training_disabled":true,
"arkose_token":"",
"force_paragen":false
})";
nlohmann::json request = nlohmann::json::parse(json_str, nullptr, false);
request["parent_message_id"] = createUuidString();
request["messages"][0]["id"] = createUuidString();
request["messages"][0]["content"]["parts"][0] = prompt;
SPDLOG_INFO("request: [{}]", request.dump(2));
return request.dump();
}())
.clearHeaders()
.setHttpHeaders(headers)
.perform();
if (ret.has_value()) {
SPDLOG_ERROR("{}", ret.value());
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, ret.value());
co_return;
}
}
boost::asio::awaitable<void> FreeGpt::vercel(std::shared_ptr<Channel> ch, nlohmann::json json) {
co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
boost::system::error_code err{};
auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
auto create_random_number = [] {
std::random_device rd;
std::mt19937 mt(rd());
std::uniform_int_distribution<int> distribution(99, 999);
int random_number = distribution(mt);
return random_number;
};
constexpr std::string_view user_agent_str{
R"(Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.{}.{} Safari/537.36)"};
std::unordered_multimap<std::string, std::string> headers{
{"Accept", "*/*"},
{"authority", "sdk.vercel.ai"},
{"content-type", "application/json"},
{"referer", "https://sdk.vercel.ai/"},
{"origin", "https://sdk.vercel.ai"},
{"sec-ch-ua", R"("Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117")"},
{"sec-ch-ua-mobile", R"(?0)"},
{"sec-ch-ua-platform", R"("macOS")"},
{"cache-control", "no-cache"},
{"pragma", "no-cache"},
};
headers.emplace("user-agent", std::format(user_agent_str, create_random_number(), create_random_number()));
std::string recv;
Curl curl;
auto ret = curl.setUrl("https://sdk.vercel.ai/openai.jpeg")
.setProxy(m_cfg.http_proxy)
.setRecvHeadersCallback([](std::string) { return; })
.setRecvBodyCallback([&](std::string str) mutable {
recv.append(str);
return;
})
.clearHeaders()
.setHttpHeaders(headers)
.perform();
if (ret.has_value()) {
SPDLOG_ERROR("{}", ret.value());
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, ret.value());
co_return;
}
nlohmann::json request;
request["data"] = std::move(recv);
recv.clear();
auto vercel_rsp = callZeus(std::format("{}/vercel", m_cfg.zeus), request.dump());
if (!vercel_rsp.has_value()) {
SPDLOG_ERROR("callZeus vercel error: {}", vercel_rsp.error());
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, vercel_rsp.error());
co_return;
}
headers.erase("custom-encoding");
headers.erase("user-agent");
headers.emplace("custom-encoding", vercel_rsp.value()["data"]);
headers.emplace("user-agent", std::format(user_agent_str, create_random_number(), create_random_number()));
for (int i = 0; i < 20; i++) {
auto ret =
curl.setUrl("https://sdk.vercel.ai/api/generate")
.setProxy(m_cfg.http_proxy)
.setRecvHeadersCallback([](std::string) { return; })
.setRecvBodyCallback([&](std::string str) mutable {
if (str == "Internal Server Error" || str == "Rate limit exceeded") {
SPDLOG_WARN("vercel: [{}]", str);
return;
}
boost::asio::post(ch->get_executor(), [=, str = std::move(str)] { ch->try_send(err, str); });
return;
})
.setBody([&] {
constexpr std::string_view json_str = R"({
"model":"openai:gpt-3.5-turbo",
"messages":[
{
"role":"user",
"content":"hello"
}
],
"playgroundId":"403bce4c-7eb6-47b0-b1b5-0cb6b2469f70",
"chatIndex":0,
"temperature":0.7,
"maximumLength":4096,
"topP":1,
"topK":1,
"presencePenalty":1,
"frequencyPenalty":1,
"stopSequences":[]
})";
nlohmann::json request = nlohmann::json::parse(json_str, nullptr, false);
request["messages"] = getConversationJson(json);
request["playgroundId"] = createUuidString();
SPDLOG_INFO("request: [{}]", request.dump(2));
return request.dump();
}())
.clearHeaders()
.setHttpHeaders(headers)
.perform();
if (ret.has_value()) {
SPDLOG_ERROR("{}", ret.value());
co_await timeout(std::chrono::seconds(2));
continue;
}
co_return;
}
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, "call sdk.vercel.ai error");
co_return;
}
boost::asio::awaitable<void> FreeGpt::aivvm(std::shared_ptr<Channel> ch, nlohmann::json json) {
boost::system::error_code err{};
ScopeExit auto_exit{[&] { ch->close(); }};
@@ -2286,33 +1770,25 @@ boost::asio::awaitable<void> FreeGpt::aivvm(std::shared_ptr<Channel> ch, nlohman
cookie_queue.enqueue(std::move(item));
});
auto user_agent = std::get<2>(item);
constexpr std::string_view host = "chat.aivvm.com";
constexpr std::string_view port = "443";
boost::asio::ssl::context ctx1(boost::asio::ssl::context::tls);
ctx1.set_verify_mode(boost::asio::ssl::verify_none);
auto client = co_await createHttpClient(ctx1, host, port);
if (!client.has_value()) {
SPDLOG_ERROR("createHttpClient: {}", client.error());
co_await ch->async_send(err, client.error(), use_nothrow_awaitable);
co_return;
}
auto& stream_ = client.value();
boost::beast::http::request<boost::beast::http::string_body> req{boost::beast::http::verb::post, "/api/chat", 11};
req.set(boost::beast::http::field::host, host);
req.set(boost::beast::http::field::user_agent, user_agent);
req.set("Accept", "*/*");
req.set("accept-language", "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2");
req.set("origin", "https://chat.aivvm.com");
req.set("referer", "https://chat.aivvm.com/zh");
req.set(boost::beast::http::field::content_type, "application/json");
req.set("sec-fetch-dest", "empty");
req.set("sec-fetch-mode", "cors");
req.set("sec-fetch-site", "same-origin");
req.set("DNT", "1");
req.set("Cookie", std::get<1>(item));
std::unordered_multimap<std::string, std::string> headers{
{"Accept", "*/*"},
{"Content-Type", "application/json"},
{"Cookie", std::get<1>(item)},
{"Origin", "https://chat.aivvm.com"},
{"Referer", "https://chat.aivvm.com/zh"},
{"User-Agent", user_agent},
};
auto ret = Curl()
.setUrl("https://chat.aivvm.com/api/chat")
.setRecvHeadersCallback([](std::string) { return; })
.setRecvBodyCallback([&](std::string str) mutable {
boost::system::error_code err{};
if (!str.empty())
ch->try_send(err, str);
return;
})
.setBody([&] {
auto model = json.at("model").get<std::string>();
if (model == "gpt-3.5-turbo-stream-aivvm") {
constexpr std::string_view json_str = R"({
@@ -2335,7 +1811,7 @@ boost::asio::awaitable<void> FreeGpt::aivvm(std::shared_ptr<Channel> ch, nlohman
nlohmann::json request = nlohmann::json::parse(json_str, nullptr, false);
request["messages"] = getConversationJson(json);
SPDLOG_INFO("{}", request.dump(2));
req.body() = request.dump();
return request.dump();
} else {
constexpr std::string_view json_str = R"({
"model":{
@@ -2355,17 +1831,16 @@ boost::asio::awaitable<void> FreeGpt::aivvm(std::shared_ptr<Channel> ch, nlohman
nlohmann::json request = nlohmann::json::parse(json_str, nullptr, false);
request["messages"] = getConversationJson(json);
SPDLOG_INFO("{}", request.dump(2));
req.body() = request.dump();
return request.dump();
}
}())
.setHttpHeaders(headers)
.perform();
if (ret.has_value()) {
SPDLOG_ERROR("{}", ret.value());
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, ret.value());
}
req.prepare_payload();
auto result = co_await sendRequestRecvChunk(ch, stream_, req, 200, [&ch](std::string str) {
boost::system::error_code err{};
if (!str.empty())
ch->try_send(err, str);
});
if (result == Status::UnexpectedHttpCode)
return_flag = false;
co_return;
}
@@ -2455,125 +1930,6 @@ boost::asio::awaitable<void> FreeGpt::chatGpt4Online(std::shared_ptr<Channel> ch
co_return;
}
boost::asio::awaitable<void> FreeGpt::chatAnywhere(std::shared_ptr<Channel> ch, nlohmann::json json) {
co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
boost::system::error_code err{};
std::unordered_multimap<std::string, std::string> headers{
{"Accept", "application/json, text/plain, */*"},
{"content-type", "application/json"},
{"Referer", "https://chatanywhere.cn/"},
{"Origin", "https://chatanywhere.cn"},
{"Authorization", ""},
};
std::string recv;
auto ret = Curl()
.setUrl("https://chatanywhere.cn/v1/chat/gpt/")
.setProxy(m_cfg.http_proxy)
.setRecvHeadersCallback([](std::string) { return; })
.setRecvBodyCallback([&](std::string str) mutable {
boost::system::error_code err{};
boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, str); });
})
.setBody([&] {
constexpr std::string_view ask_json_str = R"({
"list":[
{
"role":"user",
"content":"hello"
}
],
"id":"s1_qYuOLXjI3rEpc7WHfQ",
"title":"hello",
"prompt":"",
"temperature":0.5,
"models":"61490748",
"continuous":true
})";
nlohmann::json ask_request = nlohmann::json::parse(ask_json_str, nullptr, false);
ask_request["title"] = prompt;
ask_request["list"] = getConversationJson(json);
std::string ask_request_str = ask_request.dump();
SPDLOG_INFO("request: [{}]", ask_request_str);
return ask_request_str;
}())
.clearHeaders()
.setHttpHeaders(headers)
.perform();
if (ret.has_value()) {
SPDLOG_ERROR("{}", ret.value());
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, ret.value());
}
co_return;
}
boost::asio::awaitable<void> FreeGpt::chatGptNext(std::shared_ptr<Channel> ch, nlohmann::json json) {
co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
boost::system::error_code err{};
ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
// auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
std::string recv;
auto ret = Curl()
.setUrl("https://chat.fstha.com/api/openai/v1/chat/completions")
.setProxy(m_cfg.http_proxy)
.setRecvBodyCallback([&](std::string str) mutable { recv.append(str); })
.setHttpHeaders([&] -> auto& {
static std::unordered_multimap<std::string, std::string> headers{
{"Accept", "text/event-stream"},
{"origin", "https://chat.fstha.com"},
{"referer", "https://chat.fstha.com/"},
{"x-requested-with", "XMLHttpRequest"},
{"Authorization", "Bearer ak-chatgpt-nice"},
{"Alt-Used", "chat.fstha.com"},
};
return headers;
}())
.setBody([&] {
constexpr std::string_view request_str{R"({
"messages":[
{
"role":"user",
"content":"hello"
}
],
"stream":false,
"model":"gpt-3.5-turbo",
"temperature":0.5,
"presence_penalty":0,
"frequency_penalty":0,
"top_p":1
})"};
nlohmann::json request = nlohmann::json::parse(request_str, nullptr, false);
request["messages"] = getConversationJson(json);
auto str = request.dump();
SPDLOG_INFO("request : [{}]", str);
return str;
}())
.perform();
if (ret.has_value()) {
SPDLOG_ERROR("{}", ret.value());
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, ret.value());
co_return;
}
SPDLOG_INFO("{}", recv);
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
nlohmann::json rsp = nlohmann::json::parse(recv, nullptr, false);
if (rsp.is_discarded()) {
SPDLOG_ERROR("json parse error: {}", recv);
ch->try_send(err, recv);
co_return;  // a discarded json value must not be read below
}
auto info = rsp["choices"][0]["message"]["content"].get<std::string>();
ch->try_send(err, info);
co_return;
}
boost::asio::awaitable<void> FreeGpt::gptTalkru(std::shared_ptr<Channel> ch, nlohmann::json json) {
co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
@ -2666,12 +2022,14 @@ boost::asio::awaitable<void> FreeGpt::deepInfra(std::shared_ptr<Channel> ch, nlo
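            // Surface unparsable SSE fragments to the client instead of dropping them silently.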
nlohmann::json line_json = nlohmann::json::parse(fields.back(), nullptr, false);
if (line_json.is_discarded()) {
SPDLOG_ERROR("json parse error: [{}]", fields.back());
boost::asio::post(ch->get_executor(), [=] {
ch->try_send(err, std::format("json parse error: [{}]", fields.back()));
});
continue;
}
auto str = line_json["choices"][0]["delta"]["content"].get<std::string>();
if (!str.empty())
ch->try_send(err, str);
boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, str); });
}
})
.setBody([&] {
@ -2701,3 +2059,141 @@ boost::asio::awaitable<void> FreeGpt::deepInfra(std::shared_ptr<Channel> ch, nlo
}
co_return;
}
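// gptChatly sits behind a Cloudflare challenge, so a cf_clearance cookie is first
// obtained via the configured flaresolverr instance and cached for reuse.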
boost::asio::awaitable<void> FreeGpt::gptChatly(std::shared_ptr<Channel> ch, nlohmann::json json) {
co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
boost::system::error_code err{};
using Tuple = std::tuple<std::chrono::time_point<std::chrono::system_clock>, std::string, std::string>;
static moodycamel::ConcurrentQueue<Tuple> cookie_queue;
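    // Cookie cache entry: (time the challenge was solved, cf_clearance cookie, user agent).
    // Entries older than 120 minutes are treated as stale and thrown away.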
Tuple item;
bool found{false};
if (cookie_queue.try_dequeue(item)) {
auto& [time_point, cookie, _] = item;
if (std::chrono::system_clock::now() - time_point < std::chrono::minutes(120))
found = true;
}
if (!found) {
std::string recv;
        auto get_cookie_ret = Curl()
.setUrl(m_cfg.flaresolverr)
.setRecvHeadersCallback([](std::string) { return; })
.setRecvBodyCallback([&](std::string str) mutable {
recv.append(str);
return;
})
.setBody([] {
nlohmann::json data{
{"cmd", "request.get"},
{"url", "https://gptchatly.com"},
{"maxTimeout", 60000},
{"session_ttl_minutes", 60},
};
return data.dump();
}())
.setHttpHeaders([&] -> auto& {
static std::unordered_multimap<std::string, std::string> headers{
{"Accept", "*/*"},
{"Content-Type", "application/json"},
};
return headers;
}())
.perform();
        if (get_cookie_ret.has_value()) {
            SPDLOG_ERROR("call {}: [{}]", m_cfg.flaresolverr, get_cookie_ret.value());
            co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
            ch->try_send(err, get_cookie_ret.value());
co_return;
}
nlohmann::json rsp = nlohmann::json::parse(recv, nullptr, false);
if (rsp.is_discarded()) {
SPDLOG_ERROR("json parse error");
co_await ch->async_send(err, "json parse error", use_nothrow_awaitable);
co_return;
}
SPDLOG_INFO("rsp: {}", rsp.dump());
auto status = rsp.at("status").get<std::string>();
if (status != "ok") {
SPDLOG_ERROR("get cookie error");
co_await ch->async_send(err, "get cookie error", use_nothrow_awaitable);
co_return;
}
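        // flaresolverr reports the solved cookies under solution.cookies; the
        // Cloudflare clearance token is the cf_clearance entry.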
auto it =
std::ranges::find_if(rsp["solution"]["cookies"], [](auto& p) { return p["name"] == "cf_clearance"; });
if (it == rsp["solution"]["cookies"].end()) {
SPDLOG_ERROR("not found cookie");
co_await ch->async_send(err, "not found cookie", use_nothrow_awaitable);
co_return;
}
std::string user_agent = rsp["solution"].at("userAgent");
auto cookie_str = std::format("cf_clearance={}", (*it)["value"].get<std::string>());
// std::cout << rsp["solution"]["userAgent"].get<std::string>() << std::endl;
item = std::make_tuple(std::chrono::system_clock::now(), std::move(cookie_str), user_agent);
}
SPDLOG_INFO("cookie: {}", std::get<1>(item));
bool return_flag{true};
ScopeExit auto_free([&] mutable {
if (!return_flag)
return;
auto& [time_point, cookie, _] = item;
if (std::chrono::system_clock::now() - time_point < std::chrono::minutes(120))
cookie_queue.enqueue(std::move(item));
});
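    // Cloudflare only honors cf_clearance together with the User-Agent that solved
    // the challenge, so the request carries both.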
auto user_agent = std::get<2>(item);
std::unordered_multimap<std::string, std::string> headers{
{"Accept", "*/*"},
{"Content-Type", "application/json"},
{"Cookie", std::get<1>(item)},
{"Origin", "https://gptchatly.com"},
{"Referer", "https://gptchatly.com/"},
{"User-Agent", user_agent},
};
auto ret =
Curl()
.setUrl("https://gptchatly.com/felch-response")
.setRecvHeadersCallback([](std::string) { return; })
.setRecvBodyCallback([&](std::string str) mutable {
boost::system::error_code err{};
if (!str.empty()) {
nlohmann::json line_json = nlohmann::json::parse(str, nullptr, false);
if (line_json.is_discarded()) {
SPDLOG_ERROR("json parse error: [{}]", str);
boost::asio::post(ch->get_executor(),
[=] { ch->try_send(err, std::format("json parse error: [{}]", str)); });
return;
}
if (line_json.contains("chatGPTResponse"))
boost::asio::post(ch->get_executor(),
[=] { ch->try_send(err, line_json["chatGPTResponse"].get<std::string>()); });
else
boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, str); });
}
return;
})
.setBody([&] {
constexpr std::string_view ask_json_str = R"({
"past_conversations":[
{
"role":"system",
"content":"Always reply in a language that user talks to you. Be concise. Don't repeat itself."
}
]
})";
nlohmann::json ask_request = nlohmann::json::parse(ask_json_str, nullptr, false);
auto request_json = getConversationJson(json);
for (auto& j : request_json)
ask_request["past_conversations"].push_back(j);
std::string ask_request_str = ask_request.dump();
SPDLOG_INFO("ask_request_str: [{}]", ask_request_str);
return ask_request_str;
}())
.setHttpHeaders(headers)
.perform();
if (ret.has_value()) {
SPDLOG_ERROR("{}", ret.value());
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, ret.value());
}
co_return;
}

View File

@ -337,7 +337,6 @@ int main(int, char** argv) {
ADD_METHOD("gpt-3.5-turbo-stream-openai", FreeGpt::openAi);
ADD_METHOD("gpt-3.5-turbo-stream-yqcloud", FreeGpt::yqcloud);
ADD_METHOD("gpt-OpenAssistant-stream-HuggingChat", FreeGpt::huggingChat)
ADD_METHOD("gpt-4-turbo-stream-you", FreeGpt::you);
ADD_METHOD("gpt-3-stream-binjie", FreeGpt::binjie);
ADD_METHOD("gpt-3.5-turbo-stream-GptGo", FreeGpt::gptGo);
@ -346,17 +345,14 @@ int main(int, char** argv) {
ADD_METHOD("gpt-3.5-turbo-stream-gptforlove", FreeGpt::gptForLove);
ADD_METHOD("gpt-3.5-turbo-stream-ChatgptDemo", FreeGpt::chatGptDemo);
ADD_METHOD("gpt-3.5-turbo-stream-GeekGpt", FreeGpt::geekGpt);
ADD_METHOD("llama2", FreeGpt::llama2);
ADD_METHOD("llama2-70B", FreeGpt::llama2);
ADD_METHOD("gpt-3.5-turbo-stream-chatGptAi", FreeGpt::chatGptAi);
ADD_METHOD("gpt-3.5-turbo-stream-FakeGpt", FreeGpt::fakeGpt);
ADD_METHOD("gpt-3.5-turbo-stream-Vercel", FreeGpt::vercel);
ADD_METHOD("gpt-3.5-turbo-stream-aivvm", FreeGpt::aivvm);
ADD_METHOD("gpt-4-stream-aivvm", FreeGpt::aivvm);
ADD_METHOD("gpt-4-ChatGpt4Online", FreeGpt::chatGpt4Online);
ADD_METHOD("gpt-3.5-turbo-stream-ChatAnywhere", FreeGpt::chatAnywhere);
ADD_METHOD("gpt-3.5-turbo-ChatgptNext", FreeGpt::chatGptNext);
ADD_METHOD("gpt-3.5-turbo-stream-gptTalkRu", FreeGpt::gptTalkru);
ADD_METHOD("Llama-2-70b-chat-hf-stream-DeepInfra", FreeGpt::deepInfra);
ADD_METHOD("gpt-3.5-turbo-gptChatly", FreeGpt::gptChatly);
SPDLOG_INFO("active provider:");
for (auto& [provider, _] : gpt_function)