mirror of https://github.com/Balshgit/gpt_chat_bot.git (synced 2025-09-11 22:30:41 +03:00)

add provider deepInfra & remove deepai (#61)

This commit is contained in:
parent 1d8b3c09d7
commit c80b001740
@@ -41,7 +41,6 @@ class ChatGptModelsEnum(StrEnum):
     gpt_3_5_turbo_stream_openai = "gpt-3.5-turbo-stream-openai"
     gpt_4_ChatgptAi = "gpt-4-ChatgptAi"
     gpt_3_5_turbo_weWordle = "gpt-3.5-turbo-weWordle"
-    gpt_3_5_turbo_stream_DeepAi = "gpt-3.5-turbo-stream-DeepAi"
     gpt_3_5_turbo_stream_yqcloud = "gpt-3.5-turbo-stream-yqcloud"
     gpt_OpenAssistant_stream_HuggingChat = "gpt-OpenAssistant-stream-HuggingChat"
     gpt_4_turbo_stream_you = "gpt-4-turbo-stream-you"
@@ -56,6 +55,7 @@ class ChatGptModelsEnum(StrEnum):
     gpt_3_5_turbo_stream_ChatAnywhere = "gpt-3.5-turbo-stream-ChatAnywhere"
     gpt_3_5_turbo_ChatgptNext = "gpt-3.5-turbo-ChatgptNext"
     gpt_3_5_turbo_stream_gptTalkRu = "gpt-3.5-turbo--stream-gptTalkRu"
+    Llama_2_70b_chat_hf_stream_DeepInfra = "Llama-2-70b-chat-hf-stream-DeepInfra"
     llama2 = "llama2"
     gpt_3_5_turbo_stream_Berlin = "gpt-3.5-turbo-stream-Berlin"
     gpt_4_ChatGpt4Online = "gpt-4-ChatGpt4Online"
@@ -1834,3 +1834,62 @@ boost::asio::awaitable<void> FreeGpt::chatBase(std::shared_ptr<Channel> ch, nloh
     }
     co_return;
 }
+
+boost::asio::awaitable<void> FreeGpt::deepAi(std::shared_ptr<Channel> ch, nlohmann::json json) {
+    co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
+    ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
+
+    boost::system::error_code err{};
+    auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
+
+    std::string user_agent{
+        R"(Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36)"};
+
+    std::random_device rd;
+    std::mt19937 mt(rd());
+    std::uniform_int_distribution<uint64_t> dist(0, 100000000);
+    uint64_t part1{dist(mt)};
+    auto part2 = md5(user_agent + md5(user_agent + md5(std::format("{}{}x", user_agent, part1))));
+    auto api_key = std::format("tryit-{}-{}", part1, part2);
+
+    constexpr char CRLF[] = "\r\n";
+    static std::string MULTI_PART_BOUNDARY = "9bc627aea4f77e150e6057f78036e73f";
+
+    auto content_type_str = std::format("multipart/form-data; boundary={}", MULTI_PART_BOUNDARY);
+    SPDLOG_INFO("content_type_str: {}", content_type_str);
+    auto api_key_str = std::format("api-key: {}", api_key);
+
+    std::unordered_multimap<std::string, std::string> headers{
+        {"Content-Type", content_type_str},
+        {"api-key", api_key},
+    };
+    auto ret = Curl()
+                   .setUrl("https://api.deepai.org/save_chat_session")
+                   .setProxy(m_cfg.http_proxy)
+                   .setRecvHeadersCallback([&](std::string) {})
+                   .setRecvBodyCallback([&](std::string str) {
+                       boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, str); });
+                       return;
+                   })
+                   .setBody([&] {
+                       nlohmann::json request_json{{{"role", "user"}, {"content", std::move(prompt)}}};
+                       std::ostringstream payload;
+                       payload << "--" << MULTI_PART_BOUNDARY << CRLF
+                               << R"(Content-Disposition: form-data; name="chat_style")" << CRLF << CRLF << "chat"
+                               << CRLF << "--" << MULTI_PART_BOUNDARY << CRLF
+                               << R"(Content-Disposition: form-data; name="chatHistory")" << CRLF << CRLF
+                               << request_json.dump() << CRLF << "--" << MULTI_PART_BOUNDARY << "--" << CRLF;
+                       SPDLOG_INFO("{}", payload.str());
+                       auto str = payload.str();
+                       return str;
+                   }())
+                   .clearHeaders()
+                   .setHttpHeaders(headers)
+                   .perform();
+    if (ret.has_value()) {
+        SPDLOG_ERROR("{}", ret.value());
+        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
+        ch->try_send(err, ret.value());
+    }
+    co_return;
+}
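The setBody lambda above hand-rolls a multipart/form-data body with a fixed boundary. As a rough illustration, the same payload in Python would look like this (build_deepai_payload is a hypothetical helper, not part of the repo; the field names and boundary are taken from the diff):

import json

CRLF = "\r\n"
BOUNDARY = "9bc627aea4f77e150e6057f78036e73f"

def build_deepai_payload(prompt: str) -> str:
    # Mirrors the std::ostringstream chain: two form-data parts,
    # "chat_style" and "chatHistory", closed with "--BOUNDARY--".
    chat_history = json.dumps([{"role": "user", "content": prompt}])
    return (
        f"--{BOUNDARY}{CRLF}"
        f'Content-Disposition: form-data; name="chat_style"{CRLF}{CRLF}'
        f"chat{CRLF}"
        f"--{BOUNDARY}{CRLF}"
        f'Content-Disposition: form-data; name="chatHistory"{CRLF}{CRLF}'
        f"{chat_history}{CRLF}"
        f"--{BOUNDARY}--{CRLF}"
    )

print(build_deepai_payload("hello"))

The trailing "--BOUNDARY--" line terminates the multipart body, which is why the Content-Type header must carry the same boundary string.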
@@ -18,7 +18,6 @@ public:
 
     FreeGpt(Config&);
 
-    boost::asio::awaitable<void> deepAi(std::shared_ptr<Channel>, nlohmann::json);
     boost::asio::awaitable<void> openAi(std::shared_ptr<Channel>, nlohmann::json);
     boost::asio::awaitable<void> yqcloud(std::shared_ptr<Channel>, nlohmann::json);
     boost::asio::awaitable<void> huggingChat(std::shared_ptr<Channel>, nlohmann::json);
@@ -39,6 +38,7 @@ public:
     boost::asio::awaitable<void> chatAnywhere(std::shared_ptr<Channel>, nlohmann::json);
     boost::asio::awaitable<void> chatGptNext(std::shared_ptr<Channel>, nlohmann::json);
     boost::asio::awaitable<void> gptTalkru(std::shared_ptr<Channel>, nlohmann::json);
+    boost::asio::awaitable<void> deepInfra(std::shared_ptr<Channel>, nlohmann::json);
 
 private:
     boost::asio::awaitable<std::expected<boost::beast::ssl_stream<boost::beast::tcp_stream>, std::string>>
@@ -542,65 +542,6 @@ FreeGpt::createHttpClient(boost::asio::ssl::context& ctx, std::string_view host,
     co_return stream_;
 }
 
-boost::asio::awaitable<void> FreeGpt::deepAi(std::shared_ptr<Channel> ch, nlohmann::json json) {
-    co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
-    ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
-
-    boost::system::error_code err{};
-    auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
-
-    std::string user_agent{
-        R"(Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36)"};
-
-    std::random_device rd;
-    std::mt19937 mt(rd());
-    std::uniform_int_distribution<uint64_t> dist(0, 100000000);
-    uint64_t part1{dist(mt)};
-    auto part2 = md5(user_agent + md5(user_agent + md5(std::format("{}{}x", user_agent, part1))));
-    auto api_key = std::format("tryit-{}-{}", part1, part2);
-
-    constexpr char CRLF[] = "\r\n";
-    static std::string MULTI_PART_BOUNDARY = "9bc627aea4f77e150e6057f78036e73f";
-
-    auto content_type_str = std::format("multipart/form-data; boundary={}", MULTI_PART_BOUNDARY);
-    SPDLOG_INFO("content_type_str: {}", content_type_str);
-    auto api_key_str = std::format("api-key: {}", api_key);
-
-    std::unordered_multimap<std::string, std::string> headers{
-        {"Content-Type", content_type_str},
-        {"api-key", api_key},
-    };
-    auto ret = Curl()
-                   .setUrl("https://api.deepai.org/hacking_is_a_crime")
-                   .setProxy(m_cfg.http_proxy)
-                   .setRecvHeadersCallback([&](std::string) {})
-                   .setRecvBodyCallback([&](std::string str) {
-                       boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, str); });
-                       return;
-                   })
-                   .setBody([&] {
-                       nlohmann::json request_json{{{"role", "user"}, {"content", std::move(prompt)}}};
-                       std::ostringstream payload;
-                       payload << "--" << MULTI_PART_BOUNDARY << CRLF
-                               << R"(Content-Disposition: form-data; name="chat_style")" << CRLF << CRLF << "chat"
-                               << CRLF << "--" << MULTI_PART_BOUNDARY << CRLF
-                               << R"(Content-Disposition: form-data; name="chatHistory")" << CRLF << CRLF
-                               << request_json.dump() << CRLF << "--" << MULTI_PART_BOUNDARY << "--" << CRLF;
-                       SPDLOG_INFO("{}", payload.str());
-                       auto str = payload.str();
-                       return str;
-                   }())
-                   .clearHeaders()
-                   .setHttpHeaders(headers)
-                   .perform();
-    if (ret.has_value()) {
-        SPDLOG_ERROR("{}", ret.value());
-        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
-        ch->try_send(err, ret.value());
-    }
-    co_return;
-}
-
 boost::asio::awaitable<void> FreeGpt::openAi(std::shared_ptr<Channel> ch, nlohmann::json json) {
     boost::system::error_code err{};
     ScopeExit auto_exit{[&] { ch->close(); }};
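Both copies of the removed provider minted a throwaway DeepAI key client-side: a random integer plus a triple-nested md5 over the user agent, formatted as tryit-<part1>-<part2>. A minimal Python sketch of the same derivation, assuming the project's md5() helper returns a lowercase hex digest:

import hashlib
import random

def _md5_hex(s: str) -> str:
    return hashlib.md5(s.encode()).hexdigest()

def deepai_tryit_key(user_agent: str) -> str:
    # Same scheme as the removed C++ code: random part1, then
    # md5(ua + md5(ua + md5(f"{ua}{part1}x"))) as part2.
    part1 = random.randint(0, 100000000)
    part2 = _md5_hex(user_agent + _md5_hex(user_agent + _md5_hex(f"{user_agent}{part1}x")))
    return f"tryit-{part1}-{part2}"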
@@ -1834,6 +1775,7 @@ create_client:
     request.set("Cookie", cookie);
     request.set(boost::beast::http::field::user_agent, user_agent);
     request.set("Content-Type", "application/json");
+    request.set("X-Wp-Nonce", j["restNonce"]);
 
     constexpr std::string_view json_str = R"({
         "botId":"chatbot-9vy3t5",
@@ -2267,12 +2209,12 @@ boost::asio::awaitable<void> FreeGpt::aivvm(std::shared_ptr<Channel> ch, nlohman
     boost::system::error_code err{};
     ScopeExit auto_exit{[&] { ch->close(); }};
 
-    using Tuple = std::tuple<std::chrono::time_point<std::chrono::system_clock>, std::string>;
+    using Tuple = std::tuple<std::chrono::time_point<std::chrono::system_clock>, std::string, std::string>;
     static moodycamel::ConcurrentQueue<Tuple> cookie_queue;
     Tuple item;
     bool found{false};
     if (cookie_queue.try_dequeue(item)) {
-        auto& [time_point, cookie] = item;
+        auto& [time_point, cookie, _] = item;
         if (std::chrono::system_clock::now() - time_point < std::chrono::minutes(120))
             found = true;
     }
@@ -2290,6 +2232,7 @@ boost::asio::awaitable<void> FreeGpt::aivvm(std::shared_ptr<Channel> ch, nlohman
                        {"cmd", "request.get"},
                        {"url", "https://chat.aivvm.com/zh"},
                        {"maxTimeout", 60000},
+                       {"session_ttl_minutes", 60},
                    };
                    return data.dump();
                }())
@@ -2328,26 +2271,24 @@ boost::asio::awaitable<void> FreeGpt::aivvm(std::shared_ptr<Channel> ch, nlohman
             co_await ch->async_send(err, "not found cookie", use_nothrow_awaitable);
             co_return;
         }
+        std::string user_agent = rsp["solution"].at("userAgent");
         auto cookie_str = std::format("cf_clearance={}", (*it)["value"].get<std::string>());
         // std::cout << rsp["solution"]["userAgent"].get<std::string>() << std::endl;
-        item = std::make_tuple(std::chrono::system_clock::now(), std::move(cookie_str));
+        item = std::make_tuple(std::chrono::system_clock::now(), std::move(cookie_str), user_agent);
     }
     SPDLOG_INFO("cookie: {}", std::get<1>(item));
     bool return_flag{true};
     ScopeExit auto_free([&] mutable {
         if (!return_flag)
             return;
-        auto& [time_point, cookie] = item;
+        auto& [time_point, cookie, _] = item;
         if (std::chrono::system_clock::now() - time_point < std::chrono::minutes(120))
             cookie_queue.enqueue(std::move(item));
     });
+    auto user_agent = std::get<2>(item);
     constexpr std::string_view host = "chat.aivvm.com";
     constexpr std::string_view port = "443";
 
-    constexpr std::string_view user_agent{
-        R"(Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36)"};
-
     boost::asio::ssl::context ctx1(boost::asio::ssl::context::tls);
     ctx1.set_verify_mode(boost::asio::ssl::verify_none);
 
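The aivvm hunks above widen the cached tuple from (timestamp, cookie) to (timestamp, cookie, user_agent): the hard-coded Chrome 116 user agent is dropped, and requests now replay the exact userAgent FlareSolverr solved the challenge with, presumably because a cf_clearance cookie is only honored together with the user agent it was issued for. A hypothetical Python mirror of the cookie_queue reuse rule:

import time
from collections import deque

# Hypothetical mirror of cookie_queue: entries are reused only while
# younger than 120 minutes, and cookie and user agent travel together
# so they can never get out of sync.
cookie_queue: deque = deque()  # items: (timestamp, cookie, user_agent)

def try_dequeue():
    if cookie_queue:
        ts, cookie, ua = cookie_queue.popleft()
        if time.time() - ts < 120 * 60:
            return cookie, ua
    return None  # stale or empty: solve a fresh challenge instead

def enqueue(ts: float, cookie: str, ua: str) -> None:
    if time.time() - ts < 120 * 60:
        cookie_queue.append((ts, cookie, ua))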
@@ -2663,3 +2604,80 @@ boost::asio::awaitable<void> FreeGpt::gptTalkru(std::shared_ptr<Channel> ch, nlo
     }
     co_return;
 }
+
+boost::asio::awaitable<void> FreeGpt::deepInfra(std::shared_ptr<Channel> ch, nlohmann::json json) {
+    co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
+    ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
+
+    auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
+
+    boost::system::error_code err{};
+    std::unordered_multimap<std::string, std::string> headers{
+        {"Accept", "text/event-stream"},
+        {"content-type", "application/json"},
+        {"Referer", "https://deepinfra.com/"},
+        {"Origin", "https://deepinfra.com"},
+        {"X-Deepinfra-Source", "web-embed"},
+        {"sec-ch-ua", R"("Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24")"},
+        {"sec-ch-ua-platform", R"("macOS")"},
+        {"sec-ch-ua-mobile", "?0"},
+        {"Sec-Fetch-Dest", "empty"},
+        {"Sec-Fetch-Mode", "cors"},
+        {"Sec-Fetch-Site", "same-site"},
+    };
+    std::string recv;
+    auto ret = Curl()
+                   .setUrl("https://api.deepinfra.com/v1/openai/chat/completions")
+                   .setProxy(m_cfg.http_proxy)
+                   .setRecvHeadersCallback([](std::string) { return; })
+                   .setRecvBodyCallback([&](std::string str) mutable {
+                       recv.append(str);
+                       while (true) {
+                           auto position = recv.find("\n");
+                           if (position == std::string::npos)
+                               break;
+                           auto msg = recv.substr(0, position + 1);
+                           recv.erase(0, position + 1);
+                           msg.pop_back();
+                           if (msg.empty() || !msg.contains("content"))
+                               continue;
+                           auto fields = splitString(msg, "data: ");
+                           boost::system::error_code err{};
+                           nlohmann::json line_json = nlohmann::json::parse(fields.back(), nullptr, false);
+                           if (line_json.is_discarded()) {
+                               SPDLOG_ERROR("json parse error: [{}]", fields.back());
+                               ch->try_send(err, std::format("json parse error: [{}]", fields.back()));
+                               continue;
+                           }
+                           auto str = line_json["choices"][0]["delta"]["content"].get<std::string>();
+                           if (!str.empty())
+                               ch->try_send(err, str);
+                       }
+                   })
+                   .setBody([&] {
+                       constexpr std::string_view ask_json_str = R"({
+                           "model":"meta-llama/Llama-2-70b-chat-hf",
+                           "messages":[
+                               {
+                                   "role":"user",
+                                   "content":"hello"
+                               }
+                           ],
+                           "stream":true
+                       })";
+                       nlohmann::json ask_request = nlohmann::json::parse(ask_json_str, nullptr, false);
+                       ask_request["messages"] = getConversationJson(json);
+                       std::string ask_request_str = ask_request.dump();
+                       SPDLOG_INFO("request: [{}]", ask_request_str);
+                       return ask_request_str;
+                   }())
+                   .clearHeaders()
+                   .setHttpHeaders(headers)
+                   .perform();
+    if (ret.has_value()) {
+        SPDLOG_ERROR("{}", ret.value());
+        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
+        ch->try_send(err, ret.value());
+    }
+    co_return;
+}
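The new deepInfra provider talks to an OpenAI-compatible completions endpoint with "stream":true, so the response arrives as server-sent events. The setRecvBodyCallback above is a line buffer: it appends each network chunk to recv, cuts off complete lines, strips the "data: " prefix, and forwards choices[0].delta.content to the channel. A hypothetical Python equivalent of that parsing loop (iter_deltas is illustrative, not part of the repo):

import json

def iter_deltas(chunks):
    # Accumulate network chunks, cut complete lines, skip lines without
    # "content", and yield the streamed delta text, mirroring the C++
    # recv-body callback (splitString(msg, "data: ") -> fields.back()).
    buf = ""
    for chunk in chunks:
        buf += chunk
        while (pos := buf.find("\n")) != -1:
            line, buf = buf[:pos], buf[pos + 1:]
            if not line or "content" not in line:
                continue
            payload = line.split("data: ")[-1]
            try:
                obj = json.loads(payload)
            except json.JSONDecodeError:
                continue
            delta = obj["choices"][0]["delta"].get("content", "")
            if delta:
                yield delta

# Example: an SSE event split across two network chunks still parses.
events = ['data: {"choices":[{"delta":{"con', 'tent":"Hi"}}]}\n']
print("".join(iter_deltas(events)))  # -> Hi

Buffering on newlines matters because libcurl hands the callback arbitrary chunk boundaries, so a JSON event can be split mid-line.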
@@ -336,7 +336,6 @@ int main(int, char** argv) {
     if (!cfg.api_key.empty())
         ADD_METHOD("gpt-3.5-turbo-stream-openai", FreeGpt::openAi);
 
-    ADD_METHOD("gpt-3.5-turbo-stream-DeepAi", FreeGpt::deepAi);
     ADD_METHOD("gpt-3.5-turbo-stream-yqcloud", FreeGpt::yqcloud);
     ADD_METHOD("gpt-OpenAssistant-stream-HuggingChat", FreeGpt::huggingChat)
     ADD_METHOD("gpt-4-turbo-stream-you", FreeGpt::you);
@@ -356,6 +355,7 @@ int main(int, char** argv) {
     ADD_METHOD("gpt-3.5-turbo-stream-ChatAnywhere", FreeGpt::chatAnywhere);
     ADD_METHOD("gpt-3.5-turbo-ChatgptNext", FreeGpt::chatGptNext);
     ADD_METHOD("gpt-3.5-turbo-stream-gptTalkRu", FreeGpt::gptTalkru);
+    ADD_METHOD("Llama-2-70b-chat-hf-stream-DeepInfra", FreeGpt::deepInfra);
 
     SPDLOG_INFO("active provider:");
     for (auto& [provider, _] : gpt_function)
@@ -1,14 +1,10 @@
 import base64
 import json
 import os
-import threading
-import time
-import traceback
 
 import execjs
 from flask import Flask, request
 from selenium import webdriver
-from selenium.webdriver.support.ui import WebDriverWait
 from werkzeug.serving import ThreadedWSGIServer
 
 app = Flask(__name__)
@@ -21,20 +17,6 @@ options.add_argument("--disable-gpu")
 options.add_argument("--disable-dev-shm-usage")
 
 
-def deepai_refresh():
-    while True:
-        driver = webdriver.Chrome(options=options)
-        try:
-            driver.get("https://deepai.org")
-            WebDriverWait(driver, 15)
-            cookies = driver.get_cookies()
-            print(cookies, flush=True)
-        except Exception:
-            traceback.print_exc()
-        driver.quit()
-        time.sleep(600)
-
-
 # curl -X POST -d '{}' -H "Content-Type: application/json" http://127.0.0.1:8860/gptforlove
 @app.route("/gptforlove", methods=["POST"])
 def get_gptforlove_secret():
@@ -77,8 +59,6 @@ def get_anti_bot_token():
 
 
 if __name__ == "__main__":
-    thread = threading.Thread(target=deepai_refresh)
-    thread.start()
     port = os.getenv("PORT", "8860")
     ip = os.getenv("IP", "0.0.0.0")
     print(f"start zeus at {ip}:{port}", flush=True)