From d6d21b4292f0a61f0039167bbf5dd773796b5c7f Mon Sep 17 00:00:00 2001
From: Dmitry Afanasyev <71835315+Balshgit@users.noreply.github.com>
Date: Mon, 23 Oct 2023 18:12:28 +0300
Subject: [PATCH] add providers GeekGpt FakeGpt (#43)

* move ruff to top of lefthook checks

* add providers GeekGpt FakeGpt
---
 bot_microservice/constants.py                |   6 +-
 chatgpt_microservice/deprecated/free_gpt.cpp | 155 -------
 chatgpt_microservice/include/free_gpt.h      |   2 +
 chatgpt_microservice/src/free_gpt.cpp        | 427 +++++++++++++++++++
 chatgpt_microservice/src/main.cpp            |   2 +
 lefthook.yml                                 |  19 +-
 6 files changed, 444 insertions(+), 167 deletions(-)

diff --git a/bot_microservice/constants.py b/bot_microservice/constants.py
index 3a2536b..273da09 100644
--- a/bot_microservice/constants.py
+++ b/bot_microservice/constants.py
@@ -55,6 +55,8 @@ class ChatGptModelsEnum(StrEnum):
     gpt_4_stream_Chatgpt4Online = "gpt-4-stream-Chatgpt4Online"
     gpt_3_5_turbo_stream_gptalk = "gpt-3.5-turbo-stream-gptalk"
     llama2 = "llama2"
+    gpt_3_5_turbo_stream_chatGptAi = "gpt-3.5-turbo-stream-chatGptAi"
+    gpt_3_5_turbo_stream_FakeGpt = "gpt-3.5-turbo-stream-FakeGpt"
     gpt_3_5_turbo_stream_GeekGpt = "gpt-3.5-turbo-stream-GeekGpt"
     gpt_3_5_turbo_stream_gptforlove = "gpt-3.5-turbo-stream-gptforlove"
 
@@ -69,9 +71,9 @@ class ChatGptModelsEnum(StrEnum):
         priority = 0
         match model:
             case "gpt-3.5-turbo-stream-gptalk":
-                priority = 2
-            case "gpt-3.5-turbo-stream-GeekGpt":
                 priority = 1
+            case "gpt-3.5-turbo-stream-GeekGpt":
+                priority = 2
             case "llama2":
                 priority = 1
         fields = {"model": model, "priority": priority}
diff --git a/chatgpt_microservice/deprecated/free_gpt.cpp b/chatgpt_microservice/deprecated/free_gpt.cpp
index 3243ef9..86bec3b 100644
--- a/chatgpt_microservice/deprecated/free_gpt.cpp
+++ b/chatgpt_microservice/deprecated/free_gpt.cpp
@@ -1,158 +1,3 @@
-boost::asio::awaitable<void> FreeGpt::chatGptAi(std::shared_ptr<Channel> ch, nlohmann::json json) {
-    ScopeExit auto_exit{[&] { ch->close(); }};
-    boost::system::error_code err{};
-
-    constexpr std::string_view host = "chatgpt.ai";
-    constexpr std::string_view port = "443";
-
-    constexpr std::string_view user_agent{
-        R"(Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36)"};
-
-    boost::beast::http::request<boost::beast::http::empty_body> req{boost::beast::http::verb::get, "/", 11};
-    req.set(boost::beast::http::field::host, "chatgpt.ai");
-    req.set(boost::beast::http::field::user_agent, user_agent);
-    req.set("Accept", "*/*");
-
-    int recreate_num{0};
-create_client:
-    boost::asio::ssl::context ctx(boost::asio::ssl::context::tls);
-    ctx.set_verify_mode(boost::asio::ssl::verify_none);
-    auto client = co_await createHttpClient(ctx, host, port);
-    if (!client.has_value()) {
-        SPDLOG_ERROR("createHttpClient: {}", client.error());
-        co_await ch->async_send(err, client.error(), use_nothrow_awaitable);
-        co_return;
-    }
-    auto& stream_ = client.value();
-
-    std::string chunk_body;
-    std::string cookie;
-    auto ret = co_await sendRequestRecvChunk(
-        ch, stream_, req, 200, [&ch, &chunk_body](std::string recv_str) { chunk_body.append(std::move(recv_str)); },
-        [&](const boost::beast::http::parser<false, boost::beast::http::empty_body>& p) {
-            auto& headers = p.get();
-            for (const auto& header : headers) {
-                if (boost::beast::http::to_string(header.name()) == "Set-Cookie") {
-                    cookie = header.value();
-                    return;
-                }
-            }
-        });
-    SPDLOG_ERROR("cookie: {}", cookie);
-    if (ret == Status::Close && recreate_num == 0) {
-        recreate_num++;
-        goto create_client;
-    }
-    if (ret == Status::HasError)
-        co_return;
-
-    static std::string pattern{R"(data-system='(.*?)')"};
pattern{R"(data-system='(.*?)')"}; - - std::vector matches = findAll(pattern, chunk_body); - if (matches.empty()) { - SPDLOG_ERROR("parsing login failed"); - co_await ch->async_send(err, chunk_body, use_nothrow_awaitable); - co_return; - } - - auto html_unescape = [](const std::string& text) { - std::string result = text; - boost::replace_all(result, "&", "&"); - boost::replace_all(result, "<", "<"); - boost::replace_all(result, ">", ">"); - boost::replace_all(result, """, "\""); - boost::replace_all(result, "'", "'"); - return result; - }; - std::string html_json_str; - std::regex regex("'(.*?)'"); - std::smatch result; - if (std::regex_search(matches[0], result, regex)) - html_json_str = html_unescape(result[1]); - if (html_json_str.empty()) { - SPDLOG_ERROR("extract json fail"); - co_await ch->async_send(err, chunk_body, use_nothrow_awaitable); - co_return; - } - nlohmann::json j = nlohmann::json::parse(html_json_str, nullptr, false); - if (j.is_discarded()) { - SPDLOG_ERROR("json parse error"); - co_await ch->async_send(err, "json parse error", use_nothrow_awaitable); - co_return; - } - SPDLOG_INFO("json: {}", j.dump()); - - auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get(); - - boost::beast::http::request request{boost::beast::http::verb::post, - "/wp-json/mwai-ui/v1/chats/submit", 11}; - request.set(boost::beast::http::field::host, host); - request.set("authority", "chatgpt.ai"); - request.set("accept", "*/*"); - request.set("accept-language", R"(en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3)"); - request.set("cache-control", "no-cache"); - request.set("origin", "https://chatgpt.ai"); - request.set("pragma", "no-cache"); - request.set(boost::beast::http::field::referer, "https://chatgpt.ai/"); - request.set("sec-ch-ua", R"("Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114")"); - request.set("sec-ch-ua-mobile", "?0"); - request.set("sec-ch-ua-platform", R"("Windows")"); - request.set("sec-fetch-dest", "empty"); - request.set("sec-fetch-mode", "cors"); - request.set("sec-fetch-site", "same-origin"); - request.set("Cookie", cookie); - request.set(boost::beast::http::field::user_agent, user_agent); - request.set("Content-Type", "application/json"); - - constexpr std::string_view json_str = R"({ - "botId":"chatbot-9vy3t5", - "clientId":"", - "contextId":1048, - "id":"chatbot-9vy3t5", - "messages":[], - "newMessage":"hello", - "session":"N/A", - "stream":true - })"; - nlohmann::json request_json = nlohmann::json::parse(json_str, nullptr, false); - request_json["botId"] = j["botId"]; - request_json["clientId"] = ""; - request_json["contextId"] = j["contextId"]; - request_json["id"] = j["id"]; - request_json["session"] = j["sessionId"]; - request_json["newMessage"] = prompt; - - SPDLOG_INFO("request: {}", request_json.dump()); - request.body() = request_json.dump(); - request.prepare_payload(); - - std::string recv; - co_await sendRequestRecvChunk(ch, stream_, request, 200, [&](std::string str) { - recv.append(str); - while (true) { - auto position = recv.find("\n"); - if (position == std::string::npos) - break; - auto msg = recv.substr(0, position + 1); - recv.erase(0, position + 1); - msg.pop_back(); - if (msg.empty()) - continue; - auto fields = splitString(msg, "data: "); - boost::system::error_code err{}; - nlohmann::json line_json = nlohmann::json::parse(fields.back(), nullptr, false); - if (line_json.is_discarded()) { - SPDLOG_ERROR("json parse error: [{}]", fields.back()); - ch->try_send(err, std::format("json 
-                continue;
-            }
-            auto type = line_json["type"].get<std::string>();
-            if (type == "live")
-                ch->try_send(err, line_json["data"].get<std::string>());
-        }
-    });
-    co_return;
-}
 
 boost::asio::awaitable<void> FreeGpt::gptgod(std::shared_ptr<Channel> ch, nlohmann::json json) {
     co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
diff --git a/chatgpt_microservice/include/free_gpt.h b/chatgpt_microservice/include/free_gpt.h
index 8b28bd7..a167d3d 100644
--- a/chatgpt_microservice/include/free_gpt.h
+++ b/chatgpt_microservice/include/free_gpt.h
@@ -36,6 +36,8 @@ public:
     boost::asio::awaitable<void> llama2(std::shared_ptr<Channel>, nlohmann::json);
     boost::asio::awaitable<void> noowai(std::shared_ptr<Channel>, nlohmann::json);
     boost::asio::awaitable<void> geekGpt(std::shared_ptr<Channel>, nlohmann::json);
+    boost::asio::awaitable<void> chatGptAi(std::shared_ptr<Channel>, nlohmann::json);
+    boost::asio::awaitable<void> fakeGpt(std::shared_ptr<Channel>, nlohmann::json);
 
 private:
     boost::asio::awaitable<std::expected<boost::beast::ssl_stream<boost::beast::tcp_stream>, std::string>>
diff --git a/chatgpt_microservice/src/free_gpt.cpp b/chatgpt_microservice/src/free_gpt.cpp
index f3a60bf..07ef464 100644
--- a/chatgpt_microservice/src/free_gpt.cpp
+++ b/chatgpt_microservice/src/free_gpt.cpp
@@ -11,6 +11,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -456,6 +457,7 @@ public:
         if (m_headers)
             curl_slist_free_all(m_headers);
         m_headers_list.clear();
+        m_headers = nullptr;
         return *this;
     }
     auto& setHttpStatusCode(int32_t code) {
@@ -2660,3 +2662,428 @@ boost::asio::awaitable<void> FreeGpt::geekGpt(std::shared_ptr<Channel> ch, nlohm
     }
     co_return;
 }
+
+boost::asio::awaitable<void> FreeGpt::chatGptAi(std::shared_ptr<Channel> ch, nlohmann::json json) {
+    ScopeExit auto_exit{[&] { ch->close(); }};
+    boost::system::error_code err{};
+
+    constexpr std::string_view host = "chatgpt.ai";
+    constexpr std::string_view port = "443";
+
+    constexpr std::string_view user_agent{
+        R"(Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36)"};
+
+    boost::beast::http::request<boost::beast::http::empty_body> req{boost::beast::http::verb::get, "/", 11};
+    req.set(boost::beast::http::field::host, "chatgpt.ai");
+    req.set(boost::beast::http::field::user_agent, user_agent);
+    req.set("Accept", "*/*");
+
+    int recreate_num{0};
+create_client:
+    boost::asio::ssl::context ctx(boost::asio::ssl::context::tls);
+    ctx.set_verify_mode(boost::asio::ssl::verify_none);
+    auto client = co_await createHttpClient(ctx, host, port);
+    if (!client.has_value()) {
+        SPDLOG_ERROR("createHttpClient: {}", client.error());
+        co_await ch->async_send(err, client.error(), use_nothrow_awaitable);
+        co_return;
+    }
+    auto& stream_ = client.value();
+
+    std::string chunk_body;
+    std::string cookie;
+    auto ret = co_await sendRequestRecvChunk(
+        ch, stream_, req, 200, [&ch, &chunk_body](std::string recv_str) { chunk_body.append(std::move(recv_str)); },
+        [&](const boost::beast::http::parser<false, boost::beast::http::empty_body>& p) {
+            auto& headers = p.get();
+            for (const auto& header : headers) {
+                if (boost::beast::http::to_string(header.name()) == "Set-Cookie") {
+                    cookie = header.value();
+                    return;
+                }
+            }
+        });
+    SPDLOG_ERROR("cookie: {}", cookie);
+    if (ret == Status::Close && recreate_num == 0) {
+        recreate_num++;
+        goto create_client;
+    }
+    if (ret == Status::HasError)
+        co_return;
+
+    static std::string pattern{R"(data-system='(.*?)')"};
+
+    std::vector<std::string> matches = findAll(pattern, chunk_body);
+    if (matches.empty()) {
+        SPDLOG_ERROR("parsing login failed");
+        co_await ch->async_send(err, chunk_body, use_nothrow_awaitable);
+        co_return;
+    }
+
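+    // The page embeds its chatbot config as HTML-escaped JSON inside a
+    // data-system='...' attribute; undo the common entities before parsing.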
+    auto html_unescape = [](const std::string& text) {
+        std::string result = text;
+        boost::replace_all(result, "&amp;", "&");
+        boost::replace_all(result, "&lt;", "<");
+        boost::replace_all(result, "&gt;", ">");
+        boost::replace_all(result, "&quot;", "\"");
+        boost::replace_all(result, "&#39;", "'");
+        return result;
+    };
+    std::string html_json_str;
+    std::regex regex("'(.*?)'");
+    std::smatch result;
+    if (std::regex_search(matches[0], result, regex))
+        html_json_str = html_unescape(result[1]);
+    if (html_json_str.empty()) {
+        SPDLOG_ERROR("extract json fail");
+        co_await ch->async_send(err, chunk_body, use_nothrow_awaitable);
+        co_return;
+    }
+    nlohmann::json j = nlohmann::json::parse(html_json_str, nullptr, false);
+    if (j.is_discarded()) {
+        SPDLOG_ERROR("json parse error");
+        co_await ch->async_send(err, "json parse error", use_nothrow_awaitable);
+        co_return;
+    }
+    SPDLOG_INFO("json: {}", j.dump());
+
+    auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
+
+    boost::beast::http::request<boost::beast::http::string_body> request{boost::beast::http::verb::post,
+                                                                         "/wp-json/mwai-ui/v1/chats/submit", 11};
+    request.set(boost::beast::http::field::host, host);
+    request.set("authority", "chatgpt.ai");
+    request.set("accept", "*/*");
+    request.set("accept-language", R"(en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3)");
+    request.set("cache-control", "no-cache");
+    request.set("origin", "https://chatgpt.ai");
+    request.set("pragma", "no-cache");
+    request.set(boost::beast::http::field::referer, "https://chatgpt.ai/");
+    request.set("sec-ch-ua", R"("Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114")");
+    request.set("sec-ch-ua-mobile", "?0");
+    request.set("sec-ch-ua-platform", R"("Windows")");
+    request.set("sec-fetch-dest", "empty");
+    request.set("sec-fetch-mode", "cors");
+    request.set("sec-fetch-site", "same-origin");
+    request.set("Cookie", cookie);
+    request.set(boost::beast::http::field::user_agent, user_agent);
+    request.set("Content-Type", "application/json");
+
+    constexpr std::string_view json_str = R"({
+        "botId":"chatbot-9vy3t5",
+        "customId":null,
+        "session":"N/A",
+        "chatId":"6tkwezdhivn",
+        "contextId":1048,
+        "messages":[
+            {
+                "role":"user",
+                "content":"hello"
+            }
+        ],
+        "newMessage":"hello",
+        "stream":true
+    })";
+    nlohmann::json request_json = nlohmann::json::parse(json_str, nullptr, false);
+    request_json["botId"] = j["botId"];
+    request_json["customId"] = j["customId"];
+    request_json["session"] = j["sessionId"];
+    request_json["contextId"] = j["contextId"];
+    // random 11-character chat id: a lowercase letter followed by alphanumerics
+    request_json["chatId"] = [](int len) -> std::string {
+        static std::string chars{"abcdefghijklmnopqrstuvwxyz0123456789"};
+        static std::string letter{"abcdefghijklmnopqrstuvwxyz"};
+        std::random_device rd;
+        std::mt19937 gen(rd());
+        std::uniform_int_distribution<> dis(0, 1000000);
+        std::string random_string;
+        random_string += chars[dis(gen) % letter.length()];
+        len = len - 1;
+        for (int i = 0; i < len; i++)
+            random_string += chars[dis(gen) % chars.length()];
+        return random_string;
+    }(11);
+    request_json["messages"] = getConversationJson(json);
+
+    request_json["newMessage"] = prompt;
+
+    SPDLOG_INFO("request: {}", request_json.dump());
+    request.body() = request_json.dump();
+    request.prepare_payload();
+
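+    // The reply arrives as an event stream, one JSON object per "data: " line;
+    // only events with "type" == "live" carry answer text to forward.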
+    std::string recv;
+    co_await sendRequestRecvChunk(ch, stream_, request, 200, [&](std::string str) {
+        recv.append(str);
+        while (true) {
+            auto position = recv.find("\n");
+            if (position == std::string::npos)
+                break;
+            auto msg = recv.substr(0, position + 1);
+            recv.erase(0, position + 1);
+            msg.pop_back();
+            if (msg.empty())
+                continue;
+            auto fields = splitString(msg, "data: ");
+            boost::system::error_code err{};
+            nlohmann::json line_json = nlohmann::json::parse(fields.back(), nullptr, false);
+            if (line_json.is_discarded()) {
+                SPDLOG_ERROR("json parse error: [{}]", fields.back());
+                ch->try_send(err, std::format("json parse error: [{}]", fields.back()));
+                continue;
+            }
+            auto type = line_json["type"].get<std::string>();
+            if (type == "live")
+                ch->try_send(err, line_json["data"].get<std::string>());
+        }
+    });
+    co_return;
+}
+
+boost::asio::awaitable<void> FreeGpt::fakeGpt(std::shared_ptr<Channel> ch, nlohmann::json json) {
+    co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
+    ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
+    boost::system::error_code err{};
+
+    auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
+
+    std::unordered_multimap<std::string, std::string> headers{
+        {"Accept", "*/*"},
+        {"referer", "https://chat-shared2.zhile.io/?v=2"},
+    };
+
+    std::multimap<std::string, std::string> api_load_params{
+        {"t", std::to_string(getTimestamp())},
+    };
+    auto api_load_url = std::format("https://chat-shared2.zhile.io/api/loads?{}", paramsToQueryStr(api_load_params));
+    std::string chunk_body;
+
+    Curl curl;
+    auto ret = curl.setUrl(api_load_url)
+                   .setProxy(m_cfg.http_proxy)
+                   .setRecvHeadersCallback([](std::string) { return; })
+                   .setRecvBodyCallback([&](std::string str) {
+                       chunk_body.append(str);
+                       return;
+                   })
+                   .setHttpHeaders([&] -> auto& { return headers; }())
+                   .perform();
+    if (ret.has_value()) {
+        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
+        ch->try_send(err, ret.value());
+        co_return;
+    }
+    nlohmann::json json_result = nlohmann::json::parse(chunk_body, nullptr, false);
+    if (json_result.is_discarded()) {
+        SPDLOG_ERROR("json parse error: [{}]", chunk_body);
+        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
+        ch->try_send(err, std::format("json parse error: [{}]", chunk_body));
+        co_return;
+    }
+    std::vector<nlohmann::json> random_j;
+    for (auto& j : json_result["loads"]) {
+        if (j["count"].get<int32_t>() == 0)
+            random_j.emplace_back(std::move(j));
+    }
+    std::mt19937 g{std::random_device{}()};
+    // pick one idle token at random; the inclusive upper bound must be size() - 1
+    std::uniform_int_distribution<std::size_t> d{0, random_j.size() - 1};
+    auto token_id = random_j[d(g)];
+    std::cout << token_id.dump() << std::endl;
+    headers.emplace("Content-Type", "application/x-www-form-urlencoded");
+    // send login
+    std::multimap<std::string, std::string> login_params{
+        {"token_key", token_id["token_id"].get<std::string>()},
+        {"session_password",
+         [](int len) -> std::string {
+             static std::string chars{"abcdefghijklmnopqrstuvwxyz"};
+             static std::string letter{"0123456789"};
+             std::random_device rd;
+             std::mt19937 gen(rd());
+             std::uniform_int_distribution<> dis(0, 1000000);
+             std::string random_string;
+             random_string += letter[dis(gen) % letter.length()];
+             len = len - 1;
+             for (int i = 0; i < len; i++)
+                 random_string += chars[dis(gen) % chars.length()];
+             return random_string;
+         }(10)},
+    };
+    chunk_body.clear();
+    headers.erase("Content-Type");
+    std::string header_str;
+    auto body = paramsToQueryStr(login_params);
+
+    ret = curl.setUrl("https://chat-shared2.zhile.io/auth/login")
+              .setProxy(m_cfg.http_proxy)
+              .setRecvHeadersCallback([&](std::string str) {
+                  header_str.append(str);
+                  return;
+              })
+              .setRecvBodyCallback([&](std::string str) {
+                  chunk_body.append(str);
+                  return;
+              })
+              .setBody(body)
+              .clearHeaders()
+              .setHttpHeaders([&] -> auto& { return headers; }())
+              .perform();
+    if (ret.has_value()) {
+        SPDLOG_ERROR("auth login error: [{}]", ret.value());
+        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
+        ch->try_send(err, ret.value());
+        co_return;
+    }
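+    // auth/login answers with Set-Cookie headers; the lambda below splits the
+    // raw header block into name/value pairs so the credential cookie can be
+    // extracted and replayed on the session and conversation requests.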
+    auto parse = [](auto& buffer) {
+        std::regex pattern(R"(([^:\r\n]+):([^\r\n]+))");
+        std::smatch matches;
+        auto start = buffer.cbegin();
+        auto end = buffer.cend();
+        std::multimap<std::string, std::string> response_header;
+        while (std::regex_search(start, end, matches, pattern)) {
+            std::string field_name = matches[1].str();
+            std::string field_value = matches[2].str();
+            response_header.insert(std::pair{field_name, field_value});
+            start = matches[0].second;
+        }
+        return response_header;
+    };
+    auto response_header = parse(header_str);
+    auto range = response_header.equal_range("set-cookie");
+    std::string cookie;
+    for (auto it = range.first; it != range.second; ++it) {
+        if (!(it->second.contains("credential=")))
+            continue;
+        auto view = it->second | std::views::drop_while(isspace) | std::views::reverse |
+                    std::views::drop_while(isspace) | std::views::reverse;
+        auto fields = splitString(std::string{view.begin(), view.end()}, " ");
+        if (fields.size() < 1) {
+            co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
+            ch->try_send(err, "can't get cookie");
+            co_return;
+        }
+        cookie = std::move(fields[0]);
+        break;
+    }
+    SPDLOG_INFO("cookie: [{}]", cookie);
+    SPDLOG_INFO("rsp: [{}]", chunk_body);
+    chunk_body.clear();
+    headers.emplace("cookie", cookie);
+
+    // /api/auth/session
+    ret = curl.setUrl("https://chat-shared2.zhile.io/api/auth/session")
+              .setProxy(m_cfg.http_proxy)
+              .setOpt(CURLOPT_HTTPGET, 1L)
+              .setRecvHeadersCallback([](std::string str) {
+                  std::cout << str << std::endl;
+                  return;
+              })
+              .setRecvBodyCallback([&](std::string str) mutable {
+                  chunk_body.append(str);
+                  return;
+              })
+              .clearHeaders()
+              .setHttpHeaders([&] -> auto& { return headers; }())
+              .perform();
+    if (ret.has_value()) {
+        SPDLOG_ERROR("/api/auth/session: [{}]", ret.value());
+        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
+        ch->try_send(err, ret.value());
+        co_return;
+    }
+
+    json_result.clear();
+    json_result = nlohmann::json::parse(chunk_body, nullptr, false);
+    if (json_result.is_discarded()) {
+        SPDLOG_ERROR("/api/auth/session json parse error: [{}]", chunk_body);
+        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
+        ch->try_send(err, std::format("/api/auth/session parse error: [{}]", chunk_body));
+        co_return;
+    }
+    auto cache_token = json_result["accessToken"].get<std::string>();
+    SPDLOG_INFO("accessToken: [{}]", cache_token);
+
+    headers.erase("Accept");
+    headers.emplace("Content-Type", "application/json");
+    headers.emplace("Accept", "text/event-stream");
+    auto auth = std::format("Bearer {}", cache_token);
+    SPDLOG_INFO("auth: [{}]", auth);
+    headers.emplace("X-Authorization", auth);
+    std::string recv;
+    std::string last_message;
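+    // /api/conversation streams cumulative snapshots of the answer; each
+    // callback diffs the new snapshot against last_message and forwards only
+    // the fresh suffix, e.g. snapshots "Hel" -> "Hello wo" -> "Hello world"
+    // emit "Hel", "lo wo", "rld" respectively.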
+    ret = curl.setUrl("https://chat-shared2.zhile.io/api/conversation")
+              .setProxy(m_cfg.http_proxy)
+              .setRecvHeadersCallback([](std::string) { return; })
+              .setRecvBodyCallback([&](std::string str) mutable {
+                  recv.append(str);
+                  while (true) {
+                      auto position = recv.find("\n");
+                      if (position == std::string::npos)
+                          break;
+                      auto msg = recv.substr(0, position + 1);
+                      recv.erase(0, position + 1);
+                      msg.pop_back();
+                      if (msg.empty() || !msg.starts_with("data: ") || !msg.contains("content"))
+                          continue;
+                      msg.erase(0, 6);  // strip the "data: " prefix
+                      if (msg == "[DONE]")
+                          break;
+                      boost::system::error_code err{};
+                      nlohmann::json line_json = nlohmann::json::parse(msg, nullptr, false);
+                      if (line_json.is_discarded()) {
+                          SPDLOG_ERROR("json parse error: [{}]", msg);
+                          boost::asio::post(ch->get_executor(),
+                                            [=] { ch->try_send(err, std::format("json parse error: [{}]", msg)); });
+                          continue;
+                      }
+                      auto type = line_json["message"]["content"]["content_type"].get<std::string>();
+                      if (type == "text") {
+                          auto new_message = line_json["message"]["content"]["parts"][0].get<std::string>();
+                          if (new_message.empty())
+                              continue;
+                          std::string tmp{new_message};
+                          new_message.erase(0, last_message.size());
+                          last_message = std::move(tmp);
+                          if (!new_message.empty())
+                              boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, new_message); });
+                      }
+                  }
+                  return;
+              })
+              .setBody([&] {
+                  constexpr std::string_view json_str = R"({
+                      "action":"next",
+                      "messages":[
+                          {
+                              "id":"a68cd787-c96c-4234-8ec9-00805f73a7b8",
+                              "author":{"role":"user"},
+                              "content":{
+                                  "content_type":"text",
+                                  "parts":["hello"]
+                              },
+                              "metadata":{}
+                          }
+                      ],
+                      "parent_message_id":"fdc171e6-dd0d-4494-93ce-e7d219e6ed05",
+                      "model":"text-davinci-002-render-sha",
+                      "plugin_ids":[],
+                      "timezone_offset_min":-120,
+                      "suggestions":[],
+                      "history_and_training_disabled":true,
+                      "arkose_token":"",
+                      "force_paragen":false
+                  })";
+                  nlohmann::json request = nlohmann::json::parse(json_str, nullptr, false);
+                  request["parent_message_id"] = createUuidString();
+                  request["messages"][0]["id"] = createUuidString();
+                  request["messages"][0]["content"]["parts"][0] = prompt;
+                  SPDLOG_INFO("request: [{}]", request.dump(2));
+                  return request.dump();
+              }())
+              .clearHeaders()
+              .setHttpHeaders(headers)
+              .perform();
+    if (ret.has_value()) {
+        SPDLOG_ERROR("/api/conversation: [{}]", ret.value());
+        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
+        ch->try_send(err, ret.value());
+        co_return;
+    }
+}
diff --git a/chatgpt_microservice/src/main.cpp b/chatgpt_microservice/src/main.cpp
index e2c8233..1642fbd 100644
--- a/chatgpt_microservice/src/main.cpp
+++ b/chatgpt_microservice/src/main.cpp
@@ -351,6 +351,8 @@ int main(int argc, char** argv) {
     ADD_METHOD("gpt-3.5-turbo-stream-noowai", FreeGpt::noowai);
     ADD_METHOD("gpt-3.5-turbo-stream-GeekGpt", FreeGpt::geekGpt);
     ADD_METHOD("llama2", FreeGpt::llama2);
+    ADD_METHOD("gpt-3.5-turbo-stream-chatGptAi", FreeGpt::chatGptAi);
+    ADD_METHOD("gpt-3.5-turbo-stream-FakeGpt", FreeGpt::fakeGpt);
 
     SPDLOG_INFO("active provider:");
     for (auto& [provider, _] : gpt_function)
diff --git a/lefthook.yml b/lefthook.yml
index 0c40d99..c7b3161 100644
--- a/lefthook.yml
+++ b/lefthook.yml
@@ -32,35 +32,34 @@ format:
   piped: true
   parallel: false
   commands:
-    # number prefix used to save ordering
-    1_autoflake:
+    1_ruff:
       glob: "*.py"
-      run: autoflake --recursive --ignore-init-module-imports --remove-all-unused-imports --remove-unused-variables --in-place {staged_files}
+      run: ruff check --fix bot_microservice
     2_isort:
       glob: "*.py"
       run: isort --color --quiet {staged_files}
     3_black:
       glob: "*.py"
       run: black -S {staged_files}
-    4_black_check:
+    4_autoflake:
+      glob: "*.py"
+      run: autoflake --recursive --ignore-init-module-imports --remove-all-unused-imports --remove-unused-variables --in-place {staged_files}
+    5_black_check:
       glob: "*.py"
       run: black --check -S {staged_files}
-    5_ruff:
-      glob: "*.py"
-      run: ruff bot_microservice
 
 lint:
   parallel: true
   commands:
+    ruff:
+      glob: "*.py"
+      run: ruff bot_microservice
     mypy:
       glob: "*.py"
       run: mypy bot_microservice --namespace-packages --config-file pyproject.toml
     flake8:
       glob: "*.py"
       run: flake8 bot_microservice
-    ruff:
-      glob: "*.py"
-      run: ruff bot_microservice
 
 check-format:
   parallel: true