diff --git a/bot_microservice/constants.py b/bot_microservice/constants.py
index 0b7159d..20fb26a 100644
--- a/bot_microservice/constants.py
+++ b/bot_microservice/constants.py
@@ -60,6 +60,8 @@ class ChatGptModelsEnum(StrEnum):
     gpt_4_stream_aivvm = "gpt-4-stream-aivvm"
     gpt_3_5_turbo_stream_AiChatOnline = "gpt-3.5-turbo-stream-AiChatOnline"
     llama2_70B = "llama2-70B"
+    gpt6 = "gpt6"
+    gpt_3_5_turbo_stream_chatxyz = "gpt-3.5-turbo-stream-chatxyz"
     gpt_3_5_turbo_gptChatly = "gpt-3.5-turbo-gptChatly"
     gpt_3_5_turbo_stream_Berlin = "gpt-3.5-turbo-stream-Berlin"
     gpt_3_5_turbo_stream_chatGptAi = "gpt-3.5-turbo-stream-chatGptAi"
diff --git a/chatgpt_microservice/cfg/cpp-free-gpt.yml b/chatgpt_microservice/cfg/cpp-free-gpt.yml
index 90f7963..c945c31 100644
--- a/chatgpt_microservice/cfg/cpp-free-gpt.yml
+++ b/chatgpt_microservice/cfg/cpp-free-gpt.yml
@@ -1,5 +1,13 @@
 ---
 client_root_path: "../client"
-enable_proxy: true
+interval: 300
+work_thread_num: 8
+host: "0.0.0.0"
+port: 8858
+chat_path: "/chat"
 providers: []
+enable_proxy: true
+api_key: ""
 ip_white_list: []
+zeus: "http://127.0.0.1:8860"
+flaresolverr: "http://127.0.0.1:8191/v1"
diff --git a/chatgpt_microservice/include/cfg.h b/chatgpt_microservice/include/cfg.h
index 7488932..75c2b82 100644
--- a/chatgpt_microservice/include/cfg.h
+++ b/chatgpt_microservice/include/cfg.h
@@ -1,24 +1,21 @@
 #pragma once

-#include <thread>
-
 #include <yaml_cpp_struct.hpp>

 struct Config {
     std::string client_root_path;
-    std::size_t interval{300};
-    std::size_t work_thread_num{std::thread::hardware_concurrency() == 1 ? 2
-                                                                         : std::thread::hardware_concurrency() * 2};
-    std::string host{"0.0.0.0"};
-    std::string port{"8858"};
-    std::string chat_path{"/chat"};
+    std::size_t interval;
+    std::size_t work_thread_num;
+    std::string host;
+    std::string port;
+    std::string chat_path;
     std::vector<std::string> providers;
     bool enable_proxy;
     std::string http_proxy;
     std::string api_key;
     std::vector<std::string> ip_white_list;
-    std::string zeus{"http://127.0.0.1:8860"};
-    std::string flaresolverr{"http://127.0.0.1:8191/v1"};
+    std::string zeus;
+    std::string flaresolverr;
 };

 YCS_ADD_STRUCT(Config, client_root_path, interval, work_thread_num, host, port, chat_path, providers, enable_proxy, http_proxy, api_key, ip_white_list, zeus, flaresolverr)
diff --git a/chatgpt_microservice/include/free_gpt.h b/chatgpt_microservice/include/free_gpt.h
index 28e9ba4..b0739df 100644
--- a/chatgpt_microservice/include/free_gpt.h
+++ b/chatgpt_microservice/include/free_gpt.h
@@ -33,6 +33,8 @@ public:
    boost::asio::awaitable<void> aiChatOnline(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> fakeGpt(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> aura(std::shared_ptr<Channel>, nlohmann::json);
+   boost::asio::awaitable<void> gpt6(std::shared_ptr<Channel>, nlohmann::json);
+   boost::asio::awaitable<void> chatxyz(std::shared_ptr<Channel>, nlohmann::json);

private:
    boost::asio::awaitable<std::expected<boost::beast::ssl_stream<boost::asio::ip::tcp::socket>, std::string>>
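Both new entry points follow the provider contract already used by `aiChatOnline`, `fakeGpt`, and `aura`: a coroutine takes a shared channel plus the request JSON, streams response fragments through the channel, and arranges (via `ScopeExit` in the implementations below) for the channel to be closed on exit so the HTTP handler knows the stream ended. For orientation, here is a minimal, self-contained sketch of that contract; the `Channel` alias, buffer size, and executor wiring are assumptions for illustration, since the real alias lives elsewhere in free_gpt.h and is not part of this patch:

```cpp
// Hypothetical, minimal model of the provider/channel contract assumed above.
// The real FreeGpt methods additionally hop onto a thread pool and issue the
// HTTP request; here a fake provider just streams two fragments and closes.
#include <boost/asio.hpp>
#include <boost/asio/experimental/channel.hpp>
#include <iostream>
#include <memory>

// Assumption: mirrors the Channel alias used by FreeGpt (not shown in the diff).
using Channel = boost::asio::experimental::channel<void(boost::system::error_code, std::string)>;

boost::asio::awaitable<void> demoProvider(std::shared_ptr<Channel> ch) {
    boost::system::error_code err{};
    ch->try_send(err, "Hello, ");  // each fragment is relayed to the HTTP client
    ch->try_send(err, "world!");
    ch->close();                   // closing the channel marks end-of-stream
    co_return;
}

int main() {
    boost::asio::io_context ctx;
    auto ch = std::make_shared<Channel>(ctx.get_executor(), 64);
    boost::asio::co_spawn(ctx, demoProvider(ch), boost::asio::detached);
    boost::asio::co_spawn(
        ctx,
        [ch]() -> boost::asio::awaitable<void> {
            for (;;) {
                boost::system::error_code ec;
                auto fragment = co_await ch->async_receive(
                    boost::asio::redirect_error(boost::asio::use_awaitable, ec));
                if (ec) break;  // channel closed by the provider
                std::cout << fragment;
            }
        },
        boost::asio::detached);
    ctx.run();
}
```

Closing the channel rather than sending a sentinel value keeps delivery and termination in one primitive: the receiver simply loops until `async_receive` reports an error.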
diff --git a/chatgpt_microservice/src/free_gpt.cpp b/chatgpt_microservice/src/free_gpt.cpp
index 4495455..9470be5 100644
--- a/chatgpt_microservice/src/free_gpt.cpp
+++ b/chatgpt_microservice/src/free_gpt.cpp
@@ -2097,3 +2097,178 @@ boost::asio::awaitable<void> FreeGpt::aura(std::shared_ptr<Channel> ch, nlohmann::json json) {
     }
     co_return;
 }
+
+boost::asio::awaitable<void> FreeGpt::gpt6(std::shared_ptr<Channel> ch, nlohmann::json json) {
+    co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
+    ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
+
+    auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
+
+    boost::system::error_code err{};
+    std::unordered_multimap<std::string, std::string> headers{
+        {"Accept", "*/*"},
+        {"content-type", "application/json"},
+        {"Referer", "https://gpt6.ai/"},
+        {"Origin", "https://gpt6.ai"},
+        {"Sec-Fetch-Dest", "empty"},
+        {"Sec-Fetch-Mode", "cors"},
+        {"Sec-Fetch-Site", "cross-site"},
+        {"TE", "trailers"},
+    };
+    std::string recv;
+    auto ret = Curl()
+                   .setUrl("https://seahorse-app-d29hu.ondigitalocean.app/api/v1/query")
+                   .setProxy(m_cfg.http_proxy)
+                   .setRecvHeadersCallback([](std::string) { return; })
+                   .setRecvBodyCallback([&](std::string chunk_str) mutable {
+                       recv.append(chunk_str);
+                       while (true) {
+                           auto position = recv.find("\n");
+                           if (position == std::string::npos)
+                               break;
+                           auto msg = recv.substr(0, position + 1);
+                           recv.erase(0, position + 1);
+                           msg.pop_back();
+                           if (msg.empty() || !msg.contains("content"))
+                               continue;
+                           auto fields = splitString(msg, "data: ");
+                           boost::system::error_code err{};
+                           nlohmann::json line_json = nlohmann::json::parse(fields.back(), nullptr, false);
+                           if (line_json.is_discarded()) {
+                               SPDLOG_ERROR("json parse error: [{}]", fields.back());
+                               ch->try_send(err, std::format("json parse error: [{}]", fields.back()));
+                               continue;
+                           }
+                           auto str = line_json["choices"][0]["delta"]["content"].get<std::string>();
+                           if (!str.empty())
+                               ch->try_send(err, str);
+                       }
+                   })
+                   .setBody([&] {
+                       constexpr std::string_view ask_json_str = R"({
+                            "prompts":[
+                                {
+                                    "role":"user",
+                                    "content":"Hello"
+                                }
+                            ],
+                            "geoInfo":{
+                                "ip":"100.90.100.222",
+                                "hostname":"ip-100-090-100-222.um36.pools.vodafone-ip.de",
+                                "city":"Muenchen",
+                                "region":"North Rhine-Westphalia",
+                                "country":"DE",
+                                "loc":"44.0910,5.5827",
+                                "org":"AS3209 Vodafone GmbH",
+                                "postal":"41507",
+                                "timezone":"Europe/Berlin"
+                            },
+                            "paid":false,
+                            "character":{
+                                "textContent":"",
+                                "id":"52690ad6-22e4-4674-93d4-1784721e9944",
+                                "name":"GPT6",
+                                "htmlContent":""
+                            }
+                        })";
+                       nlohmann::json ask_request = nlohmann::json::parse(ask_json_str, nullptr, false);
+                       ask_request["prompts"] = getConversationJson(json);
+                       std::string ask_request_str = ask_request.dump();
+                       SPDLOG_INFO("request: [{}]", ask_request_str);
+                       return ask_request_str;
+                   }())
+                   .clearHeaders()
+                   .setHttpHeaders(headers)
+                   .perform();
+    if (ret.has_value()) {
+        SPDLOG_ERROR("{}", ret.value());
+        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
+        ch->try_send(err, ret.value());
+    }
+    co_return;
+}
+
+boost::asio::awaitable<void> FreeGpt::chatxyz(std::shared_ptr<Channel> ch, nlohmann::json json) {
+    co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
+    ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
+
+    auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
+
+    boost::system::error_code err{};
+    std::unordered_multimap<std::string, std::string> headers{
+        {"Accept", "text/event-stream"},
+        {"content-type", "application/json"},
+        {"Referer", "https://chat.3211000.xyz/"},
+        {"Origin", "https://chat.3211000.xyz"},
+        {"Sec-Fetch-Dest", "empty"},
+        {"Sec-Fetch-Mode", "cors"},
+        {"Sec-Fetch-Site", "same-origin"},
+        {"TE", "trailers"},
+        {"x-requested-with", "XMLHttpRequest"},
+    };
+    std::string recv;
+    auto ret = Curl()
+                   .setUrl("https://chat.3211000.xyz/api/openai/v1/chat/completions")
+                   .setProxy(m_cfg.http_proxy)
+                   .setRecvHeadersCallback([](std::string) { return; })
+                   .setRecvBodyCallback([&](std::string chunk_str) mutable {
+                       recv.append(chunk_str);
+                       while (true) {
+                           auto position = recv.find("\n");
+                           if (position == std::string::npos)
+                               break;
+                           auto msg = recv.substr(0, position + 1);
+                           recv.erase(0, position + 1);
+                           msg.pop_back();
+                           if (msg.empty() || !msg.contains("content"))
+                               continue;
+                           auto fields = splitString(msg, "data: ");
+                           boost::system::error_code err{};
+                           nlohmann::json line_json = nlohmann::json::parse(fields.back(), nullptr, false);
+                           if (line_json.is_discarded()) {
+                               SPDLOG_ERROR("json parse error: [{}]", fields.back());
+                               ch->try_send(err, std::format("json parse error: [{}]", fields.back()));
+                               continue;
+                           }
+                           if (line_json["choices"][0]["delta"]["content"].is_null())
+                               continue;
+                           auto str = line_json["choices"][0]["delta"]["content"].get<std::string>();
+                           if (!str.empty())
+                               ch->try_send(err, str);
+                       }
+                   })
+                   .setBody([&] {
+                       constexpr std::string_view ask_json_str = R"({
+                            "messages":[
+                                {
+                                    "role":"system",
+                                    "content":"\nYou are ChatGPT, a large language model trained by OpenAI.\nCarefully heed the user's instructions.\nRespond using Markdown.\nKnowledge cutoff: 2021-09\nCurrent model: gpt-3.5-turbo\nCurrent time: 2023/12/26 14:12:34\nLatex inline: $x^2$ \nLatex block: $$e=mc^2$$\n\n"
+                                },
+                                {
+                                    "role":"user",
+                                    "content":"hello"
+                                }
+                            ],
+                            "stream":true,
+                            "model":"gpt-3.5-turbo",
+                            "temperature":0.5,
+                            "presence_penalty":0,
+                            "frequency_penalty":0,
+                            "top_p":1
+                        })";
+                       nlohmann::json ask_request = nlohmann::json::parse(ask_json_str, nullptr, false);
+                       ask_request["messages"][1]["content"] = prompt;
+                       std::string ask_request_str = ask_request.dump();
+                       SPDLOG_INFO("request: [{}]", ask_request_str);
+                       return ask_request_str;
+                   }())
+                   .clearHeaders()
+                   .setHttpHeaders(headers)
+                   .perform();
+    if (ret.has_value()) {
+        SPDLOG_ERROR("{}", ret.value());
+        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
+        ch->try_send(err, ret.value());
+    }
+    co_return;
+}
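Both providers decode the upstream server-sent-events stream the same way: curl hands the body to `setRecvBodyCallback` in arbitrary-sized chunks, so bytes accumulate in `recv` until a complete newline-terminated line is available, the payload after the last `data: ` marker is parsed, and the `choices[0].delta.content` fragment is forwarded over the channel. The buffering matters because a JSON event split across two chunks would otherwise fail to parse. The sketch below isolates that loop as a standalone, testable function; `feedChunk`, `emit`, and the demo input are hypothetical names for illustration, not part of the patch:

```cpp
// Illustrative, self-contained version of the SSE line-reassembly loop used
// by gpt6() and chatxyz(). feedChunk() buffers partial lines across network
// chunks and emits each streamed content fragment; all names are hypothetical.
#include <functional>
#include <iostream>
#include <nlohmann/json.hpp>
#include <string>

void feedChunk(std::string& buffer, std::string chunk,
               const std::function<void(const std::string&)>& emit) {
    buffer.append(std::move(chunk));
    for (;;) {
        auto pos = buffer.find('\n');
        if (pos == std::string::npos)
            break;                          // partial line: wait for the next chunk
        auto line = buffer.substr(0, pos);  // one complete SSE line, '\n' dropped
        buffer.erase(0, pos + 1);
        if (line.find("content") == std::string::npos)
            continue;                       // skips keep-alives and "data: [DONE]"
        auto marker = line.rfind("data: ");
        if (marker == std::string::npos)
            continue;
        auto payload = nlohmann::json::parse(line.substr(marker + 6), nullptr, false);
        if (payload.is_discarded())
            continue;                       // the real code logs and reports the error
        auto& delta = payload["choices"][0]["delta"]["content"];
        if (delta.is_string() && !delta.get<std::string>().empty())
            emit(delta.get<std::string>());
    }
}

int main() {
    std::string buffer;
    auto print = [](const std::string& s) { std::cout << s << '\n'; };
    // One SSE event split across two network chunks.
    feedChunk(buffer, "data: {\"choices\":[{\"delta\":{\"con", print);
    feedChunk(buffer, "tent\":\"hi\"}}]}\n", print);  // prints: hi
}
```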
diff --git a/chatgpt_microservice/src/main.cpp b/chatgpt_microservice/src/main.cpp
index 1073d98..fbc9186 100644
--- a/chatgpt_microservice/src/main.cpp
+++ b/chatgpt_microservice/src/main.cpp
@@ -39,36 +39,8 @@ void setEnvironment(auto& cfg) {
         if (!upper_http_proxy.empty())
             cfg.http_proxy = std::move(upper_http_proxy);
     }
-    if (auto [chat_path] = getEnv("CHAT_PATH"); !chat_path.empty()) {
-        cfg.chat_path = std::move(chat_path);
-    }
     if (cfg.chat_path.back() == '/')
         cfg.chat_path.pop_back();
-    if (auto [port] = getEnv("PORT"); !port.empty())
-        cfg.port = std::move(port);
-    if (auto [host] = getEnv("HOST"); !host.empty())
-        cfg.host = std::move(host);
-    if (auto [work_thread_num] = getEnv("WORK_THREAD_NUM"); !work_thread_num.empty())
-        cfg.work_thread_num = std::atol(work_thread_num.c_str());
-    if (auto [providers] = getEnv("PROVIDERS"); !providers.empty()) {
-        nlohmann::json providers_list = nlohmann::json::parse(providers, nullptr, false);
-        if (!providers_list.is_discarded())
-            cfg.providers = providers_list.get<std::vector<std::string>>();
-    }
-    if (auto [api_key] = getEnv("API_KEY"); !api_key.empty())
-        cfg.api_key = std::move(api_key);
-    if (auto [interval] = getEnv("INTERVAL"); !interval.empty())
-        cfg.interval = std::atol(interval.c_str());
-    // export IP_WHITE_LIST="[\"127.0.0.1\",\"192.168.1.1\"]"
-    if (auto [ip_white_list_str] = getEnv("IP_WHITE_LIST"); !ip_white_list_str.empty()) {
-        nlohmann::json ip_white_list = nlohmann::json::parse(ip_white_list_str, nullptr, false);
-        if (!ip_white_list.is_discarded())
-            cfg.ip_white_list = ip_white_list.get<std::vector<std::string>>();
-    }
-    if (auto [zeus] = getEnv("ZEUS"); !zeus.empty())
-        cfg.zeus = std::move(zeus);
-    if (auto [flaresolverr] = getEnv("FLARESOLVERR"); !flaresolverr.empty())
-        cfg.flaresolverr = std::move(flaresolverr);
 }

 std::string createIndexHtml(const std::string& file, const Config& cfg) {
@@ -321,7 +293,7 @@ int main(int, char** argv) {
     ScopeExit cleanup{[=] { curl_global_cleanup(); }};

     spdlog::set_pattern("[%Y-%m-%d %H:%M:%S.%e][thread %t][%!][%s:%#][%l] %v");
-    auto [config, error] = yaml_cpp_struct::from_yaml<Config>(argv[1]);
+    auto [config, error] = yaml_cpp_struct::from_yaml_env<Config>(argv[1], "");
     if (!config) {
         SPDLOG_ERROR("{}", error);
         return EXIT_FAILURE;
@@ -350,6 +322,8 @@ int main(int, char** argv) {
     ADD_METHOD("gpt-3.5-turbo-stream-AiChatOnline", FreeGpt::aiChatOnline);
     ADD_METHOD("gpt-3.5-turbo-stream-fakeGpt", FreeGpt::fakeGpt);
     ADD_METHOD("gpt-3.5-turbo-stream-aura", FreeGpt::aura);
+    ADD_METHOD("gpt6", FreeGpt::gpt6);
+    ADD_METHOD("gpt-3.5-turbo-stream-chatxyz", FreeGpt::chatxyz);

     SPDLOG_INFO("active provider:");
     for (auto& [provider, _] : gpt_function)
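Taken together with the cfg.h change, these main.cpp edits move all per-field environment handling out of hand-rolled code: `yaml_cpp_struct::from_yaml_env` loads the YAML file and then, presumably keyed by field name (the second argument looks like an env-var prefix, empty here), lets environment variables such as PORT or IP_WHITE_LIST override individual fields, which is why the struct defaults had to migrate into cpp-free-gpt.yml. One behavioral difference worth noting: `work_thread_num` previously defaulted to a value derived from `std::thread::hardware_concurrency()`, while the YAML now pins it to a fixed 8. Only the HTTP_PROXY special-casing and the trailing-slash normalization of `chat_path` remain in `setEnvironment`.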
diff --git a/chatgpt_microservice/xmake.lua b/chatgpt_microservice/xmake.lua
index a0234cb..f30b4cc 100644
--- a/chatgpt_microservice/xmake.lua
+++ b/chatgpt_microservice/xmake.lua
@@ -6,7 +6,7 @@ add_repositories("my_private_repo https://github.com/fantasy-peak/xmake-repo.git")
 add_requires("openssl", {system = false})
 add_requires("zlib", {system = false})
-add_requires("yaml_cpp_struct", "nlohmann_json", "spdlog", "inja", "plusaes", "concurrentqueue")
+add_requires("yaml_cpp_struct v1.0.4", "nlohmann_json", "spdlog", "inja", "plusaes", "concurrentqueue")
 add_requires("boost", {configs = {iostreams = true}})

 set_languages("c++23")
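The pin to `yaml_cpp_struct v1.0.4` pairs with the switch to `from_yaml_env` above: an unversioned requirement could resolve to a cached older package that lacks that entry point, so requiring what is presumably the first release that ships it keeps the build reproducible.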