diff --git a/bot_microservice/constants.py b/bot_microservice/constants.py
index 620e5f8..8d88182 100644
--- a/bot_microservice/constants.py
+++ b/bot_microservice/constants.py
@@ -54,11 +54,8 @@ class ChatGptModelsEnum(StrEnum):
     gpt_4_stream_Chatgpt4Online = "gpt-4-stream-Chatgpt4Online"
     gpt_3_5_turbo_stream_gptalk = "gpt-3.5-turbo-stream-gptalk"
     llama2 = "llama2"
+    gpt_3_5_turbo_stream_GeekGpt = "gpt-3.5-turbo-stream-GeekGpt"
     gpt_3_5_turbo_stream_gptforlove = "gpt-3.5-turbo-stream-gptforlove"
-    gpt_3_5_turbo_16k_stream_Ylokh = "gpt-3.5-turbo-16k-stream-Ylokh"
-    gpt_3_5_turbo_stream_Vitalentum = "gpt-3.5-turbo-stream-Vitalentum"
-    gpt_3_5_turbo_stream_GptChatly = "gpt-3.5-turbo-stream-GptChatly"
-    gpt_3_5_turbo_stream_ChatgptDemo = "gpt-3.5-turbo-stream-ChatgptDemo"
 
     @classmethod
     def values(cls) -> set[str]:
@@ -68,8 +65,4 @@ class ChatGptModelsEnum(StrEnum):
     def _deprecated() -> set[str]:
         return {
             "gpt-3.5-turbo-stream-gptforlove",
-            "gpt-3.5-turbo-stream-aivvm",
-            "gpt-3.5-turbo-stream-GptChatly",
-            "gpt-3.5-turbo-stream-Vitalentum",
-            "gpt-3.5-turbo-16k-stream-Ylokh",
         }
diff --git a/chatgpt_microservice/include/free_gpt.h b/chatgpt_microservice/include/free_gpt.h
index 6be9e49..884d7b3 100644
--- a/chatgpt_microservice/include/free_gpt.h
+++ b/chatgpt_microservice/include/free_gpt.h
@@ -36,6 +36,7 @@ public:
    boost::asio::awaitable<void> chatGptDemo(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> llama2(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> noowai(std::shared_ptr<Channel>, nlohmann::json);
+    boost::asio::awaitable<void> geekGpt(std::shared_ptr<Channel>, nlohmann::json);
 
 private:
    boost::asio::awaitable<std::expected<boost::beast::ssl_stream<boost::beast::tcp_stream>, std::string>>
diff --git a/chatgpt_microservice/src/free_gpt.cpp b/chatgpt_microservice/src/free_gpt.cpp
index 991ce19..5a141d4 100644
--- a/chatgpt_microservice/src/free_gpt.cpp
+++ b/chatgpt_microservice/src/free_gpt.cpp
@@ -2617,3 +2617,107 @@ boost::asio::awaitable<void> FreeGpt::noowai(std::shared_ptr<Channel> ch, nlohma
     }
     co_return;
 }
+
+boost::asio::awaitable<void> FreeGpt::geekGpt(std::shared_ptr<Channel> ch, nlohmann::json json) {
+    co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
+    ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
+    boost::system::error_code err{};
+
+    auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
+
+    struct Input {
+        std::shared_ptr<Channel> ch;
+        std::string recv;
+    };
+    Input input;
+
+    CURLcode res;
+    CURL* curl = curl_easy_init();
+    if (!curl) {
+        auto error_info = std::format("curl_easy_init() failed:{}", curl_easy_strerror(res));
+        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
+        ch->try_send(err, error_info);
+        co_return;
+    }
+    ScopeExit auto_exit{[=] { curl_easy_cleanup(curl); }};
+
+    auto ret = sendHttpRequest(CurlHttpRequest{
+        .curl = curl,
+        .url = "https://ai.fakeopen.com/v1/chat/completions",
+        .http_proxy = m_cfg.http_proxy,
+        .cb = [](void* contents, size_t size, size_t nmemb, void* userp) mutable -> size_t {
+            auto input_ptr = static_cast<Input*>(userp);
+            std::string data{(char*)contents, size * nmemb};
+            auto& [ch, recv] = *input_ptr;
+            recv.append(data);
+            while (true) {
+                auto position = recv.find("\n");
+                if (position == std::string::npos)
+                    break;
+                auto msg = recv.substr(0, position + 1);
+                recv.erase(0, position + 1);
+                msg.pop_back();
+                if (msg.empty() || !msg.contains("content"))
+                    continue;
+                auto fields = splitString(msg, "data: ");
+                boost::system::error_code err{};
+                nlohmann::json line_json = nlohmann::json::parse(fields.back(), nullptr, false);
+                if (line_json.is_discarded()) {
+                    SPDLOG_ERROR("json parse error: [{}]", fields.back());
+                    boost::asio::post(ch->get_executor(), [=] {
+                        ch->try_send(err, std::format("json parse error: [{}]", fields.back()));
+                    });
+                    continue;
+                }
+                auto str = line_json["choices"][0]["delta"]["content"].get<std::string>();
+                if (!str.empty() && str != "[DONE]")
+                    boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, str); });
+            }
+            return size * nmemb;
+        },
+        .input = [&] -> void* {
+            input.recv.clear();
+            input.ch = ch;
+            return &input;
+        }(),
+        .headers = [&] -> auto& {
+            static std::unordered_map<std::string, std::string> headers{
+                {"Accept", "*/*"},
+                {"origin", "https://chat.geekgpt.org"},
+                {"referer", "https://chat.geekgpt.org/"},
+                {"Content-Type", "application/json"},
+                {"authority", "ai.fakeopen.com"},
+                {"authorization", "Bearer pk-this-is-a-real-free-pool-token-for-everyone"},
+            };
+            return headers;
+        }(),
+        .body = [&] -> std::string {
+            constexpr std::string_view ask_json_str = R"({
+                "messages": [{
+                    "role": "user",
+                    "content": "hello"
+                }],
+                "model": "gpt-3.5-turbo",
+                "temperature": 0.9,
+                "presence_penalty": 0,
+                "top_p": 1,
+                "frequency_penalty": 0,
+                "stream": true
+            })";
+            nlohmann::json ask_request = nlohmann::json::parse(ask_json_str, nullptr, false);
+            ask_request["messages"] = getConversationJson(json);
+            std::string ask_request_str = ask_request.dump();
+            SPDLOG_INFO("ask_request_str: [{}]", ask_request_str);
+            return ask_request_str;
+        }(),
+        .response_header_ptr = nullptr,
+        .expect_response_code = 200,
+        .ssl_verify = false,
+    });
+    if (ret) {
+        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
+        ch->try_send(err, ret.value());
+        co_return;
+    }
+    co_return;
+}
diff --git a/chatgpt_microservice/src/main.cpp b/chatgpt_microservice/src/main.cpp
index d1843a1..fdfc69d 100644
--- a/chatgpt_microservice/src/main.cpp
+++ b/chatgpt_microservice/src/main.cpp
@@ -350,6 +350,7 @@ int main(int argc, char** argv) {
     ADD_METHOD("gpt-3.5-turbo-stream-gptforlove", FreeGpt::gptForLove);
     ADD_METHOD("gpt-3.5-turbo-stream-ChatgptDemo", FreeGpt::chatGptDemo);
     ADD_METHOD("gpt-3.5-turbo-stream-noowai", FreeGpt::noowai);
+    ADD_METHOD("gpt-3.5-turbo-stream-GeekGpt", FreeGpt::geekGpt);
     ADD_METHOD("llama2", FreeGpt::llama2);
 
     SPDLOG_INFO("active provider:");