diff --git a/chatgpt_microservice/README.md b/chatgpt_microservice/README.md
index 9f9e3fd..b981a7b 100644
--- a/chatgpt_microservice/README.md
+++ b/chatgpt_microservice/README.md
@@ -1,5 +1,7 @@
# Cpp FreeGPT WebUI
+[](https://github.com/fantasy-peak/cpp-freegpt-webui/actions) [](https://github.com/fantasy-peak/cpp-freegpt-webui/actions)
+
## GPT 3.5/4
NO API KEY REQUIRED ❌🔑
@@ -29,9 +31,7 @@ To run the application, run the following command:
1. Check the local g++ version; g++ >= 13.1.0 (GCC) is required
2. Install xmake
-wget https://github.com/xmake-io/xmake/releases/download/v2.8.2/xmake-v2.8.2.xz.run
-chmod 777 xmake-v2.8.2.xz.run
-./xmake-v2.8.2.xz.run
+curl -kfsSL https://xmake.io/shget.text | bash -s v2.8.3
source ~/.xmake/profile
3. Install libcurl-impersonate: Ubuntu (apt-get install libcurl4-openssl-dev), CentOS 7 (yum install libcurl-devel.x86_64)
@@ -64,25 +64,52 @@ docker pull fantasypeak/freegpt:latest
Run the application using Docker:
```
-docker run --rm -p 8858:8858 -it --name freegpt fantasypeak/freegpt:latest
-// OR
-docker run --rm --net=host -it --name freegpt fantasypeak/freegpt:latest
-// use http_proxy
-docker run --rm -p 8858:8858 -it --name freegpt -e HTTP_PROXY=http://127.0.0.1:8080 -e CHAT_PATH=/chat fantasypeak/freegpt:latest
-// set active providers
-docker run --rm -p 8858:8858 -it --name freegpt -e CHAT_PATH=/chat -e PROVIDERS="[\"gpt-4-ChatgptAi\",\"gpt-3.5-turbo-stream-DeepAi\"]" fantasypeak/freegpt:latest
-// enable ip white list function
-docker run --rm -p 8858:8858 -it --name freegpt -e IP_WHITE_LIST="[\"127.0.0.1\",\"192.168.1.1\"]" fantasypeak/freegpt:latest
+docker run -it --rm \
+ -p 8858:8858 \
+ --name freegpt \
+ fantasypeak/freegpt:latest
+```
+Run the application using Docker (with an HTTP proxy):
+```
+docker run --rm -it \
+ -p 8858:8858 \
+ --name freegpt \
+ -e HTTP_PROXY=http://127.0.0.1:8080 \
+ fantasypeak/freegpt:latest
+```
+
+Configurable environment variables:
+```
+01. CHAT_PATH=/chat
+02. HTTP_PROXY=http://127.0.0.1:8080
+03. PROVIDERS="[\"gpt-4-ChatgptAi\",\"gpt-3.5-turbo-stream-DeepAi\"]"
+04. IP_WHITE_LIST="[\"127.0.0.1\",\"192.168.1.1\"]"
+05. PORT=8858
+06. HOST=0.0.0.0
+07. WORK_THREAD_NUM=8
+08. INTERVAL=300
+09. ZEUS=http://127.0.0.1:8860
+10. FLARESOLVERR=http://127.0.0.1:8191/v1
```
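+For example, to run on a different port with a custom worker-thread count (the values here are illustrative, not defaults):
+```
+docker run --rm -it \
+ -p 9000:9000 \
+ --name freegpt \
+ -e PORT=9000 \
+ -e WORK_THREAD_NUM=4 \
+ fantasypeak/freegpt:latest
+```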
### Start the Zeus Service [optional]
This is optional: Zeus is an auxiliary service for cpp-freegpt-webui, needed because some providers must perform extra operations such as fetching cookies or refreshing web pages.
If you need those providers, start the Zeus Docker container first.
+
+Start the Zeus service:
```
-docker pull fantasypeak/freegpt-zeus:latest
-docker run --rm --net=host -it --name zeus fantasypeak/freegpt-zeus:latest
-docker pull fantasypeak/freegpt:latest
-docker run --rm --net=host -it --name freegpt fantasypeak/freegpt:latest
+docker run -d \
+ --name=zeus \
+ -p 8860:8860 \
+ --rm \
+ fantasypeak/freegpt-zeus:latest
+```
+Start the application:
+```
+docker run -it --rm \
+ --net=host \
+ --name freegpt \
+ fantasypeak/freegpt:latest
```
### Start the flaresolverr docker [optional]
@@ -97,9 +124,13 @@ docker run -d \
```
### Call OpenAi Api
+It supports calling OpenAI's API, but you need to set API_KEY:
```
-// It supports calling OpenAI's API, but need set API_KEY
-docker run --rm -p 8858:8858 -it --name freegpt -e CHAT_PATH=/chat -e API_KEY=a40f22f2-c1a2-4b1d-a47f-55ae1a7ddbed fantasypeak/freegpt:latest
+docker run --rm -it \
+ -p 8858:8858 \
+ --name freegpt \
+ -e API_KEY=a40f22f2-c1a2-4b1d-a47f-55ae1a7ddbed \
+ fantasypeak/freegpt:latest
```
### WebUI
diff --git a/chatgpt_microservice/include/cfg.h b/chatgpt_microservice/include/cfg.h
index 3015065..18fed4c 100644
--- a/chatgpt_microservice/include/cfg.h
+++ b/chatgpt_microservice/include/cfg.h
@@ -1,11 +1,13 @@
#pragma once
+#include <thread>
+
#include <string>
struct Config {
std::string client_root_path;
std::size_t interval{300};
- std::size_t work_thread_num{8};
+ std::size_t work_thread_num{std::thread::hardware_concurrency() * 2};
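+ // Note: std::thread::hardware_concurrency() may return 0 when the value
+ // cannot be determined; in that case set WORK_THREAD_NUM explicitly.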
std::string host{"0.0.0.0"};
std::string port{"8858"};
std::string chat_path{"/chat"};
diff --git a/chatgpt_microservice/include/helper.hpp b/chatgpt_microservice/include/helper.hpp
index 9c6390b..b2368e2 100644
--- a/chatgpt_microservice/include/helper.hpp
+++ b/chatgpt_microservice/include/helper.hpp
@@ -101,3 +101,24 @@ inline std::string createUuidString() {
static thread_local boost::uuids::random_generator gen;
return boost::uuids::to_string(gen());
}
+
+// clang-format off
+namespace detail {
+
+template <typename Container>
+struct to_helper {};
+
+template <typename Container, std::ranges::range R>
+ requires std::convertible_to<std::ranges::range_value_t<R>, typename Container::value_type>
+Container operator|(R&& r, to_helper<Container>) {
+ return Container{r.begin(), r.end()};
+}
+
+} // namespace detail
+
+template <std::ranges::range Container>
+ requires(!std::ranges::view<Container>)
+inline auto to() {
+ return detail::to_helper<Container>{};
+}
+// clang-format on
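+//
+// Usage sketch (illustrative): materialize any compatible range into a
+// container, e.g.
+//   auto v = std::views::iota(0, 5) | to<std::vector<int>>();  // {0, 1, 2, 3, 4}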
diff --git a/chatgpt_microservice/src/free_gpt.cpp b/chatgpt_microservice/src/free_gpt.cpp
index 7eeddb5..9a1ed95 100644
--- a/chatgpt_microservice/src/free_gpt.cpp
+++ b/chatgpt_microservice/src/free_gpt.cpp
@@ -21,27 +21,6 @@
namespace {
-// clang-format off
-namespace detail {
-
-template <typename Container>
-struct to_helper {};
-
-template <typename Container, std::ranges::range R>
- requires std::convertible_to<std::ranges::range_value_t<R>, typename Container::value_type>
-Container operator|(R&& r, to_helper<Container>) {
- return Container{r.begin(), r.end()};
-}
-
-} // namespace detail
-
-template <std::ranges::range Container>
- requires(!std::ranges::view<Container>)
-inline auto to() {
- return detail::to_helper<Container>{};
-}
-// clang-format on
-
std::string md5(const std::string& str, bool reverse = true) {
unsigned char hash[MD5_DIGEST_LENGTH];
@@ -391,6 +370,8 @@ public:
if (m_curl)
curl_easy_cleanup(m_curl);
}
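+ // Non-copyable by design: the wrapper owns the CURL* handle and frees it in
+ // the destructor, so copying would lead to a double curl_easy_cleanup().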
+ Curl(const Curl&) = delete;
+ Curl& operator=(const Curl&) = delete;
auto& setUrl(std::string_view url) {
m_url = url;
@@ -2245,75 +2226,45 @@ boost::asio::awaitable<void> FreeGpt::llama2(std::shared_ptr<Channel> ch, nlohma
auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
- struct Input {
- std::shared_ptr<Channel> ch;
- std::string recv;
+ static std::unordered_multimap<std::string, std::string> headers{
+ {"Accept", "*/*"},
+ {"origin", "https://www.llama2.ai"},
+ {"referer", "https://www.llama2.ai/"},
+ {"Content-Type", "text/plain;charset=UTF-8"},
};
- Input input;
+ auto ret = Curl()
+ .setUrl("https://www.llama2.ai/api")
+ .setProxy(m_cfg.http_proxy)
+ .setRecvHeadersCallback([](std::string) { return; })
+ .setRecvBodyCallback([&](std::string str) mutable {
+ boost::asio::post(ch->get_executor(), [=, str = std::move(str)] { ch->try_send(err, str); });
+ return;
+ })
+ .setBody([&] {
+ constexpr std::string_view ask_json_str = R"({
+ "prompt":"[INST] hello [/INST]\n[INST] hello [/INST]\n",
+ "version":"d24902e3fa9b698cc208b5e63136c4e26e828659a9f09827ca6ec5bb83014381",
+ "systemPrompt":"You are a helpful assistant.",
+ "temperature":0.75,
+ "topP":0.9,
+ "maxTokens":800,
+ "image":null,
+ "audio":null
+ })";
+ nlohmann::json ask_request = nlohmann::json::parse(ask_json_str, nullptr, false);
+ ask_request["prompt"] = std::format("[INST] {} [/INST]\n", prompt);
+ std::string ask_request_str = ask_request.dump();
+ SPDLOG_INFO("ask_request_str: [{}]", ask_request_str);
+ return ask_request_str;
+ }())
+ .clearHeaders()
+ .setHttpHeaders(headers)
+ .perform();
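+ // Assumption, based on the error handling below: perform() returns an
+ // empty optional on success and an error message on failure.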
- CURLcode res;
- CURL* curl = curl_easy_init();
- if (!curl) {
- auto error_info = std::format("curl_easy_init() failed:{}", curl_easy_strerror(res));
- co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
- ch->try_send(err, error_info);
- co_return;
- }
- ScopeExit auto_exit{[=] { curl_easy_cleanup(curl); }};
-
- auto ret = sendHttpRequest(CurlHttpRequest{
- .curl = curl,
- .url = "https://www.llama2.ai/api",
- .http_proxy = m_cfg.http_proxy,
- .cb = [](void* contents, size_t size, size_t nmemb, void* userp) mutable -> size_t {
- auto input_ptr = static_cast<Input*>(userp);
- std::string data{(char*)contents, size * nmemb};
- auto& [ch, recv] = *input_ptr;
- boost::asio::post(ch->get_executor(), [=] {
- boost::system::error_code err{};
- ch->try_send(err, data);
- });
- return size * nmemb;
- },
- .input = [&] -> void* {
- input.recv.clear();
- input.ch = ch;
- return &input;
- }(),
- .headers = [&] -> auto& {
- static std::unordered_map<std::string, std::string> headers{
- {"Accept", "*/*"},
- {"origin", "https://www.llama2.ai"},
- {"referer", "https://www.llama2.ai/"},
- {"Content-Type", "text/plain;charset=UTF-8"},
- };
- return headers;
- }(),
- .body = [&] -> std::string {
- constexpr std::string_view ask_json_str = R"({
- "prompt":"[INST] hello [/INST]\n[INST] hello [/INST]\n",
- "version":"d24902e3fa9b698cc208b5e63136c4e26e828659a9f09827ca6ec5bb83014381",
- "systemPrompt":"You are a helpful assistant.",
- "temperature":0.75,
- "topP":0.9,
- "maxTokens":800,
- "image":null,
- "audio":null
- })";
- nlohmann::json ask_request = nlohmann::json::parse(ask_json_str, nullptr, false);
- ask_request["prompt"] = std::format("[INST] {} [/INST]\n", prompt);
- std::string ask_request_str = ask_request.dump();
- SPDLOG_INFO("ask_request_str: [{}]", ask_request_str);
- return ask_request_str;
- }(),
- .response_header_ptr = nullptr,
- .expect_response_code = 200,
- .ssl_verify = false,
- });
if (ret) {
+ SPDLOG_ERROR("https://www.llama2.ai/api: [{}]", ret.value());
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, ret.value());
- co_return;
}
co_return;
}
@@ -2418,102 +2369,77 @@ boost::asio::awaitable<void> FreeGpt::geekGpt(std::shared_ptr<Channel> ch, nlohm
co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
boost::system::error_code err{};
-
- auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
-
- struct Input {
- std::shared_ptr<Channel> ch;
- std::string recv;
+ static std::unordered_multimap<std::string, std::string> headers{
+ {"Accept", "*/*"},
+ {"authority", "ai.fakeopen.com"},
+ {"content-type", "application/json"},
+ {"referer", "https://chat.geekgpt.org/"},
+ {"origin", "https://chat.geekgpt.org"},
+ {"sec-ch-ua", R"("Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117")"},
+ {"sec-ch-ua-mobile", R"(?0)"},
+ {"sec-ch-ua-platform", R"("macOS")"},
+ {"cache-control", "no-cache"},
+ {"pragma", "no-cache"},
+ {"authorization", "Bearer pk-this-is-a-real-free-pool-token-for-everyone"},
};
- Input input;
-
- CURLcode res;
- CURL* curl = curl_easy_init();
- if (!curl) {
- auto error_info = std::format("curl_easy_init() failed:{}", curl_easy_strerror(res));
- co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
- ch->try_send(err, error_info);
- co_return;
- }
- ScopeExit auto_exit{[=] { curl_easy_cleanup(curl); }};
-
- auto ret = sendHttpRequest(CurlHttpRequest{
- .curl = curl,
- .url = "https://ai.fakeopen.com/v1/chat/completions",
- .http_proxy = m_cfg.http_proxy,
- .cb = [](void* contents, size_t size, size_t nmemb, void* userp) mutable -> size_t {
- auto input_ptr = static_cast<Input*>(userp);
- std::string data{(char*)contents, size * nmemb};
- auto& [ch, recv] = *input_ptr;
- recv.append(data);
- while (true) {
- auto position = recv.find("\n");
- if (position == std::string::npos)
- break;
- auto msg = recv.substr(0, position + 1);
- recv.erase(0, position + 1);
- msg.pop_back();
- if (msg.empty() || !msg.contains("content"))
- continue;
- auto fields = splitString(msg, "data: ");
- boost::system::error_code err{};
- nlohmann::json line_json = nlohmann::json::parse(fields.back(), nullptr, false);
- if (line_json.is_discarded()) {
- SPDLOG_ERROR("json parse error: [{}]", fields.back());
- boost::asio::post(ch->get_executor(), [=] {
- ch->try_send(err, std::format("json parse error: [{}]", fields.back()));
- });
- continue;
- }
- auto str = line_json["choices"][0]["delta"]["content"].get<std::string>();
- if (!str.empty() && str != "[DONE]")
- boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, str); });
- }
- return size * nmemb;
- },
- .input = [&] -> void* {
- input.recv.clear();
- input.ch = ch;
- return &input;
- }(),
- .headers = [&] -> auto& {
- static std::unordered_map<std::string, std::string> headers{
- {"Accept", "*/*"},
- {"origin", "https://chat.geekgpt.org"},
- {"referer", "https://chat.geekgpt.org/"},
- {"Content-Type", "application/json"},
- {"authority", "ai.fakeopen.com"},
- {"authorization", "Bearer pk-this-is-a-real-free-pool-token-for-everyone"},
- };
- return headers;
- }(),
- .body = [&] -> std::string {
- constexpr std::string_view ask_json_str = R"({
- "messages": [{
- "role": "user",
- "content": "hello"
- }],
- "model": "gpt-3.5-turbo",
- "temperature": 0.9,
- "presence_penalty": 0,
- "top_p": 1,
- "frequency_penalty": 0,
- "stream": true
- })";
- nlohmann::json ask_request = nlohmann::json::parse(ask_json_str, nullptr, false);
- ask_request["messages"] = getConversationJson(json);
- std::string ask_request_str = ask_request.dump();
- SPDLOG_INFO("ask_request_str: [{}]", ask_request_str);
- return ask_request_str;
- }(),
- .response_header_ptr = nullptr,
- .expect_response_code = 200,
- .ssl_verify = false,
- });
- if (ret) {
+ std::string recv;
+ auto ret = Curl()
+ .setUrl("https://ai.fakeopen.com/v1/chat/completions")
+ .setProxy(m_cfg.http_proxy)
+ .setRecvHeadersCallback([](std::string) { return; })
+ .setRecvBodyCallback([&](std::string str) mutable {
+ recv.append(str);
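+ // The endpoint streams SSE-style chunks; buffer partial data and
+ // process only complete "data: ..." lines.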
+ while (true) {
+ auto position = recv.find("\n");
+ if (position == std::string::npos)
+ break;
+ auto msg = recv.substr(0, position + 1);
+ recv.erase(0, position + 1);
+ msg.pop_back();
+ if (msg.empty() || !msg.contains("content"))
+ continue;
+ auto fields = splitString(msg, "data: ");
+ boost::system::error_code err{};
+ nlohmann::json line_json = nlohmann::json::parse(fields.back(), nullptr, false);
+ if (line_json.is_discarded()) {
+ SPDLOG_ERROR("json parse error: [{}]", fields.back());
+ boost::asio::post(ch->get_executor(), [=] {
+ ch->try_send(err, std::format("json parse error: [{}]", fields.back()));
+ });
+ continue;
+ }
+ auto str = line_json["choices"][0]["delta"]["content"].get<std::string>();
+ if (!str.empty() && str != "[DONE]")
+ boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, str); });
+ }
+ return;
+ })
+ .setBody([&] {
+ constexpr std::string_view ask_json_str = R"({
+ "messages": [{
+ "role": "user",
+ "content": "hello"
+ }],
+ "model": "gpt-3.5-turbo",
+ "temperature": 0.9,
+ "presence_penalty": 0,
+ "top_p": 1,
+ "frequency_penalty": 0,
+ "stream": true
+ })";
+ nlohmann::json ask_request = nlohmann::json::parse(ask_json_str, nullptr, false);
+ ask_request["messages"] = getConversationJson(json);
+ std::string ask_request_str = ask_request.dump();
+ SPDLOG_INFO("request: [{}]", ask_request_str);
+ return ask_request_str;
+ }())
+ .clearHeaders()
+ .setHttpHeaders(headers)
+ .perform();
+ if (ret.has_value()) {
+ SPDLOG_ERROR("https://ai.fakeopen.com/v1/chat/completions: [{}]", ret.value());
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, ret.value());
- co_return;
}
co_return;
}
diff --git a/chatgpt_microservice/src/main.cpp b/chatgpt_microservice/src/main.cpp
index 4487ea5..53e5863 100644
--- a/chatgpt_microservice/src/main.cpp
+++ b/chatgpt_microservice/src/main.cpp
@@ -316,7 +316,7 @@ boost::asio::awaitable<void> doSession(boost::asio::ip::tcp::acceptor& acceptor,
co_return;
}
-int main(int argc, char** argv) {
+int main(int, char** argv) {
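+ // argc is unnamed because it is unused; omitting the name avoids
+ // unused-parameter warnings while keeping argv for later use.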
curl_global_init(CURL_GLOBAL_ALL);
ScopeExit cleanup{[=] { curl_global_cleanup(); }};