mirror of
https://github.com/Balshgit/gpt_chat_bot.git
synced 2025-12-15 16:10:39 +03:00
add GitHub callback (#38)
* add gpt-3.5-turbo-stream-GptChatly provider
* add GitHub callback
This commit is contained in:
@@ -1,7 +1,8 @@
|
||||
FROM ubuntu:23.04
|
||||
|
||||
#use --build-arg LIB_DIR=/usr/lib for arm64 cpus
|
||||
ARG LIB_DIR=/usr/lib64
|
||||
ARG LIB_DIR=/local/lib
|
||||
RUN mkdir -p /local/lib
|
||||
|
||||
ENV LD_LIBRARY_PATH=$LIB_DIR:$LD_LIBRARY_PATH
|
||||
ENV LIBRARY_PATH=$LIB_DIR:$LIBRARY_PATH
|
||||
@@ -10,9 +11,9 @@ RUN apt-get update -y
|
||||
RUN apt-get install -y libcurl4-openssl-dev wget libnss3 nss-plugin-pem ca-certificates
|
||||
# RUN strings /lib/$(arch)-linux-gnu/libstdc++.so.6 | grep GLIBCXX_3.4
|
||||
|
||||
RUN wget https://github.com/lwthiker/curl-impersonate/releases/download/v0.5.4/libcurl-impersonate-v0.5.4.$(arch)-linux-gnu.tar.gz
|
||||
RUN mv libcurl-impersonate-v0.5.4.$(arch)-linux-gnu.tar.gz $LIB_DIR
|
||||
RUN cd $LIB_DIR && tar -xvf libcurl-impersonate-v0.5.4.$(arch)-linux-gnu.tar.gz && rm -rf libcurl-impersonate-v0.5.4.$(arch)-linux-gnu.tar.gz
|
||||
RUN wget https://github.com/lwthiker/curl-impersonate/releases/download/v0.6.0-alpha.1/libcurl-impersonate-v0.6.0-alpha.1.$(arch)-linux-gnu.tar.gz
|
||||
RUN mv libcurl-impersonate-v0.6.0-alpha.1.$(arch)-linux-gnu.tar.gz $LIB_DIR
|
||||
RUN cd $LIB_DIR && tar -xvf libcurl-impersonate-v0.6.0-alpha.1.$(arch)-linux-gnu.tar.gz && rm -rf libcurl-impersonate-v0.6.0-alpha.1.$(arch)-linux-gnu.tar.gz
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
|
||||
@@ -35,10 +35,10 @@ chmod 777 xmake-v2.8.2.xz.run
|
||||
source ~/.xmake/profile
|
||||
|
||||
3. install libcurl-impersonate, ubuntu (apt-get install libcurl4-openssl-dev) centos7 (yum install libcurl-devel.x86_64)
|
||||
wget https://github.com/lwthiker/curl-impersonate/releases/download/v0.5.4/libcurl-impersonate-v0.5.4.x86_64-linux-gnu.tar.gz
|
||||
sudo mv libcurl-impersonate-v0.5.4.x86_64-linux-gnu.tar.gz /usr/lib64
|
||||
wget https://github.com/lwthiker/curl-impersonate/releases/download/v0.6.0-alpha.1/libcurl-impersonate-v0.6.0-alpha.1.x86_64-linux-gnu.tar.gz
|
||||
sudo mv libcurl-impersonate-v0.6.0-alpha.1.x86_64-linux-gnu.tar.gz /usr/lib64
|
||||
cd /usr/lib64
|
||||
sudo tar -xvf libcurl-impersonate-v0.5.4.x86_64-linux-gnu.tar.gz
|
||||
sudo tar -xvf libcurl-impersonate-v0.6.0-alpha.1.x86_64-linux-gnu.tar.gz
|
||||
export LD_LIBRARY_PATH=/usr/lib64:$LD_LIBRARY_PATH
|
||||
export LIBRARY_PATH=/usr/lib64:$LIBRARY_PATH
|
||||
|
||||
|
||||
@@ -37,6 +37,7 @@ public:
|
||||
boost::asio::awaitable<void> gptForLove(std::shared_ptr<Channel>, nlohmann::json);
|
||||
boost::asio::awaitable<void> chatGptDemo(std::shared_ptr<Channel>, nlohmann::json);
|
||||
boost::asio::awaitable<void> llama2(std::shared_ptr<Channel>, nlohmann::json);
|
||||
boost::asio::awaitable<void> gptChatly(std::shared_ptr<Channel>, nlohmann::json);
|
||||
|
||||
private:
|
||||
boost::asio::awaitable<std::expected<boost::beast::ssl_stream<boost::beast::tcp_stream>, std::string>>
|
||||
|
||||
@@ -2892,3 +2892,82 @@ boost::asio::awaitable<void> FreeGpt::llama2(std::shared_ptr<Channel> ch, nlohma
|
||||
}
|
||||
co_return;
|
||||
}
|
||||
|
||||
/// Provider coroutine for gptchatly.com.
/// Posts the conversation (from `json`) to the fetch-response endpoint via
/// libcurl and forwards the parsed `chatGPTResponse` text to `ch`.
/// The channel is always closed on exit (ScopeExit), so the consumer side
/// never blocks waiting for more messages.
boost::asio::awaitable<void> FreeGpt::gptChatly(std::shared_ptr<Channel> ch, nlohmann::json json) {
    // Hop onto the worker pool so the blocking curl transfer does not stall the io_context.
    co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
    ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
    boost::system::error_code err{};

    // Shared state handed to the curl write callback: the output channel plus
    // an accumulation buffer for received bytes.
    struct Input {
        std::shared_ptr<Channel> ch;
        std::string recv;
    };
    Input input;

    CURL* curl = curl_easy_init();
    if (!curl) {
        // BUG FIX: the original formatted curl_easy_strerror(res) here with an
        // UNINITIALIZED CURLcode `res` — undefined behavior. curl_easy_init()
        // reports failure only by returning nullptr; there is no error code to
        // translate, so report a plain message instead.
        std::string error_info{"curl_easy_init() failed"};
        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
        ch->try_send(err, error_info);
        co_return;
    }
    // Release the easy handle no matter how this coroutine exits.
    ScopeExit auto_exit{[=] { curl_easy_cleanup(curl); }};

    auto ret = sendHttpRequest(CurlHttpRequest{
        .curl = curl,
        .url = "https://gptchatly.com/fetch-response",
        .http_proxy = m_cfg.http_proxy,
        .cb = [](void* contents, size_t size, size_t nmemb, void* userp) mutable -> size_t {
            boost::system::error_code err{};
            auto input_ptr = static_cast<Input*>(userp);
            std::string data{(char*)contents, size * nmemb};
            auto& [ch, recv] = *input_ptr;
            // The endpoint answers with a single JSON object; tolerate garbage
            // by parsing non-throwing and forwarding the raw payload on failure.
            nlohmann::json line_json = nlohmann::json::parse(data, nullptr, false);
            if (line_json.is_discarded()) {
                SPDLOG_ERROR("json parse error: [{}]", data);
                boost::asio::post(ch->get_executor(),
                                  [=] { ch->try_send(err, std::format("json parse error: [{}]", data)); });
                return size * nmemb;
            }
            auto str = line_json["chatGPTResponse"].get<std::string>();
            boost::asio::post(ch->get_executor(), [=] { ch->try_send(err, str); });
            return size * nmemb;
        },
        .input = [&] -> void* {
            input.recv.clear();
            input.ch = ch;
            return &input;
        }(),
        .headers = [&] -> auto& {
            static std::unordered_map<std::string, std::string> headers{
                {"Accept", "*/*"},
                {"origin", "https://gptchatly.com"},
                {"referer", "https://gptchatly.com/"},
                {"Content-Type", "application/json"},
            };
            return headers;
        }(),
        .body = [&] -> std::string {
            constexpr std::string_view ask_json_str = R"({
                "past_conversations": ""
            })";
            nlohmann::json ask_request = nlohmann::json::parse(ask_json_str, nullptr, false);
            ask_request["past_conversations"] = getConversationJson(json);
            std::string ask_request_str = ask_request.dump();
            SPDLOG_INFO("ask_request_str: [{}]", ask_request_str);
            return ask_request_str;
        }(),
        .response_header_ptr = nullptr,
        .expect_response_code = 200,
        .ssl_verify = false,
    });
    // sendHttpRequest yields a value only on failure; forward it to the caller.
    if (ret) {
        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
        ch->try_send(err, ret.value());
        co_return;
    }
    co_return;
}
|
||||
|
||||
@@ -352,6 +352,7 @@ int main(int argc, char** argv) {
|
||||
ADD_METHOD("gpt-3.5-turbo-stream-gptforlove", FreeGpt::gptForLove);
|
||||
ADD_METHOD("gpt-3.5-turbo-stream-ChatgptDemo", FreeGpt::chatGptDemo);
|
||||
ADD_METHOD("llama2", FreeGpt::llama2);
|
||||
ADD_METHOD("gpt-3.5-turbo-stream-GptChatly", FreeGpt::gptChatly);
|
||||
|
||||
SPDLOG_INFO("active provider:");
|
||||
for (auto& [provider, _] : gpt_function)
|
||||
|
||||
@@ -3,7 +3,7 @@ FROM rockylinux:9.2
|
||||
RUN dnf upgrade --refresh -y
|
||||
|
||||
RUN dnf install https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm https://dl.fedoraproject.org/pub/epel/epel-next-release-latest-9.noarch.rpm -y
|
||||
RUN dnf install chromium -y
|
||||
RUN dnf install chromium nodejs -y
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
|
||||
Reference in New Issue
Block a user