light refactoring (#35)

* update poetry lock

* simple refactoring

* move gpt-3.5-turbo-stream-aivvm to deprecated provider
This commit is contained in:
Dmitry Afanasyev
2023-10-12 12:12:05 +03:00
committed by GitHub
parent 7ef8d6e19d
commit 94b50f1b7c
14 changed files with 104 additions and 146 deletions

View File

@@ -1005,3 +1005,70 @@ boost::asio::awaitable<void> FreeGpt::chatGptDuo(std::shared_ptr<Channel> ch, nl
}
co_return;
}
/// Streams a gpt-3.5-turbo chat completion from chat.aivvm.com to the caller.
///
/// @param ch   channel that receives response chunks (or an error string on
///             connect failure); always closed before the coroutine finishes.
/// @param json incoming conversation; its messages are injected into the
///             request body via getConversationJson.
boost::asio::awaitable<void> FreeGpt::aivvm(std::shared_ptr<Channel> ch, nlohmann::json json) {
    boost::system::error_code err{};
    // Close the channel on every exit path (success, error, or exception).
    ScopeExit auto_exit{[&] { ch->close(); }};
    constexpr std::string_view host = "chat.aivvm.com";
    constexpr std::string_view port = "443";
    constexpr std::string_view user_agent{
        R"(Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:102.0) Gecko/20100101 Firefox/102.0)"};
    boost::asio::ssl::context ctx(boost::asio::ssl::context::tls);
    // No certificate pinning for this third-party endpoint; verification is off.
    ctx.set_verify_mode(boost::asio::ssl::verify_none);
    auto client = co_await createHttpClient(ctx, host, port);
    if (!client.has_value()) {
        SPDLOG_ERROR("createHttpClient: {}", client.error());
        // Forward the connection error to the caller before bailing out.
        co_await ch->async_send(err, client.error(), use_nothrow_awaitable);
        co_return;
    }
    auto& stream_ = client.value();
    // Browser-like headers: the endpoint rejects requests that do not look
    // like they come from its own web front end.
    boost::beast::http::request<boost::beast::http::string_body> req{boost::beast::http::verb::post, "/api/chat", 11};
    req.set(boost::beast::http::field::host, host);
    req.set(boost::beast::http::field::user_agent, user_agent);
    req.set("Accept", "*/*");
    req.set("accept-language", "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2");
    req.set("origin", "https://chat.aivvm.com");
    req.set("referer", "https://chat.aivvm.com/zh");
    req.set(boost::beast::http::field::content_type, "application/json");
    req.set("sec-fetch-dest", "empty");
    req.set("sec-fetch-mode", "cors");
    req.set("sec-fetch-site", "same-origin");
    req.set("DNT", "1");
    // Request template; "messages" is replaced with the real conversation below.
    constexpr std::string_view json_str = R"({
        "model":{
            "id":"gpt-3.5-turbo",
            "name":"GPT-3.5",
            "maxLength":12000,
            "tokenLimit":4096
        },
        "messages":[
            {
                "role":"user",
                "content":"hello"
            }
        ],
        "key":"",
        "prompt":"You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
        "temperature":0.7
    })";
    nlohmann::json request = nlohmann::json::parse(json_str, nullptr, false);
    request["messages"] = getConversationJson(json);
    SPDLOG_INFO("{}", request.dump(2));
    req.body() = request.dump();
    req.prepare_payload();
    // Stream chunked response data straight to the channel. The return value
    // of sendRequestRecvChunk was previously bound to an unused local
    // ("auto result = ..."); discard it explicitly instead.
    co_await sendRequestRecvChunk(ch, stream_, req, 200, [&ch](std::string str) {
        boost::system::error_code err{};
        if (!str.empty())
            ch->try_send(err, str);
    });
    co_return;
}

View File

@@ -26,7 +26,6 @@ public:
boost::asio::awaitable<void> you(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> binjie(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> chatBase(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> aivvm(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> ylokh(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> vitalentum(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> gptGo(std::shared_ptr<Channel>, nlohmann::json);

View File

@@ -1471,73 +1471,6 @@ boost::asio::awaitable<void> FreeGpt::chatBase(std::shared_ptr<Channel> ch, nloh
co_return;
}
/// Streams a gpt-3.5-turbo chat completion from chat.aivvm.com to the caller.
///
/// @param ch   channel that receives response chunks (or an error string on
///             connect failure); always closed before the coroutine finishes.
/// @param json incoming conversation; its messages are injected into the
///             request body via getConversationJson.
boost::asio::awaitable<void> FreeGpt::aivvm(std::shared_ptr<Channel> ch, nlohmann::json json) {
    boost::system::error_code err{};
    // Close the channel on every exit path (success, error, or exception).
    ScopeExit auto_exit{[&] { ch->close(); }};
    constexpr std::string_view host = "chat.aivvm.com";
    constexpr std::string_view port = "443";
    constexpr std::string_view user_agent{
        R"(Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:102.0) Gecko/20100101 Firefox/102.0)"};
    boost::asio::ssl::context ctx(boost::asio::ssl::context::tls);
    // No certificate pinning for this third-party endpoint; verification is off.
    ctx.set_verify_mode(boost::asio::ssl::verify_none);
    auto client = co_await createHttpClient(ctx, host, port);
    if (!client.has_value()) {
        SPDLOG_ERROR("createHttpClient: {}", client.error());
        // Forward the connection error to the caller before bailing out.
        co_await ch->async_send(err, client.error(), use_nothrow_awaitable);
        co_return;
    }
    auto& stream_ = client.value();
    // Browser-like headers so the request resembles the site's own web client.
    boost::beast::http::request<boost::beast::http::string_body> req{boost::beast::http::verb::post, "/api/chat", 11};
    req.set(boost::beast::http::field::host, host);
    req.set(boost::beast::http::field::user_agent, user_agent);
    req.set("Accept", "*/*");
    req.set("accept-language", "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2");
    req.set("origin", "https://chat.aivvm.com");
    req.set("referer", "https://chat.aivvm.com/zh");
    req.set(boost::beast::http::field::content_type, "application/json");
    req.set("sec-fetch-dest", "empty");
    req.set("sec-fetch-mode", "cors");
    req.set("sec-fetch-site", "same-origin");
    req.set("DNT", "1");
    // Request template; "messages" is replaced with the real conversation below.
    constexpr std::string_view json_str = R"({
        "model":{
            "id":"gpt-3.5-turbo",
            "name":"GPT-3.5",
            "maxLength":12000,
            "tokenLimit":4096
        },
        "messages":[
            {
                "role":"user",
                "content":"hello"
            }
        ],
        "key":"",
        "prompt":"You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
        "temperature":0.7
    })";
    nlohmann::json request = nlohmann::json::parse(json_str, nullptr, false);
    request["messages"] = getConversationJson(json);
    // NOTE(review): logs the full request body, including user conversation
    // content — confirm this is acceptable for the deployment's log policy.
    SPDLOG_INFO("{}", request.dump(2));
    req.body() = request.dump();
    req.prepare_payload();
    // Forward each non-empty chunk of the streamed response to the channel.
    auto result = co_await sendRequestRecvChunk(ch, stream_, req, 200, [&ch](std::string str) {
        boost::system::error_code err{};
        if (!str.empty())
            ch->try_send(err, str);
    });
    co_return;
}
boost::asio::awaitable<void> FreeGpt::ylokh(std::shared_ptr<Channel> ch, nlohmann::json json) {
boost::system::error_code err{};
ScopeExit auto_exit{[&] { ch->close(); }};

View File

@@ -341,7 +341,6 @@ int main(int argc, char** argv) {
ADD_METHOD("gpt-4-turbo-stream-you", FreeGpt::you);
ADD_METHOD("gpt-3-stream-binjie", FreeGpt::binjie);
ADD_METHOD("gpt-4-stream-ChatBase", FreeGpt::chatBase);
ADD_METHOD("gpt-3.5-turbo-stream-aivvm", FreeGpt::aivvm);
ADD_METHOD("gpt-3.5-turbo-16k-stream-Ylokh", FreeGpt::ylokh);
ADD_METHOD("gpt-3.5-turbo-stream-Vitalentum", FreeGpt::vitalentum);
ADD_METHOD("gpt-3.5-turbo-stream-GptGo", FreeGpt::gptGo);

View File

@@ -1,16 +1,9 @@
FROM ubuntu:23.04
FROM rockylinux:9.2
RUN rm /bin/sh && ln -s /bin/bash /bin/sh
RUN apt-get update -y
RUN apt-get install -y python3/lunar python3.11-venv dbus-x11/lunar curl nodejs/lunar tree
RUN dnf upgrade --refresh -y
# install Chrome
# https://stackoverflow.com/questions/70955307/how-to-install-google-chrome-in-a-docker-container
RUN curl -LO https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb
RUN apt-get install -y ./google-chrome-stable_current_amd64.deb
RUN rm google-chrome-stable_current_amd64.deb
# Check chrome version
RUN echo "Chrome: " && google-chrome --version
RUN dnf install https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm https://dl.fedoraproject.org/pub/epel/epel-next-release-latest-9.noarch.rpm -y
RUN dnf install chromium -y
WORKDIR /app

View File

@@ -1,44 +1,4 @@
altgraph==0.17.4
attrs==23.1.0
black==23.9.1
blinker==1.6.2
browser-cookie3==0.19.1
certifi==2023.7.22
charset-normalizer==3.3.0
click==8.1.7
docopt==0.6.2
Flask==3.0.0
Flask-Cors==4.0.0
h11==0.14.0
idna==3.4
itsdangerous==2.1.2
jeepney==0.8.0
Jinja2==3.1.2
Js2Py==0.74
lz4==4.3.2
MarkupSafe==2.1.3
mypy-extensions==1.0.0
outcome==1.2.0
packaging==23.2
pathspec==0.11.2
pipreqs==0.4.13
platformdirs==3.11.0
pycryptodomex==3.19.0
PyExecJS==1.5.1
pyinstaller==6.0.0
pyinstaller-hooks-contrib==2023.9
pyjsparser==2.7.1
PySocks==1.7.1
requests==2.31.0
selenium==4.13.0
six==1.16.0
sniffio==1.3.0
sortedcontainers==2.4.0
trio==0.22.2
trio-websocket==0.11.1
typing_extensions==4.8.0
tzlocal==5.1
urllib3==2.0.6
selenium==4.14.0
Werkzeug==3.0.0
wsproto==1.2.0
yarg==0.1.9

View File

@@ -27,7 +27,7 @@ def deepai_refresh():
driver.get("https://deepai.org")
WebDriverWait(driver, 15)
cookies = driver.get_cookies()
print(cookies)
print(cookies, flush=True)
except Exception:
traceback.print_exc()
driver.quit()
@@ -60,6 +60,6 @@ if __name__ == "__main__":
thread.start()
port = os.getenv("PORT", "8860")
ip = os.getenv("IP", "0.0.0.0")
print(f"start zeus at {ip}:{port}")
print(f"start zeus at {ip}:{port}", flush=True)
server = ThreadedWSGIServer(ip, port, app)
server.serve_forever()