add zeus-Vercel and update html (#46)

Dmitry Afanasyev 2023-10-27 18:37:25 +03:00 committed by GitHub
parent 2e7d4880e0
commit 11cfccbb01
10 changed files with 594 additions and 441 deletions


@@ -59,6 +59,7 @@ class ChatGptModelsEnum(StrEnum):
     gpt_3_5_turbo_stream_FakeGpt = "gpt-3.5-turbo-stream-FakeGpt"
     gpt_3_5_turbo_stream_GeekGpt = "gpt-3.5-turbo-stream-GeekGpt"
     gpt_3_5_turbo_stream_gptforlove = "gpt-3.5-turbo-stream-gptforlove"
+    gpt_3_5_turbo_stream_Vercel = "gpt-3.5-turbo-stream-Vercel"

     @classmethod
     def values(cls) -> set[str]:
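The new member mirrors the provider string registered on the C++ side (see the ADD_METHOD hunk further down), keeping the bot's model list in sync with freegpt. A minimal sketch of how such a StrEnum exposes its values for request validation — the values() body is truncated above, so this implementation is an assumption:

    # Sketch; only the Vercel member is taken from this diff.
    from enum import StrEnum

    class ChatGptModelsEnum(StrEnum):
        gpt_3_5_turbo_stream_Vercel = "gpt-3.5-turbo-stream-Vercel"

        @classmethod
        def values(cls) -> set[str]:
            # Raw model identifiers, e.g. for validating an incoming model name.
            return {member.value for member in cls}

    assert "gpt-3.5-turbo-stream-Vercel" in ChatGptModelsEnum.values()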


@@ -22,7 +22,7 @@ class ChatGptAdmin(ModelView, model=ChatGpt):

 def create_admin(application: "Application") -> Admin:
     admin = Admin(
-        title='Chat GPT admin',
+        title="Chat GPT admin",
         app=application.fastapi_app,
         engine=application.db.async_engine,
         base_url=os.path.join(settings.URL_PREFIX, "admin"),


@@ -627,13 +627,12 @@ ul {
     }
 }

-@media screen and (max-height: 640px) {
+@media screen and (max-height: 640px) and (min-width: 990px) {
     body {
         height: 87vh
     }
 }

 .shown {
     display: flex;
 }


@@ -57,7 +57,7 @@
     </style>
     <script src="{{chat_path}}/assets/js/highlight.min.js"></script>
     <script src="{{chat_path}}/assets/js/highlightjs-copy.min.js"></script>
-    <script>window.conversation_id = {{chat_id}} </script>
+    <script>window.conversation_id = "{{chat_id}}" </script>
     <title>ChatGPT</title>
 </head>
 <body>
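The quoting fix above matters because chat_id renders as a bare UUID: without quotes the generated script is a JavaScript syntax error, with quotes it becomes a string assignment. A small sketch, assuming a Jinja-style template engine (the {{...}} syntax suggests one) and an illustrative UUID:

    from jinja2 import Template

    chat_id = "50611ac2-0d43-4f2f-a4c5-8757ed71d9fc"  # illustrative value
    print(Template('window.conversation_id = {{chat_id}}').render(chat_id=chat_id))
    # window.conversation_id = 50611ac2-...   -> bare token, invalid JavaScript
    print(Template('window.conversation_id = "{{chat_id}}"').render(chat_id=chat_id))
    # window.conversation_id = "50611ac2-..." -> valid string assignment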

File diff suppressed because it is too large


@@ -38,6 +38,7 @@ public:
     boost::asio::awaitable<void> geekGpt(std::shared_ptr<Channel>, nlohmann::json);
     boost::asio::awaitable<void> chatGptAi(std::shared_ptr<Channel>, nlohmann::json);
     boost::asio::awaitable<void> fakeGpt(std::shared_ptr<Channel>, nlohmann::json);
+    boost::asio::awaitable<void> vercel(std::shared_ptr<Channel>, nlohmann::json);

 private:
     boost::asio::awaitable<std::expected<boost::beast::ssl_stream<boost::beast::tcp_stream>, std::string>>


@@ -3042,3 +3042,121 @@ boost::asio::awaitable<void> FreeGpt::fakeGpt(std::shared_ptr<Channel> ch, nlohm
         co_return;
     }
 }
+
+boost::asio::awaitable<void> FreeGpt::vercel(std::shared_ptr<Channel> ch, nlohmann::json json) {
+    co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
+    ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
+    boost::system::error_code err{};
+
+    auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
+
+    auto create_random_number = [] {
+        std::random_device rd;
+        std::mt19937 mt(rd());
+        std::uniform_int_distribution<int> distribution(99, 999);
+        int random_number = distribution(mt);
+        return random_number;
+    };
+    constexpr std::string_view user_agent_str{
+        R"(Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.{}.{} Safari/537.36)"};
+    std::unordered_multimap<std::string, std::string> headers{
+        {"Accept", "*/*"},
+        {"authority", "sdk.vercel.ai"},
+        {"content-type", "application/json"},
+        {"referer", "https://sdk.vercel.ai/"},
+        {"origin", "https://sdk.vercel.ai"},
+        {"sec-ch-ua", R"("Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117")"},
+        {"sec-ch-ua-mobile", R"(?0)"},
+        {"sec-ch-ua-platform", R"("macOS")"},
+        {"cache-control", "no-cache"},
+        {"pragma", "no-cache"},
+    };
+    headers.emplace("user-agent", std::format(user_agent_str, create_random_number(), create_random_number()));
+
+    std::string recv;
+    Curl curl;
+    auto ret = curl.setUrl("https://sdk.vercel.ai/openai.jpeg")
+                   .setProxy(m_cfg.http_proxy)
+                   .setRecvHeadersCallback([](std::string) { return; })
+                   .setRecvBodyCallback([&](std::string str) mutable {
+                       recv.append(str);
+                       return;
+                   })
+                   .clearHeaders()
+                   .setHttpHeaders(headers)
+                   .perform();
+    if (ret.has_value()) {
+        SPDLOG_ERROR("https://sdk.vercel.ai/openai.jpeg: [{}]", ret.value());
+        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
+        ch->try_send(err, ret.value());
+        co_return;
+    }
+    nlohmann::json request;
+    request["data"] = std::move(recv);
+    recv.clear();
+
+    auto vercel_rsp = callZeus(std::format("{}/vercel", m_cfg.zeus), request.dump());
+    if (!vercel_rsp.has_value()) {
+        SPDLOG_ERROR("callZeus vercel error: {}", vercel_rsp.error());
+        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
+        ch->try_send(err, vercel_rsp.error());
+        co_return;
+    }
+
+    headers.erase("custom-encoding");
+    headers.erase("user-agent");
+    headers.emplace("custom-encoding", vercel_rsp.value()["data"]);
+    headers.emplace("user-agent", std::format(user_agent_str, create_random_number(), create_random_number()));
+
+    for (int i = 0; i < 20; i++) {
+        auto ret =
+            curl.setUrl("https://sdk.vercel.ai/api/generate")
+                .setProxy(m_cfg.http_proxy)
+                .setRecvHeadersCallback([](std::string) { return; })
+                .setRecvBodyCallback([&](std::string str) mutable {
+                    if (str == "Internal Server Error" || str == "Rate limit exceeded") {
+                        SPDLOG_WARN("vercel: [{}]", str);
+                        return;
+                    }
+                    boost::asio::post(ch->get_executor(), [=, str = std::move(str)] { ch->try_send(err, str); });
+                    return;
+                })
+                .setBody([&] {
+                    constexpr std::string_view json_str = R"({
+                        "model":"openai:gpt-3.5-turbo",
+                        "messages":[
+                            {
+                                "role":"user",
+                                "content":"hello"
+                            }
+                        ],
+                        "playgroundId":"403bce4c-7eb6-47b0-b1b5-0cb6b2469f70",
+                        "chatIndex":0,
+                        "temperature":0.7,
+                        "maximumLength":4096,
+                        "topP":1,
+                        "topK":1,
+                        "presencePenalty":1,
+                        "frequencyPenalty":1,
+                        "stopSequences":[]
+                    })";
+                    nlohmann::json request = nlohmann::json::parse(json_str, nullptr, false);
+                    request["messages"] = getConversationJson(json);
+                    request["playgroundId"] = createUuidString();
+                    SPDLOG_INFO("request: [{}]", request.dump(2));
+                    return request.dump();
+                }())
+                .clearHeaders()
+                .setHttpHeaders(headers)
+                .perform();
+        if (ret.has_value()) {
+            SPDLOG_WARN("https://sdk.vercel.ai/api/generate: [{}]", ret.value());
+            co_await timeout(std::chrono::seconds(2));
+            continue;
+        }
+        co_return;
+    }
+    co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
+    ch->try_send(err, "call sdk.vercel.ai error");
+    co_return;
+}
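The coroutine above is a three-step handshake: fetch the base64 anti-bot seed from /openai.jpeg, have the zeus sidecar evaluate it into a custom-encoding token (the callZeus call), then POST /api/generate with that header, retrying up to 20 times. A rough Python sketch of the same flow — the endpoints, retry count, and custom-encoding header come from this diff, while the zeus address and the use of requests here are assumptions:

    import json
    import uuid
    import requests  # client assumed for the sketch; the C++ side uses its Curl wrapper

    headers = {"user-agent": "Mozilla/5.0", "origin": "https://sdk.vercel.ai"}

    # 1. Fetch the base64-encoded anti-bot challenge.
    seed = requests.get("https://sdk.vercel.ai/openai.jpeg", headers=headers).text

    # 2. Let zeus run the challenge JS and build the token (address assumed,
    #    matching the curl hint in the zeus diff below).
    token = requests.post("http://127.0.0.1:8860/vercel", json={"data": seed}).json()["data"]

    # 3. Stream the completion; retry transient failures like the C++ loop does.
    body = {"model": "openai:gpt-3.5-turbo",
            "messages": [{"role": "user", "content": "hello"}],
            "playgroundId": str(uuid.uuid4()), "chatIndex": 0,
            "temperature": 0.7, "maximumLength": 4096, "topP": 1, "topK": 1,
            "presencePenalty": 1, "frequencyPenalty": 1, "stopSequences": []}
    for _ in range(20):
        rsp = requests.post("https://sdk.vercel.ai/api/generate",
                            headers={**headers, "custom-encoding": token},
                            data=json.dumps(body), stream=True)
        if rsp.ok:
            for chunk in rsp.iter_content(chunk_size=None, decode_unicode=True):
                print(chunk, end="")
            break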


@@ -353,6 +353,7 @@ int main(int argc, char** argv) {
     ADD_METHOD("llama2", FreeGpt::llama2);
     ADD_METHOD("gpt-3.5-turbo-stream-chatGptAi", FreeGpt::chatGptAi);
     ADD_METHOD("gpt-3.5-turbo-stream-FakeGpt", FreeGpt::fakeGpt);
+    ADD_METHOD("gpt-3.5-turbo-stream-Vercel", FreeGpt::vercel);

     SPDLOG_INFO("active provider:");
     for (auto& [provider, _] : gpt_function)


@@ -2,3 +2,4 @@ Flask==3.0.0
 PyExecJS==1.5.1
 selenium==4.14.0
 Werkzeug==3.0.0
+requests==2.31.0


@@ -1,3 +1,4 @@
+import base64
 import json
 import os
 import threading
@@ -5,7 +6,7 @@ import time
 import traceback

 import execjs
-from flask import Flask
+from flask import Flask, request
 from selenium import webdriver
 from selenium.webdriver.support.ui import WebDriverWait
 from werkzeug.serving import ThreadedWSGIServer
@@ -55,6 +56,26 @@ return o.toString()
     return json.dumps(dict)
+
+
+# curl -X POST -d '{}' -H "Content-Type: application/json" http://127.0.0.1:8860/vercel
+@app.route("/vercel", methods=["POST"])
+def get_anti_bot_token():
+    request_body = json.loads(request.data)
+    raw_data = json.loads(base64.b64decode(request_body["data"], validate=True))
+    js_script = """const globalThis={marker:"mark"};String.prototype.fontcolor=function(){return `<font>${this}</font>`};
+return (%s)(%s)""" % (
+        raw_data["c"],
+        raw_data["a"],
+    )
+    raw_token = json.dumps(
+        {"r": execjs.compile(js_script).call(""), "t": raw_data["t"]},
+        separators=(",", ":"),
+    )
+    dict = {"data": base64.b64encode(raw_token.encode("utf-16le")).decode()}
+    return json.dumps(dict)


 if __name__ == "__main__":
     thread = threading.Thread(target=deepai_refresh)
     thread.start()
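For reference, the payload this endpoint handles is base64-encoded JSON with a challenge function c, its argument a, and a timestamp t; get_anti_bot_token() evaluates (c)(a) in a stubbed JS environment and re-encodes the result as UTF-16LE base64, which freegpt then sends as the custom-encoding header. A round-trip sketch of just the encoding — the challenge below is a stand-in, not a real openai.jpeg payload (real ones carry obfuscated JS in "c"):

    import base64
    import json

    payload = {"c": "(a)=>a+1", "a": 41, "t": "1698400000"}         # stand-in challenge
    data = base64.b64encode(json.dumps(payload).encode()).decode()  # freegpt POSTs {"data": data}

    raw_data = json.loads(base64.b64decode(data, validate=True))
    result = 42  # stands in for execjs evaluating (raw_data["c"])(raw_data["a"])
    raw_token = json.dumps({"r": result, "t": raw_data["t"]}, separators=(",", ":"))
    print(base64.b64encode(raw_token.encode("utf-16le")).decode())  # custom-encoding value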