add gpt/chat api prefix (#33)

* add gpt/chat api prefix

* add chatgpt backend url
This commit is contained in:
Dmitry Afanasyev 2023-10-11 14:21:50 +03:00 committed by GitHub
parent 7cd0f30c55
commit 9e3fac0b94
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
14 changed files with 142 additions and 110 deletions

View File

@ -45,7 +45,6 @@ class ChatGptModelsEnum(StrEnum):
gpt_3_5_turbo_stream_Vitalentum = "gpt-3.5-turbo-stream-Vitalentum" gpt_3_5_turbo_stream_Vitalentum = "gpt-3.5-turbo-stream-Vitalentum"
gpt_3_5_turbo_stream_GptGo = "gpt-3.5-turbo-stream-GptGo" gpt_3_5_turbo_stream_GptGo = "gpt-3.5-turbo-stream-GptGo"
gpt_3_5_turbo_stream_Aibn = "gpt-3.5-turbo-stream-Aibn" gpt_3_5_turbo_stream_Aibn = "gpt-3.5-turbo-stream-Aibn"
gpt_3_5_turbo_ChatgptDuo = "gpt-3.5-turbo-ChatgptDuo"
gpt_3_5_turbo_stream_FreeGpt = "gpt-3.5-turbo-stream-FreeGpt" gpt_3_5_turbo_stream_FreeGpt = "gpt-3.5-turbo-stream-FreeGpt"
gpt_3_5_turbo_stream_Cromicle = "gpt-3.5-turbo-stream-Cromicle" gpt_3_5_turbo_stream_Cromicle = "gpt-3.5-turbo-stream-Cromicle"
gpt_4_stream_Chatgpt4Online = "gpt-4-stream-Chatgpt4Online" gpt_4_stream_Chatgpt4Online = "gpt-4-stream-Chatgpt4Online"
@ -53,6 +52,7 @@ class ChatGptModelsEnum(StrEnum):
gpt_3_5_turbo_stream_ChatgptDemo = "gpt-3.5-turbo-stream-ChatgptDemo" gpt_3_5_turbo_stream_ChatgptDemo = "gpt-3.5-turbo-stream-ChatgptDemo"
gpt_3_5_turbo_stream_H2o = "gpt-3.5-turbo-stream-H2o" gpt_3_5_turbo_stream_H2o = "gpt-3.5-turbo-stream-H2o"
gpt_3_5_turbo_stream_gptforlove = "gpt-3.5-turbo-stream-gptforlove" gpt_3_5_turbo_stream_gptforlove = "gpt-3.5-turbo-stream-gptforlove"
gpt_3_5_turbo_ChatgptDuo = "gpt-3.5-turbo-ChatgptDuo"
@classmethod @classmethod
def values(cls) -> set[str]: def values(cls) -> set[str]:
@ -60,7 +60,4 @@ class ChatGptModelsEnum(StrEnum):
@staticmethod @staticmethod
def _deprecated() -> set[str]: def _deprecated() -> set[str]:
return { return {"gpt-3.5-turbo-stream-H2o", "gpt-3.5-turbo-stream-gptforlove", "gpt-3.5-turbo-ChatgptDuo"}
"gpt-3.5-turbo-stream-H2o",
"gpt-3.5-turbo-stream-gptforlove",
}

View File

@ -45,7 +45,7 @@ async def about_bot(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
async def website(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: async def website(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
if not update.effective_message: if not update.effective_message:
return None return None
website = urljoin(settings.DOMAIN, f"{settings.URL_PREFIX}/chat/") website = urljoin(settings.DOMAIN, f"{settings.chat_prefix}/")
await update.effective_message.reply_text(f"Веб версия: {website}") await update.effective_message.reply_text(f"Веб версия: {website}")

View File

@ -9,7 +9,7 @@ from loguru import logger
from sqlalchemy import delete, desc, select, update from sqlalchemy import delete, desc, select, update
from sqlalchemy.dialects.sqlite import insert from sqlalchemy.dialects.sqlite import insert
from constants import CHATGPT_BASE_URI, INVALID_GPT_REQUEST_MESSAGES from constants import INVALID_GPT_REQUEST_MESSAGES
from core.bot.models.chat_gpt import ChatGpt from core.bot.models.chat_gpt import ChatGpt
from infra.database.db_adapter import Database from infra.database.db_adapter import Database
from settings.config import AppSettings from settings.config import AppSettings
@ -86,7 +86,7 @@ class ChatGPTRepository:
transport = AsyncHTTPTransport(retries=3) transport = AsyncHTTPTransport(retries=3)
async with AsyncClient(base_url=self.settings.GPT_BASE_HOST, transport=transport, timeout=50) as client: async with AsyncClient(base_url=self.settings.GPT_BASE_HOST, transport=transport, timeout=50) as client:
return await client.post(CHATGPT_BASE_URI, json=data, timeout=50) return await client.post(self.settings.chatgpt_backend_url, json=data, timeout=50)
@staticmethod @staticmethod
def _build_request_data(*, question: str, chatgpt_model: str) -> dict[str, Any]: def _build_request_data(*, question: str, chatgpt_model: str) -> dict[str, Any]:

View File

@ -14,6 +14,7 @@ START_WITH_WEBHOOK="false"
# ==== domain settings ==== # ==== domain settings ====
DOMAIN="http://localhost" DOMAIN="http://localhost"
URL_PREFIX= URL_PREFIX=
CHAT_PREFIX="/chat"
# ==== gpt settings ==== # ==== gpt settings ====
GPT_BASE_HOST="http://localhost" GPT_BASE_HOST="http://localhost"

View File

@ -14,6 +14,7 @@ START_WITH_WEBHOOK="false"
# ==== domain settings ==== # ==== domain settings ====
DOMAIN="http://localhost" DOMAIN="http://localhost"
URL_PREFIX= URL_PREFIX=
CHAT_PREFIX="/chat"
# ==== gpt settings ==== # ==== gpt settings ====
GPT_BASE_HOST="http://localhost" GPT_BASE_HOST="http://localhost"

View File

@ -33,6 +33,7 @@ START_WITH_WEBHOOK="false"
# ==== domain settings ==== # ==== domain settings ====
DOMAIN="https://mydomain.com" DOMAIN="https://mydomain.com"
URL_PREFIX="/gpt" URL_PREFIX="/gpt"
CHAT_PREFIX="/chat"
# ==== gpt settings ==== # ==== gpt settings ====
GPT_BASE_HOST="http://chatgpt_chat_service:8858" GPT_BASE_HOST="http://chatgpt_chat_service:8858"

View File

@ -8,7 +8,7 @@ from pydantic import model_validator
from pydantic_settings import BaseSettings from pydantic_settings import BaseSettings
from yarl import URL from yarl import URL
from constants import API_PREFIX from constants import API_PREFIX, CHATGPT_BASE_URI
BASE_DIR = Path(__file__).parent.parent BASE_DIR = Path(__file__).parent.parent
SHARED_DIR = BASE_DIR.resolve().joinpath("shared") SHARED_DIR = BASE_DIR.resolve().joinpath("shared")
@ -76,6 +76,7 @@ class AppSettings(SentrySettings, LoggingSettings, BaseSettings):
START_WITH_WEBHOOK: bool = False START_WITH_WEBHOOK: bool = False
DOMAIN: str = "https://localhost" DOMAIN: str = "https://localhost"
URL_PREFIX: str = "" URL_PREFIX: str = ""
CHAT_PREFIX: str = ""
DB_NAME: str = "chatgpt.db" DB_NAME: str = "chatgpt.db"
DB_ECHO: bool = False DB_ECHO: bool = False
@ -107,6 +108,14 @@ class AppSettings(SentrySettings, LoggingSettings, BaseSettings):
return "/" + "/".join([self.URL_PREFIX.strip("/"), API_PREFIX.strip("/")]) return "/" + "/".join([self.URL_PREFIX.strip("/"), API_PREFIX.strip("/")])
return API_PREFIX return API_PREFIX
@cached_property
def chat_prefix(self) -> str:
return self.URL_PREFIX + self.CHAT_PREFIX
@cached_property
def chatgpt_backend_url(self) -> str:
return self.chat_prefix + CHATGPT_BASE_URI
@cached_property @cached_property
def token_part(self) -> str: def token_part(self) -> str:
return self.TELEGRAM_API_TOKEN[15:30] return self.TELEGRAM_API_TOKEN[15:30]

View File

@ -4,7 +4,7 @@ from typing import Any, Iterator
import respx import respx
from httpx import Response from httpx import Response
from constants import CHATGPT_BASE_URI from settings.config import settings
@contextmanager @contextmanager
@ -16,7 +16,7 @@ def mocked_ask_question_api(
assert_all_called=True, assert_all_called=True,
base_url=host, base_url=host,
) as respx_mock: ) as respx_mock:
ask_question_route = respx_mock.post(url=CHATGPT_BASE_URI, name="ask_question") ask_question_route = respx_mock.post(url=settings.chatgpt_backend_url, name="ask_question")
ask_question_route.return_value = return_value ask_question_route.return_value = return_value
ask_question_route.side_effect = side_effect ask_question_route.side_effect = side_effect
yield respx_mock yield respx_mock

View File

@ -112,7 +112,7 @@ const ask_gpt = async (message) => {
await new Promise((r) => setTimeout(r, 1000)); await new Promise((r) => setTimeout(r, 1000));
window.scrollTo(0, 0); window.scrollTo(0, 0);
const response = await fetch(`/backend-api/v2/conversation`, { const response = await fetch(`{{api_path}}/backend-api/v2/conversation`, {
method: `POST`, method: `POST`,
signal: window.controller.signal, signal: window.controller.signal,
headers: { headers: {
@ -151,7 +151,7 @@ const ask_gpt = async (message) => {
if ( if (
chunk.includes( chunk.includes(
`<form id="challenge-form" action="/backend-api/v2/conversation?` `<form id="challenge-form" action="{{api_path}}/backend-api/v2/conversation?`
) )
) { ) {
chunk = `cloudflare token expired, please refresh the page.`; chunk = `cloudflare token expired, please refresh the page.`;

View File

@ -927,3 +927,81 @@ boost::asio::awaitable<void> FreeGpt::h2o(std::shared_ptr<Channel> ch, nlohmann:
}); });
co_return; co_return;
} }
// Provider backend for chatgptduo.com: POSTs the prompt as form data and
// forwards the JSON "answer" field (or an error string) through the channel.
// Runs on the worker thread pool because the libcurl calls below are blocking.
boost::asio::awaitable<void> FreeGpt::chatGptDuo(std::shared_ptr<Channel> ch, nlohmann::json json) {
    co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
    boost::system::error_code err{};
    // Guarantee the channel is closed on every exit path, including early co_return.
    ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
    auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();

    // FIX: `res` was previously left uninitialized, so the `!curl` branch below
    // passed an indeterminate value to curl_easy_strerror (UB). Initialize it
    // to the failure code that branch actually reports.
    CURLcode res = CURLE_FAILED_INIT;
    CURL* curl = curl_easy_init();
    if (!curl) {
        auto error_info = std::format("curl_easy_init() failed:{}", curl_easy_strerror(res));
        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
        ch->try_send(err, error_info);
        co_return;
    }
    curl_easy_setopt(curl, CURLOPT_URL, "https://chatgptduo.com/");
    // The upstream expects the prompt twice, URL-encoded as form fields.
    // request_data must outlive curl_easy_perform: CURLOPT_POSTFIELDS does not copy.
    auto request_data = urlEncode(std::format("prompt=('{}',)&search=('{}',)&purpose=ask", prompt, prompt));
    curl_easy_setopt(curl, CURLOPT_POSTFIELDS, request_data.c_str());

    // Context passed to the captureless libcurl write callback via CURLOPT_WRITEDATA.
    struct Input {
        std::shared_ptr<Channel> ch;
    };
    Input input{ch};
    auto action_cb = [](void* contents, size_t size, size_t nmemb, void* userp) -> size_t {
        boost::system::error_code err{};
        auto input_ptr = static_cast<Input*>(userp);
        std::string data{(char*)contents, size * nmemb};
        auto& [ch] = *input_ptr;
        // Hand each body chunk to the channel on its own executor; the chunk
        // is moved into the posted handler so no dangling view escapes.
        boost::asio::post(ch->get_executor(), [=, data = std::move(data)] mutable {
            nlohmann::json json = nlohmann::json::parse(data, nullptr, false);
            if (json.is_discarded()) {
                SPDLOG_ERROR("json parse error: [{}]", data);
                ch->try_send(err, data);
                return;
            }
            if (json.contains("answer")) {
                auto str = json["answer"].get<std::string>();
                ch->try_send(err, str);
            } else {
                ch->try_send(err, std::format("Invalid JSON: {}", json.dump()));
            }
            return;
        });
        return size * nmemb;
    };
    size_t (*action_fn)(void* contents, size_t size, size_t nmemb, void* userp) = action_cb;
    curlEasySetopt(curl);
    curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, action_fn);
    curl_easy_setopt(curl, CURLOPT_WRITEDATA, &input);

    struct curl_slist* headers = nullptr;
    headers = curl_slist_append(headers, "Content-Type: application/x-www-form-urlencoded");
    curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
    // Release the header list and the easy handle on every exit path.
    ScopeExit auto_exit{[=] {
        curl_slist_free_all(headers);
        curl_easy_cleanup(curl);
    }};

    res = curl_easy_perform(curl);
    if (res != CURLE_OK) {
        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
        auto error_info = std::format("curl_easy_perform() failed:{}", curl_easy_strerror(res));
        ch->try_send(err, error_info);
        co_return;
    }
    // FIX: CURLINFO_RESPONSE_CODE writes through a `long*`; the previous
    // int32_t receiver invoked UB on LP64 platforms where long is 8 bytes.
    long response_code{0};
    curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response_code);
    if (response_code != 200) {
        co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
        ch->try_send(err, std::format("you http code:{}", response_code));
        co_return;
    }
    co_return;
}

View File

@ -14,7 +14,7 @@ struct Config {
std::string http_proxy; std::string http_proxy;
std::string api_key; std::string api_key;
std::vector<std::string> ip_white_list; std::vector<std::string> ip_white_list;
std::string zeus{"http://chatgpt_zeus_service:8860"}; std::string zeus{"http://127.0.0.1:8860"};
}; };
YCS_ADD_STRUCT(Config, client_root_path, interval, work_thread_num, host, port, chat_path, providers, enable_proxy, YCS_ADD_STRUCT(Config, client_root_path, interval, work_thread_num, host, port, chat_path, providers, enable_proxy,
http_proxy, api_key, ip_white_list, zeus) http_proxy, api_key, ip_white_list, zeus)

View File

@ -31,7 +31,6 @@ public:
boost::asio::awaitable<void> vitalentum(std::shared_ptr<Channel>, nlohmann::json); boost::asio::awaitable<void> vitalentum(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> gptGo(std::shared_ptr<Channel>, nlohmann::json); boost::asio::awaitable<void> gptGo(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> aibn(std::shared_ptr<Channel>, nlohmann::json); boost::asio::awaitable<void> aibn(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> chatGptDuo(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> chatForAi(std::shared_ptr<Channel>, nlohmann::json); boost::asio::awaitable<void> chatForAi(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> freeGpt(std::shared_ptr<Channel>, nlohmann::json); boost::asio::awaitable<void> freeGpt(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> chatGpt4Online(std::shared_ptr<Channel>, nlohmann::json); boost::asio::awaitable<void> chatGpt4Online(std::shared_ptr<Channel>, nlohmann::json);

View File

@ -427,6 +427,12 @@ auto getConversationJson(const nlohmann::json& json) {
return conversation; return conversation;
} }
// Convert a system_clock time point (default: now) to an integer count of
// epoch units T (default: milliseconds since the Unix epoch).
template <typename T = std::chrono::milliseconds>
uint64_t getTimestamp(std::chrono::time_point<std::chrono::system_clock> now = std::chrono::system_clock::now()) {
    const auto since_epoch = now.time_since_epoch();
    return static_cast<uint64_t>(std::chrono::duration_cast<T>(since_epoch).count());
}
struct HttpResponse { struct HttpResponse {
int32_t http_response_code; int32_t http_response_code;
std::vector<std::unordered_multimap<std::string, std::string>> http_header; std::vector<std::unordered_multimap<std::string, std::string>> http_header;
@ -1881,8 +1887,7 @@ boost::asio::awaitable<void> FreeGpt::aibn(std::shared_ptr<Channel> ch, nlohmann
} }
return sha_stream.str(); return sha_stream.str();
}; };
uint64_t timestamp = uint64_t timestamp = getTimestamp<std::chrono::seconds>();
std::chrono::duration_cast<std::chrono::seconds>(std::chrono::system_clock::now().time_since_epoch()).count();
std::string signature = generate_signature(timestamp, prompt); std::string signature = generate_signature(timestamp, prompt);
constexpr std::string_view request_str{R"({ constexpr std::string_view request_str{R"({
@ -1934,84 +1939,6 @@ boost::asio::awaitable<void> FreeGpt::aibn(std::shared_ptr<Channel> ch, nlohmann
co_return; co_return;
} }
// Provider backend for chatgptduo.com: POSTs the prompt as form data and
// forwards the JSON "answer" field (or an error string) through the channel.
// Hops to the worker thread pool first because the libcurl calls are blocking.
boost::asio::awaitable<void> FreeGpt::chatGptDuo(std::shared_ptr<Channel> ch, nlohmann::json json) {
co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
boost::system::error_code err{};
// Close the channel on every exit path, including early co_return.
ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();
// NOTE(review): `res` is read uninitialized by curl_easy_strerror in the
// `!curl` branch below — UB; it should be initialized (e.g. CURLE_FAILED_INIT).
CURLcode res;
CURL* curl = curl_easy_init();
if (!curl) {
auto error_info = std::format("curl_easy_init() failed:{}", curl_easy_strerror(res));
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, error_info);
co_return;
}
curl_easy_setopt(curl, CURLOPT_URL, "https://chatgptduo.com/");
// Upstream expects the prompt twice, URL-encoded as form fields.
// request_data must outlive curl_easy_perform: CURLOPT_POSTFIELDS does not copy.
auto request_data = urlEncode(std::format("prompt=('{}',)&search=('{}',)&purpose=ask", prompt, prompt));
curl_easy_setopt(curl, CURLOPT_POSTFIELDS, request_data.c_str());
// Context handed to the captureless libcurl write callback via CURLOPT_WRITEDATA.
struct Input {
std::shared_ptr<Channel> ch;
};
Input input{ch};
auto action_cb = [](void* contents, size_t size, size_t nmemb, void* userp) -> size_t {
boost::system::error_code err{};
auto input_ptr = static_cast<Input*>(userp);
std::string data{(char*)contents, size * nmemb};
auto& [ch] = *input_ptr;
// Post each body chunk to the channel's executor; the chunk is moved
// into the handler so no dangling view escapes the callback.
boost::asio::post(ch->get_executor(), [=, data = std::move(data)] mutable {
nlohmann::json json = nlohmann::json::parse(data, nullptr, false);
if (json.is_discarded()) {
SPDLOG_ERROR("json parse error: [{}]", data);
ch->try_send(err, data);
return;
}
if (json.contains("answer")) {
auto str = json["answer"].get<std::string>();
ch->try_send(err, str);
} else {
ch->try_send(err, std::format("Invalid JSON: {}", json.dump()));
}
return;
});
return size * nmemb;
};
size_t (*action_fn)(void* contents, size_t size, size_t nmemb, void* userp) = action_cb;
curlEasySetopt(curl);
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, action_fn);
curl_easy_setopt(curl, CURLOPT_WRITEDATA, &input);
struct curl_slist* headers = nullptr;
headers = curl_slist_append(headers, "Content-Type: application/x-www-form-urlencoded");
curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
// Release the header list and the easy handle on every exit path.
ScopeExit auto_exit{[=] {
curl_slist_free_all(headers);
curl_easy_cleanup(curl);
}};
res = curl_easy_perform(curl);
if (res != CURLE_OK) {
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
auto error_info = std::format("curl_easy_perform() failed:{}", curl_easy_strerror(res));
ch->try_send(err, error_info);
co_return;
}
// NOTE(review): CURLINFO_RESPONSE_CODE writes a `long`; receiving it in an
// int32_t is UB on LP64 platforms — this should be `long response_code`.
int32_t response_code;
curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response_code);
if (response_code != 200) {
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, std::format("you http code:{}", response_code));
co_return;
}
co_return;
}
boost::asio::awaitable<void> FreeGpt::chatForAi(std::shared_ptr<Channel> ch, nlohmann::json json) { boost::asio::awaitable<void> FreeGpt::chatForAi(std::shared_ptr<Channel> ch, nlohmann::json json) {
co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable)); co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
@ -2049,8 +1976,24 @@ boost::asio::awaitable<void> FreeGpt::chatForAi(std::shared_ptr<Channel> ch, nlo
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, action_fn); curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, action_fn);
curl_easy_setopt(curl, CURLOPT_WRITEDATA, &input); curl_easy_setopt(curl, CURLOPT_WRITEDATA, &input);
auto generate_signature = [](uint64_t timestamp, const std::string& message, const std::string& id) {
std::string s = std::to_string(timestamp) + ":" + id + ":" + message + ":7YN8z6d6";
unsigned char hash[SHA256_DIGEST_LENGTH];
SHA256_CTX sha256;
if (!SHA256_Init(&sha256))
throw std::runtime_error("SHA-256 initialization failed");
if (!SHA256_Update(&sha256, s.c_str(), s.length()))
throw std::runtime_error("SHA-256 update failed");
if (!SHA256_Final(hash, &sha256))
throw std::runtime_error("SHA-256 finalization failed");
std::stringstream ss;
for (int i = 0; i < SHA256_DIGEST_LENGTH; i++)
ss << std::hex << std::setw(2) << std::setfill('0') << static_cast<int>(hash[i]);
return ss.str();
};
uint64_t timestamp = getTimestamp();
constexpr std::string_view request_str{R"({ constexpr std::string_view request_str{R"({
"conversationId": "temp", "conversationId": "id_1696984301982",
"conversationType": "chat_continuous", "conversationType": "chat_continuous",
"botId": "chat_continuous", "botId": "chat_continuous",
"globalSettings": { "globalSettings": {
@ -2058,18 +2001,22 @@ boost::asio::awaitable<void> FreeGpt::chatForAi(std::shared_ptr<Channel> ch, nlo
"model": "gpt-3.5-turbo", "model": "gpt-3.5-turbo",
"messageHistorySize": 5, "messageHistorySize": 5,
"temperature": 0.7, "temperature": 0.7,
"top_p": 1, "top_p": 1
"stream": false
}, },
"botSettings": {}, "botSettings": {},
"prompt": "hello", "prompt": "hello",
"messages": [{ "messages": [{
"role": "user", "role": "user",
"content": "hello" "content": "hello"
}] }],
"sign": "15d8e701706743ffa74f8b96c97bd1f79354c7da4a97438c81c6bb259004cd77",
"timestamp": 1696984302017
})"}; })"};
nlohmann::json request = nlohmann::json::parse(request_str, nullptr, false); nlohmann::json request = nlohmann::json::parse(request_str, nullptr, false);
auto conversation_id = std::format("id_{}", timestamp - 35);
request["conversationId"] = conversation_id;
request["timestamp"] = timestamp;
request["sign"] = generate_signature(timestamp, prompt, conversation_id);
request["messages"] = getConversationJson(json); request["messages"] = getConversationJson(json);
request["prompt"] = prompt; request["prompt"] = prompt;
@ -2080,6 +2027,8 @@ boost::asio::awaitable<void> FreeGpt::chatForAi(std::shared_ptr<Channel> ch, nlo
struct curl_slist* headers = nullptr; struct curl_slist* headers = nullptr;
headers = curl_slist_append(headers, "Content-Type: application/json"); headers = curl_slist_append(headers, "Content-Type: application/json");
headers = curl_slist_append(headers, "Origin: https://chatforai.store");
headers = curl_slist_append(headers, "Referer: https://chatforai.store/");
curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
ScopeExit auto_exit{[=] { ScopeExit auto_exit{[=] {
@ -2156,8 +2105,7 @@ boost::asio::awaitable<void> FreeGpt::freeGpt(std::shared_ptr<Channel> ch, nlohm
} }
return sha_stream.str(); return sha_stream.str();
}; };
uint64_t timestamp = uint64_t timestamp = getTimestamp<std::chrono::seconds>();
std::chrono::duration_cast<std::chrono::seconds>(std::chrono::system_clock::now().time_since_epoch()).count();
std::string signature = generate_signature(timestamp, prompt); std::string signature = generate_signature(timestamp, prompt);
constexpr std::string_view request_str{R"({ constexpr std::string_view request_str{R"({
@ -2388,8 +2336,7 @@ boost::asio::awaitable<void> FreeGpt::gptalk(std::shared_ptr<Channel> ch, nlohma
headers = curl_slist_append(headers, "x-auth-appid: 2229"); headers = curl_slist_append(headers, "x-auth-appid: 2229");
headers = curl_slist_append(headers, "x-auth-openid: "); headers = curl_slist_append(headers, "x-auth-openid: ");
headers = curl_slist_append(headers, "x-auth-platform: "); headers = curl_slist_append(headers, "x-auth-platform: ");
uint64_t timestamp = uint64_t timestamp = getTimestamp<std::chrono::seconds>();
std::chrono::duration_cast<std::chrono::seconds>(std::chrono::system_clock::now().time_since_epoch()).count();
auto auth_timestamp = std::format("x-auth-timestamp: {}", timestamp); auto auth_timestamp = std::format("x-auth-timestamp: {}", timestamp);
headers = curl_slist_append(headers, auth_timestamp.c_str()); headers = curl_slist_append(headers, auth_timestamp.c_str());
curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
@ -2842,9 +2789,7 @@ boost::asio::awaitable<void> FreeGpt::chatGptDemo(std::shared_ptr<Channel> ch, n
nlohmann::json ask_request = nlohmann::json::parse(ask_json_str, nullptr, false); nlohmann::json ask_request = nlohmann::json::parse(ask_json_str, nullptr, false);
ask_request["question"] = prompt; ask_request["question"] = prompt;
ask_request["chat_id"] = chat_id; ask_request["chat_id"] = chat_id;
uint64_t timestamp = uint64_t timestamp = getTimestamp();
std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch())
.count();
request["timestamp"] = timestamp; request["timestamp"] = timestamp;
std::string ask_request_str = ask_request.dump(); std::string ask_request_str = ask_request.dump();
SPDLOG_INFO("ask_request_str: [{}]", ask_request_str); SPDLOG_INFO("ask_request_str: [{}]", ask_request_str);

View File

@ -133,7 +133,8 @@ boost::asio::awaitable<void> startSession(boost::asio::ip::tcp::socket sock, Con
co_return; co_return;
} }
auto assets_path = std::format("{}{}", cfg.chat_path, ASSETS_PATH); auto assets_path = std::format("{}{}", cfg.chat_path, ASSETS_PATH);
SPDLOG_INFO("assets_path: [{}]", assets_path); auto api_path = std::format("{}{}", cfg.chat_path, API_PATH);
SPDLOG_INFO("assets_path: [{}], api_path: [{}]", assets_path, api_path);
while (true) { while (true) {
boost::beast::flat_buffer buffer; boost::beast::flat_buffer buffer;
boost::beast::http::request<boost::beast::http::string_body> request; boost::beast::http::request<boost::beast::http::string_body> request;
@ -179,6 +180,7 @@ boost::asio::awaitable<void> startSession(boost::asio::ip::tcp::socket sock, Con
return std::regex_replace(str, pattern, replacement); return std::regex_replace(str, pattern, replacement);
}; };
data["chat_path"] = format_string(cfg.chat_path); data["chat_path"] = format_string(cfg.chat_path);
data["api_path"] = cfg.chat_path;
} else { } else {
data["chat_path"] = cfg.chat_path; data["chat_path"] = cfg.chat_path;
} }
@ -211,7 +213,7 @@ boost::asio::awaitable<void> startSession(boost::asio::ip::tcp::socket sock, Con
boost::beast::http::message_generator rsp = std::move(res); boost::beast::http::message_generator rsp = std::move(res);
co_await boost::beast::async_write(stream, std::move(rsp), use_nothrow_awaitable); co_await boost::beast::async_write(stream, std::move(rsp), use_nothrow_awaitable);
} }
} else if (request.target() == API_PATH) { } else if (request.target() == api_path) {
std::string model; std::string model;
nlohmann::json request_body; nlohmann::json request_body;
bool flag = false; bool flag = false;
@ -344,7 +346,6 @@ int main(int argc, char** argv) {
ADD_METHOD("gpt-3.5-turbo-stream-Vitalentum", FreeGpt::vitalentum); ADD_METHOD("gpt-3.5-turbo-stream-Vitalentum", FreeGpt::vitalentum);
ADD_METHOD("gpt-3.5-turbo-stream-GptGo", FreeGpt::gptGo); ADD_METHOD("gpt-3.5-turbo-stream-GptGo", FreeGpt::gptGo);
ADD_METHOD("gpt-3.5-turbo-stream-Aibn", FreeGpt::aibn); ADD_METHOD("gpt-3.5-turbo-stream-Aibn", FreeGpt::aibn);
ADD_METHOD("gpt-3.5-turbo-ChatgptDuo", FreeGpt::chatGptDuo);
ADD_METHOD("gpt-3.5-turbo-stream-FreeGpt", FreeGpt::freeGpt); ADD_METHOD("gpt-3.5-turbo-stream-FreeGpt", FreeGpt::freeGpt);
ADD_METHOD("gpt-4-stream-Chatgpt4Online", FreeGpt::chatGpt4Online); ADD_METHOD("gpt-4-stream-Chatgpt4Online", FreeGpt::chatGpt4Online);
ADD_METHOD("gpt-3.5-turbo-stream-gptalk", FreeGpt::gptalk); ADD_METHOD("gpt-3.5-turbo-stream-gptalk", FreeGpt::gptalk);