mirror of https://github.com/Balshgit/gpt_chat_bot.git
synced 2025-09-11 22:30:41 +03:00
add gpt/chat api prefix (#33)
* add gpt/chat api prefix
* add chatgpt backend url
This commit is contained in:
parent 7cd0f30c55
commit 9e3fac0b94
@@ -45,7 +45,6 @@ class ChatGptModelsEnum(StrEnum):
gpt_3_5_turbo_stream_Vitalentum = "gpt-3.5-turbo-stream-Vitalentum"
gpt_3_5_turbo_stream_GptGo = "gpt-3.5-turbo-stream-GptGo"
gpt_3_5_turbo_stream_Aibn = "gpt-3.5-turbo-stream-Aibn"
gpt_3_5_turbo_ChatgptDuo = "gpt-3.5-turbo-ChatgptDuo"
gpt_3_5_turbo_stream_FreeGpt = "gpt-3.5-turbo-stream-FreeGpt"
gpt_3_5_turbo_stream_Cromicle = "gpt-3.5-turbo-stream-Cromicle"
gpt_4_stream_Chatgpt4Online = "gpt-4-stream-Chatgpt4Online"
@@ -53,6 +52,7 @@ class ChatGptModelsEnum(StrEnum):
gpt_3_5_turbo_stream_ChatgptDemo = "gpt-3.5-turbo-stream-ChatgptDemo"
gpt_3_5_turbo_stream_H2o = "gpt-3.5-turbo-stream-H2o"
gpt_3_5_turbo_stream_gptforlove = "gpt-3.5-turbo-stream-gptforlove"
gpt_3_5_turbo_ChatgptDuo = "gpt-3.5-turbo-ChatgptDuo"

@classmethod
def values(cls) -> set[str]:
@@ -60,7 +60,4 @@ class ChatGptModelsEnum(StrEnum):

@staticmethod
def _deprecated() -> set[str]:
return {
"gpt-3.5-turbo-stream-H2o",
"gpt-3.5-turbo-stream-gptforlove",
}
return {"gpt-3.5-turbo-stream-H2o", "gpt-3.5-turbo-stream-gptforlove", "gpt-3.5-turbo-ChatgptDuo"}
@@ -45,7 +45,7 @@ async def about_bot(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
async def website(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
if not update.effective_message:
return None
website = urljoin(settings.DOMAIN, f"{settings.URL_PREFIX}/chat/")
website = urljoin(settings.DOMAIN, f"{settings.chat_prefix}/")
await update.effective_message.reply_text(f"Веб версия: {website}")
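With the new chat_prefix property the /website command builds its link as a path joined onto DOMAIN. A quick illustration using the production values from the .env hunk further down (URL_PREFIX="/gpt", CHAT_PREFIX="/chat"):

from urllib.parse import urljoin

DOMAIN = "https://mydomain.com"
chat_prefix = "/gpt" + "/chat"  # settings.chat_prefix = URL_PREFIX + CHAT_PREFIX

print(urljoin(DOMAIN, f"{chat_prefix}/"))  # https://mydomain.com/gpt/chat/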
@@ -9,7 +9,7 @@ from loguru import logger
from sqlalchemy import delete, desc, select, update
from sqlalchemy.dialects.sqlite import insert

from constants import CHATGPT_BASE_URI, INVALID_GPT_REQUEST_MESSAGES
from constants import INVALID_GPT_REQUEST_MESSAGES
from core.bot.models.chat_gpt import ChatGpt
from infra.database.db_adapter import Database
from settings.config import AppSettings
@@ -86,7 +86,7 @@ class ChatGPTRepository:

transport = AsyncHTTPTransport(retries=3)
async with AsyncClient(base_url=self.settings.GPT_BASE_HOST, transport=transport, timeout=50) as client:
return await client.post(CHATGPT_BASE_URI, json=data, timeout=50)
return await client.post(self.settings.chatgpt_backend_url, json=data, timeout=50)

@staticmethod
def _build_request_data(*, question: str, chatgpt_model: str) -> dict[str, Any]:
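Because the AsyncClient is constructed with base_url=GPT_BASE_HOST, the new settings.chatgpt_backend_url only has to be a path; httpx merges a relative URL onto the client's base_url. A minimal sketch, assuming CHATGPT_BASE_URI is the /backend-api/v2/conversation endpoint seen in the JavaScript hunk below:

from httpx import AsyncClient, AsyncHTTPTransport

GPT_BASE_HOST = "http://localhost"  # from the local .env
chatgpt_backend_url = "/gpt/chat" + "/backend-api/v2/conversation"  # assumed composition

async def ask(data: dict) -> int:
    transport = AsyncHTTPTransport(retries=3)
    async with AsyncClient(base_url=GPT_BASE_HOST, transport=transport, timeout=50) as client:
        # Resolves to http://localhost/gpt/chat/backend-api/v2/conversation
        response = await client.post(chatgpt_backend_url, json=data, timeout=50)
        return response.status_code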
@@ -14,6 +14,7 @@ START_WITH_WEBHOOK="false"
# ==== domain settings ====
DOMAIN="http://localhost"
URL_PREFIX=
CHAT_PREFIX="/chat"

# ==== gpt settings ====
GPT_BASE_HOST="http://localhost"
@@ -14,6 +14,7 @@ START_WITH_WEBHOOK="false"
# ==== domain settings ====
DOMAIN="http://localhost"
URL_PREFIX=
CHAT_PREFIX="/chat"

# ==== gpt settings ====
GPT_BASE_HOST="http://localhost"
@@ -33,6 +33,7 @@ START_WITH_WEBHOOK="false"
# ==== domain settings ====
DOMAIN="https://mydomain.com"
URL_PREFIX="/gpt"
CHAT_PREFIX="/chat"

# ==== gpt settings ====
GPT_BASE_HOST="http://chatgpt_chat_service:8858"
@@ -8,7 +8,7 @@ from pydantic import model_validator
from pydantic_settings import BaseSettings
from yarl import URL

from constants import API_PREFIX
from constants import API_PREFIX, CHATGPT_BASE_URI

BASE_DIR = Path(__file__).parent.parent
SHARED_DIR = BASE_DIR.resolve().joinpath("shared")
@@ -76,6 +76,7 @@ class AppSettings(SentrySettings, LoggingSettings, BaseSettings):
START_WITH_WEBHOOK: bool = False
DOMAIN: str = "https://localhost"
URL_PREFIX: str = ""
CHAT_PREFIX: str = ""

DB_NAME: str = "chatgpt.db"
DB_ECHO: bool = False
@@ -107,6 +108,14 @@ class AppSettings(SentrySettings, LoggingSettings, BaseSettings):
return "/" + "/".join([self.URL_PREFIX.strip("/"), API_PREFIX.strip("/")])
return API_PREFIX

@cached_property
def chat_prefix(self) -> str:
return self.URL_PREFIX + self.CHAT_PREFIX

@cached_property
def chatgpt_backend_url(self) -> str:
return self.chat_prefix + CHATGPT_BASE_URI

@cached_property
def token_part(self) -> str:
return self.TELEGRAM_API_TOKEN[15:30]
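Taken together with the production .env values above, the new cached properties compose as follows; API_PREFIX and CHATGPT_BASE_URI live in constants and are not shown in this diff, so the concrete values below are assumptions:

URL_PREFIX = "/gpt"
CHAT_PREFIX = "/chat"
API_PREFIX = "/api"                                # assumed value
CHATGPT_BASE_URI = "/backend-api/v2/conversation"  # assumed value

api_prefix = "/" + "/".join([URL_PREFIX.strip("/"), API_PREFIX.strip("/")])
chat_prefix = URL_PREFIX + CHAT_PREFIX
chatgpt_backend_url = chat_prefix + CHATGPT_BASE_URI

print(api_prefix)           # /gpt/api
print(chat_prefix)          # /gpt/chat
print(chatgpt_backend_url)  # /gpt/chat/backend-api/v2/conversation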
@@ -4,7 +4,7 @@ from typing import Any, Iterator
import respx
from httpx import Response

from constants import CHATGPT_BASE_URI
from settings.config import settings


@contextmanager
@@ -16,7 +16,7 @@ def mocked_ask_question_api(
assert_all_called=True,
base_url=host,
) as respx_mock:
ask_question_route = respx_mock.post(url=CHATGPT_BASE_URI, name="ask_question")
ask_question_route = respx_mock.post(url=settings.chatgpt_backend_url, name="ask_question")
ask_question_route.return_value = return_value
ask_question_route.side_effect = side_effect
yield respx_mock
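A hedged usage sketch of the fixture after this change; the fixture's full signature is not visible in the hunk, so the keyword names and the mocked payload are assumptions:

from httpx import Response
from settings.config import settings

def test_ask_question_uses_new_backend_url() -> None:
    with mocked_ask_question_api(
        host=settings.GPT_BASE_HOST,                      # assumed keyword
        return_value=Response(status_code=200, text="ok"),
        side_effect=None,
    ) as respx_mock:
        # POSTs to settings.chatgpt_backend_url on GPT_BASE_HOST are intercepted here;
        # exercise the repository or bot code under test inside this block.
        ...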
@@ -112,7 +112,7 @@ const ask_gpt = async (message) => {
await new Promise((r) => setTimeout(r, 1000));
window.scrollTo(0, 0);

const response = await fetch(`/backend-api/v2/conversation`, {
const response = await fetch(`{{api_path}}/backend-api/v2/conversation`, {
method: `POST`,
signal: window.controller.signal,
headers: {
@@ -151,7 +151,7 @@ const ask_gpt = async (message) => {

if (
chunk.includes(
`<form id="challenge-form" action="/backend-api/v2/conversation?`
`<form id="challenge-form" action="{{api_path}}/backend-api/v2/conversation?`
)
) {
chunk = `cloudflare token expired, please refresh the page.`;
@@ -927,3 +927,81 @@ boost::asio::awaitable<void> FreeGpt::h2o(std::shared_ptr<Channel> ch, nlohmann:
});
co_return;
}

boost::asio::awaitable<void> FreeGpt::chatGptDuo(std::shared_ptr<Channel> ch, nlohmann::json json) {
co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));

boost::system::error_code err{};
ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();

CURLcode res;
CURL* curl = curl_easy_init();
if (!curl) {
auto error_info = std::format("curl_easy_init() failed:{}", curl_easy_strerror(res));
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, error_info);
co_return;
}

curl_easy_setopt(curl, CURLOPT_URL, "https://chatgptduo.com/");
auto request_data = urlEncode(std::format("prompt=('{}',)&search=('{}',)&purpose=ask", prompt, prompt));
curl_easy_setopt(curl, CURLOPT_POSTFIELDS, request_data.c_str());

struct Input {
std::shared_ptr<Channel> ch;
};
Input input{ch};
auto action_cb = [](void* contents, size_t size, size_t nmemb, void* userp) -> size_t {
boost::system::error_code err{};
auto input_ptr = static_cast<Input*>(userp);
std::string data{(char*)contents, size * nmemb};
auto& [ch] = *input_ptr;
boost::asio::post(ch->get_executor(), [=, data = std::move(data)] mutable {
nlohmann::json json = nlohmann::json::parse(data, nullptr, false);
if (json.is_discarded()) {
SPDLOG_ERROR("json parse error: [{}]", data);
ch->try_send(err, data);
return;
}
if (json.contains("answer")) {
auto str = json["answer"].get<std::string>();
ch->try_send(err, str);
} else {
ch->try_send(err, std::format("Invalid JSON: {}", json.dump()));
}
return;
});
return size * nmemb;
};
size_t (*action_fn)(void* contents, size_t size, size_t nmemb, void* userp) = action_cb;
curlEasySetopt(curl);
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, action_fn);
curl_easy_setopt(curl, CURLOPT_WRITEDATA, &input);

struct curl_slist* headers = nullptr;
headers = curl_slist_append(headers, "Content-Type: application/x-www-form-urlencoded");
curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);

ScopeExit auto_exit{[=] {
curl_slist_free_all(headers);
curl_easy_cleanup(curl);
}};

res = curl_easy_perform(curl);

if (res != CURLE_OK) {
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
auto error_info = std::format("curl_easy_perform() failed:{}", curl_easy_strerror(res));
ch->try_send(err, error_info);
co_return;
}
int32_t response_code;
curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response_code);
if (response_code != 200) {
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, std::format("you http code:{}", response_code));
co_return;
}
co_return;
}
@@ -14,7 +14,7 @@ struct Config {
std::string http_proxy;
std::string api_key;
std::vector<std::string> ip_white_list;
std::string zeus{"http://chatgpt_zeus_service:8860"};
std::string zeus{"http://127.0.0.1:8860"};
};
YCS_ADD_STRUCT(Config, client_root_path, interval, work_thread_num, host, port, chat_path, providers, enable_proxy,
http_proxy, api_key, ip_white_list, zeus)
@@ -31,7 +31,6 @@
boost::asio::awaitable<void> vitalentum(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> gptGo(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> aibn(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> chatGptDuo(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> chatForAi(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> freeGpt(std::shared_ptr<Channel>, nlohmann::json);
boost::asio::awaitable<void> chatGpt4Online(std::shared_ptr<Channel>, nlohmann::json);
@@ -427,6 +427,12 @@ auto getConversationJson(const nlohmann::json& json) {
return conversation;
}

template <typename T = std::chrono::milliseconds>
uint64_t getTimestamp(std::chrono::time_point<std::chrono::system_clock> now = std::chrono::system_clock::now()) {
uint64_t timestamp = std::chrono::duration_cast<T>(now.time_since_epoch()).count();
return timestamp;
}

struct HttpResponse {
int32_t http_response_code;
std::vector<std::unordered_multimap<std::string, std::string>> http_header;
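The new getTimestamp helper simply returns the epoch duration in the requested unit; an equivalent pair of Python helpers, shown here only for illustration:

import time

def get_timestamp_ms() -> int:
    return time.time_ns() // 1_000_000      # like getTimestamp<std::chrono::milliseconds>()

def get_timestamp_s() -> int:
    return time.time_ns() // 1_000_000_000  # like getTimestamp<std::chrono::seconds>()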
@@ -1881,8 +1887,7 @@ boost::asio::awaitable<void> FreeGpt::aibn(std::shared_ptr<Channel> ch, nlohmann
}
return sha_stream.str();
};
uint64_t timestamp =
std::chrono::duration_cast<std::chrono::seconds>(std::chrono::system_clock::now().time_since_epoch()).count();
uint64_t timestamp = getTimestamp<std::chrono::seconds>();
std::string signature = generate_signature(timestamp, prompt);

constexpr std::string_view request_str{R"({
@@ -1934,84 +1939,6 @@ boost::asio::awaitable<void> FreeGpt::aibn(std::shared_ptr<Channel> ch, nlohmann
co_return;
}

boost::asio::awaitable<void> FreeGpt::chatGptDuo(std::shared_ptr<Channel> ch, nlohmann::json json) {
co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));

boost::system::error_code err{};
ScopeExit _exit{[=] { boost::asio::post(ch->get_executor(), [=] { ch->close(); }); }};
auto prompt = json.at("meta").at("content").at("parts").at(0).at("content").get<std::string>();

CURLcode res;
CURL* curl = curl_easy_init();
if (!curl) {
auto error_info = std::format("curl_easy_init() failed:{}", curl_easy_strerror(res));
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, error_info);
co_return;
}

curl_easy_setopt(curl, CURLOPT_URL, "https://chatgptduo.com/");
auto request_data = urlEncode(std::format("prompt=('{}',)&search=('{}',)&purpose=ask", prompt, prompt));
curl_easy_setopt(curl, CURLOPT_POSTFIELDS, request_data.c_str());

struct Input {
std::shared_ptr<Channel> ch;
};
Input input{ch};
auto action_cb = [](void* contents, size_t size, size_t nmemb, void* userp) -> size_t {
boost::system::error_code err{};
auto input_ptr = static_cast<Input*>(userp);
std::string data{(char*)contents, size * nmemb};
auto& [ch] = *input_ptr;
boost::asio::post(ch->get_executor(), [=, data = std::move(data)] mutable {
nlohmann::json json = nlohmann::json::parse(data, nullptr, false);
if (json.is_discarded()) {
SPDLOG_ERROR("json parse error: [{}]", data);
ch->try_send(err, data);
return;
}
if (json.contains("answer")) {
auto str = json["answer"].get<std::string>();
ch->try_send(err, str);
} else {
ch->try_send(err, std::format("Invalid JSON: {}", json.dump()));
}
return;
});
return size * nmemb;
};
size_t (*action_fn)(void* contents, size_t size, size_t nmemb, void* userp) = action_cb;
curlEasySetopt(curl);
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, action_fn);
curl_easy_setopt(curl, CURLOPT_WRITEDATA, &input);

struct curl_slist* headers = nullptr;
headers = curl_slist_append(headers, "Content-Type: application/x-www-form-urlencoded");
curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);

ScopeExit auto_exit{[=] {
curl_slist_free_all(headers);
curl_easy_cleanup(curl);
}};

res = curl_easy_perform(curl);

if (res != CURLE_OK) {
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
auto error_info = std::format("curl_easy_perform() failed:{}", curl_easy_strerror(res));
ch->try_send(err, error_info);
co_return;
}
int32_t response_code;
curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response_code);
if (response_code != 200) {
co_await boost::asio::post(boost::asio::bind_executor(ch->get_executor(), boost::asio::use_awaitable));
ch->try_send(err, std::format("you http code:{}", response_code));
co_return;
}
co_return;
}

boost::asio::awaitable<void> FreeGpt::chatForAi(std::shared_ptr<Channel> ch, nlohmann::json json) {
co_await boost::asio::post(boost::asio::bind_executor(*m_thread_pool_ptr, boost::asio::use_awaitable));
@@ -2049,8 +1976,24 @@ boost::asio::awaitable<void> FreeGpt::chatForAi(std::shared_ptr<Channel> ch, nlo
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, action_fn);
curl_easy_setopt(curl, CURLOPT_WRITEDATA, &input);

auto generate_signature = [](uint64_t timestamp, const std::string& message, const std::string& id) {
std::string s = std::to_string(timestamp) + ":" + id + ":" + message + ":7YN8z6d6";
unsigned char hash[SHA256_DIGEST_LENGTH];
SHA256_CTX sha256;
if (!SHA256_Init(&sha256))
throw std::runtime_error("SHA-256 initialization failed");
if (!SHA256_Update(&sha256, s.c_str(), s.length()))
throw std::runtime_error("SHA-256 update failed");
if (!SHA256_Final(hash, &sha256))
throw std::runtime_error("SHA-256 finalization failed");
std::stringstream ss;
for (int i = 0; i < SHA256_DIGEST_LENGTH; i++)
ss << std::hex << std::setw(2) << std::setfill('0') << static_cast<int>(hash[i]);
return ss.str();
};
uint64_t timestamp = getTimestamp();
constexpr std::string_view request_str{R"({
"conversationId": "temp",
"conversationId": "id_1696984301982",
"conversationType": "chat_continuous",
"botId": "chat_continuous",
"globalSettings": {
@@ -2058,18 +2001,22 @@ boost::asio::awaitable<void> FreeGpt::chatForAi(std::shared_ptr<Channel> ch, nlo
"model": "gpt-3.5-turbo",
"messageHistorySize": 5,
"temperature": 0.7,
"top_p": 1,
"stream": false
"top_p": 1
},
"botSettings": {},
"prompt": "hello",
"messages": [{
"role": "user",
"content": "hello"
}]
}],
"sign": "15d8e701706743ffa74f8b96c97bd1f79354c7da4a97438c81c6bb259004cd77",
"timestamp": 1696984302017
})"};
nlohmann::json request = nlohmann::json::parse(request_str, nullptr, false);

auto conversation_id = std::format("id_{}", timestamp - 35);
request["conversationId"] = conversation_id;
request["timestamp"] = timestamp;
request["sign"] = generate_signature(timestamp, prompt, conversation_id);
request["messages"] = getConversationJson(json);
request["prompt"] = prompt;
@@ -2080,6 +2027,8 @@ boost::asio::awaitable<void> FreeGpt::chatForAi(std::shared_ptr<Channel> ch, nlo

struct curl_slist* headers = nullptr;
headers = curl_slist_append(headers, "Content-Type: application/json");
headers = curl_slist_append(headers, "Origin: https://chatforai.store");
headers = curl_slist_append(headers, "Referer: https://chatforai.store/");
curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);

ScopeExit auto_exit{[=] {
@@ -2156,8 +2105,7 @@ boost::asio::awaitable<void> FreeGpt::freeGpt(std::shared_ptr<Channel> ch, nlohm
}
return sha_stream.str();
};
uint64_t timestamp =
std::chrono::duration_cast<std::chrono::seconds>(std::chrono::system_clock::now().time_since_epoch()).count();
uint64_t timestamp = getTimestamp<std::chrono::seconds>();
std::string signature = generate_signature(timestamp, prompt);

constexpr std::string_view request_str{R"({
@@ -2388,8 +2336,7 @@ boost::asio::awaitable<void> FreeGpt::gptalk(std::shared_ptr<Channel> ch, nlohma
headers = curl_slist_append(headers, "x-auth-appid: 2229");
headers = curl_slist_append(headers, "x-auth-openid: ");
headers = curl_slist_append(headers, "x-auth-platform: ");
uint64_t timestamp =
std::chrono::duration_cast<std::chrono::seconds>(std::chrono::system_clock::now().time_since_epoch()).count();
uint64_t timestamp = getTimestamp<std::chrono::seconds>();
auto auth_timestamp = std::format("x-auth-timestamp: {}", timestamp);
headers = curl_slist_append(headers, auth_timestamp.c_str());
curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
@@ -2842,9 +2789,7 @@ boost::asio::awaitable<void> FreeGpt::chatGptDemo(std::shared_ptr<Channel> ch, n
nlohmann::json ask_request = nlohmann::json::parse(ask_json_str, nullptr, false);
ask_request["question"] = prompt;
ask_request["chat_id"] = chat_id;
uint64_t timestamp =
std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch())
.count();
uint64_t timestamp = getTimestamp();
request["timestamp"] = timestamp;
std::string ask_request_str = ask_request.dump();
SPDLOG_INFO("ask_request_str: [{}]", ask_request_str);
@@ -133,7 +133,8 @@ boost::asio::awaitable<void> startSession(boost::asio::ip::tcp::socket sock, Con
co_return;
}
auto assets_path = std::format("{}{}", cfg.chat_path, ASSETS_PATH);
SPDLOG_INFO("assets_path: [{}]", assets_path);
auto api_path = std::format("{}{}", cfg.chat_path, API_PATH);
SPDLOG_INFO("assets_path: [{}], api_path: [{}]", assets_path, api_path);
while (true) {
boost::beast::flat_buffer buffer;
boost::beast::http::request<boost::beast::http::string_body> request;
@@ -179,6 +180,7 @@ boost::asio::awaitable<void> startSession(boost::asio::ip::tcp::socket sock, Con
return std::regex_replace(str, pattern, replacement);
};
data["chat_path"] = format_string(cfg.chat_path);
data["api_path"] = cfg.chat_path;
} else {
data["chat_path"] = cfg.chat_path;
}
@@ -211,7 +213,7 @@ boost::asio::awaitable<void> startSession(boost::asio::ip::tcp::socket sock, Con
boost::beast::http::message_generator rsp = std::move(res);
co_await boost::beast::async_write(stream, std::move(rsp), use_nothrow_awaitable);
}
} else if (request.target() == API_PATH) {
} else if (request.target() == api_path) {
std::string model;
nlohmann::json request_body;
bool flag = false;
@@ -344,7 +346,6 @@ int main(int argc, char** argv) {
ADD_METHOD("gpt-3.5-turbo-stream-Vitalentum", FreeGpt::vitalentum);
ADD_METHOD("gpt-3.5-turbo-stream-GptGo", FreeGpt::gptGo);
ADD_METHOD("gpt-3.5-turbo-stream-Aibn", FreeGpt::aibn);
ADD_METHOD("gpt-3.5-turbo-ChatgptDuo", FreeGpt::chatGptDuo);
ADD_METHOD("gpt-3.5-turbo-stream-FreeGpt", FreeGpt::freeGpt);
ADD_METHOD("gpt-4-stream-Chatgpt4Online", FreeGpt::chatGpt4Online);
ADD_METHOD("gpt-3.5-turbo-stream-gptalk", FreeGpt::gptalk);