Mirror of https://github.com/Balshgit/gpt_chat_bot.git, synced 2025-12-16 21:20:39 +03:00
rename chatgpt microservice (#30)

* rename chatgpt microservice
* add deprecated models
chatgpt_microservice/include/cfg.h (new file, 19 lines)
@@ -0,0 +1,19 @@
#pragma once

#include <yaml_cpp_struct.hpp>

struct Config {
    std::string client_root_path;
    std::size_t interval{300};
    std::size_t work_thread_num{8};
    std::string host{"0.0.0.0"};
    std::string port{"8858"};
    std::string chat_path{"/chat"};
    std::vector<std::string> providers;
    bool enable_proxy;
    std::string http_proxy;
    std::string api_key;
    std::vector<std::string> ip_white_list;
};
YCS_ADD_STRUCT(Config, client_root_path, interval, work_thread_num, host, port, chat_path, providers, enable_proxy,
               http_proxy, api_key, ip_white_list)
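For orientation, a hypothetical snippet that fills the aggregate by hand; in the service itself the struct is presumably populated from a YAML file through yaml_cpp_struct, given the YCS_ADD_STRUCT registration. The field values and provider names below are illustrative only, not the project's shipped defaults.

// Hypothetical: construct a Config directly instead of loading YAML.
Config cfg{
    .client_root_path = "client",    // illustrative path
    .providers = {"deepAi", "you"},  // illustrative provider names
    .enable_proxy = false,
    .api_key = "",
    .ip_white_list = {},
};
// Fields not listed keep their in-class defaults:
// interval = 300, work_thread_num = 8, host = "0.0.0.0",
// port = "8858", chat_path = "/chat".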
chatgpt_microservice/include/free_gpt.h (new file, 52 lines)
@@ -0,0 +1,52 @@
#pragma once

#include <expected>
#include <memory>

#include <boost/asio/awaitable.hpp>
#include <boost/asio/experimental/channel.hpp>
#include <boost/asio/thread_pool.hpp>
#include <boost/beast.hpp>
#include <boost/beast/ssl.hpp>
#include <nlohmann/json.hpp>

#include "cfg.h"

class FreeGpt final {
public:
    using Channel = boost::asio::experimental::channel<void(boost::system::error_code, std::string)>;

    FreeGpt(Config&);

    boost::asio::awaitable<void> aiTianhu(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> aiTianhuSpace(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> deepAi(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> aiChat(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> chatGptAi(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> acytoo(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> openAi(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> h2o(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> yqcloud(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> huggingChat(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> you(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> binjie(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> chatBase(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> aivvm(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> ylokh(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> vitalentum(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> gptGo(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> aibn(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> chatGptDuo(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> chatForAi(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> freeGpt(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> cromicle(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> chatGpt4Online(std::shared_ptr<Channel>, nlohmann::json);
    boost::asio::awaitable<void> gptalk(std::shared_ptr<Channel>, nlohmann::json);

private:
    boost::asio::awaitable<std::expected<boost::beast::ssl_stream<boost::beast::tcp_stream>, std::string>>
    createHttpClient(boost::asio::ssl::context&, std::string_view /* host */, std::string_view /* port */);

    Config& m_cfg;
    std::shared_ptr<boost::asio::thread_pool> m_thread_pool_ptr;
};
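Each provider coroutine streams its answer back through the Channel alias as text chunks. A minimal consumer sketch under that reading; deepAi is picked arbitrarily, and the request shape and channel-closing behaviour are assumptions rather than a documented contract.

// Hypothetical consumer, not part of the header above.
#include <iostream>
#include <boost/asio/as_tuple.hpp>
#include <boost/asio/co_spawn.hpp>
#include <boost/asio/detached.hpp>
#include <boost/asio/use_awaitable.hpp>
#include "free_gpt.h"

boost::asio::awaitable<void> streamOneAnswer(FreeGpt& gpt, nlohmann::json request) {
    auto executor = co_await boost::asio::this_coro::executor;
    auto channel = std::make_shared<FreeGpt::Channel>(executor);
    // Run one provider concurrently; it pushes text chunks into the channel.
    boost::asio::co_spawn(executor, gpt.deepAi(channel, std::move(request)), boost::asio::detached);
    for (;;) {
        auto [ec, chunk] = co_await channel->async_receive(boost::asio::as_tuple(boost::asio::use_awaitable));
        if (ec)  // channel closed: the provider finished or failed
            break;
        std::cout << chunk << std::flush;
    }
}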
chatgpt_microservice/include/helper.hpp (new file, 103 lines)
@@ -0,0 +1,103 @@
#pragma once

#include <list>
#include <thread>
#include <vector>

#include <boost/asio.hpp>
#include <boost/uuid/uuid.hpp>
#include <boost/uuid/uuid_generators.hpp>
#include <boost/uuid/uuid_io.hpp>

constexpr auto use_nothrow_awaitable = boost::asio::as_tuple(boost::asio::use_awaitable);

class IoContextPool final {
public:
    explicit IoContextPool(std::size_t);

    void start();
    void stop();

    boost::asio::io_context& getIoContext();

private:
    std::vector<std::shared_ptr<boost::asio::io_context>> m_io_contexts;
    std::list<boost::asio::any_io_executor> m_work;
    std::size_t m_next_io_context;
    std::vector<std::jthread> m_threads;
};

inline IoContextPool::IoContextPool(std::size_t pool_size) : m_next_io_context(0) {
    if (pool_size == 0)
        throw std::runtime_error("IoContextPool size is 0");
    for (std::size_t i = 0; i < pool_size; ++i) {
        auto io_context_ptr = std::make_shared<boost::asio::io_context>();
        m_io_contexts.emplace_back(io_context_ptr);
        m_work.emplace_back(
            boost::asio::require(io_context_ptr->get_executor(), boost::asio::execution::outstanding_work.tracked));
    }
}

inline void IoContextPool::start() {
    for (auto& context : m_io_contexts)
        m_threads.emplace_back(std::jthread([&] { context->run(); }));
}

inline void IoContextPool::stop() {
    for (auto& context_ptr : m_io_contexts)
        context_ptr->stop();
}

inline boost::asio::io_context& IoContextPool::getIoContext() {
    boost::asio::io_context& io_context = *m_io_contexts[m_next_io_context];
    ++m_next_io_context;
    if (m_next_io_context == m_io_contexts.size())
        m_next_io_context = 0;
    return io_context;
}

inline boost::asio::awaitable<void> timeout(std::chrono::seconds duration) {
    auto now = std::chrono::steady_clock::now() + duration;
    boost::asio::steady_timer timer(co_await boost::asio::this_coro::executor);
    timer.expires_at(now);
    [[maybe_unused]] auto [ec] = co_await timer.async_wait(boost::asio::as_tuple(boost::asio::use_awaitable));
    co_return;
}

template <typename... Args>
inline auto getEnv(Args&&... args) {
    auto impl = []<std::size_t... I>(auto&& tp, std::index_sequence<I...>) {
        auto func = [](std::string_view env_name) {
            const char* env = std::getenv(env_name.data());
            if (env == nullptr)
                return std::string{};
            return std::string{env};
        };
        return std::make_tuple(func(std::get<I>(tp))...);
    };
    return impl(std::forward_as_tuple(args...), std::index_sequence_for<Args...>{});
}

class ScopeExit {
public:
    ScopeExit(const ScopeExit&) = delete;
    ScopeExit& operator=(const ScopeExit&) = delete;

    template <typename Callable>
    explicit ScopeExit(Callable&& call) : m_call(std::forward<Callable>(call)) {}

    ~ScopeExit() {
        if (m_call)
            m_call();
    }

    void clear() { m_call = decltype(m_call)(); }

private:
    std::function<void()> m_call;
};

inline std::string createUuidString() {
    static thread_local boost::uuids::random_generator gen;
    return boost::uuids::to_string(gen());
}
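A hypothetical end-to-end sketch of these helpers: an IoContextPool drives a coroutine that races two timeout() awaitables via Asio's awaitable operators, guarded by ScopeExit, with getEnv() and createUuidString() used in passing. The pool size, durations and environment variable names are arbitrary illustration, not values used by the service.

#include <chrono>
#include <iostream>
#include <boost/asio/experimental/awaitable_operators.hpp>
#include "helper.hpp"

using namespace boost::asio::experimental::awaitable_operators;

// Illustrative work item: races a short deadline against a longer one.
boost::asio::awaitable<void> doWork() {
    ScopeExit log_done{[id = createUuidString()] { std::cout << "request " << id << " finished\n"; }};
    auto winner = co_await (timeout(std::chrono::seconds(1)) || timeout(std::chrono::seconds(5)));
    if (winner.index() == 0)  // the 1-second branch completed first; the other is cancelled
        std::cout << "short deadline won the race\n";
}

int main() {
    auto [proxy, api_key] = getEnv("HTTP_PROXY", "API_KEY");  // empty strings when unset
    std::cout << "proxy=" << proxy << "\n";
    IoContextPool pool{2};
    boost::asio::co_spawn(pool.getIoContext(), doWork(), boost::asio::detached);
    pool.start();  // spins up one jthread per io_context
    std::this_thread::sleep_for(std::chrono::seconds(2));
    pool.stop();   // run() returns; the jthreads join when the pool is destroyed
}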