add auth context (#62)

* add user table and superuser creation

* add gpt-4-stream-aivvm provider

* rename user migration to auth migration
Author: Dmitry Afanasyev
Date: 2023-11-28 23:06:26 +03:00
Committed by: GitHub
Parent: c80b001740
Commit: 2359481fb7
17 changed files with 668 additions and 280 deletions

@@ -2313,30 +2313,50 @@ boost::asio::awaitable<void> FreeGpt::aivvm(std::shared_ptr<Channel> ch, nlohman
req.set("sec-fetch-site", "same-origin");
req.set("DNT", "1");
req.set("Cookie", std::get<1>(item));
constexpr std::string_view json_str = R"({
"model":{
"id":"gpt-3.5-turbo",
"name":"GPT-3.5",
"maxLength":12000,
"tokenLimit":4096
},
"messages":[
{
"role":"user",
"content":"hello"
}
],
"key":"",
"prompt":"You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
"temperature":0.7
})";
nlohmann::json request = nlohmann::json::parse(json_str, nullptr, false);
request["messages"] = getConversationJson(json);
SPDLOG_INFO("{}", request.dump(2));
req.body() = request.dump();
auto model = json.at("model").get<std::string>();
if (model == "gpt-3.5-turbo-stream-aivvm") {
constexpr std::string_view json_str = R"({
"model":{
"id":"gpt-3.5-turbo",
"name":"GPT-3.5",
"maxLength":12000,
"tokenLimit":4096
},
"messages":[
{
"role":"user",
"content":"hello"
}
],
"key":"",
"prompt":"You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
"temperature":0.7
})";
nlohmann::json request = nlohmann::json::parse(json_str, nullptr, false);
request["messages"] = getConversationJson(json);
SPDLOG_INFO("{}", request.dump(2));
req.body() = request.dump();
} else {
constexpr std::string_view json_str = R"({
"model":{
"id":"gpt-4",
"name":"GPT-4"
},
"messages":[
{
"role":"user",
"content":"hello"
}
],
"key":"",
"prompt":"You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
"temperature":0.7
})";
nlohmann::json request = nlohmann::json::parse(json_str, nullptr, false);
request["messages"] = getConversationJson(json);
SPDLOG_INFO("{}", request.dump(2));
req.body() = request.dump();
}
req.prepare_payload();
auto result = co_await sendRequestRecvChunk(ch, stream_, req, 200, [&ch](std::string str) {
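
Both branches above build the upstream payload by parsing an embedded template with `nlohmann::json::parse(json_str, nullptr, false)`. The third argument disables exceptions, so a malformed template comes back as a "discarded" value rather than throwing. A minimal, self-contained sketch of that failure mode and a guard that could precede `req.body() = request.dump()` (the guard itself is not part of this commit):

```cpp
#include <nlohmann/json.hpp>
#include <spdlog/spdlog.h>
#include <string_view>

int main() {
    // Deliberately broken template (missing closing braces) to show the non-throwing path.
    constexpr std::string_view json_str = R"({"model":{"id":"gpt-4","name":"GPT-4")";
    // allow_exceptions = false: parse errors yield a discarded value instead of throwing.
    nlohmann::json request = nlohmann::json::parse(json_str, nullptr, false);
    if (request.is_discarded()) {
        SPDLOG_ERROR("request template is not valid JSON");
        return 1;
    }
    SPDLOG_INFO("{}", request.dump(2));
    return 0;
}
```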

@@ -351,6 +351,7 @@ int main(int, char** argv) {
ADD_METHOD("gpt-3.5-turbo-stream-FakeGpt", FreeGpt::fakeGpt);
ADD_METHOD("gpt-3.5-turbo-stream-Vercel", FreeGpt::vercel);
ADD_METHOD("gpt-3.5-turbo-stream-aivvm", FreeGpt::aivvm);
ADD_METHOD("gpt-4-stream-aivvm", FreeGpt::aivvm);
ADD_METHOD("gpt-4-ChatGpt4Online", FreeGpt::chatGpt4Online);
ADD_METHOD("gpt-3.5-turbo-stream-ChatAnywhere", FreeGpt::chatAnywhere);
ADD_METHOD("gpt-3.5-turbo-ChatgptNext", FreeGpt::chatGptNext);