Mirror of https://github.com/Balshgit/gpt_chat_bot.git (synced 2026-02-04 16:50:38 +03:00)
microservices are able to run (#5)
19 chat_gpt_microservice/g4f/Provider/Provider.py Normal file
@@ -0,0 +1,19 @@
import os
from typing import get_type_hints

url = None
model = None
supports_stream = False
needs_auth = False


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    return


params = f"g4f.Providers.{os.path.basename(__file__)[:-3]} supports: " + "(%s)" % ", ".join(
    [
        f"{name}: {get_type_hints(_create_completion)[name].__name__}"
        for name in _create_completion.__code__.co_varnames[: _create_completion.__code__.co_argcount]
    ]
)
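
Note: every provider module in this commit ends with the same `params` line. It introspects the annotated positional parameters of `_create_completion` via `get_type_hints` and renders them into a capability string. A minimal standalone sketch of what that expression evaluates to (the module name in the printed string is assumed for illustration):

from typing import get_type_hints


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    return


# co_varnames[:co_argcount] lists the declared positional parameters,
# here ('model', 'messages', 'stream'); **kwargs is excluded.
hints = get_type_hints(_create_completion)
rendered = ", ".join(
    f"{name}: {hints[name].__name__}"
    for name in _create_completion.__code__.co_varnames[: _create_completion.__code__.co_argcount]
)
print(f"g4f.Providers.Provider supports: ({rendered})")
# -> g4f.Providers.Provider supports: (model: str, messages: list, stream: bool)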
41 chat_gpt_microservice/g4f/Provider/Providers/AiService.py Normal file
@@ -0,0 +1,41 @@
import os
from typing import get_type_hints

import requests

url = "https://aiservice.vercel.app/api/chat/answer"
model = ["gpt-3.5-turbo"]
supports_stream = False
needs_auth = False


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    base = ""
    for message in messages:
        base += "%s: %s\n" % (message["role"], message["content"])
    base += "assistant:"

    headers = {
        "accept": "*/*",
        "content-type": "text/plain;charset=UTF-8",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-origin",
        "Referer": "https://aiservice.vercel.app/chat",
    }
    data = {"input": base}
    response = requests.post(url, headers=headers, json=data)
    if response.status_code == 200:
        _json = response.json()
        yield _json["data"]
    else:
        print(f"Error Occurred::{response.status_code}")
        return None


params = f"g4f.Providers.{os.path.basename(__file__)[:-3]} supports: " + "(%s)" % ", ".join(
    [
        f"{name}: {get_type_hints(_create_completion)[name].__name__}"
        for name in _create_completion.__code__.co_varnames[: _create_completion.__code__.co_argcount]
    ]
)
46 chat_gpt_microservice/g4f/Provider/Providers/Aichat.py Normal file
@@ -0,0 +1,46 @@
import json
import os
from typing import get_type_hints

import requests

url = "https://hteyun.com"
model = [
    "gpt-3.5-turbo",
    "gpt-3.5-turbo-16k",
    "gpt-3.5-turbo-16k-0613",
    "gpt-3.5-turbo-0613",
]
supports_stream = True
needs_auth = False


def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs):
    headers = {
        "Content-Type": "application/json",
    }
    data = {
        "model": model,
        "temperature": 0.7,
        "presence_penalty": 0,
        "messages": messages,
    }
    response = requests.post(url + "/api/chat-stream", json=data, stream=True)

    if stream:
        for chunk in response.iter_content(chunk_size=None):
            chunk = chunk.decode("utf-8")
            if chunk.strip():
                message = json.loads(chunk)["choices"][0]["message"]["content"]
                yield message
    else:
        message = response.json()["choices"][0]["message"]["content"]
        yield message


params = f"g4f.Providers.{os.path.basename(__file__)[:-3]} supports: " + "(%s)" % ", ".join(
    [
        f"{name}: {get_type_hints(_create_completion)[name].__name__}"
        for name in _create_completion.__code__.co_varnames[: _create_completion.__code__.co_argcount]
    ]
)
96 chat_gpt_microservice/g4f/Provider/Providers/Ails.py Normal file
@@ -0,0 +1,96 @@
import hashlib
import json
import os
import time
import uuid
from datetime import datetime
from typing import Dict, get_type_hints

import requests
from g4f.typing import sha256

url: str = "https://ai.ls"
model: str = "gpt-3.5-turbo"
supports_stream = True
needs_auth = False
working = True


class Utils:
    def hash(json_data: Dict[str, str]) -> sha256:
        base_string: str = "%s:%s:%s:%s" % (
            json_data["t"],
            json_data["m"],
            "WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf",
            len(json_data["m"]),
        )

        return hashlib.sha256(base_string.encode()).hexdigest()

    def format_timestamp(timestamp: int) -> str:
        e = timestamp
        n = e % 10
        r = n + 1 if n % 2 == 0 else n
        return str(e - n + r)


def _create_completion(model: str, messages: list, temperature: float = 0.6, stream: bool = False, **kwargs):
    headers = {
        "authority": "api.caipacity.com",
        "accept": "*/*",
        "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
        "authorization": "Bearer free",
        "client-id": str(uuid.uuid4()),
        "client-v": "0.1.249",
        "content-type": "application/json",
        "origin": "https://ai.ls",
        "referer": "https://ai.ls/",
        "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": '"Windows"',
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "cross-site",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
    }

    timestamp = Utils.format_timestamp(int(time.time() * 1000))

    sig = {
        "d": datetime.now().strftime("%Y-%m-%d"),
        "t": timestamp,
        "s": Utils.hash({"t": timestamp, "m": messages[-1]["content"]}),
    }

    json_data = json.dumps(
        separators=(",", ":"),
        obj={
            "model": "gpt-3.5-turbo",
            "temperature": 0.6,
            "stream": True,
            "messages": messages,
        }
        | sig,
    )

    response = requests.post(
        "https://api.caipacity.com/v1/chat/completions",
        headers=headers,
        data=json_data,
        stream=True,
    )

    for token in response.iter_lines():
        if b"content" in token:
            completion_chunk = json.loads(token.decode().replace("data: ", ""))
            token = completion_chunk["choices"][0]["delta"].get("content")
            if token is not None:
                yield token


params = f"g4f.Providers.{os.path.basename(__file__)[:-3]} supports: " + "(%s)" % ", ".join(
    [
        f"{name}: {get_type_hints(_create_completion)[name].__name__}"
        for name in _create_completion.__code__.co_varnames[: _create_completion.__code__.co_argcount]
    ]
)
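
Note: the interesting part of Ails.py is the request signature: the last user message is hashed together with a hard-coded secret and a timestamp whose final digit is forced odd. A minimal sketch of just the signing path, assuming the secret and scheme are exactly as in the file above (the message value is illustrative):

import hashlib
import time

SECRET = "WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf"  # hard-coded secret from Ails.py above


def format_timestamp(timestamp: int) -> str:
    # Force the last digit to be odd, matching Utils.format_timestamp.
    n = timestamp % 10
    r = n + 1 if n % 2 == 0 else n
    return str(timestamp - n + r)


t = format_timestamp(int(time.time() * 1000))
m = "hello"  # illustrative last-message content
s = hashlib.sha256(f"{t}:{m}:{SECRET}:{len(m)}".encode()).hexdigest()
print(t, s)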
87 chat_gpt_microservice/g4f/Provider/Providers/Bard.py Normal file
@@ -0,0 +1,87 @@
import json
import os
import random
import re
from typing import get_type_hints

import browser_cookie3
import requests

url = "https://bard.google.com"
model = ["Palm2"]
supports_stream = False
needs_auth = True


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    psid = {cookie.name: cookie.value for cookie in browser_cookie3.chrome(domain_name=".google.com")}["__Secure-1PSID"]

    formatted = "\n".join(["%s: %s" % (message["role"], message["content"]) for message in messages])
    prompt = f"{formatted}\nAssistant:"

    proxy = kwargs.get("proxy", False)
    if not proxy:
        print("warning: no proxy given; Google Bard is blocked in many countries, so this may not work")

    snlm0e = None
    conversation_id = None
    response_id = None
    choice_id = None

    client = requests.Session()
    client.proxies = {"http": f"http://{proxy}", "https": f"http://{proxy}"} if proxy else None

    client.headers = {
        "authority": "bard.google.com",
        "content-type": "application/x-www-form-urlencoded;charset=UTF-8",
        "origin": "https://bard.google.com",
        "referer": "https://bard.google.com/",
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36",
        "x-same-domain": "1",
        "cookie": f"__Secure-1PSID={psid}",
    }

    snlm0e = (
        re.search(r"SNlM0e\":\"(.*?)\"", client.get("https://bard.google.com/").text).group(1) if not snlm0e else snlm0e
    )

    params = {
        "bl": "boq_assistant-bard-web-server_20230326.21_p0",
        "_reqid": random.randint(1111, 9999),
        "rt": "c",
    }

    data = {
        "at": snlm0e,
        "f.req": json.dumps(
            [
                None,
                json.dumps([[prompt], None, [conversation_id, response_id, choice_id]]),
            ]
        ),
    }

    intents = ".".join(["assistant", "lamda", "BardFrontendService"])

    response = client.post(
        f"https://bard.google.com/_/BardChatUi/data/{intents}/StreamGenerate",
        data=data,
        params=params,
    )

    chat_data = json.loads(response.content.splitlines()[3])[0][2]
    if chat_data:
        json_chat_data = json.loads(chat_data)

        yield json_chat_data[0][0]

    else:
        yield "error"


params = f"g4f.Providers.{os.path.basename(__file__)[:-3]} supports: " + "(%s)" % ", ".join(
    [
        f"{name}: {get_type_hints(_create_completion)[name].__name__}"
        for name in _create_completion.__code__.co_varnames[: _create_completion.__code__.co_argcount]
    ]
)
64 chat_gpt_microservice/g4f/Provider/Providers/Better.py Normal file
@@ -0,0 +1,64 @@
import json
import os
from typing import get_type_hints

import requests

url = "https://openai-proxy-api.vercel.app/v1/"
model = [
    "gpt-3.5-turbo",
    "gpt-3.5-turbo-0613",
    "gpt-3.5-turbo-16k",
    "gpt-3.5-turbo-16k-0613",
    "gpt-4",
]

supports_stream = True
needs_auth = False


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    headers = {
        "Content-Type": "application/json",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.58",
        "Referer": "https://chat.ylokh.xyz/",
        "Origin": "https://chat.ylokh.xyz",
        "Connection": "keep-alive",
    }

    json_data = {
        "messages": messages,
        "temperature": 1.0,
        "model": model,
        "stream": stream,
    }

    response = requests.post(
        "https://openai-proxy-api.vercel.app/v1/chat/completions",
        headers=headers,
        json=json_data,
        stream=True,
    )

    for token in response.iter_lines():
        decoded = token.decode("utf-8")
        if decoded.startswith("data: "):
            data_str = decoded.replace("data: ", "")
            if data_str == "[DONE]":  # the terminator line is not JSON
                break
            data = json.loads(data_str)
            if "choices" in data and "delta" in data["choices"][0]:
                delta = data["choices"][0]["delta"]
                content = delta.get("content", "")
                finish_reason = delta.get("finish_reason", "")

                if finish_reason == "stop":
                    break
                if content:
                    yield content


params = f"g4f.Providers.{os.path.basename(__file__)[:-3]} supports: " + "(%s)" % ", ".join(
    [
        f"{name}: {get_type_hints(_create_completion)[name].__name__}"
        for name in _create_completion.__code__.co_varnames[: _create_completion.__code__.co_argcount]
    ]
)
355 chat_gpt_microservice/g4f/Provider/Providers/Bing.py Normal file
@@ -0,0 +1,355 @@
import asyncio
import json
import os
import random
import ssl
import uuid
from typing import get_type_hints

import aiohttp
import certifi
import requests

url = "https://bing.com/chat"
model = ["gpt-4"]
supports_stream = True
needs_auth = False

ssl_context = ssl.create_default_context()
ssl_context.load_verify_locations(certifi.where())


class optionsSets:
    optionSet: dict = {"tone": str, "optionsSets": list}

    jailbreak: dict = {
        "optionsSets": [
            "saharasugg",
            "enablenewsfc",
            "clgalileo",
            "gencontentv3",
            "nlu_direct_response_filter",
            "deepleo",
            "disable_emoji_spoken_text",
            "responsible_ai_policy_235",
            "enablemm",
            "h3precise",
            # "harmonyv3",
            "dtappid",
            "cricinfo",
            "cricinfov2",
            "dv3sugg",
            "nojbfedge",
        ]
    }


class Defaults:
    delimiter = "\x1e"
    ip_address = f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}"

    allowedMessageTypes = [
        "Chat",
        "Disengaged",
        "AdsQuery",
        "SemanticSerp",
        "GenerateContentQuery",
        "SearchQuery",
        "ActionRequest",
        "Context",
        "Progress",
        "AdsQuery",
        "SemanticSerp",
    ]

    sliceIds = [
        # "222dtappid",
        # "225cricinfo",
        # "224locals0"
        "winmuid3tf",
        "osbsdusgreccf",
        "ttstmout",
        "crchatrev",
        "winlongmsgtf",
        "ctrlworkpay",
        "norespwtf",
        "tempcacheread",
        "temptacache",
        "505scss0",
        "508jbcars0",
        "515enbotdets0",
        "5082tsports",
        "515vaoprvs",
        "424dagslnv1s0",
        "kcimgattcf",
        "427startpms0",
    ]

    location = {
        "locale": "en-US",
        "market": "en-US",
        "region": "US",
        "locationHints": [
            {
                "country": "United States",
                "state": "California",
                "city": "Los Angeles",
                "timezoneoffset": 8,
                "countryConfidence": 8,
                "Center": {"Latitude": 34.0536909, "Longitude": -118.242766},
                "RegionType": 2,
                "SourceType": 1,
            }
        ],
    }


def _format(msg: dict) -> str:
    return json.dumps(msg, ensure_ascii=False) + Defaults.delimiter


async def create_conversation():
    for _ in range(5):
        create = requests.get(
            "https://www.bing.com/turing/conversation/create",
            headers={
                "authority": "edgeservices.bing.com",
                "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
                "accept-language": "en-US,en;q=0.9",
                "cache-control": "max-age=0",
                "sec-ch-ua": '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
                "sec-ch-ua-arch": '"x86"',
                "sec-ch-ua-bitness": '"64"',
                "sec-ch-ua-full-version": '"110.0.1587.69"',
                "sec-ch-ua-full-version-list": '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
                "sec-ch-ua-mobile": "?0",
                "sec-ch-ua-model": '""',
                "sec-ch-ua-platform": '"Windows"',
                "sec-ch-ua-platform-version": '"15.0.0"',
                "sec-fetch-dest": "document",
                "sec-fetch-mode": "navigate",
                "sec-fetch-site": "none",
                "sec-fetch-user": "?1",
                "upgrade-insecure-requests": "1",
                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69",
                "x-edge-shopping-flag": "1",
                "x-forwarded-for": Defaults.ip_address,
            },
        )

        conversationId = create.json().get("conversationId")
        clientId = create.json().get("clientId")
        conversationSignature = create.json().get("conversationSignature")

        # Retry until the create endpoint returns all three fields.
        if conversationId and clientId and conversationSignature:
            return conversationId, clientId, conversationSignature

    raise Exception("Failed to create conversation.")


async def stream_generate(
    prompt: str,
    mode: optionsSets.optionSet = optionsSets.jailbreak,
    context: bool or str = False,
):
    timeout = aiohttp.ClientTimeout(total=900)
    session = aiohttp.ClientSession(timeout=timeout)

    conversationId, clientId, conversationSignature = await create_conversation()

    wss = await session.ws_connect(
        "wss://sydney.bing.com/sydney/ChatHub",
        ssl=ssl_context,
        autoping=False,
        headers={
            "accept": "application/json",
            "accept-language": "en-US,en;q=0.9",
            "content-type": "application/json",
            "sec-ch-ua": '"Not_A Brand";v="99", "Microsoft Edge";v="110", "Chromium";v="110"',
            "sec-ch-ua-arch": '"x86"',
            "sec-ch-ua-bitness": '"64"',
            "sec-ch-ua-full-version": '"109.0.1518.78"',
            "sec-ch-ua-full-version-list": '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-model": "",
            "sec-ch-ua-platform": '"Windows"',
            "sec-ch-ua-platform-version": '"15.0.0"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "x-ms-client-request-id": str(uuid.uuid4()),
            "x-ms-useragent": "azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32",
            "Referer": "https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx",
            "Referrer-Policy": "origin-when-cross-origin",
            "x-forwarded-for": Defaults.ip_address,
        },
    )

    await wss.send_str(_format({"protocol": "json", "version": 1}))
    await wss.receive(timeout=900)

    struct = {
        "arguments": [
            {
                **mode,
                "source": "cib",
                "allowedMessageTypes": Defaults.allowedMessageTypes,
                "sliceIds": Defaults.sliceIds,
                "traceId": os.urandom(16).hex(),
                "isStartOfSession": True,
                "message": Defaults.location
                | {
                    "author": "user",
                    "inputMethod": "Keyboard",
                    "text": prompt,
                    "messageType": "Chat",
                },
                "conversationSignature": conversationSignature,
                "participant": {"id": clientId},
                "conversationId": conversationId,
            }
        ],
        "invocationId": "0",
        "target": "chat",
        "type": 4,
    }

    if context:
        struct["arguments"][0]["previousMessages"] = [
            {
                "author": "user",
                "description": context,
                "contextType": "WebPage",
                "messageType": "Context",
                "messageId": "discover-web--page-ping-mriduna-----",
            }
        ]

    await wss.send_str(_format(struct))

    final = False
    draw = False
    resp_txt = ""
    result_text = ""
    resp_txt_no_link = ""
    cache_text = ""

    while not final:
        msg = await wss.receive(timeout=900)
        objects = msg.data.split(Defaults.delimiter)

        for obj in objects:
            if obj is None or not obj:
                continue

            response = json.loads(obj)
            if response.get("type") == 1 and response["arguments"][0].get(
                "messages",
            ):
                if not draw:
                    if (response["arguments"][0]["messages"][0]["contentOrigin"] != "Apology") and not draw:
                        resp_txt = result_text + response["arguments"][0]["messages"][0]["adaptiveCards"][0]["body"][
                            0
                        ].get("text", "")
                        resp_txt_no_link = result_text + response["arguments"][0]["messages"][0].get("text", "")

                        if response["arguments"][0]["messages"][0].get(
                            "messageType",
                        ):
                            resp_txt = (
                                resp_txt
                                + response["arguments"][0]["messages"][0]["adaptiveCards"][0]["body"][0]["inlines"][
                                    0
                                ].get("text")
                                + "\n"
                            )
                            result_text = (
                                result_text
                                + response["arguments"][0]["messages"][0]["adaptiveCards"][0]["body"][0]["inlines"][
                                    0
                                ].get("text")
                                + "\n"
                            )

                if cache_text.endswith(" "):
                    final = True
                    if wss and not wss.closed:
                        await wss.close()
                    if session and not session.closed:
                        await session.close()

                yield (resp_txt.replace(cache_text, ""))
                cache_text = resp_txt

            elif response.get("type") == 2:
                if response["item"]["result"].get("error"):
                    if wss and not wss.closed:
                        await wss.close()
                    if session and not session.closed:
                        await session.close()

                    raise Exception(f"{response['item']['result']['value']}: {response['item']['result']['message']}")

                if draw:
                    cache = response["item"]["messages"][1]["adaptiveCards"][0]["body"][0]["text"]
                    response["item"]["messages"][1]["adaptiveCards"][0]["body"][0]["text"] = cache + resp_txt

                if response["item"]["messages"][-1]["contentOrigin"] == "Apology" and resp_txt:
                    response["item"]["messages"][-1]["text"] = resp_txt_no_link
                    response["item"]["messages"][-1]["adaptiveCards"][0]["body"][0]["text"] = resp_txt

                    # print('Preserved the message from being deleted', file=sys.stderr)

                final = True
                if wss and not wss.closed:
                    await wss.close()
                if session and not session.closed:
                    await session.close()


def run(generator):
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    gen = generator.__aiter__()

    while True:
        try:
            next_val = loop.run_until_complete(gen.__anext__())
            yield next_val

        except StopAsyncIteration:
            break
        # print('Done')


def convert(messages):
    context = ""

    for message in messages:
        context += "[%s](#message)\n%s\n\n" % (message["role"], message["content"])

    return context


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    if len(messages) < 2:
        prompt = messages[0]["content"]
        context = False

    else:
        prompt = messages[-1]["content"]
        context = convert(messages[:-1])

    response = run(stream_generate(prompt, optionsSets.jailbreak, context))
    for token in response:
        yield (token)

    # print('Done')


params = f"g4f.Providers.{os.path.basename(__file__)[:-3]} supports: " + "(%s)" % ", ".join(
    [
        f"{name}: {get_type_hints(_create_completion)[name].__name__}"
        for name in _create_completion.__code__.co_varnames[: _create_completion.__code__.co_argcount]
    ]
)
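
Note: `run` in Bing.py is the sync/async bridge that lets `_create_completion` remain a plain generator over the async `stream_generate`. A minimal standalone sketch of the same pattern, with a stand-in async generator (`counter` is hypothetical, for illustration only):

import asyncio


async def counter(n: int):
    # Stand-in for stream_generate: any async generator works here.
    for i in range(n):
        await asyncio.sleep(0)
        yield i


def run(generator):
    # Drive an async generator from synchronous code, one item at a time.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    gen = generator.__aiter__()
    try:
        while True:
            try:
                yield loop.run_until_complete(gen.__anext__())
            except StopAsyncIteration:
                break
    finally:
        loop.close()


print(list(run(counter(3))))  # -> [0, 1, 2]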
58 chat_gpt_microservice/g4f/Provider/Providers/ChatFree.py Normal file
@@ -0,0 +1,58 @@
import json
import os
from typing import get_type_hints

import requests

url = "https://v.chatfree.cc"
model = ["gpt-3.5-turbo", "gpt-3.5-turbo-16k"]
supports_stream = False
needs_auth = False


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    headers = {
        "authority": "chat.dfehub.com",
        "accept": "*/*",
        "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
        "content-type": "application/json",
        "origin": "https://v.chatfree.cc",
        "referer": "https://v.chatfree.cc/",
        "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": '"macOS"',
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-origin",
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
        "x-requested-with": "XMLHttpRequest",
    }

    json_data = {
        "messages": messages,
        "stream": True,
        "model": model,
        "temperature": 0.5,
        "presence_penalty": 0,
        "frequency_penalty": 0,
        "top_p": 1,
    }

    response = requests.post(
        "https://v.chatfree.cc/api/openai/v1/chat/completions",
        headers=headers,
        json=json_data,
    )

    for chunk in response.iter_lines():
        if b"content" in chunk:
            data = json.loads(chunk.decode().split("data: ")[1])
            yield (data["choices"][0]["delta"]["content"])


params = f"g4f.Providers.{os.path.basename(__file__)[:-3]} supports: " + "(%s)" % ", ".join(
    [
        f"{name}: {get_type_hints(_create_completion)[name].__name__}"
        for name in _create_completion.__code__.co_varnames[: _create_completion.__code__.co_argcount]
    ]
)
60 chat_gpt_microservice/g4f/Provider/Providers/ChatgptAi.py Normal file
@@ -0,0 +1,60 @@
import os
import re
from typing import get_type_hints

import requests

url = "https://chatgpt.ai/gpt-4/"
model = ["gpt-4"]
supports_stream = True
needs_auth = False


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    chat = ""
    for message in messages:
        chat += "%s: %s\n" % (message["role"], message["content"])
    chat += "assistant: "

    response = requests.get("https://chatgpt.ai/")
    nonce, post_id, _, bot_id = re.findall(
        r'data-nonce="(.*)"\n data-post-id="(.*)"\n data-url="(.*)"\n data-bot-id="(.*)"\n data-width',
        response.text,
    )[0]

    headers = {
        "authority": "chatgpt.ai",
        "accept": "*/*",
        "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
        "cache-control": "no-cache",
        "origin": "https://chatgpt.ai",
        "pragma": "no-cache",
        "referer": "https://chatgpt.ai/gpt-4/",
        "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": '"Windows"',
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-origin",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
    }
    data = {
        "_wpnonce": nonce,
        "post_id": post_id,
        "url": "https://chatgpt.ai/gpt-4",
        "action": "wpaicg_chat_shortcode_message",
        "message": chat,
        "bot_id": bot_id,
    }

    response = requests.post("https://chatgpt.ai/wp-admin/admin-ajax.php", headers=headers, data=data)

    yield (response.json()["data"])


params = f"g4f.Providers.{os.path.basename(__file__)[:-3]} supports: " + "(%s)" % ", ".join(
    [
        f"{name}: {get_type_hints(_create_completion)[name].__name__}"
        for name in _create_completion.__code__.co_varnames[: _create_completion.__code__.co_argcount]
    ]
)
113 chat_gpt_microservice/g4f/Provider/Providers/ChatgptLogin.py Normal file
@@ -0,0 +1,113 @@
import base64
import os
import re
from typing import get_type_hints

import requests

url = "https://chatgptlogin.ac"
model = ["gpt-3.5-turbo"]
supports_stream = False
needs_auth = False


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    def get_nonce():
        res = requests.get(
            "https://chatgptlogin.ac/use-chatgpt-free/",
            headers={
                "Referer": "https://chatgptlogin.ac/use-chatgpt-free/",
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
            },
        )

        src = re.search(
            r'class="mwai-chat mwai-chatgpt">.*<span>Send</span></button></div></div></div> <script defer src="(.*?)">',
            res.text,
        ).group(1)
        decoded_string = base64.b64decode(src.split(",")[-1]).decode("utf-8")
        return re.search(r"let restNonce = '(.*?)';", decoded_string).group(1)

    def transform(messages: list) -> list:
        def html_encode(string: str) -> str:
            # HTML entities; the extracted page had rendered these back to raw characters.
            table = {
                '"': "&quot;",
                "'": "&#39;",
                "&": "&amp;",
                ">": "&gt;",
                "<": "&lt;",
                "\n": "<br>",
                "\t": "&nbsp;&nbsp;&nbsp;&nbsp;",
                " ": "&nbsp;",
            }

            for key in table:
                string = string.replace(key, table[key])

            return string

        return [
            {
                "id": os.urandom(6).hex(),
                "role": message["role"],
                "content": message["content"],
                "who": "AI: " if message["role"] == "assistant" else "User: ",
                "html": html_encode(message["content"]),
            }
            for message in messages
        ]

    headers = {
        "authority": "chatgptlogin.ac",
        "accept": "*/*",
        "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
        "content-type": "application/json",
        "origin": "https://chatgptlogin.ac",
        "referer": "https://chatgptlogin.ac/use-chatgpt-free/",
        "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": '"Windows"',
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-origin",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
        "x-wp-nonce": get_nonce(),
    }

    conversation = transform(messages)

    json_data = {
        "env": "chatbot",
        "session": "N/A",
        "prompt": "Converse as if you were an AI assistant. Be friendly, creative.",
        "context": "Converse as if you were an AI assistant. Be friendly, creative.",
        "messages": conversation,
        "newMessage": messages[-1]["content"],
        "userName": '<div class="mwai-name-text">User:</div>',
        "aiName": '<div class="mwai-name-text">AI:</div>',
        "model": "gpt-3.5-turbo",
        "temperature": 0.8,
        "maxTokens": 1024,
        "maxResults": 1,
        "apiKey": "",
        "service": "openai",
        "embeddingsIndex": "",
        "stop": "",
        "clientId": os.urandom(6).hex(),
    }

    response = requests.post(
        "https://chatgptlogin.ac/wp-json/ai-chatbot/v1/chat",
        headers=headers,
        json=json_data,
    )

    return response.json()["reply"]


params = f"g4f.Providers.{os.path.basename(__file__)[:-3]} supports: " + "(%s)" % ", ".join(
    [
        f"{name}: {get_type_hints(_create_completion)[name].__name__}"
        for name in _create_completion.__code__.co_varnames[: _create_completion.__code__.co_argcount]
    ]
)
48 chat_gpt_microservice/g4f/Provider/Providers/DeepAi.py Normal file
@@ -0,0 +1,48 @@
import hashlib
import json
import os
import random
from typing import get_type_hints

import requests

url = "https://deepai.org"
model = ["gpt-3.5-turbo"]
supports_stream = True
needs_auth = False


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    def md5(text: str) -> str:
        return hashlib.md5(text.encode()).hexdigest()[::-1]

    def get_api_key(user_agent: str) -> str:
        part1 = str(random.randint(0, 10**11))
        part2 = md5(user_agent + md5(user_agent + md5(user_agent + part1 + "x")))

        return f"tryit-{part1}-{part2}"

    user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"

    headers = {"api-key": get_api_key(user_agent), "user-agent": user_agent}

    files = {"chat_style": (None, "chat"), "chatHistory": (None, json.dumps(messages))}

    r = requests.post(
        "https://api.deepai.org/chat_response",
        headers=headers,
        files=files,
        stream=True,
    )

    for chunk in r.iter_content(chunk_size=None):
        r.raise_for_status()
        yield chunk.decode()


params = f"g4f.Providers.{os.path.basename(__file__)[:-3]} supports: " + "(%s)" % ", ".join(
    [
        f"{name}: {get_type_hints(_create_completion)[name].__name__}"
        for name in _create_completion.__code__.co_varnames[: _create_completion.__code__.co_argcount]
    ]
)
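
Note: DeepAi.py mints its "tryit" API key entirely client-side: a random number plus a triple-nested, reversed MD5 over the user agent. A minimal sketch of the derivation in isolation (the short user agent here is illustrative; the provider uses a full Chrome UA string):

import hashlib
import random


def md5(text: str) -> str:
    # Reversed hex digest, exactly as in DeepAi.py above.
    return hashlib.md5(text.encode()).hexdigest()[::-1]


user_agent = "Mozilla/5.0"  # illustrative
part1 = str(random.randint(0, 10**11))
part2 = md5(user_agent + md5(user_agent + md5(user_agent + part1 + "x")))
print(f"tryit-{part1}-{part2}")  # e.g. tryit-12345678901-<32 reversed hex chars>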
66 chat_gpt_microservice/g4f/Provider/Providers/Easychat.py Normal file
@@ -0,0 +1,66 @@
import json
import os
from typing import get_type_hints

import requests

url = "https://free.easychat.work"
model = [
    "gpt-3.5-turbo",
    "gpt-3.5-turbo-16k",
    "gpt-3.5-turbo-16k-0613",
    "gpt-3.5-turbo-0613",
]
supports_stream = True
needs_auth = False


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    headers = {
        "authority": "free.easychat.work",
        "accept": "text/event-stream",
        "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
        "content-type": "application/json",
        "endpoint": "",
        "origin": "https://free.easychat.work",
        "plugins": "0",
        "referer": "https://free.easychat.work/",
        "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": '"macOS"',
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-origin",
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
        "usesearch": "false",
        "x-requested-with": "XMLHttpRequest",
    }

    json_data = {
        "messages": messages,
        "stream": True,
        "model": model,
        "temperature": 0.5,
        "presence_penalty": 0,
        "frequency_penalty": 0,
        "top_p": 1,
    }

    response = requests.post(
        "https://free.easychat.work/api/openai/v1/chat/completions",
        headers=headers,
        json=json_data,
    )

    for chunk in response.iter_lines():
        if b"content" in chunk:
            data = json.loads(chunk.decode().split("data: ")[1])
            yield (data["choices"][0]["delta"]["content"])


params = f"g4f.Providers.{os.path.basename(__file__)[:-3]} supports: " + "(%s)" % ", ".join(
    [
        f"{name}: {get_type_hints(_create_completion)[name].__name__}"
        for name in _create_completion.__code__.co_varnames[: _create_completion.__code__.co_argcount]
    ]
)
46 chat_gpt_microservice/g4f/Provider/Providers/Ezcht.py Normal file
@@ -0,0 +1,46 @@
import json
import os
from typing import get_type_hints

import requests

url = "https://gpt4.ezchat.top"
model = [
    "gpt-3.5-turbo",
    "gpt-3.5-turbo-16k",
    "gpt-3.5-turbo-16k-0613",
    "gpt-3.5-turbo-0613",
]
supports_stream = True
needs_auth = False


def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs):
    headers = {
        "Content-Type": "application/json",
    }
    data = {
        "model": model,
        "temperature": 0.7,
        "presence_penalty": 0,
        "messages": messages,
    }
    response = requests.post(url + "/api/openai/v1/chat/completions", json=data, stream=True)

    if stream:
        for chunk in response.iter_content(chunk_size=None):
            chunk = chunk.decode("utf-8")
            if chunk.strip():
                message = json.loads(chunk)["choices"][0]["message"]["content"]
                yield message
    else:
        message = response.json()["choices"][0]["message"]["content"]
        yield message


params = f"g4f.Providers.{os.path.basename(__file__)[:-3]} supports: " + "(%s)" % ", ".join(
    [
        f"{name}: {get_type_hints(_create_completion)[name].__name__}"
        for name in _create_completion.__code__.co_varnames[: _create_completion.__code__.co_argcount]
    ]
)
59 chat_gpt_microservice/g4f/Provider/Providers/Fakeopen.py Normal file
@@ -0,0 +1,59 @@
import json
import os
from typing import get_type_hints

import requests

url = "https://ai.fakeopen.com/v1/"
model = [
    "gpt-3.5-turbo",
    "gpt-3.5-turbo-0613",
    "gpt-3.5-turbo-16k",
    "gpt-3.5-turbo-16k-0613",
]

supports_stream = True
needs_auth = False


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    headers = {
        "Content-Type": "application/json",
        "accept": "text/event-stream",
        "Cache-Control": "no-cache",
        "Proxy-Connection": "keep-alive",
        "Authorization": f"Bearer {os.environ.get('FAKE_OPEN_KEY', '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')}",
    }

    json_data = {
        "messages": messages,
        "temperature": 1.0,
        "model": model,
        "stream": stream,
    }

    response = requests.post(
        "https://ai.fakeopen.com/v1/chat/completions",
        headers=headers,
        json=json_data,
        stream=True,
    )

    for token in response.iter_lines():
        decoded = token.decode("utf-8")
        if decoded == "[DONE]":
            break
        if decoded.startswith("data: "):
            data_str = decoded.replace("data: ", "")
            if data_str != "[DONE]":
                data = json.loads(data_str)
                if "choices" in data and "delta" in data["choices"][0] and "content" in data["choices"][0]["delta"]:
                    yield data["choices"][0]["delta"]["content"]


params = f"g4f.Providers.{os.path.basename(__file__)[:-3]} supports: " + "(%s)" % ", ".join(
    [
        f"{name}: {get_type_hints(_create_completion)[name].__name__}"
        for name in _create_completion.__code__.co_varnames[: _create_completion.__code__.co_argcount]
    ]
)
41 chat_gpt_microservice/g4f/Provider/Providers/Forefront.py Normal file
@@ -0,0 +1,41 @@
import json
import os
from typing import get_type_hints

import requests

url = "https://forefront.com"
model = ["gpt-3.5-turbo"]
supports_stream = True
needs_auth = False


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    json_data = {
        "text": messages[-1]["content"],
        "action": "noauth",
        "id": "",
        "parentId": "",
        "workspaceId": "",
        "messagePersona": "607e41fe-95be-497e-8e97-010a59b2e2c0",
        "model": "gpt-4",
        "messages": messages[:-1] if len(messages) > 1 else [],
        "internetMode": "auto",
    }
    response = requests.post(
        "https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat",
        json=json_data,
        stream=True,
    )
    for token in response.iter_lines():
        if b"delta" in token:
            token = json.loads(token.decode().split("data: ")[1])["delta"]
            yield (token)


params = f"g4f.Providers.{os.path.basename(__file__)[:-3]} supports: " + "(%s)" % ", ".join(
    [
        f"{name}: {get_type_hints(_create_completion)[name].__name__}"
        for name in _create_completion.__code__.co_varnames[: _create_completion.__code__.co_argcount]
    ]
)
68 chat_gpt_microservice/g4f/Provider/Providers/GetGpt.py Normal file
@@ -0,0 +1,68 @@
import json
import os
import uuid
from typing import get_type_hints

import requests
from Crypto.Cipher import AES

url = "https://chat.getgpt.world/"
model = ["gpt-3.5-turbo"]
supports_stream = True
needs_auth = False


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    def encrypt(e):
        t = os.urandom(8).hex().encode("utf-8")
        n = os.urandom(8).hex().encode("utf-8")
        r = e.encode("utf-8")
        cipher = AES.new(t, AES.MODE_CBC, n)
        ciphertext = cipher.encrypt(pad_data(r))
        return ciphertext.hex() + t.decode("utf-8") + n.decode("utf-8")

    def pad_data(data: bytes) -> bytes:
        block_size = AES.block_size
        padding_size = block_size - len(data) % block_size
        padding = bytes([padding_size] * padding_size)
        return data + padding

    headers = {
        "Content-Type": "application/json",
        "Referer": "https://chat.getgpt.world/",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
    }

    data = json.dumps(
        {
            "messages": messages,
            "frequency_penalty": kwargs.get("frequency_penalty", 0),
            "max_tokens": kwargs.get("max_tokens", 4000),
            "model": "gpt-3.5-turbo",
            "presence_penalty": kwargs.get("presence_penalty", 0),
            "temperature": kwargs.get("temperature", 1),
            "top_p": kwargs.get("top_p", 1),
            "stream": True,
            "uuid": str(uuid.uuid4()),
        }
    )

    res = requests.post(
        "https://chat.getgpt.world/api/chat/stream",
        headers=headers,
        json={"signature": encrypt(data)},
        stream=True,
    )

    for line in res.iter_lines():
        if b"content" in line:
            line_json = json.loads(line.decode("utf-8").split("data: ")[1])
            yield (line_json["choices"][0]["delta"]["content"])


params = f"g4f.Providers.{os.path.basename(__file__)[:-3]} supports: " + "(%s)" % ", ".join(
    [
        f"{name}: {get_type_hints(_create_completion)[name].__name__}"
        for name in _create_completion.__code__.co_varnames[: _create_completion.__code__.co_argcount]
    ]
)
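
Note: GetGpt.py's `encrypt` packs three fields into one string: the AES-128-CBC ciphertext as hex, then the 16-character key, then the 16-character IV (key and IV are each `os.urandom(8).hex()`, so 16 ASCII bytes). A sketch of the matching inverse, to make that layout explicit; `decrypt` is hypothetical and assumes PyCryptodome's padding helpers:

import json
import os

from Crypto.Cipher import AES
from Crypto.Util.Padding import pad, unpad


def encrypt(e: str) -> str:
    t = os.urandom(8).hex().encode("utf-8")  # 16-byte key (hex chars used as raw bytes)
    n = os.urandom(8).hex().encode("utf-8")  # 16-byte IV
    cipher = AES.new(t, AES.MODE_CBC, n)
    return cipher.encrypt(pad(e.encode("utf-8"), AES.block_size)).hex() + t.decode() + n.decode()


def decrypt(signature: str) -> str:
    # Layout: <ciphertext hex><16-char key><16-char IV>
    ciphertext = bytes.fromhex(signature[:-32])
    key, iv = signature[-32:-16].encode(), signature[-16:].encode()
    cipher = AES.new(key, AES.MODE_CBC, iv)
    return unpad(cipher.decrypt(ciphertext), AES.block_size).decode("utf-8")


payload = json.dumps({"messages": [], "model": "gpt-3.5-turbo"})
assert decrypt(encrypt(payload)) == payload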
@@ -0,0 +1,32 @@
import os
from typing import get_type_hints

import requests

url = "https://gpt4.xunika.uk/"
model = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo-0613"]
supports_stream = True
needs_auth = False


def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs):
    headers = {
        "Content-Type": "application/json",
    }
    data = {
        "model": model,
        "temperature": 0.7,
        "presence_penalty": 0,
        "messages": messages,
    }
    response = requests.post(url + "/api/openai/v1/chat/completions", json=data, stream=True)

    yield response.json()["choices"][0]["message"]["content"]


params = f"g4f.Providers.{os.path.basename(__file__)[:-3]} supports: " + "(%s)" % ", ".join(
    [
        f"{name}: {get_type_hints(_create_completion)[name].__name__}"
        for name in _create_completion.__code__.co_varnames[: _create_completion.__code__.co_argcount]
    ]
)
114 chat_gpt_microservice/g4f/Provider/Providers/H2o.py Normal file
@@ -0,0 +1,114 @@
import os
from json import loads
from typing import get_type_hints
from uuid import uuid4

from requests import Session

url = "https://gpt-gm.h2o.ai"
model = ["falcon-40b", "falcon-7b", "llama-13b"]
supports_stream = True
needs_auth = False

models = {
    "falcon-7b": "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3",
    "falcon-40b": "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1",
    "llama-13b": "h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b",
}


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    conversation = "instruction: this is a conversation between a user and an AI assistant; respond to the latest message, referring to the conversation if needed\n"
    for message in messages:
        conversation += "%s: %s\n" % (message["role"], message["content"])
    conversation += "assistant:"

    client = Session()
    client.headers = {
        "authority": "gpt-gm.h2o.ai",
        "origin": "https://gpt-gm.h2o.ai",
        "referer": "https://gpt-gm.h2o.ai/",
        "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": '"Windows"',
        "sec-fetch-dest": "document",
        "sec-fetch-mode": "navigate",
        "sec-fetch-site": "same-origin",
        "sec-fetch-user": "?1",
        "upgrade-insecure-requests": "1",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
    }

    client.get("https://gpt-gm.h2o.ai/")
    response = client.post(
        "https://gpt-gm.h2o.ai/settings",
        data={
            "ethicsModalAccepted": "true",
            "shareConversationsWithModelAuthors": "true",
            "ethicsModalAcceptedAt": "",
            "activeModel": "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1",
            "searchEnabled": "true",
        },
    )

    headers = {
        "authority": "gpt-gm.h2o.ai",
        "accept": "*/*",
        "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
        "origin": "https://gpt-gm.h2o.ai",
        "referer": "https://gpt-gm.h2o.ai/",
        "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": '"Windows"',
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-origin",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
    }

    json_data = {"model": models[model]}

    response = client.post("https://gpt-gm.h2o.ai/conversation", headers=headers, json=json_data)
    conversationId = response.json()["conversationId"]

    completion = client.post(
        f"https://gpt-gm.h2o.ai/conversation/{conversationId}",
        stream=True,
        json={
            "inputs": conversation,
            "parameters": {
                "temperature": kwargs.get("temperature", 0.4),
                "truncate": kwargs.get("truncate", 2048),
                "max_new_tokens": kwargs.get("max_new_tokens", 1024),
                "do_sample": kwargs.get("do_sample", True),
                "repetition_penalty": kwargs.get("repetition_penalty", 1.2),
                "return_full_text": kwargs.get("return_full_text", False),
            },
            "stream": True,
            "options": {
                "id": kwargs.get("id", str(uuid4())),
                "response_id": kwargs.get("response_id", str(uuid4())),
                "is_retry": False,
                "use_cache": False,
                "web_search_id": "",
            },
        },
    )

    for line in completion.iter_lines():
        if b"data" in line:
            line = loads(line.decode("utf-8").replace("data:", ""))
            token = line["token"]["text"]

            if token == "<|endoftext|>":
                break
            else:
                yield (token)


params = f"g4f.Providers.{os.path.basename(__file__)[:-3]} supports: " + "(%s)" % ", ".join(
    [
        f"{name}: {get_type_hints(_create_completion)[name].__name__}"
        for name in _create_completion.__code__.co_varnames[: _create_completion.__code__.co_argcount]
    ]
)
60 chat_gpt_microservice/g4f/Provider/Providers/Liaobots.py Normal file
@@ -0,0 +1,60 @@
import os
from typing import get_type_hints

import requests

url = "https://liaobots.com"
model = ["gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4"]
supports_stream = True
needs_auth = True
working = False

models = {
    "gpt-4": {"id": "gpt-4", "name": "GPT-4", "maxLength": 24000, "tokenLimit": 8000},
    "gpt-3.5-turbo": {
        "id": "gpt-3.5-turbo",
        "name": "GPT-3.5",
        "maxLength": 12000,
        "tokenLimit": 4000,
    },
    "gpt-3.5-turbo-16k": {
        "id": "gpt-3.5-turbo-16k",
        "name": "GPT-3.5-16k",
        "maxLength": 48000,
        "tokenLimit": 16000,
    },
}


def _create_completion(model: str, messages: list, stream: bool, chatId: str, **kwargs):
    print(kwargs)

    headers = {
        "authority": "liaobots.com",
        "content-type": "application/json",
        "origin": "https://liaobots.com",
        "referer": "https://liaobots.com/",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
        "x-auth-code": "qlcUMVn1KLMhd",
    }

    json_data = {
        "conversationId": chatId,
        "model": models[model],
        "messages": messages,
        "key": "",
        "prompt": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
    }

    response = requests.post("https://liaobots.com/api/chat", headers=headers, json=json_data, stream=True)

    for token in response.iter_content(chunk_size=2046):
        yield (token.decode("utf-8"))


params = f"g4f.Providers.{os.path.basename(__file__)[:-3]} supports: " + "(%s)" % ", ".join(
    [
        f"{name}: {get_type_hints(_create_completion)[name].__name__}"
        for name in _create_completion.__code__.co_varnames[: _create_completion.__code__.co_argcount]
    ]
)
50 chat_gpt_microservice/g4f/Provider/Providers/Lockchat.py Normal file
@@ -0,0 +1,50 @@
import json
import os
from typing import get_type_hints

import requests

url = "http://supertest.lockchat.app"
model = ["gpt-4", "gpt-3.5-turbo"]
supports_stream = True
needs_auth = False


def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs):
    payload = {
        "temperature": 0.7,
        "messages": messages,
        "model": model,
        "stream": True,
    }
    headers = {
        "user-agent": "ChatX/39 CFNetwork/1408.0.4 Darwin/22.5.0",
    }
    response = requests.post(
        "http://supertest.lockchat.app/v1/chat/completions",
        json=payload,
        headers=headers,
        stream=True,
    )
    for token in response.iter_lines():
        if b"The model: `gpt-4` does not exist" in token:
            print("error, retrying...")
            # yield from, so the retried call's tokens actually reach the caller
            # (a bare recursive call would create and discard the generator)
            yield from _create_completion(
                model=model,
                messages=messages,
                stream=stream,
                temperature=temperature,
                **kwargs,
            )
            return
        if b"content" in token:
            token = json.loads(token.decode("utf-8").split("data: ")[1])["choices"][0]["delta"].get("content")
            if token:
                yield (token)


params = f"g4f.Providers.{os.path.basename(__file__)[:-3]} supports: " + "(%s)" % ", ".join(
    [
        f"{name}: {get_type_hints(_create_completion)[name].__name__}"
        for name in _create_completion.__code__.co_varnames[: _create_completion.__code__.co_argcount]
    ]
)
26 chat_gpt_microservice/g4f/Provider/Providers/Mishalsgpt.py Normal file
@@ -0,0 +1,26 @@
import os
from typing import get_type_hints

import requests

url = "https://mishalsgpt.vercel.app"
model = ["gpt-3.5-turbo-16k-0613", "gpt-3.5-turbo"]
supports_stream = True
needs_auth = False


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    headers = {
        "Content-Type": "application/json",
    }
    data = {"model": model, "temperature": 0.7, "messages": messages}
    response = requests.post(url + "/api/openai/v1/chat/completions", headers=headers, json=data, stream=True)
    yield response.json()["choices"][0]["message"]["content"]


params = f"g4f.Providers.{os.path.basename(__file__)[:-3]} supports: " + "(%s)" % ", ".join(
    [
        f"{name}: {get_type_hints(_create_completion)[name].__name__}"
        for name in _create_completion.__code__.co_varnames[: _create_completion.__code__.co_argcount]
    ]
)
37 chat_gpt_microservice/g4f/Provider/Providers/Phind.py Normal file
@@ -0,0 +1,37 @@
import json
import os
import subprocess
from typing import get_type_hints

url = "https://phind.com"
model = ["gpt-4"]
supports_stream = True


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    path = os.path.dirname(os.path.realpath(__file__))
    config = json.dumps({"model": model, "messages": messages}, separators=(",", ":"))

    cmd = ["python", f"{path}/helpers/phind.py", config]

    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

    for line in iter(p.stdout.readline, b""):
        if b"<title>Just a moment...</title>" in line:
            os.system("clear" if os.name == "posix" else "cls")
            yield "Cloudflare error, please try again..."
            os._exit(0)

        else:
            if b"ping - 2023-" in line:
                continue

            yield line.decode("cp1251")  # [:-1]


params = f"g4f.Providers.{os.path.basename(__file__)[:-3]} supports: " + "(%s)" % ", ".join(
    [
        f"{name}: {get_type_hints(_create_completion)[name].__name__}"
        for name in _create_completion.__code__.co_varnames[: _create_completion.__code__.co_argcount]
    ]
)
29 chat_gpt_microservice/g4f/Provider/Providers/Theb.py Normal file
@@ -0,0 +1,29 @@
import json
import os
import subprocess
from typing import get_type_hints

url = "https://theb.ai"
model = ["gpt-3.5-turbo"]
supports_stream = True
needs_auth = False


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    path = os.path.dirname(os.path.realpath(__file__))
    config = json.dumps({"messages": messages, "model": model}, separators=(",", ":"))

    cmd = ["python3", f"{path}/helpers/theb.py", config]

    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

    for line in iter(p.stdout.readline, b""):
        yield line.decode("utf-8")


params = f"g4f.Providers.{os.path.basename(__file__)[:-3]} supports: " + "(%s)" % ", ".join(
    [
        f"{name}: {get_type_hints(_create_completion)[name].__name__}"
        for name in _create_completion.__code__.co_varnames[: _create_completion.__code__.co_argcount]
    ]
)
514
chat_gpt_microservice/g4f/Provider/Providers/Vercel.py
Normal file
@@ -0,0 +1,514 @@
import base64
import json
import os
import queue
import threading
from typing import get_type_hints

import execjs
from curl_cffi import requests

url = "https://play.vercel.ai"
supports_stream = True
needs_auth = False

models = {
    "claude-instant-v1": "anthropic:claude-instant-v1",
    "claude-v1": "anthropic:claude-v1",
    "alpaca-7b": "replicate:replicate/alpaca-7b",
    "stablelm-tuned-alpha-7b": "replicate:stability-ai/stablelm-tuned-alpha-7b",
    "bloom": "huggingface:bigscience/bloom",
    "bloomz": "huggingface:bigscience/bloomz",
    "flan-t5-xxl": "huggingface:google/flan-t5-xxl",
    "flan-ul2": "huggingface:google/flan-ul2",
    "gpt-neox-20b": "huggingface:EleutherAI/gpt-neox-20b",
    "oasst-sft-4-pythia-12b-epoch-3.5": "huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
    "santacoder": "huggingface:bigcode/santacoder",
    "command-medium-nightly": "cohere:command-medium-nightly",
    "command-xlarge-nightly": "cohere:command-xlarge-nightly",
    "code-cushman-001": "openai:code-cushman-001",
    "code-davinci-002": "openai:code-davinci-002",
    "gpt-3.5-turbo": "openai:gpt-3.5-turbo",
    "text-ada-001": "openai:text-ada-001",
    "text-babbage-001": "openai:text-babbage-001",
    "text-curie-001": "openai:text-curie-001",
    "text-davinci-002": "openai:text-davinci-002",
    "text-davinci-003": "openai:text-davinci-003",
}
model = models.keys()

vercel_models = {
    "anthropic:claude-instant-v1": {
        "id": "anthropic:claude-instant-v1",
        "provider": "anthropic",
        "providerHumanName": "Anthropic",
        "makerHumanName": "Anthropic",
        "minBillingTier": "hobby",
        "parameters": {
            "temperature": {"value": 1, "range": [0, 1]},
            "maximumLength": {"value": 200, "range": [50, 1024]},
            "topP": {"value": 1, "range": [0.1, 1]},
            "topK": {"value": 1, "range": [1, 500]},
            "presencePenalty": {"value": 1, "range": [0, 1]},
            "frequencyPenalty": {"value": 1, "range": [0, 1]},
            "stopSequences": {"value": ["\n\nHuman:"], "range": []},
        },
        "name": "claude-instant-v1",
    },
    "anthropic:claude-v1": {
        "id": "anthropic:claude-v1",
        "provider": "anthropic",
        "providerHumanName": "Anthropic",
        "makerHumanName": "Anthropic",
        "minBillingTier": "hobby",
        "parameters": {
            "temperature": {"value": 1, "range": [0, 1]},
            "maximumLength": {"value": 200, "range": [50, 1024]},
            "topP": {"value": 1, "range": [0.1, 1]},
            "topK": {"value": 1, "range": [1, 500]},
            "presencePenalty": {"value": 1, "range": [0, 1]},
            "frequencyPenalty": {"value": 1, "range": [0, 1]},
            "stopSequences": {"value": ["\n\nHuman:"], "range": []},
        },
        "name": "claude-v1",
    },
    "replicate:replicate/alpaca-7b": {
        "id": "replicate:replicate/alpaca-7b",
        "provider": "replicate",
        "providerHumanName": "Replicate",
        "makerHumanName": "Stanford",
        "parameters": {
            "temperature": {"value": 0.75, "range": [0.01, 5]},
            "maximumLength": {"value": 200, "range": [50, 512]},
            "topP": {"value": 0.95, "range": [0.01, 1]},
            "presencePenalty": {"value": 0, "range": [0, 1]},
            "frequencyPenalty": {"value": 0, "range": [0, 1]},
            "repetitionPenalty": {"value": 1.1765, "range": [0.01, 5]},
            "stopSequences": {"value": [], "range": []},
        },
        "version": "2014ee1247354f2e81c0b3650d71ca715bc1e610189855f134c30ecb841fae21",
        "name": "alpaca-7b",
    },
    "replicate:stability-ai/stablelm-tuned-alpha-7b": {
        "id": "replicate:stability-ai/stablelm-tuned-alpha-7b",
        "provider": "replicate",
        "makerHumanName": "StabilityAI",
        "providerHumanName": "Replicate",
        "parameters": {
            "temperature": {"value": 0.75, "range": [0.01, 5]},
            "maximumLength": {"value": 200, "range": [50, 512]},
            "topP": {"value": 0.95, "range": [0.01, 1]},
            "presencePenalty": {"value": 0, "range": [0, 1]},
            "frequencyPenalty": {"value": 0, "range": [0, 1]},
            "repetitionPenalty": {"value": 1.1765, "range": [0.01, 5]},
            "stopSequences": {"value": [], "range": []},
        },
        "version": "4a9a32b4fd86c2d047f1d271fa93972683ec6ef1cf82f402bd021f267330b50b",
        "name": "stablelm-tuned-alpha-7b",
    },
    "huggingface:bigscience/bloom": {
        "id": "huggingface:bigscience/bloom",
        "provider": "huggingface",
        "providerHumanName": "HuggingFace",
        "makerHumanName": "BigScience",
        "instructions": "Do NOT talk to Bloom as an entity, it's not a chatbot but a webpage/blog/article completion model. For the best results: mimic a few words of a webpage similar to the content you want to generate. Start a sentence as if YOU were writing a blog, webpage, math post, coding article and Bloom will generate a coherent follow-up.",
        "parameters": {
            "temperature": {"value": 0.5, "range": [0.1, 1]},
            "maximumLength": {"value": 200, "range": [50, 1024]},
            "topP": {"value": 0.95, "range": [0.01, 0.99]},
            "topK": {"value": 4, "range": [1, 500]},
            "repetitionPenalty": {"value": 1.03, "range": [0.1, 2]},
        },
        "name": "bloom",
    },
    "huggingface:bigscience/bloomz": {
        "id": "huggingface:bigscience/bloomz",
        "provider": "huggingface",
        "providerHumanName": "HuggingFace",
        "makerHumanName": "BigScience",
        "instructions": 'We recommend using the model to perform tasks expressed in natural language. For example, given the prompt "Translate to English: Je t\'aime.", the model will most likely answer "I love you.".',
        "parameters": {
            "temperature": {"value": 0.5, "range": [0.1, 1]},
            "maximumLength": {"value": 200, "range": [50, 1024]},
            "topP": {"value": 0.95, "range": [0.01, 0.99]},
            "topK": {"value": 4, "range": [1, 500]},
            "repetitionPenalty": {"value": 1.03, "range": [0.1, 2]},
        },
        "name": "bloomz",
    },
    "huggingface:google/flan-t5-xxl": {
        "id": "huggingface:google/flan-t5-xxl",
        "provider": "huggingface",
        "makerHumanName": "Google",
        "providerHumanName": "HuggingFace",
        "name": "flan-t5-xxl",
        "parameters": {
            "temperature": {"value": 0.5, "range": [0.1, 1]},
            "maximumLength": {"value": 200, "range": [50, 1024]},
            "topP": {"value": 0.95, "range": [0.01, 0.99]},
            "topK": {"value": 4, "range": [1, 500]},
            "repetitionPenalty": {"value": 1.03, "range": [0.1, 2]},
        },
    },
    "huggingface:google/flan-ul2": {
        "id": "huggingface:google/flan-ul2",
        "provider": "huggingface",
        "providerHumanName": "HuggingFace",
        "makerHumanName": "Google",
        "parameters": {
            "temperature": {"value": 0.5, "range": [0.1, 1]},
            "maximumLength": {"value": 200, "range": [50, 1024]},
            "topP": {"value": 0.95, "range": [0.01, 0.99]},
            "topK": {"value": 4, "range": [1, 500]},
            "repetitionPenalty": {"value": 1.03, "range": [0.1, 2]},
        },
        "name": "flan-ul2",
    },
    "huggingface:EleutherAI/gpt-neox-20b": {
        "id": "huggingface:EleutherAI/gpt-neox-20b",
        "provider": "huggingface",
        "providerHumanName": "HuggingFace",
        "makerHumanName": "EleutherAI",
        "parameters": {
            "temperature": {"value": 0.5, "range": [0.1, 1]},
            "maximumLength": {"value": 200, "range": [50, 1024]},
            "topP": {"value": 0.95, "range": [0.01, 0.99]},
            "topK": {"value": 4, "range": [1, 500]},
            "repetitionPenalty": {"value": 1.03, "range": [0.1, 2]},
            "stopSequences": {"value": [], "range": []},
        },
        "name": "gpt-neox-20b",
    },
    "huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5": {
        "id": "huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
        "provider": "huggingface",
        "providerHumanName": "HuggingFace",
        "makerHumanName": "OpenAssistant",
        "parameters": {
            "maximumLength": {"value": 200, "range": [50, 1024]},
            "typicalP": {"value": 0.2, "range": [0.1, 0.99]},
            "repetitionPenalty": {"value": 1, "range": [0.1, 2]},
        },
        "name": "oasst-sft-4-pythia-12b-epoch-3.5",
    },
    "huggingface:bigcode/santacoder": {
        "id": "huggingface:bigcode/santacoder",
        "provider": "huggingface",
        "providerHumanName": "HuggingFace",
        "makerHumanName": "BigCode",
        "instructions": 'The model was trained on GitHub code. As such it is not an instruction model and commands like "Write a function that computes the square root." do not work well. You should phrase commands like they occur in source code such as comments (e.g. # the following function computes the sqrt) or write a function signature and docstring and let the model complete the function body.',
        "parameters": {
            "temperature": {"value": 0.5, "range": [0.1, 1]},
            "maximumLength": {"value": 200, "range": [50, 1024]},
            "topP": {"value": 0.95, "range": [0.01, 0.99]},
            "topK": {"value": 4, "range": [1, 500]},
            "repetitionPenalty": {"value": 1.03, "range": [0.1, 2]},
        },
        "name": "santacoder",
    },
    "cohere:command-medium-nightly": {
        "id": "cohere:command-medium-nightly",
        "provider": "cohere",
        "providerHumanName": "Cohere",
        "makerHumanName": "Cohere",
        "name": "command-medium-nightly",
        "parameters": {
            "temperature": {"value": 0.9, "range": [0, 2]},
            "maximumLength": {"value": 200, "range": [50, 1024]},
            "topP": {"value": 1, "range": [0, 1]},
            "topK": {"value": 0, "range": [0, 500]},
            "presencePenalty": {"value": 0, "range": [0, 1]},
            "frequencyPenalty": {"value": 0, "range": [0, 1]},
            "stopSequences": {"value": [], "range": []},
        },
    },
    "cohere:command-xlarge-nightly": {
        "id": "cohere:command-xlarge-nightly",
        "provider": "cohere",
        "providerHumanName": "Cohere",
        "makerHumanName": "Cohere",
        "name": "command-xlarge-nightly",
        "parameters": {
            "temperature": {"value": 0.9, "range": [0, 2]},
            "maximumLength": {"value": 200, "range": [50, 1024]},
            "topP": {"value": 1, "range": [0, 1]},
            "topK": {"value": 0, "range": [0, 500]},
            "presencePenalty": {"value": 0, "range": [0, 1]},
            "frequencyPenalty": {"value": 0, "range": [0, 1]},
            "stopSequences": {"value": [], "range": []},
        },
    },
    "openai:gpt-4": {
        "id": "openai:gpt-4",
        "provider": "openai",
        "providerHumanName": "OpenAI",
        "makerHumanName": "OpenAI",
        "name": "gpt-4",
        "minBillingTier": "pro",
        "parameters": {
            "temperature": {"value": 0.7, "range": [0.1, 1]},
            "maximumLength": {"value": 200, "range": [50, 1024]},
            "topP": {"value": 1, "range": [0.1, 1]},
            "presencePenalty": {"value": 0, "range": [0, 1]},
            "frequencyPenalty": {"value": 0, "range": [0, 1]},
            "stopSequences": {"value": [], "range": []},
        },
    },
    "openai:code-cushman-001": {
        "id": "openai:code-cushman-001",
        "provider": "openai",
        "providerHumanName": "OpenAI",
        "makerHumanName": "OpenAI",
        "parameters": {
            "temperature": {"value": 0.5, "range": [0.1, 1]},
            "maximumLength": {"value": 200, "range": [50, 1024]},
            "topP": {"value": 1, "range": [0.1, 1]},
            "presencePenalty": {"value": 0, "range": [0, 1]},
            "frequencyPenalty": {"value": 0, "range": [0, 1]},
            "stopSequences": {"value": [], "range": []},
        },
        "name": "code-cushman-001",
    },
    "openai:code-davinci-002": {
        "id": "openai:code-davinci-002",
        "provider": "openai",
        "providerHumanName": "OpenAI",
        "makerHumanName": "OpenAI",
        "parameters": {
            "temperature": {"value": 0.5, "range": [0.1, 1]},
            "maximumLength": {"value": 200, "range": [50, 1024]},
            "topP": {"value": 1, "range": [0.1, 1]},
            "presencePenalty": {"value": 0, "range": [0, 1]},
            "frequencyPenalty": {"value": 0, "range": [0, 1]},
            "stopSequences": {"value": [], "range": []},
        },
        "name": "code-davinci-002",
    },
    "openai:gpt-3.5-turbo": {
        "id": "openai:gpt-3.5-turbo",
        "provider": "openai",
        "providerHumanName": "OpenAI",
        "makerHumanName": "OpenAI",
        "parameters": {
            "temperature": {"value": 0.7, "range": [0, 1]},
            "maximumLength": {"value": 200, "range": [50, 1024]},
            "topP": {"value": 1, "range": [0.1, 1]},
            "topK": {"value": 1, "range": [1, 500]},
            "presencePenalty": {"value": 1, "range": [0, 1]},
            "frequencyPenalty": {"value": 1, "range": [0, 1]},
            "stopSequences": {"value": [], "range": []},
        },
        "name": "gpt-3.5-turbo",
    },
    "openai:text-ada-001": {
        "id": "openai:text-ada-001",
        "provider": "openai",
        "providerHumanName": "OpenAI",
        "makerHumanName": "OpenAI",
        "name": "text-ada-001",
        "parameters": {
            "temperature": {"value": 0.5, "range": [0.1, 1]},
            "maximumLength": {"value": 200, "range": [50, 1024]},
            "topP": {"value": 1, "range": [0.1, 1]},
            "presencePenalty": {"value": 0, "range": [0, 1]},
            "frequencyPenalty": {"value": 0, "range": [0, 1]},
            "stopSequences": {"value": [], "range": []},
        },
    },
    "openai:text-babbage-001": {
        "id": "openai:text-babbage-001",
        "provider": "openai",
        "providerHumanName": "OpenAI",
        "makerHumanName": "OpenAI",
        "name": "text-babbage-001",
        "parameters": {
            "temperature": {"value": 0.5, "range": [0.1, 1]},
            "maximumLength": {"value": 200, "range": [50, 1024]},
            "topP": {"value": 1, "range": [0.1, 1]},
            "presencePenalty": {"value": 0, "range": [0, 1]},
            "frequencyPenalty": {"value": 0, "range": [0, 1]},
            "stopSequences": {"value": [], "range": []},
        },
    },
    "openai:text-curie-001": {
        "id": "openai:text-curie-001",
        "provider": "openai",
        "providerHumanName": "OpenAI",
        "makerHumanName": "OpenAI",
        "name": "text-curie-001",
        "parameters": {
            "temperature": {"value": 0.5, "range": [0.1, 1]},
            "maximumLength": {"value": 200, "range": [50, 1024]},
            "topP": {"value": 1, "range": [0.1, 1]},
            "presencePenalty": {"value": 0, "range": [0, 1]},
            "frequencyPenalty": {"value": 0, "range": [0, 1]},
            "stopSequences": {"value": [], "range": []},
        },
    },
    "openai:text-davinci-002": {
        "id": "openai:text-davinci-002",
        "provider": "openai",
        "providerHumanName": "OpenAI",
        "makerHumanName": "OpenAI",
        "name": "text-davinci-002",
        "parameters": {
            "temperature": {"value": 0.5, "range": [0.1, 1]},
            "maximumLength": {"value": 200, "range": [50, 1024]},
            "topP": {"value": 1, "range": [0.1, 1]},
            "presencePenalty": {"value": 0, "range": [0, 1]},
            "frequencyPenalty": {"value": 0, "range": [0, 1]},
            "stopSequences": {"value": [], "range": []},
        },
    },
    "openai:text-davinci-003": {
        "id": "openai:text-davinci-003",
        "provider": "openai",
        "providerHumanName": "OpenAI",
        "makerHumanName": "OpenAI",
        "name": "text-davinci-003",
        "parameters": {
            "temperature": {"value": 0.5, "range": [0.1, 1]},
            "maximumLength": {"value": 200, "range": [50, 1024]},
            "topP": {"value": 1, "range": [0.1, 1]},
            "presencePenalty": {"value": 0, "range": [0, 1]},
            "frequencyPenalty": {"value": 0, "range": [0, 1]},
            "stopSequences": {"value": [], "range": []},
        },
    },
}


# based on https://github.com/ading2210/vercel-llm-api // modified
class Client:
    def __init__(self):
        self.session = requests.Session()
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "en-US,en;q=0.5",
            "Te": "trailers",
            "Upgrade-Insecure-Requests": "1",
        }
        self.session.headers.update(self.headers)

    def get_token(self):
        # anti-bot challenge: the endpoint returns base64-encoded JSON with a JS
        # function ("c"), its argument ("a") and a server tag ("t"); the function
        # is evaluated with execjs and the result sent back base64-encoded
        b64 = self.session.get("https://sdk.vercel.ai/openai.jpeg").text
        data = json.loads(base64.b64decode(b64))

        code = "const globalThis = {data: `sentinel`}; function token() {return (%s)(%s)}" % (data["c"], data["a"])

        token_string = json.dumps(
            separators=(",", ":"),
            obj={"r": execjs.compile(code).call("token"), "t": data["t"]},
        )

        return base64.b64encode(token_string.encode()).decode()

    def get_default_params(self, model_id):
        return {key: param["value"] for key, param in vercel_models[model_id]["parameters"].items()}

    def generate(self, model_id: str, prompt: str, params: dict = {}):
        if ":" not in model_id:
            model_id = models[model_id]

        defaults = self.get_default_params(model_id)

        payload = (
            defaults
            | params
            | {
                "prompt": prompt,
                "model": model_id,
            }
        )

        headers = self.headers | {
            "Accept-Encoding": "gzip, deflate, br",
            "Custom-Encoding": self.get_token(),
            "Host": "sdk.vercel.ai",
            "Origin": "https://sdk.vercel.ai",
            "Referrer": "https://sdk.vercel.ai",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
        }

        chunks_queue = queue.Queue()
        error = None
        response = None

        def callback(data):
            chunks_queue.put(data.decode())

        def request_thread():
            nonlocal response, error
            for _ in range(3):
                try:
                    response = self.session.post(
                        "https://sdk.vercel.ai/api/generate",
                        json=payload,
                        headers=headers,
                        content_callback=callback,
                    )
                    response.raise_for_status()
                    break  # success: stop retrying (without this the request was repeated)

                except Exception as e:
                    if _ == 2:
                        error = e

                    else:
                        continue

        thread = threading.Thread(target=request_thread, daemon=True)
        thread.start()

        text = ""
        index = 0
        while True:
            try:
                chunk = chunks_queue.get(block=True, timeout=0.1)

            except queue.Empty:
                if error:
                    raise error

                elif response:
                    break

                else:
                    continue

            text += chunk
            lines = text.split("\n")

            if len(lines) - 1 > index:
                new = lines[index:-1]
                for word in new:
                    yield json.loads(word)
                index = len(lines) - 1


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    yield "Vercel is currently not working."
    return

    # everything below is unreachable while the provider is disabled
    conversation = "This is a conversation between a human and a language model, respond to the last message accordingly, referring to the past history of messages if needed.\n"

    for message in messages:
        conversation += "%s: %s\n" % (message["role"], message["content"])

    conversation += "assistant: "

    completion = Client().generate(model, conversation)

    for token in completion:
        yield token


params = f"g4f.Providers.{os.path.basename(__file__)[:-3]} supports: " + "(%s)" % ", ".join(
    [
        f"{name}: {get_type_hints(_create_completion)[name].__name__}"
        for name in _create_completion.__code__.co_varnames[: _create_completion.__code__.co_argcount]
    ]
)
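Since the module-level `_create_completion` is short-circuited, the `Client` class is the part worth reading: `get_token` solves Vercel's JS challenge and `generate` streams newline-delimited JSON tokens off a background thread. A hypothetical direct use of the class, assuming the endpoint were still accepting requests:

    client = Client()
    for token in client.generate("gpt-3.5-turbo", "human: Hello\nassistant: "):
        print(token, end="")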
39
chat_gpt_microservice/g4f/Provider/Providers/Weuseing.py
Normal file
@@ -0,0 +1,39 @@
import os
from typing import get_type_hints

import requests

url = "https://api.gptplus.one"
model = [
    "gpt-3.5-turbo",
    "gpt-3.5-turbo-16k",
    "gpt-3.5-turbo-16k-0613",
    "gpt-3.5-turbo-0613",
]
supports_stream = True
needs_auth = False


def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs):
    headers = {
        "Content-Type": "application/json",
        "Accept": "*/*",
        "Accept-Language": "ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7,ja;q=0.6,zh-TW;q=0.5,zh;q=0.4",
    }
    data = {
        "messages": messages,
        "model": model,
    }
    response = requests.post("https://api.gptplus.one/chat-process", json=data, stream=True)
    print(response)

    for token in response.iter_content(chunk_size=None):
        yield (token.decode("utf-8"))


params = f"g4f.Providers.{os.path.basename(__file__)[:-3]} supports: " + "(%s)" % ", ".join(
    [
        f"{name}: {get_type_hints(_create_completion)[name].__name__}"
        for name in _create_completion.__code__.co_varnames[: _create_completion.__code__.co_argcount]
    ]
)
72
chat_gpt_microservice/g4f/Provider/Providers/Wewordle.py
Normal file
@@ -0,0 +1,72 @@
import json
import os
import random
import string
import time
from typing import get_type_hints

import requests

url = "https://wewordle.org/gptapi/v1/android/turbo"
model = ["gpt-3.5-turbo"]
supports_stream = False
needs_auth = False


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    base = ""
    for message in messages:
        base += "%s: %s\n" % (message["role"], message["content"])
    base += "assistant:"
    # randomize user id and app id
    _user_id = "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=16))
    _app_id = "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=31))
    # current date in UTC, ISO-8601 format
    _request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime())
    headers = {
        "accept": "*/*",
        "pragma": "no-cache",
        "Content-Type": "application/json",
        "Connection": "keep-alive",
    }
    data = {
        "user": _user_id,
        "messages": [{"role": "user", "content": base}],
        # mimics an anonymous RevenueCat-style subscriber object from the Android app
        "subscriber": {
            "originalPurchaseDate": None,
            "originalApplicationVersion": None,
            "allPurchaseDatesMillis": {},
            "entitlements": {"active": {}, "all": {}},
            "allPurchaseDates": {},
            "allExpirationDatesMillis": {},
            "allExpirationDates": {},
            "originalAppUserId": f"$RCAnonymousID:{_app_id}",
            "latestExpirationDate": None,
            "requestDate": _request_date,
            "latestExpirationDateMillis": None,
            "nonSubscriptionTransactions": [],
            "originalPurchaseDateMillis": None,
            "managementURL": None,
            "allPurchasedProductIdentifiers": [],
            "firstSeen": _request_date,
            "activeSubscriptions": [],
        },
    }
    response = requests.post(url, headers=headers, data=json.dumps(data))
    if response.status_code == 200:
        _json = response.json()
        if "message" in _json:
            message_content = _json["message"]["content"]
            message_content = message_content.replace("**assistant:** ", "")
            yield message_content
    else:
        print(f"Error Occurred::{response.status_code}")
        return None


params = f"g4f.Providers.{os.path.basename(__file__)[:-3]} supports: " + "(%s)" % ", ".join(
    [
        f"{name}: {get_type_hints(_create_completion)[name].__name__}"
        for name in _create_completion.__code__.co_varnames[: _create_completion.__code__.co_argcount]
    ]
)
46
chat_gpt_microservice/g4f/Provider/Providers/Xiaor.py
Normal file
@@ -0,0 +1,46 @@
import json
import os
from typing import get_type_hints

import requests

url = "https://xiaor.eu.org"
model = [
    "gpt-3.5-turbo",
    "gpt-3.5-turbo-16k",
    "gpt-3.5-turbo-16k-0613",
    "gpt-3.5-turbo-0613",
]
supports_stream = True
needs_auth = False


def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs):
    headers = {
        "Content-Type": "application/json",
    }
    data = {
        "model": model,
        "temperature": 0.7,
        "presence_penalty": 0,
        "messages": messages,
    }
    response = requests.post(url + "/p1/v1/chat/completions", json=data, stream=True)

    if stream:
        for chunk in response.iter_content(chunk_size=None):
            chunk = chunk.decode("utf-8")
            if chunk.strip():
                message = json.loads(chunk)["choices"][0]["message"]["content"]
                yield message
    else:
        message = response.json()["choices"][0]["message"]["content"]
        yield message


params = f"g4f.Providers.{os.path.basename(__file__)[:-3]} supports: " + "(%s)" % ", ".join(
    [
        f"{name}: {get_type_hints(_create_completion)[name].__name__}"
        for name in _create_completion.__code__.co_varnames[: _create_completion.__code__.co_argcount]
    ]
)
20
chat_gpt_microservice/g4f/Provider/Providers/You.py
Normal file
@@ -0,0 +1,20 @@
import json
import os
import subprocess

url = "https://you.com"
model = "gpt-3.5-turbo"
supports_stream = True
needs_auth = False


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    path = os.path.dirname(os.path.realpath(__file__))
    config = json.dumps({"messages": messages}, separators=(",", ":"))

    cmd = ["python3", f"{path}/helpers/you.py", config]

    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

    for line in iter(p.stdout.readline, b""):
        yield line.decode("utf-8")  # [:-1]
45
chat_gpt_microservice/g4f/Provider/Providers/Yqcloud.py
Normal file
@@ -0,0 +1,45 @@
import os
from typing import get_type_hints

import requests

url = "https://chat9.yqcloud.top/"
model = [
    "gpt-3.5-turbo",
]
supports_stream = True
needs_auth = False


def _create_completion(model: str, messages: list, stream: bool, chatId: str, **kwargs):
    headers = {
        "authority": "api.aichatos.cloud",
        "origin": "https://chat9.yqcloud.top",
        "referer": "https://chat9.yqcloud.top/",
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
    }

    json_data = {
        "prompt": str(messages),
        "userId": f"#/chat/{chatId}",
        "network": True,
        "apikey": "",
        "system": "",
        "withoutContext": False,
    }
    response = requests.post(
        "https://api.aichatos.cloud/api/generateStream",
        headers=headers,
        json=json_data,
        stream=True,
    )
    for token in response.iter_content(chunk_size=2046):
        yield (token.decode("utf-8"))


params = f"g4f.Providers.{os.path.basename(__file__)[:-3]} supports: " + "(%s)" % ", ".join(
    [
        f"{name}: {get_type_hints(_create_completion)[name].__name__}"
        for name in _create_completion.__code__.co_varnames[: _create_completion.__code__.co_argcount]
    ]
)
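Note that Yqcloud adds a required `chatId` argument on top of the common `(model, messages, stream)` signature, so callers must supply one. A hypothetical call (the id value is illustrative):

    for token in _create_completion("gpt-3.5-turbo", [{"role": "user", "content": "Hi"}], True, chatId="abc123"):
        print(token, end="")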
63
chat_gpt_microservice/g4f/Provider/Providers/Zeabur.py
Normal file
@@ -0,0 +1,63 @@
import os
from typing import get_type_hints

import requests

url = "https://gptleg.zeabur.app"
model = [
    "gpt-3.5-turbo",
    "gpt-3.5-turbo-0301",
    "gpt-3.5-turbo-16k",
    "gpt-4",
    "gpt-4-0613",
]
supports_stream = True
needs_auth = False


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    headers = {
        "Authority": "chat.dfehub.com",
        "Content-Type": "application/json",
        "Method": "POST",
        "Path": "/api/openai/v1/chat/completions",
        "Scheme": "https",
        "Accept": "text/event-stream",
        "Accept-Language": "pt-BR,pt;q=0.9,en-US;q=0.8,en;q=0.7,zh-CN;q=0.6,zh;q=0.5",
        "Origin": "https://gptleg.zeabur.app",
        "Referer": "https://gptleg.zeabur.app/",
        "Sec-Ch-Ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
        "Sec-Ch-Ua-Mobile": "?0",
        "Sec-Ch-Ua-Platform": '"Windows"',
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
        "X-Requested-With": "XMLHttpRequest",
    }

    data = {
        "model": model,
        "temperature": 0.7,
        "max_tokens": "16000",
        "presence_penalty": 0,
        "messages": messages,
    }

    response = requests.post(
        url + "/api/openai/v1/chat/completions",
        headers=headers,
        json=data,
        stream=stream,
    )

    yield response.json()["choices"][0]["message"]["content"]


params = f"g4f.Providers.{os.path.basename(__file__)[:-3]} supports: " + "(%s)" % ", ".join(
    [
        f"{name}: {get_type_hints(_create_completion)[name].__name__}"
        for name in _create_completion.__code__.co_varnames[: _create_completion.__code__.co_argcount]
    ]
)
@@ -0,0 +1,53 @@
import json
import sys
from re import findall

from curl_cffi import requests

config = json.loads(sys.argv[1])
prompt = config["messages"][-1]["content"]

headers = {
    "authority": "api.gptplus.one",
    "accept": "application/json, text/plain, */*",
    "accept-language": "ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7,ja;q=0.6,zh-TW;q=0.5,zh;q=0.4",
    "content-type": "application/octet-stream",
    "origin": "https://ai.gptforlove.com/",
    "referer": "https://ai.gptforlove.com/",
    "sec-ch-ua": '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": '"macOS"',
    "sec-fetch-dest": "empty",
    "sec-fetch-mode": "cors",
    "sec-fetch-site": "cross-site",
    "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",
}

json_data = {"prompt": prompt, "options": {}}


def format(chunk):
    try:
        completion_chunk = findall(r'content":"(.*)"},"fin', chunk.decode())[0]
        print(completion_chunk, flush=True, end="")

    except Exception:
        print(f"[ERROR] an error occurred, retrying... | [[{chunk.decode()}]]", flush=True)
        return


while True:
    try:
        response = requests.post(
            "https://api.gptplus.one/api/chat-process",
            headers=headers,
            json=json_data,
            content_callback=format,
            impersonate="chrome110",
        )

        exit(0)

    except Exception as e:
        print("[ERROR] an error occurred, retrying... |", e, flush=True)
        continue
81
chat_gpt_microservice/g4f/Provider/Providers/helpers/phind.py
Normal file
@@ -0,0 +1,81 @@
import datetime
import json
import sys
import urllib.parse

from curl_cffi import requests

config = json.loads(sys.argv[1])
prompt = config["messages"][-1]["content"]

skill = "expert" if config["model"] == "gpt-4" else "intermediate"

json_data = json.dumps(
    {
        "question": prompt,
        "options": {
            "skill": skill,
            "date": datetime.datetime.now().strftime("%d/%m/%Y"),
            "language": "en",
            "detailed": True,
            "creative": True,
            "customLinks": [],
        },
    },
    separators=(",", ":"),
)

headers = {
    "Content-Type": "application/json",
    "Pragma": "no-cache",
    "Accept": "*/*",
    "Sec-Fetch-Site": "same-origin",
    "Accept-Language": "en-GB,en;q=0.9",
    "Cache-Control": "no-cache",
    "Sec-Fetch-Mode": "cors",
    "Content-Length": str(len(json_data)),
    "Origin": "https://www.phind.com",
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.4 Safari/605.1.15",
    "Referer": f"https://www.phind.com/search?q={urllib.parse.quote(prompt)}&source=searchbox",
    "Connection": "keep-alive",
    "Host": "www.phind.com",
    "Sec-Fetch-Dest": "empty",
}


def output(chunk):
    try:
        if b"PHIND_METADATA" in chunk:
            return

        if chunk == b"data: \r\ndata: \r\ndata: \r\n\r\n":
            chunk = b"data: \n\r\n\r\n"

        chunk = chunk.decode()

        # strip the SSE "data: " framing and CRLF padding before printing
        chunk = chunk.replace("data: \r\n\r\ndata: ", "data: \n")
        chunk = chunk.replace("\r\ndata: \r\ndata: \r\n\r\n", "\n\r\n\r\n")
        chunk = chunk.replace("data: ", "").replace("\r\n\r\n", "")

        print(chunk, flush=True, end="")

    except json.decoder.JSONDecodeError:
        pass


while True:
    try:
        response = requests.post(
            "https://www.phind.com/api/infer/answer",
            headers=headers,
            data=json_data,
            content_callback=output,
            timeout=999999,
            impersonate="safari15_5",
        )

        exit(0)

    except Exception as e:
        print("an error occurred, retrying... |", e, flush=True)
        continue
53
chat_gpt_microservice/g4f/Provider/Providers/helpers/theb.py
Normal file
@@ -0,0 +1,53 @@
import json
import sys
from re import findall

from curl_cffi import requests

config = json.loads(sys.argv[1])
prompt = config["messages"][-1]["content"]

headers = {
    "authority": "chatbot.theb.ai",
    "accept": "application/json, text/plain, */*",
    "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
    "content-type": "application/json",
    "origin": "https://chatbot.theb.ai",
    "referer": "https://chatbot.theb.ai/",
    "sec-ch-ua": '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": '"macOS"',
    "sec-fetch-dest": "empty",
    "sec-fetch-mode": "cors",
    "sec-fetch-site": "same-origin",
    "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",
}

json_data = {"prompt": prompt, "options": {}}


def format(chunk):
    try:
        completion_chunk = findall(r'content":"(.*)"},"fin', chunk.decode())[0]
        print(completion_chunk, flush=True, end="")

    except Exception:
        print(f"[ERROR] an error occurred, retrying... | [[{chunk.decode()}]]", flush=True)
        return


while True:
    try:
        response = requests.post(
            "https://chatbot.theb.ai/api/chat-process",
            headers=headers,
            json=json_data,
            content_callback=format,
            impersonate="chrome110",
        )

        exit(0)

    except Exception as e:
        print("[ERROR] an error occurred, retrying... |", e, flush=True)
        continue
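The `format` callback extracts the `content` field with a regex rather than a JSON parser, presumably because chunks can arrive as partial JSON. A quick illustration of what the pattern captures (the sample chunk is made up):

    >>> from re import findall
    >>> sample = '{"role":"assistant","content":"Hello there"},"finish_reason":null}'
    >>> findall(r'content":"(.*)"},"fin', sample)
    ['Hello there']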
82
chat_gpt_microservice/g4f/Provider/Providers/helpers/you.py
Normal file
@@ -0,0 +1,82 @@
import json
import sys
import urllib.parse

from curl_cffi import requests

config = json.loads(sys.argv[1])
messages = config["messages"]
prompt = ""


def transform(messages: list) -> list:
    # fold the flat role/content list into you.com's question/answer pairs
    result = []
    i = 0

    while i < len(messages):
        if messages[i]["role"] == "user":
            question = messages[i]["content"]
            i += 1

            if i < len(messages) and messages[i]["role"] == "assistant":
                answer = messages[i]["content"]
                i += 1
            else:
                answer = ""

            result.append({"question": question, "answer": answer})

        elif messages[i]["role"] == "assistant":
            result.append({"question": "", "answer": messages[i]["content"]})
            i += 1

        elif messages[i]["role"] == "system":
            result.append({"question": messages[i]["content"], "answer": ""})
            i += 1

    return result


headers = {
    "Content-Type": "application/x-www-form-urlencoded",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    "Sec-Fetch-Site": "same-origin",
    "Accept-Language": "en-GB,en;q=0.9",
    "Sec-Fetch-Mode": "navigate",
    "Host": "you.com",
    "Origin": "https://you.com",
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.4 Safari/605.1.15",
    "Referer": "https://you.com/api/streamingSearch?q=nice&safeSearch=Moderate&onShoppingPage=false&mkt=&responseFilter=WebPages,Translations,TimeZone,Computation,RelatedSearches&domain=youchat&queryTraceId=7a6671f8-5881-404d-8ea3-c3f8301f85ba&chat=%5B%7B%22question%22%3A%22hi%22%2C%22answer%22%3A%22Hello!%20How%20can%20I%20assist%20you%20today%3F%22%7D%5D&chatId=7a6671f8-5881-404d-8ea3-c3f8301f85ba&__cf_chl_tk=ex2bw6vn5vbLsUm8J5rDYUC0Bjzc1XZqka6vUl6765A-1684108495-0-gaNycGzNDtA",
    "Connection": "keep-alive",
    "Sec-Fetch-Dest": "document",
    "Priority": "u=0, i",
}

if messages[-1]["role"] == "user":
    prompt = messages[-1]["content"]
    messages = messages[:-1]

params = urllib.parse.urlencode({"q": prompt, "domain": "youchat", "chat": transform(messages)})


def output(chunk):
    if b'"youChatToken"' in chunk:
        chunk_json = json.loads(chunk.decode().split("data: ")[1])

        print(chunk_json["youChatToken"], flush=True, end="")


while True:
    try:
        response = requests.get(
            f"https://you.com/api/streamingSearch?{params}",
            headers=headers,
            content_callback=output,
            impersonate="safari15_5",
        )

        exit(0)

    except Exception as e:
        print("an error occurred, retrying... |", e, flush=True)
        continue
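One caveat worth noting: `urllib.parse.urlencode` stringifies the `chat` list via `str()`, so the query parameter carries Python-repr text (single quotes) rather than the URL-encoded JSON that the hard-coded Referer string suggests the endpoint expects. If that ever breaks, an untested alternative would be to serialize explicitly:

    params = urllib.parse.urlencode({"q": prompt, "domain": "youchat", "chat": json.dumps(transform(messages))})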
44
chat_gpt_microservice/g4f/Provider/Providers/hteyun.py
Normal file
@@ -0,0 +1,44 @@
import os
from typing import get_type_hints

import requests

url = "https://hteyun.com"
model = [
    "gpt-3.5-turbo",
    "gpt-3.5-turbo-16k",
    "gpt-3.5-turbo-16k-0613",
    "gpt-3.5-turbo-0613",
]
supports_stream = True
needs_auth = False


def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs):
    headers = {
        "Content-Type": "application/json",
        "Accept": "application/json, text/plain, */*",
        "Accept-Language": "ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7,ja;q=0.6,zh-TW;q=0.5,zh;q=0.4",
        "Origin": "https://hteyun.com",
        "Referer": "https://hteyun.com/chat/",
    }
    data = {
        "messages": messages,
        "model": model,
        "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using russian language.",
        "temperature": 0.7,
        "presence_penalty": 0,
    }
    response = requests.post(url + "/api/chat-stream", json=data, headers=headers, stream=True)
    print(response.json())

    # extract the text from the response
    return response.json()["text"]


params = f"g4f.Providers.{os.path.basename(__file__)[:-3]} supports: " + "(%s)" % ", ".join(
    [
        f"{name}: {get_type_hints(_create_completion)[name].__name__}"
        for name in _create_completion.__code__.co_varnames[: _create_completion.__code__.co_argcount]
    ]
)
35
chat_gpt_microservice/g4f/Provider/__init__.py
Normal file
@@ -0,0 +1,35 @@
from . import Provider
from .Providers import (
    Aichat,
    Ails,
    AiService,
    Bard,
    Better,
    Bing,
    ChatFree,
    ChatgptAi,
    ChatgptLogin,
    DeepAi,
    Easychat,
    Ezcht,
    Fakeopen,
    Forefront,
    GetGpt,
    Gravityengine,
    H2o,
    Liaobots,
    Lockchat,
    Mishalsgpt,
    Phind,
    Theb,
    Vercel,
    Weuseing,
    Wewordle,
    Xiaor,
    You,
    Yqcloud,
    Zeabur,
    hteyun,
)

Palm = Bard
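With the providers re-exported here, a completion goes through the package entry point. A minimal sketch, assuming this mirror keeps upstream gpt4free's `g4f.ChatCompletion.create` interface:

    import g4f

    response = g4f.ChatCompletion.create(
        model="gpt-3.5-turbo",
        provider=g4f.Provider.Mishalsgpt,
        messages=[{"role": "user", "content": "Hello"}],
        stream=True,
    )
    for token in response:
        print(token, end="")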