Forked from g4f/gpt4free
Comparing changes
10 commits

Author | SHA1 | Date
---|---|---
g4f | cdf4e72541 |
bkutasi | 2a2e05d4dc |
bkutasi | bc309e2855 |
abc | d21dfa3293 |
abc | 6fc11056fa |
abc | 240d6cf87a |
abc | f10d74eb6a |
abc | abaed3da53 |
abc | 61d14252fd |
abc | e4871265e8 |
README.md (32 changes)
@@ -29,20 +29,32 @@ for token in chat_completion:
 providers:
 ```py
-g4f.Providers.You
-g4f.Providers.Ails
-g4f.Providers.Phind
+from g4f.Provider import (
+    Phind,
+    You,
+    Bing,
+    Openai,
+    Yqcloud,
+    Theb,
+    Aichat,
+    Ora,
+    Aws,
+    Bard,
+    Vercel,
+    Pierangelo,
+    Forefront
+)
 
 # usage:
 
-response = g4f.ChatCompletion.create(..., provider=g4f.Providers.ProviderName)
+response = g4f.ChatCompletion.create(..., provider=ProviderName)
 ```
 
 ```py
 import g4f
 
-print(g4f.Providers.Ails.params) # supported args
+print(g4f.Provider.Ails.params) # supported args
 
 # Automatic selection of provider
 
@@ -61,7 +73,7 @@ print(response)
 
 # Set with provider
-response = g4f.ChatCompletion.create(model='gpt-3.5-turbo', provider=g4f.Providers.Phind, messages=[
+response = g4f.ChatCompletion.create(model='gpt-3.5-turbo', provider=g4f.Provider.Openai, messages=[
     {"role": "user", "content": "Hello world"}], stream=True)
 
 for message in response:
@@ -71,11 +83,11 @@ for message in response:
 ### Dev
 
 (more instructions soon)
-the `g4f.Providers` class
+the `g4f.Provider` class
 
 default:
 
-`./g4f/Providers/ProviderName.py`:
+`./g4f/Provider/Providers/ProviderName.py`:
 ```python
 import os
 
@@ -89,6 +101,6 @@ def _create_completion(prompt: str, args...):
     yield ...
 
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+params = f'g4f.Provider.{os.path.basename(__file__)[:-3]} supports: ' + \
     ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
 ```
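For orientation, the net effect of the README change is that providers move from the flat `g4f.Providers` namespace to `g4f.Provider`, and `_create_completion` gains an explicit `stream` argument. A minimal sketch of the new call shape, assembled from the lines above (the choice of `Forefront` here is arbitrary):

```py
import g4f
from g4f.Provider import Forefront  # any provider from the new namespace works here

# provider is optional; when omitted, the model's best_site default is used
response = g4f.ChatCompletion.create(model='gpt-3.5-turbo', provider=Forefront,
                                     messages=[{"role": "user", "content": "Hello world"}],
                                     stream=True)
for message in response:
    print(message, end="")
```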
@@ -0,0 +1,12 @@
+import g4f
+import sys
+
+# Automatic selection of provider, streamed completion
+response = g4f.ChatCompletion.create(model='gpt-3.5-turbo',
+                                     messages=[{"role": "user",
+                                                "content": "Write a poem about a tree."}],
+                                     stream=True)
+
+for message in response:
+    print(message, end="")
+    sys.stdout.flush()
@@ -0,0 +1,32 @@
+import g4f
+import sys
+
+# Provider selection
+provider = g4f.Provider.Phind
+
+# Streaming is not supported by these providers
+if provider in {g4f.Provider.Aws, g4f.Provider.Ora,
+                g4f.Provider.Bard, g4f.Provider.Aichat}:
+    stream = False
+else:
+    stream = True
+
+print(provider.params) # supported args
+
+# Getting the response
+response = g4f.ChatCompletion.create(model='gpt-4',
+                                     messages=[{"role": "user",
+                                                "content": "Write a poem about a tree."}],
+                                     stream=stream,
+                                     provider=provider)
+
+# Printing the response
+if stream:
+    for message in response:
+        print(message, end="")
+        sys.stdout.flush()
+    print("\n")
+else:
+    print(response)
@@ -0,0 +1,17 @@
+import openai
+import sys
+
+openai.api_key = ""
+openai.api_base = "http://127.0.0.1:1337"
+
+chat_completion = openai.ChatCompletion.create(stream=True,
+                                               model="gpt-3.5-turbo",
+                                               messages=[{"role": "user",
+                                                          "content": "Write a poem about a tree."}])
+
+for token in chat_completion:
+    content = token["choices"][0]["delta"].get("content")
+    if content is not None:
+        print(content, end="")
+        sys.stdout.flush()
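Because the interference server speaks the OpenAI wire format, the openai client above is optional; a plain HTTP sketch of the same request in its non-streaming form (payload keys and response shape taken from the Flask app later in this diff):

```py
import requests

resp = requests.post('http://127.0.0.1:1337/chat/completions',
                     json={'model': 'gpt-3.5-turbo', 'stream': False,
                           'messages': [{'role': 'user', 'content': 'Write a poem about a tree.'}]})
# non-streaming responses come back as a single chat.completion object
print(resp.json()['choices'][0]['message']['content'])
```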
@@ -0,0 +1,12 @@
+import os
+from ..typing import sha256, Dict, get_type_hints
+
+url = None
+model = None
+supports_stream = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    return
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
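The `params` string at the bottom of this template (and of every provider below) is runtime introspection, not configuration: `__code__.co_varnames[:co_argcount]` lists the declared positional parameters and `get_type_hints` maps them to their annotations. The same idiom in isolation:

```py
from typing import get_type_hints

def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    return

# positional parameter names, then their annotated types
names = _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]
hints = get_type_hints(_create_completion)
print(', '.join(f'{name}: {hints[name].__name__}' for name in names))
# prints: model: str, messages: list, stream: bool
```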
@@ -0,0 +1,37 @@
+import os, requests
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://chat-gpt.org/chat'
+model = ['gpt-3.5-turbo']
+supports_stream = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    headers = {
+        'authority': 'chat-gpt.org',
+        'accept': '*/*',
+        'cache-control': 'no-cache',
+        'content-type': 'application/json',
+        'origin': 'https://chat-gpt.org',
+        'pragma': 'no-cache',
+        'referer': 'https://chat-gpt.org/chat',
+        'sec-ch-ua-mobile': '?0',
+        'sec-ch-ua-platform': '"macOS"',
+        'sec-fetch-dest': 'empty',
+        'sec-fetch-mode': 'cors',
+        'sec-fetch-site': 'same-origin',
+        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
+    }
+
+    json_data = {
+        'message': messages[-1]['content'],
+        'temperature': 1,
+        'presence_penalty': 0,
+        'top_p': 1,
+        'frequency_penalty': 0
+    }
+
+    response = requests.post('https://chat-gpt.org/api/text', headers=headers, json=json_data)
+    yield response.json()['message']
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
@@ -0,0 +1,26 @@
+import os
+import requests
+
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://4aiu6ctrknfxkoaigkigzh5lwm0cciuc.lambda-url.ap-east-1.on.aws/chat/completions'
+model = ['gpt-3.5-turbo', 'gpt-4']
+supports_stream = False
+
+class Auth(requests.auth.AuthBase):
+    def __init__(self):
+        self.token = 'sk-1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKL'
+
+    def __call__(self, r):
+        r.headers["authorization"] = "Bearer " + self.token
+        return r
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+
+    response = requests.post(url,
+        auth=Auth(), json={"model": model, "messages": messages})
+
+    yield (response.json()['choices'][0]['message']['content'])
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
@@ -0,0 +1,76 @@
+# implement proxy argument
+
+import os, requests, json, browser_cookie3, re, random
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://bard.google.com'
+model = ['Palm2']
+supports_stream = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    psid = {cookie.name: cookie.value for cookie in browser_cookie3.chrome(
+        domain_name='.google.com')}['__Secure-1PSID']
+
+    formatted = '\n'.join([
+        '%s: %s' % (message['role'], message['content']) for message in messages
+    ])
+    prompt = f'{formatted}\nAssistant:'
+
+    proxy = None
+
+    if proxy == None:
+        raise Exception('Proxy is required for Bard (set in g4f/Provider/Providers/Bard.py line 18)')
+
+    snlm0e = False
+    conversation_id = None
+    response_id = None
+    choice_id = None
+
+    client = requests.Session()
+    client.proxies = {
+        'http': f'https://{proxy}',
+        'https': f'https://{proxy}'} if proxy else None
+
+    client.headers = {
+        'authority': 'bard.google.com',
+        'content-type': 'application/x-www-form-urlencoded;charset=UTF-8',
+        'origin': 'https://bard.google.com',
+        'referer': 'https://bard.google.com/',
+        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
+        'x-same-domain': '1',
+        'cookie': f'__Secure-1PSID={psid}'
+    }
+
+    snlm0e = re.search(r'SNlM0e\":\"(.*?)\"',
+                       client.get('https://bard.google.com/').text).group(1) if not snlm0e else snlm0e
+
+    params = {
+        'bl': 'boq_assistant-bard-web-server_20230326.21_p0',
+        '_reqid': random.randint(1111, 9999),
+        'rt': 'c'
+    }
+
+    data = {
+        'at': snlm0e,
+        'f.req': json.dumps([None, json.dumps([[prompt], None, [conversation_id, response_id, choice_id]])])}
+
+    intents = '.'.join([
+        'assistant',
+        'lamda',
+        'BardFrontendService'
+    ])
+
+    response = client.post(f'https://bard.google.com/_/BardChatUi/data/{intents}/StreamGenerate',
+                           data=data, params=params)
+
+    chat_data = json.loads(response.content.splitlines()[3])[0][2]
+    if chat_data:
+        json_chat_data = json.loads(chat_data)
+
+        yield json_chat_data[0][0]
+
+    else:
+        yield 'error'
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
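Note the deliberate guard above: `proxy` is hard-coded to `None`, so Bard raises until a value is filled in at the indicated line. Since the session builds its proxy URLs as `https://{proxy}`, the value is presumably a bare `host:port` (or `user:pass@host:port`) string; a purely hypothetical example:

```py
# hypothetical value for line 18 of g4f/Provider/Providers/Bard.py
proxy = '127.0.0.1:8080'  # becomes {'http': 'https://127.0.0.1:8080', 'https': 'https://127.0.0.1:8080'}
```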
@@ -0,0 +1,28 @@
+import os
+import json
+import time
+import subprocess
+
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://bing.com/chat'
+model = ['gpt-3.5-turbo', 'gpt-4']
+supports_stream = True
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    path = os.path.dirname(os.path.realpath(__file__))
+    config = json.dumps({
+        'messages': messages,
+        'model': model}, separators=(',', ':'))
+
+    cmd = ['python3', f'{path}/helpers/bing.py', config]
+
+    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+
+    for line in iter(p.stdout.readline, b''):
+        #print(line)
+        yield line.decode('utf-8') #[:-1]
+
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
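Bing is the first of several providers in this diff (Openai, Phind, Theb, You follow the same shape) that delegate to a helper script: the request is serialized to compact JSON, passed as the helper's single argv argument, and whatever the helper prints to stdout is yielded back line by line. A hypothetical minimal helper illustrating the contract (`helpers/echo.py` is not part of this diff):

```py
# helpers/echo.py -- hypothetical helper following the same argv/stdout contract
import sys
import json

config = json.loads(sys.argv[1])  # the provider passes exactly one JSON argument

# anything printed here is re-yielded by the provider's generator
print(config['messages'][-1]['content'], flush=True)
```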
@@ -0,0 +1,36 @@
+import os
+import json
+import requests
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'forefront.com'
+model = ['gpt-3.5-turbo']
+supports_stream = True
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    json_data = {
+        'text': messages[-1]['content'],
+        'action': 'noauth',
+        'id': '',
+        'parentId': '',
+        'workspaceId': '',
+        'messagePersona': '607e41fe-95be-497e-8e97-010a59b2e2c0',
+        'model': 'gpt-4',
+        'messages': messages[:-1] if len(messages) > 1 else [],
+        'internetMode': 'auto'
+    }
+
+    response = requests.post('https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat',
+                             json=json_data, stream=True)
+
+    for token in response.iter_lines():
+        if b'delta' in token:
+            token = json.loads(token.decode().split('data: ')[1])['delta']
+            yield (token)
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
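Forefront streams server-sent events, and the loop above keeps only the lines carrying a `delta`. A sketch of what one such line looks like and how the split/parse recovers the token (the sample payload is illustrative, not captured traffic):

```py
import json

line = b'data: {"delta": "Hello"}'  # illustrative SSE line
token = json.loads(line.decode().split('data: ')[1])['delta']
print(token)  # Hello
```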
@@ -0,0 +1,28 @@
+import os
+import json
+import time
+import subprocess
+
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://chat.openai.com/chat'
+model = ['gpt-3.5-turbo']
+supports_stream = True
+
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+
+    path = os.path.dirname(os.path.realpath(__file__))
+    config = json.dumps({
+        'messages': messages,
+        'model': model}, separators=(',', ':'))
+
+    cmd = ['python3', f'{path}/helpers/openai.py', config]
+
+    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+
+    for line in iter(p.stdout.readline, b''):
+        yield line.decode('utf-8') #[:-1]
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
@@ -0,0 +1,42 @@
+import os, requests, uuid
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://ora.ai'
+model = ['gpt-3.5-turbo', 'gpt-4']
+supports_stream = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    headers = {
+        'authority': 'ora.ai',
+        'accept': '*/*',
+        'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+        'cache-control': 'no-cache',
+        'content-type': 'application/json',
+        'origin': 'https://ora.ai',
+        'pragma': 'no-cache',
+        'referer': 'https://ora.ai/chat/',
+        'sec-ch-ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
+        'sec-ch-ua-mobile': '?0',
+        'sec-ch-ua-platform': '"macOS"',
+        'sec-fetch-dest': 'empty',
+        'sec-fetch-mode': 'cors',
+        'sec-fetch-site': 'same-origin',
+        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
+    }
+
+    json_data = {
+        'chatbotId': 'adb2b793-e667-46b9-8d80-114eaa9a4c40',
+        'input': messages[-1]['content'],
+        'userId': f'auto:{uuid.uuid4()}',
+        'provider': 'OPEN_AI',
+        'config': False,
+        'includeHistory': False
+    }
+
+    response = requests.post('https://ora.ai/api/conversation',
+                             headers=headers, json=json_data)
+
+    yield response.json()['response']
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
@@ -3,13 +3,17 @@ import json
 import time
 import subprocess
 
-url = None
-model = None
-
-def _create_completion(model: str, messages: list, **kwargs):
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://phind.com'
+model = ['gpt-3.5-turbo', 'gpt-4']
+supports_stream = True
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
 
     path = os.path.dirname(os.path.realpath(__file__))
     config = json.dumps({
         'model': model,
         'messages': messages}, separators=(',', ':'))
 
     cmd = ['python3', f'{path}/helpers/phind.py', config]
 
@@ -26,4 +30,7 @@ def _create_completion(model: str, messages: list, **kwargs):
         if b'ping - 2023-' in line:
             continue
 
-        yield line.decode('utf-8')[:-1]
+        yield line.decode('utf-8') #[:-1]
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
@@ -0,0 +1,55 @@
+import os
+import requests
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://chat.pierangelo.info'
+model = ['gpt-4', 'gpt-3.5-turbo']
+supports_stream = True
+
+models = {
+    'gpt-4': {
+        'id': 'gpt-4',
+        'name': 'GPT-4'
+    },
+    'gpt-3.5-turbo': {
+        'id': 'gpt-3.5-turbo',
+        'name': 'GPT-3.5'
+    }
+}
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+
+    headers = {
+        'authority': 'chat.pierangelo.info',
+        'accept': '*/*',
+        'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+        'cache-control': 'no-cache',
+        'content-type': 'application/json',
+        'origin': 'https://chat.pierangelo.info',
+        'pragma': 'no-cache',
+        'referer': 'https://chat.pierangelo.info/',
+        'sec-ch-ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
+        'sec-ch-ua-mobile': '?0',
+        'sec-ch-ua-platform': '"macOS"',
+        'sec-fetch-dest': 'empty',
+        'sec-fetch-mode': 'cors',
+        'sec-fetch-site': 'same-origin',
+        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
+    }
+
+    json_data = {
+        'model': models[model],
+        'messages': messages,
+        'key': '',
+        'prompt': "You are ChatGPT, a large language model trained by OpenAI. Answer consisely",
+        'temperature': 0.7
+    }
+
+    response = requests.post('https://chat.pierangelo.info/api/chat',
+                             headers=headers, json=json_data, stream=True)
+
+    for token in response:
+        yield (token)
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f'{name}: {get_type_hints(_create_completion)[name].__name__}' for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
@@ -0,0 +1,28 @@
+import os
+import json
+import time
+import subprocess
+
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://theb.ai'
+model = ['gpt-3.5-turbo']
+supports_stream = True
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+
+    path = os.path.dirname(os.path.realpath(__file__))
+    config = json.dumps({
+        'messages': messages,
+        'model': model}, separators=(',', ':'))
+
+    cmd = ['python3', f'{path}/helpers/theb.py', config]
+
+    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+
+    for line in iter(p.stdout.readline, b''):
+        yield line.decode('utf-8') #[:-1]
+
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
File diff suppressed because one or more lines are too long
@@ -3,10 +3,13 @@ import json
 import time
 import subprocess
 
-def _create_completion(model: str, messages: list, **kwargs):
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://you.com'
+model = 'gpt-3.5-turbo'
+supports_stream = True
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
 
     path = os.path.dirname(os.path.realpath(__file__))
     config = json.dumps({
 
@@ -17,4 +20,4 @@ def _create_completion(model: str, messages: list, **kwargs):
     p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
 
     for line in iter(p.stdout.readline, b''):
-        yield line.decode('utf-8')[:-1]
+        yield line.decode('utf-8') #[:-1]
@@ -0,0 +1,32 @@
+import os
+import time
+import requests
+
+from ...typing import sha256, Dict, get_type_hints
+supports_stream = True
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+
+    headers = {
+        'authority': 'api.aichatos.cloud',
+        'origin': 'https://chat9.yqcloud.top',
+        'referer': 'https://chat9.yqcloud.top/',
+        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
+    }
+
+    json_data = {
+        'prompt': 'always respond in english | %s' % messages[-1]['content'],
+        'userId': f'#/chat/{int(time.time() * 1000)}',
+        'network': True,
+        'apikey': '',
+        'system': '',
+        'withoutContext': False,
+    }
+
+    response = requests.post('https://api.aichatos.cloud/api/generateStream', headers=headers, json=json_data, stream=True)
+    for token in response.iter_content(chunk_size=2046):
+        if not b'always respond in english' in token:
+            yield (token.decode('utf-8'))
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
@@ -0,0 +1,220 @@
+import sys
+import ssl
+import uuid
+import json
+import time
+import random
+import asyncio
+import certifi
+import requests
+import websockets
+import browser_cookie3
+
+config = json.loads(sys.argv[1])
+
+ssl_context = ssl.create_default_context()
+ssl_context.load_verify_locations(certifi.where())
+
+
+conversationstyles = {
+    'gpt-4': [ #'precise'
+        "nlu_direct_response_filter",
+        "deepleo",
+        "disable_emoji_spoken_text",
+        "responsible_ai_policy_235",
+        "enablemm",
+        "h3imaginative",
+        "travelansgnd",
+        "dv3sugg",
+        "clgalileo",
+        "gencontentv3",
+        "dv3sugg",
+        "responseos",
+        "e2ecachewrite",
+        "cachewriteext",
+        "nodlcpcwrite",
+        "travelansgnd",
+        "nojbfedge",
+    ],
+    'balanced': [
+        "nlu_direct_response_filter",
+        "deepleo",
+        "disable_emoji_spoken_text",
+        "responsible_ai_policy_235",
+        "enablemm",
+        "galileo",
+        "dv3sugg",
+        "responseos",
+        "e2ecachewrite",
+        "cachewriteext",
+        "nodlcpcwrite",
+        "travelansgnd",
+        "nojbfedge",
+    ],
+    'gpt-3.5-turbo': [ #'precise'
+        "nlu_direct_response_filter",
+        "deepleo",
+        "disable_emoji_spoken_text",
+        "responsible_ai_policy_235",
+        "enablemm",
+        "galileo",
+        "dv3sugg",
+        "responseos",
+        "e2ecachewrite",
+        "cachewriteext",
+        "nodlcpcwrite",
+        "travelansgnd",
+        "h3precise",
+        "clgalileo",
+        "nojbfedge",
+    ]
+}
+
+def format(msg: dict) -> str:
+    return json.dumps(msg) + '\x1e'
+
+def get_token():
+    return
+
+    try:
+        cookies = {c.name: c.value for c in browser_cookie3.edge(domain_name='bing.com')}
+        return cookies['_U']
+    except:
+        print('Error: could not find bing _U cookie in edge browser.')
+        exit(1)
+
+class AsyncCompletion:
+    async def create(
+            prompt     : str = None,
+            optionSets : list = None,
+            token      : str = None): # No auth required anymore
+
+        create = None
+        for _ in range(5):
+            try:
+                create = requests.get('https://edgeservices.bing.com/edgesvc/turing/conversation/create',
+                    headers = {
+                        'host': 'edgeservices.bing.com',
+                        'accept-encoding': 'gzip, deflate, br',
+                        'connection': 'keep-alive',
+                        'authority': 'edgeservices.bing.com',
+                        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
+                        'accept-language': 'en-US,en;q=0.9',
+                        'cache-control': 'max-age=0',
+                        'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
+                        'sec-ch-ua-arch': '"x86"',
+                        'sec-ch-ua-bitness': '"64"',
+                        'sec-ch-ua-full-version': '"110.0.1587.69"',
+                        'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
+                        'sec-ch-ua-mobile': '?0',
+                        'sec-ch-ua-model': '""',
+                        'sec-ch-ua-platform': '"Windows"',
+                        'sec-ch-ua-platform-version': '"15.0.0"',
+                        'sec-fetch-dest': 'document',
+                        'sec-fetch-mode': 'navigate',
+                        'sec-fetch-site': 'none',
+                        'sec-fetch-user': '?1',
+                        'upgrade-insecure-requests': '1',
+                        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
+                        'x-edge-shopping-flag': '1',
+                        'x-forwarded-for': f'13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}'
+                    }
+                )
+
+                conversationId = create.json()['conversationId']
+                clientId = create.json()['clientId']
+                conversationSignature = create.json()['conversationSignature']
+
+            except Exception as e:
+                time.sleep(0.5)
+                continue
+
+        if create == None: raise Exception('Failed to create conversation.')
+
+        wss: websockets.WebSocketClientProtocol or None = None
+
+        wss = await websockets.connect('wss://sydney.bing.com/sydney/ChatHub', max_size = None, ssl = ssl_context,
+            extra_headers = {
+                'accept': 'application/json',
+                'accept-language': 'en-US,en;q=0.9',
+                'content-type': 'application/json',
+                'sec-ch-ua': '"Not_A Brand";v="99", Microsoft Edge";v="110", "Chromium";v="110"',
+                'sec-ch-ua-arch': '"x86"',
+                'sec-ch-ua-bitness': '"64"',
+                'sec-ch-ua-full-version': '"109.0.1518.78"',
+                'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
+                'sec-ch-ua-mobile': '?0',
+                'sec-ch-ua-model': "",
+                'sec-ch-ua-platform': '"Windows"',
+                'sec-ch-ua-platform-version': '"15.0.0"',
+                'sec-fetch-dest': 'empty',
+                'sec-fetch-mode': 'cors',
+                'sec-fetch-site': 'same-origin',
+                'x-ms-client-request-id': str(uuid.uuid4()),
+                'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
+                'Referer': 'https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx',
+                'Referrer-Policy': 'origin-when-cross-origin',
+                'x-forwarded-for': f'13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}'
+            }
+        )
+
+        await wss.send(format({'protocol': 'json', 'version': 1}))
+        await wss.recv()
+
+        struct = {
+            'arguments': [
+                {
+                    'source': 'cib',
+                    'optionsSets': optionSets,
+                    'isStartOfSession': True,
+                    'message': {
+                        'author': 'user',
+                        'inputMethod': 'Keyboard',
+                        'text': prompt,
+                        'messageType': 'Chat'
+                    },
+                    'conversationSignature': conversationSignature,
+                    'participant': {
+                        'id': clientId
+                    },
+                    'conversationId': conversationId
+                }
+            ],
+            'invocationId': '0',
+            'target': 'chat',
+            'type': 4
+        }
+
+        await wss.send(format(struct))
+
+        base_string = ''
+
+        final = False
+        while not final:
+            objects = str(await wss.recv()).split('\x1e')
+            for obj in objects:
+                if obj is None or obj == '':
+                    continue
+
+                response = json.loads(obj)
+                #print(response, flush=True, end='')
+                if response.get('type') == 1 and response['arguments'][0].get('messages',):
+                    response_text = response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0].get('text')
+
+                    yield (response_text.replace(base_string, ''))
+                    base_string = response_text
+
+                elif response.get('type') == 2:
+                    final = True
+
+        await wss.close()
+
+async def run(optionSets, messages):
+    async for value in AsyncCompletion.create(prompt=messages[-1]['content'],
+                                              optionSets=optionSets):
+
+        print(value, flush=True, end = '')
+
+optionSet = conversationstyles[config['model']]
+asyncio.run(run(optionSet, config['messages']))
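The helper frames every ChatHub message with the ASCII record separator `\x1e`, which is why `format` appends it and the read loop splits on it. A round-trip sketch of that framing:

```py
import json

RS = '\x1e'  # record separator terminating each websocket frame

frame = json.dumps({'protocol': 'json', 'version': 1}) + RS
# receiving side: split on RS and drop the trailing empty chunk
messages = [json.loads(part) for part in frame.split(RS) if part]
print(messages)  # [{'protocol': 'json', 'version': 1}]
```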
@@ -0,0 +1,106 @@
+import sys
+import uuid
+import json
+import browser_cookie3
+
+from curl_cffi import requests
+
+config = json.loads(sys.argv[1])
+
+def session_auth(cookies):
+    headers = {
+        'authority': 'chat.openai.com',
+        'accept': '*/*',
+        'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+        'cache-control': 'no-cache',
+        'pragma': 'no-cache',
+        'referer': 'https://chat.openai.com/chat',
+        'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
+        'sec-ch-ua-mobile': '?0',
+        'sec-ch-ua-platform': '"macOS"',
+        'sec-fetch-dest': 'empty',
+        'sec-fetch-mode': 'cors',
+        'sec-fetch-site': 'same-origin',
+        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
+    }
+
+    return requests.get('https://chat.openai.com/api/auth/session',
+                        cookies=cookies, headers=headers, impersonate='chrome110').json()
+
+all_cookies = {cookie.name: cookie.value for cookie in browser_cookie3.chrome(
+    domain_name='chat.openai.com')}
+
+try:
+    cookies = {
+        '__Secure-next-auth.session-token': all_cookies['__Secure-next-auth.session-token'],
+    }
+except Exception:
+    print('Failed to get "__Secure-next-auth.session-token" in chrome, please make sure you are authenticated on openai.com')
+    exit(0)
+
+headers = {
+    'authority': 'chat.openai.com',
+    'accept': 'text/event-stream',
+    'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+    'authorization': 'Bearer ' + session_auth(cookies)['accessToken'],
+    'cache-control': 'no-cache',
+    'content-type': 'application/json',
+    'origin': 'https://chat.openai.com',
+    'pragma': 'no-cache',
+    'referer': 'https://chat.openai.com/chat',
+    'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
+    'sec-ch-ua-mobile': '?0',
+    'sec-ch-ua-platform': '"macOS"',
+    'sec-fetch-dest': 'empty',
+    'sec-fetch-mode': 'cors',
+    'sec-fetch-site': 'same-origin',
+    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
+}
+
+payload = {
+    'action': 'next',
+    'history_and_training_disabled': False,
+    'messages': [
+        {
+            'id': str(uuid.uuid4()),
+            'author': {
+                'role': 'user',
+            },
+            'content': {
+                'content_type': 'text',
+                'parts': [
+                    config['messages'][-1]['content']
+                ]
+            }
+        }
+    ],
+    'model': 'text-davinci-002-render-sha',
+    'parent_message_id': str(uuid.uuid4()),
+    'supports_modapi': True,
+    'timezone_offset_min': -60
+}
+
+completion = ''
+
+def format(chunk):
+    try:
+        global completion
+
+        if b'parts' in chunk:
+            json_data = json.loads(chunk.decode('utf-8').split('data: ')[1])
+            token = json_data['message']['content']['parts'][0]
+            token = token.replace(completion, '')
+            completion += token
+
+            print(token, flush=True, end = '')
+
+    except Exception as e:
+        pass
+
+for _ in range(3):
+    try:
+        response = requests.post('https://chat.openai.com/backend-api/conversation',
+                                 json=payload, headers=headers, content_callback=format, impersonate='chrome110')
+        break
+    except:
+        continue
@@ -8,13 +8,15 @@ from curl_cffi import requests
 config = json.loads(sys.argv[1])
 prompt = config['messages'][-1]['content']
 
+skill = 'expert' if config['model'] == 'gpt-4' else 'intermediate'
+
 json_data = json.dumps({
     'question': prompt,
     'options': {
-        'skill': 'expert',
+        'skill': skill,
         'date': datetime.datetime.now().strftime('%d/%m/%Y'),
         'language': 'en',
-        'detailed': False,
+        'detailed': True,
         'creative': True,
         'customLinks': []}}, separators=(',', ':'))
 
@@ -37,17 +39,31 @@ headers = {
 
 
 def output(chunk):
-    if chunk == b'data: \r\ndata: \r\ndata: \r\n\r\n':
-        chunk = b'data: \n\r\n\r\n'
+    try:
+        if b'PHIND_METADATA' in chunk:
+            return
+
+        if chunk == b'data: \r\ndata: \r\ndata: \r\n\r\n':
+            chunk = b'data: \n\r\n\r\n'
 
-    chunk = chunk.decode()
+        chunk = chunk.decode()
 
-    chunk = chunk.replace('data: \r\n\r\ndata: ', 'data: \n')
-    chunk = chunk.replace('\r\ndata: \r\ndata: \r\n\r\n', '\n\r\n\r\n')
-    chunk = chunk.replace('data: ', '').replace('\r\n\r\n', '')
+        chunk = chunk.replace('data: \r\n\r\ndata: ', 'data: \n')
+        chunk = chunk.replace('\r\ndata: \r\ndata: \r\n\r\n', '\n\r\n\r\n')
+        chunk = chunk.replace('data: ', '').replace('\r\n\r\n', '')
 
-    print(chunk, flush=True)
+        print(chunk, flush=True, end = '')
+
+    except json.decoder.JSONDecodeError:
+        pass
 
-response = requests.post('https://www.phind.com/api/infer/answer',
-    headers=headers, data=json_data, content_callback=output, timeout=999999, impersonate='safari15_5')
+while True:
+    try:
+        response = requests.post('https://www.phind.com/api/infer/answer',
+            headers=headers, data=json_data, content_callback=output, timeout=999999, impersonate='safari15_5')
+
+        exit(0)
+
+    except Exception as e:
+        print('an error occured, retrying... |', e, flush=True)
+        continue
@@ -0,0 +1,49 @@
+import json
+import sys
+from curl_cffi import requests
+
+config = json.loads(sys.argv[1])
+prompt = config['messages'][-1]['content']
+
+headers = {
+    'authority': 'chatbot.theb.ai',
+    'accept': 'application/json, text/plain, */*',
+    'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+    'content-type': 'application/json',
+    'origin': 'https://chatbot.theb.ai',
+    'referer': 'https://chatbot.theb.ai/',
+    'sec-ch-ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
+    'sec-ch-ua-mobile': '?0',
+    'sec-ch-ua-platform': '"macOS"',
+    'sec-fetch-dest': 'empty',
+    'sec-fetch-mode': 'cors',
+    'sec-fetch-site': 'same-origin',
+    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
+}
+
+json_data = {
+    'prompt': prompt,
+    'options': {}
+}
+
+def format(chunk):
+    try:
+        chunk_json = json.loads(chunk.decode('utf-8'))
+        completion_chunk = chunk_json['detail']['choices'][0]['delta']['content']
+
+        print(completion_chunk, flush=True, end = '')
+
+    except Exception as e:
+        print('[ERROR] an error occured, retrying... |', e, flush=True)
+        return
+
+while True:
+    try:
+        response = requests.post('https://chatbot.theb.ai/api/chat-process',
+                                 headers=headers, json=json_data, content_callback=format, impersonate='chrome110')
+
+        exit(0)
+
+    except Exception as e:
+        print('[ERROR] an error occured, retrying... |', e, flush=True)
+        continue
@@ -65,7 +65,15 @@ def output(chunk):
     if b'"youChatToken"' in chunk:
         chunk_json = json.loads(chunk.decode().split('data: ')[1])
 
-        print(chunk_json['youChatToken'], flush=True)
+        print(chunk_json['youChatToken'], flush=True, end = '')
 
-response = requests.get(f'https://you.com/api/streamingSearch?{params}',
-    headers=headers, content_callback=output, impersonate='safari15_5')
+while True:
+    try:
+        response = requests.get(f'https://you.com/api/streamingSearch?{params}',
+            headers=headers, content_callback=output, impersonate='safari15_5')
+
+        exit(0)
+
+    except Exception as e:
+        print('an error occured, retrying... |', e, flush=True)
+        continue
@@ -0,0 +1,18 @@
+from . import Provider
+from .Providers import (
+    Phind,
+    You,
+    Bing,
+    Openai,
+    Yqcloud,
+    Theb,
+    Aichat,
+    Ora,
+    Aws,
+    Bard,
+    Vercel,
+    Pierangelo,
+    Forefront
+)
+
+Palm = Bard
@@ -1,79 +0,0 @@
-import os
-import time
-import json
-import uuid
-import hashlib
-import requests
-
-from ..typing import sha256, Dict, get_type_hints
-from datetime import datetime
-
-url: str = 'https://ai.ls'
-model: str = 'gpt-3.5-turbo'
-
-
-class Utils:
-    def hash(json_data: Dict[str, str]) -> sha256:
-
-        secretKey: bytearray = bytearray([79, 86, 98, 105, 91, 84, 80, 78, 123, 83,
-                                          35, 41, 99, 123, 51, 54, 37, 57, 63, 103, 59, 117, 115, 108, 41, 67, 76])
-
-        base_string: str = '%s:%s:%s:%s' % (
-            json_data['t'],
-            json_data['m'],
-            'OVbi[TPN{S#)c{36%9?g;usl)CL',
-            len(json_data['m'])
-        )
-
-        return hashlib.sha256(base_string.encode()).hexdigest()
-
-    def format_timestamp(timestamp: int) -> str:
-
-        e = timestamp
-        n = e % 10
-        r = n + 1 if n % 2 == 0 else n
-        return str(e - n + r)
-
-
-def _create_completion(model: str, messages: list, temperature: float = 0.6, stream: bool = False):
-    headers = {
-        'authority': 'api.caipacity.com',
-        'accept': '*/*',
-        'authorization': 'Bearer free',
-        'client-id': str(uuid.uuid4()),
-        'client-v': '0.1.26',
-        'content-type': 'application/json',
-        'origin': 'https://ai.ls',
-        'referer': 'https://ai.ls/',
-        'sec-ch-ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
-        'sec-ch-ua-mobile': '?0',
-        'sec-ch-ua-platform': '"macOS"',
-        'sec-fetch-dest': 'empty',
-        'sec-fetch-mode': 'cors',
-        'sec-fetch-site': 'cross-site',
-        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
-    }
-
-    timestamp = Utils.format_timestamp(int(time.time() * 1000))
-
-    sig = {
-        'd': datetime.now().strftime('%Y-%m-%d'),
-        't': timestamp,
-        's': Utils.hash({
-            't': timestamp,
-            'm': messages[-1]['content']})}
-
-    json_data = json.dumps(separators=(',', ':'), obj={
-        'model': 'gpt-3.5-turbo',
-        'temperature': temperature,
-        'stream': True,
-        'messages': messages} | sig)
-
-    response = requests.post('https://api.caipacity.com/v1/chat/completions?full=false',
-                             headers=headers, data=json_data, stream=True)
-
-    for token in response.iter_lines():
-        yield token.decode()
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
@@ -1,5 +0,0 @@
-url = None
-model = None
-
-def _create_completion(model: str, messages: list, **kwargs):
-    return
@@ -1 +0,0 @@
-from . import Ails, Phind, Provider, You
@@ -1,10 +1,9 @@
 import sys
 
 from .typing import MetaModels, Union
-from . import Providers
+from . import Provider
 
-
-class Models(metaclass=MetaModels):
+class Model(metaclass=MetaModels):
 
     class model:
         name: str
 
@@ -14,30 +13,39 @@ class Models(metaclass=MetaModels):
     class gpt_35_turbo:
         name: str = 'gpt-3.5-turbo'
        base_provider: str = 'openai'
-        best_site: str = Providers.Ails
+        best_site: Provider.Provider = Provider.Forefront
 
     class gpt_4:
         name: str = 'gpt-4'
         base_provider: str = 'openai'
-        best_site: str = Providers.Phind
+        best_site: Provider.Provider = Provider.Bing
 
+    class davinvi_003:
+        name: str = 'davinvi-003'
+        base_provider: str = 'openai'
+        best_site: Provider.Provider = Provider.Vercel
+
 class Utils:
     convert: dict = {
-        'gpt-3.5-turbo': Models.gpt_35_turbo,
-        'gpt-4': Models.gpt_4
+        'gpt-3.5-turbo': Model.gpt_35_turbo,
+        'gpt-4': Model.gpt_4
     }
 
 class ChatCompletion:
     @staticmethod
-    def create(model: Models.model or str, messages: list, provider: Providers.Provider = None, stream: bool = False, **kwargs):
+    def create(model: Model.model or str, messages: list, provider: Provider.Provider = None, stream: bool = False, **kwargs):
        try:
            if isinstance(model, str):
                model = Utils.convert[model]
 
            engine = model.best_site if not provider else provider
 
-            return (engine._create_completion(model, messages, **kwargs)
-                    if stream else ''.join(engine._create_completion(model, messages, **kwargs)))
+            if not engine.supports_stream and stream == True:
+                print(
+                    f"ValueError: {engine.__name__} does not support 'stream' argument", file=sys.stderr)
+                sys.exit(1)
+
+            return (engine._create_completion(model.name, messages, stream, **kwargs)
+                    if stream else ''.join(engine._create_completion(model.name, messages, stream, **kwargs)))
 
        except TypeError as e:
            print(e)
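The rewritten `create` resolves a string model through `Utils.convert` to a `Model` class, falls back to that class's `best_site` when no provider is passed, and now refuses `stream=True` for engines whose `supports_stream` is false. So, per this diff, the call below would be served by `Provider.Bing`:

```py
import g4f

# no provider given: 'gpt-4' -> Model.gpt_4 -> best_site (Provider.Bing in this diff)
text = g4f.ChatCompletion.create(model='gpt-4',
                                 messages=[{'role': 'user', 'content': 'Hello world'}])
print(text)
```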
@@ -12,5 +12,4 @@ class MetaModels(type):
             ' ...'
         ]
 
-        return '\n'.join(output)
-
+        return '\n'.join(output)
@@ -0,0 +1,48 @@
+import browser_cookie3
+
+class Utils:
+    browsers = [
+        browser_cookie3.chrome,   # 62.74% market share
+        browser_cookie3.safari,   # 24.12% market share
+        browser_cookie3.firefox,  #  4.56% market share
+        browser_cookie3.edge,     #  2.85% market share
+        browser_cookie3.opera,    #  1.69% market share
+        browser_cookie3.brave,    #  0.96% market share
+        browser_cookie3.opera_gx, #  0.64% market share
+        browser_cookie3.vivaldi,  #  0.32% market share
+    ]
+
+    def get_cookies(domain: str, setName: str = None, setBrowser: str = False) -> dict:
+        cookies = {}
+
+        if setBrowser != False:
+            for browser in Utils.browsers:
+                if browser.__name__ == setBrowser:
+                    try:
+                        for c in browser(domain_name=domain):
+                            if c.name not in cookies:
+                                cookies = cookies | {c.name: c.value}
+
+                    except Exception as e:
+                        pass
+
+        else:
+            for browser in Utils.browsers:
+                try:
+                    for c in browser(domain_name=domain):
+                        if c.name not in cookies:
+                            cookies = cookies | {c.name: c.value}
+
+                except Exception as e:
+                    pass
+
+        if setName:
+            try:
+                return {setName: cookies[setName]}
+
+            except ValueError:
+                print(f'Error: could not find {setName} cookie in any browser.')
+                exit(1)
+
+        else:
+            return cookies
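`get_cookies` merges cookies for a domain across every installed browser (in the market-share order above), optionally restricted to one browser or one cookie name. Note that a missing cookie raises `KeyError`, which the `except ValueError` above will not actually catch. A usage sketch (the `g4f.utils` import path is assumed, since the compare view drops file names):

```py
from g4f.utils import Utils  # module path assumed from the package layout

cookies = Utils.get_cookies('bing.com')            # all cookies, any browser
u = Utils.get_cookies('bing.com', setName='_U',
                      setBrowser='edge')           # one cookie, one browser
```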
@@ -3,101 +3,88 @@ import time
 import json
 import random
 
-from g4f import Models, ChatCompletion, Providers
-from flask import Flask, request
+from g4f import Model, ChatCompletion, Provider
+from flask import Flask, request, Response
 from flask_cors import CORS
 
 app = Flask(__name__)
 app.config['JSONIFY_PRETTYPRINT_REGULAR'] = True
 CORS(app)
 
-class website:
-    def __init__(self) -> None:
-        self.routes = {
-            '/chat/completions': {
-                'function': self.chat_completions,
-                'methods': ['POST', 'GET']
-            }
-        }
-        self.config = {
-            'host': '0.0.0.0',
-            'port': 1337,
-            'debug': True
-        }
-
-    def chat_completions(self):
-        streaming = request.json.get('stream', False)
-        model = request.json.get('model', 'gpt-3.5-turbo')
-        messages = request.json.get('messages')
-
-        models = {
-            'gpt-3.5-turbo': 'gpt-3.5-turbo-0301'
-        }
-
-        response = ChatCompletion.create(model=Models.gpt_35_turbo, stream=streaming,
-                                         messages=messages)
-
-        if not streaming:
-            completion_timestamp = int(time.time())
-            completion_id = ''.join(random.choices(
-                'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789', k=28))
-
-            return {
-                'id': 'chatcmpl-%s' % completion_id,
-                'object': 'chat.completion',
-                'created': completion_timestamp,
-                'model': models[model],
-                'usage': {
-                    'prompt_tokens': None,
-                    'completion_tokens': None,
-                    'total_tokens': None
-                },
-                'choices': [{
-                    'message': {
-                        'role': 'assistant',
-                        'content': response
-                    },
-                    'finish_reason': 'stop',
-                    'index': 0
-                }]
-            }
-
-        def stream():
-            for token in response:
-                completion_timestamp = int(time.time())
-                completion_id = ''.join(random.choices(
-                    'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789', k=28))
-
-                completion_data = {
-                    'id': f'chatcmpl-{completion_id}',
-                    'object': 'chat.completion.chunk',
-                    'created': completion_timestamp,
-                    'model': 'gpt-3.5-turbo-0301',
-                    'choices': [
-                        {
-                            'delta': {
-                                'content': token
-                            },
-                            'index': 0,
-                            'finish_reason': None
-                        }
-                    ]
-                }
-
-                yield 'data: %s\n\n' % json.dumps(completion_data, separators=(',', ':'))
-                time.sleep(0.1)
-
-        return app.response_class(stream(), mimetype='text/event-stream')
+@app.route("/chat/completions", methods=['POST'])
+def chat_completions():
+    streaming = request.json.get('stream', False)
+    model = request.json.get('model', 'gpt-3.5-turbo')
+    messages = request.json.get('messages')
+
+    models = {
+        'gpt-3.5-turbo': 'gpt-3.5-turbo-0301'
+    }
+
+    response = ChatCompletion.create(model=Model.gpt_35_turbo, stream=streaming,
+                                     messages=messages)
+
+    if not streaming:
+        while 'curl_cffi.requests.errors.RequestsError' in response:
+            response = ChatCompletion.create(model=Model.gpt_35_turbo, stream=streaming,
+                                             messages=messages)
+
+        completion_timestamp = int(time.time())
+        completion_id = ''.join(random.choices(
+            'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789', k=28))
+
+        return {
+            'id': 'chatcmpl-%s' % completion_id,
+            'object': 'chat.completion',
+            'created': completion_timestamp,
+            'model': models[model],
+            'usage': {
+                'prompt_tokens': None,
+                'completion_tokens': None,
+                'total_tokens': None
+            },
+            'choices': [{
+                'message': {
+                    'role': 'assistant',
+                    'content': response
+                },
+                'finish_reason': 'stop',
+                'index': 0
+            }]
+        }
+
+    def stream():
+        for token in response:
+            completion_timestamp = int(time.time())
+            completion_id = ''.join(random.choices(
+                'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789', k=28))
+
+            completion_data = {
+                'id': f'chatcmpl-{completion_id}',
+                'object': 'chat.completion.chunk',
+                'created': completion_timestamp,
+                'model': 'gpt-3.5-turbo-0301',
+                'choices': [
+                    {
+                        'delta': {
+                            'content': token
+                        },
+                        'index': 0,
+                        'finish_reason': None
+                    }
+                ]
+            }
+
+            yield 'data: %s\n\n' % json.dumps(completion_data, separators=(',', ':'))
+            time.sleep(0.1)
+
+    return app.response_class(stream(), mimetype='text/event-stream')
 
 if __name__ == '__main__':
-    website = website()
-
-    for route in website.routes:
-        app.add_url_rule(
-            route,
-            view_func=website.routes[route]['function'],
-            methods=website.routes[route]['methods']
-        )
-
-    app.run(**website.config)
+    config = {
+        'host': '0.0.0.0',
+        'port': 1337,
+        'debug': True
+    }
+
+    app.run(**config)
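The streaming branch emits OpenAI-style `chat.completion.chunk` objects as `data: {...}\n\n` lines. A sketch of a client consuming that stream directly:

```py
import json
import requests

with requests.post('http://127.0.0.1:1337/chat/completions', stream=True,
                   json={'model': 'gpt-3.5-turbo', 'stream': True,
                         'messages': [{'role': 'user', 'content': 'Hello world'}]}) as r:
    for line in r.iter_lines():
        if line.startswith(b'data: '):
            chunk = json.loads(line[len(b'data: '):])
            print(chunk['choices'][0]['delta'].get('content', ''), end='')
```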