Forked from g4f/gpt4free

major update

Commit 240d6cf87a

README.md (34 changes)
@@ -29,22 +29,32 @@ for token in chat_completion:
 providers:
 ```py
-g4f.Providers.Openai # need to be logged in in browser
-g4f.Providers.Bing # need to be logged in in browser
-g4f.Providers.You
-g4f.Providers.Ails
-g4f.Providers.Phind
-g4f.Providers.Yqcloud
+from g4f.Provider import (
+    Phind,
+    You,
+    Bing,
+    Openai,
+    Yqcloud,
+    Theb,
+    Aichat,
+    Ora,
+    Aws,
+    Bard,
+    Vercel,
+    Pierangelo,
+    Forefront
+)

 # usage:
-response = g4f.ChatCompletion.create(..., provider=g4f.Providers.ProviderName)
+response = g4f.ChatCompletion.create(..., provider=ProviderName)
 ```

 ```py
 import g4f

-print(g4f.Providers.Ails.params) # supported args
+print(g4f.Provider.Ails.params) # supported args

 # Automatic selection of provider
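Taken together, the two README snippets above describe the whole new calling convention. A minimal end-to-end sketch (assuming this commit is installed; Forefront is picked arbitrarily from the import list):

```py
import g4f
from g4f.Provider import Forefront  # any provider from the list above

# stream=True yields tokens as they arrive; Forefront sets supports_stream = True
response = g4f.ChatCompletion.create(
    model='gpt-3.5-turbo',
    provider=Forefront,
    messages=[{"role": "user", "content": "Hello world"}],
    stream=True)

for token in response:
    print(token, end='')
```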
@@ -63,7 +73,7 @@ print(response)

 # Set with provider
-response = g4f.ChatCompletion.create(model='gpt-3.5-turbo', provider=g4f.Providers.Openai, messages=[
+response = g4f.ChatCompletion.create(model='gpt-3.5-turbo', provider=g4f.Provider.Openai, messages=[
     {"role": "user", "content": "Hello world"}], stream=True)

 for message in response:
@@ -73,11 +83,11 @@ for message in response:
 ### Dev

 (more instructions soon)
-the `g4f.Providers` class
+the `g4f.Provider` class

 default:

-`./g4f/Providers/ProviderName.py`:
+`./g4f/Provider/Providers/ProviderName.py`:
 ```python
 import os

@@ -91,6 +101,6 @@ def _create_completion(prompt: str, args...):
     yield ...

-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+params = f'g4f.Provider.{os.path.basename(__file__)[:-3]} supports: ' + \
     ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
 ```
@@ -0,0 +1,12 @@
+import os
+from ..typing import sha256, Dict, get_type_hints
+
+url = None
+model = None
+supports_stream = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    return
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
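Every provider module ends with the same `params` string, built by introspecting `_create_completion`. A standalone demonstration of the introspection trick it relies on:

```py
from typing import get_type_hints

def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    return

# co_varnames[:co_argcount] lists the positional parameter names;
# get_type_hints maps each name to its annotated type.
arg_names = _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]
hints = get_type_hints(_create_completion)
print(', '.join(f'{name}: {hints[name].__name__}' for name in arg_names))
# -> model: str, messages: list, stream: bool
```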
@@ -0,0 +1,37 @@
+import os, requests
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://chat-gpt.org/chat'
+model = ['gpt-3.5-turbo']
+supports_stream = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    headers = {
+        'authority': 'chat-gpt.org',
+        'accept': '*/*',
+        'cache-control': 'no-cache',
+        'content-type': 'application/json',
+        'origin': 'https://chat-gpt.org',
+        'pragma': 'no-cache',
+        'referer': 'https://chat-gpt.org/chat',
+        'sec-ch-ua-mobile': '?0',
+        'sec-ch-ua-platform': '"macOS"',
+        'sec-fetch-dest': 'empty',
+        'sec-fetch-mode': 'cors',
+        'sec-fetch-site': 'same-origin',
+        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
+    }
+
+    json_data = {
+        'message': messages[-1]['content'],
+        'temperature': 1,
+        'presence_penalty': 0,
+        'top_p': 1,
+        'frequency_penalty': 0
+    }
+
+    response = requests.post('https://chat-gpt.org/api/text', headers=headers, json=json_data)
+    yield response.json()['message']
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
@@ -0,0 +1,26 @@
+import os
+import requests
+
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://4aiu6ctrknfxkoaigkigzh5lwm0cciuc.lambda-url.ap-east-1.on.aws/chat/completions'
+model = ['gpt-3.5-turbo', 'gpt-4']
+supports_stream = False
+
+class Auth(requests.auth.AuthBase):
+    def __init__(self):
+        self.token = 'sk-1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKL'
+
+    def __call__(self, r):
+        r.headers["authorization"] = "Bearer " + self.token
+        return r
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+
+    response = requests.post(url,
+        auth=Auth(), json={"model": model, "messages": messages})
+
+    yield (response.json()['choices'][0]['message']['content'])
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
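The Aws provider above authenticates through a `requests` auth hook rather than a headers dict. The same pattern in isolation (the token below is a placeholder, not a working key):

```py
import requests

class BearerAuth(requests.auth.AuthBase):
    """Attach a Bearer token to every request passed through `auth=`."""
    def __init__(self, token: str):
        self.token = token

    def __call__(self, r):
        # requests invokes this hook on each prepared request before sending
        r.headers['authorization'] = 'Bearer ' + self.token
        return r

# usage sketch: requests.post(url, auth=BearerAuth('sk-...'), json={...})
```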
@@ -0,0 +1,76 @@
+# implement proxy argument
+
+import os, requests, json, browser_cookie3, re, random
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://bard.google.com'
+model = ['Palm2']
+supports_stream = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    psid = {cookie.name: cookie.value for cookie in browser_cookie3.chrome(
+        domain_name='.google.com')}['__Secure-1PSID']
+
+    formatted = '\n'.join([
+        '%s: %s' % (message['role'], message['content']) for message in messages
+    ])
+    prompt = f'{formatted}\nAssistant:'
+
+    proxy = None
+
+    if proxy == None:
+        raise Exception('Proxy is required for Bard (set in g4f/Provider/Providers/Bard.py line 18)')
+
+    snlm0e = False
+    conversation_id = None
+    response_id = None
+    choice_id = None
+
+    client = requests.Session()
+    client.proxies = {
+        'http': f'https://{proxy}',
+        'https': f'https://{proxy}'} if proxy else None
+
+    client.headers = {
+        'authority': 'bard.google.com',
+        'content-type': 'application/x-www-form-urlencoded;charset=UTF-8',
+        'origin': 'https://bard.google.com',
+        'referer': 'https://bard.google.com/',
+        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
+        'x-same-domain': '1',
+        'cookie': f'__Secure-1PSID={psid}'
+    }
+
+    snlm0e = re.search(r'SNlM0e\":\"(.*?)\"',
+        client.get('https://bard.google.com/').text).group(1) if not snlm0e else snlm0e
+
+    params = {
+        'bl': 'boq_assistant-bard-web-server_20230326.21_p0',
+        '_reqid': random.randint(1111, 9999),
+        'rt': 'c'
+    }
+
+    data = {
+        'at': snlm0e,
+        'f.req': json.dumps([None, json.dumps([[prompt], None, [conversation_id, response_id, choice_id]])])}
+
+    intents = '.'.join([
+        'assistant',
+        'lamda',
+        'BardFrontendService'
+    ])
+
+    response = client.post(f'https://bard.google.com/_/BardChatUi/data/{intents}/StreamGenerate',
+        data=data, params=params)
+
+    chat_data = json.loads(response.content.splitlines()[3])[0][2]
+    if chat_data:
+        json_chat_data = json.loads(chat_data)
+
+        yield json_chat_data[0][0]
+
+    else:
+        yield 'error'
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
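Bard's `_create_completion` starts by pulling the `__Secure-1PSID` cookie out of a local Chrome profile. A minimal standalone sketch of that lookup (assumes Chrome is installed and logged in to google.com):

```py
import browser_cookie3

# browser_cookie3 reads Chrome's cookie store from disk; Chrome need not be running
cookies = {c.name: c.value for c in browser_cookie3.chrome(domain_name='.google.com')}
psid = cookies.get('__Secure-1PSID')
print('found __Secure-1PSID' if psid else 'log in to google.com in Chrome first')
```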
@@ -3,13 +3,13 @@ import json
 import time
 import subprocess

-from ..typing import sha256, Dict, get_type_hints
+from ...typing import sha256, Dict, get_type_hints

 url = 'https://bing.com/chat'
 model = ['gpt-3.5-turbo', 'gpt-4']
 supports_stream = True

-def _create_completion(model: str, messages: list, **kwargs):
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
     path = os.path.dirname(os.path.realpath(__file__))
     config = json.dumps({
         'messages': messages,

@@ -20,7 +20,8 @@ def _create_completion(model: str, messages: list, **kwargs):
     p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

     for line in iter(p.stdout.readline, b''):
-        yield line.decode('utf-8')[:-1]
+        #print(line)
+        yield line.decode('utf-8') #[:-1]


 params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
@@ -0,0 +1,36 @@
+import os
+import json
+import requests
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'forefront.com'
+model = ['gpt-3.5-turbo']
+supports_stream = True
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    json_data = {
+        'text': messages[-1]['content'],
+        'action': 'noauth',
+        'id': '',
+        'parentId': '',
+        'workspaceId': '',
+        'messagePersona': '607e41fe-95be-497e-8e97-010a59b2e2c0',
+        'model': 'gpt-4',
+        'messages': messages[:-1] if len(messages) > 1 else [],
+        'internetMode': 'auto'
+    }
+
+    response = requests.post('https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat',
+        json=json_data, stream=True)
+
+    for token in response.iter_lines():
+        if b'delta' in token:
+            token = json.loads(token.decode().split('data: ')[1])['delta']
+            yield (token)
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
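Forefront streams server-sent events, one `data: {json}` line per token delta, and the loop above keeps only lines carrying a `delta` key. The parsing step on its own, fed a synthetic sample line:

```py
import json

line = b'data: {"delta": "Hello"}'  # synthetic sample of one streamed event
if b'delta' in line:
    token = json.loads(line.decode().split('data: ')[1])['delta']
    print(token)  # -> Hello
```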
@@ -3,12 +3,14 @@ import json
 import time
 import subprocess

-from ..typing import sha256, Dict, get_type_hints
+from ...typing import sha256, Dict, get_type_hints

 url = 'https://chat.openai.com/chat'
 model = ['gpt-3.5-turbo']
 supports_stream = True

-def _create_completion(model: str, messages: list, **kwargs):
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+
     path = os.path.dirname(os.path.realpath(__file__))
     config = json.dumps({

@@ -20,8 +22,7 @@ def _create_completion(model: str, messages: list, **kwargs):
     p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

     for line in iter(p.stdout.readline, b''):
-        yield line.decode('utf-8')[:-1]
-
+        yield line.decode('utf-8') #[:-1]

 params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
     '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
@@ -0,0 +1,42 @@
+import os, requests, uuid
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://ora.ai'
+model = ['gpt-3.5-turbo', 'gpt-4']
+supports_stream = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    headers = {
+        'authority': 'ora.ai',
+        'accept': '*/*',
+        'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+        'cache-control': 'no-cache',
+        'content-type': 'application/json',
+        'origin': 'https://ora.ai',
+        'pragma': 'no-cache',
+        'referer': 'https://ora.ai/chat/',
+        'sec-ch-ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
+        'sec-ch-ua-mobile': '?0',
+        'sec-ch-ua-platform': '"macOS"',
+        'sec-fetch-dest': 'empty',
+        'sec-fetch-mode': 'cors',
+        'sec-fetch-site': 'same-origin',
+        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
+    }
+
+    json_data = {
+        'chatbotId': 'adb2b793-e667-46b9-8d80-114eaa9a4c40',
+        'input': messages[-1]['content'],
+        'userId': f'auto:{uuid.uuid4()}',
+        'provider': 'OPEN_AI',
+        'config': False,
+        'includeHistory': False
+    }
+
+    response = requests.post('https://ora.ai/api/conversation',
+        headers=headers, json=json_data)
+
+    yield response.json()['response']
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
@@ -3,12 +3,13 @@ import json
 import time
 import subprocess

-from ..typing import sha256, Dict, get_type_hints
+from ...typing import sha256, Dict, get_type_hints

 url = 'https://phind.com'
 model = ['gpt-3.5-turbo', 'gpt-4']
+supports_stream = True

-def _create_completion(model: str, messages: list, **kwargs):
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+
     path = os.path.dirname(os.path.realpath(__file__))
     config = json.dumps({

@@ -29,7 +30,7 @@ def _create_completion(model: str, messages: list, **kwargs):
         if b'ping - 2023-' in line:
             continue

-        yield line.decode('utf-8')[:-1]
+        yield line.decode('utf-8') #[:-1]

 params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
     '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
@@ -0,0 +1,55 @@
+import os
+import requests
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://chat.pierangelo.info'
+model = ['gpt-4', 'gpt-3.5-turbo']
+supports_stream = True
+
+models = {
+    'gpt-4': {
+        'id': 'gpt-4',
+        'name': 'GPT-4'
+    },
+    'gpt-3.5-turbo': {
+        'id': 'gpt-3.5-turbo',
+        'name': 'GPT-3.5'
+    }
+}
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+
+    headers = {
+        'authority': 'chat.pierangelo.info',
+        'accept': '*/*',
+        'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+        'cache-control': 'no-cache',
+        'content-type': 'application/json',
+        'origin': 'https://chat.pierangelo.info',
+        'pragma': 'no-cache',
+        'referer': 'https://chat.pierangelo.info/',
+        'sec-ch-ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
+        'sec-ch-ua-mobile': '?0',
+        'sec-ch-ua-platform': '"macOS"',
+        'sec-fetch-dest': 'empty',
+        'sec-fetch-mode': 'cors',
+        'sec-fetch-site': 'same-origin',
+        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
+    }
+
+    json_data = {
+        'model': models[model],
+        'messages': messages,
+        'key': '',
+        'prompt': "You are ChatGPT, a large language model trained by OpenAI. Answer consisely",
+        'temperature': 0.7
+    }
+
+    response = requests.post('https://chat.pierangelo.info/api/chat',
+        headers=headers, json=json_data, stream=True)
+
+    for token in response:
+        yield (token)
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f'{name}: {get_type_hints(_create_completion)[name].__name__}' for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
@@ -0,0 +1,28 @@
+import os
+import json
+import time
+import subprocess
+
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://theb.ai'
+model = ['gpt-3.5-turbo']
+supports_stream = True
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+
+    path = os.path.dirname(os.path.realpath(__file__))
+    config = json.dumps({
+        'messages': messages,
+        'model': model}, separators=(',', ':'))
+
+    cmd = ['python3', f'{path}/helpers/theb.py', config]
+
+    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+
+    for line in iter(p.stdout.readline, b''):
+        yield line.decode('utf-8') #[:-1]
+
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
File diff suppressed because one or more lines are too long
@@ -3,12 +3,13 @@ import json
 import time
 import subprocess

-from ..typing import sha256, Dict, get_type_hints
+from ...typing import sha256, Dict, get_type_hints

 url = 'https://you.com'
 model = 'gpt-3.5-turbo'
 supports_stream = True

-def _create_completion(model: str, messages: list, **kwargs):
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+
     path = os.path.dirname(os.path.realpath(__file__))
     config = json.dumps({

@@ -19,4 +20,4 @@ def _create_completion(model: str, messages: list, **kwargs):
     p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

     for line in iter(p.stdout.readline, b''):
-        yield line.decode('utf-8')[:-1]
+        yield line.decode('utf-8') #[:-1]
@@ -2,10 +2,10 @@ import os
 import time
 import requests

-from ..typing import sha256, Dict, get_type_hints
+from ...typing import sha256, Dict, get_type_hints
 supports_stream = True


-def _create_completion(model: str, messages: list, **kwargs):
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):

     headers = {
         'authority': 'api.aichatos.cloud',
@@ -0,0 +1,18 @@
+0B28A84E-592E-4FCD-AB3F-93DDEF9C4DF7
+513A32B7-D024-4CFB-9D70-1B841B245999
+5D733D23-1C8A-4CD0-8C68-CF3C8E02BD2F
+606E4968-CAE4-48C8-9C63-10723DAD64B3
+6DB4C6E6-7720-4A8F-8F65-610B09145F41
+7348661D-4E36-49B2-BCAB-6E7D153CAF43
+893565E3-288C-4A09-8267-F507DAC1A70A
+8FEC6A8C-5E98-4570-95DD-6BBD18263CB8
+92B8B7DD-DA03-47EA-B614-D506AF6A922A
+A02FDB66-BE88-42DA-A162-686316F1C9B2
+A2509F7E-4E7D-4477-A76F-AB3940856C43
+A9E651A2-CC29-439E-9373-17EF55359704
+ABE8D912-7356-4E8A-B37E-A5818808075F
+B530598F-8D63-45EC-A550-3BA23FC1434A
+CCB5F29D-46BC-455D-80CD-F1666570BB7D
+CFE0E42C-70B7-4BDA-9DB6-37A917AB4D00
+D158A715-E29B-4B20-A962-8536DC18D1EC
+FB7506FF-EAB3-410E-924F-E06A45937857
@@ -2,6 +2,7 @@ import sys
 import ssl
 import uuid
 import json
 import time
+import random
 import asyncio
 import certifi

@@ -14,6 +15,8 @@ config = json.loads(sys.argv[1])
 ssl_context = ssl.create_default_context()
 ssl_context.load_verify_locations(certifi.where())
+
+
 conversationstyles = {
     'gpt-4': [ #'precise'
         "nlu_direct_response_filter",

@@ -72,6 +75,8 @@ def format(msg: dict) -> str:
     return json.dumps(msg) + '\x1e'

 def get_token():
+    return
+
     try:
         cookies = {c.name: c.value for c in browser_cookie3.edge(domain_name='bing.com')}
         return cookies['_U']

@@ -83,20 +88,49 @@ class AsyncCompletion:
     async def create(
             prompt     : str = None,
             optionSets : list = None,
-            token      : str = get_token()):
+            token      : str = None): # No auth required anymore

-        create = requests.get('https://edgeservices.bing.com/edgesvc/turing/conversation/create',
-            headers = {
-                'host'      : 'edgeservices.bing.com',
-                'authority' : 'edgeservices.bing.com',
-                'cookie'    : f'_U={token}',
-                'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
-            }
-        )
+        create = None
+        for _ in range(5):
+            try:
+                create = requests.get('https://edgeservices.bing.com/edgesvc/turing/conversation/create',
+                    headers = {
+                        'host': 'edgeservices.bing.com',
+                        'accept-encoding': 'gzip, deflate, br',
+                        'connection': 'keep-alive',
+                        'authority': 'edgeservices.bing.com',
+                        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
+                        'accept-language': 'en-US,en;q=0.9',
+                        'cache-control': 'max-age=0',
+                        'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
+                        'sec-ch-ua-arch': '"x86"',
+                        'sec-ch-ua-bitness': '"64"',
+                        'sec-ch-ua-full-version': '"110.0.1587.69"',
+                        'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
+                        'sec-ch-ua-mobile': '?0',
+                        'sec-ch-ua-model': '""',
+                        'sec-ch-ua-platform': '"Windows"',
+                        'sec-ch-ua-platform-version': '"15.0.0"',
+                        'sec-fetch-dest': 'document',
+                        'sec-fetch-mode': 'navigate',
+                        'sec-fetch-site': 'none',
+                        'sec-fetch-user': '?1',
+                        'upgrade-insecure-requests': '1',
+                        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
+                        'x-edge-shopping-flag': '1',
+                        'x-forwarded-for': f'13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}'
+                    }
+                )

-        conversationId        = create.json()['conversationId']
-        clientId              = create.json()['clientId']
-        conversationSignature = create.json()['conversationSignature']
+                conversationId        = create.json()['conversationId']
+                clientId              = create.json()['clientId']
+                conversationSignature = create.json()['conversationSignature']
+
+            except Exception as e:
+                time.sleep(0.5)
+                continue
+
+        if create == None: raise Exception('Failed to create conversation.')

         wss: websockets.WebSocketClientProtocol or None = None

@@ -164,6 +198,7 @@ class AsyncCompletion:
                 continue

             response = json.loads(obj)
+            #print(response, flush=True, end='')
             if response.get('type') == 1 and response['arguments'][0].get('messages',):
                 response_text = response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0].get('text')

@@ -179,7 +214,7 @@ async def run(optionSets, messages):
     async for value in AsyncCompletion.create(prompt=messages[-1]['content'],
             optionSets=optionSets):

-        print(value, flush=True)
+        print(value, flush=True, end = '')

 optionSet = conversationstyles[config['model']]
 asyncio.run(run(optionSet, config['messages']))
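The rewritten helper retries conversation creation up to five times, sleeping briefly between attempts, and sends a randomized `x-forwarded-for` address instead of a `_U` cookie. The retry skeleton reduced to its essentials (most headers omitted here; a bare request like this may be rejected without them):

```py
import time
import random
import requests

def create_conversation(attempts: int = 5) -> dict:
    for _ in range(attempts):
        try:
            resp = requests.get(
                'https://edgeservices.bing.com/edgesvc/turing/conversation/create',
                headers={'x-forwarded-for': f'13.{random.randint(104, 107)}.'
                                            f'{random.randint(0, 255)}.{random.randint(0, 255)}'})
            return resp.json()  # contains conversationId, clientId, conversationSignature
        except Exception:
            time.sleep(0.5)  # brief pause before the next attempt, as in the helper
    raise Exception('Failed to create conversation.')
```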
@@ -83,15 +83,24 @@ payload = {
 completion = ''

 def format(chunk):
-    global completion
-
-    if b'parts' in chunk:
-        json_data = json.loads(chunk.decode('utf-8').split('data: ')[1])
-        token = json_data['message']['content']['parts'][0]
-        token = token.replace(completion, '')
-        completion += token
-
-        print(token, flush=True)
+    try:
+        global completion
+
+        if b'parts' in chunk:
+            json_data = json.loads(chunk.decode('utf-8').split('data: ')[1])
+            token = json_data['message']['content']['parts'][0]
+            token = token.replace(completion, '')
+            completion += token
+
+            print(token, flush=True, end = '')
+
+    except Exception as e:
+        pass

-response = requests.post('https://chat.openai.com/backend-api/conversation',
-    json=payload, headers=headers, content_callback=format, impersonate='chrome110')
+for _ in range(3):
+    try:
+        response = requests.post('https://chat.openai.com/backend-api/conversation',
+            json=payload, headers=headers, content_callback=format, impersonate='chrome110')
+        break
+    except:
+        continue
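The chunk handler relies on the fact that each OpenAI event repeats the full completion so far; `token.replace(completion, '')` strips the already-printed prefix. The trick in isolation, with synthetic chunks:

```py
completion = ''
for parts in ['Hel', 'Hello', 'Hello wor', 'Hello world']:
    token = parts.replace(completion, '')  # keep only the new suffix
    completion += token
    print(token, end='')
print()  # prints "Hello world" exactly once, with no repeated prefixes
```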
@@ -16,7 +16,7 @@ json_data = json.dumps({
         'skill': skill,
         'date': datetime.datetime.now().strftime('%d/%m/%Y'),
         'language': 'en',
-        'detailed': False,
+        'detailed': True,
         'creative': True,
         'customLinks': []}}, separators=(',', ':'))

@@ -39,17 +39,31 @@ headers = {

 def output(chunk):
-    if chunk == b'data: \r\ndata: \r\ndata: \r\n\r\n':
-        chunk = b'data: \n\r\n\r\n'
+    try:
+        if b'PHIND_METADATA' in chunk:
+            return
+
+        if chunk == b'data: \r\ndata: \r\ndata: \r\n\r\n':
+            chunk = b'data: \n\r\n\r\n'

-    chunk = chunk.decode()
+        chunk = chunk.decode()

-    chunk = chunk.replace('data: \r\n\r\ndata: ', 'data: \n')
-    chunk = chunk.replace('\r\ndata: \r\ndata: \r\n\r\n', '\n\r\n\r\n')
-    chunk = chunk.replace('data: ', '').replace('\r\n\r\n', '')
+        chunk = chunk.replace('data: \r\n\r\ndata: ', 'data: \n')
+        chunk = chunk.replace('\r\ndata: \r\ndata: \r\n\r\n', '\n\r\n\r\n')
+        chunk = chunk.replace('data: ', '').replace('\r\n\r\n', '')

-    print(chunk, flush=True)
+        print(chunk, flush=True, end = '')
+
+    except json.decoder.JSONDecodeError:
+        pass

-response = requests.post('https://www.phind.com/api/infer/answer',
-    headers=headers, data=json_data, content_callback=output, timeout=999999, impersonate='safari15_5')
+while True:
+    try:
+        response = requests.post('https://www.phind.com/api/infer/answer',
+            headers=headers, data=json_data, content_callback=output, timeout=999999, impersonate='safari15_5')
+
+        exit(0)
+
+    except Exception as e:
+        print('an error occured, retrying... |', e, flush=True)
+        continue
@@ -0,0 +1,49 @@
+import json
+import sys
+from curl_cffi import requests
+
+config = json.loads(sys.argv[1])
+prompt = config['messages'][-1]['content']
+
+headers = {
+    'authority': 'chatbot.theb.ai',
+    'accept': 'application/json, text/plain, */*',
+    'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+    'content-type': 'application/json',
+    'origin': 'https://chatbot.theb.ai',
+    'referer': 'https://chatbot.theb.ai/',
+    'sec-ch-ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
+    'sec-ch-ua-mobile': '?0',
+    'sec-ch-ua-platform': '"macOS"',
+    'sec-fetch-dest': 'empty',
+    'sec-fetch-mode': 'cors',
+    'sec-fetch-site': 'same-origin',
+    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
+}
+
+json_data = {
+    'prompt': prompt,
+    'options': {}
+}
+
+def format(chunk):
+    try:
+        chunk_json = json.loads(chunk.decode('utf-8'))
+        completion_chunk = chunk_json['detail']['choices'][0]['delta']['content']
+
+        print(completion_chunk, flush=True, end = '')
+
+    except Exception as e:
+        print('[ERROR] an error occured, retrying... |', e, flush=True)
+        return
+
+while True:
+    try:
+        response = requests.post('https://chatbot.theb.ai/api/chat-process',
+            headers=headers, json=json_data, content_callback=format, impersonate='chrome110')
+
+        exit(0)
+
+    except Exception as e:
+        print('[ERROR] an error occured, retrying... |', e, flush=True)
+        continue
@@ -65,7 +65,15 @@ def output(chunk):
     if b'"youChatToken"' in chunk:
         chunk_json = json.loads(chunk.decode().split('data: ')[1])

-        print(chunk_json['youChatToken'], flush=True)
+        print(chunk_json['youChatToken'], flush=True, end = '')

-response = requests.get(f'https://you.com/api/streamingSearch?{params}',
-    headers=headers, content_callback=output, impersonate='safari15_5')
+while True:
+    try:
+        response = requests.get(f'https://you.com/api/streamingSearch?{params}',
+            headers=headers, content_callback=output, impersonate='safari15_5')
+
+        exit(0)
+
+    except Exception as e:
+        print('an error occured, retrying... |', e, flush=True)
+        continue
@@ -0,0 +1,18 @@
+from . import Provider
+from .Providers import (
+    Phind,
+    You,
+    Bing,
+    Openai,
+    Yqcloud,
+    Theb,
+    Aichat,
+    Ora,
+    Aws,
+    Bard,
+    Vercel,
+    Pierangelo,
+    Forefront
+)
+
+Palm = Bard
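The new package `__init__` also aliases `Palm` to `Bard`, so both names resolve to the same provider module:

```py
from g4f.Provider import Palm, Bard

assert Palm is Bard  # alias added in this commit
```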
@@ -1,79 +0,0 @@
-import os
-import time
-import json
-import uuid
-import hashlib
-import requests
-
-from ..typing import sha256, Dict, get_type_hints
-from datetime import datetime
-
-url: str = 'https://ai.ls'
-model: str = 'gpt-3.5-turbo'
-
-
-class Utils:
-    def hash(json_data: Dict[str, str]) -> sha256:
-
-        secretKey: bytearray = bytearray([79, 86, 98, 105, 91, 84, 80, 78, 123, 83,
-            35, 41, 99, 123, 51, 54, 37, 57, 63, 103, 59, 117, 115, 108, 41, 67, 76])
-
-        base_string: str = '%s:%s:%s:%s' % (
-            json_data['t'],
-            json_data['m'],
-            'OVbi[TPN{S#)c{36%9?g;usl)CL',
-            len(json_data['m'])
-        )
-
-        return hashlib.sha256(base_string.encode()).hexdigest()
-
-    def format_timestamp(timestamp: int) -> str:
-
-        e = timestamp
-        n = e % 10
-        r = n + 1 if n % 2 == 0 else n
-        return str(e - n + r)
-
-
-def _create_completion(model: str, messages: list, temperature: float = 0.6, stream: bool = False):
-    headers = {
-        'authority': 'api.caipacity.com',
-        'accept': '*/*',
-        'authorization': 'Bearer free',
-        'client-id': str(uuid.uuid4()),
-        'client-v': '0.1.26',
-        'content-type': 'application/json',
-        'origin': 'https://ai.ls',
-        'referer': 'https://ai.ls/',
-        'sec-ch-ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
-        'sec-ch-ua-mobile': '?0',
-        'sec-ch-ua-platform': '"macOS"',
-        'sec-fetch-dest': 'empty',
-        'sec-fetch-mode': 'cors',
-        'sec-fetch-site': 'cross-site',
-        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
-    }
-
-    timestamp = Utils.format_timestamp(int(time.time() * 1000))
-
-    sig = {
-        'd': datetime.now().strftime('%Y-%m-%d'),
-        't': timestamp,
-        's': Utils.hash({
-            't': timestamp,
-            'm': messages[-1]['content']})}
-
-    json_data = json.dumps(separators=(',', ':'), obj={
-        'model': 'gpt-3.5-turbo',
-        'temperature': temperature,
-        'stream': True,
-        'messages': messages} | sig)
-
-    response = requests.post('https://api.caipacity.com/v1/chat/completions?full=false',
-        headers=headers, data=json_data, stream=True)
-
-    for token in response.iter_lines():
-        yield token.decode()
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
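For reference, the removed Ails provider signed each request with a millisecond timestamp whose last digit is forced to be odd. That arithmetic, checked standalone:

```py
def format_timestamp(timestamp: int) -> str:
    # bump an even last digit up by one; leave an odd one unchanged
    n = timestamp % 10
    r = n + 1 if n % 2 == 0 else n
    return str(timestamp - n + r)

assert format_timestamp(1680000000000) == '1680000000001'
assert format_timestamp(1680000000005) == '1680000000005'
```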
@@ -1,5 +0,0 @@
-url = None
-model = None
-
-def _create_completion(model: str, messages: list, **kwargs):
-    return

@@ -1 +0,0 @@
-from . import Provider, Ails, Phind, You, Bing, Openai, Yqcloud
@@ -1,10 +1,9 @@
 import sys

 from .typing import MetaModels, Union
-from . import Providers
+from . import Provider


-class Models(metaclass=MetaModels):
+class Model(metaclass=MetaModels):

     class model:
         name: str

@@ -14,30 +13,39 @@ class Models(metaclass=MetaModels):
     class gpt_35_turbo:
         name: str = 'gpt-3.5-turbo'
         base_provider: str = 'openai'
-        best_site: str = Providers.Ails
+        best_site: Provider.Provider = Provider.Forefront

     class gpt_4:
         name: str = 'gpt-4'
         base_provider: str = 'openai'
-        best_site: str = Providers.Phind
+        best_site: Provider.Provider = Provider.Bing
+
+    class davinvi_003:
+        name: str = 'davinvi-003'
+        base_provider: str = 'openai'
+        best_site: Provider.Provider = Provider.Vercel

 class Utils:
     convert: dict = {
-        'gpt-3.5-turbo': Models.gpt_35_turbo,
-        'gpt-4': Models.gpt_4
+        'gpt-3.5-turbo': Model.gpt_35_turbo,
+        'gpt-4': Model.gpt_4
     }

 class ChatCompletion:
     @staticmethod
-    def create(model: Models.model or str, messages: list, provider: Providers.Provider = None, stream: bool = False, **kwargs):
+    def create(model: Model.model or str, messages: list, provider: Provider.Provider = None, stream: bool = False, **kwargs):
         try:
             if isinstance(model, str):
                 model = Utils.convert[model]

             engine = model.best_site if not provider else provider

-            return (engine._create_completion(model.name, messages, **kwargs)
-                if stream else ''.join(engine._create_completion(model.name, messages, **kwargs)))
+            if not engine.supports_stream and stream == True:
+                print(
+                    f"ValueError: {engine.__name__} does not support 'stream' argument", file=sys.stderr)
+                sys.exit(1)
+
+            return (engine._create_completion(model.name, messages, stream, **kwargs)
+                if stream else ''.join(engine._create_completion(model.name, messages, stream, **kwargs)))

         except TypeError as e:
             print(e)
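With the new guard, requesting `stream=True` from a provider whose module sets `supports_stream = False` prints a `ValueError` message to stderr and exits instead of failing later. A usage sketch against a non-streaming provider:

```py
import g4f
from g4f.Provider import Aichat  # Aichat sets supports_stream = False

# stream=False joins the provider's generator into a single string;
# stream=True here would hit the new guard and sys.exit(1).
text = g4f.ChatCompletion.create(
    model='gpt-3.5-turbo',
    provider=Aichat,
    messages=[{"role": "user", "content": "Hello world"}],
    stream=False)
print(text)
```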
@@ -2,7 +2,6 @@ from typing import Dict, NewType, Union, Optional, List, get_type_hints

 sha256 = NewType('sha_256_hash', str)

-
 class MetaModels(type):
     def __str__(cls):
         output: List = [
@@ -0,0 +1,49 @@
+import browser_cookie3
+
+
+class Utils:
+    browsers = [
+        browser_cookie3.chrome,   # 62.74% market share
+        browser_cookie3.safari,   # 24.12% market share
+        browser_cookie3.firefox,  #  4.56% market share
+        browser_cookie3.edge,     #  2.85% market share
+        browser_cookie3.opera,    #  1.69% market share
+        browser_cookie3.brave,    #  0.96% market share
+        browser_cookie3.opera_gx, #  0.64% market share
+        browser_cookie3.vivaldi,  #  0.32% market share
+    ]
+
+    def get_cookies(domain: str, setName: str = None, setBrowser: str = False) -> dict:
+        cookies = {}
+
+        if setBrowser != False:
+            for browser in Utils.browsers:
+                if browser.__name__ == setBrowser:
+                    try:
+                        for c in browser(domain_name=domain):
+                            if c.name not in cookies:
+                                cookies = cookies | {c.name: c.value}
+
+                    except Exception as e:
+                        pass
+
+        else:
+            for browser in Utils.browsers:
+                try:
+                    for c in browser(domain_name=domain):
+                        if c.name not in cookies:
+                            cookies = cookies | {c.name: c.value}
+
+                except Exception as e:
+                    pass
+
+        if setName:
+            try:
+                return {setName: cookies[setName]}
+
+            except ValueError:
+                print(f'Error: could not find {setName} cookie in any browser.')
+                exit(1)
+
+        else:
+            return cookies
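The new cookie helper walks every installed browser in market-share order and keeps the first value seen for each cookie name. Note that its `except ValueError` will not actually catch the `KeyError` a missing cookie raises. A usage sketch, assuming the file lands as `g4f/utils.py` (the diff does not show the path):

```py
from g4f.utils import Utils  # module path is an assumption; see note above

# returns {'_U': '...'} from whichever browser has a bing.com session;
# a missing cookie raises KeyError (the helper's except clause names ValueError)
cookies = Utils.get_cookies(domain='bing.com', setName='_U')
print(cookies)
```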
@@ -3,7 +3,7 @@ import time
 import json
 import random

-from g4f import Models, ChatCompletion, Providers
+from g4f import Model, ChatCompletion, Provider
 from flask import Flask, request, Response
 from flask_cors import CORS

@@ -20,12 +20,12 @@ def chat_completions():
         'gpt-3.5-turbo': 'gpt-3.5-turbo-0301'
     }

-    response = ChatCompletion.create(model=Models.gpt_35_turbo, provider=Providers.Openai, stream=streaming,
+    response = ChatCompletion.create(model=Model.gpt_35_turbo, stream=streaming,
         messages=messages)

     if not streaming:
         while 'curl_cffi.requests.errors.RequestsError' in response:
-            response = ChatCompletion.create(model=Models.gpt_35_turbo, provider=Providers.Openai, stream=streaming,
+            response = ChatCompletion.create(model=Model.gpt_35_turbo, stream=streaming,
                 messages=messages)

     completion_timestamp = int(time.time())