mode APIs (Yqcloud, Openai)

このコミットが含まれているのは:
abc 2023-05-16 23:55:27 +01:00
コミット 61d14252fd
8個のファイルの変更170行の追加7行の削除

27
g4f/Providers/Openai.py ノーマルファイル
ファイルの表示

@@ -0,0 +1,27 @@
import os
import json
import time
import subprocess
from ..typing import sha256, Dict, get_type_hints
url = 'https://chat.openai.com/chat'
model = ['gpt-3.5-turbo']
def _create_completion(model: str, messages: list, **kwargs):
    """Stream a completion by delegating to ``helpers/openai.py``.

    The helper runs as a subprocess (it needs local browser cookies and
    curl_cffi); the conversation is passed as a single JSON argv argument
    and each line the helper prints is yielded as one response token.

    Args:
        model: model identifier forwarded to the helper.
        messages: OpenAI-style chat messages (list of role/content dicts).
        **kwargs: accepted for interface compatibility; unused.

    Yields:
        str: one decoded line of helper output, without its trailing newline.
    """
    path = os.path.dirname(os.path.realpath(__file__))
    config = json.dumps({
        'messages': messages,
        'model': model}, separators=(',', ':'))

    cmd = ['python3', f'{path}/helpers/openai.py', config]

    # Context manager closes the pipe and reaps the child process;
    # the original Popen was never waited on and left a zombie behind.
    with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as p:
        for line in iter(p.stdout.readline, b''):
            # [:-1] strips the trailing newline kept by readline()
            yield line.decode('utf-8')[:-1]
# Introspection string used by g4f to advertise which arguments
# `_create_completion` takes, e.g. "g4f.Providers.Openai supports: (model: str, messages: list)".
_arg_names = _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]
_hints = get_type_hints(_create_completion)
params = 'g4f.Providers.%s supports: (%s)' % (
    os.path.basename(__file__)[:-3],
    ', '.join(f'{arg}: {_hints[arg].__name__}' for arg in _arg_names))

ファイルの表示

@@ -3,6 +3,8 @@ import json
import time
import subprocess
from ..typing import sha256, Dict, get_type_hints
url = 'https://phind.com'
model = ['gpt-3.5-turbo', 'gpt-4']
@@ -10,6 +12,7 @@ def _create_completion(model: str, messages: list, **kwargs):
path = os.path.dirname(os.path.realpath(__file__))
config = json.dumps({
'model': model,
'messages': messages}, separators=(',', ':'))
cmd = ['python3', f'{path}/helpers/phind.py', config]
@@ -26,4 +29,7 @@ def _create_completion(model: str, messages: list, **kwargs):
if b'ping - 2023-' in line:
continue
yield line.decode('utf-8')[:-1]
yield line.decode('utf-8')[:-1]
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
'(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])

32
g4f/Providers/Yqcloud.py ノーマルファイル
ファイルの表示

@@ -0,0 +1,32 @@
import os
import time
import requests
from ..typing import sha256, Dict, get_type_hints
def _create_completion(model: str, messages: list, **kwargs):
    """Stream a completion from the chat9.yqcloud.top backend.

    Only the last user message is sent; a fresh pseudo-random ``userId``
    is generated per call from the current timestamp.

    Args:
        model: accepted for interface compatibility; the backend chooses its own model.
        messages: OpenAI-style chat messages; only ``messages[-1]['content']`` is used.
        **kwargs: accepted for interface compatibility; unused.

    Yields:
        str: decoded chunks of the streamed answer.

    Raises:
        requests.HTTPError: if the backend responds with an error status.
    """
    import codecs

    headers = {
        'authority': 'api.aichatos.cloud',
        'origin': 'https://chat9.yqcloud.top',
        'referer': 'https://chat9.yqcloud.top/',
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
    }

    json_data = {
        'prompt': 'always respond in english | %s' % messages[-1]['content'],
        'userId': f'#/chat/{int(time.time() * 1000)}',
        'network': True,
        'apikey': '',
        'system': '',
        'withoutContext': False,
    }

    response = requests.post('https://api.aichatos.cloud/api/generateStream',
                             headers=headers, json=json_data, stream=True)
    # Fail loudly on HTTP errors instead of silently streaming an error page.
    response.raise_for_status()

    # Incremental decoder: a raw chunk can end in the middle of a multi-byte
    # UTF-8 sequence, which would make a plain bytes.decode() raise.
    decoder = codecs.getincrementaldecoder('utf-8')()
    for chunk in response.iter_content(chunk_size=2046):
        token = decoder.decode(chunk)
        # the backend echoes the injected prompt prefix back; skip it
        if 'always respond in english' not in token:
            yield token
# Human-readable capability string consumed by g4f's provider introspection:
# lists each positional argument of `_create_completion` with its annotated type.
_signature = _create_completion.__code__
_annotations = get_type_hints(_create_completion)
_described = ', '.join(
    f'{name}: {_annotations[name].__name__}'
    for name in _signature.co_varnames[:_signature.co_argcount])
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ({_described})'

ファイルの表示

@@ -1 +1 @@
from . import Provider, Ails, Phind, You, Bing
from . import Provider, Ails, Phind, You, Bing, Openai, Yqcloud

97
g4f/Providers/helpers/openai.py ノーマルファイル
ファイルの表示

@@ -0,0 +1,97 @@
import sys
import uuid
import json
import browser_cookie3
from curl_cffi import requests
config = json.loads(sys.argv[1])
def session_auth(cookies):
    """Fetch the current chat.openai.com session and return the parsed JSON.

    The returned payload carries the bearer ``accessToken`` that the
    backend-api conversation call authenticates with.
    """
    # Browser-like headers; curl_cffi's impersonation handles the TLS side.
    headers = {
        'authority': 'chat.openai.com',
        'accept': '*/*',
        'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
        'cache-control': 'no-cache',
        'pragma': 'no-cache',
        'referer': 'https://chat.openai.com/chat',
        'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"macOS"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
    }

    session_response = requests.get(
        'https://chat.openai.com/api/auth/session',
        cookies=cookies,
        headers=headers,
        impersonate='chrome110',
    )
    return session_response.json()
# Pull every chat.openai.com cookie out of the local Chrome profile; the
# next-auth session token among them is what authenticates us (no API key).
all_cookies = {cookie.name: cookie.value for cookie in browser_cookie3.chrome(
    domain_name='chat.openai.com')}

try:
    # forward only the session token to the API
    cookies = {
        '__Secure-next-auth.session-token': all_cookies['__Secure-next-auth.session-token'],
    }
except KeyError:  # narrowed from `except Exception`: only the dict lookup can fail here
    # The message is printed to stdout on purpose — the parent provider
    # streams this script's stdout back to the caller as output.
    print('Failed to get "__Secure-next-auth.session-token" in chrome, please make sure you are authenticated on openai.com')
    exit(0)
headers = {
'authority': 'chat.openai.com',
'accept': 'text/event-stream',
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'authorization': 'Bearer ' + session_auth(cookies)['accessToken'],
'cache-control': 'no-cache',
'content-type': 'application/json',
'origin': 'https://chat.openai.com',
'pragma': 'no-cache',
'referer': 'https://chat.openai.com/chat',
'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
}
payload = {
'action': 'next',
'history_and_training_disabled': False,
'messages': [
{
'id': str(uuid.uuid4()),
'author': {
'role': 'user',
},
'content': {
'content_type': 'text',
'parts': [
config['messages'][-1]['content']
]
}
}
],
'model': 'text-davinci-002-render-sha',
'parent_message_id': str(uuid.uuid4()),
'supports_modapi': True,
'timezone_offset_min': -60
}
# Text already printed so far; each SSE event carries the FULL answer, so we
# track the printed prefix and emit only the delta.
completion = ''

def handle_chunk(chunk):
    """curl_cffi content_callback: print only the new suffix of each SSE update.

    Renamed from ``format`` (which shadowed the builtin). Each 'data: {...}'
    event holds the complete text generated so far in
    ``message.content.parts[0]``.

    NOTE(review): assumes one 'data:' event per chunk — confirm against the
    actual stream framing.
    """
    global completion

    if b'parts' in chunk:
        json_data = json.loads(chunk.decode('utf-8').split('data: ')[1])
        token = json_data['message']['content']['parts'][0]

        # The original used token.replace(completion, ''), which strips EVERY
        # occurrence of the already-printed prefix and corrupts answers that
        # contain repeated text. Strip only the leading prefix instead.
        if completion and token.startswith(completion):
            delta = token[len(completion):]
        else:
            delta = token

        completion += delta
        print(delta, flush=True)

response = requests.post('https://chat.openai.com/backend-api/conversation',
    json=payload, headers=headers, content_callback=handle_chunk, impersonate='chrome110')

ファイルの表示

@@ -8,10 +8,12 @@ from curl_cffi import requests
config = json.loads(sys.argv[1])
prompt = config['messages'][-1]['content']
skill = 'expert' if config['model'] == 'gpt-4' else 'intermediate'
json_data = json.dumps({
'question': prompt,
'options': {
'skill': 'expert',
'skill': skill,
'date': datetime.datetime.now().strftime('%d/%m/%Y'),
'language': 'en',
'detailed': False,

ファイルの表示

@@ -12,5 +12,4 @@ class MetaModels(type):
' ...'
]
return '\n'.join(output)
return '\n'.join(output)

ファイルの表示

@@ -21,12 +21,12 @@ def chat_completions():
'gpt-3.5-turbo': 'gpt-3.5-turbo-0301'
}
response = ChatCompletion.create(model=Models.gpt_35_turbo, provider=Providers.You, stream=streaming,
response = ChatCompletion.create(model=Models.gpt_35_turbo, provider=Providers.Openai, stream=streaming,
messages=messages)
if not streaming:
while 'curl_cffi.requests.errors.RequestsError' in response:
response = ChatCompletion.create(model=Models.gpt_35_turbo, provider=Providers.You, stream=streaming,
response = ChatCompletion.create(model=Models.gpt_35_turbo, provider=Providers.Openai, stream=streaming,
messages=messages)
completion_timestamp = int(time.time())