bing & new proxy

This commit is contained in:
abc 2023-05-16 16:57:16 +01:00
commit e4871265e8
7 changed files with 287 additions and 85 deletions

27 g4f/Providers/Bing.py Normal file
View File

@@ -0,0 +1,27 @@
import os
import json
import time
import subprocess

from ..typing import sha256, Dict, get_type_hints

url = 'https://bing.com/chat'
model = ['gpt-3.5-turbo', 'gpt-4']

def _create_completion(model: str, messages: list, **kwargs):
    path = os.path.dirname(os.path.realpath(__file__))
    config = json.dumps({
        'messages': messages,
        'model': model}, separators=(',', ':'))

    cmd = ['python3', f'{path}/helpers/bing.py', config]

    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

    for line in iter(p.stdout.readline, b''):
        yield line.decode('utf-8')[:-1]

params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
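For context, the provider itself does no network work: _create_completion shells out to helpers/bing.py and re-yields whatever the helper prints, one line at a time. A minimal consumer sketch (not part of the commit; assumes the package layout above and a logged-in Edge bing.com session):

# Hypothetical caller of the new provider's generator (illustration only).
from g4f import Providers

messages = [{'role': 'user', 'content': 'Hello, what can you do?'}]

# 'gpt-4' selects Bing's "precise"-style option set defined in helpers/bing.py
for token in Providers.Bing._create_completion('gpt-4', messages):
    print(token, end='', flush=True)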

View File

@@ -3,8 +3,8 @@ import json
 import time
 import subprocess
-url = None
-model = None
+url = 'https://phind.com'
+model = ['gpt-3.5-turbo', 'gpt-4']
 def _create_completion(model: str, messages: list, **kwargs):

View File

@@ -3,6 +3,8 @@ import json
 import time
 import subprocess
+from ..typing import sha256, Dict, get_type_hints
 url = 'https://you.com'
 model = 'gpt-3.5-turbo'
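The typing import added here presumably feeds the same get_type_hints-based `params` capability string that Bing.py builds above. A rough, self-contained illustration of what that introspection evaluates to, using a stand-in function with the shared provider signature:

# Stand-in with the same signature as the providers' _create_completion.
from typing import get_type_hints

def _create_completion(model: str, messages: list, **kwargs):
    ...

hints = get_type_hints(_create_completion)
args  = _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]
print('supports: (%s)' % ', '.join(f'{name}: {hints[name].__name__}' for name in args))
# -> supports: (model: str, messages: list)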

View File

@@ -1 +1 @@
-from . import Ails, Phind, Provider, You
+from . import Provider, Ails, Phind, You, Bing
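With Bing exported from g4f.Providers, it can be passed explicitly as the provider argument. A hedged sketch of that call path (the Models.gpt_4 entry is an assumption; the model.name forwarding matches the ChatCompletion change further down):

# Sketch only: assumes g4f.Models exposes a gpt_4 entry alongside gpt_35_turbo.
from g4f import Models, ChatCompletion, Providers

response = ChatCompletion.create(
    model=Models.gpt_4,        # the engine receives model.name, i.e. 'gpt-4'
    provider=Providers.Bing,   # the newly exported provider
    stream=True,
    messages=[{'role': 'user', 'content': 'Summarise this commit.'}])

for chunk in response:
    print(chunk, end='')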

185 g4f/Providers/helpers/bing.py Normal file
View File

@@ -0,0 +1,185 @@
import sys
import ssl
import uuid
import json
import random
import asyncio
import certifi
import requests
import websockets
import browser_cookie3
config = json.loads(sys.argv[1])
ssl_context = ssl.create_default_context()
ssl_context.load_verify_locations(certifi.where())
conversationstyles = {
'gpt-4': [ #'precise'
"nlu_direct_response_filter",
"deepleo",
"disable_emoji_spoken_text",
"responsible_ai_policy_235",
"enablemm",
"h3imaginative",
"travelansgnd",
"dv3sugg",
"clgalileo",
"gencontentv3",
"dv3sugg",
"responseos",
"e2ecachewrite",
"cachewriteext",
"nodlcpcwrite",
"travelansgnd",
"nojbfedge",
],
'balanced': [
"nlu_direct_response_filter",
"deepleo",
"disable_emoji_spoken_text",
"responsible_ai_policy_235",
"enablemm",
"galileo",
"dv3sugg",
"responseos",
"e2ecachewrite",
"cachewriteext",
"nodlcpcwrite",
"travelansgnd",
"nojbfedge",
],
'gpt-3.5-turbo': [ #'precise'
"nlu_direct_response_filter",
"deepleo",
"disable_emoji_spoken_text",
"responsible_ai_policy_235",
"enablemm",
"galileo",
"dv3sugg",
"responseos",
"e2ecachewrite",
"cachewriteext",
"nodlcpcwrite",
"travelansgnd",
"h3precise",
"clgalileo",
"nojbfedge",
]
}
def format(msg: dict) -> str:
    return json.dumps(msg) + '\x1e'

def get_token():
    try:
        cookies = {c.name: c.value for c in browser_cookie3.edge(domain_name='bing.com')}
        return cookies['_U']
    except:
        print('Error: could not find bing _U cookie in edge browser.')
        exit(1)
class AsyncCompletion:
    async def create(
            prompt     : str = None,
            optionSets : list = None,
            token      : str = get_token()):

        create = requests.get('https://edgeservices.bing.com/edgesvc/turing/conversation/create',
            headers = {
                'host'       : 'edgeservices.bing.com',
                'authority'  : 'edgeservices.bing.com',
                'cookie'     : f'_U={token}',
                'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
            }
        )

        conversationId        = create.json()['conversationId']
        clientId              = create.json()['clientId']
        conversationSignature = create.json()['conversationSignature']

        wss: websockets.WebSocketClientProtocol or None = None

        wss = await websockets.connect('wss://sydney.bing.com/sydney/ChatHub', max_size = None, ssl = ssl_context,
            extra_headers = {
                'accept': 'application/json',
                'accept-language': 'en-US,en;q=0.9',
                'content-type': 'application/json',
                'sec-ch-ua': '"Not_A Brand";v="99", Microsoft Edge";v="110", "Chromium";v="110"',
                'sec-ch-ua-arch': '"x86"',
                'sec-ch-ua-bitness': '"64"',
                'sec-ch-ua-full-version': '"109.0.1518.78"',
                'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
                'sec-ch-ua-mobile': '?0',
                'sec-ch-ua-model': "",
                'sec-ch-ua-platform': '"Windows"',
                'sec-ch-ua-platform-version': '"15.0.0"',
                'sec-fetch-dest': 'empty',
                'sec-fetch-mode': 'cors',
                'sec-fetch-site': 'same-origin',
                'x-ms-client-request-id': str(uuid.uuid4()),
                'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
                'Referer': 'https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx',
                'Referrer-Policy': 'origin-when-cross-origin',
                'x-forwarded-for': f'13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}'
            }
        )

        await wss.send(format({'protocol': 'json', 'version': 1}))
        await wss.recv()

        struct = {
            'arguments': [
                {
                    'source': 'cib',
                    'optionsSets': optionSets,
                    'isStartOfSession': True,
                    'message': {
                        'author': 'user',
                        'inputMethod': 'Keyboard',
                        'text': prompt,
                        'messageType': 'Chat'
                    },
                    'conversationSignature': conversationSignature,
                    'participant': {
                        'id': clientId
                    },
                    'conversationId': conversationId
                }
            ],
            'invocationId': '0',
            'target': 'chat',
            'type': 4
        }

        await wss.send(format(struct))

        base_string = ''

        final = False
        while not final:
            objects = str(await wss.recv()).split('\x1e')
            for obj in objects:
                if obj is None or obj == '':
                    continue

                response = json.loads(obj)
                if response.get('type') == 1 and response['arguments'][0].get('messages',):
                    response_text = response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0].get('text')

                    yield (response_text.replace(base_string, ''))
                    base_string = response_text

                elif response.get('type') == 2:
                    final = True

        await wss.close()

async def run(optionSets, messages):
    async for value in AsyncCompletion.create(prompt=messages[-1]['content'],
            optionSets=optionSets):

        print(value, flush=True)

optionSet = conversationstyles[config['model']]
asyncio.run(run(optionSet, config['messages']))
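Each type-1 frame from the Sydney websocket carries the full answer accumulated so far, so the helper keeps base_string and yields only the newly appended suffix. A small self-contained illustration of that delta logic, with a mocked frame whose shape is an assumption based only on the fields the helper reads:

# Mocked type-1 frame; in reality these arrive over the websocket, '\x1e'-separated.
import json

base_string = 'Hello'
frame = json.dumps({
    'type': 1,
    'arguments': [{
        'messages': [{
            'adaptiveCards': [{'body': [{'text': 'Hello, world'}]}]
        }]
    }]
})

response = json.loads(frame)
if response.get('type') == 1 and response['arguments'][0].get('messages'):
    response_text = response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0].get('text')
    print(response_text.replace(base_string, ''))   # -> ', world' (only the new part)
    base_string = response_text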

View File

@@ -36,8 +36,8 @@ class ChatCompletion:
             engine = model.best_site if not provider else provider
-            return (engine._create_completion(model, messages, **kwargs)
-                if stream else ''.join(engine._create_completion(model, messages, **kwargs)))
+            return (engine._create_completion(model.name, messages, **kwargs)
+                if stream else ''.join(engine._create_completion(model.name, messages, **kwargs)))
         except TypeError as e:
             print(e)
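Providers receive the model id as a plain string (each module keeps a `model` list of supported ids), so ChatCompletion now forwards model.name instead of the Model object itself. A hedged sketch of the assumed shape of a g4f.Models entry; the real class may differ:

# Assumed shape of a Models entry, inferred from the model.name / model.best_site usage above.
from dataclasses import dataclass

@dataclass
class Model:
    name: str            # string id handed to providers, e.g. 'gpt-3.5-turbo'
    best_site: object    # default provider module used when no provider is given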

View File

@@ -4,100 +4,88 @@ import json
 import random
 from g4f import Models, ChatCompletion, Providers
-from flask import Flask, request
+from flask import Flask, request, Response
+from flask_cors import CORS

 app = Flask(__name__)
-app.config['JSONIFY_PRETTYPRINT_REGULAR'] = True
+CORS(app)

-class website:
-    def __init__(self) -> None:
-        self.routes = {
-            '/chat/completions': {
-                'function': self.chat_completions,
-                'methods': ['POST', 'GET']
-            }
-        }
-        self.config = {
-            'host': '0.0.0.0',
-            'port': 1337,
-            'debug': True
-        }
-
-    def chat_completions(self):
-        streaming = request.json.get('stream', False)
-        model = request.json.get('model', 'gpt-3.5-turbo')
-        messages = request.json.get('messages')
-
-        models = {
-            'gpt-3.5-turbo': 'gpt-3.5-turbo-0301'
-        }
-
-        response = ChatCompletion.create(model=Models.gpt_35_turbo, stream=streaming,
-            messages=messages)
-
-        if not streaming:
-            completion_timestamp = int(time.time())
-            completion_id = ''.join(random.choices(
-                'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789', k=28))
-
-            return {
-                'id': 'chatcmpl-%s' % completion_id,
-                'object': 'chat.completion',
-                'created': completion_timestamp,
-                'model': models[model],
-                'usage': {
-                    'prompt_tokens': None,
-                    'completion_tokens': None,
-                    'total_tokens': None
-                },
-                'choices': [{
-                    'message': {
-                        'role': 'assistant',
-                        'content': response
-                    },
-                    'finish_reason': 'stop',
-                    'index': 0
-                }]
-            }
-
-        def stream():
-            for token in response:
-                completion_timestamp = int(time.time())
-                completion_id = ''.join(random.choices(
-                    'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789', k=28))
-
-                completion_data = {
-                    'id': f'chatcmpl-{completion_id}',
-                    'object': 'chat.completion.chunk',
-                    'created': completion_timestamp,
-                    'model': 'gpt-3.5-turbo-0301',
-                    'choices': [
-                        {
-                            'delta': {
-                                'content': token
-                            },
-                            'index': 0,
-                            'finish_reason': None
-                        }
-                    ]
-                }
-
-                yield 'data: %s\n\n' % json.dumps(completion_data, separators=(',' ':'))
-                time.sleep(0.1)
-
-        return app.response_class(stream(), mimetype='text/event-stream')
+@app.route("/chat/completions", methods=['POST'])
+def chat_completions():
+    streaming = request.json.get('stream', False)
+    model = request.json.get('model', 'gpt-3.5-turbo')
+    messages = request.json.get('messages')
+
+    models = {
+        'gpt-3.5-turbo': 'gpt-3.5-turbo-0301'
+    }
+
+    response = ChatCompletion.create(model=Models.gpt_35_turbo, provider=Providers.You, stream=streaming,
+        messages=messages)
+
+    if not streaming:
+        while 'curl_cffi.requests.errors.RequestsError' in response:
+            response = ChatCompletion.create(model=Models.gpt_35_turbo, provider=Providers.You, stream=streaming,
+                messages=messages)
+
+        completion_timestamp = int(time.time())
+        completion_id = ''.join(random.choices(
+            'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789', k=28))
+
+        return {
+            'id': 'chatcmpl-%s' % completion_id,
+            'object': 'chat.completion',
+            'created': completion_timestamp,
+            'model': models[model],
+            'usage': {
+                'prompt_tokens': None,
+                'completion_tokens': None,
+                'total_tokens': None
+            },
+            'choices': [{
+                'message': {
+                    'role': 'assistant',
+                    'content': response
+                },
+                'finish_reason': 'stop',
+                'index': 0
+            }]
+        }
+
+    def stream():
+        for token in response:
+            completion_timestamp = int(time.time())
+            completion_id = ''.join(random.choices(
+                'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789', k=28))
+
+            completion_data = {
+                'id': f'chatcmpl-{completion_id}',
+                'object': 'chat.completion.chunk',
+                'created': completion_timestamp,
+                'model': 'gpt-3.5-turbo-0301',
+                'choices': [
+                    {
+                        'delta': {
+                            'content': token
+                        },
+                        'index': 0,
+                        'finish_reason': None
+                    }
+                ]
+            }
+
+            yield 'data: %s\n\n' % json.dumps(completion_data, separators=(',' ':'))
+            time.sleep(0.1)
+
+    return app.response_class(stream(), mimetype='text/event-stream')

 if __name__ == '__main__':
-    website = website()
-    for route in website.routes:
-        app.add_url_rule(
-            route,
-            view_func=website.routes[route]['function'],
-            methods=website.routes[route]['methods']
-        )
-    app.run(**website.config)
+    config = {
+        'host': '0.0.0.0',
+        'port': 1337,
+        'debug': True
+    }
+
+    app.run(**config)
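The REST front end now exposes the OpenAI-style /chat/completions endpoint directly on the module-level Flask app, listening on port 1337. A hedged streaming-client sketch (endpoint path, SSE framing, and chunk shape are taken from the diff; host, prompt, and the requests dependency are illustrative):

# Illustrative client for the local API started by __main__ above.
import json
import requests

resp = requests.post(
    'http://localhost:1337/chat/completions',
    json={
        'model': 'gpt-3.5-turbo',
        'stream': True,
        'messages': [{'role': 'user', 'content': 'Say hello in one word.'}],
    },
    stream=True)

for line in resp.iter_lines():
    if not line or not line.startswith(b'data: '):
        continue
    chunk = json.loads(line[len(b'data: '):])
    print(chunk['choices'][0]['delta'].get('content', ''), end='', flush=True)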