diff --git a/README.md b/README.md
index 993019a..3cec7e1 100644
--- a/README.md
+++ b/README.md
@@ -29,22 +29,32 @@ for token in chat_completion:
 providers:
 ```py
-g4f.Providers.Openai # need to be logged in in browser
-g4f.Providers.Bing # need to be logged in in browser
-g4f.Providers.You
-g4f.Providers.Ails
-g4f.Providers.Phind
-g4f.Providers.Yqcloud
+from g4f.Provider import (
+    Phind,
+    You,
+    Bing,
+    Openai,
+    Yqcloud,
+    Theb,
+    Aichat,
+    Ora,
+    Aws,
+    Bard,
+    Vercel,
+    Pierangelo,
+    Forefront
+)
+
 # usage:
-response = g4f.ChatCompletion.create(..., provider=g4f.Providers.ProviderName)
+response = g4f.ChatCompletion.create(..., provider=ProviderName)
 ```
 
 ```py
 import g4f
 
-print(g4f.Providers.Ails.params) # supported args
+print(g4f.Provider.Ails.params) # supported args
 
 # Automatic selection of provider
@@ -63,7 +73,7 @@ print(response)
 
 # Set with provider
-response = g4f.ChatCompletion.create(model='gpt-3.5-turbo', provider=g4f.Providers.Openai, messages=[
+response = g4f.ChatCompletion.create(model='gpt-3.5-turbo', provider=g4f.Provider.Openai, messages=[
     {"role": "user", "content": "Hello world"}], stream=True)
 
 for message in response:
@@ -73,11 +83,11 @@ for message in response:
 
 ### Dev (more instructions soon)
 
-the `g4f.Providers`class
+the `g4f.Provider` class
 
 default:
 
-`./g4f/Providers/ProviderName.py`:
+`./g4f/Provider/Providers/ProviderName.py`:
 
 ```python
 import os
@@ -91,6 +101,6 @@ def _create_completion(prompt: str, args...):
     yield ...
 
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+params = f'g4f.Provider.{os.path.basename(__file__)[:-3]} supports: ' + \
     ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
 ```
diff --git a/g4f/Provider/Provider.py b/g4f/Provider/Provider.py
new file mode 100644
index 0000000..185df84
--- /dev/null
+++ b/g4f/Provider/Provider.py
@@ -0,0 +1,12 @@
+import os
+from ..typing import sha256, Dict, get_type_hints
+
+url = None
+model = None
+supports_stream = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    return
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
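The `Provider.py` added above doubles as the template that the README's Dev section describes. For orientation, a minimal sketch of what a filled-in `./g4f/Provider/Providers/ProviderName.py` might look like — the endpoint, payload and response shape here are placeholders, not a real service:

```py
import os, requests
from ...typing import sha256, Dict, get_type_hints

url = 'https://example-provider.invalid'  # placeholder endpoint, not a real service
model = ['gpt-3.5-turbo']
supports_stream = False

def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    # single-shot request; yielding keeps the interface uniform with streaming providers
    response = requests.post(url, json={'model': model, 'messages': messages})
    yield response.json()['message']  # placeholder response shape

params = f'g4f.Provider.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f'{name}: {get_type_hints(_create_completion)[name].__name__}'
                        for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
```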
diff --git a/g4f/Provider/Providers/Aichat.py b/g4f/Provider/Providers/Aichat.py
new file mode 100644
index 0000000..949c938
--- /dev/null
+++ b/g4f/Provider/Providers/Aichat.py
@@ -0,0 +1,37 @@
+import os, requests
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://chat-gpt.org/chat'
+model = ['gpt-3.5-turbo']
+supports_stream = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    headers = {
+        'authority': 'chat-gpt.org',
+        'accept': '*/*',
+        'cache-control': 'no-cache',
+        'content-type': 'application/json',
+        'origin': 'https://chat-gpt.org',
+        'pragma': 'no-cache',
+        'referer': 'https://chat-gpt.org/chat',
+        'sec-ch-ua-mobile': '?0',
+        'sec-ch-ua-platform': '"macOS"',
+        'sec-fetch-dest': 'empty',
+        'sec-fetch-mode': 'cors',
+        'sec-fetch-site': 'same-origin',
+        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
+    }
+
+    json_data = {
+        'message': messages[-1]['content'],
+        'temperature': 1,
+        'presence_penalty': 0,
+        'top_p': 1,
+        'frequency_penalty': 0
+    }
+
+    response = requests.post('https://chat-gpt.org/api/text', headers=headers, json=json_data)
+    yield response.json()['message']
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/Aws.py b/g4f/Provider/Providers/Aws.py
new file mode 100644
index 0000000..aa67b8f
--- /dev/null
+++ b/g4f/Provider/Providers/Aws.py
@@ -0,0 +1,26 @@
+import os
+import requests
+
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://4aiu6ctrknfxkoaigkigzh5lwm0cciuc.lambda-url.ap-east-1.on.aws/chat/completions'
+model = ['gpt-3.5-turbo', 'gpt-4']
+supports_stream = False
+
+class Auth(requests.auth.AuthBase):
+    def __init__(self):
+        self.token = 'sk-1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKL'
+
+    def __call__(self, r):
+        r.headers["authorization"] = "Bearer " + self.token
+        return r
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+
+    response = requests.post(url,
+        auth=Auth(), json={"model": model, "messages": messages})
+
+    yield (response.json()['choices'][0]['message']['content'])
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
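Note that `Aichat` (and `Ora` below) forward only `messages[-1]['content']`, so earlier turns are silently dropped. Providers that talk to single-prompt endpoints can instead flatten the whole history the way `Bard` and `Vercel` below do; roughly:

```py
def flatten(messages: list) -> str:
    # collapse the chat history into one prompt for single-input endpoints
    formatted = '\n'.join('%s: %s' % (m['role'], m['content']) for m in messages)
    return f'{formatted}\nassistant:'
```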
diff --git a/g4f/Provider/Providers/Bard.py b/g4f/Provider/Providers/Bard.py
new file mode 100644
index 0000000..0fd95e0
--- /dev/null
+++ b/g4f/Provider/Providers/Bard.py
@@ -0,0 +1,76 @@
+# implement proxy argument
+
+import os, requests, json, browser_cookie3, re, random
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://bard.google.com'
+model = ['Palm2']
+supports_stream = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    psid = {cookie.name: cookie.value for cookie in browser_cookie3.chrome(
+        domain_name='.google.com')}['__Secure-1PSID']
+
+    formatted = '\n'.join([
+        '%s: %s' % (message['role'], message['content']) for message in messages
+    ])
+    prompt = f'{formatted}\nAssistant:'
+
+    proxy = None
+
+    if proxy == None:
+        raise Exception('Proxy is required for Bard (set in g4f/Provider/Providers/Bard.py line 18)')
+
+    snlm0e = False
+    conversation_id = None
+    response_id = None
+    choice_id = None
+
+    client = requests.Session()
+    client.proxies = {
+        'http': f'https://{proxy}',
+        'https': f'https://{proxy}'} if proxy else None
+
+    client.headers = {
+        'authority': 'bard.google.com',
+        'content-type': 'application/x-www-form-urlencoded;charset=UTF-8',
+        'origin': 'https://bard.google.com',
+        'referer': 'https://bard.google.com/',
+        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
+        'x-same-domain': '1',
+        'cookie': f'__Secure-1PSID={psid}'
+    }
+
+    snlm0e = re.search(r'SNlM0e\":\"(.*?)\"',
+        client.get('https://bard.google.com/').text).group(1) if not snlm0e else snlm0e
+
+    params = {
+        'bl': 'boq_assistant-bard-web-server_20230326.21_p0',
+        '_reqid': random.randint(1111, 9999),
+        'rt': 'c'
+    }
+
+    data = {
+        'at': snlm0e,
+        'f.req': json.dumps([None, json.dumps([[prompt], None, [conversation_id, response_id, choice_id]])])}
+
+    intents = '.'.join([
+        'assistant',
+        'lamda',
+        'BardFrontendService'
+    ])
+
+    response = client.post(f'https://bard.google.com/_/BardChatUi/data/{intents}/StreamGenerate',
+        data=data, params=params)
+
+    chat_data = json.loads(response.content.splitlines()[3])[0][2]
+    if chat_data:
+        json_chat_data = json.loads(chat_data)
+
+        yield json_chat_data[0][0]
+
+    else:
+        yield 'error'
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Providers/Bing.py b/g4f/Provider/Providers/Bing.py
similarity index 71%
rename from g4f/Providers/Bing.py
rename to g4f/Provider/Providers/Bing.py
index 22e27e0..f7739c4 100644
--- a/g4f/Providers/Bing.py
+++ b/g4f/Provider/Providers/Bing.py
@@ -3,13 +3,13 @@ import json
 import time
 import subprocess
 
-from ..typing import sha256, Dict, get_type_hints
+from ...typing import sha256, Dict, get_type_hints
 
 url = 'https://bing.com/chat'
 model = ['gpt-3.5-turbo', 'gpt-4']
+supports_stream = True
 
-def _create_completion(model: str, messages: list, **kwargs):
-
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
     path = os.path.dirname(os.path.realpath(__file__))
     config = json.dumps({
         'messages': messages,
@@ -20,7 +20,8 @@ def _create_completion(model: str, messages: list, **kwargs):
     p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
 
     for line in iter(p.stdout.readline, b''):
-        yield line.decode('utf-8')[:-1]
+        #print(line)
+        yield line.decode('utf-8') #[:-1]
 
 params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
diff --git a/g4f/Provider/Providers/Forefront.py b/g4f/Provider/Providers/Forefront.py
new file mode 100644
index 0000000..b1d858c
--- /dev/null
+++ b/g4f/Provider/Providers/Forefront.py
@@ -0,0 +1,36 @@
+import os
+import json
+import requests
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'forefront.com'
+model = ['gpt-3.5-turbo']
+supports_stream = True
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    json_data = {
+        'text': messages[-1]['content'],
+        'action': 'noauth',
+        'id': '',
+        'parentId': '',
+        'workspaceId': '',
+        'messagePersona': '607e41fe-95be-497e-8e97-010a59b2e2c0',
+        'model': 'gpt-4',
+        'messages': messages[:-1] if len(messages) > 1 else [],
+        'internetMode': 'auto'
+    }
+
+    response = requests.post('https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat',
+        json=json_data, stream=True)
+
+    for token in response.iter_lines():
+        if b'delta' in token:
+            token = json.loads(token.decode().split('data: ')[1])['delta']
+            yield (token)
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
+
+
+
+
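Since `Forefront` becomes the default `best_site` for gpt-3.5-turbo in the `g4f/__init__.py` hunk further down, a quick smoke test follows the README's streaming pattern:

```py
import g4f

response = g4f.ChatCompletion.create(model='gpt-3.5-turbo', provider=g4f.Provider.Forefront,
    messages=[{'role': 'user', 'content': 'Hello world'}], stream=True)

for token in response:
    print(token, end='')
```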
diff --git a/g4f/Providers/Openai.py b/g4f/Provider/Providers/Openai.py
similarity index 73%
rename from g4f/Providers/Openai.py
rename to g4f/Provider/Providers/Openai.py
index 0ae18e0..8e9c2b8 100644
--- a/g4f/Providers/Openai.py
+++ b/g4f/Provider/Providers/Openai.py
@@ -3,12 +3,14 @@ import json
 import time
 import subprocess
 
-from ..typing import sha256, Dict, get_type_hints
+from ...typing import sha256, Dict, get_type_hints
 
 url = 'https://chat.openai.com/chat'
 model = ['gpt-3.5-turbo']
+supports_stream = True
 
-def _create_completion(model: str, messages: list, **kwargs):
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
     path = os.path.dirname(os.path.realpath(__file__))
 
     config = json.dumps({
@@ -20,8 +22,7 @@ def _create_completion(model: str, messages: list, **kwargs):
     p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
 
     for line in iter(p.stdout.readline, b''):
-        yield line.decode('utf-8')[:-1]
+        yield line.decode('utf-8') #[:-1]
-
 params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
     '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/Ora.py b/g4f/Provider/Providers/Ora.py
new file mode 100644
index 0000000..16681a1
--- /dev/null
+++ b/g4f/Provider/Providers/Ora.py
@@ -0,0 +1,42 @@
+import os, requests, uuid
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://ora.ai'
+model = ['gpt-3.5-turbo', 'gpt-4']
+supports_stream = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    headers = {
+        'authority': 'ora.ai',
+        'accept': '*/*',
+        'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+        'cache-control': 'no-cache',
+        'content-type': 'application/json',
+        'origin': 'https://ora.ai',
+        'pragma': 'no-cache',
+        'referer': 'https://ora.ai/chat/',
+        'sec-ch-ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
+        'sec-ch-ua-mobile': '?0',
+        'sec-ch-ua-platform': '"macOS"',
+        'sec-fetch-dest': 'empty',
+        'sec-fetch-mode': 'cors',
+        'sec-fetch-site': 'same-origin',
+        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
+    }
+
+    json_data = {
+        'chatbotId': 'adb2b793-e667-46b9-8d80-114eaa9a4c40',
+        'input': messages[-1]['content'],
+        'userId': f'auto:{uuid.uuid4()}',
+        'provider': 'OPEN_AI',
+        'config': False,
+        'includeHistory': False
+    }
+
+    response = requests.post('https://ora.ai/api/conversation',
+        headers=headers, json=json_data)
+
+    yield response.json()['response']
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Providers/Phind.py b/g4f/Provider/Providers/Phind.py
similarity index 79%
rename from g4f/Providers/Phind.py
rename to g4f/Provider/Providers/Phind.py
index 32dee4e..d37f9a0 100644
--- a/g4f/Providers/Phind.py
+++ b/g4f/Provider/Providers/Phind.py
@@ -3,12 +3,13 @@ import json
 import time
 import subprocess
 
-from ..typing import sha256, Dict, get_type_hints
+from ...typing import sha256, Dict, get_type_hints
 
 url = 'https://phind.com'
 model = ['gpt-3.5-turbo', 'gpt-4']
-
-def _create_completion(model: str, messages: list, **kwargs):
+supports_stream = True
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
     path = os.path.dirname(os.path.realpath(__file__))
 
     config = json.dumps({
@@ -29,7 +30,7 @@ def _create_completion(model: str, messages: list, **kwargs):
         if b'ping - 2023-' in line:
             continue
 
-        yield line.decode('utf-8')[:-1]
+        yield line.decode('utf-8') #[:-1]
 
 params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
     '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/Pierangelo.py b/g4f/Provider/Providers/Pierangelo.py
new file mode 100644
index 0000000..2f8cd79
--- /dev/null
+++ b/g4f/Provider/Providers/Pierangelo.py
@@ -0,0 +1,55 @@
+import os
+import requests
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://chat.pierangelo.info'
+model = ['gpt-4', 'gpt-3.5-turbo']
+supports_stream = True
+
+models = {
+    'gpt-4': {
+        'id': 'gpt-4',
+        'name': 'GPT-4'
+    },
+    'gpt-3.5-turbo': {
+        'id': 'gpt-3.5-turbo',
+        'name': 'GPT-3.5'
+    }
+}
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+
+    headers = {
+        'authority': 'chat.pierangelo.info',
+        'accept': '*/*',
+        'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+        'cache-control': 'no-cache',
+        'content-type': 'application/json',
+        'origin': 'https://chat.pierangelo.info',
+        'pragma': 'no-cache',
+        'referer': 'https://chat.pierangelo.info/',
+        'sec-ch-ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
+        'sec-ch-ua-mobile': '?0',
+        'sec-ch-ua-platform': '"macOS"',
+        'sec-fetch-dest': 'empty',
+        'sec-fetch-mode': 'cors',
+        'sec-fetch-site': 'same-origin',
+        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
+    }
+
+    json_data = {
+        'model': models[model],
+        'messages': messages,
+        'key': '',
+        'prompt': "You are ChatGPT, a large language model trained by OpenAI. Answer concisely",
+        'temperature': 0.7
+    }
+
+    response = requests.post('https://chat.pierangelo.info/api/chat',
+        headers=headers, json=json_data, stream=True)
+
+    for token in response:
+        yield (token)
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f'{name}: {get_type_hints(_create_completion)[name].__name__}' for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/Theb.py b/g4f/Provider/Providers/Theb.py
new file mode 100644
index 0000000..aa85741
--- /dev/null
+++ b/g4f/Provider/Providers/Theb.py
@@ -0,0 +1,28 @@
+import os
+import json
+import time
+import subprocess
+
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://theb.ai'
+model = ['gpt-3.5-turbo']
+supports_stream = True
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+
+    path = os.path.dirname(os.path.realpath(__file__))
+    config = json.dumps({
+        'messages': messages,
+        'model': model}, separators=(',', ':'))
+
+    cmd = ['python3', f'{path}/helpers/theb.py', config]
+
+    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+
+    for line in iter(p.stdout.readline, b''):
+        yield line.decode('utf-8') #[:-1]
+
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
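`Bing`, `Openai`, `Phind` and `Theb` all share one shape: `_create_completion` serializes the request to JSON, hands it to a helper script via `argv`, and streams the helper's stdout back line by line. A sketch of the helper side of that contract — `fetch_tokens` is a hypothetical stand-in for the provider-specific request:

```py
import sys, json

config = json.loads(sys.argv[1])  # {'messages': [...], 'model': '...'}

for token in fetch_tokens(config):  # hypothetical provider-specific call
    print(token, flush=True, end='')  # the parent reads this via p.stdout.readline
```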
diff --git a/g4f/Provider/Providers/Vercel.py b/g4f/Provider/Providers/Vercel.py
new file mode 100644
index 0000000..f03c6e1
--- /dev/null
+++ b/g4f/Provider/Providers/Vercel.py
@@ -0,0 +1,160 @@
+import os
+import json
+import base64
+import quickjs
+import queue
+import threading
+
+from curl_cffi import requests
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://play.vercel.ai'
+model = None
+supports_stream = True
+
+models = {
+    'claude-instant-v1': 'anthropic:claude-instant-v1',
+    'claude-v1': 'anthropic:claude-v1',
+    'alpaca-7b': 'replicate:replicate/alpaca-7b',
+    'stablelm-tuned-alpha-7b': 'replicate:stability-ai/stablelm-tuned-alpha-7b',
+    'bloom': 'huggingface:bigscience/bloom',
+    'bloomz': 'huggingface:bigscience/bloomz',
+    'flan-t5-xxl': 'huggingface:google/flan-t5-xxl',
+    'flan-ul2': 'huggingface:google/flan-ul2',
+    'gpt-neox-20b': 'huggingface:EleutherAI/gpt-neox-20b',
+    'oasst-sft-4-pythia-12b-epoch-3.5': 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5',
+    'santacoder': 'huggingface:bigcode/santacoder',
+    'command-medium-nightly': 'cohere:command-medium-nightly',
+    'command-xlarge-nightly': 'cohere:command-xlarge-nightly',
+    'gpt-4': 'openai:gpt-4',
+    'code-cushman-001': 'openai:code-cushman-001',
+    'code-davinci-002': 'openai:code-davinci-002',
+    'gpt-3.5-turbo': 'openai:gpt-3.5-turbo',
+    'text-ada-001': 'openai:text-ada-001',
+    'text-babbage-001': 'openai:text-babbage-001',
+    'text-curie-001': 'openai:text-curie-001',
+    'text-davinci-002': 'openai:text-davinci-002',
+    'text-davinci-003': 'openai:text-davinci-003'
+}
+
+vercel_models = {
+    'anthropic:claude-instant-v1': {'id': 'anthropic:claude-instant-v1', 'provider': 'anthropic', 'providerHumanName': 'Anthropic', 'makerHumanName': 'Anthropic', 'minBillingTier': 'hobby', 'parameters': {'temperature': {'value': 1, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': ['\n\nHuman:'], 'range': []}}, 'name': 'claude-instant-v1'},
+    'anthropic:claude-v1': {'id': 'anthropic:claude-v1', 'provider': 'anthropic', 'providerHumanName': 'Anthropic', 'makerHumanName': 'Anthropic', 'minBillingTier': 'hobby', 'parameters': {'temperature': {'value': 1, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': ['\n\nHuman:'], 'range': []}}, 'name': 'claude-v1'},
+    'replicate:replicate/alpaca-7b': {'id': 'replicate:replicate/alpaca-7b', 'provider': 'replicate', 'providerHumanName': 'Replicate', 'makerHumanName': 'Stanford', 'parameters': {'temperature': {'value': 0.75, 'range': [0.01, 5]}, 'maximumLength': {'value': 200, 'range': [50, 512]}, 'topP': {'value': 0.95, 'range': [0.01, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'repetitionPenalty': {'value': 1.1765, 'range': [0.01, 5]}, 'stopSequences': {'value': [], 'range': []}}, 'version': '2014ee1247354f2e81c0b3650d71ca715bc1e610189855f134c30ecb841fae21', 'name': 'alpaca-7b'},
+    'replicate:stability-ai/stablelm-tuned-alpha-7b': {'id': 'replicate:stability-ai/stablelm-tuned-alpha-7b', 'provider': 'replicate', 'makerHumanName': 'StabilityAI', 'providerHumanName': 'Replicate', 'parameters': {'temperature': {'value': 0.75, 'range': [0.01, 5]}, 'maximumLength': {'value': 200, 'range': [50, 512]}, 'topP': {'value': 0.95, 'range': [0.01, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'repetitionPenalty': {'value': 1.1765, 'range': [0.01, 5]}, 'stopSequences': {'value': [], 'range': []}}, 'version': '4a9a32b4fd86c2d047f1d271fa93972683ec6ef1cf82f402bd021f267330b50b', 'name': 'stablelm-tuned-alpha-7b'},
+    'huggingface:bigscience/bloom': {'id': 'huggingface:bigscience/bloom', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigScience', 'instructions': "Do NOT talk to Bloom as an entity, it's not a chatbot but a webpage/blog/article completion model. For the best results: mimic a few words of a webpage similar to the content you want to generate. Start a sentence as if YOU were writing a blog, webpage, math post, coding article and Bloom will generate a coherent follow-up.", 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'bloom'},
+    'huggingface:bigscience/bloomz': {'id': 'huggingface:bigscience/bloomz', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigScience', 'instructions': 'We recommend using the model to perform tasks expressed in natural language. For example, given the prompt "Translate to English: Je t\'aime.", the model will most likely answer "I love you.".', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'bloomz'},
+    'huggingface:google/flan-t5-xxl': {'id': 'huggingface:google/flan-t5-xxl', 'provider': 'huggingface', 'makerHumanName': 'Google', 'providerHumanName': 'HuggingFace', 'name': 'flan-t5-xxl', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}},
+    'huggingface:google/flan-ul2': {'id': 'huggingface:google/flan-ul2', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'Google', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'flan-ul2'},
+    'huggingface:EleutherAI/gpt-neox-20b': {'id': 'huggingface:EleutherAI/gpt-neox-20b', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'EleutherAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'gpt-neox-20b'},
+    'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5': {'id': 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'OpenAssistant', 'parameters': {'maximumLength': {'value': 200, 'range': [50, 1024]}, 'typicalP': {'value': 0.2, 'range': [0.1, 0.99]}, 'repetitionPenalty': {'value': 1, 'range': [0.1, 2]}}, 'name': 'oasst-sft-4-pythia-12b-epoch-3.5'},
+    'huggingface:bigcode/santacoder': {'id': 'huggingface:bigcode/santacoder', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigCode', 'instructions': 'The model was trained on GitHub code. As such it is not an instruction model and commands like "Write a function that computes the square root." do not work well. You should phrase commands like they occur in source code such as comments (e.g. # the following function computes the sqrt) or write a function signature and docstring and let the model complete the function body.', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'santacoder'},
+    'cohere:command-medium-nightly': {'id': 'cohere:command-medium-nightly', 'provider': 'cohere', 'providerHumanName': 'Cohere', 'makerHumanName': 'Cohere', 'name': 'command-medium-nightly', 'parameters': {'temperature': {'value': 0.9, 'range': [0, 2]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0, 1]}, 'topK': {'value': 0, 'range': [0, 500]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}},
+    'cohere:command-xlarge-nightly': {'id': 'cohere:command-xlarge-nightly', 'provider': 'cohere', 'providerHumanName': 'Cohere', 'makerHumanName': 'Cohere', 'name': 'command-xlarge-nightly', 'parameters': {'temperature': {'value': 0.9, 'range': [0, 2]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0, 1]}, 'topK': {'value': 0, 'range': [0, 500]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}},
+    'openai:gpt-4': {'id': 'openai:gpt-4', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'gpt-4', 'minBillingTier': 'pro', 'parameters': {'temperature': {'value': 0.7, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}},
+    'openai:code-cushman-001': {'id': 'openai:code-cushman-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'code-cushman-001'},
+    'openai:code-davinci-002': {'id': 'openai:code-davinci-002', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'code-davinci-002'},
+    'openai:gpt-3.5-turbo': {'id': 'openai:gpt-3.5-turbo', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.7, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'gpt-3.5-turbo'},
+    'openai:text-ada-001': {'id': 'openai:text-ada-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-ada-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}},
+    'openai:text-babbage-001': {'id': 'openai:text-babbage-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-babbage-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}},
+    'openai:text-curie-001': {'id': 'openai:text-curie-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-curie-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}},
+    'openai:text-davinci-002': {'id': 'openai:text-davinci-002', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-davinci-002', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}},
+    'openai:text-davinci-003': {'id': 'openai:text-davinci-003', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-davinci-003', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}
+}
+
+
+# based on https://github.com/ading2210/vercel-llm-api // modified
+class Client:
+    def __init__(self):
+        self.session = requests.Session(impersonate='chrome110')
+        self.headers = {
+            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110 Safari/537.36',
+            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',
+            'Accept-Encoding': 'gzip, deflate, br',
+            'Accept-Language': 'en-US,en;q=0.5',
+            'Te': 'trailers',
+            'Upgrade-Insecure-Requests': '1'
+        }
+        self.session.headers.update(self.headers)
+
+    def get_token(self):
+        b64 = self.session.get('https://play.vercel.ai/openai.jpeg').text
+        data = json.loads(base64.b64decode(b64))
+
+        script = 'const globalThis = {data: `sentinel`}; (%s)(%s)' % (
+            data['c'], data['a'])
+
+        token_data = json.loads(quickjs.Context().eval(script).json())
+        token_string = json.dumps(separators=(',', ':'),
+            obj={'r': token_data, 't': data['t']})
+
+        return base64.b64encode(token_string.encode()).decode()
+
+    def get_default_params(self, model_id):
+        return {key: param['value'] for key, param in vercel_models[model_id]['parameters'].items()}
+
+    def generate(self, model_id: str, prompt: str, params: dict = {}):
+        if not ':' in model_id:
+            model_id = models[model_id]
+
+        defaults = self.get_default_params(model_id)
+
+        payload = defaults | params | {
+            'prompt': prompt,
+            'model': model_id,
+        }
+
+        headers = self.headers | {
+            'Accept-Encoding': 'gzip, deflate, br',
+            'Custom-Encoding': self.get_token(),
+            'Host': 'play.vercel.ai',
+            'Origin': 'https://play.vercel.ai',
+            'Referrer': 'https://play.vercel.ai',
+            'Sec-Fetch-Dest': 'empty',
+            'Sec-Fetch-Mode': 'cors',
+            'Sec-Fetch-Site': 'same-origin',
+        }
+
+        chunks_queue = queue.Queue()
+        error = None
+        response = None
+
+        def callback(data):
+            chunks_queue.put(data.decode())
+
+        def request_thread():
+            nonlocal response, error
+            for _ in range(3):
+                try:
+                    response = self.session.post('https://play.vercel.ai/api/generate',
+                        json=payload, headers=headers, content_callback=callback)
+                    response.raise_for_status()
+
+                except Exception as e:
+                    if _ == 2:
+                        error = e
+
+                    else:
+                        continue
+
+        thread = threading.Thread(target=request_thread, daemon=True)
+        thread.start()
+
+        text = ''
+        index = 0
+        while True:
+            try:
+                chunk = chunks_queue.get(block=True, timeout=0.1)
+
+            except queue.Empty:
+                if error:
+                    raise error
+
+                elif response:
+                    break
+
+                else:
+                    continue
+
+            text += chunk
+            lines = text.split('\n')
+
+            if len(lines) - 1 > index:
+                new = lines[index:-1]
+                for word in new:
+                    yield json.loads(word)
+                index = len(lines) - 1
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    conversation = 'This is a conversation between a human and a language model, respond to the last message accordingly, referring to the past history of messages if needed.\n'
+
+    for message in messages:
+        conversation += '%s: %s\n' % (message['role'], message['content'])
+
+    conversation += 'assistant: '
+
+    completion = Client().generate(model, conversation)
+
+    for token in completion:
+        yield token
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
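The `get_token` above mirrors the playground's anti-bot check: `/openai.jpeg` returns base64-encoded JSON carrying a JS function (`c`) and its argument (`a`), which is evaluated locally with quickjs and re-encoded into the `Custom-Encoding` header. The client can also be driven directly, bypassing `_create_completion` — a quick sketch:

```py
from g4f.Provider.Providers.Vercel import Client

# generate() yields decoded tokens as the playground streams them back
for token in Client().generate('gpt-3.5-turbo', 'Hello world'):
    print(token, end='')
```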
diff --git a/g4f/Providers/You.py b/g4f/Provider/Providers/You.py
similarity index 69%
rename from g4f/Providers/You.py
rename to g4f/Provider/Providers/You.py
index 62b0d15..d01e48c 100644
--- a/g4f/Providers/You.py
+++ b/g4f/Provider/Providers/You.py
@@ -3,12 +3,13 @@ import json
 import time
 import subprocess
 
-from ..typing import sha256, Dict, get_type_hints
+from ...typing import sha256, Dict, get_type_hints
 
 url = 'https://you.com'
 model = 'gpt-3.5-turbo'
+supports_stream = True
 
-def _create_completion(model: str, messages: list, **kwargs):
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
     path = os.path.dirname(os.path.realpath(__file__))
 
     config = json.dumps({
@@ -19,4 +20,4 @@ def _create_completion(model: str, messages: list, **kwargs):
     p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
 
     for line in iter(p.stdout.readline, b''):
-        yield line.decode('utf-8')[:-1]
\ No newline at end of file
+        yield line.decode('utf-8') #[:-1]
\ No newline at end of file
diff --git a/g4f/Providers/Yqcloud.py b/g4f/Provider/Providers/Yqcloud.py
similarity index 84%
rename from g4f/Providers/Yqcloud.py
rename to g4f/Provider/Providers/Yqcloud.py
index bf0cb1b..34d8aa2 100644
--- a/g4f/Providers/Yqcloud.py
+++ b/g4f/Provider/Providers/Yqcloud.py
@@ -2,10 +2,10 @@ import os
 import time
 import requests
 
-from ..typing import sha256, Dict, get_type_hints
+from ...typing import sha256, Dict, get_type_hints
+supports_stream = True
 
-
-def _create_completion(model: str, messages: list, **kwargs):
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
     headers = {
         'authority': 'api.aichatos.cloud',
diff --git a/g4f/Provider/Providers/data/ails_tokens.txt b/g4f/Provider/Providers/data/ails_tokens.txt
new file mode 100644
index 0000000..9fd183c
--- /dev/null
+++ b/g4f/Provider/Providers/data/ails_tokens.txt
@@ -0,0 +1,18 @@
+0B28A84E-592E-4FCD-AB3F-93DDEF9C4DF7
+513A32B7-D024-4CFB-9D70-1B841B245999
+5D733D23-1C8A-4CD0-8C68-CF3C8E02BD2F
+606E4968-CAE4-48C8-9C63-10723DAD64B3
+6DB4C6E6-7720-4A8F-8F65-610B09145F41
+7348661D-4E36-49B2-BCAB-6E7D153CAF43
+893565E3-288C-4A09-8267-F507DAC1A70A
+8FEC6A8C-5E98-4570-95DD-6BBD18263CB8
+92B8B7DD-DA03-47EA-B614-D506AF6A922A
+A02FDB66-BE88-42DA-A162-686316F1C9B2
+A2509F7E-4E7D-4477-A76F-AB3940856C43
+A9E651A2-CC29-439E-9373-17EF55359704
+ABE8D912-7356-4E8A-B37E-A5818808075F
+B530598F-8D63-45EC-A550-3BA23FC1434A
+CCB5F29D-46BC-455D-80CD-F1666570BB7D
+CFE0E42C-70B7-4BDA-9DB6-37A917AB4D00
+D158A715-E29B-4B20-A962-8536DC18D1EC
+FB7506FF-EAB3-410E-924F-E06A45937857
\ No newline at end of file
diff --git a/g4f/Providers/helpers/bing.py b/g4f/Provider/Providers/helpers/bing.py
similarity index 67%
rename from g4f/Providers/helpers/bing.py
rename to g4f/Provider/Providers/helpers/bing.py
index 94926b2..0dc53e2 100644
--- a/g4f/Providers/helpers/bing.py
+++ b/g4f/Provider/Providers/helpers/bing.py
@@ -2,6 +2,7 @@ import sys
 import ssl
 import uuid
 import json
+import time
 import random
 import asyncio
 import certifi
@@ -14,6 +15,8 @@ config = json.loads(sys.argv[1])
 ssl_context = ssl.create_default_context()
 ssl_context.load_verify_locations(certifi.where())
 
+
+
 conversationstyles = {
     'gpt-4': [ #'precise'
         "nlu_direct_response_filter",
@@ -72,6 +75,8 @@ def format(msg: dict) -> str:
     return json.dumps(msg) + '\x1e'
 
 def get_token():
+    return
+
     try:
         cookies = {c.name: c.value for c in browser_cookie3.edge(domain_name='bing.com')}
         return cookies['_U']
@@ -83,20 +88,49 @@ class AsyncCompletion:
     async def create(
             prompt : str = None,
             optionSets : list = None,
-            token : str = get_token()):
+            token : str = None): # No auth required anymore
 
-        create = requests.get('https://edgeservices.bing.com/edgesvc/turing/conversation/create',
-            headers = {
-                'host' : 'edgeservices.bing.com',
-                'authority' : 'edgeservices.bing.com',
-                'cookie' : f'_U={token}',
-                'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
-            }
-        )
+        create = None
+        for _ in range(5):
+            try:
+                create = requests.get('https://edgeservices.bing.com/edgesvc/turing/conversation/create',
+                    headers = {
+                        'host': 'edgeservices.bing.com',
+                        'accept-encoding': 'gzip, deflate, br',
+                        'connection': 'keep-alive',
+                        'authority': 'edgeservices.bing.com',
+                        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
+                        'accept-language': 'en-US,en;q=0.9',
+                        'cache-control': 'max-age=0',
+                        'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
+                        'sec-ch-ua-arch': '"x86"',
+                        'sec-ch-ua-bitness': '"64"',
+                        'sec-ch-ua-full-version': '"110.0.1587.69"',
+                        'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
+                        'sec-ch-ua-mobile': '?0',
+                        'sec-ch-ua-model': '""',
+                        'sec-ch-ua-platform': '"Windows"',
+                        'sec-ch-ua-platform-version': '"15.0.0"',
+                        'sec-fetch-dest': 'document',
+                        'sec-fetch-mode': 'navigate',
+                        'sec-fetch-site': 'none',
+                        'sec-fetch-user': '?1',
+                        'upgrade-insecure-requests': '1',
+                        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
+                        'x-edge-shopping-flag': '1',
+                        'x-forwarded-for': f'13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}'
+                    }
+                )
 
-        conversationId = create.json()['conversationId']
-        clientId = create.json()['clientId']
-        conversationSignature = create.json()['conversationSignature']
+                conversationId = create.json()['conversationId']
+                clientId = create.json()['clientId']
+                conversationSignature = create.json()['conversationSignature']
+
+            except Exception as e:
+                time.sleep(0.5)
+                continue
+
+        if create == None: raise Exception('Failed to create conversation.')
 
         wss: websockets.WebSocketClientProtocol or None = None
@@ -164,6 +198,7 @@ class AsyncCompletion:
                     continue
 
                 response = json.loads(obj)
+                #print(response, flush=True, end='')
 
                 if response.get('type') == 1 and response['arguments'][0].get('messages',):
                     response_text = response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0].get('text')
@@ -179,7 +214,7 @@ async def run(optionSets, messages):
     async for value in AsyncCompletion.create(prompt=messages[-1]['content'],
             optionSets=optionSets):
 
-        print(value, flush=True)
+        print(value, flush=True, end = '')
 
 optionSet = conversationstyles[config['model']]
 asyncio.run(run(optionSet, config['messages']))
\ No newline at end of file
diff --git a/g4f/Providers/helpers/openai.py b/g4f/Provider/Providers/helpers/openai.py
similarity index 80%
rename from g4f/Providers/helpers/openai.py
rename to g4f/Provider/Providers/helpers/openai.py
index 50c7b6a..35f02ee 100644
--- a/g4f/Providers/helpers/openai.py
+++ b/g4f/Provider/Providers/helpers/openai.py
@@ -83,15 +83,24 @@ payload = {
 completion = ''
 
 def format(chunk):
-    global completion
-
-    if b'parts' in chunk:
-        json_data = json.loads(chunk.decode('utf-8').split('data: ')[1])
-        token = json_data['message']['content']['parts'][0]
-        token = token.replace(completion, '')
-        completion += token
-
-        print(token, flush=True)
+    try:
+        global completion
+
+        if b'parts' in chunk:
+            json_data = json.loads(chunk.decode('utf-8').split('data: ')[1])
+            token = json_data['message']['content']['parts'][0]
+            token = token.replace(completion, '')
+            completion += token
+
+            print(token, flush=True, end = '')
+
+    except Exception as e:
+        pass
 
-response = requests.post('https://chat.openai.com/backend-api/conversation',
-    json=payload, headers=headers, content_callback=format, impersonate='chrome110')
\ No newline at end of file
+for _ in range(3):
+    try:
+        response = requests.post('https://chat.openai.com/backend-api/conversation',
+            json=payload, headers=headers, content_callback=format, impersonate='chrome110')
+        break
+    except:
+        continue
\ No newline at end of file
diff --git a/g4f/Providers/helpers/phind.py b/g4f/Provider/Providers/helpers/phind.py
similarity index 62%
rename from g4f/Providers/helpers/phind.py
rename to g4f/Provider/Providers/helpers/phind.py
index 6d8bd53..70525d5 100644
--- a/g4f/Providers/helpers/phind.py
+++ b/g4f/Provider/Providers/helpers/phind.py
@@ -16,7 +16,7 @@ json_data = json.dumps({
         'skill': skill,
         'date': datetime.datetime.now().strftime('%d/%m/%Y'),
         'language': 'en',
-        'detailed': False,
+        'detailed': True,
         'creative': True,
         'customLinks': []}}, separators=(',', ':'))
 
@@ -39,17 +39,31 @@ headers = {
 
 def output(chunk):
-    if chunk == b'data: \r\ndata: \r\ndata: \r\n\r\n':
-        chunk = b'data: \n\r\n\r\n'
+    try:
+        if b'PHIND_METADATA' in chunk:
+            return
+
+        if chunk == b'data: \r\ndata: \r\ndata: \r\n\r\n':
+            chunk = b'data: \n\r\n\r\n'
 
-    chunk = chunk.decode()
+        chunk = chunk.decode()
 
-    chunk = chunk.replace('data: \r\n\r\ndata: ', 'data: \n')
-    chunk = chunk.replace('\r\ndata: \r\ndata: \r\n\r\n', '\n\r\n\r\n')
-    chunk = chunk.replace('data: ', '').replace('\r\n\r\n', '')
+        chunk = chunk.replace('data: \r\n\r\ndata: ', 'data: \n')
+        chunk = chunk.replace('\r\ndata: \r\ndata: \r\n\r\n', '\n\r\n\r\n')
+        chunk = chunk.replace('data: ', '').replace('\r\n\r\n', '')
 
-    print(chunk, flush=True)
+        print(chunk, flush=True, end = '')
+
+    except json.decoder.JSONDecodeError:
+        pass
 
-
-response = requests.post('https://www.phind.com/api/infer/answer',
+while True:
+    try:
+        response = requests.post('https://www.phind.com/api/infer/answer',
             headers=headers, data=json_data, content_callback=output, timeout=999999, impersonate='safari15_5')
+
+        exit(0)
+
+    except Exception as e:
+        print('an error occurred, retrying... |', e, flush=True)
+        continue
\ No newline at end of file
diff --git a/g4f/Provider/Providers/helpers/theb.py b/g4f/Provider/Providers/helpers/theb.py
new file mode 100644
index 0000000..2f620cc
--- /dev/null
+++ b/g4f/Provider/Providers/helpers/theb.py
@@ -0,0 +1,49 @@
+import json
+import sys
+from curl_cffi import requests
+
+config = json.loads(sys.argv[1])
+prompt = config['messages'][-1]['content']
+
+headers = {
+    'authority': 'chatbot.theb.ai',
+    'accept': 'application/json, text/plain, */*',
+    'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+    'content-type': 'application/json',
+    'origin': 'https://chatbot.theb.ai',
+    'referer': 'https://chatbot.theb.ai/',
+    'sec-ch-ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
+    'sec-ch-ua-mobile': '?0',
+    'sec-ch-ua-platform': '"macOS"',
+    'sec-fetch-dest': 'empty',
+    'sec-fetch-mode': 'cors',
+    'sec-fetch-site': 'same-origin',
+    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
+}
+
+json_data = {
+    'prompt': prompt,
+    'options': {}
+}
+
+def format(chunk):
+    try:
+        chunk_json = json.loads(chunk.decode('utf-8'))
+        completion_chunk = chunk_json['detail']['choices'][0]['delta']['content']
+
+        print(completion_chunk, flush=True, end = '')
+
+    except Exception as e:
+        print('[ERROR] an error occurred, retrying... |', e, flush=True)
+        return
+
+while True:
+    try:
+        response = requests.post('https://chatbot.theb.ai/api/chat-process',
+            headers=headers, json=json_data, content_callback=format, impersonate='chrome110')
+
+        exit(0)
+
+    except Exception as e:
+        print('[ERROR] an error occurred, retrying... |', e, flush=True)
+        continue
\ No newline at end of file
diff --git a/g4f/Providers/helpers/you.py b/g4f/Provider/Providers/helpers/you.py
similarity index 88%
rename from g4f/Providers/helpers/you.py
rename to g4f/Provider/Providers/helpers/you.py
index 86a6fa7..02985ed 100644
--- a/g4f/Providers/helpers/you.py
+++ b/g4f/Provider/Providers/helpers/you.py
@@ -65,7 +65,15 @@ def output(chunk):
     if b'"youChatToken"' in chunk:
         chunk_json = json.loads(chunk.decode().split('data: ')[1])
 
-        print(chunk_json['youChatToken'], flush=True)
+        print(chunk_json['youChatToken'], flush=True, end = '')
 
-response = requests.get(f'https://you.com/api/streamingSearch?{params}',
+while True:
+    try:
+        response = requests.get(f'https://you.com/api/streamingSearch?{params}',
             headers=headers, content_callback=output, impersonate='safari15_5')
+
+        exit(0)
+
+    except Exception as e:
+        print('an error occurred, retrying... |', e, flush=True)
+        continue
\ No newline at end of file
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
new file mode 100644
index 0000000..223061a
--- /dev/null
+++ b/g4f/Provider/__init__.py
@@ -0,0 +1,18 @@
+from . import Provider
+from .Providers import (
+    Phind,
+    You,
+    Bing,
+    Openai,
+    Yqcloud,
+    Theb,
+    Aichat,
+    Ora,
+    Aws,
+    Bard,
+    Vercel,
+    Pierangelo,
+    Forefront
+)
+
+Palm = Bard
\ No newline at end of file
diff --git a/g4f/Providers/Ails.py b/g4f/Providers/Ails.py
deleted file mode 100644
index d6f8b67..0000000
--- a/g4f/Providers/Ails.py
+++ /dev/null
@@ -1,79 +0,0 @@
-import os
-import time
-import json
-import uuid
-import hashlib
-import requests
-
-from ..typing import sha256, Dict, get_type_hints
-from datetime import datetime
-
-url: str = 'https://ai.ls'
-model: str = 'gpt-3.5-turbo'
-
-
-class Utils:
-    def hash(json_data: Dict[str, str]) -> sha256:
-
-        secretKey: bytearray = bytearray([79, 86, 98, 105, 91, 84, 80, 78, 123, 83,
-            35, 41, 99, 123, 51, 54, 37, 57, 63, 103, 59, 117, 115, 108, 41, 67, 76])
-
-        base_string: str = '%s:%s:%s:%s' % (
-            json_data['t'],
-            json_data['m'],
-            'OVbi[TPN{S#)c{36%9?g;usl)CL',
-            len(json_data['m'])
-        )
-
-        return hashlib.sha256(base_string.encode()).hexdigest()
-
-    def format_timestamp(timestamp: int) -> str:
-
-        e = timestamp
-        n = e % 10
-        r = n + 1 if n % 2 == 0 else n
-        return str(e - n + r)
-
-
-def _create_completion(model: str, messages: list, temperature: float = 0.6, stream: bool = False):
-    headers = {
-        'authority': 'api.caipacity.com',
-        'accept': '*/*',
-        'authorization': 'Bearer free',
-        'client-id': str(uuid.uuid4()),
-        'client-v': '0.1.26',
-        'content-type': 'application/json',
-        'origin': 'https://ai.ls',
-        'referer': 'https://ai.ls/',
-        'sec-ch-ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
-        'sec-ch-ua-mobile': '?0',
-        'sec-ch-ua-platform': '"macOS"',
-        'sec-fetch-dest': 'empty',
-        'sec-fetch-mode': 'cors',
-        'sec-fetch-site': 'cross-site',
-        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
-    }
-
-    timestamp = Utils.format_timestamp(int(time.time() * 1000))
-
-    sig = {
-        'd': datetime.now().strftime('%Y-%m-%d'),
-        't': timestamp,
-        's': Utils.hash({
-            't': timestamp,
-            'm': messages[-1]['content']})}
-
-    json_data = json.dumps(separators=(',', ':'), obj={
-        'model': 'gpt-3.5-turbo',
-        'temperature': temperature,
-        'stream': True,
-        'messages': messages} | sig)
-
-    response = requests.post('https://api.caipacity.com/v1/chat/completions?full=false',
-        headers=headers, data=json_data, stream=True)
-
-    for token in response.iter_lines():
-        yield token.decode()
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Providers/Provider.py b/g4f/Providers/Provider.py
deleted file mode 100644
index 74d53f0..0000000
--- a/g4f/Providers/Provider.py
+++ /dev/null
@@ -1,5 +0,0 @@
-url = None
-model = None
-
-def _create_completion(model: str, messages: list, **kwargs):
-    return
\ No newline at end of file
diff --git a/g4f/Providers/__init__.py b/g4f/Providers/__init__.py
deleted file mode 100644
index 1d3791b..0000000
--- a/g4f/Providers/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from . import Provider, Ails, Phind, You, Bing, Openai, Yqcloud
\ No newline at end of file
diff --git a/g4f/__init__.py b/g4f/__init__.py
index 8a5218a..77b69ff 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -1,10 +1,9 @@
 import sys
 from .typing import MetaModels, Union
-from . import Providers
+from . import Provider
 
-
-class Models(metaclass=MetaModels):
+class Model(metaclass=MetaModels):
     class model:
         name: str
@@ -14,30 +13,39 @@ class Models(metaclass=MetaModels):
     class gpt_35_turbo:
         name: str = 'gpt-3.5-turbo'
         base_provider: str = 'openai'
-        best_site: str = Providers.Ails
+        best_site: Provider.Provider = Provider.Forefront
 
     class gpt_4:
         name: str = 'gpt-4'
         base_provider: str = 'openai'
-        best_site: str = Providers.Phind
+        best_site: Provider.Provider = Provider.Bing
+
+    class davinci_003:
+        name: str = 'davinci-003'
+        base_provider: str = 'openai'
+        best_site: Provider.Provider = Provider.Vercel
 
 class Utils:
     convert: dict = {
-        'gpt-3.5-turbo': Models.gpt_35_turbo,
-        'gpt-4': Models.gpt_4
+        'gpt-3.5-turbo': Model.gpt_35_turbo,
+        'gpt-4': Model.gpt_4
     }
 
 class ChatCompletion:
     @staticmethod
-    def create(model: Models.model or str, messages: list, provider: Providers.Provider = None, stream: bool = False, **kwargs):
+    def create(model: Model.model or str, messages: list, provider: Provider.Provider = None, stream: bool = False, **kwargs):
         try:
             if isinstance(model, str):
                 model = Utils.convert[model]
 
             engine = model.best_site if not provider else provider
-
-            return (engine._create_completion(model.name, messages, **kwargs)
-                if stream else ''.join(engine._create_completion(model.name, messages, **kwargs)))
+            if not engine.supports_stream and stream == True:
+                print(
+                    f"ValueError: {engine.__name__} does not support 'stream' argument", file=sys.stderr)
+                sys.exit(1)
+
+            return (engine._create_completion(model.name, messages, stream, **kwargs)
+                if stream else ''.join(engine._create_completion(model.name, messages, stream, **kwargs)))
 
         except TypeError as e:
             print(e)
diff --git a/g4f/typing.py b/g4f/typing.py
index 61530b9..f642faf 100644
--- a/g4f/typing.py
+++ b/g4f/typing.py
@@ -2,7 +2,6 @@ from typing import Dict, NewType, Union, Optional, List, get_type_hints
 
 sha256 = NewType('sha_256_hash', str)
 
-
 class MetaModels(type):
     def __str__(cls):
         output: List = [
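The new `g4f/utils.py` below generalizes the cookie lookup that `Bard.py` hard-codes to Chrome, scanning browsers by market share until it finds the requested cookie; e.g.:

```py
from g4f.utils import Utils

# pull the Bard session cookie from whichever browser has it
psid = Utils.get_cookies('.google.com', setName='__Secure-1PSID')
```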
diff --git a/g4f/utils.py b/g4f/utils.py
new file mode 100644
index 0000000..d5ab41c
--- /dev/null
+++ b/g4f/utils.py
@@ -0,0 +1,49 @@
+import browser_cookie3
+
+
+class Utils:
+    browsers = [
+        browser_cookie3.chrome,   # 62.74% market share
+        browser_cookie3.safari,   # 24.12% market share
+        browser_cookie3.firefox,  #  4.56% market share
+        browser_cookie3.edge,     #  2.85% market share
+        browser_cookie3.opera,    #  1.69% market share
+        browser_cookie3.brave,    #  0.96% market share
+        browser_cookie3.opera_gx, #  0.64% market share
+        browser_cookie3.vivaldi,  #  0.32% market share
+    ]
+
+    def get_cookies(domain: str, setName: str = None, setBrowser: str = False) -> dict:
+        cookies = {}
+
+        if setBrowser != False:
+            for browser in Utils.browsers:
+                if browser.__name__ == setBrowser:
+                    try:
+                        for c in browser(domain_name=domain):
+                            if c.name not in cookies:
+                                cookies = cookies | {c.name: c.value}
+
+                    except Exception as e:
+                        pass
+
+        else:
+            for browser in Utils.browsers:
+                try:
+                    for c in browser(domain_name=domain):
+                        if c.name not in cookies:
+                            cookies = cookies | {c.name: c.value}
+
+                except Exception as e:
+                    pass
+
+        if setName:
+            try:
+                return {setName: cookies[setName]}
+
+            except ValueError:
+                print(f'Error: could not find {setName} cookie in any browser.')
+                exit(1)
+
+        else:
+            return cookies
diff --git a/interference/app.py b/interference/app.py
index 053fa3f..68a14c6 100644
--- a/interference/app.py
+++ b/interference/app.py
@@ -3,7 +3,7 @@ import time
 import json
 import random
 
-from g4f import Models, ChatCompletion, Providers
+from g4f import Model, ChatCompletion, Provider
 
 from flask import Flask, request, Response
 from flask_cors import CORS
@@ -20,12 +20,12 @@ def chat_completions():
         'gpt-3.5-turbo': 'gpt-3.5-turbo-0301'
     }
 
-    response = ChatCompletion.create(model=Models.gpt_35_turbo, provider=Providers.Openai, stream=streaming,
+    response = ChatCompletion.create(model=Model.gpt_35_turbo, stream=streaming,
         messages=messages)
 
     if not streaming:
         while 'curl_cffi.requests.errors.RequestsError' in response:
-            response = ChatCompletion.create(model=Models.gpt_35_turbo, provider=Providers.Openai, stream=streaming,
+            response = ChatCompletion.create(model=Model.gpt_35_turbo, stream=streaming,
                 messages=messages)
 
     completion_timestamp = int(time.time())
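With `supports_stream` wired into `ChatCompletion.create`, passing `stream=True` to a non-streaming provider now exits with an error, so callers can gate on the flag first — a small sketch under the renamed `Model`/`Provider` layout:

```py
import g4f

provider = g4f.Provider.Aichat               # supports_stream = False
stream = provider.supports_stream            # fall back to a blocking call if needed

response = g4f.ChatCompletion.create(model='gpt-3.5-turbo', provider=provider,
    messages=[{'role': 'user', 'content': 'Hello world'}], stream=stream)

print(''.join(response) if stream else response)
```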