initial commit

このコミットが含まれているのは:
abc 2023-05-13 10:39:01 +01:00
コミット 263ed8e3bb
12個のファイルの変更275行の追加1行の削除

21
README.md ノーマルファイル
ファイルの表示

@@ -0,0 +1,21 @@
### `g4f.Providers` class
default:
`./g4f/Providers/ProviderName.py`:
```python
import os
url: str = 'https://{site_link}'
model: str = 'gpt-[version]'
def _create_completion(prompt: str, args...):
return ...
or
yield ...
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
```

70
g4f/Providers/Ails.py ノーマルファイル
ファイルの表示

@@ -0,0 +1,70 @@
import os
import time
import json
import uuid
import hashlib
import requests
from ..typing import sha256, Dict, get_type_hints
from datetime import datetime
url: str = 'https://ai.ls'
model: str = 'gpt-3.5-turbo'
class Utils:
    """Helper routines for signing requests to the ai.ls API."""

    # Fixed: both helpers took no `self`, so calling them on a Utils
    # *instance* would mis-bind the first argument. They are utilities,
    # not methods — mark them @staticmethod.
    @staticmethod
    def hash(json_data: Dict[str, str]) -> 'sha256':
        """Return the hex SHA-256 signature for a request payload.

        json_data must contain 't' (timestamp string) and 'm' (message).
        The secret key is stored as raw byte values; it decodes to an
        ASCII string used verbatim in the signature input.
        """
        secret_key: bytearray = bytearray([79, 86, 98, 105, 91, 84, 80, 78, 123, 83,
                                           35, 41, 99, 123, 51, 54, 37, 57, 63, 103, 59, 117, 115, 108, 41, 67, 76])

        # Signature input is "<timestamp>:<message>:<key>:<message length>".
        base_string: str = '%s:%s:%s:%s' % (
            json_data['t'],
            json_data['m'],
            secret_key.decode(),
            len(json_data['m'])
        )

        return hashlib.sha256(base_string.encode()).hexdigest()

    @staticmethod
    def format_timestamp(timestamp: int) -> str:
        """Round a millisecond timestamp so its last digit is odd.

        Even last digits are bumped up by one; odd digits are kept.
        Mirrors the obfuscated timestamp check done by the ai.ls frontend.
        """
        e = timestamp
        n = e % 10
        r = n + 1 if n % 2 == 0 else n
        return str(e - n + r)
def _create_completion(model: str, messages: list, temperature: float = 0.6, stream: bool = False):
    """Stream a chat completion from the ai.ls backend, yielding raw SSE lines.

    NOTE(review): `model` and `stream` are accepted for provider-interface
    compatibility but are not used — the request always sends
    'gpt-3.5-turbo' with streaming enabled. Confirm whether they should
    be honored.
    """
    headers = {
        'authority': 'api.caipacity.com',
        # Fixed: was f'Bearer free' — an f-string with no placeholders.
        'authorization': 'Bearer free',
        'client-id': str(uuid.uuid4()),
        'content-type': 'application/json',
        'origin': 'https://ai.ls',
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
    }

    # The API verifies a signed timestamp; see Utils for the scheme.
    timestamp = Utils.format_timestamp(int(time.time() * 1000))

    sig = {
        'd': datetime.now().strftime('%Y-%m-%d'),
        't': timestamp,
        's': Utils.hash({
            't': timestamp,
            'm': messages[-1]['content']})}

    # Compact separators match the payload the browser client sends.
    json_data = json.dumps(separators=(',', ':'), obj={
        'model': 'gpt-3.5-turbo',
        'temperature': temperature,
        'stream': True,
        'messages': messages} | sig)

    response = requests.post('https://api.caipacity.com/v1/chat/completions?full=false',
                             headers=headers, data=json_data, stream=True)

    # Yield each SSE line as text; callers parse/join downstream.
    for token in response.iter_lines():
        yield token.decode()
# Human-readable capability string, e.g.
# "g4f.Providers.Ails supports: (model: str, messages: list, ...)".
_arg_names = _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]
_hints = get_type_hints(_create_completion)
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join(f'{name}: {_hints[name].__name__}' for name in _arg_names)

29
g4f/Providers/Phind.py ノーマルファイル
ファイルの表示

@@ -0,0 +1,29 @@
import os
import json
import time
import subprocess
url = None
model = None
def _create_completion(model: str, messages: list, **kwargs):
    """Yield completion chunks from phind.com via the helper script.

    Spawns `helpers/phind.py` in a subprocess (it needs curl_cffi to
    impersonate a browser) and streams its stdout line by line.

    NOTE(review): `model` is accepted but ignored — the helper always
    queries Phind's creative endpoint.
    """
    import sys  # local import: only needed to locate the interpreter

    path = os.path.dirname(os.path.realpath(__file__))
    config = json.dumps({
        'messages': messages}, separators=(',', ':'))

    # Fixed: use the running interpreter instead of a hard-coded
    # 'python3', which is absent on most Windows installs (the os.name
    # check below shows Windows is meant to be supported).
    cmd = [sys.executable, f'{path}/helpers/phind.py', config]

    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

    for line in iter(p.stdout.readline, b''):
        if b'<title>Just a moment...</title>' in line:
            os.system('clear' if os.name == 'posix' else 'cls')
            # Fixed typo ("Clouflare" -> "Cloudflare"); also replaced
            # os._exit(0) with a plain return — a library generator must
            # not kill the host process.
            yield 'Cloudflare error, please try again...'
            return
        else:
            # The helper emits keep-alive ping lines; skip them.
            if b'ping - 2023-' in line:
                continue
            # Strip the trailing newline from each decoded line.
            yield line.decode('utf-8')[:-1]

5
g4f/Providers/Provider.py ノーマルファイル
ファイルの表示

@@ -0,0 +1,5 @@
url = None
model = None
def _create_completion(model: str, messages: list, **kwargs):
return

2
g4f/Providers/Test.py ノーマルファイル
ファイルの表示

@@ -0,0 +1,2 @@
def _create_completion(prompt: str, **kwargs):
return 'helloooo', prompt

1
g4f/Providers/__init__.py ノーマルファイル
ファイルの表示

@@ -0,0 +1 @@
from . import Ails, Phind, Test, Provider

53
g4f/Providers/helpers/phind.py ノーマルファイル
ファイルの表示

@@ -0,0 +1,53 @@
import sys
import json
import datetime
import urllib.parse
from curl_cffi import requests
# Parse the provider config handed over on the command line by Phind.py.
config = json.loads(sys.argv[1])
prompt = config['messages'][-1]['content']

_options = {
    'skill': 'expert',
    'date': datetime.datetime.now().strftime('%d/%m/%Y'),
    'language': 'en',
    'detailed': False,
    'creative': True,
    'customLinks': [],
}

# Compact separators reproduce the exact payload a browser sends.
json_data = json.dumps({'question': prompt, 'options': _options},
                       separators=(',', ':'))
headers = {
'Content-Type': 'application/json',
'Pragma': 'no-cache',
'Accept': '*/*',
'Sec-Fetch-Site': 'same-origin',
'Accept-Language': 'en-GB,en;q=0.9',
'Cache-Control': 'no-cache',
'Sec-Fetch-Mode': 'cors',
'Content-Length': str(len(json_data)),
'Origin': 'https://www.phind.com',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.4 Safari/605.1.15',
'Referer': f'https://www.phind.com/search?q={urllib.parse.quote(prompt)}&source=searchbox',
'Connection': 'keep-alive',
'Host': 'www.phind.com',
'Sec-Fetch-Dest': 'empty'
}
def output(chunk):
    """Normalize one raw SSE chunk from phind and print its text payload."""
    # Special-case the triple-blank keep-alive chunk so it collapses to
    # a single newline after the replacements below.
    if chunk == b'data: \r\ndata: \r\ndata: \r\n\r\n':
        chunk = b'data: \n\r\n\r\n'

    text = chunk.decode()

    # Fold multi-line SSE framing into plain text, then strip the
    # remaining "data: " prefixes and blank-line terminators.
    for old, new in (
        ('data: \r\n\r\ndata: ', 'data: \n'),
        ('\r\ndata: \r\ndata: \r\n\r\n', '\n\r\n\r\n'),
    ):
        text = text.replace(old, new)
    text = text.replace('data: ', '').replace('\r\n\r\n', '')

    print(text, flush=True)
response = requests.post('https://www.phind.com/api/infer/creative',
headers=headers, data=json_data, content_callback=output, timeout=999999, impersonate='safari15_5')

47
g4f/__init__.py ノーマルファイル
ファイルの表示

@@ -0,0 +1,47 @@
import sys
from .typing import MetaModels, Union
from . import Providers
class Models(metaclass=MetaModels):
class model:
name: str
base_provider: str
best_site: str
class gpt_35_turbo:
name: str = 'gpt-3.5-turbo'
base_provider: str = 'openai'
best_site: str = Providers.Ails
class gpt_4:
name: str = 'gpt-4'
base_provider: str = 'openai'
best_site: str = Providers.Phind
class Utils:
    """Lookup helpers shared by ChatCompletion."""

    # Translate a public model-name string into its Models entry.
    convert: dict = dict([
        ('gpt-3.5-turbo', Models.gpt_35_turbo),
        ('gpt-4', Models.gpt_4),
    ])
class ChatCompletion:
    """Entry point mirroring openai.ChatCompletion.create."""

    @staticmethod
    def create(model: Models.model or str, messages: list, provider: Providers.Provider = None, stream: bool = False, **kwargs):
        """Run a chat completion.

        model: a Models entry or its public name string.
        messages: OpenAI-style message dicts.
        provider: optional provider module overriding the model's best_site.
        stream: if True return the provider's generator, else join it
        into a single string.

        Exits the process with status 1 on a TypeError (e.g. the chosen
        provider does not accept one of **kwargs).
        """
        try:
            if isinstance(model, str):
                model = Utils.convert[model]

            engine = model.best_site if not provider else provider

            # Fixed bug: providers' _create_completion signature is
            # (model, messages, ...); the model name was previously not
            # passed, so `messages` was bound to the model parameter.
            if stream:
                return engine._create_completion(model.name, messages, **kwargs)
            return ''.join(engine._create_completion(model.name, messages, **kwargs))

        except TypeError as e:
            print(e)
            # The offending argument name sits between the quotes of the
            # TypeError message.
            arg: str = str(e).split("'")[1]
            # Fixed: the message mislabeled a TypeError as "ValueError".
            print(
                f"TypeError: {engine.__name__} does not support '{arg}' argument", file=sys.stderr)
            sys.exit(1)

16
g4f/typing.py ノーマルファイル
ファイルの表示

@@ -0,0 +1,16 @@
from typing import Dict, NewType, Union, Optional, List, get_type_hints
sha256 = NewType('sha_256_hash', str)
class MetaModels(type):
    """Metaclass giving the Models registry a readable str() listing."""

    def __str__(cls):
        # Render the two known engine classes as a pseudo class listing.
        lines: List = [f'class Engines:\n']
        for engine in (cls.gpt_35_turbo, cls.gpt_4):
            lines.append(f' class {engine.__name__}:')
            lines.append(' ...')
        return '\n'.join(lines)

1
test
ファイルの表示

@@ -1 +0,0 @@
test

15
testing/interference_test.py ノーマルファイル
ファイルの表示

@@ -0,0 +1,15 @@
import openai
openai.api_key = ''
openai.api_base = 'paste_address_here'
chat_completion = openai.ChatCompletion.create(stream=True,
model='gpt-3.5-turbo', messages=[{'role': 'user', 'content': 'write a poem about a tree'}])
#print(chat_completion.choices[0].message.content)
for token in chat_completion:
content = token['choices'][0]['delta'].get('content')
if content != None:
print(content)

16
testing/main_test.py ノーマルファイル
ファイルの表示

@@ -0,0 +1,16 @@
import g4f
# print(g4f.Providers.Ails.params)
response = g4f.ChatCompletion.create(model='gpt-3.5-turbo', provider=g4f.Providers.Phind, messages=[
{"role": "user", "content": "Hello world"}], stream=True)
for message in response:
print(message)
# response = g4f.ChatCompletion.create(model=g4f.Models.gpt_35_turbo,
# provider=g4f.Providers.Ails, prompt='hi')
# print(response)