Forked from g4f/gpt4free

Compare commits


7 commits

Author SHA1 Message Date
EbaAaZ 64ee2643e2 modification 2023-05-17 10:02:48 -04:00
EbaAaZ 3dcf3c3222 modification 2023-05-17 10:01:42 -04:00
EbaAaZ 0cf4072f63 modification 2023-05-17 09:59:29 -04:00
EbaAaZ 242932d455 modification 2023-05-17 09:58:58 -04:00
EbaAaZ b8706fd3ea modification 2023-05-17 09:58:14 -04:00
EbaAaZ 734d984dd9 modification 2023-05-17 09:57:36 -04:00
EbaAaZ ff7013652b modification 2023-05-17 09:56:59 -04:00
7 changed files with 66 additions and 78 deletions

.gitignore vendored (9 changes)
View file

@@ -14,3 +14,12 @@ app.py
typing.py
_init_.py
you.py
Provider.py
Phind.py
Bing.py
Ails.py
__init__.py
you.py
phind.py
bing.py
README.md

View file

@@ -1,13 +1,27 @@
unstable g4f-v2 early-beta, only for developers !!
markdown
### interference opneai-proxy api (use with openai python package)
# g4f-v2: An Unstable Early-Beta Interference OpenAI Proxy API (For Developers)
run server:
```sh
**Note: This version of g4f is still unstable and intended for developers only. Use it with caution.**
## Introduction
g4f-v2 is a library that acts as an intermediary between your application and the OpenAI GPT-3.5 Turbo language model. It provides an API for interacting with the model and handling chat completions.
## Running the Server
To start the g4f-v2 server, run the following command:
```shell
python3 -m interference.app
```
```py
Usage Examples
Using the OpenAI Python Package
First, ensure you have the OpenAI Python package installed. You can then configure it to use g4f-v2 as the API endpoint:
python
import openai
openai.api_key = ''
@@ -16,70 +30,46 @@ openai.api_base = 'http://127.0.0.1:1337'
chat_completion = openai.ChatCompletion.create(stream=True,
model='gpt-3.5-turbo', messages=[{'role': 'user', 'content': 'write a poem about a tree'}])
#print(chat_completion.choices[0].message.content)
for token in chat_completion:
content = token['choices'][0]['delta'].get('content')
if content != None:
if content is not None:
print(content)
```
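Since the proxy mimics the OpenAI REST interface, the same endpoint can also be exercised without the openai package. A minimal sketch, assuming the server accepts an OpenAI-style POST to /chat/completions on http://127.0.0.1:1337 and streams server-sent events (the route and event format are assumptions based on the openai package defaults, not confirmed by this diff):
```python
import json
import requests

# Hypothetical direct call to the local interference proxy.
resp = requests.post(
    'http://127.0.0.1:1337/chat/completions',
    json={
        'model': 'gpt-3.5-turbo',
        'stream': True,
        'messages': [{'role': 'user', 'content': 'write a poem about a tree'}],
    },
    stream=True,
)

for raw_line in resp.iter_lines():
    # OpenAI-style streams send "data: {...}" lines, ending with "data: [DONE]".
    line = raw_line.decode('utf-8') if raw_line else ''
    if line.startswith('data: ') and line != 'data: [DONE]':
        delta = json.loads(line[len('data: '):])['choices'][0]['delta']
        if delta.get('content') is not None:
            print(delta['content'], end='')
```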
### simple usage:
Simple Usage
providers:
```py
g4f.Providers.You
g4f.Providers.Ails
g4f.Providers.Phind
g4f-v2 supports multiple providers, including g4f.Providers.You, g4f.Providers.Ails, and g4f.Providers.Phind. Here's how you can use them:
# usage:
python
response = g4f.ChatCompletion.create(..., provider=g4f.Providers.ProviderName)
```
```py
import g4f
# Accessing provider parameters
print(g4f.Providers.Ails.params) # Displays supported arguments
print(g4f.Providers.Ails.params) # supported args
# Automatic selection of provider
# streamed completion
# Automatic provider selection
response = g4f.ChatCompletion.create(model='gpt-3.5-turbo', messages=[
{"role": "user", "content": "Hello world"}], stream=True)
for message in response:
print(message)
# normal response
response = g4f.ChatCompletion.create(model=g4f.Models.gpt_4, messages=[
{"role": "user", "content": "hi"}]) # alterative model setting
print(response)
# Set with provider
# Using a specific provider
response = g4f.ChatCompletion.create(model='gpt-3.5-turbo', provider=g4f.Providers.Phind, messages=[
{"role": "user", "content": "Hello world"}], stream=True)
for message in response:
print(message)
```
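Since any single provider can break independently of the others, one natural pattern on top of the calls above is a fallback loop; this is only a sketch using the provider names listed in the README, not an API the library documents:
```python
import g4f

# Try each provider in order; a hypothetical fallback strategy.
for provider in (g4f.Providers.You, g4f.Providers.Ails, g4f.Providers.Phind):
    try:
        response = g4f.ChatCompletion.create(
            model='gpt-3.5-turbo',
            provider=provider,
            messages=[{'role': 'user', 'content': 'Hello world'}])
        print(response)
        break
    except Exception as exc:
        # Move on to the next provider on any failure.
        print(f'{provider} failed: {exc}')
```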
### Dev
Development
(more instructions soon)
the `g4f.Providers`class
In the development section, we'll cover more instructions soon. The g4f.Providers class is a crucial component of the library. You can define default providers and their behavior in separate files within the g4f/Providers directory. Each provider file should have the following structure:
default:
./g4f/Providers/ProviderName.py:
python
`./g4f/Providers/ProviderName.py`:
```python
import os
url: str = 'https://{site_link}'
model: str = 'gpt-[version]'
@@ -88,7 +78,5 @@ def _create_completion(prompt: str, args...):
or
yield ...
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
```
', '.join([f"{name}: {get_type_hints(_create_completion)[name]
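Assembled into one file, the skeleton above might look like the following hypothetical provider. The module name Example.py, the URL, and the echo body are illustrative only; the url/model globals, the _create_completion generator, and the trailing params string are the parts the README prescribes:
```python
# ./g4f/Providers/Example.py (hypothetical provider for illustration)
import os
from ..typing import get_type_hints

url: str = 'https://example.com'   # backend the provider would talk to
model: str = 'gpt-3.5-turbo'

def _create_completion(model: str, messages: list, stream: bool = False, **kwargs):
    # A real provider would call its backend here; this one just echoes.
    yield f"echo: {messages[-1]['content']}"

params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}"
               for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
```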

View file

@@ -1,11 +1,10 @@
import os
import time
import json
import time
import uuid
import hashlib
import requests
from ..typing import sha256, Dict, get_type_hints
from datetime import datetime
url: str = 'https://ai.ls'
@@ -13,29 +12,24 @@ model: str = 'gpt-3.5-turbo'
class Utils:
def hash(json_data: Dict[str, str]) -> sha256:
@staticmethod
def hash(json_data: dict) -> str:
secretKey = bytearray([79, 86, 98, 105, 91, 84, 80, 78, 123, 83,
35, 41, 99, 123, 51, 54, 37, 57, 63, 103, 59, 117, 115, 108, 41, 67, 76])
secretKey: bytearray = bytearray([79, 86, 98, 105, 91, 84, 80, 78, 123, 83,
35, 41, 99, 123, 51, 54, 37, 57, 63, 103, 59, 117, 115, 108, 41, 67, 76])
base_string: str = '%s:%s:%s:%s' % (
json_data['t'],
json_data['m'],
'OVbi[TPN{S#)c{36%9?g;usl)CL',
len(json_data['m'])
)
base_string = f"{json_data['t']}:{json_data['m']}:{'OVbi[TPN{S#)c{36%9?g;usl)CL'}:{len(json_data['m'])}"
return hashlib.sha256(base_string.encode()).hexdigest()
@staticmethod
def format_timestamp(timestamp: int) -> str:
e = timestamp
n = e % 10
r = n + 1 if n % 2 == 0 else n
return str(e - n + r)
def _create_completion(model: str,messages: list, temperature: float = 0.6, stream: bool = False):
def _create_completion(model: str, messages: list, temperature: float = 0.6, stream: bool = False):
headers = {
'authority': 'api.caipacity.com',
'accept': '*/*',
@@ -61,13 +55,16 @@ def _create_completion(model: str,messages: list, temperature: float = 0.6, stre
't': timestamp,
's': Utils.hash({
't': timestamp,
'm': messages[-1]['content']})}
'm': messages[-1]['content']
})
}
json_data = json.dumps(separators=(',', ':'), obj={
json_data = json.dumps({
'model': 'gpt-3.5-turbo',
'temperature': temperature,
'stream': True,
'messages': messages} | sig)
'messages': messages
} | sig, separators=(',', ':'))
response = requests.post('https://api.caipacity.com/v1/chat/completions?full=false',
headers=headers, data=json_data, stream=True)
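For reference, the signature scheme in this hunk can be reproduced standalone. The sketch below restates what the diff shows: the base string is t:m:secret:len(m) hashed with SHA-256, and format_timestamp forces the last digit of the timestamp to be odd (feeding it milliseconds is an assumption about how the provider calls it):
```python
import time
import hashlib

SECRET = 'OVbi[TPN{S#)c{36%9?g;usl)CL'  # same constant as Utils.hash above

def sign(t: str, m: str) -> str:
    # Base string t:m:secret:len(m), hashed with SHA-256.
    base_string = f'{t}:{m}:{SECRET}:{len(m)}'
    return hashlib.sha256(base_string.encode()).hexdigest()

def format_timestamp(timestamp: int) -> str:
    # Make the last digit odd: even digits are bumped up by one.
    n = timestamp % 10
    r = n + 1 if n % 2 == 0 else n
    return str(timestamp - n + r)

t = format_timestamp(int(time.time() * 1000))
print(sign(t, 'write a poem about a tree'))
```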

View file

@@ -1,15 +1,10 @@
import os
import json
import time
import subprocess
from ..typing import sha256, Dict, get_type_hints
import json
url = 'https://bing.com/chat'
model = ['gpt-3.5-turbo', 'gpt-4']
def _create_completion(model: str, messages: list, **kwargs):
path = os.path.dirname(os.path.realpath(__file__))
config = json.dumps({
'messages': messages,
@@ -20,8 +15,4 @@ def _create_completion(model: str, messages: list, **kwargs):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in iter(p.stdout.readline, b''):
yield line.decode('utf-8')[:-1]
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
'(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
yield line.decode('utf-8')[:-1]
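The streaming strategy here, shelling out to a helper process and yielding its stdout line by line, is a self-contained pattern. A sketch under the assumption that the helper prints one chunk of text per line (the actual command the provider builds is not shown in full in this diff):
```python
import subprocess

def stream_lines(cmd: list):
    # Read stdout until EOF, strip the trailing newline, yield each line.
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    for line in iter(p.stdout.readline, b''):
        yield line.decode('utf-8')[:-1]

# Hypothetical usage with a stand-in helper script:
for chunk in stream_lines(['python3', 'helper.py']):
    print(chunk)
```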

View file

@@ -2,12 +2,13 @@ import os
import json
import time
import subprocess
import os
import json
url = 'https://phind.com'
model = ['gpt-3.5-turbo', 'gpt-4']
def _create_completion(model: str, messages: list, **kwargs):
path = os.path.dirname(os.path.realpath(__file__))
config = json.dumps({
'messages': messages}, separators=(',', ':'))
@@ -19,7 +20,7 @@ def _create_completion(model: str, messages: list, **kwargs):
for line in iter(p.stdout.readline, b''):
if b'<title>Just a moment...</title>' in line:
os.system('clear' if os.name == 'posix' else 'cls')
yield 'Clouflare error, please try again...'
yield 'Cloudflare error, please try again...'
os._exit(0)
else:

View file

@@ -50,4 +50,4 @@ def output(chunk):
response = requests.post('https://www.phind.com/api/infer/answer',
headers=headers, data=json_data, content_callback=output, timeout=999999, impersonate='safari15_5')
headers=headers, data=json_data, content_callback=output, timeout=999999, impersonate='safari15_5')
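The impersonate and content_callback keywords in this hunk are not part of the standard requests library; they match the curl_cffi requests shim, which streams response bytes into a callback while presenting a browser TLS fingerprint. A sketch assuming that library (the import and the request payload are assumptions; only the call shape comes from the diff):
```python
from curl_cffi import requests  # assumption: curl_cffi, not stdlib requests

def output(chunk: bytes):
    # Invoked repeatedly with raw response bytes as they arrive.
    print(chunk.decode('utf-8', errors='ignore'), end='')

response = requests.post('https://www.phind.com/api/infer/answer',
                         headers={'Content-Type': 'application/json'},
                         data='{"question": "hi"}',  # hypothetical payload
                         content_callback=output,
                         timeout=999999,
                         impersonate='safari15_5')
```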

File diff suppressed because one or more lines are too long