Added examples

bkutasi 2023-06-06 10:46:22 +02:00
Commit bc309e2855
3 changed files with 61 additions and 0 deletions

example_gpt-3.5-turbo.py (new file, 12 lines added)

@@ -0,0 +1,12 @@
import g4f
import sys

# Automatic selection of provider, streamed completion
response = g4f.ChatCompletion.create(model='gpt-3.5-turbo',
                                     messages=[{"role": "user",
                                                "content": "Write a poem about a tree."}],
                                     stream=True)

# Print the streamed tokens as they arrive
for message in response:
    print(message, end="")
    sys.stdout.flush()

example_gpt-4.py (new file, 32 lines added)

@@ -0,0 +1,32 @@
import g4f
import sys
from halo import Halo  # third-party terminal spinner (pip install halo)

# Provider selection
provider = g4f.Provider.Forefront

# Streaming is not supported by these providers
if provider in {g4f.Provider.Aws, g4f.Provider.Ora,
                g4f.Provider.Bard, g4f.Provider.Aichat}:
    stream = False
else:
    stream = True

print(provider.params)  # supported args

# Streamed completion, with loading spinner for non-streamed responses
with Halo(text='Loading...', spinner='dots'):
    response = g4f.ChatCompletion.create(model='gpt-4',
                                         messages=[{"role": "user",
                                                    "content": "Write a poem about a tree."}],
                                         stream=stream,
                                         provider=provider)

# Streamed response: print tokens as they arrive; otherwise print the full reply
if stream:
    for message in response:
        print(message, end="")
        sys.stdout.flush()
else:
    print(response)

print("\n")

example_server.py (new file, 17 lines added)

@@ -0,0 +1,17 @@
import openai
import sys

# Point the legacy (0.x) openai client at the local OpenAI-compatible server
openai.api_key = ''
openai.api_base = 'http://127.0.0.1:1337'

chat_completion = openai.ChatCompletion.create(stream=True,
                                               model='gpt-3.5-turbo',
                                               messages=[{'role': 'user',
                                                          'content': 'write a poem about a tree'}])

# Each streamed chunk carries a delta; print the content as it arrives
for token in chat_completion:
    content = token['choices'][0]['delta'].get('content')
    if content is not None:
        print(content, end="")
        sys.stdout.flush()
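
For reference, example_server.py drives the local endpoint through the legacy openai Python client (the 0.x series, where openai.api_base can be repointed). Below is a minimal sketch of the equivalent raw HTTP request using requests; the /chat/completions path, the JSON body, and the response shape are assumptions based on the OpenAI chat-completions convention that the openai client follows, not something shown in this diff.

import requests

# Hypothetical direct call to the same local server; the endpoint path and the
# response shape are assumed to mirror the OpenAI chat-completions API.
resp = requests.post(
    'http://127.0.0.1:1337/chat/completions',
    json={
        'model': 'gpt-3.5-turbo',
        'stream': False,  # non-streamed: the whole reply arrives as one JSON body
        'messages': [{'role': 'user', 'content': 'write a poem about a tree'}],
    },
)
print(resp.json()['choices'][0]['message']['content'])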