このコミットが含まれているのは:
bkutasi 2023-06-06 11:16:23 +02:00
コミット 2a2e05d4dc
3個のファイルの変更18行の追加18行の削除

ファイルの表示

@@ -4,7 +4,7 @@ import sys
# Automatic selection of provider, streamed completion # Automatic selection of provider, streamed completion
response = g4f.ChatCompletion.create(model='gpt-3.5-turbo', response = g4f.ChatCompletion.create(model='gpt-3.5-turbo',
messages=[{"role": "user", messages=[{"role": "user",
"content": "Write a poem about a tree."}], "content": "Write a poem about a tree."}],
stream=True) stream=True)
for message in response: for message in response:

ファイルの表示

@@ -1,9 +1,8 @@
import g4f import g4f
import sys import sys
from halo import Halo
# Provider selection # Provider selection
provider=g4f.Provider.Forefront provider=g4f.Provider.Phind
# Streaming is not supported by these providers # Streaming is not supported by these providers
if provider in {g4f.Provider.Aws, g4f.Provider.Ora, if provider in {g4f.Provider.Aws, g4f.Provider.Ora,
@@ -14,19 +13,20 @@ else:
print(provider.params) # supported args print(provider.params) # supported args
# streamed completion, with loading spinner for non-streamed responses # Getting the response
with Halo(text='Loading...', spinner='dots'): response = g4f.ChatCompletion.create(model='gpt-4',
response = g4f.ChatCompletion.create(model='gpt-4', messages=[{"role": "user",
messages=[{"role": "user", "content": "Write a poem about a tree."}],
"content": "Write a poem about a tree."}], stream=stream,
stream=stream, provider=provider)
provider=provider)
# Streamed response # Printing the response
if stream: if stream:
for message in response: for message in response:
print(message, end="") print(message, end="")
sys.stdout.flush() sys.stdout.flush()
print("\n")
else: else:
print(response) print(response)
print("\n")

ファイルの表示

@@ -1,16 +1,16 @@
import openai import openai
import sys import sys
openai.api_key = '' openai.api_key = ""
openai.api_base = 'http://127.0.0.1:1337' openai.api_base = "http://127.0.0.1:1337"
chat_completion = openai.ChatCompletion.create(stream=True, chat_completion = openai.ChatCompletion.create(stream=True,
model='gpt-3.5-turbo', model="gpt-3.5-turbo",
messages=[{'role': 'user', messages=[{"role": "user",
'content': 'write a poem about a tree'}]) "content": "Write a poem about a tree."}])
for token in chat_completion: for token in chat_completion:
content = token['choices'][0]['delta'].get('content') content = token["choices"][0]["delta"].get("content")
if content is not None: if content is not None:
print(content, end="") print(content, end="")
sys.stdout.flush() sys.stdout.flush()