From 2a2e05d4dca7ed57f456a594e22620afd9640495 Mon Sep 17 00:00:00 2001
From: bkutasi <6hi24etyp@mozmail.com>
Date: Tue, 6 Jun 2023 11:16:23 +0200
Subject: [PATCH] Minor fixes

---
 example_gpt-3.5-turbo.py |  2 +-
 example_gpt-4.py         | 22 +++++++++++-----------
 example_server.py        | 12 ++++++------
 3 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/example_gpt-3.5-turbo.py b/example_gpt-3.5-turbo.py
index b688c32..82b61da 100644
--- a/example_gpt-3.5-turbo.py
+++ b/example_gpt-3.5-turbo.py
@@ -4,7 +4,7 @@ import sys
 # Automatic selection of provider, streamed completion
 response = g4f.ChatCompletion.create(model='gpt-3.5-turbo',
                                      messages=[{"role": "user",
-                                                "content": "Write a poem about a tree."}],
+                                                "content": "Write a poem about a tree."}],
                                      stream=True)
 
 for message in response:
diff --git a/example_gpt-4.py b/example_gpt-4.py
index 8cec142..bf0a4c3 100644
--- a/example_gpt-4.py
+++ b/example_gpt-4.py
@@ -1,9 +1,8 @@
 import g4f
 import sys
-from halo import Halo
 
 # Provider selection
-provider=g4f.Provider.Forefront
+provider=g4f.Provider.Phind
 
 # Streaming is not supported by these providers
 if provider in {g4f.Provider.Aws, g4f.Provider.Ora,
@@ -14,19 +13,20 @@ else:
 
 print(provider.params) # supported args
 
-# streamed completion, with loading spinner for non-streamed responses
-with Halo(text='Loading...', spinner='dots'):
-    response = g4f.ChatCompletion.create(model='gpt-4',
-                                         messages=[{"role": "user",
-                                                    "content": "Write a poem about a tree."}],
-                                         stream=stream,
-                                         provider=provider)
+# Getting the response
+response = g4f.ChatCompletion.create(model='gpt-4',
+                                     messages=[{"role": "user",
+                                                "content": "Write a poem about a tree."}],
+                                     stream=stream,
+                                     provider=provider)
 
-# Streamed response
+# Printing the response
 if stream:
     for message in response:
         print(message, end="")
         sys.stdout.flush()
+    print("\n")
 else:
     print(response)
-print("\n")
+
+
diff --git a/example_server.py b/example_server.py
index 34ebc64..37ddbd1 100644
--- a/example_server.py
+++ b/example_server.py
@@ -1,16 +1,16 @@
 import openai
 import sys
 
-openai.api_key = ''
-openai.api_base = 'http://127.0.0.1:1337'
+openai.api_key = ""
+openai.api_base = "http://127.0.0.1:1337"
 
 chat_completion = openai.ChatCompletion.create(stream=True,
-                                               model='gpt-3.5-turbo',
-                                               messages=[{'role': 'user',
-                                                          'content': 'write a poem about a tree'}])
+                                               model="gpt-3.5-turbo",
+                                               messages=[{"role": "user",
+                                                          "content": "Write a poem about a tree."}])
 
 for token in chat_completion:
-    content = token['choices'][0]['delta'].get('content')
+    content = token["choices"][0]["delta"].get("content")
     if content is not None:
         print(content, end="")
         sys.stdout.flush()