From bc309e2855573c39f80babcb6018346404133bd3 Mon Sep 17 00:00:00 2001
From: bkutasi <6hi24etyp@mozmail.com>
Date: Tue, 6 Jun 2023 10:46:22 +0200
Subject: [PATCH] Added examples

---
 example_gpt-3.5-turbo.py | 12 ++++++++++++
 example_gpt-4.py         | 32 ++++++++++++++++++++++++++++++++
 example_server.py        | 17 +++++++++++++++++
 3 files changed, 61 insertions(+)
 create mode 100644 example_gpt-3.5-turbo.py
 create mode 100644 example_gpt-4.py
 create mode 100644 example_server.py

diff --git a/example_gpt-3.5-turbo.py b/example_gpt-3.5-turbo.py
new file mode 100644
index 0000000..b688c32
--- /dev/null
+++ b/example_gpt-3.5-turbo.py
@@ -0,0 +1,12 @@
+import g4f
+import sys
+
+# Automatic selection of provider, streamed completion
+response = g4f.ChatCompletion.create(model='gpt-3.5-turbo',
+                                     messages=[{"role": "user",
+                                                "content": "Write a poem about a tree."}],
+                                     stream=True)
+
+for message in response:
+    print(message, end="")
+    sys.stdout.flush()
diff --git a/example_gpt-4.py b/example_gpt-4.py
new file mode 100644
index 0000000..8cec142
--- /dev/null
+++ b/example_gpt-4.py
@@ -0,0 +1,32 @@
+import g4f
+import sys
+from halo import Halo
+
+# Provider selection
+provider=g4f.Provider.Forefront
+
+# Streaming is not supported by these providers
+if provider in {g4f.Provider.Aws, g4f.Provider.Ora,
+                g4f.Provider.Bard, g4f.Provider.Aichat}:
+    stream=False
+else:
+    stream=True
+
+print(provider.params) # supported args
+
+# streamed completion, with loading spinner for non-streamed responses
+with Halo(text='Loading...', spinner='dots'):
+    response = g4f.ChatCompletion.create(model='gpt-4',
+                                         messages=[{"role": "user",
+                                                    "content": "Write a poem about a tree."}],
+                                         stream=stream,
+                                         provider=provider)
+
+# Streamed response
+if stream:
+    for message in response:
+        print(message, end="")
+        sys.stdout.flush()
+else:
+    print(response)
+print("\n")
diff --git a/example_server.py b/example_server.py
new file mode 100644
index 0000000..34ebc64
--- /dev/null
+++ b/example_server.py
@@ -0,0 +1,17 @@
+import openai
+import sys
+
+openai.api_key = ''
+openai.api_base = 'http://127.0.0.1:1337'
+
+chat_completion = openai.ChatCompletion.create(stream=True,
+                                               model='gpt-3.5-turbo',
+                                               messages=[{'role': 'user',
+                                                          'content': 'write a poem about a tree'}])
+
+for token in chat_completion:
+    content = token['choices'][0]['delta'].get('content')
+    if content is not None:
+        print(content, end="")
+        sys.stdout.flush()
+