diff --git a/example_gpt-3.5-turbo.py b/example_gpt-3.5-turbo.py
new file mode 100644
index 0000000..82b61da
--- /dev/null
+++ b/example_gpt-3.5-turbo.py
@@ -0,0 +1,12 @@
+import g4f
+import sys
+
+# Automatic selection of provider, streamed completion
+response = g4f.ChatCompletion.create(model='gpt-3.5-turbo',
+                                     messages=[{"role": "user",
+                                                "content": "Write a poem about a tree."}],
+                                     stream=True)
+
+for message in response:
+    print(message, end="")
+    sys.stdout.flush()
diff --git a/example_gpt-4.py b/example_gpt-4.py
new file mode 100644
index 0000000..bf0a4c3
--- /dev/null
+++ b/example_gpt-4.py
@@ -0,0 +1,32 @@
+import g4f
+import sys
+
+# Provider selection
+provider = g4f.Provider.Phind
+
+# Streaming is not supported by these providers
+if provider in {g4f.Provider.Aws, g4f.Provider.Ora,
+                g4f.Provider.Bard, g4f.Provider.Aichat}:
+    stream = False
+else:
+    stream = True
+
+print(provider.params)  # supported args
+
+# Getting the response
+response = g4f.ChatCompletion.create(model='gpt-4',
+                                     messages=[{"role": "user",
+                                                "content": "Write a poem about a tree."}],
+                                     stream=stream,
+                                     provider=provider)
+
+# Printing the response
+if stream:
+    for message in response:
+        print(message, end="")
+        sys.stdout.flush()
+    print("\n")
+else:
+    print(response)
+
+
diff --git a/example_server.py b/example_server.py
new file mode 100644
index 0000000..37ddbd1
--- /dev/null
+++ b/example_server.py
@@ -0,0 +1,17 @@
+import openai
+import sys
+
+openai.api_key = ""
+openai.api_base = "http://127.0.0.1:1337"
+
+chat_completion = openai.ChatCompletion.create(stream=True,
+                                               model="gpt-3.5-turbo",
+                                               messages=[{"role": "user",
+                                                          "content": "Write a poem about a tree."}])
+
+for token in chat_completion:
+    content = token["choices"][0]["delta"].get("content")
+    if content is not None:
+        print(content, end="")
+        sys.stdout.flush()
+