From a3e3362858e4ca9fe31cc18c335de1ad55eefc22 Mon Sep 17 00:00:00 2001 From: abc <98614666+xtekky@users.noreply.github.com> Date: Sat, 13 May 2023 11:06:04 +0100 Subject: [PATCH] instructions --- README.md | 58 +++++++++++++++++++++++++++++++++++- interference/app.py | 2 +- testing/interference_test.py | 15 ---------- testing/main_test.py | 16 ---------- 4 files changed, 58 insertions(+), 33 deletions(-) delete mode 100644 testing/interference_test.py delete mode 100644 testing/main_test.py diff --git a/README.md b/README.md index d6e999f..61b93a7 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,60 @@ -### `g4f.Providers` class +g4f-v2 beta !!, only for developers !! + +### interference openai-proxy api (use with openai python package) +###### run server: `python3 ./interference/app.py` + +``` +import openai + +openai.api_key = '' +openai.api_base = 'http://127.0.0.1:1337' + +chat_completion = openai.ChatCompletion.create(stream=True, + model='gpt-3.5-turbo', messages=[{'role': 'user', 'content': 'write a poem about a tree'}]) + +#print(chat_completion.choices[0].message.content) + +for token in chat_completion: + +    content = token['choices'][0]['delta'].get('content') +    if content != None: +        print(content) +``` + +### simple usage: +``` +import g4f + + +print(g4f.Providers.Ails.params) # supported args + +# Automatic selection of provider + +# streamed completion +response = g4f.ChatCompletion.create(model='gpt-3.5-turbo', messages=[ +    {"role": "user", "content": "Hello world"}], stream=True) + +for message in response: +    print(message) + +# normal response +response = g4f.ChatCompletion.create(model=g4f.Models.gpt_4, prompt='hi') # alternative model setting + +print(response) + + +# Set with provider +response = g4f.ChatCompletion.create(model='gpt-3.5-turbo', provider=g4f.Providers.Phind, messages=[ +    {"role": "user", "content": "Hello world"}], stream=True) + +for message in response: +    print(message) +``` + +### Dev + +(more instructions soon) +the 
`g4f.Providers`class default: diff --git a/interference/app.py b/interference/app.py index ce29ccd..6c31176 100644 --- a/interference/app.py +++ b/interference/app.py @@ -20,7 +20,7 @@ class website: self.config = { 'host': '0.0.0.0', - 'port': 5432 if os.name == 'nt' else 1447, + 'port': 1337, 'debug': True } diff --git a/testing/interference_test.py b/testing/interference_test.py deleted file mode 100644 index cd02625..0000000 --- a/testing/interference_test.py +++ /dev/null @@ -1,15 +0,0 @@ -import openai - -openai.api_key = '' -openai.api_base = 'paste_address_here' - -chat_completion = openai.ChatCompletion.create(stream=True, - model='gpt-3.5-turbo', messages=[{'role': 'user', 'content': 'write a poem about a tree'}]) - -#print(chat_completion.choices[0].message.content) - -for token in chat_completion: - - content = token['choices'][0]['delta'].get('content') - if content != None: - print(content) \ No newline at end of file diff --git a/testing/main_test.py b/testing/main_test.py deleted file mode 100644 index ce75006..0000000 --- a/testing/main_test.py +++ /dev/null @@ -1,16 +0,0 @@ -import g4f - - -# print(g4f.Providers.Ails.params) - -response = g4f.ChatCompletion.create(model='gpt-3.5-turbo', provider=g4f.Providers.Phind, messages=[ - {"role": "user", "content": "Hello world"}], stream=True) - -for message in response: - print(message) - - -# response = g4f.ChatCompletion.create(model=g4f.Models.gpt_35_turbo, -# provider=g4f.Providers.Ails, prompt='hi') - -# print(response)