diff --git a/README.md b/README.md
index 9fd505f..4968662 100644
--- a/README.md
+++ b/README.md
@@ -32,11 +32,11 @@ Just API's from some language model sites.
 Pull Requests
- ChatGpt Discord Bot
- Stars
- Forks
- Issues
- Pull Requests
+ ChatGpt Discord Bot
+ Stars
+ Forks
+ Issues
+ Pull Requests
@@ -86,7 +86,6 @@ Just API's from some language model sites.
 | [sqlchat.ai](https://sqlchat.ai) | GPT-3.5 |
 | [bard.google.com](https://bard.google.com) | custom / search |
 | [bing.com/chat](https://bing.com/chat) | GPT-4/3.5 |
-| [chat.forefront.ai/](https://chat.forefront.ai/) | GPT-4/3.5 |
 
 ## Best sites
@@ -119,7 +118,7 @@ then run:
 Build
 ```
-docker build -t gpt4free:latest -f Docker/Dockerfile .
+docker build -t gpt4free:latest .
 ```
 
 Run
@@ -127,17 +126,13 @@ Run
 ```
 docker run -p 8501:8501 gpt4free:latest
 ```
-Another way - docker-compose (no docker build/run needed)
-```
-docker-compose up -d
-```
 
 ## Deploy using docker-compose
 
 Run the following:
 
 ```
-docker-compose up -d
+docker-compose up --build -d
 ```
 
 ## ChatGPT clone
diff --git a/docker-compose.yaml b/docker-compose.yaml
index 3afd6cd..8098f35 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -2,8 +2,14 @@ version: "3.9"
 services:
   gpt4free:
-    build:
-      context: .
+    build:
+      context: ./
       dockerfile: Dockerfile
+    container_name: dc_gpt4free
+    # environment:
+      # - http_proxy=http://127.0.0.1:1080 # modify this for your proxy
+      # - https_proxy=http://127.0.0.1:1080 # modify this for your proxy
+    image: img_gpt4free
     ports:
-      - "8501:8501"
+      - 8501:8501
+    restart: always
\ No newline at end of file
diff --git a/docker-compose.yml b/docker-compose.yml
deleted file mode 100644
index e8e7119..0000000
--- a/docker-compose.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-version: '3.8'
-
-services:
-  gpt4:
-    build:
-      context: .
-      dockerfile: Dockerfile
-    image: gpt4free:latest
-    container_name: gpt4
-    ports:
-      - 8501:8501
-    restart: unless-stopped
diff --git a/gpt4free/forefront/README.md b/gpt4free/forefront/README.md
index 35ba989..887097e 100644
--- a/gpt4free/forefront/README.md
+++ b/gpt4free/forefront/README.md
@@ -6,8 +6,11 @@ from gpt4free import forefront
 token = forefront.Account.create(logging=False)
 print(token)
 # get a response
-for response in forefront.StreamingCompletion.create(token=token,
-                                                     prompt='hello world', model='gpt-4'):
-    print(response.completion.choices[0].text, end='')
+for response in forefront.StreamingCompletion.create(
+    token=token,
+    prompt='hello world',
+    model='gpt-4'
+):
+    print(response.choices[0].text, end='')
 print("")
 ```
\ No newline at end of file
diff --git a/gpt4free/theb/README.md b/gpt4free/theb/README.md
index a4abdf6..a7af9dd 100644
--- a/gpt4free/theb/README.md
+++ b/gpt4free/theb/README.md
@@ -5,7 +5,10 @@ from gpt4free import theb
 
 # simple streaming completion
-for token in theb.Completion.create('hello world'):
-    print(token, end='', flush=True)
-print("")
+
+while True:
+    x = input()
+    for token in theb.Completion.create(x):
+        print(token, end='', flush=True)
+    print("")
 ```
diff --git a/gpt4free/theb/__init__.py b/gpt4free/theb/__init__.py
index c696387..e3a7f78 100644
--- a/gpt4free/theb/__init__.py
+++ b/gpt4free/theb/__init__.py
@@ -17,6 +17,7 @@ class Completion:
     timer = None
     message_queue = Queue()
     stream_completed = False
+    last_msg_id = None
 
     @staticmethod
     def request(prompt: str, proxy: Optional[str] = None):
@@ -28,26 +29,35 @@ class Completion:
         }
         proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else None
-
+
+        options = {}
+        if Completion.last_msg_id:
+            options['parentMessageId'] = Completion.last_msg_id
+
         requests.post(
             'https://chatbot.theb.ai/api/chat-process',
             headers=headers,
             proxies=proxies,
             content_callback=Completion.handle_stream_response,
-            json={'prompt': prompt, 'options': {}},
+            json={'prompt': prompt, 'options': options},
         )
         Completion.stream_completed = True
 
     @staticmethod
+    def create(prompt: str, proxy: Optional[str] = None) -> Generator[str, None, None]:
+        Completion.stream_completed = False
+        Thread(target=Completion.request, args=[prompt, proxy]).start()
 
         while not Completion.stream_completed or not Completion.message_queue.empty():
             try:
                 message = Completion.message_queue.get(timeout=0.01)
                 for message in findall(Completion.regex, message):
-                    yield loads(Completion.part1 + message + Completion.part2)['delta']
+                    message_json = loads(Completion.part1 + message + Completion.part2)
+                    Completion.last_msg_id = message_json['id']
+                    yield message_json['delta']
 
             except Empty:
                 pass