Merge pull request #205 from AymaneHrouch/reformat__code

Reformat code using PyCharm
This commit is contained in:
t.me/xtekky 2023-04-27 16:28:36 +01:00 committed by GitHub
Commit bbb4d69a93
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
33 changed files with 675 additions and 666 deletions
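Note on the diff below: the changes are almost entirely mechanical. PyCharm's "Optimize Imports" re-sorts each file's import block, and "Reformat Code" wraps over-long calls onto continuation-indented lines. A minimal sketch of both patterns follows; it assumes a PEP 8 line-length limit, and the module, URL, and function name are placeholders rather than code from this repository.

# Hypothetical before/after, not a file from this PR.
#
# Before reformatting:
#
#     from requests import post
#     from json import loads
#
#     def ask(prompt):
#         return loads(post('https://example.invalid/api/chat', json={'prompt': prompt}, timeout=30).text)
#
# After "Optimize Imports" (sorted imports) and "Reformat Code" (wrapped call):

from json import loads

from requests import post


def ask(prompt: str) -> dict:
    # Long argument lists are wrapped at the line-length limit and aligned
    # with the opening parenthesis, matching the style seen in the diff below.
    return loads(
        post('https://example.invalid/api/chat', json={'prompt': prompt},
             timeout=30).text)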

View file

@@ -1,11 +1,13 @@
from tls_client import Session
from forefront.mail import Mail
from time import time, sleep
from re import match
from forefront.typing import ForeFrontResponse
from uuid import uuid4
from requests import post
from json import loads
from re import match
from time import time, sleep
from uuid import uuid4
from requests import post
from tls_client import Session
from forefront.mail import Mail
from forefront.typing import ForeFrontResponse
class Account:
@@ -40,7 +42,8 @@ class Account:
trace_token = response.json()['response']['id']
if logging: print(trace_token)
response = client.post(f"https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/prepare_verification?_clerk_js_version=4.32.6",
response = client.post(
f"https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/prepare_verification?_clerk_js_version=4.32.6",
data={
"strategy": "email_code",
}
@@ -62,7 +65,9 @@ class Account:
if logging: print(mail_token)
response = client.post(f'https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/attempt_verification?_clerk_js_version=4.38.4', data = {
response = client.post(
f'https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/attempt_verification?_clerk_js_version=4.38.4',
data={
'code': mail_token,
'strategy': 'email_code'
})

View file

@@ -1,6 +1,8 @@
from requests import Session
from string import ascii_letters
from random import choices
from string import ascii_letters
from requests import Session
class Mail:
def __init__(self, proxies: dict = None) -> None:
@@ -52,4 +54,3 @@ class Mail:
def get_message_content(self, message_id: str):
return self.get_message(message_id)["text"]

View file

@@ -24,7 +24,6 @@ class ForeFrontResponse:
return f'''<__main__.APIResponse.Usage(\n prompt_tokens = {self.prompt_tokens},\n completion_tokens = {self.completion_tokens},\n total_tokens = {self.total_tokens})object at 0x1337>'''
def __init__(self, response_dict: dict) -> None:
self.response_dict = response_dict
self.id = response_dict['id']
self.object = response_dict['object']

View file

@@ -1,19 +1,17 @@
from urllib.parse import quote
from time import time
from datetime import datetime
from queue import Queue, Empty
from threading import Thread
from re import findall
from time import time
from urllib.parse import quote
from curl_cffi.requests import post
cf_clearance = ''
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
class PhindResponse:
class Completion:
class Choices:
def __init__(self, choice: dict) -> None:
self.text = choice['text']
@@ -38,7 +36,6 @@ class PhindResponse:
return f'''<__main__.APIResponse.Usage(\n prompt_tokens = {self.prompt_tokens},\n completion_tokens = {self.completion_tokens},\n total_tokens = {self.total_tokens})object at 0x1337>'''
def __init__(self, response_dict: dict) -> None:
self.response_dict = response_dict
self.id = response_dict['id']
self.object = response_dict['object']
@@ -157,7 +154,8 @@ class Completion:
}
completion = ''
response = post('https://www.phind.com/api/infer/answer', headers = headers, json = json_data, timeout=99999, impersonate='chrome110')
response = post('https://www.phind.com/api/infer/answer', headers=headers, json=json_data, timeout=99999,
impersonate='chrome110')
for line in response.text.split('\r\n\r\n'):
completion += (line.replace('data: ', ''))
@@ -223,8 +221,8 @@ class StreamingCompletion:
}
response = post('https://www.phind.com/api/infer/answer',
headers = headers, json = json_data, timeout=99999, impersonate='chrome110', content_callback=StreamingCompletion.handle_stream_response)
headers=headers, json=json_data, timeout=99999, impersonate='chrome110',
content_callback=StreamingCompletion.handle_stream_response)
StreamingCompletion.stream_completed = True

View file

@@ -7,5 +7,4 @@ print(token)
# get a response
for response in forefront.StreamingCompletion.create(token=token,
prompt='hello world', model='gpt-4'):
print(response.completion.choices[0].text, end='')

View file

@@ -10,7 +10,8 @@ prompt = 'hello world'
result = phind.Completion.create(
model='gpt-4',
prompt=prompt,
results = phind.Search.create(prompt, actualSearch = False), # create search (set actualSearch to False to disable internet)
results=phind.Search.create(prompt, actualSearch=False),
# create search (set actualSearch to False to disable internet)
creative=False,
detailed=False,
codeContext='') # up to 3000 chars of code
@@ -24,7 +25,8 @@ prompt = 'who won the quatar world cup'
for result in phind.StreamingCompletion.create(
model='gpt-4',
prompt=prompt,
results = phind.Search.create(prompt, actualSearch = True), # create search (set actualSearch to False to disable internet)
results=phind.Search.create(prompt, actualSearch=True),
# create search (set actualSearch to False to disable internet)
creative=False,
detailed=False,
codeContext=''): # up to 3000 chars of code

View file

@@ -1,16 +1,16 @@
from requests import Session
from tls_client import Session as TLS
from json import dumps
from hashlib import md5
from time import sleep
from json import dumps
from re import findall
from pypasser import reCaptchaV3
from quora import extract_formkey
from quora.mail import Emailnator
from tls_client import Session as TLS
from twocaptcha import TwoCaptcha
from quora import extract_formkey
from quora.mail import Emailnator
solver = TwoCaptcha('72747bf24a9d89b4dcc1b24875efd358')
class Account:
def create(proxy: None or str = None, logging: bool = False, enable_bot_creation: bool = False):
client = TLS(client_identifier='chrome110')

View file

@@ -1,6 +1,7 @@
import quora
from time import sleep
import quora
token = quora.Account.create(proxy=None, logging=True)
print('token', token)
@@ -9,5 +10,4 @@ sleep(2)
for response in quora.StreamingCompletion.create(model='gpt-3.5-turbo',
prompt='hello world',
token=token):
print(response.completion.choices[0].text, end="", flush=True)

View file

@@ -14,5 +14,4 @@ for response in quora.StreamingCompletion.create(
custom_model=model.name,
prompt='hello world',
token=token):
print(response.completion.choices[0].text)

View file

@@ -3,5 +3,4 @@ import sqlchat
for response in sqlchat.StreamCompletion.create(
prompt='write python code to reverse a string',
messages=[]):
print(response.completion.choices[0].text, end='')

View file

@@ -3,5 +3,4 @@ import t3nsor
for response in t3nsor.StreamCompletion.create(
prompt='write python code to reverse a string',
messages=[]):
print(response.completion.choices[0].text)

View file

@@ -1,12 +1,12 @@
from requests import Session
from re import search
from random import randint
from json import dumps, loads
from urllib.parse import urlencode
from dotenv import load_dotenv
from os import getenv
from random import randint
from re import search
from urllib.parse import urlencode
from bard.typings import BardResponse
from dotenv import load_dotenv
from requests import Session
load_dotenv()
token = getenv('1psid')
@@ -62,7 +62,8 @@ class Completion:
'rt': 'c',
})
response = client.post(f'https://bard.google.com/_/BardChatUi/data/assistant.lamda.BardFrontendService/StreamGenerate?{params}',
response = client.post(
f'https://bard.google.com/_/BardChatUi/data/assistant.lamda.BardFrontendService/StreamGenerate?{params}',
data={
'at': snlm0e,
'f.req': dumps([None, dumps([

View file

@@ -1,4 +1,4 @@
from typing import Dict, List, Optional, Union
from typing import Dict, List, Union
class BardResponse:

View file

@@ -1,14 +1,12 @@
# Import necessary libraries
from requests import get
from browser_cookie3 import edge, chrome
from ssl import create_default_context
from certifi import where
from uuid import uuid4
from random import randint
from json import dumps, loads
import asyncio
from json import dumps, loads
from ssl import create_default_context
import websockets
from browser_cookie3 import edge
from certifi import where
from requests import get
# Set up SSL context
ssl_context = create_default_context()
@@ -106,4 +104,5 @@ async def run():
):
print(value, end='', flush=True)
asyncio.run(run())

View file

@@ -6,7 +6,6 @@ class Completion:
system_prompt=("ASSUME I HAVE FULL ACCESS TO COCALC. ENCLOSE MATH IN $. "
"INCLUDE THE LANGUAGE DIRECTLY AFTER THE TRIPLE BACKTICKS "
"IN ALL MARKDOWN CODE BLOCKS. How can I do the following using CoCalc?")) -> str:
# Initialize a session with custom headers
session = self._initialize_session()

View file

@@ -1,6 +1,5 @@
import cocalc
response = cocalc.Completion.create(
prompt='hello world'
)

View file

@@ -1,7 +1,8 @@
# Import necessary libraries
from requests import get
from os import urandom
from json import loads
from os import urandom
from requests import get
# Generate a random session ID
sessionId = urandom(10).hex()

View file

@@ -1,6 +1,8 @@
import websockets
from json import dumps, loads
import websockets
# Define the asynchronous function to test the WebSocket connection

View file

@@ -1,7 +1,8 @@
# Import required libraries
from tls_client import Session
from uuid import uuid4
from browser_cookie3 import chrome
from tls_client import Session
class OpenAIChat:

View file

@@ -1,7 +1,8 @@
import requests
import json
import re
import requests
headers = {
'authority': 'openai.a2hosted.com',
'accept': 'text/event-stream',
@@ -13,10 +14,12 @@ headers = {
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.0.0',
}
def create_query_param(conversation):
encoded_conversation = json.dumps(conversation)
return encoded_conversation.replace(" ", "%20").replace('"', '%22').replace("'", "%27")
user_input = input("Enter your message: ")
data = [

View file

@@ -1,9 +1,9 @@
from requests import post, get
from json import dumps
# from mail import MailClient
from time import sleep
from re import findall
from requests import post, get
html = get('https://developermail.com/mail/')
print(html.cookies.get('mailboxId'))
email = findall(r'mailto:(.*)">', html.text)[0]

View file

@@ -1,6 +1,8 @@
import requests
import email
import requests
class MailClient:
def __init__(self):

View file

@@ -30,8 +30,7 @@ json_data = {
],
}
response = requests.post('https://openprompt.co/api/chat2', cookies=cookies, headers=headers, json=json_data, stream=True)
response = requests.post('https://openprompt.co/api/chat2', cookies=cookies, headers=headers, json=json_data,
stream=True)
for chunk in response.iter_content(chunk_size=1024):
print(chunk)

View file

@@ -1,7 +1,6 @@
access_token = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV'
supabase_auth_token = '%5B%22eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV8%22%2C%22_Zp8uXIA2InTDKYgo8TCqA%22%2Cnull%2Cnull%2Cnull%5D'
idk = [
"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV8",
"_Zp8uXIA2InTDKYgo8TCqA", None, None, None]

View file

@@ -1,6 +1,7 @@
from requests import post
from time import time
from requests import post
headers = {
'authority': 'www.t3nsor.tech',
'accept': '*/*',
@@ -19,10 +20,9 @@ headers = {
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
}
class T3nsorResponse:
class Completion:
class Choices:
def __init__(self, choice: dict) -> None:
self.text = choice['text']
@@ -47,7 +47,6 @@ class T3nsorResponse:
return f'''<__main__.APIResponse.Usage(\n prompt_tokens = {self.prompt_tokens},\n completion_tokens = {self.completion_tokens},\n total_tokens = {self.total_tokens})object at 0x1337>'''
def __init__(self, response_dict: dict) -> None:
self.response_dict = response_dict
self.id = response_dict['id']
self.object = response_dict['object']
@@ -59,6 +58,7 @@ class T3nsorResponse:
def json(self) -> dict:
return self.response_dict
class Completion:
model = {
'model': {
@@ -70,7 +70,6 @@ class Completion:
def create(
prompt: str = 'hello world',
messages: list = []) -> T3nsorResponse:
response = post('https://www.t3nsor.tech/api/chat', headers=headers, json=Completion.model | {
'messages': messages,
'key': '',
@@ -95,6 +94,7 @@ class Completion:
}
})
class StreamCompletion:
model = {
'model': {
@@ -106,7 +106,6 @@ class StreamCompletion:
def create(
prompt: str = 'hello world',
messages: list = []) -> T3nsorResponse:
print('t3nsor api is down, this may not work, refer to another module')
response = post('https://www.t3nsor.tech/api/chat', headers=headers, stream=True, json=Completion.model | {

View file

@@ -1,7 +1,3 @@
import gptbz
import asyncio
# asyncio.run(gptbz.test())
import requests

View file

@@ -1,8 +1,10 @@
from curl_cffi import requests
from json import loads
from queue import Queue, Empty
from re import findall
from threading import Thread
from queue import Queue, Empty
from curl_cffi import requests
class Completion:
# experimental
@@ -22,7 +24,8 @@ class Completion:
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
}
requests.post('https://chatbot.theb.ai/api/chat-process', headers=headers, content_callback=Completion.handle_stream_response,
requests.post('https://chatbot.theb.ai/api/chat-process', headers=headers,
content_callback=Completion.handle_stream_response,
json={
'prompt': 'hello world',
'options': {}
@@ -48,10 +51,12 @@ class Completion:
def handle_stream_response(response):
Completion.message_queue.put(response.decode())
def start():
for message in Completion.create():
yield message['delta']
if __name__ == '__main__':
for message in start():
print(message)

View file

@@ -1,6 +1,5 @@
import requests
token = requests.get('https://play.vercel.ai/openai.jpeg', headers={
'authority': 'play.vercel.ai',
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
@@ -25,5 +24,4 @@ for chunk in requests.post('https://play.vercel.ai/api/generate', headers=header
'frequencyPenalty': 1,
'presencePenalty': 1,
'stopSequences': []}).iter_lines():
print(chunk)

View file

@@ -1,21 +1,25 @@
from requests import Session
from names import get_first_name, get_last_name
from random import choice
from requests import post
from time import time
from colorama import Fore, init; init()
from colorama import Fore, init;
from names import get_first_name, get_last_name
from requests import Session
from requests import post
init()
class logger:
@staticmethod
def info(string) -> print:
import datetime
now = datetime.datetime.now()
return print(f"{Fore.CYAN}{now.strftime('%Y-%m-%d %H:%M:%S')} {Fore.BLUE}INFO {Fore.MAGENTA}__main__ -> {Fore.RESET}{string}")
return print(
f"{Fore.CYAN}{now.strftime('%Y-%m-%d %H:%M:%S')} {Fore.BLUE}INFO {Fore.MAGENTA}__main__ -> {Fore.RESET}{string}")
class SonicResponse:
class Completion:
class Choices:
def __init__(self, choice: dict) -> None:
self.text = choice['text']
@@ -40,7 +44,6 @@ class SonicResponse:
return f'''<__main__.APIResponse.Usage(\n prompt_tokens = {self.prompt_tokens},\n completion_tokens = {self.completion_tokens},\n total_tokens = {self.total_tokens})object at 0x1337>'''
def __init__(self, response_dict: dict) -> None:
self.response_dict = response_dict
self.id = response_dict['id']
self.object = response_dict['object']
@@ -52,6 +55,7 @@ class SonicResponse:
def json(self) -> dict:
return self.response_dict
class Account:
session = Session()
session.headers = {
@@ -105,7 +109,8 @@ class Account:
logger.info(f"\x1b[31mtoken\x1b[0m : '{response.json()['token'][:30]}...'")
start = time()
response = Account.session.post("https://api.writesonic.com/v1/business/set-business-active", headers={"authorization": "Bearer " + response.json()['token']})
response = Account.session.post("https://api.writesonic.com/v1/business/set-business-active",
headers={"authorization": "Bearer " + response.json()['token']})
key = response.json()["business"]["api_key"]
if logging: logger.info(f"\x1b[31mgot key\x1b[0m : '{key}' ({int(time() - start)}s)")
@@ -129,8 +134,8 @@ class Completion:
enable_memory: bool = False,
enable_google_results: bool = False,
history_data: list = []) -> SonicResponse:
response = post('https://api.writesonic.com/v2/business/content/chatsonic?engine=premium', headers = {"X-API-KEY": api_key},
response = post('https://api.writesonic.com/v2/business/content/chatsonic?engine=premium',
headers={"X-API-KEY": api_key},
json={
"enable_memory": enable_memory,
"enable_google_results": enable_google_results,