From 011d0babc265acfe35927c898a276ce8b9a261f3 Mon Sep 17 00:00:00 2001
From: "t.me/xtekky" <98614666+xtekky@users.noreply.github.com>
Date: Thu, 27 Apr 2023 19:26:36 +0100
Subject: [PATCH] remove phind
---
README.md | 9 +-
gui/README.md | 8 +-
gui/streamlit_app.py | 21 +-
phind/README.md | 34 ---
phind/__init__.py | 289 ---------------------
phind/__pycache__/__init__.cpython-311.pyc | Bin 8642 -> 0 bytes
testing/phind_test.py | 34 ---
unfinished/easyai/main.py | 42 ---
you/__init__.py | 5 +-
9 files changed, 15 insertions(+), 427 deletions(-)
delete mode 100644 phind/README.md
delete mode 100644 phind/__init__.py
delete mode 100644 phind/__pycache__/__init__.cpython-311.pyc
delete mode 100644 testing/phind_test.py
delete mode 100644 unfinished/easyai/main.py
diff --git a/README.md b/README.md
index 7e3304f..cd4f4a9 100644
--- a/README.md
+++ b/README.md
@@ -40,7 +40,6 @@ Please note the following:
| **Usage Examples** | | | |
| `forefront` | Example usage for forefront (gpt-4) | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](./forefront/README.md) | ![Active](https://img.shields.io/badge/Active-brightgreen) | | |
| `quora (poe)` | Example usage for quora | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](./quora/README.md) | ![Active](https://img.shields.io/badge/Active-brightgreen) | |
-| `phind` | Example usage for phind | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](./phind/README.md) | ![Inactive](https://img.shields.io/badge/Active-brightgreen) |
| `you` | Example usage for you | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](./you/README.md) | ![Active](https://img.shields.io/badge/Active-brightgreen) |
| **Try it Out** | | | |
| Google Colab Jupyter Notebook | Example usage for gpt4free | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/DanielShemesh/gpt4free-colab/blob/main/gpt4free.ipynb) | - |
@@ -65,7 +64,6 @@ Please note the following:
| [writesonic.com](https://writesonic.com) | GPT-3.5 / Internet |
| [t3nsor.com](https://t3nsor.com) | GPT-3.5 |
| [you.com](https://you.com) | GPT-3.5 / Internet / good search|
-| [phind.com](https://phind.com) | GPT-4 / Internet / good search |
| [sqlchat.ai](https://sqlchat.ai) | GPT-3.5 |
| [chat.openai.com/chat](https://chat.openai.com/chat) | GPT-3.5 |
| [bard.google.com](https://bard.google.com) | custom / search |
@@ -75,13 +73,10 @@ Please note the following:
## Best sites
#### gpt-4
-- [`/phind`](./phind/README.md)
-- pro: only stable gpt-4 with streaming ( no limit )
-- contra: weird backend prompting
-- why not `ora` anymore ? gpt-4 requires login + limited
+- [`/forefront`](./forefront/README.md)
#### gpt-3.5
-- looking for a stable api at the moment
+- [`/you`](./you/README.md)
## Install
download or clone this GitHub repo
diff --git a/gui/README.md b/gui/README.md
index 379cf15..133abe4 100644
--- a/gui/README.md
+++ b/gui/README.md
@@ -1,9 +1,11 @@
# gpt4free gui
+move `streamlit_app.py` into base folder to run
+
+
preview:
-
-
-run:
+
+run:
diff --git a/gui/streamlit_app.py b/gui/streamlit_app.py
index a35196e..4e0a618 100644
--- a/gui/streamlit_app.py
+++ b/gui/streamlit_app.py
@@ -4,25 +4,16 @@ import sys
sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
import streamlit as st
-import phind
-
-# Set cloudflare clearance and user agent
-phind.cloudflare_clearance = ''
-phind.phind_api = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
-
+import you
def get_answer(question: str) -> str:
# Set cloudflare clearance cookie and get answer from GPT-4 model
try:
- result = phind.Completion.create(
- model='gpt-4',
- prompt=question,
- results=phind.Search.create(question, actualSearch=True),
- creative=False,
- detailed=False,
- codeContext=''
- )
- return result.completion.choices[0].text
+ result = you.Completion.create(
+ prompt = question)
+
+ return result['response']
+
except Exception as e:
# Return error message if an exception occurs
return f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'
diff --git a/phind/README.md b/phind/README.md
deleted file mode 100644
index 806bdf4..0000000
--- a/phind/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-### Example: `phind` (use like openai pypi package)
-
-```python
-import phind
-
-# set cf_clearance cookie (needed again)
-phind.cf_clearance = 'xx.xx-1682166681-0-160'
-phind.user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36' # same as the one from browser you got cf_clearance from
-
-prompt = 'who won the quatar world cup'
-
-# help needed: not getting newlines from the stream, please submit a PR if you know how to fix this
-# stream completion
-for result in phind.StreamingCompletion.create(
- model = 'gpt-4',
- prompt = prompt,
- results = phind.Search.create(prompt, actualSearch = True), # create search (set actualSearch to False to disable internet)
- creative = False,
- detailed = False,
- codeContext = ''): # up to 3000 chars of code
-
- print(result.completion.choices[0].text, end='', flush=True)
-
-# normal completion
-result = phind.Completion.create(
- model = 'gpt-4',
- prompt = prompt,
- results = phind.Search.create(prompt, actualSearch = True), # create search (set actualSearch to False to disable internet)
- creative = False,
- detailed = False,
- codeContext = '') # up to 3000 chars of code
-
-print(result.completion.choices[0].text)
-```
diff --git a/phind/__init__.py b/phind/__init__.py
deleted file mode 100644
index 863360c..0000000
--- a/phind/__init__.py
+++ /dev/null
@@ -1,289 +0,0 @@
-from datetime import datetime
-from queue import Queue, Empty
-from threading import Thread
-from time import time
-from urllib.parse import quote
-
-from curl_cffi.requests import post
-
-cf_clearance = ''
-user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
-
-
-class PhindResponse:
- class Completion:
- class Choices:
- def __init__(self, choice: dict) -> None:
- self.text = choice['text']
- self.content = self.text.encode()
- self.index = choice['index']
- self.logprobs = choice['logprobs']
- self.finish_reason = choice['finish_reason']
-
- def __repr__(self) -> str:
- return f'''<__main__.APIResponse.Completion.Choices(\n text = {self.text.encode()},\n index = {self.index},\n logprobs = {self.logprobs},\n finish_reason = {self.finish_reason})object at 0x1337>'''
-
- def __init__(self, choices: dict) -> None:
- self.choices = list(map(self.Choices, choices))
-
- class Usage:
- def __init__(self, usage_dict: dict) -> None:
- self.prompt_tokens = usage_dict['prompt_tokens']
- self.completion_tokens = usage_dict['completion_tokens']
- self.total_tokens = usage_dict['total_tokens']
-
- def __repr__(self):
- return f'''<__main__.APIResponse.Usage(\n prompt_tokens = {self.prompt_tokens},\n completion_tokens = {self.completion_tokens},\n total_tokens = {self.total_tokens})object at 0x1337>'''
-
- def __init__(self, response_dict: dict) -> None:
- self.response_dict = response_dict
- self.id = response_dict['id']
- self.object = response_dict['object']
- self.created = response_dict['created']
- self.model = response_dict['model']
- self.completion = self.Completion(response_dict['choices'])
- self.usage = self.Usage(response_dict['usage'])
-
- def json(self) -> dict:
- return self.response_dict
-
-
-class Search:
- def create(prompt: str, actualSearch: bool = True, language: str = 'en') -> dict: # None = no search
- if user_agent == '':
- raise ValueError('user_agent must be set, refer to documentation')
- if cf_clearance == '':
- raise ValueError('cf_clearance must be set, refer to documentation')
-
- if not actualSearch:
- return {
- '_type': 'SearchResponse',
- 'queryContext': {
- 'originalQuery': prompt
- },
- 'webPages': {
- 'webSearchUrl': f'https://www.bing.com/search?q={quote(prompt)}',
- 'totalEstimatedMatches': 0,
- 'value': []
- },
- 'rankingResponse': {
- 'mainline': {
- 'items': []
- }
- }
- }
-
- headers = {
- 'authority': 'www.phind.com',
- 'accept': '*/*',
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'cookie': f'cf_clearance={cf_clearance}',
- 'origin': 'https://www.phind.com',
- 'referer': 'https://www.phind.com/search?q=hi&c=&source=searchbox&init=true',
- 'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"macOS"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'user-agent': user_agent
- }
-
- return post('https://www.phind.com/api/bing/search', headers=headers, json={
- 'q': prompt,
- 'userRankList': {},
- 'browserLanguage': language}).json()['rawBingResults']
-
-
-class Completion:
- def create(
- model='gpt-4',
- prompt: str = '',
- results: dict = None,
- creative: bool = False,
- detailed: bool = False,
- codeContext: str = '',
- language: str = 'en') -> PhindResponse:
-
- if user_agent == '':
- raise ValueError('user_agent must be set, refer to documentation')
-
- if cf_clearance == '':
- raise ValueError('cf_clearance must be set, refer to documentation')
-
- if results is None:
- results = Search.create(prompt, actualSearch=True)
-
- if len(codeContext) > 2999:
- raise ValueError('codeContext must be less than 3000 characters')
-
- models = {
- 'gpt-4': 'expert',
- 'gpt-3.5-turbo': 'intermediate',
- 'gpt-3.5': 'intermediate',
- }
-
- json_data = {
- 'question': prompt,
- 'bingResults': results, # response.json()['rawBingResults'],
- 'codeContext': codeContext,
- 'options': {
- 'skill': models[model],
- 'date': datetime.now().strftime("%d/%m/%Y"),
- 'language': language,
- 'detailed': detailed,
- 'creative': creative
- }
- }
-
- headers = {
- 'authority': 'www.phind.com',
- 'accept': '*/*',
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'content-type': 'application/json',
- 'cookie': f'cf_clearance={cf_clearance}',
- 'origin': 'https://www.phind.com',
- 'referer': 'https://www.phind.com/search?q=hi&c=&source=searchbox&init=true',
- 'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"macOS"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'user-agent': user_agent
- }
-
- completion = ''
- response = post('https://www.phind.com/api/infer/answer', headers=headers, json=json_data, timeout=99999,
- impersonate='chrome110')
- for line in response.text.split('\r\n\r\n'):
- completion += (line.replace('data: ', ''))
-
- return PhindResponse({
- 'id': f'cmpl-1337-{int(time())}',
- 'object': 'text_completion',
- 'created': int(time()),
- 'model': models[model],
- 'choices': [{
- 'text': completion,
- 'index': 0,
- 'logprobs': None,
- 'finish_reason': 'stop'
- }],
- 'usage': {
- 'prompt_tokens': len(prompt),
- 'completion_tokens': len(completion),
- 'total_tokens': len(prompt) + len(completion)
- }
- })
-
-
-class StreamingCompletion:
- message_queue = Queue()
- stream_completed = False
-
- def request(model, prompt, results, creative, detailed, codeContext, language) -> None:
-
- models = {
- 'gpt-4': 'expert',
- 'gpt-3.5-turbo': 'intermediate',
- 'gpt-3.5': 'intermediate',
- }
-
- json_data = {
- 'question': prompt,
- 'bingResults': results,
- 'codeContext': codeContext,
- 'options': {
- 'skill': models[model],
- 'date': datetime.now().strftime("%d/%m/%Y"),
- 'language': language,
- 'detailed': detailed,
- 'creative': creative
- }
- }
-
- headers = {
- 'authority': 'www.phind.com',
- 'accept': '*/*',
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'content-type': 'application/json',
- 'cookie': f'cf_clearance={cf_clearance}',
- 'origin': 'https://www.phind.com',
- 'referer': 'https://www.phind.com/search?q=hi&c=&source=searchbox&init=true',
- 'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"macOS"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'user-agent': user_agent
- }
-
- response = post('https://www.phind.com/api/infer/answer',
- headers=headers, json=json_data, timeout=99999, impersonate='chrome110',
- content_callback=StreamingCompletion.handle_stream_response)
-
- StreamingCompletion.stream_completed = True
-
- @staticmethod
- def create(
- model: str = 'gpt-4',
- prompt: str = '',
- results: dict = None,
- creative: bool = False,
- detailed: bool = False,
- codeContext: str = '',
- language: str = 'en'):
-
- if user_agent == '':
- raise ValueError('user_agent must be set, refer to documentation')
- if cf_clearance == '':
- raise ValueError('cf_clearance must be set, refer to documentation')
-
- if results is None:
- results = Search.create(prompt, actualSearch=True)
-
- if len(codeContext) > 2999:
- raise ValueError('codeContext must be less than 3000 characters')
-
- Thread(target=StreamingCompletion.request, args=[
- model, prompt, results, creative, detailed, codeContext, language]).start()
-
- while StreamingCompletion.stream_completed != True or not StreamingCompletion.message_queue.empty():
- try:
- chunk = StreamingCompletion.message_queue.get(timeout=0)
-
- if chunk == b'data: \r\ndata: \r\ndata: \r\n\r\n':
- chunk = b'data: \n\n\r\n\r\n'
-
- chunk = chunk.decode()
-
- chunk = chunk.replace('data: \r\n\r\ndata: ', 'data: \n')
- chunk = chunk.replace('\r\ndata: \r\ndata: \r\n\r\n', '\n\n\r\n\r\n')
- chunk = chunk.replace('data: ', '').replace('\r\n\r\n', '')
-
- yield PhindResponse({
- 'id': f'cmpl-1337-{int(time())}',
- 'object': 'text_completion',
- 'created': int(time()),
- 'model': model,
- 'choices': [{
- 'text': chunk,
- 'index': 0,
- 'logprobs': None,
- 'finish_reason': 'stop'
- }],
- 'usage': {
- 'prompt_tokens': len(prompt),
- 'completion_tokens': len(chunk),
- 'total_tokens': len(prompt) + len(chunk)
- }
- })
-
- except Empty:
- pass
-
- @staticmethod
- def handle_stream_response(response):
- StreamingCompletion.message_queue.put(response)
diff --git a/phind/__pycache__/__init__.cpython-311.pyc b/phind/__pycache__/__init__.cpython-311.pyc
deleted file mode 100644
index b72e9d3ded7c153c6b83ff187816761e2701e498..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001
literal 8642
zcmbU`TWk|qmR02{zwNx8K%8I<7)-}`Oh`i0yb}nIB(R;pbVFEo#Z^g6Y?rxJ>CnbV
zXoPk%tJW?v>J>dbX?0p+*f7mXthB4)b65LNYrh<1sjZTbkXG|Ce@4k@S7<->oLhb;
zFf&_roqO&*b?-fO?|I!*f9G=98Mr>Zd|dih55xQyR*ILqfNcB+AP*Udk=Pg$XRom=
z;#`c2o35ECZi?}7{u)nlJ|@J4YXZw~jBJvGr{>2j!+Zy~aa}V@7Rf5vB)jC0oRUj&
zOLbDc)F3rVO;YoUOE!ls#b-HYl#yEQGm_^q2V=lp7{f{_tw3q3q1Y&82T*p_Q0!9s
zBop-h4u=S`x^Qbz(PTg@ld`Htl?0%?7L8+*RT4EBaorY~CrVuI>+6N)hDa
z0vMf*$|U7lT3YH(&PNkccSMP&Ob2@orY^{du2~W~`{4(-j`eoG4>&~*cgbq#%p}E!
z0H?@cmz)S)F&YQD#5ip{)FsIj=}*;*$z&`V5j9}2=ccM8QZ4hEmQ;`Q^iSnPo44)<9kFGh+Wh|e{uFnj
zH)Zk1#mJ>ezb?q}r1qK4M--x_9I6<(Z37t!JQrsJnmAci`f=sqwKcUo5&H
z`$px+f)Xq+*#n$+)1PlroE1s5&?V^7ZQ`Of4@>}CW-$_xlUl0IC_^zZF}Enr$th<<
z0WK0+Nc$`)>t=&x-9qG9nZRb+R5=og%!d}mRDB5z#g&<8OineHTaqzRn^j0W<;J#I
zS%W^3tZJ3@xFX4wbv3HVDf^--lMrx{&>{sBMPPVs`*$Txlj;M42>m
zvxKjfOjxN2kjHR;S89O1rFmdfu4+`AkzPPCRZ)uj&j5w5q6o!45(6(TOn`}!Z)>^$
z8z|q_tubXTNt78?cg;o@&?loMh!G?&y4O;gJ^
z9S+0fS~%RD{7knNE2-cg+N18`qh6eT48T1m7wB5M`Y7>$|H76F9DEf0Au#wNFt{E#
zoDCd?MkmsL{73=Rj_5U`*@bvG8#wlW&(`}h^uGb)F_FqWvq1yh^%x-F&e%V7cYg_*
z2z^RB40RLKOZy6S9_pptgu3vx4Fz^0b>e6^92cVrSfx{!&li=iLNkhzd%*643wIgI
z!gb7-YVD!~y79$kpRY_Pio%v2Rg!@{-ZTc4nVWJ%^NE_T_jX@@|IqQEKqZFYR@6wz
z6P9#BtBOnMBf9|NCKEYH!r?z^lZ{9B;28G+++*?_)0Q@6ra#F}%RfxVUQEZP*A7Tj{w{&
z{qmgPI=-sq87Q9dw5SDc)|ED`?#VM)z8e}7cfkyx(uScU5VSFGv2NW!`b&`XWo8yE
z+4o&UN=vlVivgs*lfikFMt%`_Wib)gY8g9#0Lt)qoTYNB7Z~5)otS
zaX^aQ_QP;0?$iI=V>0cd#{b#XFGs!^`)X{>v+nK9dV5O*Ll{^RLe4nK5XGrA{KOD<
zy8o%&e~_C1&g?g4&J|~dnVv5JSZ^QAwvR54d_J}eKVw=m!7cpeRWLfEWBK6>me5
zyxY8W$roZMr1kS2l2Jc_i!Xgi5AwBpWz`H9wL%vw@RX>B!JO-Igll#bXpI
z*MVFNxqm04>3j+z5J*liXGG
zAe9u2yNwi$J5zqTlJ2@*@E}{Xx2(8mermN>pvy%L0SO;eFH;VSs+;Fbwo@!R3M|o6
zV7U#nvo?HJ!NFCY&g@~nz`Ya`;4oPhFFN3|u1*`iiB
zI=z5W9o6F828CHg7qjJp9s-&-py=9gxPozi)%o5spSAAHb-uSYlI`qU=0CR}elFWN0JsD3^V!Zpz@3PXW;+i7
zZqIf4zfrzY*5c34XS>g?cS8FpwAW$#&982*bv#?j22ZSap3HWh#OIgcx7CW^&Qm$5
z2L$A#;zR`2ft>)iPS>PHQFWeXD;lfc4%={DR0jaCaCJ@Dy4@M;ZaQtyTI&yS5sY;#
z>(*IptM-fpS)-s85cM|zih2b$fOiY$K$EtIuW9>cCDmsUP{X!|uW9>cCAHBifaXeh
z2Fo?jU@W(R&8*;m0FOcOR$wSFzD@-0(q$`P8yF>2_*NSnzfBSkM{og3Jn35{%Y6Y(
z{Z`l;yKbJO>7OZ`lM}kxFw(kHjA#(y8YDPE7Lp@0kc^ac7A7kBkLPsOG^_5GK>O27
z6wa?&$s#Zy6V)2}OXh1%;^*M>vjSX6mO1DFeXJ||B-qu$yFjs8ftOn)8_d9@Imy0a
zdg`doZ>(1GSL*T@uxC3(ge=n9GHE!1(HQ6Wo+#$sHABoL$>gipoTqO4o-s27VSWH_m7
za$F7ax)7#GC+97ka~ShbaE`aan3o~iQS~}Vwk^QiMJ7}GD!ED)GmJfAGTMVY7Wf0c
z*ju_2^GYAW0AmmZ>vc0kS%QYK!p?$*qSEMYA}*aardW(=z!SmMgiHy|YU+Gafg=t+
z88nd#P>_oV#sH}JAb4lv%wskTWXff%Mz>K#OQ`+|0IGF+o~gIFU+oCKYTmPYHE-f}
zH|DL(zANm%U-^FUUvB){jm+LD_~%01xsF|V3*XV0XAn4Q1KpbgrQ_=k3hx+MluQ%MfpuAA1{a*aVy>0(a@ED_|R^0V&%A8&mm%
zee;6u23Uai1B7ONGuq}&WX
zoPAo8`Mk?%74bq^sJ1(jX@ST7q)w}t9de{Fe)$8$Paw7cs|0pw#dOyKaSxXk(v~zm
zP8qXnMSBa?o2B`xt<0*~c(xi!*6O*^<}{x+NwyVO_Z9xGRdU?5X~oYf&~?>X-QTE5
z&a_o>J#{~ZHTw?Njb3-{Y5UC*XRG#Ndu1urRnI@d+-Se+NZZnmRrI)9F8J_LV_{RU
zS9find$qC=WuyiTQyYa#>nLKi+0W{^sTXMc|MaZhjkFCu3^7u()Uv|Qu~qwDlWeG>
z*{ZpzqWuZ|Qt+~p2mDy;O3jBX@GWfzfKysDdxPsA|3$*%yP)OFnI0
zO!)eHdwYG6d69qzh6F35XKyEEqUlcfMk15AEJeYUfbpGy0aGqi#QyGq5TvhXl$51V
zO1y=+9NlEAq^cdh-P>0DJP<4xv
zM2bq#rlbOI_chT3d>iQz6Y7#oe)l~N%o*OK4!@5ni&_{yAt(t+P4O;*xc
z4?2ywo_azFJK?ety1=-EbiH*mJtL^P4I|qyzB?HKqHYC?Nk6aXJca?P6B-N0TUr?D
z0AxMYFkH$m6kO>wXjcCkj21+%jf}Ht`NT_)_hJ0!@zt}>4?T#ldroFOCznTm>u@jM
zS$DK&9qnIAFC6}i!~e3;o9P%^Zye7yj%S?Xc{9_xE7#eTYuyJ=N549{W_{*gZy(6E53D-}moF?|_-%bhuC?!1!+*~|F+Vkb
zd-2i5Olu$fbFBwnw(k7;&@Tp827h+vi#xg2jxXge?!F52<^ucSg8_U8$lHV+z^Vb-
z>VEHL_8(jee7o>y;bkDWcJN!}k%9~Rc=!T%ReiytUfY(fY
zgJTl`hBFT1kYb_gN%dyp>{(hGF~pYB3&s#e^zaE{^pLV$96?Vx2tI~e{R;r)IO0`(
zYtGY_YxCxIbmG6g1Mqy^kN_+Jcvat&7qDVx8a;UnVpgW9HE%=Aj+g^6C*#?XcOm9x
znmk5#H{0~j79EIQMj6bZzZ=#_N47E{AU^hkJUpn5|!kE)t(
znU_Tg44v`n7{SmOqb81eV?AnQ-;a}@z#|w7sD6NX6U(wWrZZFh&oRDC^*_gSWUBu;
zW_PCg|Ei(wLF?KFnTDbDhM{c3&^`0324BwIlxvOTngcoazFb`>*D!?(y>m~_(~