diff --git a/README.md b/README.md
index 7ba4de7..5253992 100644
--- a/README.md
+++ b/README.md
@@ -95,28 +95,86 @@ docker-compose up --build -d
 
 ## 🚀 Let's Use GPT4
 
-> Return when chat complete http://127.0.0.1:3000/ask?prompt=***&model=***
+> Returns when the chat completes http://127.0.0.1:3000/ask?prompt=***&model=***&site=***
 
-> Return with eventstream http://127.0.0.1:3000/ask/stream?prompt=***&model=***
+> Returns an event stream http://127.0.0.1:3000/ask/stream?prompt=***&model=***&site=***
 
-### Common parameters📝
-- `prompt`: your question
-- `model`: target web site include:`forefront` `you` `mcbbs`
+### Request Params 📝
 
-### WebSite Unique parameters🔒
-- mcbbs
-  - `messages`: For example `[{"role":"system","content":"IMPORTANT: You are a virtual assistant powered by the gpt-3.5-turbo model, now time is 2023/6/3 13:42:27}"},{"role":"user","content":"你好\n"},{"role":"assistant","content":"你好!有什么我可以帮助你的吗?"},{"role":"user","content":"写个冒泡排序\n"}]`
-  - `temperature`: 0~1
+All parameters are passed in the query string.
+
+- `prompt`: your question, either a plain `string` or a `jsonstr` (a JSON-encoded message list carrying the conversation history).
+  - example `jsonstr`: `[{"role":"user","content":"你好\n"},{"role":"assistant","content":"你好!有什么我可以帮助你的吗?"},{"role":"user","content":"你是谁"}]`
+  - example `string`: `你是谁`
+- `model`: defaults to `gpt3.5-turbo`. Supported models: `gpt4`, `gpt3.5-turbo`
+- `site`: defaults to `you`. Target sites: `forefront`, `you`, `mcbbs`
+
+### Response Params 🔙
+
+Response when the chat completes (`/ask`):
+
+```typescript
+interface ChatResponse {
+  content: string;
+  error?: string;
+}
+```
+
+Streamed response (`/ask/stream`), sent as server-sent events:
+
+```
+event: message
+data: {"content":"I"}
+
+event: message
+data: {"content":"'m"}
+
+event: done
+data: {"content":"done"}
+
+event: error
+data: {"error":"something went wrong"}
+```
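+
+A minimal client sketch (not part of this repo) showing one way to call `/ask` from TypeScript with a `jsonstr` prompt. Note that the prompt must be URL-encoded; the example URLs further down omit that encoding for readability. It assumes a runtime with a global `fetch` (Node 18+ or a browser) and the server listening on port 3000.
+
+```typescript
+// Hypothetical helper, shown only to illustrate the query parameters and response shape.
+interface ChatResponse {
+  content: string;
+  error?: string;
+}
+
+type ChatMessage = { role: 'system' | 'user' | 'assistant'; content: string };
+
+async function ask(prompt: string | ChatMessage[], site = 'you', model = 'gpt3.5-turbo'): Promise<string> {
+  // A jsonstr prompt is just the message list serialized to JSON, then URL-encoded.
+  const p = typeof prompt === 'string' ? prompt : JSON.stringify(prompt);
+  const url = `http://127.0.0.1:3000/ask?site=${site}&model=${model}&prompt=${encodeURIComponent(p)}`;
+  const res: ChatResponse = await (await fetch(url)).json();
+  if (res.error) throw new Error(res.error);
+  return res.content;
+}
+
+// Usage: a single-turn string prompt, or a message list carrying history.
+ask('who are you').then(console.log);
+ask([
+  {role: 'user', content: 'hello'},
+  {role: 'assistant', content: 'Hi there! How can I assist you today?'},
+  {role: 'user', content: 'who are you'},
+]).then(console.log);
+```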
 
 ### Example💡
-- `forefront`
-  - http://127.0.0.1:3000/ask?prompt=whoareyou&model=forefront
-  - http://127.0.0.1:3000/ask/stream?prompt=whoareyou&model=forefront
-- `mcbbs`
-  - [http://127.0.0.1:3000/ask?prompt=nothing&model=mcbbs&messages=[{"role":"system","content":"IMPORTANT: You are a virtual assistant powered by the gpt-3.5-turbo model, now time is 2023/6/3 13:42:27}"},{"role":"user","content":"你好\n"},{"role":"assistant","content":"你好!有什么我可以帮助你的吗?"},{"role":"user","content":"写个冒泡排序\n"}]](http://127.0.0.1:3000/ask?prompt=nothing&model=mcbbs&messages=[{%22role%22:%22system%22,%22content%22:%22IMPORTANT:%20You%20are%20a%20virtual%20assistant%20powered%20by%20the%20gpt-3.5-turbo%20model,%20now%20time%20is%202023/6/3%2013:42:27}%22},{%22role%22:%22user%22,%22content%22:%22%E4%BD%A0%E5%A5%BD\n%22},{%22role%22:%22assistant%22,%22content%22:%22%E4%BD%A0%E5%A5%BD%EF%BC%81%E6%9C%89%E4%BB%80%E4%B9%88%E6%88%91%E5%8F%AF%E4%BB%A5%E5%B8%AE%E5%8A%A9%E4%BD%A0%E7%9A%84%E5%90%97%EF%BC%9F%22},{%22role%22:%22user%22,%22content%22:%22%E5%86%99%E4%B8%AA%E5%86%92%E6%B3%A1%E6%8E%92%E5%BA%8F\n%22}])
-- `you`
-  - http://127.0.0.1:3000/ask?prompt=whoareyou&model=you
-  - http://127.0.0.1:3000/ask/stream?prompt=whoareyou&model=you
+
+1. Request to site `you` with conversation history
+
+req:
+
+[127.0.0.1:3000/ask?site=you&prompt=[{"role":"user","content":"hello"},{"role":"assistant","content":"Hi there! How can I assist you today?"},{"role":"user","content":"who are you"}]]()
+
+res:
+
+```json
+{
+  "content": "Hi there! How can I assist you today?"
+}
+```
+
+The same request with a Chinese-language history:
+
+[127.0.0.1:3000/ask?site=you&prompt=[{"role":"user","content":"你好\n"},{"role":"assistant","content":"你好!有什么我可以帮助你的吗?"},{"role":"user","content":"你是谁"}]]()
+
+2. Request to site `you` with a streamed response
+
+req:
+
+[127.0.0.1:3000/ask/stream?site=you&prompt=who are you]()
+
+res:
+
+```
+event: message
+data: {"content":"I"}
+
+event: message
+data: {"content":"'m"}
+
+event: message
+data: {"content":" a"}
+
+event: message
+data: {"content":" search"}
+
+event: message
+data: {"content":" assistant"}
+........
+event: done
+data: {"content":"done"}
+```
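+
+A browser-side sketch (again, not part of this repo) that consumes `/ask/stream` with `EventSource` and reassembles the fragments; the `message`, `done`, and `error` event names match the trace above:
+
+```typescript
+const prompt = encodeURIComponent('who are you');
+const source = new EventSource(`http://127.0.0.1:3000/ask/stream?site=you&prompt=${prompt}`);
+let answer = '';
+
+source.addEventListener('message', (e) => {
+  // Each "message" event carries one fragment of the reply.
+  answer += JSON.parse(e.data).content;
+});
+source.addEventListener('done', () => {
+  console.log(answer); // e.g. "I'm a search assistant"
+  source.close();
+});
+source.addEventListener('error', (e) => {
+  // Fired both for server-sent "error" events and for connection errors.
+  console.error('stream error', (e as MessageEvent).data);
+  source.close();
+});
+```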
 
 ## 👥 Wechat Group
diff --git a/README_zh.md b/README_zh.md
index a652f60..729b0ad 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -91,32 +91,89 @@ docker-compose up --build -d
 
 [详细教程](https://icloudnative.io/posts/completely-free-to-use-gpt4/)
 
+## 🚀 Let's Use GPT4
 
-## 🚀 开始使用GPT4吧
+> 当对话结束时返回示例 http://127.0.0.1:3000/ask?prompt=***&model=***&site=***
 
-> 当对话结束时才会返回 http://127.0.0.1:3000/ask?prompt=***&model=***
+> 以stream模式返回示例 http://127.0.0.1:3000/ask/stream?prompt=***&model=***&site=***
 
-> 使用eventstream持续返回对话内容 http://127.0.0.1:3000/ask/stream?prompt=***&model=***
+### 请求参数,请放在query里 📝
 
-### 公共参数 📝
-- `prompt`: your question
-- `model`: target web site include:`forefront` `you` `mcbbs`
+- `prompt`: 你的问题,类型是`string` 或者 `jsonstr`
+  - `jsonstr`:包含上下文的json字符串,例如:`[{"role":"user","content":"你好\n"},{"role":"assistant","content":"你好!有什么我可以帮助你的吗?"},{"role":"user","content":"你是谁"}]`
+  - `string`: 单次对话,例如:`你是谁`
+- `model`: 默认 `gpt3.5-turbo`,可选模型:`gpt4` `gpt3.5-turbo`
+- `site`: 默认 `you`,目标网站:`forefront` `you` `mcbbs`
 
-### 个别网站独有参数 🔒
-- mcbbs
-  - `messages`: For example `[{"role":"system","content":"IMPORTANT: You are a virtual assistant powered by the gpt-3.5-turbo model, now time is 2023/6/3 13:42:27}"},{"role":"user","content":"你好\n"},{"role":"assistant","content":"你好!有什么我可以帮助你的吗?"},{"role":"user","content":"写个冒泡排序\n"}]`
-  - `temperature`: 0~1
-### 使用示例 💡
-- `forefront`
-  - http://127.0.0.1:3000/ask?prompt=whoareyou&model=forefront
-  - http://127.0.0.1:3000/ask/stream?prompt=whoareyou&model=forefront
-- `mcbbs`
-  - [http://127.0.0.1:3000/ask?prompt=nothing&model=mcbbs&messages=[{"role":"system","content":"IMPORTANT: You are a virtual assistant powered by the gpt-3.5-turbo model, now time is 2023/6/3 13:42:27}"},{"role":"user","content":"你好\n"},{"role":"assistant","content":"你好!有什么我可以帮助你的吗?"},{"role":"user","content":"写个冒泡排序\n"}]](http://127.0.0.1:3000/ask?prompt=nothing&model=mcbbs&messages=[{%22role%22:%22system%22,%22content%22:%22IMPORTANT:%20You%20are%20a%20virtual%20assistant%20powered%20by%20the%20gpt-3.5-turbo%20model,%20now%20time%20is%202023/6/3%2013:42:27}%22},{%22role%22:%22user%22,%22content%22:%22%E4%BD%A0%E5%A5%BD\n%22},{%22role%22:%22assistant%22,%22content%22:%22%E4%BD%A0%E5%A5%BD%EF%BC%81%E6%9C%89%E4%BB%80%E4%B9%88%E6%88%91%E5%8F%AF%E4%BB%A5%E5%B8%AE%E5%8A%A9%E4%BD%A0%E7%9A%84%E5%90%97%EF%BC%9F%22},{%22role%22:%22user%22,%22content%22:%22%E5%86%99%E4%B8%AA%E5%86%92%E6%B3%A1%E6%8E%92%E5%BA%8F\n%22}])
-- `you`
-  - http://127.0.0.1:3000/ask?prompt=whoareyou&model=you
-  - http://127.0.0.1:3000/ask/stream?prompt=whoareyou&model=you
+### 返回参数 🔙
+
+当对话结束时返回参数(/ask):
+
+```typescript
+interface ChatResponse {
+  content: string;
+  error?: string;
+}
+```
+
+stream模式返回参数示例(/ask/stream):
+
+```
+event: message
+data: {"content":"I"}
+
+event: message
+data: {"content":"'m"}
+
+event: done
+data: {"content":"done"}
+
+event: error
+data: {"error":"something went wrong"}
+```
+
+### 真实请求示例💡
+
+1. 请求you.com,包含上下文
+
+req:
+
+[127.0.0.1:3000/ask?site=you&prompt=[{"role":"user","content":"hello"},{"role":"assistant","content":"Hi there! How can I assist you today?"},{"role":"user","content":"who are you"}]]()
+
+res:
+
+```json
+{
+  "content": "Hi there! How can I assist you today?"
+}
+```
+
+[127.0.0.1:3000/ask?site=you&prompt=[{"role":"user","content":"你好\n"},{"role":"assistant","content":"你好!有什么我可以帮助你的吗?"},{"role":"user","content":"你是谁"}]]()
+
+2. 以stream模式请求you.com
+
+req:
+
+[127.0.0.1:3000/ask/stream?site=you&prompt=who are you]()
+
+res:
+
+```
+event: message
+data: {"content":"I"}
+
+event: message
+data: {"content":"'m"}
+
+event: message
+data: {"content":" a"}
+
+event: message
+data: {"content":" search"}
+
+event: message
+data: {"content":" assistant"}
+........
+event: done
+data: {"content":"done"}
+```
 
 ## 👥 加群细聊
diff --git a/index.ts b/index.ts
index b08df07..7c5c5a0 100644
--- a/index.ts
+++ b/index.ts
@@ -31,7 +31,7 @@ interface AskRes extends ChatResponse {
 }
 
 router.get('/ask', async (ctx) => {
-    const {prompt, model = ModelType.GPT3p5, site = Site.You} = ctx.query as unknown as AskReq;
+    const {prompt, model = ModelType.GPT3p5Turbo, site = Site.You} = ctx.query as unknown as AskReq;
     if (!prompt) {
         ctx.body = {error: `need prompt in query`} as AskRes;
         return;
@@ -50,7 +50,7 @@ router.get('/ask', async (ctx) => {
 });
 
 router.get('/ask/stream', async (ctx) => {
-    const {prompt, model = ModelType.GPT3p5, site = Site.You} = ctx.query as unknown as AskReq;
+    const {prompt, model = ModelType.GPT3p5Turbo, site = Site.You} = ctx.query as unknown as AskReq;
     ctx.set({
         "Content-Type": "text/event-stream;charset=utf-8",
         "Cache-Control": "no-cache",
@@ -60,16 +60,19 @@ router.get('/ask/stream', async (ctx) => {
     ctx.body = es.stream();
     if (!prompt) {
         es.write(Event.error, {error: 'need prompt in query'})
+        es.end();
         return;
     }
     const chat = chatModel.get(site);
     if (!chat) {
         es.write(Event.error, {error: `not support site: ${site} `})
+        es.end();
         return;
     }
     const tokenLimit = chat.support(model);
     if (!tokenLimit) {
         es.write(Event.error, {error: `${site} not support model ${model}`})
+        es.end();
         return;
     }
     await chat.askStream({prompt: PromptToString(prompt, tokenLimit), model}, es);
diff --git a/model/base.ts b/model/base.ts
index 870fc13..eed2a56 100644
--- a/model/base.ts
+++ b/model/base.ts
@@ -14,7 +14,7 @@ export type Message = {
 }
 
 export enum ModelType {
-    GPT3p5 = 'gpt3.5',
+    GPT3p5Turbo = 'gpt3.5-turbo',
     GPT4 = 'gpt4',
 }
 
diff --git a/model/forefront/index.ts b/model/forefront/index.ts
index 464d825..d3a4d96 100644
--- a/model/forefront/index.ts
+++ b/model/forefront/index.ts
@@ -293,7 +293,7 @@ export class Forefrontnew extends Chat {
         const [page, account, done, destroy] = this.pagePool.get();
         if (!account || !page) {
             stream.write(Event.error, {error: 'please wait init.....about 1 min'})
-            stream.stream().end();
+            stream.end();
             return;
         }
         try {
@@ -316,7 +316,7 @@ export class Forefrontnew extends Chat {
             const newAccount = this.accountPool.get();
             destroy(newAccount.id);
             stream.write(Event.error, {error: 'some thing error, try again later'});
-            stream.stream().end();
+            stream.end();
             return
         }
 
@@ -351,7 +351,7 @@ export class Forefrontnew extends Chat {
         } catch (e) {
             console.error(e);
         } finally {
-            stream.stream().end();
+            stream.end();
             await page.waitForSelector('.flex:nth-child(1) > div:nth-child(2) > .relative > .flex > .cursor-pointer')
             await page.click('.flex:nth-child(1) > div:nth-child(2) > .relative > .flex > .cursor-pointer')
             account.gpt4times += 1;
diff --git a/model/mcbbs/index.ts b/model/mcbbs/index.ts
index 7d5b46e..4db2ae2 100644
--- a/model/mcbbs/index.ts
+++ b/model/mcbbs/index.ts
@@ -36,7 +36,7 @@ export class Mcbbs extends Chat {
 
     support(model: ModelType): number {
         switch (model) {
-            case ModelType.GPT3p5:
+            case ModelType.GPT3p5Turbo:
                 return 2000;
             default:
                 return 0;
@@ -53,7 +53,7 @@ export class Mcbbs extends Chat {
         stream.read((event, data) => {
             switch (event) {
                 case Event.done:
-                    stream.stream().end();
+                    stream.end();
                     break;
                 case Event.message:
                     result.content = (data as MessageData).content
diff --git a/model/you/index.ts b/model/you/index.ts
index 4557320..a587672 100644
--- a/model/you/index.ts
+++ b/model/you/index.ts
@@ -73,7 +73,7 @@ export class You extends Chat {
 
     support(model: ModelType): number {
         switch (model) {
-            case ModelType.GPT3p5:
+            case ModelType.GPT3p5Turbo:
                 return 2000;
             default:
                 return 0;
@@ -124,7 +124,7 @@
                     break;
                 case 'done':
                     stream.write(Event.done, {content: 'done'})
-                    stream.stream().end();
+                    stream.end();
                     return;
                 default:
                     return;
diff --git a/utils/index.ts b/utils/index.ts
index 20b5bb6..a98993f 100644
--- a/utils/index.ts
+++ b/utils/index.ts
@@ -92,14 +92,18 @@ export class EventStream {
     private readonly pt: PassThrough = new PassThrough();
 
     write(event: T, data: Data) {
-        this.pt.write(`event: ${event}\n`,'utf-8');
-        this.pt.write(`data: ${JSON.stringify(data)}\n\n`,'utf-8');
+        this.pt.write(`event: ${event}\n`, 'utf-8');
+        this.pt.write(`data: ${JSON.stringify(data)}\n\n`, 'utf-8');
     }
 
     stream() {
         return this.pt;
     }
 
+    end(cb?: () => void) {
+        this.pt.end(cb)
+    }
+
     read(dataCB: DataCB, closeCB: () => void) {
         this.pt.setEncoding('utf-8');
         this.pt.pipe(es.split('\n\n').pipe(es.map(async (chunk: any, cb: any) => {
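
The pattern above — every `stream.stream().end()` call replaced by `stream.end()`, plus the extra `es.end()` calls in index.ts — leans on the new `EventStream.end()` helper, presumably so the SSE connection is actually closed once an error or the final `done` event has been written. Below is a rough sketch of how a site adapter is expected to drive the stream after this change. The adapter is illustrative only, not one of the real models, and the import path and exact types are assumptions taken from the surrounding diff:

```typescript
import {EventStream, Event} from './utils'; // assumed export location; adjust to the real path

// Stand-in adapter: emit a few fragments, then signal completion and close.
async function exampleAskStream(stream: EventStream) {
    try {
        for (const piece of ['I', "'m", ' a', ' bot']) {
            // One SSE "message" event per content fragment.
            stream.write(Event.message, {content: piece});
        }
        // Signal normal completion, mirroring model/you/index.ts.
        stream.write(Event.done, {content: 'done'});
    } catch (e) {
        stream.write(Event.error, {error: (e as Error).message});
    } finally {
        // New in this patch: close the underlying PassThrough directly
        // instead of reaching through stream.stream().end().
        stream.end();
    }
}
```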