// gpt4free-ts/index.ts
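//
// Entry point for the gpt4free-ts API server: a Koa app that proxies chat
// requests to several upstream GPT providers (see ./model) and exposes both a
// native /ask API and an OpenAI-compatible /v1/chat/completions API.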

import Koa, {Context, Middleware, Next} from 'koa';
import Router from 'koa-router';
import bodyParser from 'koa-bodyparser';
import cors from '@koa/cors';
import {ChatModelFactory, Site} from "./model";
import dotenv from 'dotenv';
import {ChatRequest, ChatResponse, Message, ModelType, PromptToString} from "./model/base";
import {Event, EventStream, getTokenSize, OpenaiEventStream, randomStr} from "./utils";
import moment from "moment";
process.setMaxListeners(30); // raise Node's max-listeners warning threshold (default 10) to 30
dotenv.config();
const app = new Koa();
app.use(cors());
const router = new Router();
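
// Top-level error middleware: log any error thrown downstream and return it
// to the client as a JSON body.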
const errorHandler = async (ctx: Context, next: Next) => {
    try {
        await next();
    } catch (err: any) {
        console.error(err);
        // JSON.stringify on an Error yields "{}" because its properties are
        // non-enumerable, so surface the message explicitly.
        ctx.body = JSON.stringify({error: err?.message || err});
        ctx.res.end();
    }
};
app.use(errorHandler);
app.use(bodyParser({jsonLimit: '10mb'}));
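// Registry of chat implementations, one per supported Site.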
const chatModel = new ChatModelFactory();

interface AskReq extends ChatRequest {
    site: Site;
}

interface AskRes extends ChatResponse {}
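
// Non-streaming handler for /ask: resolve the target site and model, trim the
// prompt to the model's token limit, then return the complete answer in one body.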
const AskHandle: Middleware = async (ctx) => {
    const {
        prompt,
        model = ModelType.GPT3p5Turbo,
        site = Site.You,
    } = {...ctx.query as any, ...ctx.request.body as any, ...ctx.params as any} as AskReq;
    if (!prompt) {
        ctx.body = {error: `need prompt in query`} as AskRes;
        return;
    }
    const chat = chatModel.get(site);
    if (!chat) {
        ctx.body = {error: `not support site: ${site}`} as AskRes;
        return;
    }
    const tokenLimit = chat.support(model);
    if (!tokenLimit) {
        ctx.body = {error: `${site} not support model ${model}`} as AskRes;
        return;
    }
    // Trim the conversation to fit the model's context window before asking.
    const [content, messages] = PromptToString(prompt, tokenLimit);
    ctx.body = await chat.ask({prompt: content, messages, model});
};
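
// Streaming handler factory: given an EventStream class (raw SSE or the
// OpenAI-flavoured OpenaiEventStream), build middleware that streams the
// answer back as server-sent events.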
const AskStreamHandle: (ESType: new () => EventStream) => Middleware = (ESType) => async (ctx) => {
    const {
        prompt,
        model = ModelType.GPT3p5Turbo,
        site = Site.You,
    } = {...ctx.query as any, ...ctx.request.body as any, ...ctx.params as any} as AskReq;
    // Standard server-sent-events response headers.
    ctx.set({
        "Content-Type": "text/event-stream;charset=utf-8",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
    });
    const es = new ESType();
    ctx.body = es.stream();
    if (!prompt) {
        es.write(Event.error, {error: 'need prompt in query'});
        es.end();
        return;
    }
    const chat = chatModel.get(site);
    if (!chat) {
        es.write(Event.error, {error: `not support site: ${site}`});
        es.end();
        return;
    }
    const tokenLimit = chat.support(model);
    if (!tokenLimit) {
        es.write(Event.error, {error: `${site} not support model ${model}`});
        es.end();
        return;
    }
    const [content, messages] = PromptToString(prompt, tokenLimit);
    // ctx.body is already bound to the stream above; askStream writes into it.
    await chat.askStream({prompt: content, messages, model}, es);
};
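
// Request body accepted by the OpenAI-compatible endpoints.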
interface OpenAIReq {
    site: Site;
    stream: boolean;
    model: ModelType;
    messages: Message[];
}

interface Support {
    site: string;
    models: string[];
}
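
// GET /supports: list every registered site together with the models it can serve.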
router.get('/supports', (ctx) => {
    const result: Support[] = [];
    for (const key in Site) {
        const site = Site[key as keyof typeof Site];
        const chat = chatModel.get(site);
        const support: Support = {site, models: []};
        for (const mKey in ModelType) {
            const model = ModelType[mKey as keyof typeof ModelType];
            if (chat?.support(model)) {
                support.models.push(model);
            }
        }
        result.push(support);
    }
    ctx.body = result;
});

router.get('/ask', AskHandle);
router.post('/ask', AskHandle);
router.get('/ask/stream', AskStreamHandle(EventStream));
router.post('/ask/stream', AskStreamHandle(EventStream));
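
// OpenAI-compatible handler: flatten the messages array into the prompt field
// the /ask handlers expect, delegate to the streaming or non-streaming path,
// and re-wrap non-streaming results in the chat.completion response shape.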
const openAIHandle: Middleware = async (ctx, next) => {
    const {stream} = {...ctx.query as any, ...ctx.request.body as any, ...ctx.params as any} as OpenAIReq;
    // Downstream handlers expect a single prompt string, so serialize the
    // OpenAI-style messages array into it.
    (ctx.request.body as any).prompt = JSON.stringify((ctx.request.body as any).messages);
    if (stream) {
        await AskStreamHandle(OpenaiEventStream)(ctx, next);
        return;
    }
    await AskHandle(ctx, next);
    console.log(ctx.body);
    ctx.body = {
        id: `chatcmpl-${randomStr()}`,
        object: 'chat.completion',
        created: moment().unix(),
        choices: [{
            index: 0,
            message: {
                role: 'assistant',
                content: ctx.body.content || ctx.body.error,
            },
            finish_reason: 'stop',
        }],
        usage: {
            // prompt_tokens is a fixed placeholder: upstream sites do not report real usage.
            prompt_tokens: 100,
            completion_tokens: getTokenSize(ctx.body.content || ''),
            total_tokens: 100 + getTokenSize(ctx.body.content || ''),
        },
    };
};

router.post('/v1/chat/completions', openAIHandle);
router.post('/:site/v1/chat/completions', openAIHandle);
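
// Example requests (a sketch: it assumes Site.You and ModelType.GPT3p5Turbo
// serialize to "you" and "gpt-3.5-turbo"; check ./model for the real enum values):
//   curl 'http://127.0.0.1:3000/ask?site=you&prompt=hello'
//   curl http://127.0.0.1:3000/v1/chat/completions \
//     -H 'Content-Type: application/json' \
//     -d '{"site":"you","model":"gpt-3.5-turbo","messages":[{"role":"user","content":"hello"}]}'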
app.use(router.routes());
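
// Boot the server and install process-level lifecycle handlers.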
(async () => {
    const server = app.listen(3000, () => {
        console.log("Now listening: 127.0.0.1:3000");
    });
    // Close the HTTP server cleanly on Ctrl-C before exiting.
    process.on('SIGINT', () => {
        server.close(() => {
            process.exit(0);
        });
    });
    // Last-resort guard: log uncaught exceptions instead of crashing the process.
    process.on('uncaughtException', (e) => {
        console.error(e);
    });
})();