// gpt4free-ts/index.ts
import Koa, { Context, Middleware, Next } from 'koa';
import Router from 'koa-router';
import bodyParser from 'koa-bodyparser';
import cors from '@koa/cors';
import { ChatModelFactory } from './model';
import dotenv from 'dotenv';
import {
  ChatRequest,
  ChatResponse,
  Message,
  ModelType,
  Site,
} from './model/base';
import {
  ComError,
  Event,
  EventStream,
  getTokenSize,
  OpenaiEventStream,
  parseJSON,
  randomStr,
  ThroughEventStream,
} from './utils';
import moment from 'moment';
import { Config } from './utils/config';
import { initLog } from './utils/log';

process.setMaxListeners(100); // raise the max EventEmitter listener limit to avoid leak warnings

dotenv.config();
initLog();
Config.load();
Config.watchFile();

const app = new Koa();
app.use(cors());
const router = new Router();

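// Top-level error middleware: convert anything thrown downstream into a JSON
// error body, using the ComError status when one is attached.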
const errorHandler = async (ctx: Context, next: Next) => {
  try {
    await next();
  } catch (err: any) {
    console.error('error handle:', err);
    ctx.body = { error: { message: err.message } };
    ctx.status = err.status || ComError.Status.InternalServerError;
  }
};
app.use(errorHandler);
app.use(bodyParser({ jsonLimit: '10mb' }));
const chatModel = new ChatModelFactory();

interface AskReq extends ChatRequest {
  site: Site;
}

interface AskRes extends ChatResponse {}

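// GET/POST /ask: one-shot (non-streaming) chat completion. Request fields are
// merged from the query string, JSON body, and route params; later sources
// override earlier ones.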
const AskHandle: Middleware = async (ctx) => {
  const {
    prompt,
    model = ModelType.GPT3p5Turbo,
    site = Site.You,
    ...rest
  } = {
    ...(ctx.query as any),
    ...(ctx.request.body as any),
    ...(ctx.params as any),
  } as AskReq;
  if (!prompt) {
    throw new ComError(`missing prompt in request`, ComError.Status.BadRequest);
  }
  const chat = chatModel.get(site);
  if (!chat) {
    throw new ComError(`unsupported site: ${site}`, ComError.Status.NotFound);
  }
  let req: ChatRequest = {
    ...rest,
    prompt,
    messages: parseJSON<Message[]>(prompt, [{ role: 'user', content: prompt }]),
    model,
  };
  if (typeof req.messages !== 'object') {
    // a purely numeric prompt comes back from parseJSON as a number, not an array
    req.messages = [{ role: 'user', content: prompt }];
  }
  req = await chat.preHandle(req);
  const data = await chat.ask(req);
  if (data && data.error) {
    ctx.status = 500;
  }
  ctx.body = data;
};
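// GET/POST /ask/stream: streaming chat. The injected EventStream implementation
// decides the wire format (plain SSE events or OpenAI-style chunks).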
const AskStreamHandle: (ESType: new () => EventStream) => Middleware =
  (ESType) => async (ctx) => {
    const {
      prompt,
      model = ModelType.GPT3p5Turbo,
      site = Site.You,
      ...rest
    } = {
      ...(ctx.query as any),
      ...(ctx.request.body as any),
      ...(ctx.params as any),
    } as AskReq;
    if (!prompt) {
      throw new ComError(
        `missing prompt in request`,
        ComError.Status.BadRequest,
      );
    }
    const chat = chatModel.get(site);
    if (!chat) {
      throw new ComError(
        `unsupported site: ${site}`,
        ComError.Status.NotFound,
      );
    }
    let req: ChatRequest = {
      ...rest,
      prompt,
      messages: parseJSON<Message[]>(prompt, [
        { role: 'user', content: prompt },
      ]),
      model,
    };
    if (typeof req.messages !== 'object') {
      req.messages = [{ role: 'user', content: prompt }];
    }
    req = await chat.preHandle(req);
    const stream = new ESType();
    let ok = true;
    // Abort the stream if the upstream model produces nothing within 120s.
    const timeout = setTimeout(() => {
      stream.write(Event.error, { error: 'timeout' });
      stream.write(Event.done, { content: '' });
      stream.end();
    }, 120 * 1000);
    return new Promise<void>((resolve, reject) => {
      const es = new ThroughEventStream(
        (event, data) => {
          switch (event) {
            case Event.error:
              clearTimeout(timeout);
              ok = false;
              if (data instanceof ComError) {
                reject(data);
                break;
              }
              reject(
                new ComError(
                  (data as any)?.error || 'unknown error',
                  (data as any)?.status ||
                    ComError.Status.InternalServerError,
                ),
              );
              break;
            default:
              clearTimeout(timeout);
              if (!ok) {
                break;
              }
              // On the first event, commit the SSE headers and hand Koa the
              // readable side of the stream.
              if (!ctx.body) {
                ctx.set({
                  'Content-Type': 'text/event-stream;charset=utf-8',
                  'Cache-Control': 'no-cache',
                  Connection: 'keep-alive',
                });
                ctx.body = stream.stream();
              }
              resolve();
              stream.write(event, data);
              break;
          }
        },
        () => {
          if (!ok) {
            return;
          }
          stream.end();
        },
      );
      chat.askStream(req, es).catch((err) => {
        clearTimeout(timeout);
        es.destroy();
        reject(err);
      });
    });
  };
interface OpenAIReq {
  site: Site;
  stream: boolean;
  model: ModelType;
  messages: Message[];
}

interface Support {
  site: string;
  models: string[];
}
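
// GET /supports: enumerate every registered site and the models each one
// reports as supported.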
router.get('/supports', (ctx) => {
  const result: Support[] = [];
  for (const key in Site) {
    //@ts-ignore
    const site = Site[key];
    //@ts-ignore
    const chat = chatModel.get(site);
    const support: Support = { site: site, models: [] };
    for (const mKey in ModelType) {
      //@ts-ignore
      const model = ModelType[mKey];
      //@ts-ignore
      if (chat?.support(model)) {
        support.models.push(model);
      }
    }
    result.push(support);
  }
  ctx.body = result;
});
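// Example (illustrative; actual site identifiers come from the Site enum):
//   GET /ask?prompt=hello&site=you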
router.get('/ask', AskHandle);
router.post('/ask', AskHandle);
router.get('/ask/stream', AskStreamHandle(EventStream));
router.post('/ask/stream', AskStreamHandle(EventStream));
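
// OpenAI-compatible endpoint: map a /v1/chat/completions request onto the
// internal ask handlers; for non-streaming calls, rewrap the result in the
// OpenAI chat.completion response shape with token-usage accounting.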
const openAIHandle: Middleware = async (ctx, next) => {
  const { stream, messages } = {
    ...(ctx.query as any),
    ...(ctx.request.body as any),
    ...(ctx.params as any),
  } as OpenAIReq;
  // Re-encode the messages array from the JSON body as the prompt field the
  // internal handlers expect.
  (ctx.request.body as any).prompt = JSON.stringify(
    (ctx.request.body as any).messages,
  );
  if (stream) {
    await AskStreamHandle(OpenaiEventStream)(ctx, next);
    return;
  }
  await AskHandle(ctx, next);
  let reqLen = 0;
  for (const v of messages) {
    reqLen += getTokenSize(v.content);
  }
  ctx.body = {
    id: `chatcmpl-${randomStr()}`,
    object: 'chat.completion',
    created: moment().unix(),
    choices: [
      {
        index: 0,
        message: {
          role: 'assistant',
          content: ctx.body.content || ctx.body.error,
        },
        finish_reason: 'stop',
      },
    ],
    usage: {
      prompt_tokens: reqLen,
      completion_tokens: getTokenSize(ctx.body.content || ''),
      total_tokens: reqLen + getTokenSize(ctx.body.content || ''),
    },
  };
};
router.post('/v1/chat/completions', openAIHandle);
router.post('/:site/v1/chat/completions', openAIHandle);
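
// Usage sketch (illustrative values; the JSON field names mirror the OpenAI
// chat API, and the port defaults to 3000 below):
//   curl -X POST http://127.0.0.1:3000/v1/chat/completions \
//     -H 'Content-Type: application/json' \
//     -d '{"stream":false,"messages":[{"role":"user","content":"hello"}]}'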
app.use(router.routes());
(async () => {
  const port = +(process.env.PORT || 3000);
  const server = app.listen(port, () => {
    console.log(`Now listening: 127.0.0.1:${port}`);
  });
  // Close the listener cleanly on Ctrl-C.
  process.on('SIGINT', () => {
    server.close(() => {
      process.exit(0);
    });
  });
  process.on('uncaughtException', (e) => {
    console.error('uncaughtException', e);
    process.exit(1);
  });
})();