Mirror of https://github.com/hcengineering/platform.git (synced 2025-04-20 23:32:14 +00:00)
Add config for baseUrl (#6645)
Signed-off-by: Kristina Fefelova <kristin.fefelova@gmail.com>
Parent: 8d9cebdd67
Commit: 42c602ee67
@@ -30,6 +30,7 @@ interface Config {
   Password: string
   OpenAIKey: string
   OpenAIModel: OpenAI.ChatModel
+  OpenAIBaseUrl: string
   OpenAITranslateModel: OpenAI.ChatModel
   MaxContentTokens: number
   MaxHistoryRecords: number
@@ -55,6 +56,7 @@ const config: Config = (() => {
     OpenAIKey: process.env.OPENAI_API_KEY ?? '',
     OpenAIModel: (process.env.OPENAI_MODEL ?? 'gpt-4o-mini') as OpenAI.ChatModel,
     OpenAITranslateModel: (process.env.OPENAI_TRANSLATE_MODEL ?? 'gpt-4o-mini') as OpenAI.ChatModel,
+    OpenAIBaseUrl: process.env.OPENAI_BASE_URL ?? '',
     MaxContentTokens: parseNumber(process.env.MAX_CONTENT_TOKENS) ?? 128 * 100,
     MaxHistoryRecords: parseNumber(process.env.MAX_HISTORY_RECORDS) ?? 500,
     Port: parseNumber(process.env.PORT) ?? 4010
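For reference, the two config hunks above amount to the following standalone sketch (same names and defaults as in the diff, but not the platform module itself; the example URL in the comment is a placeholder): an unset or empty OPENAI_BASE_URL leaves OpenAIBaseUrl as '', which the code below in this commit treats as "use the SDK's default endpoint".

import OpenAI from 'openai'

// Standalone sketch of the OpenAI-related settings after this change.
interface AIConfig {
  OpenAIKey: string
  OpenAIModel: OpenAI.ChatModel
  OpenAIBaseUrl: string
}

const aiConfig: AIConfig = {
  OpenAIKey: process.env.OPENAI_API_KEY ?? '',
  OpenAIModel: (process.env.OPENAI_MODEL ?? 'gpt-4o-mini') as OpenAI.ChatModel,
  // New: e.g. OPENAI_BASE_URL=https://llm-gateway.example.com/v1 (placeholder host)
  // points the bot at an OpenAI-compatible gateway; empty keeps the default endpoint.
  OpenAIBaseUrl: process.env.OPENAI_BASE_URL ?? ''
}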
@@ -50,7 +50,13 @@ export class AIBotController {
     readonly storage: DbStorage,
     private readonly ctx: MeasureContext
   ) {
-    this.aiClient = config.OpenAIKey === '' ? undefined : new OpenAI({ apiKey: config.OpenAIKey })
+    this.aiClient =
+      config.OpenAIKey !== ''
+        ? new OpenAI({
+            apiKey: config.OpenAIKey,
+            baseURL: config.OpenAIBaseUrl === '' ? undefined : config.OpenAIBaseUrl
+          })
+        : undefined
 
     this.intervalId = setInterval(() => {
       void this.updateWorkspaceClients()
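The constructor change above is the piece that actually forwards the setting to the SDK. As a self-contained sketch (the helper name is illustrative, not platform code), the logic is: no API key means no client at all; an empty base URL is mapped to undefined so the openai SDK keeps its built-in default; otherwise every request from the bot goes to the configured endpoint.

import OpenAI from 'openai'

// Illustrative helper mirroring the constructor logic above (not part of the platform code).
function createAIClient (key: string, baseUrl: string): OpenAI | undefined {
  if (key === '') return undefined
  return new OpenAI({
    apiKey: key,
    // Passing '' would override the SDK default, so map the empty string to undefined.
    baseURL: baseUrl === '' ? undefined : baseUrl
  })
}

// Usage with the environment variables from the config hunks (values are deployment-specific):
const aiClient = createAIClient(process.env.OPENAI_API_KEY ?? '', process.env.OPENAI_BASE_URL ?? '')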
@@ -85,15 +85,23 @@ export async function createChatCompletion (
   client: OpenAI,
   message: OpenAI.ChatCompletionMessageParam,
   user?: string,
-  history: OpenAI.ChatCompletionMessageParam[] = []
+  history: OpenAI.ChatCompletionMessageParam[] = [],
+  skipCache = true
 ): Promise<OpenAI.ChatCompletion | undefined> {
+  const opt: OpenAI.RequestOptions = {}
+  if (skipCache) {
+    opt.headers = { 'cf-skip-cache': 'true' }
+  }
   try {
-    return await client.chat.completions.create({
-      messages: [...history, message],
-      model: config.OpenAIModel,
-      user,
-      stream: false
-    })
+    return await client.chat.completions.create(
+      {
+        messages: [...history, message],
+        model: config.OpenAIModel,
+        user,
+        stream: false
+      },
+      opt
+    )
   } catch (e) {
     console.error(e)
   }
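Beyond the base URL itself, the same commit changes createChatCompletion to pass the openai SDK's per-request options object as the second argument to chat.completions.create: a new skipCache flag (default true) attaches a cf-skip-cache header, whose name suggests it targets a caching proxy or AI gateway sitting in front of the endpoint. A minimal sketch of the same pattern, with an illustrative function name and prompt (not the platform function itself):

import OpenAI from 'openai'

// Illustrative sketch of the skipCache pattern used above (not the platform code).
async function complete (client: OpenAI, prompt: string, skipCache = true): Promise<string | undefined> {
  const opt: OpenAI.RequestOptions = {}
  if (skipCache) {
    // Extra header forwarded with this single request via the SDK's per-request options.
    opt.headers = { 'cf-skip-cache': 'true' }
  }
  const res = await client.chat.completions.create(
    { model: 'gpt-4o-mini', messages: [{ role: 'user', content: prompt }], stream: false },
    opt
  )
  return res.choices[0]?.message?.content ?? undefined
}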