<?php

declare(strict_types=1);

/*
|--------------------------------------------------------------------------
| LLM (OpenAI-compatible) API Configuration
|--------------------------------------------------------------------------
|
| OpenAI-compatible API configuration.
|
| Environment variables are optional, but recommended in deployment:
| - LLM_API_BASE_URL
| - LLM_API_KEY
| - LLM_CHAT_MODEL
| - LLM_EMBEDDING_MODEL
|
*/

// Parse a boolean environment variable. Accepts the usual spellings
// ('true'/'false', '1'/'0', 'on'/'off', 'yes'/'no', case-insensitive);
// an unset, empty, or unrecognised value falls back to $default.
$envBool = static function (string $name, bool $default): bool {
    $raw = getenv($name);
    if ($raw === false || $raw === '') {
        return $default;
    }

    return filter_var($raw, FILTER_VALIDATE_BOOL, FILTER_NULL_ON_FAILURE) ?? $default;
};

return [
    'default' => [
        // Base URL of any OpenAI-compatible endpoint.
        'base_url' => getenv('LLM_API_BASE_URL') ?: 'https://api.openai.com/v1',
        'api_key' => getenv('LLM_API_KEY') ?: '',
        // Optional OpenAI organization / project headers; null means "not sent".
        'organization' => getenv('LLM_ORGANIZATION') ?: null,
        'project' => getenv('LLM_PROJECT') ?: null,
        // Request timeouts, in seconds.
        'timeout' => (int) (getenv('LLM_API_TIMEOUT') ?: 60),
        'connect_timeout' => (int) (getenv('LLM_API_CONNECT_TIMEOUT') ?: 10),
    ],

    'chat' => [
        'model' => getenv('LLM_CHAT_MODEL') ?: 'gpt-4.1-mini',
        'temperature' => (float) (getenv('LLM_CHAT_TEMPERATURE') ?: 0.2),
        'max_tokens' => (int) (getenv('LLM_CHAT_MAX_TOKENS') ?: 1200),
        'stream' => false,
    ],

    'metadata' => [
        'enabled' => $envBool('LLM_METADATA_ENABLED', true),
        // Falls back to the chat model when no dedicated metadata model is set.
        'model' => getenv('LLM_METADATA_MODEL') ?: (getenv('LLM_CHAT_MODEL') ?: 'gpt-4.1-mini'),
        // Input text longer than this is truncated before being sent.
        'max_input_chars' => (int) (getenv('LLM_METADATA_MAX_INPUT_CHARS') ?: 12000),
        'max_tokens' => (int) (getenv('LLM_METADATA_MAX_TOKENS') ?: 1200),
        'temperature' => (float) (getenv('LLM_METADATA_TEMPERATURE') ?: 0.1),
        'stream' => false,
        // Request strict JSON output from the provider.
        'response_format' => ['type' => 'json_object'],
        'thinking' => [
            'type' => getenv('LLM_METADATA_THINKING') ?: 'disabled',
        ],
        'retry' => [
            'enabled' => $envBool('LLM_METADATA_RETRY_ENABLED', true),
            'max_attempts' => (int) (getenv('LLM_METADATA_RETRY_MAX_ATTEMPTS') ?: 3),
            // Backoff bounds, in milliseconds.
            'base_delay_ms' => (int) (getenv('LLM_METADATA_RETRY_BASE_DELAY_MS') ?: 1500),
            'max_delay_ms' => (int) (getenv('LLM_METADATA_RETRY_MAX_DELAY_MS') ?: 10000),
            // HTTP statuses treated as retryable (rate limiting).
            'retry_statuses' => [429],
            // Provider-specific error codes treated as retryable.
            // NOTE(review): verify this list against the upstream provider's
            // error-code documentation — TODO confirm.
            'retry_error_codes' => ['1302', '1303', '1304', '1305', '1306', '1307', '1308'],
        ],
    ],

    'embedding' => [
        'model' => getenv('LLM_EMBEDDING_MODEL') ?: 'text-embedding-3-small',
        'batch_size' => (int) (getenv('LLM_EMBEDDING_BATCH_SIZE') ?: 64),
        // Only force dimensions when explicitly configured; null lets the
        // model's native dimensionality apply.
        'dimensions' => getenv('LLM_EMBEDDING_DIMENSIONS') !== false
            ? (int) getenv('LLM_EMBEDDING_DIMENSIONS')
            : null,
    ],
];