mirror of https://github.com/TriliumNext/Notes
Merge branch 'develop' into dateNote
commit
9bfadd7799
TRILIUM_ENV=dev
TRILIUM_DATA_DIR=./apps/server/spec/db
TRILIUM_RESOURCE_DIR=./apps/server/dist
TRILIUM_PUBLIC_SERVER=http://localhost:4200
TRILIUM_PORT=8086
TRILIUM_INTEGRATION_TEST=edit
Binary file not shown.
File diff suppressed because one or more lines are too long
BIN
apps/server/src/assets/doc_notes/en/User Guide/User Guide/Advanced Usage/1_Metrics_image.png
generated
vendored
BIN
apps/server/src/assets/doc_notes/en/User Guide/User Guide/Advanced Usage/1_Metrics_image.png
generated
vendored
Binary file not shown.
|
After Width: | Height: | Size: 548 KiB |
BIN
apps/server/src/assets/doc_notes/en/User Guide/User Guide/Advanced Usage/2_Metrics_image.png
generated
vendored
BIN
apps/server/src/assets/doc_notes/en/User Guide/User Guide/Advanced Usage/2_Metrics_image.png
generated
vendored
Binary file not shown.
|
After Width: | Height: | Size: 52 KiB |
22
apps/server/src/assets/doc_notes/en/User Guide/User Guide/Advanced Usage/Metrics.html
generated
vendored
22
apps/server/src/assets/doc_notes/en/User Guide/User Guide/Advanced Usage/Metrics.html
generated
vendored
File diff suppressed because it is too large
Load Diff
BIN
apps/server/src/assets/doc_notes/en/User Guide/User Guide/Advanced Usage/Metrics_image.png
generated
vendored
BIN
apps/server/src/assets/doc_notes/en/User Guide/User Guide/Advanced Usage/Metrics_image.png
generated
vendored
Binary file not shown.
|
After Width: | Height: | Size: 73 KiB |
@ -1,169 +0,0 @@
|
||||
/**
|
||||
* In-memory storage for chat sessions
|
||||
*/
|
||||
import log from "../../log.js";
|
||||
import { LLM_CONSTANTS } from '../constants/provider_constants.js';
|
||||
import { SEARCH_CONSTANTS } from '../constants/search_constants.js';
|
||||
import { randomString } from "../../utils.js";
|
||||
import type { ChatSession, ChatMessage } from '../interfaces/chat_session.js';
|
||||
|
||||
// In-memory storage for sessions
|
||||
const sessions = new Map<string, ChatSession>();
|
||||
|
||||
// Flag to track if cleanup timer has been initialized
|
||||
let cleanupInitialized = false;
|
||||
|
||||
/**
|
||||
* Provides methods to manage chat sessions
|
||||
*/
|
||||
class SessionsStore {
|
||||
/**
|
||||
* Initialize the session cleanup timer to remove old/inactive sessions
|
||||
*/
|
||||
initializeCleanupTimer(): void {
|
||||
if (cleanupInitialized) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Clean sessions that have expired based on the constants
|
||||
function cleanupOldSessions() {
|
||||
const expiryTime = new Date(Date.now() - LLM_CONSTANTS.SESSION.SESSION_EXPIRY_MS);
|
||||
for (const [sessionId, session] of sessions.entries()) {
|
||||
if (session.lastActive < expiryTime) {
|
||||
sessions.delete(sessionId);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Run cleanup at the configured interval
|
||||
setInterval(cleanupOldSessions, LLM_CONSTANTS.SESSION.CLEANUP_INTERVAL_MS);
|
||||
cleanupInitialized = true;
|
||||
log.info("Session cleanup timer initialized");
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all sessions
|
||||
*/
|
||||
getAllSessions(): Map<string, ChatSession> {
|
||||
return sessions;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a specific session by ID
|
||||
*/
|
||||
getSession(sessionId: string): ChatSession | undefined {
|
||||
return sessions.get(sessionId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new session
|
||||
*/
|
||||
createSession(options: {
|
||||
chatNoteId: string;
|
||||
title?: string;
|
||||
systemPrompt?: string;
|
||||
contextNoteId?: string;
|
||||
maxTokens?: number;
|
||||
model?: string;
|
||||
provider?: string;
|
||||
temperature?: number;
|
||||
}): ChatSession {
|
||||
this.initializeCleanupTimer();
|
||||
|
||||
const title = options.title || 'Chat Session';
|
||||
const sessionId = options.chatNoteId;
|
||||
const now = new Date();
|
||||
|
||||
// Initial system message if provided
|
||||
const messages: ChatMessage[] = [];
|
||||
if (options.systemPrompt) {
|
||||
messages.push({
|
||||
role: 'system',
|
||||
content: options.systemPrompt,
|
||||
timestamp: now
|
||||
});
|
||||
}
|
||||
|
||||
// Create and store the session
|
||||
const session: ChatSession = {
|
||||
id: sessionId,
|
||||
title,
|
||||
messages,
|
||||
createdAt: now,
|
||||
lastActive: now,
|
||||
noteContext: options.contextNoteId,
|
||||
metadata: {
|
||||
temperature: options.temperature || SEARCH_CONSTANTS.TEMPERATURE.DEFAULT,
|
||||
maxTokens: options.maxTokens,
|
||||
model: options.model,
|
||||
provider: options.provider,
|
||||
sources: [],
|
||||
toolExecutions: [],
|
||||
lastUpdated: now.toISOString()
|
||||
}
|
||||
};
|
||||
|
||||
sessions.set(sessionId, session);
|
||||
log.info(`Created in-memory session for Chat Note ID: ${sessionId}`);
|
||||
|
||||
return session;
|
||||
}
|
||||
|
||||
/**
|
||||
* Update a session's last active timestamp
|
||||
*/
|
||||
touchSession(sessionId: string): boolean {
|
||||
const session = sessions.get(sessionId);
|
||||
if (!session) {
|
||||
return false;
|
||||
}
|
||||
|
||||
session.lastActive = new Date();
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Delete a session
|
||||
*/
|
||||
deleteSession(sessionId: string): boolean {
|
||||
return sessions.delete(sessionId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Record a tool execution in the session metadata
|
||||
*/
|
||||
recordToolExecution(chatNoteId: string, tool: any, result: string, error?: string): void {
|
||||
if (!chatNoteId) return;
|
||||
|
||||
const session = sessions.get(chatNoteId);
|
||||
if (!session) return;
|
||||
|
||||
try {
|
||||
const toolExecutions = session.metadata.toolExecutions || [];
|
||||
|
||||
// Format tool execution record
|
||||
const execution = {
|
||||
id: tool.id || `tool-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`,
|
||||
name: tool.function?.name || 'unknown',
|
||||
arguments: typeof tool.function?.arguments === 'string'
|
||||
? (() => { try { return JSON.parse(tool.function.arguments); } catch { return tool.function.arguments; } })()
|
||||
: tool.function?.arguments || {},
|
||||
result: result,
|
||||
error: error,
|
||||
timestamp: new Date().toISOString()
|
||||
};
|
||||
|
||||
// Add to tool executions
|
||||
toolExecutions.push(execution);
|
||||
session.metadata.toolExecutions = toolExecutions;
|
||||
|
||||
log.info(`Recorded tool execution for ${execution.name} in session ${chatNoteId}`);
|
||||
} catch (err) {
|
||||
log.error(`Failed to record tool execution: ${err}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create singleton instance
|
||||
const sessionsStore = new SessionsStore();
|
||||
export default sessionsStore;
|
||||
@ -0,0 +1,179 @@
|
||||
import configurationManager from './configuration_manager.js';
|
||||
import type {
|
||||
ProviderType,
|
||||
ModelIdentifier,
|
||||
ModelConfig,
|
||||
ProviderPrecedenceConfig,
|
||||
EmbeddingProviderPrecedenceConfig
|
||||
} from '../interfaces/configuration_interfaces.js';
|
||||
|
||||
/**
|
||||
* Helper functions for accessing AI configuration without string parsing
|
||||
* Use these throughout the codebase instead of parsing strings directly
|
||||
*/
|
||||
|
||||
/**
|
||||
* Get the ordered list of AI providers
|
||||
*/
|
||||
export async function getProviderPrecedence(): Promise<ProviderType[]> {
|
||||
const config = await configurationManager.getProviderPrecedence();
|
||||
return config.providers;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the default/preferred AI provider
|
||||
*/
|
||||
export async function getPreferredProvider(): Promise<ProviderType | null> {
|
||||
const config = await configurationManager.getProviderPrecedence();
|
||||
if (config.providers.length === 0) {
|
||||
return null; // No providers configured
|
||||
}
|
||||
return config.defaultProvider || config.providers[0];
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the ordered list of embedding providers
|
||||
*/
|
||||
export async function getEmbeddingProviderPrecedence(): Promise<string[]> {
|
||||
const config = await configurationManager.getEmbeddingProviderPrecedence();
|
||||
return config.providers;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the default embedding provider
|
||||
*/
|
||||
export async function getPreferredEmbeddingProvider(): Promise<string | null> {
|
||||
const config = await configurationManager.getEmbeddingProviderPrecedence();
|
||||
if (config.providers.length === 0) {
|
||||
return null; // No providers configured
|
||||
}
|
||||
return config.defaultProvider || config.providers[0];
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse a model identifier (handles "provider:model" format)
|
||||
*/
|
||||
export function parseModelIdentifier(modelString: string): ModelIdentifier {
|
||||
return configurationManager.parseModelIdentifier(modelString);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a model configuration from a model string
|
||||
*/
|
||||
export function createModelConfig(modelString: string, defaultProvider?: ProviderType): ModelConfig {
|
||||
return configurationManager.createModelConfig(modelString, defaultProvider);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the default model for a specific provider
|
||||
*/
|
||||
export async function getDefaultModelForProvider(provider: ProviderType): Promise<string | undefined> {
|
||||
const config = await configurationManager.getAIConfig();
|
||||
return config.defaultModels[provider]; // This can now be undefined
|
||||
}
|
||||
|
||||
/**
|
||||
* Get provider settings for a specific provider
|
||||
*/
|
||||
export async function getProviderSettings(provider: ProviderType) {
|
||||
const config = await configurationManager.getAIConfig();
|
||||
return config.providerSettings[provider];
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if AI is enabled
|
||||
*/
|
||||
export async function isAIEnabled(): Promise<boolean> {
|
||||
const config = await configurationManager.getAIConfig();
|
||||
return config.enabled;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a provider has required configuration
|
||||
*/
|
||||
export async function isProviderConfigured(provider: ProviderType): Promise<boolean> {
|
||||
const settings = await getProviderSettings(provider);
|
||||
|
||||
switch (provider) {
|
||||
case 'openai':
|
||||
return Boolean((settings as any)?.apiKey);
|
||||
case 'anthropic':
|
||||
return Boolean((settings as any)?.apiKey);
|
||||
case 'ollama':
|
||||
return Boolean((settings as any)?.baseUrl);
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the first available (configured) provider from the precedence list
|
||||
*/
|
||||
export async function getFirstAvailableProvider(): Promise<ProviderType | null> {
|
||||
const providers = await getProviderPrecedence();
|
||||
|
||||
if (providers.length === 0) {
|
||||
return null; // No providers configured
|
||||
}
|
||||
|
||||
for (const provider of providers) {
|
||||
if (await isProviderConfigured(provider)) {
|
||||
return provider;
|
||||
}
|
||||
}
|
||||
|
||||
return null; // No providers are properly configured
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate the current AI configuration
|
||||
*/
|
||||
export async function validateConfiguration() {
|
||||
return configurationManager.validateConfig();
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear cached configuration (use when settings change)
|
||||
*/
|
||||
export function clearConfigurationCache(): void {
|
||||
configurationManager.clearCache();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a model configuration with validation that no defaults are assumed
|
||||
*/
|
||||
export async function getValidModelConfig(provider: ProviderType): Promise<{ model: string; provider: ProviderType } | null> {
|
||||
const defaultModel = await getDefaultModelForProvider(provider);
|
||||
|
||||
if (!defaultModel) {
|
||||
// No default model configured for this provider
|
||||
return null;
|
||||
}
|
||||
|
||||
const isConfigured = await isProviderConfigured(provider);
|
||||
if (!isConfigured) {
|
||||
// Provider is not properly configured
|
||||
return null;
|
||||
}
|
||||
|
||||
return {
|
||||
model: defaultModel,
|
||||
provider
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the first valid model configuration from the provider precedence list
|
||||
*/
|
||||
export async function getFirstValidModelConfig(): Promise<{ model: string; provider: ProviderType } | null> {
|
||||
const providers = await getProviderPrecedence();
|
||||
|
||||
for (const provider of providers) {
|
||||
const config = await getValidModelConfig(provider);
|
||||
if (config) {
|
||||
return config;
|
||||
}
|
||||
}
|
||||
|
||||
return null; // No valid model configuration found
|
||||
}
|
||||
@ -0,0 +1,378 @@
|
||||
import options from '../../options.js';
|
||||
import log from '../../log.js';
|
||||
import type {
|
||||
AIConfig,
|
||||
ProviderPrecedenceConfig,
|
||||
EmbeddingProviderPrecedenceConfig,
|
||||
ModelIdentifier,
|
||||
ModelConfig,
|
||||
ProviderType,
|
||||
EmbeddingProviderType,
|
||||
ConfigValidationResult,
|
||||
ProviderSettings,
|
||||
OpenAISettings,
|
||||
AnthropicSettings,
|
||||
OllamaSettings
|
||||
} from '../interfaces/configuration_interfaces.js';
|
||||
|
||||
/**
|
||||
* Configuration manager that handles conversion from string-based options
|
||||
* to proper typed configuration objects.
|
||||
*
|
||||
* This is the ONLY place where string parsing should happen for LLM configurations.
|
||||
*/
|
||||
export class ConfigurationManager {
|
||||
private static instance: ConfigurationManager | null = null;
|
||||
private cachedConfig: AIConfig | null = null;
|
||||
private lastConfigUpdate: number = 0;
|
||||
|
||||
// Cache for 5 minutes to avoid excessive option reads
|
||||
private static readonly CACHE_DURATION = 5 * 60 * 1000;
|
||||
|
||||
private constructor() {}
|
||||
|
||||
public static getInstance(): ConfigurationManager {
|
||||
if (!ConfigurationManager.instance) {
|
||||
ConfigurationManager.instance = new ConfigurationManager();
|
||||
}
|
||||
return ConfigurationManager.instance;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the complete AI configuration
|
||||
*/
|
||||
public async getAIConfig(): Promise<AIConfig> {
|
||||
const now = Date.now();
|
||||
if (this.cachedConfig && (now - this.lastConfigUpdate) < ConfigurationManager.CACHE_DURATION) {
|
||||
return this.cachedConfig;
|
||||
}
|
||||
|
||||
try {
|
||||
const config: AIConfig = {
|
||||
enabled: await this.getAIEnabled(),
|
||||
providerPrecedence: await this.getProviderPrecedence(),
|
||||
embeddingProviderPrecedence: await this.getEmbeddingProviderPrecedence(),
|
||||
defaultModels: await this.getDefaultModels(),
|
||||
providerSettings: await this.getProviderSettings()
|
||||
};
|
||||
|
||||
this.cachedConfig = config;
|
||||
this.lastConfigUpdate = now;
|
||||
return config;
|
||||
} catch (error) {
|
||||
log.error(`Error loading AI configuration: ${error}`);
|
||||
return this.getDefaultConfig();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse provider precedence from string option
|
||||
*/
|
||||
public async getProviderPrecedence(): Promise<ProviderPrecedenceConfig> {
|
||||
try {
|
||||
const precedenceOption = await options.getOption('aiProviderPrecedence');
|
||||
const providers = this.parseProviderList(precedenceOption);
|
||||
|
||||
return {
|
||||
providers: providers as ProviderType[],
|
||||
defaultProvider: providers.length > 0 ? providers[0] as ProviderType : undefined
|
||||
};
|
||||
} catch (error) {
|
||||
log.error(`Error parsing provider precedence: ${error}`);
|
||||
// Only return known providers if they exist, don't assume defaults
|
||||
return {
|
||||
providers: [],
|
||||
defaultProvider: undefined
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse embedding provider precedence from string option
|
||||
*/
|
||||
public async getEmbeddingProviderPrecedence(): Promise<EmbeddingProviderPrecedenceConfig> {
|
||||
try {
|
||||
const precedenceOption = await options.getOption('embeddingProviderPrecedence');
|
||||
const providers = this.parseProviderList(precedenceOption);
|
||||
|
||||
return {
|
||||
providers: providers as EmbeddingProviderType[],
|
||||
defaultProvider: providers.length > 0 ? providers[0] as EmbeddingProviderType : undefined
|
||||
};
|
||||
} catch (error) {
|
||||
log.error(`Error parsing embedding provider precedence: ${error}`);
|
||||
// Don't assume defaults, return empty configuration
|
||||
return {
|
||||
providers: [],
|
||||
defaultProvider: undefined
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse model identifier with optional provider prefix
|
||||
* Handles formats like "gpt-4", "openai:gpt-4", "ollama:llama2:7b"
|
||||
*/
|
||||
public parseModelIdentifier(modelString: string): ModelIdentifier {
|
||||
if (!modelString) {
|
||||
return {
|
||||
modelId: '',
|
||||
fullIdentifier: ''
|
||||
};
|
||||
}
|
||||
|
||||
const parts = modelString.split(':');
|
||||
|
||||
if (parts.length === 1) {
|
||||
// No provider prefix, just model name
|
||||
return {
|
||||
modelId: modelString,
|
||||
fullIdentifier: modelString
|
||||
};
|
||||
}
|
||||
|
||||
// Check if first part is a known provider
|
||||
const potentialProvider = parts[0].toLowerCase();
|
||||
const knownProviders: ProviderType[] = ['openai', 'anthropic', 'ollama'];
|
||||
|
||||
if (knownProviders.includes(potentialProvider as ProviderType)) {
|
||||
// Provider prefix format
|
||||
const provider = potentialProvider as ProviderType;
|
||||
const modelId = parts.slice(1).join(':'); // Rejoin in case model has colons
|
||||
|
||||
return {
|
||||
provider,
|
||||
modelId,
|
||||
fullIdentifier: modelString
|
||||
};
|
||||
}
|
||||
|
||||
// Not a provider prefix, treat whole string as model name
|
||||
return {
|
||||
modelId: modelString,
|
||||
fullIdentifier: modelString
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Create model configuration from string
|
||||
*/
|
||||
public createModelConfig(modelString: string, defaultProvider?: ProviderType): ModelConfig {
|
||||
const identifier = this.parseModelIdentifier(modelString);
|
||||
const provider = identifier.provider || defaultProvider || 'openai';
|
||||
|
||||
return {
|
||||
provider,
|
||||
modelId: identifier.modelId,
|
||||
displayName: identifier.fullIdentifier
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Get default models for each provider - ONLY from user configuration
|
||||
*/
|
||||
public async getDefaultModels(): Promise<Record<ProviderType, string | undefined>> {
|
||||
try {
|
||||
const [openaiModel, anthropicModel, ollamaModel] = await Promise.all([
|
||||
options.getOption('openaiDefaultModel'),
|
||||
options.getOption('anthropicDefaultModel'),
|
||||
options.getOption('ollamaDefaultModel')
|
||||
]);
|
||||
|
||||
return {
|
||||
openai: openaiModel || undefined,
|
||||
anthropic: anthropicModel || undefined,
|
||||
ollama: ollamaModel || undefined
|
||||
};
|
||||
} catch (error) {
|
||||
log.error(`Error loading default models: ${error}`);
|
||||
// Return undefined for all providers if we can't load config
|
||||
return {
|
||||
openai: undefined,
|
||||
anthropic: undefined,
|
||||
ollama: undefined
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get provider-specific settings
|
||||
*/
|
||||
public async getProviderSettings(): Promise<ProviderSettings> {
|
||||
try {
|
||||
const [
|
||||
openaiApiKey, openaiBaseUrl, openaiDefaultModel,
|
||||
anthropicApiKey, anthropicBaseUrl, anthropicDefaultModel,
|
||||
ollamaBaseUrl, ollamaDefaultModel
|
||||
] = await Promise.all([
|
||||
options.getOption('openaiApiKey'),
|
||||
options.getOption('openaiBaseUrl'),
|
||||
options.getOption('openaiDefaultModel'),
|
||||
options.getOption('anthropicApiKey'),
|
||||
options.getOption('anthropicBaseUrl'),
|
||||
options.getOption('anthropicDefaultModel'),
|
||||
options.getOption('ollamaBaseUrl'),
|
||||
options.getOption('ollamaDefaultModel')
|
||||
]);
|
||||
|
||||
const settings: ProviderSettings = {};
|
||||
|
||||
if (openaiApiKey || openaiBaseUrl || openaiDefaultModel) {
|
||||
settings.openai = {
|
||||
apiKey: openaiApiKey,
|
||||
baseUrl: openaiBaseUrl,
|
||||
defaultModel: openaiDefaultModel
|
||||
};
|
||||
}
|
||||
|
||||
if (anthropicApiKey || anthropicBaseUrl || anthropicDefaultModel) {
|
||||
settings.anthropic = {
|
||||
apiKey: anthropicApiKey,
|
||||
baseUrl: anthropicBaseUrl,
|
||||
defaultModel: anthropicDefaultModel
|
||||
};
|
||||
}
|
||||
|
||||
if (ollamaBaseUrl || ollamaDefaultModel) {
|
||||
settings.ollama = {
|
||||
baseUrl: ollamaBaseUrl,
|
||||
defaultModel: ollamaDefaultModel
|
||||
};
|
||||
}
|
||||
|
||||
return settings;
|
||||
} catch (error) {
|
||||
log.error(`Error loading provider settings: ${error}`);
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate configuration
|
||||
*/
|
||||
public async validateConfig(): Promise<ConfigValidationResult> {
|
||||
const result: ConfigValidationResult = {
|
||||
isValid: true,
|
||||
errors: [],
|
||||
warnings: []
|
||||
};
|
||||
|
||||
try {
|
||||
const config = await this.getAIConfig();
|
||||
|
||||
if (!config.enabled) {
|
||||
result.warnings.push('AI features are disabled');
|
||||
return result;
|
||||
}
|
||||
|
||||
// Validate provider precedence
|
||||
if (config.providerPrecedence.providers.length === 0) {
|
||||
result.errors.push('No providers configured in precedence list');
|
||||
result.isValid = false;
|
||||
}
|
||||
|
||||
// Validate provider settings
|
||||
for (const provider of config.providerPrecedence.providers) {
|
||||
const providerConfig = config.providerSettings[provider];
|
||||
|
||||
if (provider === 'openai') {
|
||||
const openaiConfig = providerConfig as OpenAISettings | undefined;
|
||||
if (!openaiConfig?.apiKey) {
|
||||
result.warnings.push('OpenAI API key is not configured');
|
||||
}
|
||||
}
|
||||
|
||||
if (provider === 'anthropic') {
|
||||
const anthropicConfig = providerConfig as AnthropicSettings | undefined;
|
||||
if (!anthropicConfig?.apiKey) {
|
||||
result.warnings.push('Anthropic API key is not configured');
|
||||
}
|
||||
}
|
||||
|
||||
if (provider === 'ollama') {
|
||||
const ollamaConfig = providerConfig as OllamaSettings | undefined;
|
||||
if (!ollamaConfig?.baseUrl) {
|
||||
result.warnings.push('Ollama base URL is not configured');
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
result.errors.push(`Configuration validation error: ${error}`);
|
||||
result.isValid = false;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear cached configuration (force reload on next access)
|
||||
*/
|
||||
public clearCache(): void {
|
||||
this.cachedConfig = null;
|
||||
this.lastConfigUpdate = 0;
|
||||
}
|
||||
|
||||
// Private helper methods
|
||||
|
||||
private async getAIEnabled(): Promise<boolean> {
|
||||
try {
|
||||
return await options.getOptionBool('aiEnabled');
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
private parseProviderList(precedenceOption: string | null): string[] {
|
||||
if (!precedenceOption) {
|
||||
// Don't assume any defaults - return empty array
|
||||
return [];
|
||||
}
|
||||
|
||||
try {
|
||||
// Handle JSON array format
|
||||
if (precedenceOption.startsWith('[') && precedenceOption.endsWith(']')) {
|
||||
const parsed = JSON.parse(precedenceOption);
|
||||
if (Array.isArray(parsed)) {
|
||||
return parsed.map(p => String(p).trim());
|
||||
}
|
||||
}
|
||||
|
||||
// Handle comma-separated format
|
||||
if (precedenceOption.includes(',')) {
|
||||
return precedenceOption.split(',').map(p => p.trim());
|
||||
}
|
||||
|
||||
// Handle single provider
|
||||
return [precedenceOption.trim()];
|
||||
|
||||
} catch (error) {
|
||||
log.error(`Error parsing provider list "${precedenceOption}": ${error}`);
|
||||
// Don't assume defaults on parse error
|
||||
return [];
|
||||
}
|
||||
}
|
||||
|
||||
private getDefaultConfig(): AIConfig {
|
||||
return {
|
||||
enabled: false,
|
||||
providerPrecedence: {
|
||||
providers: [],
|
||||
defaultProvider: undefined
|
||||
},
|
||||
embeddingProviderPrecedence: {
|
||||
providers: [],
|
||||
defaultProvider: undefined
|
||||
},
|
||||
defaultModels: {
|
||||
openai: undefined,
|
||||
anthropic: undefined,
|
||||
ollama: undefined
|
||||
},
|
||||
providerSettings: {}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// Export singleton instance
|
||||
export default ConfigurationManager.getInstance();
|
||||
@ -0,0 +1,108 @@
|
||||
/**
|
||||
* Configuration interfaces for LLM services
|
||||
* These interfaces replace string parsing with proper typed objects
|
||||
*/
|
||||
|
||||
/**
|
||||
* Provider precedence configuration
|
||||
*/
|
||||
export interface ProviderPrecedenceConfig {
|
||||
providers: ProviderType[];
|
||||
defaultProvider?: ProviderType;
|
||||
}
|
||||
|
||||
/**
|
||||
* Model configuration with provider information
|
||||
*/
|
||||
export interface ModelConfig {
|
||||
provider: ProviderType;
|
||||
modelId: string;
|
||||
displayName?: string;
|
||||
capabilities?: ModelCapabilities;
|
||||
}
|
||||
|
||||
/**
|
||||
* Embedding provider precedence configuration
|
||||
*/
|
||||
export interface EmbeddingProviderPrecedenceConfig {
|
||||
providers: EmbeddingProviderType[];
|
||||
defaultProvider?: EmbeddingProviderType;
|
||||
}
|
||||
|
||||
/**
|
||||
* Model capabilities
|
||||
*/
|
||||
export interface ModelCapabilities {
|
||||
contextWindow?: number;
|
||||
supportsTools?: boolean;
|
||||
supportsVision?: boolean;
|
||||
supportsStreaming?: boolean;
|
||||
maxTokens?: number;
|
||||
temperature?: number;
|
||||
}
|
||||
|
||||
/**
|
||||
* Complete AI configuration
|
||||
*/
|
||||
export interface AIConfig {
|
||||
enabled: boolean;
|
||||
providerPrecedence: ProviderPrecedenceConfig;
|
||||
embeddingProviderPrecedence: EmbeddingProviderPrecedenceConfig;
|
||||
defaultModels: Record<ProviderType, string | undefined>;
|
||||
providerSettings: ProviderSettings;
|
||||
}
|
||||
|
||||
/**
|
||||
* Provider-specific settings
|
||||
*/
|
||||
export interface ProviderSettings {
|
||||
openai?: OpenAISettings;
|
||||
anthropic?: AnthropicSettings;
|
||||
ollama?: OllamaSettings;
|
||||
}
|
||||
|
||||
export interface OpenAISettings {
|
||||
apiKey?: string;
|
||||
baseUrl?: string;
|
||||
defaultModel?: string;
|
||||
}
|
||||
|
||||
export interface AnthropicSettings {
|
||||
apiKey?: string;
|
||||
baseUrl?: string;
|
||||
defaultModel?: string;
|
||||
}
|
||||
|
||||
export interface OllamaSettings {
|
||||
baseUrl?: string;
|
||||
defaultModel?: string;
|
||||
timeout?: number;
|
||||
}
|
||||
|
||||
/**
|
||||
* Valid provider types
|
||||
*/
|
||||
export type ProviderType = 'openai' | 'anthropic' | 'ollama';
|
||||
|
||||
/**
|
||||
* Valid embedding provider types
|
||||
*/
|
||||
export type EmbeddingProviderType = 'openai' | 'ollama' | 'local';
|
||||
|
||||
/**
|
||||
* Model identifier with provider prefix (e.g., "openai:gpt-4" or "ollama:llama2")
|
||||
*/
|
||||
export interface ModelIdentifier {
|
||||
provider?: ProviderType;
|
||||
modelId: string;
|
||||
fullIdentifier: string; // The complete string representation
|
||||
}
|
||||
|
||||
/**
|
||||
* Validation result for configuration
|
||||
*/
|
||||
export interface ConfigValidationResult {
|
||||
isValid: boolean;
|
||||
errors: string[];
|
||||
warnings: string[];
|
||||
}
|
||||
Binary file not shown.
|
After Width: | Height: | Size: 548 KiB |
Binary file not shown.
|
After Width: | Height: | Size: 52 KiB |
File diff suppressed because it is too large
Load Diff
Binary file not shown.
|
After Width: | Height: | Size: 73 KiB |
Loading…
Reference in New Issue