@@ -10,7 +10,7 @@ import { Ollama } from "ollama";
* Ollama embedding provider implementation using the official Ollama client
* Ollama embedding provider implementation using the official Ollama client
* /
* /
export class OllamaEmbeddingProvider extends BaseEmbeddingProvider {
export class OllamaEmbeddingProvider extends BaseEmbeddingProvider {
name = "ollama" ;
override name = "ollama" ;
private client : Ollama | null = null ;
private client : Ollama | null = null ;
constructor ( config : EmbeddingConfig ) {
constructor ( config : EmbeddingConfig ) {
@@ -30,7 +30,7 @@ export class OllamaEmbeddingProvider extends BaseEmbeddingProvider {
/ * *
/ * *
* Initialize the provider by detecting model capabilities
* Initialize the provider by detecting model capabilities
* /
* /
async initialize ( ) : Promise < void > {
override async initialize ( ) : Promise < void > {
const modelName = this . config . model || "llama3" ;
const modelName = this . config . model || "llama3" ;
try {
try {
// Detect model capabilities
// Detect model capabilities
@@ -183,7 +183,7 @@ export class OllamaEmbeddingProvider extends BaseEmbeddingProvider {
/**
 * Get the current embedding dimension.
 *
 * @returns The embedding vector dimension taken from the provider config.
 */
override getDimension(): number {
  return this.config.dimension;
}
@@ -260,7 +260,7 @@ export class OllamaEmbeddingProvider extends BaseEmbeddingProvider {
/ * *
/ * *
* More specific implementation of batch size error detection for Ollama
* More specific implementation of batch size error detection for Ollama
* /
* /
protected isBatchSizeError( error : any ) : boolean {
protected override isBatchSizeError( error : any ) : boolean {
const errorMessage = error ? . message || '' ;
const errorMessage = error ? . message || '' ;
const ollamaBatchSizeErrorPatterns = [
const ollamaBatchSizeErrorPatterns = [
'context length' , 'token limit' , 'out of memory' ,
'context length' , 'token limit' , 'out of memory' ,
@@ -279,7 +279,7 @@ export class OllamaEmbeddingProvider extends BaseEmbeddingProvider {
* Note : Ollama API doesn ' t support batch embedding , so we process them sequentially
* Note : Ollama API doesn ' t support batch embedding , so we process them sequentially
* but using the adaptive batch processor to handle rate limits and retries
* but using the adaptive batch processor to handle rate limits and retries
* /
* /
async generateBatchEmbeddings ( texts : string [ ] ) : Promise < Float32Array [ ] > {
override async generateBatchEmbeddings ( texts : string [ ] ) : Promise < Float32Array [ ] > {
if ( texts . length === 0 ) {
if ( texts . length === 0 ) {
return [ ] ;
return [ ] ;
}
}
@@ -318,7 +318,7 @@ export class OllamaEmbeddingProvider extends BaseEmbeddingProvider {
/**
 * Returns the normalization status for Ollama embeddings.
 *
 * Ollama embeddings are not guaranteed to be normalized, so this reports
 * NEVER to force downstream normalization.
 *
 * @returns NormalizationStatus.NEVER — be conservative and always normalize.
 */
override getNormalizationStatus(): NormalizationStatus {
  return NormalizationStatus.NEVER; // Be conservative and always normalize
}
}