Interface for RAG (Retrieval-Augmented Generation) pipelines.

RAG pipelines orchestrate the process of ingesting documents, storing them with embeddings, retrieving relevant context, and generating responses augmented with that context.

const pipeline = new RAGPipeline({
  llmProvider: openAIProvider,
  vectorStore: new InMemoryVectorStore(openAIProvider),
  chunker: new RecursiveTextChunker({ chunkSize: 1000, chunkOverlap: 200 }),
  embeddingModel: 'text-embedding-3-small',
  generationModel: 'gpt-4-turbo'
});

// Ingest knowledge base
await pipeline.ingest([
  { id: 'doc1', content: 'Product manual content...' },
  { id: 'doc2', content: 'FAQ content...' }
], { chunk: true });

// Query with context
const result = await pipeline.query(
  'How do I reset my password?',
  { limit: 3, minScore: 0.7 }
);

console.log(result.response);
console.log(`Used ${result.context.length} documents for context`);

interface RAGPipeline {
    config: RAGPipelineConfig;
    ingest(
        documents: Document[],
        options?: IngestOptions,
    ): Promise<IngestResult>;
    retrieve(
        query: string,
        options?: RetrievalOptions,
    ): Promise<VectorSearchResult[]>;
    query(
        query: string,
        options?: RetrievalOptions,
        systemPrompt?: string,
    ): Promise<RAGResult>;
    clear(): Promise<void>;
    getStatistics(): Promise<RAGPipelineStatistics>;
}
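
Beyond the ingest-and-query flow shown above, the interface also exposes retrieval without generation (retrieve) and an optional system prompt on query. A brief sketch of both, reusing the pipeline from the example; the score and document fields on VectorSearchResult are assumptions, since the exact result shape is not shown here.

// Retrieve matching chunks only, without calling the generation model
const matches = await pipeline.retrieve('password reset steps', { limit: 5, minScore: 0.5 });
for (const match of matches) {
  // `score` and `document.content` are assumed fields on VectorSearchResult
  console.log(match.score, match.document.content);
}

// Steer generation with a custom system prompt (third argument of query)
const support = await pipeline.query(
  'How do I reset my password?',
  { limit: 3, minScore: 0.7 },
  'You are a support agent. Answer using only the provided context.'
);
console.log(support.response);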

Methods

ingest(documents, options?) - Ingest documents, optionally chunking them, and store them with their embeddings.
retrieve(query, options?) - Return the stored chunks most relevant to a query.
query(query, options?, systemPrompt?) - Retrieve relevant context and generate a response augmented with it.
clear() - Remove all ingested content from the pipeline.
getStatistics() - Report statistics about the pipeline's current contents (see the sketch below).

Properties

config: RAGPipelineConfig - Configuration for this pipeline.
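
The maintenance methods listed above can be sketched the same way; the documentCount and chunkCount fields on RAGPipelineStatistics are assumptions, as the statistics shape is not documented here.

// Inspect what the pipeline currently holds
const stats = await pipeline.getStatistics();
// `documentCount` and `chunkCount` are assumed fields on RAGPipelineStatistics
console.log(`Documents: ${stats.documentCount}, chunks: ${stats.chunkCount}`);

// Drop all stored documents and embeddings before re-ingesting
await pipeline.clear();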