# Configuration

Customize forgetless behavior with `Config`.

## Config

```rust
use forgetless::{Forgetless, Config};

let result = Forgetless::new()
    .config(Config::default()
        .context_limit(128_000) // Max output tokens
        .vision_llm(true)       // LLM for image descriptions
        .context_llm(true)      // LLM for scoring/summarization
        .chunk_size(256)        // Target chunk size
        .parallel(true)         // Parallel processing
        .cache(true))           // Embedding cache
    .add_file("document.pdf")
    .run()
    .await?;
```
| Option | Type | Default | Description |
|---|---|---|---|
| `context_limit` | `usize` | `128,000` | Maximum output tokens |
| `vision_llm` | `bool` | `false` | Use local LLM for image descriptions |
| `context_llm` | `bool` | `false` | Use local LLM for smart scoring |
| `chunk_size` | `usize` | `512` | Target chunk size in tokens |
| `parallel` | `bool` | `true` | Enable parallel file processing |
| `cache` | `bool` | `true` | Enable embedding cache |

## LLM Options

Local LLM processing is optional and disabled by default for speed.

```rust
use forgetless::{Forgetless, Config};

// Fast mode (default) - no LLM, instant processing
let result = Forgetless::new()
    .config(Config::default().context_limit(128_000))
    .add_file("image.png") // Returns: "[Image: image.png (1920x1080)]"
    .run()
    .await?;

// With vision LLM - generates image descriptions
let result = Forgetless::new()
    .config(Config::default()
        .context_limit(128_000)
        .vision_llm(true))
    .add_file("image.png") // Returns: "A diagram showing..."
    .run()
    .await?;

// With context LLM - smart scoring and summarization
let result = Forgetless::new()
    .config(Config::default()
        .context_limit(128_000)
        .context_llm(true))
    .add(&massive_content)
    .run()
    .await?;
```

First run downloads models (~500MB). Uses SmolVLM-256M for vision.

## Chunk Size

```rust
use forgetless::{Forgetless, Config};

// Smaller chunks for more granular selection
let result = Forgetless::new()
    .config(Config::default()
        .context_limit(128_000)
        .chunk_size(256))
    .add(content)
    .run()
    .await?;

// Larger chunks for faster processing
let result = Forgetless::new()
    .config(Config::default()
        .context_limit(128_000)
        .chunk_size(1024))
    .add(content)
    .run()
    .await?;
```