# Forgetless

**forgetless** — v0.1.0

Context optimization for LLMs. Compress massive content to fit your token budget.

## Usage

```rust
use forgetless::{Forgetless, Config, WithPriority};

let result = Forgetless::new()
    .config(Config::default().context_limit(128_000))
    .add(WithPriority::critical(&system_prompt))
    .add(&conversation)
    .add_file("research.pdf")
    .add_file("diagram.png")
    .query("What are the key findings?")
    .run()
    .await?;

// Send the compressed context to your LLM
let response = llm.chat(&result.content).await?;
```
Example output:

```text
Input:       1,847,291 tokens
Output:        127,843 tokens
Compression:     14.5x
```
## Documentation