Type-Safe Everything
Full TypeScript support with inferred types throughout your LLM chains. No more guessing what data you're working with.
A TypeScript package providing simplified base components that make LLM-powered applications easier to build and maintain.
import { createLlmExecutor, useLlm, createParser } from "llm-exe";
const classifier = createLlmExecutor({
llm: useLlm("openai.gpt-4o-mini"),
prompt: Classify as bug/feature/question: {{text}},
parser: createParser("enum", {
values: ["bug", "feature", "question"]
})
});
// Type-safe, reliable, production-ready ✨
const category = await classifier.execute({ text: userInput });
// Every LLM project starts like this...
// (Counter-example: raw SDK usage shown for contrast with llm-exe.)
const response = await openai.chat.completions.create({
  model: "gpt-4",
  messages: [{ role: "user", content: makePrompt(data) }],
  response_format: { type: "json_object" },
});
// content may be null per the SDK types — not handled here.
const text = response.choices[0].message.content;
const parsed = JSON.parse(text); // 🤞 hope it's valid JSON
// Type safety? lol?
const category = parsed.category; // any
const items = parsed.items; // undefined? array? who knows
// Oh right, need to validate this somehow
if (!["bug", "feature", "question"].includes(category)) {
  // Model hallucinated a new category. Now what?
}
// TODO: Add retries
// TODO: Add tests
// TODO: Switch to Claude when this fails
import {
useLlm,
createChatPrompt,
createParser,
createLlmExecutor,
} from "llm-exe";
// Define once, use everywhere: a plain async function that wraps an
// llm-exe executor, so callers never see LLM plumbing.
async function llmClassifier(text: string) {
  const executor = createLlmExecutor({
    llm: useLlm("openai.gpt-4o-mini"),
    prompt: createChatPrompt<{ text: string }>(
      "Classify this as 'bug', 'feature', or 'question': {{text}}"
    ),
    parser: createParser("stringExtract", {
      enum: ["bug", "feature", "question"],
    }),
  });
  return executor.execute({ text });
}

// It's just a typed function now
const category = await llmClassifier(userInput);
// category is typed as "bug" | "feature" | "question" ✨
// Each piece does one thing well
// Executors are small single-purpose units that compose via plain awaits.
const summarizer = createLlmExecutor({
  llm: useLlm("openai.gpt-4o-mini"),
  prompt: createChatPrompt("Summarize: {{text}}"),
  parser: createParser("string"),
});
// A second executor on a different provider (Anthropic vs OpenAI above) —
// same shape, only the useLlm() identifier differs.
const translator = createLlmExecutor({
  llm: useLlm("anthropic.claude-3-5-haiku"),
  prompt: createChatPrompt("Translate to {{language}}: {{text}}"),
  parser: createParser("string"),
});
// Combine them naturally
// The output of one executor feeds straight into the next as typed input.
const summary = await summarizer.execute({ text: article });
const spanish = await translator.execute({
  text: summary,
  language: "Spanish",
});
// Executor with a JSON schema parser plus an options object (second
// argument) for operational concerns: retries, timeout, lifecycle hooks.
const analyst = createLlmExecutor(
  {
    llm: useLlm("openai.gpt-4o"),
    prompt: createChatPrompt<{ data: any }>(
      "Analyze this data and return insights as JSON: {{data}}"
    ),
    parser: createParser("json", {
      schema: {
        insights: { type: "array", items: { type: "string" } },
        score: { type: "number", min: 0, max: 100 },
      },
    }),
  },
  {
    // Built-in retry, timeout, hooks
    maxRetries: 3,
    timeout: 30000, // presumably milliseconds (30s) — confirm against llm-exe docs
    hooks: {
      onSuccess: (result) => logger.info("Analysis complete", result),
      onError: (error) => logger.error("Analysis failed", error),
    },
  }
);
// Guaranteed to match schema or throw
const { insights, score } = await analyst.execute({ data: salesData });
// You can also bind events to an executor!
// NOTE(review): this logs the same message as the onSuccess hook above —
// in real code pick one mechanism to avoid duplicate log lines.
analyst.on("complete", (result) => {
  logger.info("Analysis complete", result);
});
import { createCallableExecutor, useExecutors } from "llm-exe";
// Your existing code becomes LLM-callable
// Wrap an ordinary async handler so the LLM can invoke it by name.
const queryDB = createCallableExecutor({
  name: "query_database",
  description: "Query our PostgreSQL database",
  input: "SQL query to execute",
  handler: async ({ input }) => {
    // NOTE(review): this executes LLM-authored SQL verbatim — lock the DB
    // role down to read-only / least privilege before shipping this pattern.
    const results = await db.query(input); // Your actual DB!
    return { result: results.rows };
  },
});
const sendEmail = createCallableExecutor({
  name: "send_email",
  description: "Send email via our email service",
  input: "JSON with 'to', 'subject', 'body'",
  handler: async ({ input }) => {
    // NOTE(review): JSON.parse will throw on malformed LLM output; a real
    // handler should validate and return an error result instead.
    const { to, subject, body } = JSON.parse(input);
    await emailService.send({ to, subject, body }); // Real emails!
    return { result: "Email sent successfully" };
  },
});
// Let the LLM use your tools
const assistant = createLlmExecutor({
  llm: useLlm("openai.gpt-4o"),
  prompt: createChatPrompt(`Help the user with their request.
You can query the database and send emails.`),
  parser: createParser("json"),
});
const tools = useExecutors([queryDB, sendEmail]);
// LLM decides what to do and calls YOUR functions
const plan = await assistant.execute({
  request: "Send our top 5 customers a thank you email",
});
// LLM might return: { action: "query_database", input: "SELECT email FROM customers ORDER BY revenue DESC LIMIT 5" }
// assumes plan has string `action` and `input` fields — the "json" parser
// output is not schema-constrained here; verify before dispatching.
const result = await tools.callFunction(plan.action, plan.input);
// Change ONE line to switch providers
// (model identifiers are illustrative — check each provider's docs for
// currently available model names)
const llm = useLlm("openai.gpt-4o");
// const llm = useLlm("anthropic.claude-3-5-sonnet");
// const llm = useLlm("google.gemini-2.0-flash");
// const llm = useLlm("xai.grok-2");
// const llm = useLlm("ollama.llama-3.3-70b");
// Everything else stays exactly the same ✨
Stop wrestling with LLM APIs. Start shipping AI features that actually work.