Modular
Write functions powered by LLMs with easy-to-use building blocks.
A package that provides simplified base components to make building and maintaining LLM-powered applications easier.
import * as llmExe from "llm-exe";
/**
* Define a yes/no llm-powered function
*/
/**
 * A yes/no LLM-powered classifier.
 *
 * Wraps a fixed "answer only yes or no" instruction around the caller's
 * question, runs it through the configured model, and extracts the reply
 * as one of the two allowed strings.
 *
 * @param input - The question to ask the model.
 * @returns The parsed model reply, constrained to "yes" or "no".
 */
export async function YesOrNoBot<I extends string>(input: I) {
  // Model that backs this function.
  const llm = llmExe.useLlm("openai.gpt-4o-mini");

  // Parser restricts the raw completion to exactly one of these values.
  const parser = llmExe.createParser("stringExtract", { enum: ["yes", "no"] });

  const instruction = `You are not an assistant, I need you to reply with only
'yes' or 'no' as an answer to the question below. Do not explain yourself
or ask questions. Answer with only yes or no.`;

  // System instruction first, then the question, then a final nudge.
  const prompt = llmExe
    .createChatPrompt(instruction)
    .addUserMessage(input)
    .addUserMessage(`yes or no:`);

  const executor = llmExe.createLlmExecutor({ llm, prompt, parser });
  return executor.execute({ input });
}
// Example usage (top-level await): each call resolves to "yes" or "no".
const isTheSkyBlue = await YesOrNoBot(`Can the sky be blue?`)
// yes
const isGrassRed = await YesOrNoBot(`Is grass usually red?`)
// no
To use any of the supported models, all you need to do is change this one line. Make sure you have configured the API keys for the provider you choose.
import * as llmExe from "llm-exe";
/**
* Define a yes/no llm-powered function
*/
/**
 * A yes/no LLM-powered classifier.
 *
 * Identical to the function above; the commented lines below show that
 * swapping providers/models is a one-line change.
 *
 * @param input - The question to ask the model.
 * @returns The parsed model reply, constrained to "yes" or "no".
 */
export async function YesOrNoBot<I extends string>(input: I) {
// Swap the model by replacing this single line:
const llm = llmExe.useLlm("openai.gpt-4o-mini");
// const llm = llmExe.useLlm("openai.gpt-4o"); // or
// const llm = llmExe.useLlm("google.gemini-2.0-flash"); // or
// const llm = llmExe.useLlm("xai.grok-2"); // or
// const llm = llmExe.useLlm("anthropic.claude-3-7-sonnet"); // or
// const llm = llmExe.useLlm("deepseek.chat"); // or
// const llm = llmExe.useLlm("anthropic.claude-3-opus"); // or
// const llm = llmExe.useLlm("google.gemini-1.5-pro"); // or
// ..and many more!
const instruction = `You are not an assistant, I need you to reply with only
'yes' or 'no' as an answer to the question below. Do not explain yourself
or ask questions. Answer with only yes or no.`;
// System instruction first, then the question, then a final nudge.
const prompt = llmExe
.createChatPrompt(instruction)
.addUserMessage(input)
.addUserMessage(`yes or no:`);
// Parser restricts the raw completion to exactly "yes" or "no".
const parser = llmExe.createParser("stringExtract", { enum: ["yes", "no"] });
return llmExe.createLlmExecutor({ llm, prompt, parser }).execute({ input });
}