Overview
The getPrompt function retrieves prompts stored in the Lucidic platform, enabling centralized prompt management across your applications.
Syntax
await lai.getPrompt({
promptName: string,
variables?: Record<string, any>,
cacheTtl?: number, // seconds; -1 = forever; 0 = no cache
label?: string, // default 'production'
}): Promise<string>
Parameters
promptName (string, required)
The name/identifier of the prompt to retrieve.
variables (Record<string, any>, optional)
Variables to substitute into the prompt (e.g., { name: 'Alice' } replaces {{name}}).
cacheTtl (number, optional, default: 300)
Cache TTL in seconds. Use -1 to cache forever, 0 to disable caching.
label (string, optional, default: 'production')
Prompt label/version to fetch.
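Putting the optional parameters together, a minimal sketch (the prompt name and variable are reused from the examples below; the 10-minute TTL is only an illustration):
// Substitute {{name}}, cache for 10 minutes, and pin the 'production' label
const greeting = await lai.getPrompt({
  promptName: "customer_support_greeting",
  variables: { name: "Alice" },
  cacheTtl: 600,
  label: "production",
});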
Returns
Returns a Promise that resolves to the prompt content as a string.
Examples
Basic Usage
// Retrieve a prompt
const prompt = await lai.getPrompt({ promptName: "customer_support_greeting" });
// Use with OpenAI
const response = await openai.chat.completions.create({
model: "gpt-4",
messages: [
{ role: "system", content: prompt },
{ role: "user", content: userMessage }
]
});
With Cache Control
// Force fresh fetch (bypass cache)
const latestPrompt = await lai.getPrompt({ promptName: "dynamic_prompt", cacheTtl: 0 });
// Cache for 5 minutes (300 seconds, which is also the default TTL)
const cachedPrompt = await lai.getPrompt({ promptName: "static_prompt", cacheTtl: 300 });
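Since -1 caches indefinitely, prompts that never change during a process's lifetime can be fetched once and then served from cache (the prompt name is illustrative):
// Cache forever for prompts that are stable at runtime
const stablePrompt = await lai.getPrompt({ promptName: "system_instructions", cacheTtl: -1 });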
Error Handling
let prompt: string;
try {
  prompt = await lai.getPrompt({ promptName: "missing_prompt" });
} catch (error) {
  console.error("Prompt not found:", error);
  // Fall back to a default prompt
  prompt = "You are a helpful assistant.";
}
Prompt Versioning Pattern
// Use environment-specific prompts
const environment = process.env.NODE_ENV || "development";
const promptName = `assistant_prompt_${environment}`;
const prompt = await lai.getPrompt({ promptName });
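The label parameter offers an alternative to encoding the environment in the prompt name; a sketch assuming a 'staging' label exists alongside the default 'production' label:
// Fetch a labeled version of the same prompt instead of a per-environment name
const labeledPrompt = await lai.getPrompt({
  promptName: "assistant_prompt",
  label: process.env.NODE_ENV === "production" ? "production" : "staging",
});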
Multiple Prompts
// Load multiple prompts
const [systemPrompt, userPrompt, errorPrompt] = await Promise.all([
  lai.getPrompt({ promptName: "system_instructions" }),
  lai.getPrompt({ promptName: "user_template" }),
  lai.getPrompt({ promptName: "error_handler" })
]);
// Use in conversation
const messages = [
{ role: "system", content: systemPrompt },
{ role: "user", content: userPrompt.replace("{{input}}", userInput) }
];
Common Patterns
1. Template Replacement
const prompt = await lai.getPrompt({
promptName: "email_template",
variables: {
customerName: "John Doe",
product: "Premium Plan",
date: new Date().toLocaleDateString()
}
});
2. Prompt Caching Strategy
class PromptManager {
private cache = new Map<string, { prompt: string; timestamp: number }>();
private cacheTTL = 5 * 60 * 1000; // 5 minutes
async getPrompt(name: string): Promise<string> {
const cached = this.cache.get(name);
if (cached && Date.now() - cached.timestamp < this.cacheTTL) {
return cached.prompt;
}
// Force fresh fetch for expired cache
const prompt = await lai.getPrompt({ promptName: name, cacheTtl: 0 });
this.cache.set(name, { prompt, timestamp: Date.now() });
return prompt;
}
}
3. Fallback Prompts
async function getPromptWithFallback(
primaryName: string,
fallbackName: string
): Promise<string> {
try {
return await lai.getPrompt({ promptName: primaryName });
} catch (error) {
console.warn(`Primary prompt ${primaryName} not found, using fallback`);
try {
return await lai.getPrompt({ promptName: fallbackName });
} catch (fallbackError) {
// Ultimate fallback
return "You are a helpful AI assistant.";
}
}
}
Integration Example
class AIAssistant {
private openai: any;
async initialize() {
const OpenAI = (await import('openai')).default;
await lai.init({ instrumentModules: { OpenAI } });
this.openai = new OpenAI();
}
async respond(userInput: string, context?: string): Promise<string> {
// Get prompts from platform
const systemPrompt = await lai.getPrompt({ promptName: "assistant_system" });
const contextPrompt = context
  ? await lai.getPrompt({ promptName: "assistant_with_context" })
  : "";
const messages = [
{ role: "system", content: systemPrompt },
];
if (context && contextPrompt) {
messages.push({
role: "system",
content: contextPrompt.replace("{{context}}", context)
});
}
messages.push({ role: "user", content: userInput });
const response = await this.openai.chat.completions.create({
model: "gpt-4",
messages
});
return response.choices[0].message.content;
}
}
Best Practices
1. Naming Conventions
// Use descriptive, hierarchical names
await lai.getPrompt("support/greeting/enterprise");
await lai.getPrompt("analysis/financial/quarterly");
await lai.getPrompt("generation/email/followup");
2. Error Recovery
// Always have a fallback strategy
async function safeGetPrompt(name: string, defaultPrompt: string): Promise<string> {
try {
return await lai.getPrompt({ promptName: name });
} catch (error) {
await lai.createEvent({
description: "Prompt fetch failed",
result: `Failed to get ${name}: ${error instanceof Error ? error.message : String(error)}`
});
return defaultPrompt;
}
}
// Pre-load critical prompts
async function preloadPrompts(promptNames: string[]) {
const prompts = await Promise.all(
promptNames.map(name => lai.getPrompt({ promptName: name }))
);
return Object.fromEntries(
promptNames.map((name, i) => [name, prompts[i]])
);
}
// Usage at startup
const criticalPrompts = await preloadPrompts([
"system_instructions",
"error_handler",
"greeting_message"
]);
Notes
- Prompts must be created in the Lucidic dashboard first
- Caching improves performance for frequently used prompts
- Prompt names are case-sensitive
- Network errors will throw exceptions
See Also