Overview
This page contains detailed examples showing how to use Lucidic’s TypeScript SDK in real-world scenarios. All examples assume you’ve already set up your credentials.
Basic Session Management
import * as lai from 'lucidicai';
// ESM: import the provider at module scope and pass the module reference for instrumentation
import OpenAI from 'openai';

async function basicExample() {
  await lai.init({
    sessionName: "Document Processor",
    instrumentModules: { OpenAI },
    task: "Process and summarize documents"
  });
  const openai = new OpenAI();

  // Create a step
  await lai.createStep();

  // Your logic here
  const document = await readDocument();

  // LLM calls are auto-tracked
  const response = await openai.chat.completions.create({
    model: "gpt-4",
    messages: [
      { role: "system", content: "Summarize the document" },
      { role: "user", content: document }
    ]
  });

  // End step with evaluation
  await lai.endStep({
    evalScore: 95,
    evalDescription: "Successfully summarized"
  });

  // End session
  await lai.endSession({
    isSuccessful: true,
    isSuccessfulReason: "Document processed successfully"
  });
}
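The readDocument helper above is not part of the SDK; it stands in for your own document-loading logic. A minimal sketch, assuming the document is a plain text file on disk:
import { readFile } from 'fs/promises';

// Hypothetical helper assumed by the example above: load the document text from disk.
async function readDocument(path = 'document.txt'): Promise<string> {
  return readFile(path, 'utf-8');
}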
Error Handling Patterns
import * as lai from 'lucidicai';
import { APIError } from 'lucidicai';

async function robustWorkflow() {
  try {
    const { Anthropic } = await import('@anthropic-ai/sdk');
    await lai.init({
      sessionName: "Fault-tolerant workflow",
      instrumentModules: { anthropic: Anthropic }
    });
    const anthropic = new Anthropic();

    try {
      await lai.createStep();

      // Risky operation
      const result = await riskyOperation();

      await lai.endStep({
        evalScore: 100,
        evalDescription: "Success"
      });
    } catch (error) {
      // Step failed - mark it
      await lai.endStep({
        evalScore: 0,
        evalDescription: `Failed: ${error.message}`
      });
      throw error;
    }
  } catch (error) {
    if (error instanceof APIError) {
      console.error("API error:", error.message);
    }

    // Ensure session ends even on error
    try {
      await lai.endSession({
        isSuccessful: false,
        isSuccessfulReason: `Error: ${error.message}`
      });
    } catch (endError) {
      console.error("Failed to end session:", endError);
    }
  }
}
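The inner try/catch above is a pattern you will repeat often; one way to factor it out is a small wrapper. This is a sketch built only on the createStep/endStep calls shown above, not an SDK API:
import * as lai from 'lucidicai';

// Run a unit of work inside a step, scoring it based on whether it threw.
async function withStep<T>(work: () => Promise<T>): Promise<T> {
  await lai.createStep();
  try {
    const result = await work();
    await lai.endStep({ evalScore: 100, evalDescription: "Success" });
    return result;
  } catch (error: any) {
    await lai.endStep({ evalScore: 0, evalDescription: `Failed: ${error.message}` });
    throw error;
  }
}
// Usage: const result = await withStep(() => riskyOperation());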
Working with Events
import * as lai from 'lucidicai';

async function eventTrackingExample() {
  await lai.init({ sessionName: "Event tracking demo" });
  await lai.createStep();

  // Create manual event for non-LLM operation
  const dbEvent = await lai.createEvent({
    description: "Database query",
    model: "postgresql"
  });

  // Perform database operation
  const users = await db.query("SELECT * FROM users");

  // Update event with results
  await lai.updateEvent({
    eventId: dbEvent.eventId,
    result: `Found ${users.length} users`,
    costAdded: 0.001
  });

  // Create event for external API
  const apiEvent = await lai.createEvent({
    description: "Weather API call"
  });

  const weather = await fetchWeatherData();

  await lai.updateEvent({
    eventId: apiEvent.eventId,
    result: JSON.stringify(weather),
    costAdded: 0.0001
  });

  await lai.endStep();
  await lai.endSession();
}
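Each createEvent/updateEvent pair follows the same shape, so you may prefer a small wrapper. A sketch that assumes only the two SDK calls used above:
import * as lai from 'lucidicai';

// Wrap any non-LLM operation in a manual event and record its result.
async function trackedOperation<T>(
  description: string,
  operation: () => Promise<T>,
  costAdded = 0
): Promise<T> {
  const event = await lai.createEvent({ description });
  const result = await operation();
  await lai.updateEvent({
    eventId: event.eventId,
    result: JSON.stringify(result),
    costAdded
  });
  return result;
}
// Usage: const weather = await trackedOperation("Weather API call", fetchWeatherData, 0.0001);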
Long-Running Workflows
import * as lai from 'lucidicai';

async function longRunningPipeline(documentBatch: string[]) {
  // Phase 1: Initialize and preprocess
  const sessionId = await lai.init({
    sessionName: "Document batch processing",
    task: `Process ${documentBatch.length} documents`
  });

  await lai.createStep();
  const validDocs = await preprocessDocuments(documentBatch);
  await lai.endStep({
    evalScore: 100,
    evalDescription: `${validDocs.length} documents ready`
  });

  // Save progress
  await saveCheckpoint({ sessionId, validDocs });
  await lai.endSession();

  // Phase 2: Process each document (could be a separate job)
  await lai.init({ sessionId });

  for (const [index, doc] of validDocs.entries()) {
    await lai.createStep();
    try {
      await processDocument(doc);
      await lai.endStep({ evalScore: 100 });
    } catch (error) {
      await lai.endStep({
        evalScore: 0,
        evalDescription: error.message
      });
    }
  }

  await lai.endSession({
    isSuccessful: true,
    isSuccessfulReason: "Batch processing completed"
  });
}
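saveCheckpoint is assumed above, not part of the SDK. A minimal sketch that persists the session id and the preprocessed batch to disk so a later job can resume with lai.init({ sessionId }):
import { readFile, writeFile } from 'fs/promises';

// Hypothetical checkpoint helpers assumed by the pipeline above.
interface Checkpoint {
  sessionId: string;
  validDocs: string[];
}

async function saveCheckpoint(state: Checkpoint): Promise<void> {
  await writeFile('checkpoint.json', JSON.stringify(state));
}

async function loadCheckpoint(): Promise<Checkpoint> {
  return JSON.parse(await readFile('checkpoint.json', 'utf-8'));
}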
Multimodal Tracking
import * as lai from 'lucidicai';
import { readFile } from 'fs/promises';

async function multimodalExample() {
  await lai.init({
    sessionName: "Image analysis workflow",
    providers: ["openai"]
  });

  const OpenAI = (await import('openai')).default;
  const openai = new OpenAI();

  // Image data URLs in prompts are auto-extracted and linked; no extra context manager is needed.
  // (If you need to scope sessions across async roots, see the Session Context docs.)
  await lai.createStep();

  // Upload image: convert it to a data URL and pass it in the OpenAI call or event screenshots
  const imageBuffer = await readFile('chart.png');
  const imageUrl = 'data:image/png;base64,' + imageBuffer.toString('base64');

  // Create event with screenshot
  await lai.createEvent({
    description: "Visual analysis",
    screenshots: [imageUrl]
  });

  // Use vision model
  const response = await openai.chat.completions.create({
    model: "gpt-4-vision-preview",
    messages: [
      {
        role: "user",
        content: [
          { type: "text", text: "Describe this chart" },
          { type: "image_url", image_url: { url: imageUrl } }
        ]
      }
    ]
  });

  await lai.endStep({
    evalScore: 100,
    evalDescription: "Successfully analyzed image"
  });

  await lai.endSession();
}
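The inline conversion from file to data URL can be pulled into a small helper. A sketch; the extension-to-MIME mapping is an assumption, so adjust it for your image formats:
import { readFile } from 'fs/promises';
import { extname } from 'path';

// Hypothetical helper: read an image from disk and return a data URL
// usable in vision prompts or event screenshots.
async function fileToDataUrl(path: string): Promise<string> {
  const ext = extname(path).toLowerCase();
  const mime = ext === '.jpg' || ext === '.jpeg' ? 'image/jpeg' : 'image/png';
  const base64 = (await readFile(path)).toString('base64');
  return `data:${mime};base64,${base64}`;
}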
Mass Simulation
import * as lai from 'lucidicai';

async function runMassSimulation() {
  await lai.init();

  // Define variations to test
  const prompts = [
    "Be concise",
    "Be detailed",
    "Be technical"
  ];
  const models = ["gpt-3.5-turbo", "gpt-4"];

  // Run simulation
  await lai.runMassSimulation({
    sessionBaseName: "Prompt comparison",
    numSessions: prompts.length * models.length,
    sessionFunction: async () => {
      for (const prompt of prompts) {
        for (const model of models) {
          await runSingleTest(prompt, model);
        }
      }
    }
  });
}

async function runSingleTest(systemPrompt: string, model: string) {
  await lai.init({
    sessionName: `${model} - ${systemPrompt}`,
    tags: ["simulation", model, systemPrompt]
  });
  await lai.createStep();

  const OpenAI = (await import('openai')).default;
  const openai = new OpenAI();

  const response = await openai.chat.completions.create({
    model,
    messages: [
      { role: "system", content: systemPrompt },
      { role: "user", content: "Explain quantum computing" }
    ]
  });

  // Evaluate based on criteria
  const score = evaluateResponse(response.choices[0].message.content);

  await lai.endStep({
    evalScore: score,
    evalDescription: `${systemPrompt} scoring`
  });
  await lai.endSession({ isSuccessful: true });
}
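evaluateResponse is left undefined above and stands in for your own quality criteria. A purely illustrative heuristic that scores on length and keyword coverage:
// Hypothetical scoring heuristic assumed by runSingleTest above; replace with your own criteria.
function evaluateResponse(content: string | null | undefined): number {
  if (!content) return 0;
  const keywords = ["qubit", "superposition", "entanglement"];
  const hits = keywords.filter(k => content.toLowerCase().includes(k)).length;
  const lengthScore = Math.min(content.length / 500, 1) * 40;
  return Math.round(lengthScore + (hits / keywords.length) * 60);
}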
Authentication Patterns
Using Environment Variables
// Set these in your environment:
// LUCIDIC_API_KEY=your-api-key
// LUCIDIC_AGENT_ID=your-agent-id
import * as lai from 'lucidicai';
await lai.init({
  sessionName: "My Session",
  providers: ["openai"]
});
Using dotenv
import * as lai from 'lucidicai';
import dotenv from 'dotenv';
dotenv.config();
await lai.init({
  sessionName: "My Session",
  providers: ["anthropic"]
});
Direct Configuration
await lai.init({
  apiKey: "your-api-key",
  agentId: "your-agent-id",
  sessionName: "My Session",
  providers: ["openai"]
});
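Whichever pattern you choose, it can help to fail fast before calling lai.init() when credentials are missing. A small sketch for the environment-variable patterns above:
// Verify Lucidic credentials are present before initializing the SDK.
function assertLucidicEnv(): void {
  for (const name of ['LUCIDIC_API_KEY', 'LUCIDIC_AGENT_ID']) {
    if (!process.env[name]) {
      throw new Error(`Missing required environment variable: ${name}`);
    }
  }
}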
Production Best Practices
import * as lai from 'lucidicai';

async function productionWorkflow() {
  // Custom masking function
  const maskSSN = (text: string) => {
    return text.replace(/\b\d{3}-\d{2}-\d{4}\b/g, 'XXX-XX-XXXX');
  };

  await lai.init({
    sessionName: "Production agent",
    providers: ["openai", "anthropic"],
    maskingFunction: maskSSN,
    autoEnd: true,  // Ensure cleanup on crashes
    debug: false    // Disable debug logs
  });

  // Set production tags
  await lai.updateSession(
    undefined, // Don't change task
    {
      environment: "production",
      version: "1.2.3",
      region: "us-west-2"
    }
  );

  // Your production logic with proper error handling
  try {
    await processUserRequest();
  } finally {
    // Ensure session ends even if an error occurs
    await lai.endSession();
  }
}
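If you want explicit control in addition to autoEnd, you can also end the session when the process receives a termination signal. A sketch using standard Node signal handling and only the endSession call shown above:
import * as lai from 'lucidicai';

// End the active session on SIGINT/SIGTERM before exiting.
for (const signal of ['SIGINT', 'SIGTERM'] as const) {
  process.once(signal, async () => {
    try {
      await lai.endSession({ isSuccessful: false, isSuccessfulReason: `Received ${signal}` });
    } finally {
      process.exit(0);
    }
  });
}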
Next Steps
- Learn about Core Concepts
- Explore Provider Integrations
- Read the API Reference
- Understand Advanced Features