Pool data fetching can be expensive in terms of RPC calls and latency. This guide shows how to implement intelligent caching strategies that dramatically improve performance while ensuring data freshness for DLMM applications.

Why Caching Matters

Without caching, every pool analysis request triggers multiple expensive RPC calls:
  • Pool metadata: 1-2 RPC calls
  • Bin data (50 bins): 50+ RPC calls
  • Quote calculations: 2-3 RPC calls
  • Total per pool: 55+ RPC calls
With proper caching:
  • First request: 55+ RPC calls
  • Subsequent requests: 0-2 RPC calls
  • Performance improvement: 20-50x faster

Multi-Layer Caching Strategy

Implement a sophisticated caching system with different TTLs for different data types:
// src/services/PoolDataCache.ts
export interface CacheEntry<T> {
  data: T;
  timestamp: number;
  ttl: number;
  hits: number;
}

export interface CacheOptions {
  defaultTTL: number;
  maxSize: number;
  enableMetrics: boolean;
}

export class PoolDataCache {
  private cache: Map<string, CacheEntry<any>>;
  private options: CacheOptions;
  private metrics: {
    hits: number;
    misses: number;
    evictions: number;
  };

  constructor(options: Partial<CacheOptions> = {}) {
    this.cache = new Map();
    this.options = {
      defaultTTL: 30000, // 30 seconds
      maxSize: 1000,
      enableMetrics: true,
      ...options
    };
    this.metrics = { hits: 0, misses: 0, evictions: 0 };
  }

  /**
   * Set data with a TTL chosen per data category (or an explicit override)
   */
  set<T>(
    key: string,
    data: T,
    customTTL?: number,
    category: 'metadata' | 'state' | 'bins' | 'quotes' = 'state'
  ): void {
    // Different TTLs for different data types
    const ttlMap = {
      metadata: 300000, // 5 minutes (rarely changes)
      state: 30000,     // 30 seconds (moderate changes)
      bins: 60000,      // 1 minute (changes with new positions)
      quotes: 10000     // 10 seconds (changes frequently)
    };

    const ttl = customTTL ?? ttlMap[category] ?? this.options.defaultTTL;

    // Evict the oldest entry only when adding a new key would exceed the limit
    if (!this.cache.has(key) && this.cache.size >= this.options.maxSize) {
      this.evictOldest();
    }

    const entry: CacheEntry<T> = {
      data,
      timestamp: Date.now(),
      ttl,
      hits: 0
    };
    this.cache.set(key, entry);

    // Auto-cleanup after TTL; skip if the key was overwritten in the meantime,
    // so a stale timer never deletes a fresh entry
    setTimeout(() => {
      if (this.cache.get(key) === entry) {
        this.cache.delete(key);
      }
    }, ttl);
  }

  /**
   * Get data with automatic expiration handling
   */
  get<T>(key: string): T | null {
    const entry = this.cache.get(key);

    if (!entry) {
      if (this.options.enableMetrics) this.metrics.misses++;
      return null;
    }

    // Check if expired
    if (Date.now() - entry.timestamp > entry.ttl) {
      this.cache.delete(key);
      if (this.options.enableMetrics) this.metrics.misses++;
      return null;
    }

    // Update hit count and metrics
    entry.hits++;
    if (this.options.enableMetrics) this.metrics.hits++;

    return entry.data as T;
  }

  /**
   * Check if key exists and is valid
   */
  has(key: string): boolean {
    return this.get(key) !== null;
  }

  /**
   * Invalidate specific key or pattern
   */
  invalidate(keyOrPattern: string): number {
    if (keyOrPattern.includes('*')) {
      // Pattern matching: escape regex metacharacters, then expand every
      // '*' wildcard so multi-wildcard patterns like `*address*` work
      const pattern = keyOrPattern
        .replace(/[.+?^${}()|[\]\\]/g, '\\$&')
        .replace(/\*/g, '.*');
      const regex = new RegExp(`^${pattern}$`);
      let deleted = 0;

      for (const key of this.cache.keys()) {
        if (regex.test(key)) {
          this.cache.delete(key);
          deleted++;
        }
      }

      return deleted;
    } else {
      // Exact key
      return this.cache.delete(keyOrPattern) ? 1 : 0;
    }
  }

  /**
   * Get cache statistics
   */
  getStats() {
    const totalRequests = this.metrics.hits + this.metrics.misses;
    
    return {
      size: this.cache.size,
      maxSize: this.options.maxSize,
      hitRate: totalRequests > 0 ? (this.metrics.hits / totalRequests) * 100 : 0,
      hits: this.metrics.hits,
      misses: this.metrics.misses,
      evictions: this.metrics.evictions,
      memoryEstimate: this.estimateMemoryUsage()
    };
  }

  /**
   * Clear all cached data
   */
  clear(): void {
    this.cache.clear();
    this.metrics = { hits: 0, misses: 0, evictions: 0 };
  }

  private evictOldest(): void {
    let oldestKey: string | null = null;
    let oldestTime = Date.now();

    for (const [key, entry] of this.cache.entries()) {
      if (entry.timestamp < oldestTime) {
        oldestTime = entry.timestamp;
        oldestKey = key;
      }
    }

    if (oldestKey) {
      this.cache.delete(oldestKey);
      this.metrics.evictions++;
    }
  }

  private estimateMemoryUsage(): number {
    let totalSize = 0;

    for (const [key, entry] of this.cache.entries()) {
      totalSize += key.length * 2; // UTF-16: 2 bytes per char
      try {
        totalSize += JSON.stringify(entry.data).length * 2;
      } catch {
        totalSize += 256; // fallback for non-serializable data (e.g. BigInt fields)
      }
      totalSize += 64; // estimated per-entry overhead
    }

    return totalSize;
  }
}
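
A quick standalone sketch of how the cache behaves (the pool address and values below are placeholders):
const cache = new PoolDataCache({ maxSize: 100 });
const pool = 'POOL_ADDRESS'; // placeholder

// Metadata gets the long 5-minute TTL; quotes expire after 10 seconds
cache.set(`pool-info-${pool}`, { name: 'SOL/USDC' }, undefined, 'metadata');
cache.set(`pricing-${pool}`, { midPrice: 148.21 }, undefined, 'quotes');

console.log(cache.get(`pool-info-${pool}`)); // { name: 'SOL/USDC' }
console.log(cache.has(`pricing-${pool}`));   // true until the 10s TTL lapses

// Drop every cached component for this pool in one call
cache.invalidate(`*${pool}*`);
console.log(cache.getStats()); // size, hit rate, memory estimate, ...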

Cached DLMM Data Composer

Integrate caching into your data composition service:
// src/services/CachedDLMMDataComposer.ts
import { DLMMDataComposer } from './DLMMDataComposer';
import { PoolDataCache, CacheOptions } from './PoolDataCache';
import { ComposedPoolData } from '../types/pool';

export class CachedDLMMDataComposer extends DLMMDataComposer {
  private cache: PoolDataCache;
  private enablePartialCaching: boolean;

  constructor(
    rpcUrl: string,
    cacheOptions?: Partial<CacheOptions>,
    enablePartialCaching: boolean = true
  ) {
    super(rpcUrl);
    this.cache = new PoolDataCache(cacheOptions);
    this.enablePartialCaching = enablePartialCaching;
  }

  /**
   * Compose pool data with intelligent caching
   */
  async composePoolData(poolAddress: string): Promise<ComposedPoolData> {
    const cacheKey = `pool-complete-${poolAddress}`;
    
    // Try to get complete cached data first
    const cachedData = this.cache.get<ComposedPoolData>(cacheKey);
    if (cachedData) {
      console.log(`📦 Complete cache hit for ${poolAddress}`);
      return cachedData;
    }

    console.log(`🔄 Cache miss, composing data for ${poolAddress}`);

    // Compose data with partial caching
    if (this.enablePartialCaching) {
      const composedData = await this.composeWithPartialCaching(poolAddress);
      
      // Cache the complete result
      this.cache.set(cacheKey, composedData, undefined, 'state');
      
      return composedData;
    } else {
      // Fallback to non-cached composition
      return super.composePoolData(poolAddress);
    }
  }

  /**
   * Compose data leveraging partial caching for individual components
   */
  private async composeWithPartialCaching(poolAddress: string): Promise<ComposedPoolData> {
    const poolInfoKey = `pool-info-${poolAddress}`;
    const poolStateKey = `pool-state-${poolAddress}`;
    const binDataKey = `bin-data-${poolAddress}`;
    const pricingKey = `pricing-${poolAddress}`;

    // Try to get cached components (typed loosely here; substitute your own
    // component types if you have them)
    let poolInfo = this.cache.get<any>(poolInfoKey);
    let poolState = this.cache.get<any>(poolStateKey);
    let binData = this.cache.get<any>(binDataKey);
    let pricingData = this.cache.get<any>(pricingKey);

    // Track what we need to fetch
    const toFetch = [];
    if (!poolInfo) toFetch.push('info');
    if (!poolState) toFetch.push('state');
    if (!binData) toFetch.push('bins');
    if (!pricingData) toFetch.push('pricing');

    console.log(`🎯 Partial cache: need to fetch [${toFetch.join(', ')}] for ${poolAddress}`);

    // Fetch missing components
    if (toFetch.length > 0) {
      const pool = await this.createPoolInstance(poolAddress);
      
      // Fetch and cache missing components
      if (!poolInfo) {
        poolInfo = await this.composePoolInfo(pool);
        this.cache.set(poolInfoKey, poolInfo, undefined, 'metadata');
      }
      
      if (!poolState) {
        poolState = await this.composePoolState(pool);
        this.cache.set(poolStateKey, poolState, undefined, 'state');
      }
      
      if (!binData) {
        binData = await this.composeLiquidityDistribution(pool);
        this.cache.set(binDataKey, binData, undefined, 'bins');
      }
      
      if (!pricingData) {
        pricingData = await this.composePricingData(pool, poolState.activeId);
        this.cache.set(pricingKey, pricingData, undefined, 'quotes');
      }
    }

    // Combine cached and fresh data
    return {
      pool: poolInfo,
      state: poolState,
      liquidity: binData,
      pricing: pricingData,
      timestamp: Date.now()
    };
  }

  /**
   * Prefetch and cache data for multiple pools
   */
  async prefetchPools(poolAddresses: string[]): Promise<void> {
    console.log(`🚀 Prefetching data for ${poolAddresses.length} pools...`);

    const prefetchPromises = poolAddresses.map(async (poolAddress) => {
      try {
        await this.composePoolData(poolAddress);
        console.log(`✅ Prefetched ${poolAddress}`);
      } catch (error) {
        console.warn(`⚠️ Failed to prefetch ${poolAddress}:`, (error as Error).message);
      }
    });

    await Promise.all(prefetchPromises);
    console.log(`📦 Prefetching completed`);
  }

  /**
   * Invalidate cache for specific pool or all pools
   */
  invalidatePool(poolAddress?: string): number {
    if (poolAddress) {
      return this.cache.invalidate(`*${poolAddress}*`);
    } else {
      this.cache.clear();
      return -1; // Indicates full clear
    }
  }

  /**
   * Get caching performance statistics
   */
  getCacheStats() {
    return this.cache.getStats();
  }

  /**
   * Force refresh pool data (bypass cache)
   */
  async forceRefreshPoolData(poolAddress: string): Promise<ComposedPoolData> {
    console.log(`🔄 Force refreshing ${poolAddress}`);
    
    // Invalidate existing cache
    this.invalidatePool(poolAddress);
    
    // Fetch fresh data
    return this.composePoolData(poolAddress);
  }
}

Advanced Caching Patterns

Background Cache Refresh

Keep cache warm by refreshing data before it expires:
export class BackgroundCacheRefresher {
  private refreshQueue: Map<string, NodeJS.Timeout>;
  private composer: CachedDLMMDataComposer;

  constructor(composer: CachedDLMMDataComposer) {
    this.refreshQueue = new Map();
    this.composer = composer;
  }

  /**
   * Schedule background refresh for a pool
   */
  scheduleRefresh(
    poolAddress: string,
    refreshInterval: number = 25000 // refresh every 25s, just before the 30s state TTL lapses
  ): void {
    // Clear existing timer if any
    const existing = this.refreshQueue.get(poolAddress);
    if (existing) {
      clearInterval(existing);
    }

    // Schedule periodic refresh
    const timer = setInterval(async () => {
      try {
        console.log(`🔄 Background refresh for ${poolAddress}`);
        await this.composer.composePoolData(poolAddress);
      } catch (error) {
        console.warn(`Background refresh failed for ${poolAddress}:`, (error as Error).message);
      }
    }, refreshInterval);

    this.refreshQueue.set(poolAddress, timer);
  }

  /**
   * Stop background refresh for a pool
   */
  stopRefresh(poolAddress: string): void {
    const timer = this.refreshQueue.get(poolAddress);
    if (timer) {
      clearInterval(timer);
      this.refreshQueue.delete(poolAddress);
    }
  }

  /**
   * Stop all background refreshes
   */
  stopAllRefreshes(): void {
    for (const timer of this.refreshQueue.values()) {
      clearInterval(timer);
    }
    this.refreshQueue.clear();
  }
}
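
A brief usage sketch (the pool address is a placeholder); stop the timers when pools leave view so the process can exit cleanly:
const composer = new CachedDLMMDataComposer('https://api.mainnet-beta.solana.com');
const refresher = new BackgroundCacheRefresher(composer);

// Keep this pool's cache warm with a refresh every 25 seconds
refresher.scheduleRefresh('POOL_ADDRESS', 25000);

// ...later, when the pool is no longer displayed:
refresher.stopRefresh('POOL_ADDRESS');

// On shutdown:
refresher.stopAllRefreshes();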

Smart Cache Warming

Intelligently warm cache based on usage patterns:
export class SmartCacheWarmer {
  private usageStats: Map<string, { count: number; lastAccess: number }>;
  private composer: CachedDLMMDataComposer;

  constructor(composer: CachedDLMMDataComposer) {
    this.usageStats = new Map();
    this.composer = composer;
  }

  /**
   * Track pool access for smart warming
   */
  trackPoolAccess(poolAddress: string): void {
    const stats = this.usageStats.get(poolAddress) || { count: 0, lastAccess: 0 };
    stats.count++;
    stats.lastAccess = Date.now();
    this.usageStats.set(poolAddress, stats);
  }

  /**
   * Warm cache for frequently accessed pools
   */
  async warmFrequentPools(threshold: number = 5): Promise<void> {
    const frequentPools = Array.from(this.usageStats.entries())
      .filter(([_, stats]) => stats.count >= threshold)
      .sort((a, b) => b[1].count - a[1].count)
      .slice(0, 10) // Top 10 most accessed
      .map(([poolAddress]) => poolAddress);

    if (frequentPools.length > 0) {
      console.log(`🌡️ Warming cache for ${frequentPools.length} frequent pools`);
      await this.composer.prefetchPools(frequentPools);
    }
  }

  /**
   * Auto-warm based on time patterns
   */
  scheduleIntelligentWarming(): void {
    // Warm cache every 5 minutes during active hours
    setInterval(() => {
      const hour = new Date().getUTCHours();
      
      // Active trading hours (UTC)
      if ((hour >= 13 && hour <= 21) || (hour >= 1 && hour <= 9)) {
        this.warmFrequentPools();
      }
    }, 5 * 60 * 1000);
  }
}
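
The warmer only works if it sees real traffic, so route pool lookups through a small wrapper that records each access, as in this sketch:
const composer = new CachedDLMMDataComposer('https://api.mainnet-beta.solana.com');
const warmer = new SmartCacheWarmer(composer);
warmer.scheduleIntelligentWarming();

// Every lookup feeds the usage statistics that drive warming decisions
async function getPool(poolAddress: string) {
  warmer.trackPoolAccess(poolAddress);
  return composer.composePoolData(poolAddress);
}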

Usage Examples

Basic Cached Implementation

async function analyzePoolWithCaching(poolAddress: string) {
  const composer = new CachedDLMMDataComposer('https://api.mainnet-beta.solana.com');
  
  console.log('First request (cache miss):');
  console.time('First Request');
  const data1 = await composer.composePoolData(poolAddress);
  console.timeEnd('First Request');
  
  console.log('Second request (cache hit):');
  console.time('Second Request');
  const data2 = await composer.composePoolData(poolAddress);
  console.timeEnd('Second Request');
  
  // Show cache performance
  const stats = composer.getCacheStats();
  console.log('Cache Stats:', {
    hitRate: `${stats.hitRate.toFixed(1)}%`,
    size: stats.size,
    memoryEstimate: `${(stats.memoryEstimate / 1024).toFixed(1)} KB`
  });
  
  return data1;
}

Portfolio Dashboard with Smart Caching

class PortfolioDashboard {
  private composer: CachedDLMMDataComposer;
  private backgroundRefresher: BackgroundCacheRefresher;
  private cacheWarmer: SmartCacheWarmer;

  constructor(rpcUrl: string) {
    this.composer = new CachedDLMMDataComposer(rpcUrl, {
      defaultTTL: 30000,
      maxSize: 500,
      enableMetrics: true
    });
    
    this.backgroundRefresher = new BackgroundCacheRefresher(this.composer);
    this.cacheWarmer = new SmartCacheWarmer(this.composer);
    
    // Start intelligent cache warming
    this.cacheWarmer.scheduleIntelligentWarming();
  }

  async loadPortfolio(userPools: string[]): Promise<any> {
    console.log(`📊 Loading portfolio with ${userPools.length} pools...`);

    // Prefetch all pools to warm cache
    await this.composer.prefetchPools(userPools);

    // Schedule background refresh for active pools
    userPools.forEach(pool => {
      this.backgroundRefresher.scheduleRefresh(pool, 25000);
      this.cacheWarmer.trackPoolAccess(pool);
    });

    // Get portfolio data (should be mostly cached)
    const poolData = await Promise.all(
      userPools.map(pool => this.composer.composePoolData(pool))
    );

    // Calculate portfolio metrics
    return this.calculatePortfolioMetrics(poolData);
  }

  async refreshPortfolio(userPools: string[]): Promise<any> {
    console.log('🔄 Refreshing portfolio data...');
    
    // Force refresh all pools
    const refreshPromises = userPools.map(pool => 
      this.composer.forceRefreshPoolData(pool)
    );
    
    const poolData = await Promise.all(refreshPromises);
    return this.calculatePortfolioMetrics(poolData);
  }

  getCachingStats() {
    return this.composer.getCacheStats();
  }

  cleanup() {
    this.backgroundRefresher.stopAllRefreshes();
  }

  private calculatePortfolioMetrics(poolData: any[]) {
    return {
      totalPools: poolData.length,
      totalTVL: poolData.reduce((sum, pool) => sum + pool.liquidity.totalValueLocked, 0),
      averageConcentration: poolData.reduce((sum, pool) => sum + pool.liquidity.concentrationRatio, 0) / poolData.length,
      cachePerformance: this.getCachingStats()
    };
  }
}
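
Driving the dashboard end to end (the pool addresses are placeholders):
async function main() {
  const dashboard = new PortfolioDashboard('https://api.mainnet-beta.solana.com');

  const userPools = ['POOL_ADDRESS_1', 'POOL_ADDRESS_2']; // placeholders
  const portfolio = await dashboard.loadPortfolio(userPools);
  console.log('Portfolio:', portfolio);

  // Stop background refresh timers once the dashboard is torn down
  dashboard.cleanup();
}

main().catch(console.error);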

Performance Monitoring

Track cache performance to optimize TTL values:
function monitorCachePerformance(composer: CachedDLMMDataComposer): NodeJS.Timeout {
  return setInterval(() => {
    const stats = composer.getCacheStats();

    console.log('📊 Cache Performance Report:');
    console.log(`  Hit Rate: ${stats.hitRate.toFixed(1)}%`);
    console.log(`  Cache Size: ${stats.size}/${stats.maxSize}`);
    console.log(`  Memory Usage: ${(stats.memoryEstimate / 1024).toFixed(1)} KB`);

    // Flag TTLs that look mistuned for the observed hit rate
    if (stats.hitRate < 70) {
      console.log('⚠️ Low hit rate - consider increasing TTL');
    } else if (stats.hitRate > 95) {
      console.log('💡 High hit rate - could decrease TTL for fresher data');
    }
  }, 60000); // Report every minute
}
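
Since the function returns its interval handle, a long-running service can shut the monitor down cleanly:
const monitorTimer = monitorCachePerformance(composer); // composer from above

// ...during graceful shutdown:
clearInterval(monitorTimer);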

Best Practices

  1. Layer Your Caching: Different TTLs for different data types (metadata: 5min, quotes: 10s)
  2. Monitor Hit Rates: Aim for 80%+ hit rates for optimal performance
  3. Implement Cache Warming: Proactively refresh frequently accessed data
  4. Handle Cache Misses Gracefully: Always have a fallback to fresh data
  5. Size Limits: Set reasonable cache size limits to prevent memory issues
  6. Invalidation Strategy: Clear cache when data is known to be stale, e.g. after your own transaction lands (see the sketch after this list)
  7. Background Refresh: Keep cache warm during active trading hours
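
For point 6, the usual trigger is one of your own transactions landing: a confirmed swap or liquidity change makes the cached state and quotes for that pool stale by definition. A minimal sketch (the confirmation hook is an assumption; wire in whatever confirmation mechanism your app already uses):
// Hypothetical hook, called after one of our transactions confirms
async function onTransactionConfirmed(
  poolAddress: string,
  composer: CachedDLMMDataComposer
) {
  // Our own trade changed the pool, so every cached component is stale
  composer.invalidatePool(poolAddress);

  // Optionally re-warm right away so the next read is both fast and fresh
  await composer.composePoolData(poolAddress);
}
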
Your DLMM application now has production-ready caching that dramatically improves performance while maintaining data accuracy.