Performance Profiling Fundamentals
Performance profiling is the process of analyzing and measuring the performance characteristics of an application to identify bottlenecks, optimize resource usage, and improve overall efficiency. Understanding profiling fundamentals is crucial for building high-performance applications.
📊 Types of Performance Profiling
⚡ CPU Profiling
Analyze CPU usage and identify computational bottlenecks
// Node.js CPU profiling
// NOTE(review): relies on the third-party `v8-profiler-next` package;
// `performance` is imported here but unused in this snippet.
const { performance } = require('perf_hooks');
const v8 = require('v8-profiler-next');
// Start CPU profiling
// Begins a named V8 CPU profile; pair with stopCPUProfiling(name).
function startCPUProfiling(name = 'cpu-profile') {
console.log('Starting CPU profiling...');
v8.setGenerateType(1); // Generate type information
v8.startProfiling(name, true); // second arg: record samples for the flame graph
return name;
}
// Stop CPU profiling and save
// Stops the named profile and writes `<name>.cpuprofile`, which can be
// loaded in Chrome DevTools. Export is asynchronous; the profile is
// released after the file is written.
function stopCPUProfiling(name) {
console.log('Stopping CPU profiling...');
const profile = v8.stopProfiling(name);
profile.export((error, result) => {
if (error) {
console.error('Error exporting profile:', error);
return;
}
require('fs').writeFileSync(`${name}.cpuprofile`, result);
console.log(`CPU profile saved to ${name}.cpuprofile`);
profile.delete(); // free the profile's memory once persisted
});
}
// Usage example
const profileName = startCPUProfiling('my-app-profile');
// Simulate some work
// Naive exponential-time Fibonacci — deliberately CPU-heavy so the
// profiler has meaningful work to sample.
function fibonacci(n) {
  return n <= 1 ? n : fibonacci(n - 1) + fibonacci(n - 2);
}
// Run CPU-intensive task
// console.time/timeEnd gives a quick wall-clock check alongside the profile.
console.time('fibonacci-calculation');
const result = fibonacci(40); // exponential work — large enough to sample
console.timeEnd('fibonacci-calculation');
stopCPUProfiling(profileName);
// Chrome DevTools CPU profiling
// 1. Open Chrome DevTools (F12)
// 2. Go to Performance tab
// 3. Click record button
// 4. Perform actions to profile
// 5. Stop recording
// 6. Analyze flame graph and timelines
Key Metrics:
- CPU utilization percentage
- Function call frequency
- Time spent in each function
- Call stack depth
- Garbage collection pauses
💾 Memory Profiling
Track memory usage and identify memory leaks
// Node.js memory profiling
const memwatch = require('memwatch-next');
// Monitor memory usage
function monitorMemory() {
const usage = process.memoryUsage();
console.log('Memory Usage:');
console.log(`RSS: ${Math.round(usage.rss / 1024 / 1024)} MB`);
console.log(`Heap Used: ${Math.round(usage.heapUsed / 1024 / 1024)} MB`);
console.log(`Heap Total: ${Math.round(usage.heapTotal / 1024 / 1024)} MB`);
console.log(`External: ${Math.round(usage.external / 1024 / 1024)} MB`);
}
// Track memory leaks
// Heap diffing via memwatch-next: snapshot now, diff later in
// endMemoryTracking(). NOTE(review): memwatch-next is a third-party,
// long-unmaintained native module — verify it builds on the target Node.
let hd = new memwatch.HeapDiff();
function startMemoryTracking() {
hd = new memwatch.HeapDiff(); // reset the baseline snapshot
console.log('Started memory tracking');
}
// Stop tracking: diff the heap against the startMemoryTracking() baseline
// and report growth (possible leak) or shrinkage.
// Fix: the status markers in the log strings were mojibake ('β οΈ', and a
// 'β' split across two lines) — restored to the intended ⚠️ / ✅.
function endMemoryTracking() {
const diff = hd.end();
console.log('Memory diff:', JSON.stringify(diff, null, 2));
// Analyze changes
if (diff.change.size_bytes > 0) {
console.log(`⚠️ Memory leak detected: +${diff.change.size_bytes} bytes`);
} else {
console.log(`✅ Memory usage decreased by ${Math.abs(diff.change.size_bytes)} bytes`);
}
}
// Memory leak detection
// memwatch emits 'leak' after several consecutive heap growths, and
// 'stats' after each full GC.
memwatch.on('leak', (info) => {
console.error('Memory leak detected:', info);
});
memwatch.on('stats', (stats) => {
console.log('GC stats:', stats);
});
// Usage example
startMemoryTracking();
// Create objects that might leak
// ~10 MB of retained strings keeps the heap diff visibly positive.
const objects = [];
for (let i = 0; i < 10000; i++) {
objects.push({
data: 'x'.repeat(1000),
timestamp: Date.now()
});
}
// Force garbage collection (if available)
if (global.gc) {
global.gc();
} else {
console.log('Run with --expose-gc to enable manual GC');
}
monitorMemory();
endMemoryTracking();
// Chrome DevTools Memory profiling
// 1. Open Chrome DevTools
// 2. Go to Memory tab
// 3. Select Heap snapshot
// 4. Take initial snapshot
// 5. Perform actions
// 6. Take another snapshot
// 7. Compare snapshots to find leaks
Key Metrics:
- Heap size and usage
- Memory leak detection
- Garbage collection frequency
- Object allocation patterns
- Retained memory size
🌐 Network Profiling
Analyze network requests and API performance
// Network request profiling
// Profile a single HTTP request: status, header timing, and body-read
// timing. Returns the metrics object; rethrows on network failure.
async function profileNetworkRequest(url, options = {}) {
  const startTime = performance.now();
  try {
    const response = await fetch(url, options);
    const endTime = performance.now();
    const metrics = {
      url,
      method: options.method || 'GET',
      status: response.status,
      responseTime: endTime - startTime, // time until headers arrived
      contentLength: response.headers.get('content-length'),
      contentType: response.headers.get('content-type'),
      cacheHit: response.headers.get('x-cache') === 'HIT'
    };
    // Read response to measure total time
    const responseStart = performance.now();
    await response.text();
    const responseEnd = performance.now();
    metrics.totalTime = responseEnd - startTime;
    metrics.responseReadTime = responseEnd - responseStart;
    // Fix: log AFTER the body read so totalTime/responseReadTime are
    // included (the original logged before they were attached).
    console.log('Network Request Profile:', metrics);
    return metrics;
  } catch (error) {
    const endTime = performance.now();
    console.error('Network request failed:', {
      url,
      error: error.message,
      responseTime: endTime - startTime
    });
    throw error;
  }
}
// API endpoint profiling
// Sequentially profile a list of endpoints against baseUrl and print a
// summary. Failures are recorded in the results, never thrown.
async function profileAPIEndpoint(baseUrl, endpoints) {
  const results = [];
  for (const endpoint of endpoints) {
    console.log(`\nProfiling ${endpoint}...`);
    try {
      const metrics = await profileNetworkRequest(`${baseUrl}${endpoint}`);
      results.push({ endpoint, ...metrics });
    } catch (error) {
      results.push({
        endpoint,
        error: error.message,
        status: 'FAILED'
      });
    }
    // Small delay between requests
    await new Promise(resolve => setTimeout(resolve, 100));
  }
  // Analyze results
  const successful = results.filter(r => r.status !== 'FAILED');
  // Fix: guard against division by zero — the original printed "NaNms"
  // when every request failed.
  const avgResponseTime = successful.length > 0
    ? successful.reduce((sum, r) => sum + r.responseTime, 0) / successful.length
    : 0;
  console.log('\n=== API Profiling Summary ===');
  console.log(`Total endpoints: ${endpoints.length}`);
  console.log(`Successful requests: ${successful.length}`);
  console.log(`Failed requests: ${results.length - successful.length}`);
  console.log(`Average response time: ${avgResponseTime.toFixed(2)}ms`);
  return results;
}
// Usage
const endpoints = [
'/api/users',
'/api/products',
'/api/orders',
'/api/analytics'
];
// NOTE(review): floating promise — attach .catch() (or await it) so a
// failed run doesn't become an unhandled rejection.
profileAPIEndpoint('https://api.example.com', endpoints);
// Browser Network profiling
// 1. Open Chrome DevTools (F12)
// 2. Go to Network tab
// 3. Enable Preserve log
// 4. Perform actions
// 5. Analyze waterfall chart
// 6. Check response times, sizes, and status codes
Key Metrics:
- Response time (TTFB, content download)
- Request/response size
- HTTP status codes
- Cache hit rates
- Connection establishment time
💽 I/O Profiling
Monitor disk and file system operations
// File I/O profiling
const fs = require('fs').promises;
const { performance } = require('perf_hooks');
// Time a single async operation and capture its outcome.
// `operation` is any async function (e.g. fs.writeFile); extra args are
// forwarded. Never throws — failures are reported in the result object.
async function profileFileOperation(operation, ...args) {
  const startTime = performance.now();
  try {
    const result = await operation(...args);
    const endTime = performance.now();
    return {
      operation: operation.name,
      // Truncate long string args (e.g. 1MB payloads) for readable logs.
      args: args.map(arg => typeof arg === 'string' && arg.length > 50 ?
        arg.substring(0, 50) + '...' : arg),
      duration: endTime - startTime,
      success: true,
      // Fix: results without a .length (fs.stat -> Stats object, or
      // undefined from fs.unlink) previously yielded resultSize: undefined.
      resultSize: result?.length ?? 0
    };
  } catch (error) {
    const endTime = performance.now();
    return {
      operation: operation.name,
      args: args,
      duration: endTime - startTime,
      success: false,
      error: error.message
    };
  }
}
// Profile various file operations
// Runs a write -> read -> stat -> unlink cycle on a 1MB scratch file and
// prints per-operation timings plus the average.
// Fix: the success/failure markers in the summary were mojibake ('β'
// split across two lines) — restored to ✅ / ❌.
async function profileFileOperations() {
  const results = [];
  // Create test file
  const testFile = './test-file.txt';
  const testData = 'x'.repeat(1024 * 1024); // 1MB of data
  // Write operation
  console.log('Profiling file write...');
  let result = await profileFileOperation(fs.writeFile, testFile, testData);
  results.push(result);
  // Read operation
  console.log('Profiling file read...');
  result = await profileFileOperation(fs.readFile, testFile, 'utf8');
  results.push(result);
  // Stat operation
  console.log('Profiling file stat...');
  result = await profileFileOperation(fs.stat, testFile);
  results.push(result);
  // Delete operation
  console.log('Profiling file delete...');
  result = await profileFileOperation(fs.unlink, testFile);
  results.push(result);
  // Analyze results
  console.log('\n=== File I/O Profiling Results ===');
  results.forEach(result => {
    console.log(`${result.operation}: ${result.duration.toFixed(2)}ms ${result.success ? '✅' : '❌'}`);
  });
  const avgDuration = results.reduce((sum, r) => sum + r.duration, 0) / results.length;
  console.log(`Average operation time: ${avgDuration.toFixed(2)}ms`);
  return results;
}
// Database query profiling
// Time a single query against the externally provided `db` client.
// Never throws; failures are captured in the returned object.
async function profileDatabaseQuery(query, params = []) {
  const startTime = performance.now();
  try {
    const result = await db.query(query, params);
    const endTime = performance.now();
    return {
      // Keep logs readable for long SQL strings.
      query: query.substring(0, 100) + (query.length > 100 ? '...' : ''),
      params: params,
      duration: endTime - startTime,
      // Fix: `rowCount || result.length` misreported empty result sets —
      // a legitimate rowCount of 0 fell through to result.length.
      rowCount: result.rowCount ?? result.length,
      success: true
    };
  } catch (error) {
    const endTime = performance.now();
    return {
      query: query.substring(0, 100),
      params: params,
      duration: endTime - startTime,
      success: false,
      error: error.message
    };
  }
}
// Usage
// NOTE(review): floating promise — add .catch() (or await) in real code.
profileFileOperations();
// Example database profiling
// Fires three representative queries in parallel and prints a summary.
// Fix: the success/failure markers were mojibake ('β' split across two
// lines) — restored to ✅ / ❌.
async function profileDatabaseOperations() {
  const operations = [
    profileDatabaseQuery('SELECT * FROM users LIMIT 100'),
    profileDatabaseQuery('SELECT COUNT(*) FROM orders'),
    profileDatabaseQuery('INSERT INTO logs (message, level) VALUES (?, ?)', ['Test log', 'info'])
  ];
  const results = await Promise.all(operations);
  console.log('\n=== Database Profiling Results ===');
  results.forEach(result => {
    console.log(`${result.query}: ${result.duration.toFixed(2)}ms ${result.success ? '✅' : '❌'}`);
  });
}
Key Metrics:
- File read/write speeds
- Database query execution time
- I/O operation frequency
- Disk usage patterns
- Cache hit/miss ratios
Profiling Tools & Techniques
🛠️ Essential Profiling Tools
Choose the right tools for different profiling scenarios and environments.
π Chrome DevTools
Built-in browser performance analysis tools
// Chrome DevTools Performance API
// Measure function execution time
// Wraps a synchronous call in User Timing marks/measures, logs the
// duration, and returns the function's result.
function measureExecutionTime(fn, ...args) {
  const startMark = `start-${fn.name}`;
  const endMark = `end-${fn.name}`;
  const measureName = `${fn.name}-execution`;
  performance.mark(startMark);
  const result = fn(...args);
  performance.mark(endMark);
  performance.measure(measureName, startMark, endMark);
  // Fix: getEntriesByName(...)[0] returned the OLDEST measure when the
  // same function was measured twice — report the one just taken.
  const entries = performance.getEntriesByName(measureName);
  const measure = entries[entries.length - 1];
  console.log(`${fn.name} took ${measure.duration.toFixed(2)}ms`);
  // Clean up so repeated calls don't accumulate stale entries.
  performance.clearMarks(startMark);
  performance.clearMarks(endMark);
  performance.clearMeasures(measureName);
  return result;
}
// User Timing API for custom measurements
// Begin a named measurement (pair with endTiming(name)).
function startTiming(name) {
  performance.mark(`${name}-start`);
}

// Finish a named measurement, log its duration, and clean up the
// marks/measures so the same name can be reused.
function endTiming(name) {
  performance.mark(`${name}-end`);
  performance.measure(name, `${name}-start`, `${name}-end`);
  // Fix: getEntriesByName(name)[0] returned the OLDEST entry when a
  // name was reused — take the measurement just recorded instead.
  const entries = performance.getEntriesByName(name);
  const measure = entries[entries.length - 1];
  console.log(`${name}: ${measure.duration.toFixed(2)}ms`);
  performance.clearMarks(`${name}-start`);
  performance.clearMarks(`${name}-end`);
  performance.clearMeasures(name);
}
// Usage
startTiming('data-processing');
// Simulate data processing
const data = Array.from({ length: 10000 }, (_, i) => i * 2);
const processed = data.map(x => x * x).filter(x => x > 1000);
endTiming('data-processing');
// Performance Observer for continuous monitoring
// Logs every measure() entry as it is recorded, so ad-hoc timings show
// up without polling getEntriesByName().
const observer = new PerformanceObserver((list) => {
for (const entry of list.getEntries()) {
if (entry.entryType === 'measure') {
console.log(`${entry.name}: ${entry.duration.toFixed(2)}ms`);
}
}
});
observer.observe({ entryTypes: ['measure'] });
Available Tabs:
- Performance: CPU, memory, and network profiling
- Memory: Heap snapshots and allocation timelines
- Network: Request timing and resource analysis
- Application: Storage, cache, and service worker analysis
- Lighthouse: Automated performance audits
🐍 Python Profiling Tools
cProfile, line_profiler, and memory_profiler for Python applications
import cProfile
import pstats
from functools import wraps
import tracemalloc
import time
# Function decorator for profiling
# Runs the wrapped function under cProfile, prints the wall-clock time and
# the top-10 cumulative-time entries, then returns the original result.
# Fixes: indentation was stripped by the page extraction (the original as
# shown was not valid Python), and the profiler is now disabled in a
# `finally` block so it stops even if the function raises.
def profile_function(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        profiler = cProfile.Profile()
        profiler.enable()
        start_time = time.time()
        try:
            result = func(*args, **kwargs)
        finally:
            profiler.disable()
            end_time = time.time()
            execution_time = end_time - start_time
            print(f"{func.__name__} executed in {execution_time:.4f} seconds")
            # Print profiling stats
            stats = pstats.Stats(profiler)
            stats.sort_stats('cumulative').print_stats(10)
        return result
    return wrapper
# Memory profiling decorator
# Traces allocations with tracemalloc while the wrapped function runs and
# prints the current/peak usage in MB.
# Fixes: indentation was stripped by the page extraction, and
# tracemalloc.stop() now runs in a `finally` block so tracing is not left
# enabled when the function raises.
def profile_memory(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        tracemalloc.start()
        try:
            result = func(*args, **kwargs)
            current, peak = tracemalloc.get_traced_memory()
            print(f"Current memory usage: {current / 1024 / 1024:.2f} MB")
            print(f"Peak memory usage: {peak / 1024 / 1024:.2f} MB")
        finally:
            tracemalloc.stop()
        return result
    return wrapper
# Usage example
# Decorator order matters: profile_function wraps profile_memory, so the
# cProfile timing includes tracemalloc's tracing overhead.
# Fixes: indentation was stripped by the page extraction, and the stray
# trailing semicolon on the call was removed.
@profile_function
@profile_memory
def process_data():
    """Build 100k dicts, then keep the even-id ones (CPU + memory load)."""
    data = []
    for i in range(100000):
        data.append({
            'id': i,
            'value': i * 2,
            'description': f'Item number {i}' * 10  # Create some memory usage
        })
    # Simulate processing
    processed = [item for item in data if item['id'] % 2 == 0]
    return processed

# Run profiled function
result = process_data()
# Line-by-line profiling with line_profiler
# Install: pip install line_profiler
# Usage: kernprof -l script.py
# Then: python -m line_profiler script.py.lprof
# Fix: indentation was stripped by the page extraction; restored so the
# snippet is valid Python.
@profile  # Injected by kernprof at runtime; NameError if run directly
def slow_function():
    total = 0
    for i in range(1000000):
        total += i ** 2
    return total

# Memory profiling with memory_profiler
# Install: pip install memory_profiler
# Usage: mprof run script.py
# Then: mprof plot
from memory_profiler import profile

@profile
def memory_intensive_function():
    big_list = []
    for i in range(100000):
        big_list.append('x' * 1000)  # 1KB per item
    return big_list
Key Tools:
- cProfile: Deterministic profiling of Python programs
- line_profiler: Line-by-line timing measurements
- memory_profiler: Memory usage monitoring
- tracemalloc: Trace memory allocations
- Py-Spy: Sampling profiler for production
📈 Application Performance Monitoring (APM)
New Relic, DataDog, and AppDynamics for production monitoring
// New Relic Node.js agent setup
// NOTE(review): the agent should be required before other modules so it
// can instrument them; `app` (an Express app) is assumed to exist.
const newrelic = require('newrelic');
// Custom transaction naming
app.get('/api/users/:id', (req, res) => {
newrelic.setTransactionName(`GET /api/users/:id`);
// Add custom attributes
newrelic.addCustomAttribute('userId', req.params.id);
newrelic.addCustomAttribute('userAgent', req.get('User-Agent'));
// Your route logic here
res.json({ id: req.params.id, name: 'John Doe' });
});
// Custom metrics
// NOTE(review): `registrationTimeInMs` and `user` below are not defined
// in this snippet — they come from surrounding application code.
const metricName = 'Custom/User/Registration/Time';
newrelic.recordMetric(metricName, registrationTimeInMs);
// Custom events
newrelic.recordCustomEvent('UserAction', {
action: 'login',
userId: user.id,
timestamp: Date.now(),
success: true
});
// Error tracking
// Express error middleware: report to New Relic, then respond 500.
app.use((error, req, res, next) => {
// Record error in New Relic
newrelic.noticeError(error, {
userId: req.user?.id,
url: req.url,
method: req.method,
userAgent: req.get('User-Agent')
});
res.status(500).json({ error: 'Internal server error' });
});
// DataDog APM setup
// NOTE(review): dd-trace must be initialized before other requires for
// auto-instrumentation to work.
const tracer = require('dd-trace').init({
service: 'my-application',
env: process.env.NODE_ENV,
version: '1.0.0'
});
// Custom spans
app.get('/api/complex-operation', async (req, res) => {
// NOTE(review): scope().active() can be null when no trace is active —
// guard before calling setTag in production code.
const span = tracer.scope().active();
span.setTag('operation', 'complex-calculation');
span.setTag('user.id', req.user?.id);
try {
// Database operation
const dbSpan = tracer.startSpan('database.query', {
childOf: span
});
const result = await db.query('SELECT * FROM complex_data');
dbSpan.finish();
// Processing operation
const processSpan = tracer.startSpan('data.processing', {
childOf: span
});
const processed = result.map(item => ({
...item,
processed: true
}));
processSpan.finish();
res.json(processed);
} catch (error) {
span.setTag('error', true);
span.setTag('error.message', error.message);
throw error;
} finally {
span.finish();
}
});
Popular APM Tools:
- New Relic: Comprehensive application monitoring
- DataDog: Cloud-native monitoring and analytics
- AppDynamics: Business application performance monitoring
- Dynatrace: AI-powered full-stack monitoring
- Application Insights: Azure-native APM
✨ Profiling Best Practices
π― Profile in Production-Like Environments
- Use realistic data volumes and user loads
- Test with production hardware specifications
- Include network latency and external service calls
- Consider different geographic regions
- Use production database configurations
π Establish Baselines
- Measure performance before making changes
- Track key metrics over time
- Set acceptable performance thresholds
- Compare against industry benchmarks
- Document expected performance characteristics
π Iterative Optimization
- Identify the biggest bottlenecks first
- Make small, measurable changes
- Profile after each optimization
- Avoid premature optimization
- Balance performance with maintainability
π Continuous Monitoring
- Implement automated performance monitoring
- Set up alerts for performance degradation
- Regular performance audits
- Monitor third-party service performance
- Track user experience metrics
Performance Optimization Strategies
⚡ Common Performance Bottlenecks & Solutions
Identify and resolve the most common performance issues in web applications.
π N+1 Query Problem
Problem: Multiple database queries instead of optimized batch queries
// ❌ Bad: N+1 queries
app.get('/api/posts-with-authors', async (req, res) => {
const posts = await Post.findAll();
// This executes N additional queries (one per post)
// — the classic N+1 pattern: 1 query for posts + N author lookups.
for (const post of posts) {
post.author = await User.findById(post.authorId);
}
res.json(posts);
});
// ✅ Good: Use joins or eager loading
app.get('/api/posts-with-authors', async (req, res) => {
// Single query with join
// Eager-load the author relation so the DB performs one JOINed query.
const posts = await Post.findAll({
include: [{
model: User,
as: 'author'
}]
});
res.json(posts);
});
// Alternative: Batch loading
// Two queries total: one for posts, one IN-query for all distinct authors.
app.get('/api/posts-with-authors', async (req, res) => {
const posts = await Post.findAll();
const authorIds = [...new Set(posts.map(p => p.authorId))];
// Single query for all authors
const authors = await User.findAll({
where: { id: authorIds }
});
// Create lookup map
// O(1) author lookup instead of a findById per post.
const authorMap = authors.reduce((map, author) => {
map[author.id] = author;
return map;
}, {});
// Attach authors to posts
posts.forEach(post => {
post.author = authorMap[post.authorId];
});
res.json(posts);
});
💾 Memory Leaks
Problem: Memory not being properly garbage collected
// ❌ Memory leak: Event listeners not cleaned up
class DataManager {
constructor() {
this.listeners = [];
this.data = new Map();
}
addListener(callback) {
this.listeners.push(callback);
}
// Memory leak: listeners never removed
// The array keeps a strong reference to every callback (and everything
// it closes over) for the manager's lifetime — there is no unsubscribe.
notifyListeners() {
this.listeners.forEach(callback => callback(this.data));
}
}
// ✅ Fixed: Proper cleanup
class DataManager {
  constructor() {
    // A Set deduplicates callbacks and makes removal O(1).
    this.listeners = new Set();
    this.data = new Map();
  }

  // Register a callback and hand back an unsubscribe function so every
  // caller has a way to clean up.
  addListener(callback) {
    this.listeners.add(callback);
    return () => {
      this.listeners.delete(callback);
    };
  }

  // Invoke every registered listener with the current data map.
  notifyListeners() {
    this.listeners.forEach(callback => callback(this.data));
  }

  // Drop all listeners and data, e.g. on teardown.
  destroy() {
    this.listeners.clear();
    this.data.clear();
  }
}
// Usage with automatic cleanup
const manager = new DataManager();
const cleanup = manager.addListener((data) => {
console.log('Data updated:', data);
});
// Later, when component unmounts
cleanup();
// For React components
// NOTE(review): assumes React's useEffect plus `manager` and
// `handleDataUpdate` in scope; the empty deps array subscribes once on
// mount and the returned function unsubscribes on unmount.
useEffect(() => {
const cleanup = manager.addListener(handleDataUpdate);
return () => {
cleanup(); // Cleanup on unmount
};
}, []);
π Inefficient Loops
Problem: Nested loops and inefficient iterations
// ❌ Inefficient nested loops
// Pair every item with any item whose relatedId points back at it.
// O(n^2): each item is compared against the whole array.
function processData(data) {
  const results = [];
  data.forEach((item) => {
    data.forEach((candidate) => {
      if (item.id === candidate.relatedId) {
        results.push({ item, related: candidate });
      }
    });
  });
  return results;
}
// ✅ Optimized with hash maps
// Same join, done in O(n): one pass to index items by id, one pass to
// resolve each item's relatedId through the map.
function processDataOptimized(data) {
  const byId = new Map(data.map((entry) => [entry.id, entry]));
  const results = [];
  for (const item of data) {
    if (item.relatedId && byId.has(item.relatedId)) {
      results.push({
        item: item,
        related: byId.get(item.relatedId)
      });
    }
  }
  return results;
}
// ❌ Inefficient array operations
// Manual-loop version: keep values above 10, doubled, in input order.
function filterAndTransform(arr) {
  const kept = [];
  for (const value of arr) {
    if (value > 10) {
      kept.push(value * 2);
    }
  }
  return kept;
}
// ✅ Use built-in array methods
// Built-in-method version of the same transform.
function filterAndTransformOptimized(arr) {
  const large = arr.filter((value) => value > 10);
  return large.map((value) => value * 2);
}
// For large arrays, consider lazy evaluation
// Generator variant: values are filtered and doubled on demand, so huge
// inputs never materialize an intermediate array.
function* lazyFilterAndTransform(arr) {
  for (const value of arr) {
    if (value <= 10) continue;
    yield value * 2;
  }
}
// Usage
const largeArray = Array.from({ length: 1000000 }, (_, i) => i);
// NOTE(review): spreading realizes every result at once — iterate with
// for...of instead to actually keep memory flat.
const result = [...lazyFilterAndTransform(largeArray)]; // Memory efficient
π Excessive API Calls
Problem: Too many HTTP requests or redundant API calls
// ❌ Multiple API calls in a loop
// One HTTP round trip per user, awaited serially — total latency grows
// linearly with userIds.length.
async function getUserDetails(userIds) {
const users = [];
for (const userId of userIds) {
const response = await fetch(`/api/users/${userId}`);
const user = await response.json();
users.push(user);
}
return users;
}
// ✅ Batch API calls
// Fetch all users in a single request via a dedicated batch endpoint.
// Fix: in the original, "Option 2" sat after the return statement and was
// unreachable dead code — it is now a separate, callable function.
async function getUserDetailsBatch(userIds) {
  const response = await fetch('/api/users/batch', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ ids: userIds })
  });
  return await response.json();
}

// Alternative when no batch endpoint exists: issue the per-user requests
// in parallel rather than serially.
async function getUserDetailsParallel(userIds) {
  const promises = userIds.map(userId =>
    fetch(`/api/users/${userId}`).then(r => r.json())
  );
  return await Promise.all(promises);
}
// Request deduplication
// Request deduplication
// GET-through cache: identical concurrent requests share one fetch, and
// successful responses are cached for 5 minutes.
class ApiCache {
  constructor() {
    this.cache = new Map();   // url -> parsed JSON body
    this.pending = new Map(); // url -> in-flight promise (dedupe)
  }

  async get(url) {
    // Check cache first
    if (this.cache.has(url)) {
      return this.cache.get(url);
    }
    // Check if request is already pending
    if (this.pending.has(url)) {
      return this.pending.get(url);
    }
    // Make request
    const promise = fetch(url).then(async response => {
      // Fix: the original cached whatever came back, including error
      // bodies — only cache successful responses.
      if (!response.ok) {
        throw new Error(`Request failed with status ${response.status}`);
      }
      const data = await response.json();
      this.cache.set(url, data);
      // Cache for 5 minutes
      setTimeout(() => {
        this.cache.delete(url);
      }, 5 * 60 * 1000);
      return data;
    });
    this.pending.set(url, promise);
    try {
      return await promise;
    } finally {
      // Always clear the pending slot, success or failure.
      this.pending.delete(url);
    }
  }
}
// Usage
const apiCache = new ApiCache();
// These will share the same request
// NOTE(review): top-level await — requires an ES module context.
const user1 = await apiCache.get('/api/users/1');
const user2 = await apiCache.get('/api/users/1'); // Cached
📦 Large Bundle Sizes
Problem: JavaScript bundles are too large, slowing initial page loads
// Code splitting with dynamic imports
// Instead of:
// import { heavyFunction } from './heavy-module';
// Use:
// NOTE(review): `lazy` is React.lazy — the component must be rendered
// inside a <Suspense> boundary.
const HeavyComponent = lazy(() =>
import('./HeavyComponent')
);
// Route-based code splitting
// Each route's chunk is downloaded only when the route is visited.
const routes = [
{
path: '/dashboard',
component: lazy(() => import('./Dashboard'))
},
{
path: '/reports',
component: lazy(() => import('./Reports'))
}
];
// Library chunking
// webpack.config.js
// NOTE(review): excerpt — `optimization` is a key of the exported config
// object, not standalone JavaScript.
optimization: {
splitChunks: {
chunks: 'all',
cacheGroups: {
vendor: {
test: /[\/]node_modules[\/]/,
name: 'vendors',
chunks: 'all'
},
react: {
test: /[\/]node_modules[\/]react/,
name: 'react',
chunks: 'all'
}
}
}
}
// Tree shaking
// Only import what you need
// ❌ import * as _ from 'lodash';
// ✅ import { debounce, throttle } from 'lodash';
// Dynamic imports for rarely used code
// Fetches chart.js on demand, and only in a browser environment
// (returns undefined elsewhere, e.g. during SSR).
async function loadHeavyLibrary() {
if (typeof window !== 'undefined') {
const { Chart } = await import('chart.js');
return Chart;
}
}
π Advanced Optimization Techniques
Cutting-edge techniques for maximum performance gains.
⚡ WebAssembly for CPU-Intensive Tasks
// JavaScript fallback
// Reference JavaScript implementation (exponential time), used as the
// baseline against the WebAssembly build below.
function fibonacciJS(n) {
  return n <= 1 ? n : fibonacciJS(n - 1) + fibonacciJS(n - 2);
}
// WebAssembly (compiled from C++)
// fibonacci.c
/* Naive exponential-time Fibonacci — same algorithm as the JS version,
 * compiled to WebAssembly to compare throughput. */
int fibonacci(int n) {
if (n <= 1) return n;
return fibonacci(n - 1) + fibonacci(n - 2);
}
// Compile to WebAssembly
// emcc fibonacci.c -o fibonacci.js -s EXPORTED_FUNCTIONS="['_fibonacci']"
// Usage in JavaScript
// NOTE(review): top-level await requires a module context, and
// instantiateStreaming needs the server to serve application/wasm.
const wasmModule = await WebAssembly.instantiateStreaming(
fetch('fibonacci.wasm')
);
const fibonacciWasm = wasmModule.instance.exports.fibonacci;
// Much faster than JavaScript version
// NOTE(review): "instant" is relative — the algorithm is still O(2^n).
console.log(fibonacciWasm(40)); // Instant result
🧵 Web Workers for Parallel Processing
// Main thread
const worker = new Worker('heavy-processing.js');
// Send data to worker
// postMessage structured-clones the payload — large datasets are copied
// unless transferable objects are used. NOTE(review): `largeDataset` is
// assumed to be defined elsewhere.
worker.postMessage({
type: 'PROCESS_DATA',
data: largeDataset
});
// Receive results from worker
worker.onmessage = function(e) {
const result = e.data;
console.log('Processing complete:', result);
};
// heavy-processing.js (Worker)
// Runs off the main thread, so the UI stays responsive.
self.onmessage = function(e) {
const { type, data } = e.data;
if (type === 'PROCESS_DATA') {
// Heavy processing in separate thread
const result = processLargeDataset(data);
// Send result back to main thread
self.postMessage(result);
}
};
// CPU-heavy transform: scale each value by its own square root, then
// keep only results above 1000.
function processLargeDataset(data) {
  const scaled = data.map(({ value }) => value * Math.sqrt(value));
  return scaled.filter((value) => value > 1000);
}
💾 Service Workers for Caching
// Service worker for offline caching
// sw.js
const CACHE_NAME = 'v1';
const urlsToCache = [
'/',
'/styles/main.css',
'/scripts/main.js',
'/images/logo.png'
];
// Pre-cache the app shell at install time.
self.addEventListener('install', event => {
event.waitUntil(
caches.open(CACHE_NAME)
.then(cache => cache.addAll(urlsToCache))
);
});
// Cache-first fetch handler for all requests.
self.addEventListener('fetch', event => {
event.respondWith(
caches.match(event.request)
.then(response => {
// Return cached version or fetch from network
return response || fetch(event.request);
})
);
});
// Cache strategies
const cacheStrategies = {
// Cache first, then network
cacheFirst: async (request) => {
const cached = await caches.match(request);
if (cached) return cached;
const response = await fetch(request);
if (response.ok) {
const cache = await caches.open(CACHE_NAME);
cache.put(request, response.clone()); // clone: a Response body is single-use
}
return response;
},
// Network first, fallback to cache
// NOTE(review): if fetch fails AND nothing is cached, this resolves to
// undefined — callers should handle a missing response.
networkFirst: async (request) => {
try {
const response = await fetch(request);
if (response.ok) {
const cache = await caches.open(CACHE_NAME);
cache.put(request, response.clone());
}
return response;
} catch (error) {
return caches.match(request);
}
},
// Stale while revalidate
// Serve the cached copy immediately (if any) while refreshing it in the
// background for the next request.
staleWhileRevalidate: async (request) => {
const cache = await caches.open(CACHE_NAME);
const cached = await cache.match(request);
const fetchPromise = fetch(request).then(response => {
if (response.ok) {
cache.put(request, response.clone());
}
return response;
});
return cached || fetchPromise;
}
};
π Virtualization for Large Lists
// React virtualized list
import { FixedSizeList as List } from 'react-window';
// Renders only the rows visible in the 400px viewport; react-window
// positions rows via the injected `style` prop as the user scrolls.
function VirtualizedList({ items, itemHeight }) {
return (
<List
height={400}
itemCount={items.length}
itemSize={itemHeight}
width={300}
>
{({ index, style }) => (
<div style={style}>
{items[index].name}
</div>
)}
</List>
);
}
// Only renders visible items
const items = Array.from({ length: 10000 }, (_, i) => ({
id: i,
name: `Item ${i}`
}));
<VirtualizedList items={items} itemHeight={50} />
// Custom virtualization for non-React
// Minimal plain-DOM list virtualization: only the rows intersecting the
// container's viewport are materialized in the DOM.
class VirtualScroller {
  constructor(container, items, itemHeight) {
    this.container = container;
    this.items = items;
    this.itemHeight = itemHeight;
    this.scrollTop = 0;
    // Fix: absolutely-positioned rows need a positioned ancestor, and the
    // container needs real scrollable height — the original provided
    // neither, so the list could never actually scroll.
    this.container.style.position = 'relative';
    this.sizer = document.createElement('div');
    this.sizer.style.height = `${this.items.length * this.itemHeight}px`;
    this.container.addEventListener('scroll', this.onScroll.bind(this));
    this.render();
  }

  onScroll() {
    this.scrollTop = this.container.scrollTop;
    this.render();
  }

  render() {
    const startIndex = Math.floor(this.scrollTop / this.itemHeight);
    const endIndex = Math.min(
      startIndex + Math.ceil(this.container.clientHeight / this.itemHeight),
      this.items.length
    );
    // Rebuild the visible window; re-append the sizer so the scrollable
    // height survives the innerHTML reset.
    this.container.innerHTML = '';
    this.container.appendChild(this.sizer);
    for (let i = startIndex; i < endIndex; i++) {
      const item = this.items[i];
      const div = document.createElement('div');
      div.style.height = `${this.itemHeight}px`;
      div.style.position = 'absolute';
      div.style.top = `${i * this.itemHeight}px`;
      div.textContent = item.name;
      this.container.appendChild(div);
    }
  }
}
Panda Core Profiling Tools
Performance Profiling Tools Suite
π Browser Compatibility Checker
Check browser compatibility and feature support for modern web technologies across different browsers and versions for performance testing.
π AI Sitemap Generator
Generate comprehensive XML sitemaps with AI-powered prioritization for performance analysis and SEO optimization.
Panda Profiling Protocol
1. Performance Baseline
AI establishes performance baselines and identifies key metrics
2. Automated Profiling
Comprehensive profiling across CPU, memory, network, and I/O
3. Bottleneck Analysis
AI analyzes profiling data to identify performance bottlenecks
4. Optimization Recommendations
Generates specific optimization recommendations with priority ranking
5. Continuous Monitoring
Monitors performance improvements and alerts on regressions
Measuring Profiling Success
β‘ Performance Improvement
Quantifiable improvement in application response times and throughput
πΎ Resource Efficiency
Reduction in CPU, memory, and network resource utilization
π₯ User Experience
Enhanced user satisfaction through faster, more responsive applications
π§ Development Efficiency
Faster identification and resolution of performance issues