Performance Optimization - Câu hỏi phỏng vấn Senior Engineer
1. JVM Performance Tuning
Câu hỏi:
"Làm thế nào để tune JVM parameters cho một high-throughput application? Analyze và optimize garbage collection."
Câu trả lời:
Ví dụ JVM Tuning Strategy:
// 1. Performance Monitoring Service
// 1. Performance Monitoring Service
//
// Publishes JVM memory, GC, and thread health to Micrometer. All gauges are
// registered exactly once at construction time and pull their values lazily
// on scrape; re-registering gauges (or registering boxed locals) on every
// scheduled run would create weakly-referenced meters that silently stop
// reporting once their referent is garbage collected.
//
// NOTE(review): `log` is referenced but not declared in this snippet —
// presumably @Slf4j (Lombok) on the real class; confirm.
@Service
public class PerformanceMonitoringService {

    private final MeterRegistry meterRegistry;
    private final MemoryMXBean memoryBean;
    private final List<GarbageCollectorMXBean> gcBeans;
    private final RuntimeMXBean runtimeBean;

    public PerformanceMonitoringService(MeterRegistry meterRegistry) {
        this.meterRegistry = meterRegistry;
        this.memoryBean = ManagementFactory.getMemoryMXBean();
        this.gcBeans = ManagementFactory.getGarbageCollectorMXBeans();
        this.runtimeBean = ManagementFactory.getRuntimeMXBean();
        initializeMetrics();
    }

    // Registers every gauge once. Micrometer's Gauge.builder(name, obj, fn)
    // form is used — the builder(name).register(registry, obj, fn) overload
    // in the original does not exist in the Micrometer API.
    private void initializeMetrics() {
        // JVM Memory Metrics
        Gauge.builder("jvm.memory.heap.used", this,
                self -> self.memoryBean.getHeapMemoryUsage().getUsed())
            .register(meterRegistry);
        Gauge.builder("jvm.memory.heap.max", this,
                self -> self.memoryBean.getHeapMemoryUsage().getMax())
            .register(meterRegistry);
        Gauge.builder("jvm.memory.nonheap.used", this,
                self -> self.memoryBean.getNonHeapMemoryUsage().getUsed())
            .register(meterRegistry);
        // Heap utilization is registered here once, not re-created on each
        // scheduled run (which would leak weakly-referenced gauges).
        Gauge.builder("jvm.memory.heap.utilization", this,
                PerformanceMonitoringService::currentHeapUtilization)
            .register(meterRegistry);

        // GC Metrics (one set of gauges per collector, tagged by name)
        for (GarbageCollectorMXBean gcBean : gcBeans) {
            Gauge.builder("jvm.gc.collection.count", gcBean,
                    GarbageCollectorMXBean::getCollectionCount)
                .tag("gc", gcBean.getName())
                .register(meterRegistry);
            Gauge.builder("jvm.gc.collection.time", gcBean,
                    GarbageCollectorMXBean::getCollectionTime)
                .tag("gc", gcBean.getName())
                .register(meterRegistry);
            // Average pause per collection, computed lazily on scrape
            Gauge.builder("jvm.gc.average.time", gcBean, bean -> {
                    long count = bean.getCollectionCount();
                    return count > 0 ? (double) bean.getCollectionTime() / count : 0.0;
                })
                .tag("gc", gcBean.getName())
                .register(meterRegistry);
        }

        // Thread Metrics
        ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
        Gauge.builder("jvm.threads.live", threadBean, ThreadMXBean::getThreadCount)
            .register(meterRegistry);
        Gauge.builder("jvm.threads.daemon", threadBean, ThreadMXBean::getDaemonThreadCount)
            .register(meterRegistry);
    }

    // Heap utilization in percent. MemoryUsage.getMax() returns -1 when the
    // maximum is undefined (per its javadoc); fall back to the committed size
    // so the result is never negative or infinite.
    private double currentHeapUtilization() {
        MemoryUsage heapUsage = memoryBean.getHeapMemoryUsage();
        long limit = heapUsage.getMax() > 0 ? heapUsage.getMax() : heapUsage.getCommitted();
        return limit > 0 ? (double) heapUsage.getUsed() / limit * 100 : 0.0;
    }

    /** Periodic health checks; the gauges themselves are sampled on scrape. */
    @Scheduled(fixedRate = 30000) // Every 30 seconds
    public void recordPerformanceMetrics() {
        recordMemoryMetrics();
        recordThreadMetrics();
    }

    private void recordMemoryMetrics() {
        double heapUtilization = currentHeapUtilization();
        // Alert if heap utilization is high
        if (heapUtilization > 80) {
            log.warn("High heap memory utilization: {}%", String.format("%.2f", heapUtilization));
        }
    }

    private void recordThreadMetrics() {
        ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
        long[] deadlockedThreads = threadBean.findDeadlockedThreads();
        if (deadlockedThreads != null) {
            meterRegistry.counter("jvm.threads.deadlocked").increment(deadlockedThreads.length);
            log.error("Detected {} deadlocked threads", deadlockedThreads.length);
        }
    }

    /** Point-in-time snapshot of uptime, heap/non-heap usage, and GC stats. */
    public PerformanceReport generateReport() {
        MemoryUsage heapUsage = memoryBean.getHeapMemoryUsage();
        MemoryUsage nonHeapUsage = memoryBean.getNonHeapMemoryUsage();
        List<GCInfo> gcInfos = gcBeans.stream()
            .map(bean -> new GCInfo(
                bean.getName(),
                bean.getCollectionCount(),
                bean.getCollectionTime()
            ))
            .collect(Collectors.toList());
        return PerformanceReport.builder()
            .uptime(runtimeBean.getUptime())
            .heapUsed(heapUsage.getUsed())
            .heapMax(heapUsage.getMax())
            .nonHeapUsed(nonHeapUsage.getUsed())
            .gcInfos(gcInfos)
            .build();
    }
}
// 2. Memory Pool Analysis
// 2. Memory Pool Analysis
//
// Inspects JVM memory pools (Eden, Old Gen, Metaspace, ...) via JMX and
// derives simple sizing recommendations. The pool-name lookups below assume
// the G1 collector; under another GC they simply find nothing.
@Component
public class MemoryPoolAnalyzer {

    private final List<MemoryPoolMXBean> memoryPools;

    public MemoryPoolAnalyzer() {
        this.memoryPools = ManagementFactory.getMemoryPoolMXBeans();
    }

    /**
     * Snapshot of every currently valid memory pool, keyed by pool name.
     * Invalid pools are skipped: per the MemoryPoolMXBean javadoc, a pool
     * that has become invalid may return null from getUsage().
     */
    public Map<String, MemoryPoolInfo> analyzeMemoryPools() {
        return memoryPools.stream()
            .filter(MemoryPoolMXBean::isValid)
            .collect(Collectors.toMap(
                MemoryPoolMXBean::getName,
                this::createMemoryPoolInfo
            ));
    }

    private MemoryPoolInfo createMemoryPoolInfo(MemoryPoolMXBean pool) {
        MemoryUsage usage = pool.getUsage();
        MemoryUsage peakUsage = pool.getPeakUsage();
        return MemoryPoolInfo.builder()
            .name(pool.getName())
            .type(pool.getType())
            .used(usage.getUsed())
            .committed(usage.getCommitted())
            .max(usage.getMax())
            .peakUsed(peakUsage.getUsed())
            .utilizationPercent(calculateUtilization(usage))
            .build();
    }

    // Utilization in percent. getMax() == -1 means "undefined" (MemoryUsage
    // javadoc), so fall back to committed; committed can itself be 0 for a
    // freshly created pool, which previously produced NaN/Infinity.
    private double calculateUtilization(MemoryUsage usage) {
        long limit = usage.getMax() == -1 ? usage.getCommitted() : usage.getMax();
        if (limit <= 0) {
            return 0.0;
        }
        return (double) usage.getUsed() / limit * 100;
    }

    /** Heuristic tuning hints based on current pool utilization. */
    public List<String> getOptimizationRecommendations() {
        List<String> recommendations = new ArrayList<>();
        Map<String, MemoryPoolInfo> pools = analyzeMemoryPools();
        // Eden Space analysis: a constantly full Eden means frequent young GCs.
        // Note: -Xmn is discouraged with G1 (it disables the pause-time goal),
        // so the hint points at the G1 percentage knobs instead.
        MemoryPoolInfo edenSpace = pools.get("G1 Eden Space");
        if (edenSpace != null && edenSpace.getUtilizationPercent() > 90) {
            recommendations.add("Consider raising -XX:G1MaxNewSizePercent (avoid -Xmn with G1: it disables the pause-time goal)");
        }
        // Old Generation analysis
        MemoryPoolInfo oldGen = pools.get("G1 Old Gen");
        if (oldGen != null && oldGen.getUtilizationPercent() > 80) {
            recommendations.add("Consider increasing heap size or optimizing object lifecycle");
        }
        // Metaspace analysis: near the cap → raise the cap (MaxMetaspaceSize),
        // not the initial high-water mark (MetaspaceSize)
        MemoryPoolInfo metaspace = pools.get("Metaspace");
        if (metaspace != null && metaspace.getUtilizationPercent() > 90) {
            recommendations.add("Consider increasing -XX:MaxMetaspaceSize");
        }
        return recommendations;
    }
}
// 3. JVM Arguments Configuration
// 3. JVM Arguments Configuration
@ConfigurationProperties(prefix = "app.jvm")
@Data
public class JVMConfiguration {

    /**
     * Builds a recommended G1GC argument list for a named workload profile.
     * Known profiles: "high-throughput", "low-latency", "memory-constrained"
     * (case-insensitive). A null or unknown profile yields only the common
     * production settings.
     */
    public static class G1GCConfig {

        public static List<String> getOptimalArgs(String profile) {
            List<String> args = new ArrayList<>();
            // Locale.ROOT keeps lowercasing locale-independent (Turkish
            // dotless-i); a null profile no longer throws NPE, it simply
            // selects no profile-specific block.
            String normalized = profile == null ? "" : profile.toLowerCase(java.util.Locale.ROOT);
            switch (normalized) {
                case "high-throughput":
                    args.addAll(Arrays.asList(
                        // Heap Settings
                        "-Xms4g",
                        "-Xmx8g",
                        // G1GC Settings
                        "-XX:+UseG1GC",
                        "-XX:MaxGCPauseMillis=100",
                        "-XX:G1HeapRegionSize=16m",
                        "-XX:G1NewSizePercent=30",
                        "-XX:G1MaxNewSizePercent=40",
                        "-XX:G1MixedGCCountTarget=8",
                        "-XX:G1MixedGCLiveThresholdPercent=85",
                        // Footprint / adaptive IHOP
                        "-XX:+UseStringDeduplication",
                        "-XX:+UnlockExperimentalVMOptions",
                        // Boolean -XX flags use +/- syntax; the original
                        // "-XX:G1UseAdaptiveIHOP=true" is rejected at startup.
                        "-XX:+G1UseAdaptiveIHOP"
                    ));
                    break;
                case "low-latency":
                    args.addAll(Arrays.asList(
                        // Heap Settings
                        "-Xms2g",
                        "-Xmx4g",
                        // G1GC Low Latency
                        "-XX:+UseG1GC",
                        "-XX:MaxGCPauseMillis=50",
                        "-XX:G1HeapRegionSize=8m",
                        "-XX:G1NewSizePercent=40",
                        "-XX:G1MaxNewSizePercent=50",
                        // Reduce allocation/commit jitter at runtime
                        "-XX:+UseLargePages",
                        "-XX:+AlwaysPreTouch"
                    ));
                    break;
                case "memory-constrained":
                    args.addAll(Arrays.asList(
                        // Smaller heap
                        "-Xms512m",
                        "-Xmx1g",
                        // Optimize for memory footprint
                        "-XX:+UseG1GC",
                        "-XX:MaxGCPauseMillis=200",
                        "-XX:+UseStringDeduplication",
                        "-XX:+UseCompressedOops",
                        "-XX:+UseCompressedClassPointers"
                    ));
                    break;
            }
            // Common production settings
            args.addAll(Arrays.asList(
                // Unified GC logging (Java 9+ -Xlog syntax)
                "-Xlog:gc:gc.log:time,tags",
                "-XX:+UseContainerSupport",
                // JIT Compiler (both are HotSpot defaults; kept explicit)
                "-XX:+TieredCompilation",
                "-XX:TieredStopAtLevel=4",
                // Error Handling: dump then exit on OOM so the orchestrator restarts us
                "-XX:+HeapDumpOnOutOfMemoryError",
                "-XX:HeapDumpPath=/tmp/heapdump.hprof",
                "-XX:+ExitOnOutOfMemoryError"
            ));
            return args;
        }
    }
}
JVM Tuning Best Practices:
# High Throughput Application
java -Xms4g -Xmx8g \
-XX:+UseG1GC \
-XX:MaxGCPauseMillis=100 \
-XX:G1HeapRegionSize=16m \
-XX:+UseStringDeduplication \
-XX:+HeapDumpOnOutOfMemoryError \
-Xlog:gc:gc.log:time,tags \
-jar app.jar
# Low Latency Application
java -Xms2g -Xmx4g \
-XX:+UseG1GC \
-XX:MaxGCPauseMillis=50 \
-XX:+UseLargePages \
-XX:+AlwaysPreTouch \
-XX:+UnlockExperimentalVMOptions \
-XX:+UseTransparentHugePages \
-jar app.jar
# Memory Constrained Environment
java -Xms512m -Xmx1g \
-XX:+UseG1GC \
-XX:+UseCompressedOops \
-XX:+UseStringDeduplication \
-XX:MaxMetaspaceSize=256m \
-jar app.jar
2. Database Query Optimization
Câu hỏi:
"Optimize slow database queries và implement effective caching strategies. Làm thế nào để handle database connection pooling?"
Câu trả lời:
Ví dụ Database Optimization:
// 1. Query Performance Monitoring
// 1. Query Performance Monitoring
//
// Hibernate (5.x-era) Interceptor intended to surface query latency as
// Micrometer metrics. Most callbacks below are no-op boilerplate required by
// the Interceptor interface.
@Component
public class QueryPerformanceInterceptor implements Interceptor {

    private final MeterRegistry meterRegistry;
    private final Timer queryTimer;
    private final Counter slowQueryCounter;

    // Registers the query timer and slow-query counter once.
    public QueryPerformanceInterceptor(MeterRegistry meterRegistry) {
        this.meterRegistry = meterRegistry;
        this.queryTimer = Timer.builder("database.query.duration")
            .register(meterRegistry);
        this.slowQueryCounter = Counter.builder("database.query.slow")
            .register(meterRegistry);
    }

    // --- Required Interceptor callbacks (intentionally no-op) ---

    @Override
    public boolean onLoad(Object entity, Serializable id, Object[] state, String[] propertyNames, Type[] types) {
        return false; // false = entity state not modified
    }

    @Override
    public boolean onSave(Object entity, Serializable id, Object[] state, String[] propertyNames, Type[] types) {
        return false;
    }

    @Override
    public void onDelete(Object entity, Serializable id, Object[] state, String[] propertyNames, Type[] types) {
    }

    @Override
    public boolean onFlushDirty(Object entity, Serializable id, Object[] currentState, Object[] previousState, String[] propertyNames, Type[] types) {
        return false;
    }

    @Override
    public Boolean isTransient(Object entity) {
        return null; // null = let Hibernate decide
    }

    @Override
    public Object instantiate(String entityName, EntityMode entityMode, Serializable id) {
        return null; // null = default instantiation
    }

    @Override
    public String getEntityName(Object object) {
        return null;
    }

    @Override
    public Object getEntity(String entityName, Serializable id) {
        return null;
    }

    @Override
    public void afterTransactionBegin(Transaction tx) {
    }

    @Override
    public void beforeTransactionCompletion(Transaction tx) {
    }

    @Override
    public void afterTransactionCompletion(Transaction tx) {
    }

    // NOTE(review): this timing is broken. `startTime` is captured immediately
    // before recordCallable runs the lambda, so `duration` measures only the
    // lambda's own execution (~0 ns) — never the SQL execution time. The
    // slow-query branch can therefore never fire, and the timer is polluted
    // with near-zero samples. onPrepareStatement is also invoked at statement
    // *preparation* time, before execution, so execution latency cannot be
    // measured from this callback at all — use a proxy DataSource (e.g.
    // datasource-proxy) or Hibernate statistics instead. Also note that
    // Timer#recordCallable declares `throws Exception`, which this override
    // does not handle — confirm this compiles against the project's
    // Micrometer version.
    @Override
    public String onPrepareStatement(String sql) {
        long startTime = System.nanoTime();
        return queryTimer.recordCallable(() -> {
            // Log slow queries
            long duration = System.nanoTime() - startTime;
            if (duration > Duration.ofSeconds(1).toNanos()) {
                slowQueryCounter.increment();
                log.warn("Slow query detected: {} ({}ms)", sql, duration / 1_000_000);
            }
            return sql;
        });
    }
}
// 2. Optimized Repository Implementation
// 2. Optimized Repository Implementation
@Repository
public class OptimizedUserRepository {

    @PersistenceContext
    private EntityManager entityManager;

    @Autowired
    private RedisTemplate<String, Object> redisTemplate;

    /**
     * Cursor-based (keyset) pagination: stable under concurrent inserts and
     * free of the deep-OFFSET cost of page-number pagination.
     *
     * @param cursor opaque cursor from a previous page, or null for the first page
     * @param size   maximum number of rows to return
     */
    public Page<User> findUsersWithCursor(String cursor, int size) {
        CriteriaBuilder cb = entityManager.getCriteriaBuilder();
        CriteriaQuery<User> query = cb.createQuery(User.class);
        Root<User> root = query.from(User.class);
        List<Predicate> predicates = new ArrayList<>();
        if (cursor != null) {
            // Resume strictly after the last id of the previous page
            Long lastId = decodeCursor(cursor);
            predicates.add(cb.greaterThan(root.get("id"), lastId));
        }
        query.where(predicates.toArray(new Predicate[0]));
        query.orderBy(cb.asc(root.get("id")));
        TypedQuery<User> typedQuery = entityManager.createQuery(query);
        typedQuery.setMaxResults(size + 1); // fetch one extra row to detect a next page
        List<User> users = typedQuery.getResultList();
        boolean hasNext = users.size() > size;
        if (hasNext) {
            users = users.subList(0, size);
        }
        // Guard the empty-page case (e.g. size == 0) before indexing the last element
        String nextCursor = (hasNext && !users.isEmpty())
            ? encodeCursor(users.get(users.size() - 1).getId())
            : null;
        return new CursorBasedPage<>(users, nextCursor, hasNext);
    }

    // NOTE(review): the @Query methods below have no bodies. Spring Data
    // derives implementations only for *repository interfaces*; in a concrete
    // @Repository class they should either move to an interface or be
    // implemented via the EntityManager — confirm against the real codebase.

    // Batch loading to avoid the N+1 select problem
    @Query("SELECT DISTINCT u FROM User u " +
        "LEFT JOIN FETCH u.profile " +
        "LEFT JOIN FETCH u.roles " +
        "WHERE u.id IN :ids")
    List<User> findByIdsWithAssociations(@Param("ids") List<Long> ids);

    // Projection query: fetch only the columns the DTO needs
    @Query("SELECT new com.company.dto.UserSummaryDto(u.id, u.name, u.email, p.avatar) " +
        "FROM User u LEFT JOIN u.profile p " +
        "WHERE u.active = true")
    List<UserSummaryDto> findActiveUserSummaries();

    // Native query for a complex aggregation (PostgreSQL DATE_TRUNC)
    @Query(value = """
        SELECT
        DATE_TRUNC('day', u.created_at) as date,
        COUNT(*) as user_count,
        COUNT(CASE WHEN u.email_verified = true THEN 1 END) as verified_count
        FROM users u
        WHERE u.created_at >= :startDate
        GROUP BY DATE_TRUNC('day', u.created_at)
        ORDER BY date
        """, nativeQuery = true)
    List<Object[]> getUserRegistrationStats(@Param("startDate") LocalDateTime startDate);

    // Cached single-entity lookup; evicted by save() below
    @Cacheable(value = "users", key = "#id")
    public User findByIdCached(Long id) {
        return entityManager.find(User.class, id);
    }

    @CacheEvict(value = "users", key = "#user.id")
    public User save(User user) {
        return entityManager.merge(user);
    }

    // Bulk operations: single UPDATE/DELETE statement instead of per-entity writes
    @Modifying
    @Query("UPDATE User u SET u.lastLoginAt = :loginTime WHERE u.id IN :userIds")
    int updateLastLoginBulk(@Param("userIds") List<Long> userIds,
        @Param("loginTime") LocalDateTime loginTime);

    @Modifying
    @Query("DELETE FROM UserSession s WHERE s.expiresAt < :now")
    int deleteExpiredSessions(@Param("now") LocalDateTime now);

    // Opaque cursor = Base64 of the decimal id. UTF-8 is pinned explicitly:
    // the platform default charset would make cursors non-portable across JVMs.
    private String encodeCursor(Long id) {
        return Base64.getEncoder()
            .encodeToString(id.toString().getBytes(java.nio.charset.StandardCharsets.UTF_8));
    }

    // Throws IllegalArgumentException / NumberFormatException on a malformed
    // cursor; callers should treat that as a bad request.
    private Long decodeCursor(String cursor) {
        byte[] decoded = Base64.getDecoder().decode(cursor);
        return Long.parseLong(new String(decoded, java.nio.charset.StandardCharsets.UTF_8));
    }
}
// 3. Multi-level Caching Strategy
// 3. Multi-level Caching Strategy
//
// L1 = in-process cache (Caffeine, fast but per-node), L2 = Redis (shared).
// Reads go L1 -> L2 -> database, repopulating caches on the way back.
//
// NOTE(review): `log` is used but not declared in this snippet — presumably
// @Slf4j on the real class; confirm.
@Service
public class CachingService {

    @Autowired
    private RedisTemplate<String, Object> redisTemplate;

    @Autowired
    private CacheManager l1CacheManager; // Local cache (Caffeine)

    @Autowired
    private UserRepository userRepository;

    // Up to 50 waits of 100ms (~5s) before bypassing the stampede lock and
    // reading the database directly.
    private static final int MAX_LOCK_WAIT_ATTEMPTS = 50;

    /** L1 (local) then L2 (Redis) lookup, with database fallback. */
    public User getUserWithMultiLevelCache(Long userId) {
        String cacheKey = "user:" + userId;
        // Try L1 cache first
        Cache l1Cache = l1CacheManager.getCache("users");
        if (l1Cache != null) {
            Cache.ValueWrapper cached = l1Cache.get(userId);
            if (cached != null) {
                return (User) cached.get();
            }
        }
        // Try L2 cache (Redis)
        User user = (User) redisTemplate.opsForValue().get(cacheKey);
        if (user != null) {
            // Promote to L1 for subsequent local hits
            if (l1Cache != null) {
                l1Cache.put(userId, user);
            }
            return user;
        }
        // Database fallback
        user = userRepository.findById(userId).orElse(null);
        if (user != null) {
            // Populate both cache levels
            redisTemplate.opsForValue().set(cacheKey, user, Duration.ofHours(1));
            if (l1Cache != null) {
                l1Cache.put(userId, user);
            }
        }
        return user;
    }

    /** Pre-loads frequently accessed users into both cache levels after startup. */
    @EventListener
    @Async
    public void warmupCacheOnStartup(ApplicationReadyEvent event) {
        log.info("Starting cache warmup...");
        // Load frequently accessed users
        List<Long> frequentUserIds = userRepository.findFrequentlyAccessedUserIds();
        frequentUserIds.parallelStream()
            .forEach(this::getUserWithMultiLevelCache);
        log.info("Cache warmup completed for {} users", frequentUserIds.size());
    }

    /** Evicts a user from every cache level plus the derived Redis entries. */
    @CacheEvict(value = {"users", "userProfiles"}, key = "#userId")
    public void invalidateUserCache(Long userId) {
        String cacheKey = "user:" + userId;
        redisTemplate.delete(cacheKey);
        // Invalidate related caches
        redisTemplate.delete("user:profile:" + userId);
        redisTemplate.delete("user:permissions:" + userId);
    }

    /**
     * Cache-stampede protection: only one caller loads the user from the
     * database while the rest briefly wait and re-check Redis. Retries via a
     * bounded loop — the original recursive retry risked StackOverflowError
     * under sustained contention — and falls back to a direct database read
     * if the lock never becomes available.
     */
    public User getUserWithLock(Long userId) {
        String lockKey = "lock:user:" + userId;
        String cacheKey = "user:" + userId;
        for (int attempt = 0; attempt < MAX_LOCK_WAIT_ATTEMPTS; attempt++) {
            // Cache check (first iteration) / re-check after waiting
            User user = (User) redisTemplate.opsForValue().get(cacheKey);
            if (user != null) {
                return user;
            }
            // 30s TTL guarantees eventual release even if the lock holder dies
            Boolean lockAcquired = redisTemplate.opsForValue()
                .setIfAbsent(lockKey, "locked", Duration.ofSeconds(30));
            if (Boolean.TRUE.equals(lockAcquired)) {
                try {
                    // Double-check: another thread may have filled the cache
                    // between our read and the lock acquisition
                    user = (User) redisTemplate.opsForValue().get(cacheKey);
                    if (user != null) {
                        return user;
                    }
                    // Load from database and cache the result
                    user = userRepository.findById(userId).orElse(null);
                    if (user != null) {
                        redisTemplate.opsForValue().set(cacheKey, user, Duration.ofHours(1));
                    }
                    return user;
                } finally {
                    redisTemplate.delete(lockKey);
                }
            }
            // Someone else is loading; wait briefly, then re-check the cache
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                break; // stop waiting, fall through to the direct read
            }
        }
        // Interrupted or lock contention exhausted: bypass the cache
        return userRepository.findById(userId).orElse(null);
    }
}
// 4. Connection Pool Configuration
// 4. Connection Pool Configuration
@Configuration
public class DatabaseConfig {

    /** Binds app.datasource.primary.* (url, username, password, driver-class-name). */
    @Bean
    @Primary
    @ConfigurationProperties("app.datasource.primary")
    public DataSourceProperties primaryDataSourceProperties() {
        return new DataSourceProperties();
    }

    /**
     * Primary HikariCP pool. The self-invocations of
     * primaryDataSourceProperties() are safe: @Configuration classes are
     * CGLIB-proxied, so each call returns the singleton bean rather than a
     * fresh instance.
     */
    @Bean
    @Primary
    public DataSource primaryDataSource() {
        HikariConfig config = new HikariConfig();
        config.setPoolName("primary");
        // Connection Settings
        config.setJdbcUrl(primaryDataSourceProperties().getUrl());
        config.setUsername(primaryDataSourceProperties().getUsername());
        config.setPassword(primaryDataSourceProperties().getPassword());
        config.setDriverClassName(primaryDataSourceProperties().getDriverClassName());
        // Pool Size Configuration
        config.setMinimumIdle(5);                // Idle connections kept warm
        config.setMaximumPoolSize(20);           // Hard cap on connections
        config.setConnectionTimeout(30000);      // 30s to obtain a connection before failing
        config.setIdleTimeout(600000);           // Retire idle connections after 10 minutes
        config.setMaxLifetime(1800000);          // Recycle connections after 30 minutes
        config.setLeakDetectionThreshold(60000); // Log connections held longer than 1 minute
        // MySQL Connector/J statement-cache and batching optimizations
        config.addDataSourceProperty("cachePrepStmts", "true");
        config.addDataSourceProperty("prepStmtCacheSize", "250");
        config.addDataSourceProperty("prepStmtCacheSqlLimit", "2048");
        config.addDataSourceProperty("useServerPrepStmts", "true");
        config.addDataSourceProperty("rewriteBatchedStatements", "true");
        config.addDataSourceProperty("cacheResultSetMetadata", "true");
        config.addDataSourceProperty("cacheServerConfiguration", "true");
        config.addDataSourceProperty("elideSetAutoCommits", "true");
        config.addDataSourceProperty("maintainTimeStats", "false");
        // Connection Validation: no connectionTestQuery is set on purpose.
        // With a JDBC4-compliant driver HikariCP validates connections via
        // Connection.isValid(), which is faster; configuring a test query
        // forces the slower legacy path (per HikariCP documentation).
        config.setValidationTimeout(5000);
        return new HikariDataSource(config);
    }

    /**
     * Read-only pool for reporting workloads. The JDBC URL and credentials
     * come solely from @ConfigurationProperties binding of
     * app.datasource.readonly.* onto the returned HikariDataSource
     * (jdbc-url, username, password must be present there — nothing is set
     * programmatically here).
     */
    @Bean
    @ConfigurationProperties("app.datasource.readonly")
    public DataSource readOnlyDataSource() {
        HikariConfig config = new HikariConfig();
        config.setPoolName("readonly");
        // Smaller pool tuned for read traffic
        config.setMinimumIdle(2);
        config.setMaximumPoolSize(10);
        config.setReadOnly(true);
        return new HikariDataSource(config);
    }
}