Files
go-jdenticon/cmd/jdenticon/batch_bench_test.go
Kevin McIntyre d9e84812ff Initial release: Go Jdenticon library v0.1.0
- Core library with SVG and PNG generation
- CLI tool with generate and batch commands
- Cross-platform path handling for Windows compatibility
- Comprehensive test suite with integration tests
2026-01-03 23:41:48 -05:00

241 lines
6.3 KiB
Go

package main
import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ungluedlabs/go-jdenticon/jdenticon"
)
// benchmarkSizes defines different test scenarios for batch processing.
// Each entry becomes one sub-benchmark; count is the number of input
// values (and therefore icons) generated per benchmark iteration.
var benchmarkSizes = []struct {
	name  string // sub-benchmark label passed to b.Run
	count int    // number of identicons to generate per iteration
}{
	{"Small", 50},
	{"Medium", 200},
	{"Large", 1000},
}
// BenchmarkBatchProcessing_Sequential measures batch throughput when a
// single worker handles every job (concurrency fixed at 1), across each
// of the standard input sizes.
func BenchmarkBatchProcessing_Sequential(b *testing.B) {
	for _, tc := range benchmarkSizes {
		b.Run(tc.name, func(b *testing.B) {
			benchmarkBatchWithConcurrency(b, tc.count, 1)
		})
	}
}
// BenchmarkBatchProcessing_Concurrent measures batch throughput across a
// matrix of input sizes and worker-pool widths (2, 4, NumCPU, 2*NumCPU).
func BenchmarkBatchProcessing_Concurrent(b *testing.B) {
	workerCounts := []int{2, 4, runtime.NumCPU(), runtime.NumCPU() * 2}
	for _, tc := range benchmarkSizes {
		for _, workers := range workerCounts {
			name := fmt.Sprintf("%s_Workers%d", tc.name, workers)
			b.Run(name, func(b *testing.B) {
				benchmarkBatchWithConcurrency(b, tc.count, workers)
			})
		}
	}
}
// benchmarkBatchWithConcurrency runs one batch-processing benchmark with a
// fixed icon count and worker-pool width, verifying every job succeeds and
// reporting averaged "icons/sec" and "workers" as custom metrics.
func benchmarkBatchWithConcurrency(b *testing.B, iconCount, concurrency int) {
	// Create a temporary working area: the input list and an output directory.
	tempDir := b.TempDir()
	inputFile := filepath.Join(tempDir, "test-input.txt")
	outputDir := filepath.Join(tempDir, "output")

	// Generate the input file once; it is reused across all b.N iterations.
	createTestInputFile(b, inputFile, iconCount)

	// Disable complexity limits so every run performs identical work and
	// results are comparable across benchmark runs.
	config, err := jdenticon.Configure(jdenticon.WithMaxComplexity(-1))
	if err != nil {
		b.Fatalf("Failed to create config: %v", err)
	}
	generator, err := jdenticon.NewGeneratorWithConfig(config, concurrency*100)
	if err != nil {
		b.Fatalf("Failed to create generator: %v", err)
	}

	b.ResetTimer()
	b.ReportAllocs()

	// Accumulate processing time across iterations so the custom metric
	// reflects ALL iterations, not just the last one.
	var totalDuration time.Duration
	for i := 0; i < b.N; i++ {
		// Directory cleanup is per-iteration setup, not the work under
		// test — exclude it from the benchmark timing.
		b.StopTimer()
		os.RemoveAll(outputDir)
		if err := os.MkdirAll(outputDir, 0755); err != nil {
			b.Fatalf("Failed to create output directory: %v", err)
		}
		b.StartTimer()

		start := time.Now()

		// Execute batch processing: prepare jobs, then run them through the
		// simplified benchmark worker pool.
		jobs, total, err := prepareJobs(inputFile, outputDir, FormatSVG, 64)
		if err != nil {
			b.Fatalf("Failed to prepare jobs: %v", err)
		}
		if total != iconCount {
			b.Fatalf("Expected %d jobs, got %d", iconCount, total)
		}
		stats := processBenchmarkJobs(jobs, generator, FormatSVG, concurrency)

		totalDuration += time.Since(start)

		// Verify all jobs completed successfully.
		processed := atomic.LoadInt64(&stats.processed)
		failed := atomic.LoadInt64(&stats.failed)
		if processed != int64(iconCount) {
			b.Fatalf("Expected %d processed, got %d", iconCount, processed)
		}
		if failed > 0 {
			b.Fatalf("Expected 0 failures, got %d", failed)
		}
	}

	// Report custom metrics once, averaged over all iterations.
	if totalDuration > 0 {
		b.ReportMetric(float64(iconCount)*float64(b.N)/totalDuration.Seconds(), "icons/sec")
	}
	b.ReportMetric(float64(concurrency), "workers")
}
// processBenchmarkJobs runs every job through a pool of concurrency worker
// goroutines and returns counters of successes and failures. It is a
// simplified version of the production path: no progress reporting and no
// cancellation (jobs run under context.Background()).
func processBenchmarkJobs(jobs []batchJob, generator *jdenticon.Generator, format FormatFlag, concurrency int) *batchStats {
	stats := &batchStats{}

	// The channel buffers every job, so it can be filled and closed up
	// front — no separate producer goroutine is needed, and workers drain
	// it until the close signal.
	jobChan := make(chan batchJob, len(jobs))
	for _, job := range jobs {
		jobChan <- job
	}
	close(jobChan)

	// Start workers and wait for all of them with a WaitGroup (idiomatic
	// replacement for a hand-rolled done channel).
	var wg sync.WaitGroup
	for i := 0; i < concurrency; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for job := range jobChan {
				if err := processJob(context.Background(), job, generator, format); err != nil {
					atomic.AddInt64(&stats.failed, 1)
				} else {
					atomic.AddInt64(&stats.processed, 1)
				}
			}
		}()
	}
	wg.Wait()

	return stats
}
// createTestInputFile generates a test input file with specified number of entries
func createTestInputFile(b *testing.B, filename string, count int) {
file, err := os.Create(filename)
if err != nil {
b.Fatalf("Failed to create test input file: %v", err)
}
defer file.Close()
var builder strings.Builder
for i := 0; i < count; i++ {
builder.WriteString(fmt.Sprintf("user%d@example.com\n", i))
}
if _, err := file.WriteString(builder.String()); err != nil {
b.Fatalf("Failed to write test input file: %v", err)
}
}
// BenchmarkJobPreparation measures only the job-preparation phase (input
// parsing and output-path construction) for each input size, without
// generating any icons.
func BenchmarkJobPreparation(b *testing.B) {
	for _, tc := range benchmarkSizes {
		b.Run(tc.name, func(b *testing.B) {
			dir := b.TempDir()
			input := filepath.Join(dir, "test-input.txt")
			output := filepath.Join(dir, "output")
			createTestInputFile(b, input, tc.count)

			b.ResetTimer()
			b.ReportAllocs()
			for n := 0; n < b.N; n++ {
				jobs, total, err := prepareJobs(input, output, FormatSVG, 64)
				switch {
				case err != nil:
					b.Fatalf("Failed to prepare jobs: %v", err)
				case total != tc.count:
					b.Fatalf("Expected %d jobs, got %d", tc.count, total)
				}
				_ = jobs // keep the result referenced
			}
		})
	}
}
// BenchmarkSingleJobProcessing measures the end-to-end cost of one icon
// job: SVG generation plus the file write.
func BenchmarkSingleJobProcessing(b *testing.B) {
	dir := b.TempDir()

	cfg, err := jdenticon.Configure(jdenticon.WithMaxComplexity(-1))
	if err != nil {
		b.Fatalf("Failed to create config: %v", err)
	}
	gen, err := jdenticon.NewGeneratorWithConfig(cfg, 100)
	if err != nil {
		b.Fatalf("Failed to create generator: %v", err)
	}

	job := batchJob{
		value:      "test@example.com",
		outputPath: filepath.Join(dir, "test.svg"),
		size:       64,
	}

	b.ResetTimer()
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		if err := processJob(context.Background(), job, gen, FormatSVG); err != nil {
			b.Fatalf("Failed to process job: %v", err)
		}
		os.Remove(job.outputPath) // reset for the next iteration
	}
}
// BenchmarkConcurrencyScaling shows how throughput changes as the worker
// pool doubles in size, from 1 up to twice the CPU count, with a fixed
// workload of 500 icons.
func BenchmarkConcurrencyScaling(b *testing.B) {
	const icons = 500
	limit := runtime.NumCPU() * 2
	for w := 1; w <= limit; w <<= 1 {
		b.Run(fmt.Sprintf("Workers%d", w), func(b *testing.B) {
			benchmarkBatchWithConcurrency(b, icons, w)
		})
	}
}