Files
go-jdenticon/internal/perfsuite/suite.go
Kevin McIntyre f1544ef49c
Some checks failed
CI / Test (Go 1.24.x, ubuntu-latest) (push) Successful in 1m53s
CI / Code Quality (push) Failing after 26s
CI / Security Scan (push) Failing after 11s
CI / Test Coverage (push) Successful in 1m13s
CI / Benchmarks (push) Failing after 10m22s
CI / Build CLI (push) Failing after 8s
Benchmarks / Run Benchmarks (push) Failing after 10m13s
Release / Test (push) Successful in 55s
Release / Build (amd64, darwin, ) (push) Failing after 12s
Release / Build (amd64, linux, ) (push) Failing after 6s
Release / Build (amd64, windows, .exe) (push) Failing after 12s
Release / Build (arm64, darwin, ) (push) Failing after 12s
Release / Build (arm64, linux, ) (push) Failing after 12s
Release / Release (push) Has been skipped
CI / Test (Go 1.24.x, macos-latest) (push) Has been cancelled
CI / Test (Go 1.24.x, windows-latest) (push) Has been cancelled
chore: update module path to gitea.dockr.co/kev/go-jdenticon
Move hosting from GitHub to private Gitea instance.
2026-02-10 10:07:57 -05:00

470 lines
12 KiB
Go

package perfsuite
import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"runtime"
	"strings"
	"testing"
	"time"

	"gitea.dockr.co/kev/go-jdenticon/jdenticon"
)
// PerformanceBenchmark represents a single performance test case
type PerformanceBenchmark struct {
	// Name identifies the benchmark in baselines, reports, and console output.
	Name string
	// BenchmarkFunc is the body executed through testing.Benchmark.
	BenchmarkFunc func(*testing.B)
	RegressionLimit float64 // Percentage threshold for regression detection
	// Description is a human-readable summary of what the benchmark covers.
	Description string
}
// PerformanceMetrics holds performance metrics for comparison.
// The first three fields are captured from a testing.BenchmarkResult; the
// rest stamp when and on what environment the measurement was taken, so
// baselines recorded on a different OS/arch/Go version can be recognized.
type PerformanceMetrics struct {
	NsPerOp int64 `json:"ns_per_op"`
	AllocsPerOp int64 `json:"allocs_per_op"`
	BytesPerOp int64 `json:"bytes_per_op"`
	Timestamp time.Time `json:"timestamp"`
	GoVersion string `json:"go_version"`
	OS string `json:"os"`
	Arch string `json:"arch"`
}
// RegressionReport holds the results of a regression check.
// It is serialized as JSON to PerformanceSuite.ReportFile.
type RegressionReport struct {
	// Summary is a one-line human-readable outcome (pass count or regression count).
	Summary string `json:"summary"`
	// Failures lists "<benchmark>: <issues>" strings for each regressed benchmark.
	Failures []string `json:"failures"`
	Passed int `json:"passed"`
	Total int `json:"total"`
	// Results maps benchmark name to "PASS" or "FAIL: <issues>".
	Results map[string]string `json:"results"`
}
// PerformanceSuite manages the performance regression test suite
type PerformanceSuite struct {
	// Benchmarks is the ordered set of test cases to run.
	Benchmarks []PerformanceBenchmark
	// BaselineFile is the JSON file baselines are loaded from / saved to.
	BaselineFile string
	// ReportFile is where the JSON RegressionReport is written.
	ReportFile string
	// EnableReports controls whether CheckForRegressions writes ReportFile.
	EnableReports bool
	// FailOnRegress makes CheckForRegressions return an error on any regression.
	FailOnRegress bool
}
// NewPerformanceSuite creates a new performance regression test suite with
// the default set of tracked benchmarks, per-benchmark regression limits,
// default file locations, and reporting/failure behavior enabled.
func NewPerformanceSuite() *PerformanceSuite {
	// One entry per tracked code path; limits are percentage thresholds.
	benches := []PerformanceBenchmark{
		{Name: "CoreSVGGeneration", BenchmarkFunc: benchmarkCoreSVGGeneration, RegressionLimit: 15.0, Description: "Core SVG generation performance"},
		{Name: "CorePNGGeneration", BenchmarkFunc: benchmarkCorePNGGeneration, RegressionLimit: 25.0, Description: "Core PNG generation performance"},
		{Name: "CachedGeneration", BenchmarkFunc: benchmarkCachedGeneration, RegressionLimit: 10.0, Description: "Cached icon generation performance"},
		{Name: "BatchProcessing", BenchmarkFunc: benchmarkBatchProcessing, RegressionLimit: 20.0, Description: "Batch icon generation performance"},
		{Name: "LargeIcon256", BenchmarkFunc: benchmarkLargeIcon256, RegressionLimit: 30.0, Description: "Large icon (256px) generation performance"},
		{Name: "LargeIcon512", BenchmarkFunc: benchmarkLargeIcon512, RegressionLimit: 30.0, Description: "Large icon (512px) generation performance"},
		{Name: "ColorVariationSaturation", BenchmarkFunc: benchmarkColorVariationSaturation, RegressionLimit: 15.0, Description: "Color saturation variation performance"},
		{Name: "ColorVariationPadding", BenchmarkFunc: benchmarkColorVariationPadding, RegressionLimit: 15.0, Description: "Padding variation performance"},
	}
	return &PerformanceSuite{
		Benchmarks:    benches,
		BaselineFile:  ".performance_baselines.json",
		ReportFile:    "performance_report.json",
		EnableReports: true,
		FailOnRegress: true,
	}
}
// Individual benchmark functions
// benchmarkCoreSVGGeneration measures 64px SVG rendering across a small
// rotation of representative inputs (email, plain ASCII, unicode).
func benchmarkCoreSVGGeneration(b *testing.B) {
	inputs := []string{
		"test@example.com",
		"user123",
		"performance-test",
		"unicode-üser",
	}
	ctx := context.Background()
	b.ResetTimer()
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		if _, err := jdenticon.ToSVG(ctx, inputs[n%len(inputs)], 64); err != nil {
			b.Fatalf("SVG generation failed: %v", err)
		}
	}
}
// benchmarkCorePNGGeneration measures 64px PNG rendering across a small
// rotation of representative inputs (email, plain ASCII, unicode).
func benchmarkCorePNGGeneration(b *testing.B) {
	inputs := []string{
		"test@example.com",
		"user123",
		"performance-test",
		"unicode-üser",
	}
	ctx := context.Background()
	b.ResetTimer()
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		if _, err := jdenticon.ToPNG(ctx, inputs[n%len(inputs)], 64); err != nil {
			b.Fatalf("PNG generation failed: %v", err)
		}
	}
}
// benchmarkCachedGeneration measures repeated generation of the same icon
// through a cache-backed generator, so the timed loop exercises cache hits.
func benchmarkCachedGeneration(b *testing.B) {
	gen, err := jdenticon.NewGeneratorWithConfig(jdenticon.DefaultConfig(), 100)
	if err != nil {
		b.Fatalf("NewGenerator failed: %v", err)
	}
	const input = "cached-performance-test"
	ctx := context.Background()
	// Prime the cache so the timed loop measures hits, not the first miss.
	warm, err := gen.Generate(ctx, input, 64)
	if err != nil {
		b.Fatalf("Cache warmup failed: %v", err)
	}
	_, _ = warm.ToSVG()
	b.ResetTimer()
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		icon, genErr := gen.Generate(ctx, input, 64)
		if genErr != nil {
			b.Fatalf("Cached generation failed: %v", genErr)
		}
		if _, svgErr := icon.ToSVG(); svgErr != nil {
			b.Fatalf("Cached SVG failed: %v", svgErr)
		}
	}
}
// benchmarkBatchProcessing measures rendering a fixed batch of five 64px
// SVG icons per iteration.
func benchmarkBatchProcessing(b *testing.B) {
	batch := []string{
		"batch1@test.com", "batch2@test.com", "batch3@test.com",
		"batch4@test.com", "batch5@test.com",
	}
	ctx := context.Background()
	b.ResetTimer()
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		for _, item := range batch {
			if _, err := jdenticon.ToSVG(ctx, item, 64); err != nil {
				b.Fatalf("Batch processing failed: %v", err)
			}
		}
	}
}
// benchmarkLargeIcon256 measures SVG rendering at the larger 256px size.
func benchmarkLargeIcon256(b *testing.B) {
	const input = "large-icon-test-256"
	ctx := context.Background()
	b.ResetTimer()
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		if _, err := jdenticon.ToSVG(ctx, input, 256); err != nil {
			b.Fatalf("Large icon (256px) generation failed: %v", err)
		}
	}
}
// benchmarkLargeIcon512 measures SVG rendering at the largest 512px size.
func benchmarkLargeIcon512(b *testing.B) {
	const input = "large-icon-test-512"
	ctx := context.Background()
	b.ResetTimer()
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		if _, err := jdenticon.ToSVG(ctx, input, 512); err != nil {
			b.Fatalf("Large icon (512px) generation failed: %v", err)
		}
	}
}
// benchmarkColorVariationSaturation measures 64px SVG rendering with a
// non-default color saturation applied via config.
func benchmarkColorVariationSaturation(b *testing.B) {
	cfg := jdenticon.DefaultConfig()
	cfg.ColorSaturation = 0.9
	const input = "color-saturation-test"
	ctx := context.Background()
	b.ResetTimer()
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		if _, err := jdenticon.ToSVGWithConfig(ctx, input, 64, cfg); err != nil {
			b.Fatalf("Color saturation variation failed: %v", err)
		}
	}
}
// benchmarkColorVariationPadding measures 64px SVG rendering with a
// non-default padding applied via config.
func benchmarkColorVariationPadding(b *testing.B) {
	cfg := jdenticon.DefaultConfig()
	cfg.Padding = 0.15
	const input = "color-padding-test"
	ctx := context.Background()
	b.ResetTimer()
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		if _, err := jdenticon.ToSVGWithConfig(ctx, input, 64, cfg); err != nil {
			b.Fatalf("Padding variation failed: %v", err)
		}
	}
}
// calculateChange reports the percentage difference of newVal relative to
// oldVal. A zero baseline maps to 0 when the new value is also zero, and to
// a sentinel 100% increase otherwise (a real ratio would be undefined).
func calculateChange(oldVal, newVal int64) float64 {
	switch {
	case oldVal != 0:
		return float64(newVal-oldVal) / float64(oldVal) * 100.0
	case newVal != 0:
		return 100.0
	default:
		return 0
	}
}
// RunBenchmark executes bench through the testing.Benchmark harness and
// packages the result together with environment metadata (timestamp, Go
// version, OS, arch) for later comparison against stored baselines.
func (ps *PerformanceSuite) RunBenchmark(bench PerformanceBenchmark) (PerformanceMetrics, error) {
	res := testing.Benchmark(bench.BenchmarkFunc)
	if res.N == 0 {
		// A zero iteration count means the harness never completed a run.
		return PerformanceMetrics{}, fmt.Errorf("benchmark %s failed to run", bench.Name)
	}
	m := PerformanceMetrics{
		NsPerOp:     res.NsPerOp(),
		AllocsPerOp: res.AllocsPerOp(),
		BytesPerOp:  res.AllocedBytesPerOp(),
		Timestamp:   time.Now(),
		GoVersion:   runtime.Version(),
		OS:          runtime.GOOS,
		Arch:        runtime.GOARCH,
	}
	return m, nil
}
// LoadBaselines loads performance baselines from ps.BaselineFile.
// A missing file is not an error: it yields an empty (non-nil) baseline map
// so callers can distinguish "no baselines yet" from a real I/O failure.
func (ps *PerformanceSuite) LoadBaselines() (map[string]PerformanceMetrics, error) {
	baselines := make(map[string]PerformanceMetrics)
	data, err := os.ReadFile(ps.BaselineFile)
	if err != nil {
		// Reading directly (instead of Stat-then-Read) avoids the race where
		// the file disappears between the existence check and the read.
		if errors.Is(err, os.ErrNotExist) {
			return baselines, nil
		}
		return nil, fmt.Errorf("failed to read baselines: %w", err)
	}
	if err := json.Unmarshal(data, &baselines); err != nil {
		return nil, fmt.Errorf("failed to parse baselines: %w", err)
	}
	return baselines, nil
}
// SaveBaselines serializes the baseline map as indented JSON and writes it
// to ps.BaselineFile, replacing any previous contents.
func (ps *PerformanceSuite) SaveBaselines(baselines map[string]PerformanceMetrics) error {
	encoded, err := json.MarshalIndent(baselines, "", " ")
	if err != nil {
		return fmt.Errorf("failed to marshal baselines: %w", err)
	}
	// #nosec G306 -- 0644 is appropriate for benchmark data files
	return os.WriteFile(ps.BaselineFile, encoded, 0644)
}
// EstablishBaselines runs every configured benchmark once and persists the
// resulting metrics to ps.BaselineFile as the new comparison baseline.
// Progress is printed to stdout as each benchmark completes.
func (ps *PerformanceSuite) EstablishBaselines() error {
	fmt.Println("🔥 Establishing performance baselines...")
	fresh := make(map[string]PerformanceMetrics, len(ps.Benchmarks))
	for _, bm := range ps.Benchmarks {
		fmt.Printf(" Running %s...", bm.Name)
		m, err := ps.RunBenchmark(bm)
		if err != nil {
			return fmt.Errorf("failed to run benchmark %s: %w", bm.Name, err)
		}
		fresh[bm.Name] = m
		fmt.Printf(" ✓ %d ns/op, %d allocs/op\n", m.NsPerOp, m.AllocsPerOp)
	}
	if err := ps.SaveBaselines(fresh); err != nil {
		return fmt.Errorf("failed to save baselines: %w", err)
	}
	fmt.Printf("✅ Baselines established (%d benchmarks saved to %s)\n", len(fresh), ps.BaselineFile)
	return nil
}
// CheckForRegressions runs benchmarks and compares against baselines
func (ps *PerformanceSuite) CheckForRegressions() error {
fmt.Println("🔍 Checking for performance regressions...")
baselines, err := ps.LoadBaselines()
if err != nil {
return fmt.Errorf("failed to load baselines: %w", err)
}
if len(baselines) == 0 {
return fmt.Errorf("no baselines found - run EstablishBaselines() first")
}
var failures []string
passed := 0
total := 0
results := make(map[string]string)
for _, bench := range ps.Benchmarks {
baseline, exists := baselines[bench.Name]
if !exists {
fmt.Printf("⚠️ %s: No baseline found, skipping\n", bench.Name)
continue
}
fmt.Printf(" %s...", bench.Name)
current, err := ps.RunBenchmark(bench)
if err != nil {
return fmt.Errorf("failed to run benchmark %s: %w", bench.Name, err)
}
total++
// Calculate changes
timeChange := calculateChange(baseline.NsPerOp, current.NsPerOp)
allocChange := calculateChange(baseline.AllocsPerOp, current.AllocsPerOp)
memChange := calculateChange(baseline.BytesPerOp, current.BytesPerOp)
// Check for regressions
hasRegression := false
var issues []string
if timeChange > bench.RegressionLimit {
hasRegression = true
issues = append(issues, fmt.Sprintf("%.1f%% slower", timeChange))
}
if allocChange > bench.RegressionLimit {
hasRegression = true
issues = append(issues, fmt.Sprintf("%.1f%% more allocs", allocChange))
}
if memChange > bench.RegressionLimit {
hasRegression = true
issues = append(issues, fmt.Sprintf("%.1f%% more memory", memChange))
}
if hasRegression {
status := fmt.Sprintf(" ❌ REGRESSION: %s", strings.Join(issues, ", "))
failures = append(failures, fmt.Sprintf("%s: %s", bench.Name, strings.Join(issues, ", ")))
fmt.Println(status)
results[bench.Name] = "FAIL: " + strings.Join(issues, ", ")
} else {
status := " ✅ PASS"
if timeChange != 0 || allocChange != 0 || memChange != 0 {
status += fmt.Sprintf(" (%.1f%% time, %.1f%% allocs, %.1f%% mem)", timeChange, allocChange, memChange)
}
fmt.Println(status)
passed++
results[bench.Name] = "PASS"
}
}
// Report summary
fmt.Printf("\n📊 Performance regression check completed:\n")
fmt.Printf(" • %d tests passed\n", passed)
fmt.Printf(" • %d tests failed\n", len(failures))
fmt.Printf(" • %d tests total\n", total)
// Generate the report file
if ps.EnableReports {
summary := fmt.Sprintf("%d/%d tests passed.", passed, total)
if len(failures) > 0 {
summary = fmt.Sprintf("%d regressions detected.", len(failures))
}
report := RegressionReport{
Summary: summary,
Failures: failures,
Passed: passed,
Total: total,
Results: results,
}
data, err := json.MarshalIndent(report, "", " ")
if err != nil {
return fmt.Errorf("failed to marshal report: %w", err)
}
// #nosec G306 -- 0644 is appropriate for benchmark report files
if err := os.WriteFile(ps.ReportFile, data, 0644); err != nil {
return fmt.Errorf("failed to write report file: %w", err)
}
}
if len(failures) > 0 {
fmt.Printf("\n❌ Performance regressions detected:\n")
for _, failure := range failures {
fmt.Printf(" • %s\n", failure)
}
if ps.FailOnRegress {
return fmt.Errorf("performance regressions detected")
}
} else {
fmt.Printf("\n✅ No performance regressions detected!\n")
}
return nil
}