Visual Regression Testing

Automatically detect visual changes in your web applications using screenshot comparison. This guide shows you how to set up visual regression testing with the Screenshots API for CI/CD pipelines.

Prerequisites

A Supacrawler API key (exported as the SUPACRAWLER_API_KEY environment variable in CI), plus Python with the requests and Pillow packages installed for the pipeline examples below.

Quick example

Capture screenshots for visual regression testing:

# Request a full-page, high-quality screenshot; wait_for delays capture
# until the given CSS selector is present in the page.
curl -X POST https://api.supacrawler.com/api/v1/screenshots \
  -H "Authorization: Bearer YOUR_API_KEY" \
  -H "Content-Type: application/json" \
  -d '{
    "url": "https://app.example.com/dashboard",
    "full_page": true,
    "quality": 95,
    "wait_for": ".content-loaded"
  }'

CI/CD integration

Set up visual regression testing in your deployment pipeline:

# visual_regression_test.py
import requests
import os
import hashlib
from PIL import Image
from io import BytesIO

class VisualRegressionTester:
    """Captures page screenshots via the Screenshots API and flags pixel-level changes.

    Screenshots are compared remotely-hosted-image against remotely-hosted-image,
    so baselines can live in any URL-addressable storage (S3, artifact store, ...).
    """

    def __init__(self, api_key):
        self.api_key = api_key
        self.base_url = "https://api.supacrawler.com/api/v1"

    def capture_screenshot(self, url, test_name):
        """Capture a full-page screenshot of *url* and return its hosted image URL.

        Args:
            url: Page to capture.
            test_name: Identifier for this test case (kept for caller-side
                bookkeeping; not sent to the API).

        Returns:
            The ``screenshot_url`` from the API response, or ``None`` if the
            response carried no URL.

        Raises:
            requests.HTTPError: If the API rejects the request (bad key, quota, ...).
        """
        response = requests.post(
            f"{self.base_url}/screenshots",
            headers={"Authorization": f"Bearer {self.api_key}"},
            json={
                "url": url,
                "full_page": True,
                "quality": 95,
                "wait_for": ".app-loaded, .content-ready",
                "viewport_width": 1920,
                "viewport_height": 1080,
                "delay": 2000,  # Wait for animations to settle before capture
            },
            timeout=60,  # don't hang the CI job on a stalled request
        )
        # Fail fast with a clear error instead of silently returning None
        # when the API call itself failed.
        response.raise_for_status()
        return response.json().get('screenshot_url')

    def compare_screenshots(self, baseline_url, current_url, threshold=0.1):
        """Return True if the two screenshots differ by more than *threshold*.

        Args:
            baseline_url: URL of the stored baseline image.
            current_url: URL of the freshly captured image.
            threshold: Maximum tolerated fraction of differing pixels (0.0-1.0).

        Returns:
            True when the fraction of differing pixels exceeds *threshold*
            (or the dimensions differ), otherwise False.
        """
        # Download both images
        baseline_img = Image.open(BytesIO(requests.get(baseline_url, timeout=60).content))
        current_img = Image.open(BytesIO(requests.get(current_url, timeout=60).content))

        if baseline_img.size != current_img.size:
            return True  # a size change is always treated as a regression

        # Normalize to a common mode so pixel tuples are comparable even if
        # one image is RGBA and the other RGB.
        baseline_img = baseline_img.convert("RGB")
        current_img = current_img.convert("RGB")

        # Single C-level pass over the pixel data; far faster than calling
        # getpixel() once per pixel in a nested Python loop.
        total_pixels = baseline_img.size[0] * baseline_img.size[1]
        diff_pixels = sum(
            1 for a, b in zip(baseline_img.getdata(), current_img.getdata()) if a != b
        )

        # Simple exact-pixel comparison (swap in perceptual diffing if needed)
        return diff_pixels / total_pixels > threshold

# Usage in CI/CD
def run_visual_tests():
    """Screenshot each configured page, diff against its baseline, and fail on drift."""
    tester = VisualRegressionTester(os.environ['SUPACRAWLER_API_KEY'])

    pages = [
        {"name": "homepage", "url": "https://app.example.com/"},
        {"name": "dashboard", "url": "https://app.example.com/dashboard"},
        {"name": "settings", "url": "https://app.example.com/settings"}
    ]

    failed_tests = []

    for page in pages:
        name = page["name"]
        current = tester.capture_screenshot(page["url"], name)
        baseline = get_baseline_screenshot(name)  # Your storage

        if not baseline:
            # First run for this page: record the capture as the reference image.
            save_baseline_screenshot(name, current)
            print(f"📸 Baseline saved for {name}")
            continue

        if tester.compare_screenshots(baseline, current):
            failed_tests.append(name)
            print(f"❌ Visual regression detected in {name}")
        else:
            print(f"✅ {name} visual test passed")

    if failed_tests:
        raise Exception(f"Visual regression tests failed: {', '.join(failed_tests)}")

    print("🎉 All visual regression tests passed!")

if __name__ == "__main__":
    run_visual_tests()

Watch-based monitoring

Set up continuous visual monitoring:

# Monitor for unexpected visual changes in production
def setup_visual_monitoring():
    """Register a watch job per critical page so visual changes fire webhooks."""

    critical_pages = [
        {
            "name": "Login Page",
            "url": "https://app.example.com/login",
            "frequency": "hourly"
        },
        {
            "name": "Checkout Flow",
            "url": "https://app.example.com/checkout", 
            "frequency": "every_30_minutes"
        },
        {
            "name": "Homepage",
            "url": "https://app.example.com/",
            "frequency": "daily"
        }
    ]

    for page in critical_pages:
        # Build the watch-job payload for this page.
        watch_config = {
            "url": page["url"],
            "frequency": page["frequency"],
            "selector": "body",  # Monitor entire page
            "notification_preference": "changes_only",

            # Visual evidence attached to each change notification
            "include_image": True,
            "screenshot_quality": 95,
            "full_page": True,

            # Alert integration: POSTed to your endpoint on every change
            "webhook_url": "https://company.com/api/visual-regression-alert",
            "webhook_headers": {
                "X-Page": page["name"],
                "X-Alert-Type": "visual-change"
            }
        }

        requests.post(
            "https://api.supacrawler.com/api/v1/watch",
            headers={"Authorization": f"Bearer {API_KEY}"},
            json=watch_config
        )

        print(f"Visual monitoring active for {page['name']}")

Best practices

  • Consistent viewports: Use standard viewport sizes for reliable comparisons
  • Wait for content: Allow time for dynamic content and animations to load
  • Baseline management: Store and version your baseline screenshots
  • Threshold tuning: Adjust difference thresholds based on your tolerance for changes
  • Critical path focus: Monitor the most important user journeys first

Was this page helpful?