Screenshots Endpoint
Capture website screenshots using the Python SDK
Basic Usage
from supacrawler import SupacrawlerClient
# Device/format options are standalone generated enum classes — they are not
# attributes on ScreenshotCreateRequest (its fields are plain attrs fields).
from supacrawler.scraper_client.models import (
    ScreenshotCreateRequest,
    ScreenshotCreateRequestDevice,
    ScreenshotCreateRequestFormat,
)

client = SupacrawlerClient(api_key="YOUR_API_KEY")

# Create screenshot job
job = client.create_screenshot_job(ScreenshotCreateRequest(
    url="https://example.com",
    device=ScreenshotCreateRequestDevice.DESKTOP,
    format_=ScreenshotCreateRequestFormat.PNG,  # format_ avoids shadowing the builtin `format`
))

# Wait for completion (polls until the job finishes or the timeout elapses)
result = client.wait_for_screenshot(job.job_id, timeout_seconds=30)
print(f"screenshot URL: {result.screenshot_url}")
print(f"metadata: {result.metadata}")
Device Presets
Desktop
from supacrawler.scraper_client.models import ScreenshotCreateRequestDevice, ScreenshotCreateRequestFormat

job = client.create_screenshot_job(ScreenshotCreateRequest(
    url="https://example.com",
    device=ScreenshotCreateRequestDevice.DESKTOP,
    format_=ScreenshotCreateRequestFormat.PNG,
    full_page=False  # Viewport only
))
Mobile
from supacrawler.scraper_client.models import ScreenshotCreateRequestDevice, ScreenshotCreateRequestFormat

job = client.create_screenshot_job(ScreenshotCreateRequest(
    url="https://example.com",
    device=ScreenshotCreateRequestDevice.MOBILE,
    format_=ScreenshotCreateRequestFormat.JPEG,
    quality=90,
    full_page=True  # Full page screenshot
))
Tablet
from supacrawler.scraper_client.models import ScreenshotCreateRequestDevice, ScreenshotCreateRequestFormat

job = client.create_screenshot_job(ScreenshotCreateRequest(
    url="https://example.com",
    device=ScreenshotCreateRequestDevice.TABLET,
    format_=ScreenshotCreateRequestFormat.PNG,
    is_landscape=True  # Landscape orientation
))
Custom Size
from supacrawler.scraper_client.models import ScreenshotCreateRequestDevice, ScreenshotCreateRequestFormat

job = client.create_screenshot_job(ScreenshotCreateRequest(
    url="https://example.com",
    device=ScreenshotCreateRequestDevice.CUSTOM,
    width=1200,
    height=800,
    device_scale=1.5,  # 1.5x pixel ratio
    format_=ScreenshotCreateRequestFormat.PNG
))
Image Formats
PNG (Lossless)
from supacrawler.scraper_client.models import ScreenshotCreateRequestDevice, ScreenshotCreateRequestFormat

job = client.create_screenshot_job(ScreenshotCreateRequest(
    url="https://example.com",
    device=ScreenshotCreateRequestDevice.DESKTOP,
    format_=ScreenshotCreateRequestFormat.PNG  # lossless; no quality parameter
))
JPEG (Compressed)
from supacrawler.scraper_client.models import ScreenshotCreateRequestDevice, ScreenshotCreateRequestFormat

job = client.create_screenshot_job(ScreenshotCreateRequest(
    url="https://example.com",
    device=ScreenshotCreateRequestDevice.DESKTOP,
    format_=ScreenshotCreateRequestFormat.JPEG,
    quality=95  # 1-100, higher = better quality
))
Dark Mode
from supacrawler.scraper_client.models import ScreenshotCreateRequestDevice, ScreenshotCreateRequestFormat

job = client.create_screenshot_job(ScreenshotCreateRequest(
    url="https://example.com",
    device=ScreenshotCreateRequestDevice.DESKTOP,
    format_=ScreenshotCreateRequestFormat.PNG,
    dark_mode=True  # Enable dark mode
))
Content Blocking
Block Ads and Trackers
from supacrawler.scraper_client.models import (
    ScreenshotCreateRequestDevice,
    ScreenshotCreateRequestFormat,
    ScreenshotCreateRequestBlockResourcesItem,
)

job = client.create_screenshot_job(ScreenshotCreateRequest(
    url="https://news-site.com",
    device=ScreenshotCreateRequestDevice.DESKTOP,
    format_=ScreenshotCreateRequestFormat.PNG,
    # Block unwanted content
    block_ads=True,
    block_cookies=True,
    block_trackers=True,
    # Block specific resource types
    block_resources=[
        ScreenshotCreateRequestBlockResourcesItem.IMAGE,  # Block images
        ScreenshotCreateRequestBlockResourcesItem.FONT    # Block fonts
    ]
))
Hide Elements
from supacrawler.scraper_client.models import ScreenshotCreateRequestDevice, ScreenshotCreateRequestFormat

job = client.create_screenshot_job(ScreenshotCreateRequest(
    url="https://example.com",
    device=ScreenshotCreateRequestDevice.DESKTOP,
    format_=ScreenshotCreateRequestFormat.PNG,
    # Hide specific elements using CSS selectors
    hide_selectors=["footer", ".social-links", "#newsletter-popup"]
))
Waiting Strategies
Wait for Page Load
from supacrawler.scraper_client.models import (
    ScreenshotCreateRequestDevice,
    ScreenshotCreateRequestFormat,
    ScreenshotCreateRequestWaitUntil,
)

job = client.create_screenshot_job(ScreenshotCreateRequest(
    url="https://example.com",
    device=ScreenshotCreateRequestDevice.DESKTOP,
    format_=ScreenshotCreateRequestFormat.PNG,
    # Wait until DOM is loaded
    wait_until=ScreenshotCreateRequestWaitUntil.DOMCONTENTLOADED,
    delay=2  # Additional 2 second delay
))
Wait for Network Idle
from supacrawler.scraper_client.models import (
    ScreenshotCreateRequestDevice,
    ScreenshotCreateRequestFormat,
    ScreenshotCreateRequestWaitUntil,
)

job = client.create_screenshot_job(ScreenshotCreateRequest(
    url="https://dynamic-site.com",
    device=ScreenshotCreateRequestDevice.DESKTOP,
    format_=ScreenshotCreateRequestFormat.PNG,
    # Wait for network to be idle (all images loaded)
    wait_until=ScreenshotCreateRequestWaitUntil.NETWORKIDLE,
    delay=1
))
Accessibility Features
from supacrawler.scraper_client.models import ScreenshotCreateRequestDevice, ScreenshotCreateRequestFormat

job = client.create_screenshot_job(ScreenshotCreateRequest(
    url="https://example.com",
    device=ScreenshotCreateRequestDevice.DESKTOP,
    format_=ScreenshotCreateRequestFormat.PNG,
    # Accessibility settings
    dark_mode=True,
    reduced_motion=True,
    high_contrast=True
))
Custom Headers and Cookies
from supacrawler.scraper_client.models import (
    ScreenshotCreateRequestDevice,
    ScreenshotCreateRequestFormat,
    ScreenshotCreateRequestHeaders,
    ScreenshotCreateRequestCookiesItem,
)

# Custom headers sent with the page request
headers = ScreenshotCreateRequestHeaders.from_dict({
    "Accept-Language": "en-US,en;q=0.9",
    "X-Custom-Header": "screenshot-example"
})

# Custom cookies set before the page loads
cookies = [
    ScreenshotCreateRequestCookiesItem.from_dict({
        "name": "theme",
        "value": "dark",
        "domain": "example.com"
    }),
    ScreenshotCreateRequestCookiesItem.from_dict({
        "name": "auth_token",
        "value": "abc123",
        "path": "/",
        "httpOnly": True
    })
]

job = client.create_screenshot_job(ScreenshotCreateRequest(
    url="https://example.com",
    device=ScreenshotCreateRequestDevice.TABLET,
    format_=ScreenshotCreateRequestFormat.JPEG,
    quality=95,
    headers=headers,
    cookies=cookies,
    is_landscape=True
))
Complete Example
import os

from dotenv import load_dotenv

from supacrawler import SupacrawlerClient
from supacrawler.scraper_client.models import (
    ScreenshotCreateRequest,
    ScreenshotCreateRequestDevice,
    ScreenshotCreateRequestFormat,
    ScreenshotCreateRequestWaitUntil,
    ScreenshotCreateRequestBlockResourcesItem
)

load_dotenv()
client = SupacrawlerClient(api_key=os.environ.get("SUPACRAWLER_API_KEY"))

print("🚀 screenshot examples")

# Example 1: plain desktop viewport capture (not full page)
desktop_job = client.create_screenshot_job(ScreenshotCreateRequest(
    url="https://antoineross.com",
    device=ScreenshotCreateRequestDevice.DESKTOP,
    format_=ScreenshotCreateRequestFormat.PNG,
    full_page=False
))
desktop = client.wait_for_screenshot(desktop_job.job_id, timeout_seconds=30)
print(f"\n✅ desktop screenshot: {desktop.screenshot_url}")
print(f"size: {desktop.metadata.get('width')}x{desktop.metadata.get('height')}")

# Example 2: full-page mobile capture with dark mode forced on
mobile_job = client.create_screenshot_job(ScreenshotCreateRequest(
    url="https://antoineross.com",
    device=ScreenshotCreateRequestDevice.MOBILE,
    format_=ScreenshotCreateRequestFormat.JPEG,
    quality=90,
    dark_mode=True,
    full_page=True
))
mobile = client.wait_for_screenshot(mobile_job.job_id, timeout_seconds=30)
print(f"\n✅ mobile dark mode: {mobile.screenshot_url}")

# Example 3: block ads/cookie banners/trackers, wait for a quiet network
blocked_job = client.create_screenshot_job(ScreenshotCreateRequest(
    url="https://www.quora.com/Which-website-has-the-most-ads",
    device=ScreenshotCreateRequestDevice.DESKTOP,
    format_=ScreenshotCreateRequestFormat.PNG,
    block_ads=True,
    block_cookies=True,
    block_trackers=True,
    wait_until=ScreenshotCreateRequestWaitUntil.NETWORKIDLE,
    delay=2
))
blocked = client.wait_for_screenshot(blocked_job.job_id, timeout_seconds=45)
print(f"\n✅ content blocked: {blocked.screenshot_url}")
print(f"load time: {blocked.metadata.get('load_time')}ms")
Retrieving Screenshots
Get with Renewed URL
# Screenshot URLs expire after 15 minutes
# Get a renewed signed URL
# NOTE(review): the retrieval response exposes `.screenshot` here, while the
# job result uses `.screenshot_url` — presumably different response models;
# confirm against the SDK's generated models.
renewed = client.get_screenshot(job.job_id)
print(f"renewed URL: {renewed.screenshot}")
Response Structure
shot = client.wait_for_screenshot(job.job_id)

# Signed URL for the captured image (valid for 15 minutes)
print(shot.screenshot_url)

# Capture metadata
print(shot.metadata['width'])      # image width in pixels
print(shot.metadata['height'])     # image height in pixels
print(shot.metadata['format'])     # image format (png/jpeg)
print(shot.metadata['file_size'])  # file size in bytes
print(shot.metadata['load_time'])  # page load time in ms
Error Handling
from supacrawler.scraper_client.models import ScreenshotCreateRequestDevice, ScreenshotCreateRequestFormat

try:
    job = client.create_screenshot_job(ScreenshotCreateRequest(
        url="https://example.com",
        device=ScreenshotCreateRequestDevice.DESKTOP,
        format_=ScreenshotCreateRequestFormat.PNG
    ))
    result = client.wait_for_screenshot(
        job.job_id,
        timeout_seconds=30
    )
    if result.screenshot_url:
        print(f"✅ screenshot captured: {result.screenshot_url}")
    else:
        print("❌ screenshot failed")
except TimeoutError:
    print("screenshot took too long")
except Exception as e:
    # Broad catch is acceptable at this example's top-level boundary;
    # narrow it to the SDK's specific error types in production code.
    print(f"error: {e}")
Next Steps
- Scrape Endpoint - Extract page content
- Watch Endpoint - Monitor for visual changes
- Crawl Endpoint - Capture multiple pages
Was this page helpful?