"""
|
|
Tests for scripts/bake-user-data.sh
|
|
|
|
This module provides comprehensive test coverage for the Golden AMI bake script,
|
|
including parameter validation, AWS S3 interactions (mocked), and installation steps.
|
|
|
|
Uses pytest and moto to mock AWS services.
|
|
"""
|
|
|
|
import json
import os
import subprocess
import tempfile
from pathlib import Path
from unittest.mock import MagicMock, call, patch

import boto3
import pytest
from botocore.exceptions import ClientError
from moto import mock_aws
|
|
# Markers for organizing tests
|
|
pytestmark = [pytest.mark.unit]
|
|
|
|
|
|
class TestBakeUserDataScript:
    """Unit tests covering the bake-user-data.sh entry-point script."""

    @pytest.fixture
    def script_path(self):
        """Resolve the location of bake-user-data.sh, failing fast if absent."""
        candidate = Path(__file__).parent.parent.parent / "scripts" / "bake-user-data.sh"
        assert candidate.exists(), f"Script not found at {candidate}"
        return str(candidate)

    @pytest.fixture
    def make_executable(self, script_path):
        """Make sure the script carries the execute bit before each test."""
        os.chmod(script_path, 0o755)
        return script_path

    def run_script(self, script_path, args=None, env=None):
        """
        Invoke bake-user-data.sh and capture its output.

        Args:
            script_path: Path to the script under test.
            args: Optional list of command-line arguments to append.
            env: Optional mapping of extra environment variables.

        Returns:
            subprocess.CompletedProcess with returncode, stdout, stderr.
        """
        command = [script_path, *(args or [])]

        # Merge caller overrides on top of the current environment, but always
        # keep SKIP_LOG_FILE=true so the script never redirects to a log file.
        environment = {**os.environ, **(env or {}), "SKIP_LOG_FILE": "true"}

        # Execute inside a throwaway directory so the script cannot leave
        # artifacts behind in the repository tree.
        with tempfile.TemporaryDirectory() as workdir:
            return subprocess.run(
                command,
                env=environment,
                cwd=workdir,
                capture_output=True,
                text=True,
                timeout=10,
            )

    # -------------------------------------------------------------------------
    # Parameter validation
    # -------------------------------------------------------------------------

    def test_script_requires_s3_bucket(self, make_executable):
        """Script should fail if S3_BUCKET is not provided."""
        proc = self.run_script(
            make_executable,
            args=["--artifact-key", "test.tar.gz", "--region", "eu-west-1"],
        )
        assert proc.returncode != 0, "Script should fail without S3_BUCKET"
        assert "S3_BUCKET is required" in proc.stderr

    def test_script_requires_artifact_key(self, make_executable):
        """Script should fail if ARTIFACT_KEY is not provided."""
        proc = self.run_script(
            make_executable,
            args=["--s3-bucket", "my-bucket", "--region", "eu-west-1"],
        )
        assert proc.returncode != 0, "Script should fail without ARTIFACT_KEY"
        assert "ARTIFACT_KEY is required" in proc.stderr

    def test_script_accepts_env_vars(self, make_executable):
        """Script should accept parameters via environment variables."""
        proc = self.run_script(
            make_executable,
            env={
                "S3_BUCKET": "test-bucket",
                "ARTIFACT_KEY": "test.tar.gz",
                "REGION": "eu-west-1",
            },
        )
        # With all parameters supplied, validation must not be the failure
        # mode; later steps (e.g. docker install) may still fail in a sandbox.
        assert "is required" not in proc.stderr or proc.returncode != 1

    def test_script_accepts_cli_args(self, make_executable):
        """Script should accept parameters via command-line arguments."""
        proc = self.run_script(
            make_executable,
            args=[
                "--s3-bucket", "test-bucket",
                "--artifact-key", "test.tar.gz",
                "--region", "eu-west-1",
            ],
        )
        # Validation must pass; anything after validation may legitimately fail.
        assert "is required" not in proc.stderr or proc.returncode != 1

    def test_script_cli_args_override_env_vars(self, make_executable):
        """Command-line arguments should override environment variables."""
        proc = self.run_script(
            make_executable,
            args=["--s3-bucket", "cli-bucket"],
            env={
                "S3_BUCKET": "env-bucket",
                "ARTIFACT_KEY": "test.tar.gz",
                "REGION": "eu-west-1",
            },
        )

        # The CLI value is the one that should be echoed in the script's logs.
        assert "cli-bucket" in proc.stdout or "cli-bucket" in proc.stderr

    def test_script_rejects_unknown_arguments(self, make_executable):
        """Script should reject unknown command-line arguments."""
        proc = self.run_script(make_executable, args=["--unknown-arg", "value"])
        assert proc.returncode != 0, "Script should fail with unknown argument"
        assert "Unknown argument" in proc.stderr

    # -------------------------------------------------------------------------
    # Output and logging
    # -------------------------------------------------------------------------

    def test_script_logs_parameters(self, make_executable):
        """Script should log detected parameters."""
        proc = self.run_script(
            make_executable,
            env={
                "S3_BUCKET": "test-bucket",
                "ARTIFACT_KEY": "artifact.tar.gz",
                "REGION": "us-east-1",
            },
        )

        combined = proc.stdout + proc.stderr
        for expected in ("test-bucket", "artifact.tar.gz", "us-east-1"):
            assert expected in combined

    def test_script_outputs_validation_success(self, make_executable):
        """Script should indicate successful parameter validation."""
        proc = self.run_script(
            make_executable,
            env={
                "S3_BUCKET": "test-bucket",
                "ARTIFACT_KEY": "test.tar.gz",
                "REGION": "eu-west-1",
            },
        )

        assert "✓ Parameters validated" in proc.stdout + proc.stderr

    # -------------------------------------------------------------------------
    # Exit codes
    # -------------------------------------------------------------------------

    def test_script_exit_code_1_on_validation_failure(self, make_executable):
        """Script should exit with code 1 on parameter validation failure."""
        proc = self.run_script(make_executable, args=[])
        assert proc.returncode == 1, f"Expected exit code 1, got {proc.returncode}"

    # -------------------------------------------------------------------------
    # Script structure
    # -------------------------------------------------------------------------

    def test_script_is_executable(self, make_executable):
        """Script file should be executable."""
        mode = os.stat(make_executable).st_mode
        # Any of the user/group/other execute bits is sufficient.
        assert mode & 0o111, "Script is not executable"

    def test_script_has_shebang(self, script_path):
        """Script should have a proper shebang."""
        with open(script_path, "r") as f:
            shebang = f.readline()
        assert shebang.startswith("#!/bin/bash"), "Script missing proper shebang"

    def test_script_uses_set_euo_pipefail(self, script_path):
        """Script should use set -euo pipefail for safety."""
        source = Path(script_path).read_text()
        assert "set -euo pipefail" in source, "Script should use 'set -euo pipefail'"

    # -------------------------------------------------------------------------
    # Documentation
    # -------------------------------------------------------------------------

    def test_script_has_header_documentation(self, script_path):
        """Script header should document every supported flag."""
        source = Path(script_path).read_text()

        for flag in ("--s3-bucket", "--artifact-key", "--region"):
            assert flag in source

    def test_script_documents_exit_codes(self, script_path):
        """Script should document exit codes."""
        source = Path(script_path).read_text()

        assert "Exit codes:" in source
        assert "0 - Success" in source

    def test_script_documents_usage_examples(self, script_path):
        """Script should include usage examples."""
        source = Path(script_path).read_text()
        assert "Usage" in source or "usage" in source

class TestBakeScriptIntegration:
    """Integration tests for bake-user-data.sh with mocked AWS operations."""

    @pytest.fixture
    def script_path(self):
        """Locate bake-user-data.sh relative to this test module."""
        return str(Path(__file__).parent.parent.parent / "scripts" / "bake-user-data.sh")

    @pytest.fixture
    def make_executable(self, script_path):
        """Grant the execute bit so the script can be launched directly."""
        os.chmod(script_path, 0o755)
        return script_path

    def test_script_validates_all_required_params_together(self, make_executable):
        """
        Running with no parameters at all should fail validation and name
        every missing required parameter in the error output.
        """
        run_env = {**os.environ, "SKIP_LOG_FILE": "true"}
        completed = subprocess.run(
            [make_executable],
            capture_output=True,
            text=True,
            timeout=10,
            cwd=tempfile.gettempdir(),
            env=run_env,
        )

        assert completed.returncode == 1
        # Both required parameters should be called out in the error text.
        assert "S3_BUCKET" in completed.stderr
        assert "ARTIFACT_KEY" in completed.stderr

    def test_script_parameter_validation_with_partial_args(self, make_executable):
        """Providing only --s3-bucket should still fail on the missing key."""
        run_env = {**os.environ, "SKIP_LOG_FILE": "true"}
        completed = subprocess.run(
            [make_executable, "--s3-bucket", "test-bucket"],
            capture_output=True,
            text=True,
            timeout=10,
            cwd=tempfile.gettempdir(),
            env=run_env,
        )

        assert completed.returncode == 1
        assert "ARTIFACT_KEY is required" in completed.stderr

class TestDocumentation:
    """Tests to verify documentation quality of the bake script."""

    @pytest.fixture
    def script_path(self):
        """Path to the bake-user-data.sh script."""
        script = Path(__file__).parent.parent.parent / "scripts" / "bake-user-data.sh"
        return str(script)

    def test_script_has_comprehensive_comments(self, script_path):
        """Script should have clear inline comments.

        Each major phase of the script must be introduced by a recognizable
        section banner so a reader can navigate it quickly.
        """
        with open(script_path, "r") as f:
            content = f.read()

        section_markers = [
            "Configuration & Defaults",
            "Argument Parsing",
            "Validation",
            "Installation Functions",
            "Main Execution",
        ]

        for marker in section_markers:
            assert marker in content, f"Missing section: {marker}"

    def test_readme_should_exist_or_docs_updated(self):
        """Verify that documentation exists for the bake process.

        External docs (README.md, docs/bake-process.md, a wiki page) are not
        yet required, so for now this only verifies the script's embedded
        usage documentation.  (A previous version built an unused list of
        candidate doc paths; it was dead code and has been removed.)
        """
        script = Path(__file__).parent.parent.parent / "scripts" / "bake-user-data.sh"
        with open(script, "r") as f:
            content = f.read()

        # Should have usage documentation embedded in the header.
        assert "Usage" in content

class TestS3Integration:
    """Integration tests using moto to mock AWS S3."""

    @pytest.mark.integration
    @mock_aws
    def test_s3_download_success(self):
        """A previously uploaded artifact should be retrievable from mocked S3."""
        region = "eu-west-1"
        s3_resource = boto3.resource("s3", region_name=region)
        s3_resource.create_bucket(
            Bucket="test-bucket",
            CreateBucketConfiguration={"LocationConstraint": region},
        )

        # Seed the bucket with a small test artifact.
        s3_resource.Bucket("test-bucket").put_object(
            Key="test-artifact.tar.gz", Body=b"test content"
        )

        # Confirm the object is visible through the client API as well.
        client = boto3.client("s3", region_name=region)
        head = client.head_object(Bucket="test-bucket", Key="test-artifact.tar.gz")
        assert head["ContentLength"] > 0

    @pytest.mark.integration
    @mock_aws
    def test_s3_bucket_operations(self):
        """Exercise create/list/put/get against a mocked bucket."""
        client = boto3.client("s3", region_name="eu-west-1")

        client.create_bucket(
            Bucket="fellowship-sut-dev",
            CreateBucketConfiguration={"LocationConstraint": "eu-west-1"},
        )

        # The new bucket must show up in the account listing.
        listing = client.list_buckets()
        assert "fellowship-sut-dev" in [entry["Name"] for entry in listing["Buckets"]]

        client.put_object(
            Bucket="fellowship-sut-dev",
            Key="fellowship-sut-commit123.tar.gz",
            Body=b"fake tarball content",
        )

        # Round-trip: the stored bytes must come back unchanged.
        fetched = client.get_object(
            Bucket="fellowship-sut-dev", Key="fellowship-sut-commit123.tar.gz"
        )
        assert fetched["Body"].read() == b"fake tarball content"

    @pytest.mark.integration
    @mock_aws
    def test_s3_download_with_aws_cli_simulation(self):
        """Simulate the script's S3 download step using boto3."""
        client = boto3.client("s3", region_name="eu-west-1")
        client.create_bucket(
            Bucket="sut-artifacts",
            CreateBucketConfiguration={"LocationConstraint": "eu-west-1"},
        )

        payload = b"This is a test SUT artifact"
        client.put_object(Bucket="sut-artifacts", Key="sut.tar.gz", Body=payload)

        # Download exactly the way the bake script would, then verify bytes.
        body = client.get_object(Bucket="sut-artifacts", Key="sut.tar.gz")["Body"]
        assert body.read() == payload

class TestPerformanceMetrics:
    """Tests for performance tracking features."""

    @pytest.fixture
    def script_path(self):
        """Resolve the bake-user-data.sh path, failing fast if absent."""
        candidate = Path(__file__).parent.parent.parent / "scripts" / "bake-user-data.sh"
        assert candidate.exists(), f"Script not found at {candidate}"
        return str(candidate)

    @pytest.fixture
    def make_executable(self, script_path):
        """Set the execute bit on the script."""
        os.chmod(script_path, 0o755)
        return script_path

    def run_script(self, script_path, args=None, env=None):
        """Run the script with metrics enabled and capture its output."""
        command = [script_path, *(args or [])]

        # Point metrics at a throwaway file and always suppress log-file
        # redirection; caller-supplied env vars may override METRICS_FILE
        # but never SKIP_LOG_FILE.
        environment = {
            **os.environ,
            "METRICS_FILE": "/tmp/bake-metrics-test.json",
            **(env or {}),
            "SKIP_LOG_FILE": "true",
        }

        with tempfile.TemporaryDirectory() as workdir:
            return subprocess.run(
                command,
                env=environment,
                cwd=workdir,
                capture_output=True,
                text=True,
                timeout=10,
            )

    def test_script_outputs_performance_metrics(self, make_executable):
        """Script should emit performance/timing information."""
        proc = self.run_script(
            make_executable,
            env={
                "S3_BUCKET": "test-bucket",
                "ARTIFACT_KEY": "test.tar.gz",
                "REGION": "eu-west-1",
            },
        )

        combined = proc.stdout + proc.stderr
        # Accept any of the timing markers the script may print.
        assert (
            "Performance Metrics" in combined
            or "completed in" in combined
            or "executing" in combined.lower()
        )

    def test_script_documents_performance_tracking(self, script_path):
        """Script should define the performance tracking helper functions."""
        with open(script_path, "r") as f:
            body = f.read()

        assert "record_step_start" in body, "Missing performance tracking start function"
        assert "record_step_end" in body, "Missing performance tracking end function"
        assert "output_metrics" in body, "Missing metrics output function"

class TestS3MockingAdvanced:
    """Advanced integration tests with moto for complex scenarios."""

    @pytest.mark.integration
    @mock_aws
    def test_s3_artifact_download_simulation(self):
        """Round-trip the full S3 download flow used by the script."""
        client = boto3.client("s3", region_name="eu-west-1")
        bucket = "fellowship-sut-artifacts"
        key = "fellowship-sut-abc123.tar.gz"

        client.create_bucket(
            Bucket=bucket,
            CreateBucketConfiguration={"LocationConstraint": "eu-west-1"},
        )

        # A tiny payload with a gzip magic-number prefix stands in for a
        # real tarball artifact.
        payload = b"\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\x00test artifact data"
        client.put_object(Bucket=bucket, Key=key, Body=payload)

        fetched = client.get_object(Bucket=bucket, Key=key)
        assert fetched["Body"].read() == payload
        assert fetched["ContentLength"] == len(payload)

    @pytest.mark.integration
    @mock_aws
    def test_s3_multiple_artifacts_management(self):
        """Upload and list several artifact versions in one bucket."""
        client = boto3.client("s3", region_name="eu-west-1")
        bucket = "fellowship-artifacts"

        client.create_bucket(
            Bucket=bucket,
            CreateBucketConfiguration={"LocationConstraint": "eu-west-1"},
        )

        versions = [
            "fellowship-sut-commit1.tar.gz",
            "fellowship-sut-commit2.tar.gz",
            "fellowship-sut-commit3.tar.gz",
        ]
        for name in versions:
            client.put_object(Bucket=bucket, Key=name, Body=b"artifact data")

        # Every uploaded version must be present in the listing.
        listing = client.list_objects_v2(Bucket=bucket)
        stored = [entry["Key"] for entry in listing.get("Contents", [])]

        assert len(stored) == 3
        assert set(versions) <= set(stored)

# NOTE: A second, byte-for-byte duplicate of the TestDocumentation class was
# defined here.  In Python, a later `class` statement rebinds the name and
# silently shadows the earlier definition, so only one copy was ever collected
# by pytest — the duplicate was pure dead code and has been removed.  The
# canonical TestDocumentation class lives earlier in this module.
if __name__ == "__main__":
|
|
pytest.main([__file__, "-v"]) |