#
# Copyright 2025 ABSA Group Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Static checks on repository configuration files.

Covers pyproject.toml (Black / coverage / mypy sections), requirements.txt,
.gitignore and the GitHub CODEOWNERS file.
"""
import os
import re

PROJECT_ROOT = os.path.dirname(os.path.dirname(__file__))
# Single source of truth for the Python version the project targets.
EXPECTED_PYTHON_VERSION = "3.11"


def _read(path):
    """Return the full text of *path*, decoded explicitly as UTF-8.

    The explicit encoding keeps the tests deterministic across platforms
    (the default locale encoding differs, e.g. cp1252 on Windows).
    """
    with open(path, "r", encoding="utf-8") as f:
        return f.read()


def test_pyproject_toml_exists():
    """Ensure pyproject.toml exists."""
    path = os.path.join(PROJECT_ROOT, "pyproject.toml")
    assert os.path.exists(path), "pyproject.toml not found"


def test_pyproject_toml_black_config():
    """Validate Black configuration in pyproject.toml."""
    content = _read(os.path.join(PROJECT_ROOT, "pyproject.toml"))

    assert "[tool.black]" in content, "pyproject.toml missing [tool.black] section"
    assert "line-length = 120" in content, "Black line-length should be 120"
    py_tag = EXPECTED_PYTHON_VERSION.replace(".", "")  # "3.11" -> "311"
    assert f"target-version = ['py{py_tag}']" in content, (
        f"Black target-version should be py{py_tag}"
    )


def test_pyproject_toml_coverage_config():
    """Validate coverage configuration in pyproject.toml."""
    content = _read(os.path.join(PROJECT_ROOT, "pyproject.toml"))

    assert "[tool.coverage.run]" in content, "pyproject.toml missing [tool.coverage.run] section"
    assert 'omit = ["tests/*"]' in content, "Coverage should omit tests directory"


def test_pyproject_toml_mypy_config():
    """Validate mypy configuration in pyproject.toml."""
    content = _read(os.path.join(PROJECT_ROOT, "pyproject.toml"))

    assert "[tool.mypy]" in content, "pyproject.toml missing [tool.mypy] section"
    assert "check_untyped_defs = true" in content, "mypy should check untyped defs"
    assert 'exclude = "tests"' in content, "mypy should exclude tests"
    assert "ignore_missing_imports = true" in content, "mypy should ignore missing imports"
    assert f'python_version = "{EXPECTED_PYTHON_VERSION}"' in content, (
        f"mypy python_version should be {EXPECTED_PYTHON_VERSION}"
    )
    assert 'packages = ["src"]' in content, "mypy should check src package"
    assert "explicit_package_bases = true" in content, "mypy should use explicit package bases"


def test_requirements_txt_exists():
    """Ensure requirements.txt exists."""
    path = os.path.join(PROJECT_ROOT, "requirements.txt")
    assert os.path.exists(path), "requirements.txt not found"


def test_requirements_txt_format():
    """Validate requirements.txt format and content."""
    path = os.path.join(PROJECT_ROOT, "requirements.txt")
    with open(path, "r", encoding="utf-8") as f:
        lines = f.readlines()

    # Should not be empty
    assert len(lines) > 0, "requirements.txt should not be empty"

    # Check for required packages
    content = "".join(lines)
    required_packages = [
        "pytest",
        "pytest-cov",
        "pytest-mock",
        "pylint",
        "black",
        "mypy",
        "boto3",
        "PyJWT",
        "requests",
        "psycopg2",
    ]

    for package in required_packages:
        assert package in content, f"requirements.txt missing {package}"


def test_requirements_txt_no_commented_alternatives():
    """Ensure no commented alternative packages in requirements.txt."""
    path = os.path.join(PROJECT_ROOT, "requirements.txt")
    with open(path, "r", encoding="utf-8") as f:
        lines = f.readlines()

    # Should not have commented psycopg2-binary
    for line in lines:
        if line.strip().startswith("#"):
            assert "psycopg2-binary" not in line, (
                "Commented psycopg2-binary should be removed from requirements.txt"
            )


def test_requirements_txt_pinned_versions():
    """Verify all packages have pinned versions."""
    path = os.path.join(PROJECT_ROOT, "requirements.txt")
    with open(path, "r", encoding="utf-8") as f:
        lines = f.readlines()

    for line in lines:
        line = line.strip()
        # Skip empty lines and comments
        if not line or line.startswith("#"):
            continue

        # Each line should have == for version pinning
        assert "==" in line, f"Package should have pinned version: {line}"

        # Verify version format
        parts = line.split("==")
        assert len(parts) == 2, f"Invalid package format: {line}"
        package_name, version = parts
        assert package_name.strip(), f"Empty package name: {line}"
        assert version.strip(), f"Empty version: {line}"

        # Version should be a valid semantic version (2 or 3 components)
        version_pattern = r"^\d+\.\d+(\.\d+)?$"
        assert re.match(version_pattern, version.strip()), (
            f"Invalid version format for {package_name}: {version}"
        )


def test_requirements_txt_psycopg2_not_binary():
    """Ensure psycopg2 (not binary) is used in requirements."""
    content = _read(os.path.join(PROJECT_ROOT, "requirements.txt"))

    # Should have psycopg2 without -binary suffix
    assert "psycopg2==" in content, "requirements.txt should include psycopg2"
    assert "psycopg2-binary" not in content, (
        "requirements.txt should not use psycopg2-binary for production"
    )


def test_gitignore_exists():
    """Ensure .gitignore exists."""
    path = os.path.join(PROJECT_ROOT, ".gitignore")
    assert os.path.exists(path), ".gitignore not found"


def test_gitignore_python_patterns():
    """Validate Python-related patterns in .gitignore."""
    content = _read(os.path.join(PROJECT_ROOT, ".gitignore"))

    required_patterns = [
        "__pycache__",
        ".venv",
        ".ipynb_checkpoints",
    ]

    for pattern in required_patterns:
        assert pattern in content, f".gitignore missing pattern: {pattern}"


def test_gitignore_terraform_patterns():
    """Validate Terraform-related patterns in .gitignore."""
    content = _read(os.path.join(PROJECT_ROOT, ".gitignore"))

    terraform_patterns = [
        "/terraform/*.tfvars",
        "/terraform/*.tfstate",
        "/terraform/.terraform",
    ]

    for pattern in terraform_patterns:
        assert pattern in content, f".gitignore missing Terraform pattern: {pattern}"


def test_gitignore_sarif_files():
    """Ensure .sarif files are ignored."""
    content = _read(os.path.join(PROJECT_ROOT, ".gitignore"))

    assert "*.sarif" in content, ".gitignore should ignore SARIF files"


def test_gitignore_build_artifacts():
    """Validate build artifacts are ignored."""
    content = _read(os.path.join(PROJECT_ROOT, ".gitignore"))

    build_patterns = [
        "/dependencies",
        "/lambda_function.zip",
    ]

    for pattern in build_patterns:
        assert pattern in content, f".gitignore missing build pattern: {pattern}"


def test_gitignore_ide_patterns():
    """Validate IDE-related patterns in .gitignore."""
    content = _read(os.path.join(PROJECT_ROOT, ".gitignore"))

    assert "/.idea/" in content, ".gitignore should ignore .idea directory"


def test_gitignore_organized_sections():
    """Verify .gitignore has organized sections with comments."""
    content = _read(os.path.join(PROJECT_ROOT, ".gitignore"))

    # Check for section comments
    assert "# Terraform" in content, ".gitignore should have Terraform section comment"


def test_codeowners_exists():
    """Ensure CODEOWNERS file exists."""
    path = os.path.join(PROJECT_ROOT, ".github", "CODEOWNERS")
    assert os.path.exists(path), "CODEOWNERS file not found"


def test_codeowners_format():
    """Validate CODEOWNERS file format."""
    path = os.path.join(PROJECT_ROOT, ".github", "CODEOWNERS")
    with open(path, "r", encoding="utf-8") as f:
        lines = f.readlines()

    # Should not be empty
    assert len(lines) > 0, "CODEOWNERS should not be empty"

    for line in lines:
        line = line.strip()
        # Skip empty lines and comments
        if not line or line.startswith("#"):
            continue

        # Each line should have pattern and at least one owner
        parts = line.split()
        assert len(parts) >= 2, f"CODEOWNERS line must have pattern and owner(s): {line}"

        # Pattern should be first
        pattern = parts[0]
        assert pattern, "CODEOWNERS pattern cannot be empty"

        # Owners should start with @
        owners = parts[1:]
        for owner in owners:
            assert owner.startswith("@"), f"CODEOWNERS owner must start with @: {owner}"


def test_codeowners_default_pattern():
    """Verify CODEOWNERS has a default pattern."""
    content = _read(os.path.join(PROJECT_ROOT, ".github", "CODEOWNERS"))

    # Should have a default pattern (*)
    assert content.strip().startswith("*"), "CODEOWNERS should start with default pattern (*)"


def test_python_version_consistency():
    """Verify Python version is consistent across configuration files."""
    pyproject_content = _read(os.path.join(PROJECT_ROOT, "pyproject.toml"))

    # Extract Python version from Black config. Capture major and minor
    # digits separately instead of slicing "311" by position, so the
    # reconstruction cannot silently misparse the tag.
    black_version_match = re.search(r"target-version = \['py(\d)(\d+)'\]", pyproject_content)
    assert black_version_match, "Could not find Black target-version in pyproject.toml"
    black_version = f"{black_version_match.group(1)}.{black_version_match.group(2)}"

    # Extract Python version from mypy config
    mypy_version_match = re.search(r'python_version = "(\d+\.\d+)"', pyproject_content)
    assert mypy_version_match, "Could not find mypy python_version in pyproject.toml"
    mypy_version = mypy_version_match.group(1)

    # Both should match expected version
    assert black_version == EXPECTED_PYTHON_VERSION, (
        f"Black target-version {black_version} doesn't match expected {EXPECTED_PYTHON_VERSION}"
    )
    assert mypy_version == EXPECTED_PYTHON_VERSION, (
        f"mypy python_version {mypy_version} doesn't match expected {EXPECTED_PYTHON_VERSION}"
    )
#
# Copyright 2025 ABSA Group Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Static checks on project documentation.

Covers DEVELOPER.md, README.md, the LICENSE file, and per-file Apache
license headers in the Python sources.
"""
import os
import re

PROJECT_ROOT = os.path.dirname(os.path.dirname(__file__))


def _read(path):
    """Return the full text of *path*, decoded explicitly as UTF-8.

    Markdown files in this repo are UTF-8; the explicit encoding avoids
    platform-dependent decode failures (e.g. cp1252 on Windows).
    """
    with open(path, "r", encoding="utf-8") as f:
        return f.read()


def test_developer_md_exists():
    """Ensure DEVELOPER.md exists."""
    path = os.path.join(PROJECT_ROOT, "DEVELOPER.md")
    assert os.path.exists(path), "DEVELOPER.md not found"


def test_developer_md_has_content():
    """Verify DEVELOPER.md is not empty."""
    content = _read(os.path.join(PROJECT_ROOT, "DEVELOPER.md"))

    assert content.strip(), "DEVELOPER.md should not be empty"
    assert len(content) > 100, "DEVELOPER.md should have substantial content"


def test_developer_md_has_sections():
    """Verify DEVELOPER.md has proper markdown sections."""
    content = _read(os.path.join(PROJECT_ROOT, "DEVELOPER.md"))

    # Should have markdown headers
    assert re.search(r"^#{1,3}\s+\w+", content, re.MULTILINE), (
        "DEVELOPER.md should have markdown sections (headers)"
    )


def test_developer_md_clone_instructions():
    """Verify DEVELOPER.md includes repository clone instructions."""
    content = _read(os.path.join(PROJECT_ROOT, "DEVELOPER.md"))

    assert "git clone" in content, "DEVELOPER.md should include git clone instructions"
    assert "EventGate" in content, "DEVELOPER.md should reference EventGate repository"


def test_developer_md_python_venv_setup():
    """Verify DEVELOPER.md includes Python virtual environment setup."""
    content = _read(os.path.join(PROJECT_ROOT, "DEVELOPER.md"))

    # Should mention venv setup
    assert "venv" in content or "virtual environment" in content.lower(), (
        "DEVELOPER.md should include virtual environment setup"
    )

    # Should have command to create venv
    assert "python3 -m venv" in content or "python -m venv" in content, (
        "DEVELOPER.md should include venv creation command"
    )


def test_developer_md_no_outdated_python_version():
    """Ensure DEVELOPER.md doesn't reference outdated Python version."""
    content = _read(os.path.join(PROJECT_ROOT, "DEVELOPER.md"))

    # Should not mention Python 3.13 as requirement
    assert "Python 3.13" not in content, (
        "DEVELOPER.md should not reference Python 3.13 after downgrade to 3.11"
    )

    # Should not have a Prerequisites section mentioning specific Python version
    # (as this was removed in the diff)
    prerequisites_section = re.search(
        r"##\s+Prerequisites.*?(?=##|$)",
        content,
        re.DOTALL | re.IGNORECASE,
    )

    if prerequisites_section:
        section_content = prerequisites_section.group(0)
        # If Prerequisites section exists, it should not mandate a specific Python version
        assert "Python 3.13" not in section_content, (
            "Prerequisites section should not require Python 3.13"
        )


def test_developer_md_code_blocks():
    """Verify DEVELOPER.md has properly formatted code blocks."""
    content = _read(os.path.join(PROJECT_ROOT, "DEVELOPER.md"))

    # Should have code blocks (triple backticks)
    assert "```" in content, "DEVELOPER.md should have code blocks"

    # Code blocks should be balanced (every opener has a closer)
    code_block_markers = content.count("```")
    assert code_block_markers % 2 == 0, "DEVELOPER.md has unbalanced code blocks"


def test_readme_exists():
    """Ensure README.md exists."""
    path = os.path.join(PROJECT_ROOT, "README.md")
    assert os.path.exists(path), "README.md not found"


def test_readme_has_title():
    """Verify README.md has a title."""
    content = _read(os.path.join(PROJECT_ROOT, "README.md"))

    # Should start with a level 1 heading
    assert re.match(r"^#\s+\w+", content), "README.md should start with a title (# heading)"


def test_markdown_files_valid_links():
    """Check for obviously broken links in markdown files."""
    markdown_files = [
        os.path.join(PROJECT_ROOT, "README.md"),
        os.path.join(PROJECT_ROOT, "DEVELOPER.md"),
    ]

    for path in markdown_files:
        if not os.path.exists(path):
            continue

        content = _read(path)

        # Find markdown links [text](url)
        links = re.findall(r"\[([^\]]+)\]\(([^)]+)\)", content)

        for _text, url in links:
            # External links, in-page anchors and mail links are not checked.
            if not url.startswith(("http://", "https://", "#", "mailto:")):
                # It's a relative path
                if url.startswith("/"):
                    # Absolute path from repo root
                    file_path = os.path.join(PROJECT_ROOT, url.lstrip("/"))
                else:
                    # Relative to markdown file
                    file_path = os.path.join(os.path.dirname(path), url)

                # Remove any anchor
                file_path = file_path.split("#")[0]

                if file_path:
                    assert os.path.exists(file_path), (
                        f"{os.path.basename(path)} has broken link: {url} -> {file_path}"
                    )


def test_license_file_exists():
    """Ensure LICENSE file exists."""
    path = os.path.join(PROJECT_ROOT, "LICENSE")
    assert os.path.exists(path), "LICENSE file not found"


def test_license_is_apache():
    """Verify LICENSE is Apache License 2.0."""
    content = _read(os.path.join(PROJECT_ROOT, "LICENSE"))

    assert "Apache License" in content, "LICENSE should be Apache License"
    assert "Version 2.0" in content, "LICENSE should be version 2.0"


def test_python_files_have_license_header():
    """Verify Python files have Apache license header."""
    src_dir = os.path.join(PROJECT_ROOT, "src")
    test_dir = os.path.join(PROJECT_ROOT, "tests")

    python_files = []

    # Get Python files from src
    if os.path.exists(src_dir):
        for root, _dirs, files in os.walk(src_dir):
            for file in files:
                if file.endswith(".py"):
                    python_files.append(os.path.join(root, file))

    # Get a sample of test files (at most three, to keep the test fast)
    if os.path.exists(test_dir):
        for file in os.listdir(test_dir):
            if file.endswith(".py") and file.startswith("test_"):
                python_files.append(os.path.join(test_dir, file))
                if len([f for f in python_files if test_dir in f]) >= 3:
                    break

    for path in python_files:
        with open(path, "r", encoding="utf-8") as f:
            content = f.read(1000)  # The header must sit at the top of the file.

        # Should have copyright notice
        assert "Copyright" in content, f"{path} missing copyright notice"
        assert "ABSA Group Limited" in content, f"{path} missing ABSA Group Limited in copyright"
        assert "Apache License" in content, f"{path} missing Apache License reference"
#
# Copyright 2025 ABSA Group Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Static checks on the GitHub Actions workflow definitions."""
import os
from glob import glob
import pytest

try:
    import yaml
except ImportError:  # PyYAML is optional; tests skip when it is absent.
    yaml = None

WORKFLOWS_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), ".github", "workflows")
EXPECTED_PYTHON_VERSION = "3.11"


def load_yaml(path):
    """Load and parse a YAML file (explicit UTF-8 for cross-platform runs)."""
    if yaml is None:
        pytest.skip("PyYAML not installed")
    with open(path, "r", encoding="utf-8") as f:
        return yaml.safe_load(f)


def _get_trigger(data):
    """Return the workflow's trigger mapping, or None if absent.

    PyYAML implements YAML 1.1, where the unquoted key ``on`` is parsed as
    the boolean ``True``. Depending on how the workflow author quoted the
    key, the trigger section ends up under either ``"on"`` or ``True`` in
    the loaded mapping — look it up under both.
    """
    if "on" in data:
        return data["on"]
    return data.get(True)


@pytest.fixture(scope="module")
def workflow_files():
    """Get all workflow YAML files."""
    files = glob(os.path.join(WORKFLOWS_DIR, "*.yml")) + glob(os.path.join(WORKFLOWS_DIR, "*.yaml"))
    assert files, "No workflow files found in .github/workflows/"
    return files


def test_workflow_files_exist():
    """Ensure critical workflow files exist."""
    required_workflows = [
        "test.yml",
        "check_terraform.yml",
        "check_pr_release_notes.yml",
        "release_draft.yml",
    ]
    for workflow in required_workflows:
        path = os.path.join(WORKFLOWS_DIR, workflow)
        assert os.path.exists(path), f"Required workflow {workflow} not found"


def test_workflows_are_valid_yaml(workflow_files):
    """Validate that all workflow files contain valid YAML."""
    for path in workflow_files:
        data = load_yaml(path)
        assert data is not None, f"{path} is empty or invalid YAML"
        assert isinstance(data, dict), f"{path} must be a YAML mapping (object)"


def test_workflows_have_name(workflow_files):
    """Ensure all workflows have a name field."""
    for path in workflow_files:
        data = load_yaml(path)
        assert "name" in data, f"{path} missing 'name' field"
        assert isinstance(data["name"], str), f"{path} 'name' must be a string"
        assert data["name"].strip(), f"{path} 'name' cannot be empty"


def test_workflows_have_on_trigger(workflow_files):
    """Ensure all workflows have an 'on' trigger."""
    for path in workflow_files:
        data = load_yaml(path)
        # The trigger may be stored under "on" or True (YAML 1.1 boolean).
        trigger = _get_trigger(data)
        assert trigger is not None, f"{path} missing 'on' trigger"
        assert trigger, f"{path} 'on' trigger cannot be empty"


def test_workflows_have_jobs(workflow_files):
    """Ensure all workflows have at least one job."""
    for path in workflow_files:
        data = load_yaml(path)
        assert "jobs" in data, f"{path} missing 'jobs' section"
        jobs = data["jobs"]
        assert isinstance(jobs, dict), f"{path} 'jobs' must be a mapping"
        assert len(jobs) > 0, f"{path} must have at least one job"


def test_python_version_consistency(workflow_files):
    """Verify Python version is consistent across all workflows."""
    for path in workflow_files:
        data = load_yaml(path)
        jobs = data.get("jobs", {})

        for job_name, job_spec in jobs.items():
            steps = job_spec.get("steps", [])
            for step in steps:
                # Check for setup-python actions
                if isinstance(step.get("uses"), str) and "setup-python@" in step["uses"]:
                    with_config = step.get("with", {})
                    if "python-version" in with_config:
                        # NOTE: an unquoted version in the workflow loads as a
                        # float (and 3.10 would truncate to 3.1) — the string
                        # comparison below then fails, which is the desired
                        # outcome: workflows must quote the version.
                        version = with_config["python-version"]
                        assert version == EXPECTED_PYTHON_VERSION, (
                            f"{path}: job '{job_name}' uses Python {version}, "
                            f"expected {EXPECTED_PYTHON_VERSION}"
                        )


def test_test_workflow_structure():
    """Validate the structure of test.yml workflow."""
    path = os.path.join(WORKFLOWS_DIR, "test.yml")
    data = load_yaml(path)

    assert data["name"] == "Test", "test.yml should be named 'Test'"

    jobs = data["jobs"]
    expected_jobs = ["unit-tests", "mypy-check", "black-check", "pylint-check"]
    for job in expected_jobs:
        assert job in jobs, f"test.yml missing required job: {job}"


def test_check_terraform_workflow_structure():
    """Validate the structure of check_terraform.yml workflow."""
    path = os.path.join(WORKFLOWS_DIR, "check_terraform.yml")
    data = load_yaml(path)

    assert data["name"] == "Static Terraform Check"

    jobs = data["jobs"]
    assert "trivy" in jobs, "check_terraform.yml missing 'trivy' job"
    assert "tflint" in jobs, "check_terraform.yml missing 'tflint' job"

    # Verify Trivy job steps
    trivy_steps = jobs["trivy"]["steps"]
    step_names = [step.get("name", step.get("uses", "")) for step in trivy_steps]
    assert any("Trivy" in name for name in step_names), "Trivy job missing Trivy scan step"

    # Verify TFLint job steps
    tflint_steps = jobs["tflint"]["steps"]
    step_names = [step.get("name", step.get("uses", "")) for step in tflint_steps]
    assert any("TFLint" in name for name in step_names), "TFLint job missing TFLint run step"


def test_check_terraform_workflow_triggers():
    """Validate triggers for check_terraform.yml."""
    path = os.path.join(WORKFLOWS_DIR, "check_terraform.yml")
    data = load_yaml(path)

    # The trigger may be stored under "on" or True (YAML 1.1 boolean).
    triggers = _get_trigger(data)
    assert triggers is not None, "check_terraform.yml missing 'on' trigger"
    assert "pull_request" in triggers, "check_terraform.yml should trigger on pull_request"
    assert "push" in triggers, "check_terraform.yml should trigger on push"
    assert "workflow_dispatch" in triggers, "check_terraform.yml should support manual dispatch"

    # Verify path filters
    pr_config = triggers["pull_request"]
    assert "paths" in pr_config, "pull_request should have path filters"
    assert "terraform/**" in pr_config["paths"], "Should filter for terraform directory"

    push_config = triggers["push"]
    assert "paths" in push_config, "push should have path filters"
    assert "terraform/**" in push_config["paths"], "Should filter for terraform directory"


def test_check_terraform_concurrency():
    """Verify concurrency settings in check_terraform.yml."""
    path = os.path.join(WORKFLOWS_DIR, "check_terraform.yml")
    data = load_yaml(path)

    assert "concurrency" in data, "check_terraform.yml should have concurrency settings"
    concurrency = data["concurrency"]
    assert "group" in concurrency, "Concurrency must specify a group"
    assert "cancel-in-progress" in concurrency, "Concurrency should specify cancel-in-progress"
    assert concurrency["cancel-in-progress"] is True, "cancel-in-progress should be true"


def test_check_terraform_permissions():
    """Verify permissions in check_terraform.yml."""
    path = os.path.join(WORKFLOWS_DIR, "check_terraform.yml")
    data = load_yaml(path)

    assert "permissions" in data, "check_terraform.yml should have permissions"
    permissions = data["permissions"]
    assert "contents" in permissions, "Must specify contents permission"
    assert "security-events" in permissions, "Must specify security-events permission"
    assert permissions["contents"] == "read", "Contents should have read permission"
    assert permissions["security-events"] == "write", "Security events should have write permission"


def test_trivy_sarif_upload():
    """Ensure Trivy results are uploaded as SARIF."""
    path = os.path.join(WORKFLOWS_DIR, "check_terraform.yml")
    data = load_yaml(path)

    trivy_job = data["jobs"]["trivy"]
    steps = trivy_job["steps"]

    # Find SARIF upload step
    sarif_upload = None
    for step in steps:
        if "upload-sarif" in step.get("uses", "").lower():
            sarif_upload = step
            break

    assert sarif_upload is not None, "Trivy job missing SARIF upload step"
    assert "with" in sarif_upload, "SARIF upload step missing 'with' configuration"
    assert "sarif_file" in sarif_upload["with"], "SARIF upload missing sarif_file parameter"


def test_tflint_sarif_upload():
    """Ensure TFLint results are uploaded as SARIF."""
    path = os.path.join(WORKFLOWS_DIR, "check_terraform.yml")
    data = load_yaml(path)

    tflint_job = data["jobs"]["tflint"]
    steps = tflint_job["steps"]

    # Find SARIF upload step
    sarif_upload = None
    for step in steps:
        if "upload-sarif" in step.get("uses", "").lower():
            sarif_upload = step
            break

    assert sarif_upload is not None, "TFLint job missing SARIF upload step"
    assert "with" in sarif_upload, "SARIF upload step missing 'with' configuration"
    assert "sarif_file" in sarif_upload["with"], "SARIF upload missing sarif_file parameter"


def test_workflows_use_checkout_v4(workflow_files):
    """Ensure workflows use actions/checkout@v4 or later."""
    for path in workflow_files:
        data = load_yaml(path)
        jobs = data.get("jobs", {})

        for job_name, job_spec in jobs.items():
            steps = job_spec.get("steps", [])
            for step in steps:
                if isinstance(step.get("uses"), str) and "actions/checkout@" in step["uses"]:
                    version = step["uses"].split("@")[1]
                    major_version = version.split(".")[0].lstrip("v")
                    assert major_version.isdigit(), f"Invalid version format in {path}"
                    assert int(major_version) >= 4, (
                        f"{path}: job '{job_name}' should use actions/checkout@v4 or later, "
                        f"found {step['uses']}"
                    )


def test_check_pr_release_notes_structure():
    """Validate check_pr_release_notes.yml structure."""
    path = os.path.join(WORKFLOWS_DIR, "check_pr_release_notes.yml")
    data = load_yaml(path)

    # The trigger may be stored under "on" or True (YAML 1.1 boolean).
    trigger = _get_trigger(data)
    assert trigger is not None, "check_pr_release_notes.yml missing 'on' trigger"
    assert "pull_request" in trigger

    jobs = data["jobs"]
    assert "check" in jobs, "check_pr_release_notes.yml missing 'check' job"

    check_job = jobs["check"]
    steps = check_job["steps"]

    # Should setup Python
    has_python_setup = any(
        "setup-python" in step.get("uses", "")
        for step in steps
    )
    assert has_python_setup, "check job should setup Python"

    # Should use release notes check action
    has_release_check = any(
        "release-notes-presence-check" in step.get("uses", "")
        for step in steps
    )
    assert has_release_check, "check job should use release-notes-presence-check action"


def test_release_draft_structure():
    """Validate release_draft.yml structure."""
    path = os.path.join(WORKFLOWS_DIR, "release_draft.yml")
    data = load_yaml(path)

    # The trigger may be stored under "on" or True (YAML 1.1 boolean).
    trigger = _get_trigger(data)
    assert trigger is not None, "release_draft.yml missing 'on' trigger"
    assert "push" in trigger
    push_config = trigger["push"]
    assert "tags" in push_config, "release_draft should trigger on tags"

    jobs = data["jobs"]
    assert len(jobs) > 0, "release_draft.yml should have at least one job"


def test_workflow_runs_on_ubuntu(workflow_files):
    """Ensure all jobs run on ubuntu-latest."""
    for path in workflow_files:
        data = load_yaml(path)
        jobs = data.get("jobs", {})

        for job_name, job_spec in jobs.items():
            assert "runs-on" in job_spec, f"{path}: job '{job_name}' missing 'runs-on'"
            runs_on = job_spec["runs-on"]
            if isinstance(runs_on, str):
                assert "ubuntu" in runs_on.lower(), (
                    f"{path}: job '{job_name}' should run on Ubuntu"
                )


def test_workflows_have_valid_step_structure(workflow_files):
    """Validate that all workflow steps have proper structure."""
    for path in workflow_files:
        data = load_yaml(path)
        jobs = data.get("jobs", {})

        for job_name, job_spec in jobs.items():
            steps = job_spec.get("steps", [])
            assert isinstance(steps, list), f"{path}: job '{job_name}' steps must be a list"

            for i, step in enumerate(steps):
                assert isinstance(step, dict), (
                    f"{path}: job '{job_name}' step {i} must be a mapping"
                )
                # Each step should have either 'uses' or 'run'
                has_uses = "uses" in step
                has_run = "run" in step
                assert has_uses or has_run, (
                    f"{path}: job '{job_name}' step {i} must have 'uses' or 'run'"
                )


def test_trivy_severity_levels():
    """Verify Trivy scans for HIGH and CRITICAL severities."""
    path = os.path.join(WORKFLOWS_DIR, "check_terraform.yml")
    data = load_yaml(path)

    trivy_job = data["jobs"]["trivy"]
    steps = trivy_job["steps"]

    # Find Trivy scan step
    trivy_scan = None
    for step in steps:
        if step.get("name") and "Trivy" in step["name"] and "scan" in step["name"].lower():
            trivy_scan = step
            break

    assert trivy_scan is not None, "Trivy scan step not found"
    assert "run" in trivy_scan, "Trivy scan should be a run command"

    run_command = trivy_scan["run"]
    assert "HIGH" in run_command or "CRITICAL" in run_command, (
        "Trivy should scan for HIGH and CRITICAL severities"
    )


def test_terraform_working_directory():
    """Verify Terraform checks use correct working directory."""
    path = os.path.join(WORKFLOWS_DIR, "check_terraform.yml")
    data = load_yaml(path)

    jobs = data["jobs"]

    # Check Trivy job
    trivy_steps = jobs["trivy"]["steps"]
    for step in trivy_steps:
        if "Trivy" in step.get("name", ""):
            if "working-directory" in step:
                assert step["working-directory"] == "terraform", (
                    "Trivy should use terraform working directory"
                )

    # Check TFLint job
    tflint_steps = jobs["tflint"]["steps"]
    for step in tflint_steps:
        if "TFLint" in step.get("name", ""):
            if "working-directory" in step:
                assert step["working-directory"] == "terraform", (
                    "TFLint should use terraform working directory"
                )
+# +import os +import re +from glob import glob +import pytest + +TERRAFORM_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), "terraform") +EXPECTED_PYTHON_RUNTIME = "python3.11" + + +@pytest.fixture(scope="module") +def terraform_files(): + """Get all Terraform configuration files.""" + files = glob(os.path.join(TERRAFORM_DIR, "*.tf")) + assert files, "No Terraform files found" + return files + + +def test_terraform_directory_exists(): + """Ensure terraform directory exists.""" + assert os.path.exists(TERRAFORM_DIR), "terraform directory not found" + assert os.path.isdir(TERRAFORM_DIR), "terraform path is not a directory" + + +def test_lambda_tf_exists(): + """Ensure lambda.tf exists.""" + path = os.path.join(TERRAFORM_DIR, "lambda.tf") + assert os.path.exists(path), "lambda.tf not found" + + +def test_lambda_tf_python_runtime(): + """Verify Lambda function uses correct Python runtime.""" + path = os.path.join(TERRAFORM_DIR, "lambda.tf") + with open(path, "r") as f: + content = f.read() + + # Should have runtime specification + assert "runtime" in content, "lambda.tf should specify runtime" + + # Extract runtime value + runtime_match = re.search(r'runtime\s*=\s*"([^"]+)"', content) + assert runtime_match, "Could not find runtime value in lambda.tf" + + runtime = runtime_match.group(1) + assert runtime == EXPECTED_PYTHON_RUNTIME, ( + f"Lambda runtime should be {EXPECTED_PYTHON_RUNTIME}, found {runtime}" + ) + + +def test_lambda_tf_resource_structure(): + """Validate Lambda resource structure in lambda.tf.""" + path = os.path.join(TERRAFORM_DIR, "lambda.tf") + with open(path, "r") as f: + content = f.read() + + # Should have Lambda function resource + assert 'resource "aws_lambda_function"' in content, ( + "lambda.tf should define aws_lambda_function resource" + ) + + # Extract resource name + resource_match = re.search(r'resource "aws_lambda_function" "([^"]+)"', content) + assert resource_match, "Could not find Lambda function resource name" + + 
resource_name = resource_match.group(1) + assert resource_name, "Lambda function resource name should not be empty" + + +def test_lambda_tf_required_attributes(): + """Verify Lambda function has required attributes.""" + path = os.path.join(TERRAFORM_DIR, "lambda.tf") + with open(path, "r") as f: + content = f.read() + + required_attributes = [ + "function_name", + "handler", + "role", + "runtime", + "timeout", + ] + + for attr in required_attributes: + assert f"{attr}" in content, f"lambda.tf should specify {attr}" + + +def test_lambda_tf_architecture(): + """Verify Lambda function architecture setting.""" + path = os.path.join(TERRAFORM_DIR, "lambda.tf") + with open(path, "r") as f: + content = f.read() + + # Should specify architectures + assert "architectures" in content, "lambda.tf should specify architectures" + + # Should use x86_64 + assert "x86_64" in content, "Lambda should use x86_64 architecture" + + +def test_lambda_tf_timeout(): + """Verify Lambda function timeout is reasonable.""" + path = os.path.join(TERRAFORM_DIR, "lambda.tf") + with open(path, "r") as f: + content = f.read() + + # Extract timeout value + timeout_match = re.search(r'timeout\s*=\s*(\d+)', content) + assert timeout_match, "Could not find timeout value in lambda.tf" + + timeout = int(timeout_match.group(1)) + assert timeout > 0, "Lambda timeout must be positive" + assert timeout <= 900, "Lambda timeout cannot exceed 15 minutes (900 seconds)" + + +def test_lambda_tf_package_type(): + """Verify Lambda package_type is properly configured.""" + path = os.path.join(TERRAFORM_DIR, "lambda.tf") + with open(path, "r") as f: + content = f.read() + + # Should specify package_type + assert "package_type" in content, "lambda.tf should specify package_type" + + +def test_lambda_tf_deployment_config(): + """Verify Lambda has deployment configuration.""" + path = os.path.join(TERRAFORM_DIR, "lambda.tf") + with open(path, "r") as f: + content = f.read() + + # Should have S3 bucket configuration for 
Zip deployments + assert "s3_bucket" in content, "lambda.tf should have s3_bucket configuration" + + +def test_terraform_files_valid_syntax(terraform_files): + """Basic syntax validation for Terraform files.""" + for path in terraform_files: + with open(path, "r") as f: + content = f.read() + + # Should not be empty + assert content.strip(), f"{path} is empty" + + # Check for balanced braces + open_braces = content.count("{") + close_braces = content.count("}") + assert open_braces == close_braces, ( + f"{path} has unbalanced braces: {open_braces} open, {close_braces} close" + ) + + +def test_terraform_files_have_resources_or_variables(terraform_files): + """Ensure Terraform files define resources, variables, or outputs.""" + for path in terraform_files: + with open(path, "r") as f: + content = f.read() + + # File should have at least one of: resource, variable, output, data, locals + has_definition = any( + keyword in content + for keyword in ["resource ", "variable ", "output ", "data ", "locals "] + ) + assert has_definition, f"{path} should define resources, variables, or outputs" + + +def test_lambda_tf_variables_referenced(): + """Verify Lambda configuration uses variables where appropriate.""" + path = os.path.join(TERRAFORM_DIR, "lambda.tf") + with open(path, "r") as f: + content = f.read() + + # Should use var. references for configuration + assert "var." 
in content, "lambda.tf should use variables for configuration" + + # Check for specific variable usage + expected_vars = [ + "var.lambda_role_arn", + "var.lambda_package_type", + ] + + for var in expected_vars: + assert var in content, f"lambda.tf should reference {var}" + + +def test_lambda_tf_conditional_s3_config(): + """Verify Lambda has conditional S3 configuration based on package type.""" + path = os.path.join(TERRAFORM_DIR, "lambda.tf") + with open(path, "r") as f: + content = f.read() + + # S3 bucket should be conditional based on package_type + assert 's3_bucket' in content, "lambda.tf should have s3_bucket" + + # Should have ternary condition for Zip vs Image + s3_bucket_line = [line for line in content.split('\n') if 's3_bucket' in line] + assert s3_bucket_line, "Could not find s3_bucket line" + + s3_line = s3_bucket_line[0] + assert '?' in s3_line, "s3_bucket should use conditional expression" + assert 'Zip' in s3_line or 'zip' in s3_line, ( + "s3_bucket condition should check for Zip package type" + ) + + +def test_terraform_no_hardcoded_sensitive_values(terraform_files): + """Ensure no hardcoded sensitive values in Terraform files.""" + sensitive_patterns = [ + r'password\s*=\s*"[^"]+"', + r'secret\s*=\s*"[^"]+"', + r'api_key\s*=\s*"[^"]+"', + r'access_key\s*=\s*"[^"]+"', + ] + + for path in terraform_files: + with open(path, "r") as f: + content = f.read() + + for pattern in sensitive_patterns: + matches = re.findall(pattern, content, re.IGNORECASE) + assert not matches, ( + f"{path} contains hardcoded sensitive values: {matches}" + ) + + +def test_python_runtime_consistency(): + """Verify Python runtime in Terraform matches project configuration.""" + lambda_path = os.path.join(TERRAFORM_DIR, "lambda.tf") + with open(lambda_path, "r") as f: + content = f.read() + + runtime_match = re.search(r'runtime\s*=\s*"python(\d+\.\d+)"', content) + assert runtime_match, "Could not extract Python version from lambda.tf runtime" + + runtime_version = 
runtime_match.group(1) + + # Should match the expected Python version (3.11) + expected_version = "3.11" + assert runtime_version == expected_version, ( + f"Lambda runtime python{runtime_version} doesn't match expected python{expected_version}" + ) \ No newline at end of file