Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
36 changes: 3 additions & 33 deletions .skills-proposals/discovered-skills.json
Original file line number Diff line number Diff line change
@@ -1,36 +1,6 @@
{
"generated_at": "2025-11-21T14:17:47.941871",
"skill_count": 2,
"generated_at": "2025-11-23T00:18:53.479358",
"skill_count": 0,
"min_confidence": 70,
"discovered_skills": [
{
"skill_id": "setup-claude-md",
"name": "Setup CLAUDE.md Configuration",
"description": "Create comprehensive CLAUDE.md files with tech stack, standard commands, repository structure, and boundaries to optimize repositories for AI-assisted development",
"confidence": 100.0,
"source_attribute_id": "claude_md_file",
"reusability_score": 100.0,
"impact_score": 50.0,
"pattern_summary": "Project-specific configuration for Claude Code",
"code_examples": [
"CLAUDE.md found at /Users/jeder/repos/agentready/CLAUDE.md"
],
"citations": []
},
{
"skill_id": "implement-type-annotations",
"name": "Implement Type Annotations",
"description": "Add comprehensive type hints to Python/TypeScript code to improve IDE support, catch errors early, and enable better AI code understanding",
"confidence": 100.0,
"source_attribute_id": "type_annotations",
"reusability_score": 100.0,
"impact_score": 50.0,
"pattern_summary": "Type hints in function signatures",
"code_examples": [
"Typed functions: 180/186",
"Coverage: 96.8%"
],
"citations": []
}
]
"discovered_skills": []
}
29 changes: 17 additions & 12 deletions src/agentready/cli/align.py
Original file line number Diff line number Diff line change
Expand Up @@ -76,22 +76,27 @@ def align(repository, dry_run, attributes, interactive):
# Step 1: Run assessment
click.echo("📊 Running assessment...")
try:
# Create repository model
detector = LanguageDetector(repo_path)
languages = detector.detect_languages()

repo = Repository(
path=repo_path,
languages=languages,
metadata={},
)

# Load config
config = Config.load_default()

# Create scanner
scanner = Scanner(repo_path, config)
Comment on lines 79 to +83
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P1 Badge Define repository before generating fix plan

The align command now skips constructing a Repository and only instantiates Scanner and assessors (lines 79-83), but the subsequent call to fixer_service.generate_fix_plan(assessment, repo, attribute_list) still passes repo, which is never defined anymore. After a scan completes, running agentready align will throw a NameError instead of producing a fix plan for any repository. A repository object (e.g., from the scanner’s assessment) needs to be created or reused before generating the plan.

Useful? React with 👍 / 👎.


# Create assessors
from agentready.assessors import create_all_assessors

assessors = create_all_assessors()

# Filter assessors if specific attributes requested
if attributes:
attr_set = set(attributes.split(","))
assessors = [a for a in assessors if a.attribute_id in attr_set]

# Run assessment
scanner = Scanner(config=config)
assessment = scanner.scan(repo)
from agentready.cli.main import get_agentready_version

version = get_agentready_version()
assessment = scanner.scan(assessors, verbose=False, version=version)

current_level, current_emoji = get_certification_level(assessment.overall_score)

Expand Down
1 change: 1 addition & 0 deletions tests/fixtures/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
"""Shared test fixtures for AgentReady tests."""
143 changes: 143 additions & 0 deletions tests/fixtures/assessment_fixtures.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,143 @@
"""Shared test fixtures for creating valid Assessment JSON data.

This module provides factory functions for generating valid Assessment and Finding
JSON structures that match the current schema (v1.0.0).

The fixtures ensure consistency across tests and prevent schema breakage.
"""


def create_test_assessment_json(
    overall_score=85.0,
    num_findings=2,
    repo_path="/tmp/test",
    repo_name="test-repo",
    certification_level=None,
):
    """Build a schema-valid (v1.0.0) Assessment JSON payload for tests.

    Args:
        overall_score: Overall score in the 0-100 range.
        num_findings: How many synthetic findings to generate; the same value
            is used for ``attributes_assessed`` and ``attributes_total``.
        repo_path: Path recorded in the embedded repository section.
        repo_name: Name recorded in the embedded repository section.
        certification_level: Explicit level; derived from ``overall_score``
            when left as None.

    Returns:
        dict: Assessment data that can be serialized and loaded as JSON.
    """
    # Derive the certification level from the score unless the caller
    # pinned one explicitly.
    if certification_level is None:
        level_cutoffs = (
            (90, "Platinum"),
            (75, "Gold"),
            (60, "Silver"),
            (40, "Bronze"),
        )
        certification_level = next(
            (label for cutoff, label in level_cutoffs if overall_score >= cutoff),
            "Needs Improvement",
        )

    # All synthetic findings share one status/score pair keyed off the
    # overall score: passing assessments get passing findings and vice versa.
    passed = overall_score >= 60
    finding_status = "pass" if passed else "fail"
    finding_score = overall_score if passed else max(0, overall_score - 10)

    findings = [
        create_test_finding_json(
            attribute_id=f"test_attr_{idx}",
            attribute_name=f"Test Attribute {idx}",
            status=finding_status,
            score=finding_score,
        )
        for idx in range(num_findings)
    ]

    return {
        "schema_version": "1.0.0",
        "timestamp": "2025-11-22T06:00:00",
        "repository": {
            "name": repo_name,
            "path": repo_path,
            "url": None,
            "branch": "main",
            "commit_hash": "abc123",
            "languages": {"Python": 100},
            "total_files": 10,
            "total_lines": 500,
        },
        "overall_score": overall_score,
        "certification_level": certification_level,
        "attributes_assessed": num_findings,
        "attributes_not_assessed": 0,
        "attributes_total": num_findings,
        "findings": findings,
        # CRITICAL: the current schema requires the config key even when unset.
        "config": None,
        "duration_seconds": 1.5,
        # Optional in the schema, but included so consumers can rely on it.
        "discovered_skills": [],
    }


def create_test_finding_json(
    attribute_id="test_attr",
    attribute_name="Test Attribute",
    status="pass",
    score=90.0,
    category="Documentation",
    tier=1,
):
    """Build a schema-valid Finding JSON payload for tests.

    Args:
        attribute_id: Unique attribute identifier.
        attribute_name: Human-readable attribute name.
        status: One of pass, fail, skipped, error, or not_applicable.
        score: Score 0-100; emitted only for pass/fail statuses.
        category: Attribute category label.
        tier: Attribute tier (1-4).

    Returns:
        dict: Finding data that can be serialized and loaded as JSON.
    """
    attribute = {
        "id": attribute_id,
        "name": attribute_name,
        "category": category,
        "tier": tier,
        "description": f"Test description for {attribute_name}",
        "criteria": "Test criteria",
        "default_weight": 1.0,
    }

    # Non-terminal statuses (skipped/error/not_applicable) carry no score.
    reported_score = score if status in ("pass", "fail") else None
    measured_value = "present" if status == "pass" else "missing"
    error_message = "Test error" if status == "error" else None

    return {
        "attribute": attribute,
        "status": status,
        "score": reported_score,
        "measured_value": measured_value,
        "threshold": "present",
        "evidence": [f"Test evidence for {attribute_name}"],
        "error_message": error_message,
    }


def create_test_repository_json(path="/tmp/test", name="test-repo", languages=None):
    """Build a schema-valid Repository JSON payload for tests.

    Args:
        path: Repository path.
        name: Repository name.
        languages: Language breakdown mapping; defaults to ``{"Python": 100}``.

    Returns:
        dict: Repository data that can be serialized and loaded as JSON.
    """
    # None sentinel avoids the mutable-default-argument pitfall.
    resolved_languages = {"Python": 100} if languages is None else languages

    repository = {
        "name": name,
        "path": path,
        "url": None,
        "branch": "main",
        "commit_hash": "abc123",
        "languages": resolved_languages,
    }
    repository["total_files"] = 10
    repository["total_lines"] = 500
    return repository
74 changes: 15 additions & 59 deletions tests/unit/test_cli_learn.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
from click.testing import CliRunner

from agentready.cli.learn import learn
from tests.fixtures.assessment_fixtures import create_test_assessment_json


@pytest.fixture
Expand All @@ -24,46 +25,13 @@ def temp_repo():
agentready_dir = repo_path / ".agentready"
agentready_dir.mkdir()

# Create sample assessment
assessment_data = {
"schema_version": "1.0.0",
"timestamp": "2025-11-22T06:00:00",
"repository": {
"name": "test-repo",
"path": str(repo_path),
"url": None,
"branch": "main",
"commit_hash": "abc123",
"languages": {"Python": 100},
"total_files": 5,
"total_lines": 100,
},
"overall_score": 85.0,
"certification_level": "Gold",
"attributes_assessed": 2,
"attributes_not_assessed": 0,
"attributes_total": 2,
"findings": [
{
"attribute": {
"id": "claude_md_file",
"name": "CLAUDE.md File",
"category": "Documentation",
"tier": 1,
"description": "Test attribute",
"criteria": "Must exist",
"default_weight": 1.0,
},
"status": "pass",
"score": 100.0,
"measured_value": "present",
"threshold": "present",
"evidence": ["CLAUDE.md exists"],
"error_message": None,
}
],
"duration_seconds": 1.5,
}
# Create sample assessment using shared fixture
assessment_data = create_test_assessment_json(
overall_score=85.0,
num_findings=2,
repo_path=str(repo_path),
repo_name="test-repo",
)

assessment_file = agentready_dir / "assessment-latest.json"
with open(assessment_file, "w") as f:
Expand Down Expand Up @@ -301,25 +269,13 @@ def test_learn_command_default_repository(self, runner):
agentready_dir = Path(".agentready")
agentready_dir.mkdir()

# Create minimal assessment
assessment_data = {
"schema_version": "1.0.0",
"timestamp": "2025-11-22T06:00:00",
"repository": {
"name": "test",
"path": ".",
"languages": {"Python": 100},
"total_files": 1,
"total_lines": 10,
},
"overall_score": 75.0,
"certification_level": "Gold",
"attributes_assessed": 1,
"attributes_not_assessed": 0,
"attributes_total": 1,
"findings": [],
"duration_seconds": 1.0,
}
# Create minimal assessment using shared fixture
assessment_data = create_test_assessment_json(
overall_score=75.0,
num_findings=1,
repo_path=".",
repo_name="test",
)

with open(agentready_dir / "assessment-latest.json", "w") as f:
json.dump(assessment_data, f)
Expand Down
Loading