Besides configuring MCP servers in `lightspeed-stack.yaml`, we also need to enable the appropriate tool runtime in llama-stack's `run.yaml` file under the `tool_runtime` section. Here's an example using the default `provider_id` name that lightspeed-stack uses for MCP servers:
# /var/secrets/api-key my-api-key-value diff --git a/examples/run.yaml b/examples/run.yaml index 9d8ed9fac..e4951ee2b 100644 --- a/examples/run.yaml +++ b/examples/run.yaml @@ -1,9 +1,9 @@ # Example llama-stack configuration for OpenAI inference + FAISS (RAG) -# +# # Notes: # - You will need an OpenAI API key # - You can generate the vector index with the rag-content tool (https://github.com/lightspeed-core/rag-content) -# +# version: 2 apis: @@ -17,7 +17,7 @@ apis: - scoring - tool_runtime - vector_io - + benchmarks: [] datasets: [] image_name: starter @@ -61,6 +61,9 @@ providers: - config: {} # Enable the RAG tool provider_id: rag-runtime provider_type: inline::rag-runtime + - config: {} # Enable the MCP tool + provider_id: model-context-protocol + provider_type: remote::model-context-protocol vector_io: - config: # Define the storage backend for RAG persistence: @@ -144,7 +147,7 @@ registered_resources: provider_model_id: sentence-transformers/all-mpnet-base-v2 metadata: embedding_dimension: 768 - vector_stores: + vector_stores: - embedding_dimension: 768 embedding_model: sentence-transformers/nomic-ai/nomic-embed-text-v1.5 provider_id: faiss @@ -167,4 +170,4 @@ vector_stores: safety: default_shield_id: llama-guard telemetry: - enabled: true \ No newline at end of file + enabled: true