Extending Ash

Extend Ash with custom tools, skills, providers, and agents.

Adding Tools

Tools are Python classes that implement the Tool interface.

Tool Interface

from typing import Any

from ash.tools.base import Tool, ToolContext, ToolResult

class MyTool(Tool):
    @property
    def name(self) -> str:
        return "my_tool"

    @property
    def description(self) -> str:
        return "Description shown to the LLM"

    @property
    def input_schema(self) -> dict[str, Any]:
        return {
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": "The search query.",
                },
                "limit": {
                    "type": "integer",
                    "description": "Max results.",
                    "default": 10,
                },
            },
            "required": ["query"],
        }

    async def execute(
        self,
        input_data: dict[str, Any],
        context: ToolContext,
    ) -> ToolResult:
        query = input_data.get("query")
        limit = input_data.get("limit", 10)
        # Implementation goes here; do_something is a placeholder for your tool's logic
        result = await do_something(query, limit)
        return ToolResult.success(result)

Registering Tools

Add to the tool registry in your application:

from ash.tools.registry import ToolRegistry
registry = ToolRegistry()
registry.register(MyTool())

Adding Skills

Skills are instruction files that the agent reads and follows. They use markdown with YAML frontmatter.

Skill Format

Create ~/.ash/workspace/skills/my-skill/SKILL.md:

---
description: What this skill does
requires:
  bins:
    - curl
  env:
    - API_KEY
tools:
  - bash
  - web_fetch
---

## Instructions

Step-by-step instructions the agent will follow...

1. First, check if the API key is available
2. Use bash to call the API
3. Parse the response and format the output

Frontmatter Fields

| Field | Type | Required | Description |
| --- | --- | --- | --- |
| description | string | Yes | One-line description |
| requires | object | No | System requirements |
| tools | list | No | Tools the skill needs |
| input_schema | object | No | JSON Schema for inputs |

Requirements

requires:
  bins:  # Required binaries (must be in PATH)
    - curl
    - jq
  env:  # Required environment variables
    - API_KEY
  os:  # Supported operating systems
    - darwin
    - linux

If requirements aren’t met, the skill is hidden from the agent.
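
To make the gating concrete, here is a short, self-contained sketch of how such a check can be written with the standard library. It illustrates the rule above and is not Ash's actual implementation; the requirements_met helper is hypothetical.

import os
import shutil
import sys

def requirements_met(requires: dict) -> bool:
    """Illustrative check mirroring the bins/env/os fields shown above."""
    # Every required binary must be resolvable on PATH.
    if any(shutil.which(binary) is None for binary in requires.get("bins", [])):
        return False
    # Every required environment variable must be set.
    if any(name not in os.environ for name in requires.get("env", [])):
        return False
    # sys.platform is "darwin" on macOS and "linux" on Linux.
    allowed = requires.get("os")
    if allowed and sys.platform not in allowed:
        return False
    return True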

Using the CLI

# Create a new skill
uv run ash skill init my-skill -d "Do something useful"
# Validate a skill file
uv run ash skill validate ~/.ash/workspace/skills/my-skill
# List available skills
uv run ash skill list

Adding Providers

Providers handle communication channels (Telegram, CLI, etc.).

Provider Interface

from ash.providers.base import Provider, IncomingMessage, OutgoingMessage

class MyProvider(Provider):
    @property
    def name(self) -> str:
        return "my_provider"

    async def start(self, handler) -> None:
        """Start receiving messages."""
        # Set up message reception
        # Call handler(message) for each incoming message

    async def stop(self) -> None:
        """Clean up resources."""

    async def send(self, message: OutgoingMessage) -> str:
        """Send message, return message ID."""

    async def send_streaming(
        self,
        chat_id: str,
        stream,
        reply_to=None,
    ) -> str:
        """Stream response with live updates."""

    async def edit(self, message_id: str, content: str) -> None:
        """Edit existing message."""

    async def delete(self, message_id: str) -> None:
        """Delete message."""

Example: Discord Provider

import discord

from ash.providers.base import IncomingMessage, OutgoingMessage, Provider

class DiscordProvider(Provider):
    def __init__(self, token: str, allowed_channels: list[str]):
        # discord.py 2.x requires explicit intents; reading message content
        # also needs the message_content intent enabled for the bot.
        intents = discord.Intents.default()
        intents.message_content = True
        self.client = discord.Client(intents=intents)
        self.token = token
        self.allowed_channels = allowed_channels
        self.handler = None

    async def start(self, handler) -> None:
        self.handler = handler

        @self.client.event
        async def on_message(message):
            if message.author.bot:
                return
            if str(message.channel.id) not in self.allowed_channels:
                return
            incoming = IncomingMessage(
                id=str(message.id),
                chat_id=str(message.channel.id),
                user_id=str(message.author.id),
                content=message.content,
            )
            await handler(incoming)

        await self.client.start(self.token)

    async def send(self, message: OutgoingMessage) -> str:
        channel = self.client.get_channel(int(message.chat_id))
        sent = await channel.send(message.content)
        return str(sent.id)
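
The example stops at send. The remaining methods map onto standard discord.py calls; the sketch below assumes send also records each message's channel in a hypothetical self._channels dict, which is not part of the example above.

    # Continuation of DiscordProvider (illustrative only).
    async def stop(self) -> None:
        await self.client.close()

    async def edit(self, message_id: str, content: str) -> None:
        # self._channels is a hypothetical message_id -> channel_id cache.
        channel = self.client.get_channel(int(self._channels[message_id]))
        msg = await channel.fetch_message(int(message_id))
        await msg.edit(content=content)

    async def delete(self, message_id: str) -> None:
        channel = self.client.get_channel(int(self._channels[message_id]))
        msg = await channel.fetch_message(int(message_id))
        await msg.delete()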

Adding LLM Providers

For new LLM backends:

from ash.llm.base import LLMProvider

class MyLLMProvider(LLMProvider):
    @property
    def name(self) -> str:
        return "my_llm"

    async def complete(self, messages, **kwargs):
        # Call your LLM API
        pass

    async def stream(self, messages, **kwargs):
        # Yield streaming chunks
        pass

    async def embed(self, texts, **kwargs):
        # Generate embeddings
        pass

Register in src/ash/llm/registry.py.
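
The stream method is usually written as an async generator. A minimal sketch follows; my_sdk_client and chunk.text stand in for whatever SDK you wrap, and the exact chunk type Ash expects may differ.

class MyLLMProvider(LLMProvider):
    async def stream(self, messages, **kwargs):
        # Yield text incrementally instead of waiting for the full response.
        # my_sdk_client and chunk.text are placeholders, not a real dependency.
        async for chunk in my_sdk_client.stream_chat(messages, **kwargs):
            yield chunk.text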

Adding Agents

Agents are specialized sub-processes that run in isolated LLM loops with restricted tool access.

Agent Interface

Location: src/ash/agents/base.py

from ash.agents.base import Agent, AgentConfig, AgentContext, AgentResult

class MyAgent(Agent):
    @property
    def config(self) -> AgentConfig:
        return AgentConfig(
            name="my-agent",
            description="What this agent does",
            system_prompt="You are a specialized agent that...",
            model="default",  # Model alias from config
            max_iterations=25,
            tools=["bash", "web_search"],  # Restrict tools
        )

    async def execute(
        self,
        message: str,
        context: AgentContext,
    ) -> AgentResult:
        # Run agent logic using self.llm_provider and self.tools
        return AgentResult(content="Result", is_error=False)

Built-in Agents

| Agent | Purpose |
| --- | --- |
| research | Multi-step research tasks |
| skill-writer | Creates new skills autonomously |

Agent Configuration

Override agent settings in config.toml:

[agents.research]
model = "sonnet" # Use sonnet model for research
max_iterations = 50