Fleet Mode (slum)

When one tenement isn’t enough, use slum, the multi-server fleet orchestrator.

slum (the neighborhood) coordinates tenement (the building) instances across a fleet of servers.

┌─────────────────────────────────────────┐
│      slum (Fleet Database + Router)     │
├──────────────┬──────────────┬───────────┤
│   tenement   │   tenement   │  tenement │
│   (east-1)   │   (west-1)   │ (south-1) │
└──────────────┴──────────────┴───────────┘

use anyhow::Result; // assumed error type; substitute slum's own Result if it exports one
use slum::{SlumDb, Server};

#[tokio::main]
async fn main() -> Result<()> {
    // Create/open the fleet database
    let db = SlumDb::init("slum.db").await?;

    // Add buildings (tenement servers) to the fleet
    db.add_server(&Server {
        id: "east".into(),
        url: "http://east.example.com".into(),
        region: Some("us-east".into()),
        capacity_mb: 1024,
        ..Default::default()
    }).await?;

    db.add_server(&Server {
        id: "west".into(),
        url: "http://west.example.com".into(),
        region: Some("us-west".into()),
        capacity_mb: 1024,
        ..Default::default()
    }).await?;

    Ok(())
}

use slum::Tenant;

// Assign a tenant to a specific server
db.add_tenant(&Tenant {
    domain: "customer-1.example.com".into(),
    server_id: "east".into(),
    process: "api".into(),
    instance_id: "prod".into(),
    ..Default::default()
}).await?;

// Look up where a tenant lives
let tenant = db.get_tenant_by_domain("customer-1.example.com").await?;
println!("Tenant is on server: {}", tenant.server_id);

// Spawn an instance across the fleet
db.spawn_instance(
    "customer-2",
    "api",
    "prod",
    "west", // target server
).await?;

// Stop an instance (slum finds the right server)
db.stop_instance("customer-2:prod").await?;

// List all instances across all servers
let instances = db.list_all_instances().await?;

// Add a server
db.add_server(&Server {
    id: "east".into(),
    url: "http://east.example.com".into(),
    capacity_mb: 2048,
    ..Default::default()
}).await?;

// Update a server
db.update_server(&updated_server).await?;

// List servers
let servers = db.list_servers().await?;

// Get server stats
let stats = db.get_server_stats("east").await?;

// Add a tenant
db.add_tenant(&Tenant {
    domain: "customer.example.com".into(),
    server_id: "east".into(),
    process: "api".into(),
    instance_id: "prod".into(),
    ..Default::default()
}).await?;

// Get a tenant
let tenant = db.get_tenant_by_domain("customer.example.com").await?;

// List tenants on a server
let tenants = db.list_tenants_on_server("east").await?;

// Reassign a tenant to a different server
db.update_tenant(&updated_tenant).await?;

// Spawn on a specific server
db.spawn_instance(
    "customer-3",
    "api",
    "prod",
    "east",
).await?;

// Stop (slum finds the server automatically)
db.stop_instance("customer-3:prod").await?;

// List instances on a server
let instances = db.list_instances_on_server("east").await?;

// Get instance status
let status = db.get_instance_status("customer-3:prod").await?;
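
Combining the instance calls above, a fleet-wide status sweep might look like the following sketch. The `id` field on the returned records and the Debug formatting of the status are assumptions, not guaranteed API; adjust them to the real structs.

// Sweep every instance in the fleet and print its status.
// Assumes each record from list_all_instances carries an `id` in the
// "customer:instance" form that get_instance_status accepts.
let instances = db.list_all_instances().await?;
for inst in &instances {
    let status = db.get_instance_status(&inst.id).await?;
    println!("{} -> {:?}", inst.id, status); // assumes the status type derives Debug
}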

Route tenants to the nearest server:

// `preferred_region` comes from your own customer record; it is not a
// field on slum's Tenant.
let mut tenant = db.get_tenant_by_domain("customer.example.com").await?;
if let Some(region) = customer.preferred_region {
    // Find a server in that region
    let server = db.find_server_in_region(&region).await?;

    // Reassign the tenant to it
    tenant.server_id = server.id;
    db.update_tenant(&tenant).await?;
}

Distribute new tenants based on server capacity:

let servers = db.list_servers().await?;
let least_used = servers
    .iter()
    .min_by_key(|s| s.used_capacity_mb())
    .expect("No servers available");

db.spawn_instance(
    &new_customer_id,
    "api",
    "prod",
    &least_used.id,
).await?;

Mirror tenants across servers:

// Primary instance
db.spawn_instance("customer-4", "api", "prod", "east").await?;
// Backup instance
db.spawn_instance("customer-4", "api", "backup", "west").await?;
// Route to the primary, failing over to the backup if needed (see the sketch below)
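
slum tracks where each instance lives, but the failover decision itself belongs to your router. A minimal sketch under stated assumptions: `pick_route` and `is_running` are hypothetical helpers, and the health check is a placeholder you would write against slum's real status type.

use anyhow::{bail, Result};
use slum::SlumDb;

// Placeholder predicate: stand in for a real check against whatever
// status type get_instance_status returns.
fn is_running<S>(_status: &S) -> bool {
    true
}

/// Decide which instance key a customer's traffic should go to.
async fn pick_route(db: &SlumDb, customer: &str) -> Result<String> {
    let primary = format!("{customer}:prod");
    if is_running(&db.get_instance_status(&primary).await?) {
        return Ok(primary); // primary (east) is healthy
    }
    let backup = format!("{customer}:backup");
    if is_running(&db.get_instance_status(&backup).await?) {
        return Ok(backup); // fail over to the mirror (west)
    }
    bail!("no healthy instance for {customer}")
}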

Server fields:

  Field        Type            Description
  id           String          Unique server ID (e.g., “east-1”)
  url          String          Server URL (e.g., “http://east-1.example.com”)
  region       Option<String>  Region (e.g., “us-east”)
  capacity_mb  i64             Total available memory (MB)
  created_at   DateTime        Creation timestamp

Tenant fields:

  Field        Type      Description
  domain       String    Customer domain (e.g., “acme.example.com”)
  server_id    String    Home server ID
  process      String    Process name (e.g., “api”)
  instance_id  String    Instance ID (e.g., “prod”)
  created_at   DateTime  Creation timestamp

slum enforces referential integrity:

  • A tenant’s server_id must reference an existing server
  • A server with active tenants can’t be deleted
  • A tenant that doesn’t exist can’t be updated
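
To see the first and third rules in action, here is a sketch using only calls shown above. The domain and server ID are made up, and since the exact error type is slum's own, it only asserts that each call fails:

use slum::Tenant;

let bad = Tenant {
    domain: "ghost.example.com".into(),  // made-up tenant
    server_id: "no-such-server".into(),  // never registered
    process: "api".into(),
    instance_id: "prod".into(),
    ..Default::default()
};

// Rule 1: the server_id must exist, so adding this tenant fails.
assert!(db.add_tenant(&bad).await.is_err());

// Rule 3: the tenant was never added, so updating it fails too.
assert!(db.update_tenant(&bad).await.is_err());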