Auto-archive stale HubSpot deals using code and cron
Prerequisites
- Node.js 18+ or Python 3.9+
- HubSpot private app token (scopes: crm.objects.deals.read, crm.objects.deals.write)
- Slack Bot Token (xoxb-...) with chat:write scope
- A scheduling environment: cron, GitHub Actions, or a cloud function
Overview
This uses a two-phase approach with a state file to handle the 48-hour grace period:
- Phase 1 (runs daily): Finds stale deals, warns owners in Slack, writes deal IDs and timestamps to a state file
- Phase 2 (runs on the same schedule): Checks the state file for deals warned 48+ hours ago, verifies they haven't been updated, and closes them
Both phases run in the same script on every execution. Phase 2 processes previously warned deals while Phase 1 finds new ones.
Step 1: Set up the project
mkdir stale-deal-archiver && cd stale-deal-archiver
python -m venv venv && source venv/bin/activate
pip install requests slack_sdk
Step 2: Build the stale deal search
import os, json, requests
from datetime import datetime, timedelta, timezone
from pathlib import Path
from slack_sdk import WebClient

# --- Configuration (all secrets come from environment variables) ---

# HubSpot private app token; needs crm.objects.deals.read/write scopes.
HUBSPOT_TOKEN = os.environ["HUBSPOT_TOKEN"]
HEADERS = {"Authorization": f"Bearer {HUBSPOT_TOKEN}", "Content-Type": "application/json"}
# Slack bot client (xoxb- token) and the channel that receives warnings.
slack = WebClient(token=os.environ["SLACK_BOT_TOKEN"])
CHANNEL = os.environ["SLACK_CHANNEL_ID"]
# JSON state file recording which deals have been warned and when.
STATE_FILE = Path(__file__).parent / "warned_deals.json"
# A deal with no modification for this many days is considered stale.
STALE_DAYS = 60
# Grace period between the Slack warning and the auto-close.
GRACE_HOURS = 48
def load_state():
    """Return the warned-deals state dict from STATE_FILE, or {} if no state file exists yet."""
    if not STATE_FILE.exists():
        return {}
    return json.loads(STATE_FILE.read_text())
def save_state(state):
    """Persist *state* to STATE_FILE as indented JSON."""
    payload = json.dumps(state, indent=2)
    STATE_FILE.write_text(payload)
def search_stale_deals():
    """Find active deals with no activity for STALE_DAYS+ days.

    Pages through the HubSpot CRM v3 search endpoint 100 deals at a time
    and returns the accumulated list of deal objects (API dicts).

    NOTE(review): the search endpoint caps total results at 10,000 and has
    a stricter rate limit than other CRM endpoints — confirm the portal's
    open-deal count stays below that before relying on this in production.
    """
    # Epoch-milliseconds cutoff: anything last modified before this is stale.
    cutoff_ms = int((datetime.now(timezone.utc) - timedelta(days=STALE_DAYS)).timestamp() * 1000)
    all_deals = []
    after = 0  # pagination cursor; HubSpot returns the next value in `paging`
    while True:
        resp = requests.post(
            "https://api.hubapi.com/crm/v3/objects/deals/search",
            headers=HEADERS,
            json={
                # Filters inside one group are ANDed: stale AND not already closed.
                "filterGroups": [{"filters": [
                    {"propertyName": "hs_lastmodifieddate", "operator": "LT",
                     "value": str(cutoff_ms)},
                    {"propertyName": "dealstage", "operator": "NOT_IN",
                     "values": ["closedwon", "closedlost"]}
                ]}],
                "properties": ["dealname", "amount", "dealstage",
                               "hubspot_owner_id", "hs_lastmodifieddate"],
                "sorts": [{"propertyName": "hs_lastmodifieddate",
                           "direction": "ASCENDING"}],
                "limit": 100,  # max page size for the search endpoint
                "after": after,
            },
        )
        resp.raise_for_status()
        data = resp.json()
        all_deals.extend(data["results"])
        # Keep paging until HubSpot stops returning a `next` cursor.
        if data.get("paging", {}).get("next"):
            after = data["paging"]["next"]["after"]
        else:
            break
    return all_deals
Step 3: Phase 2 — close deals past the grace period
This runs first on each execution, processing any deals that were warned in a previous run.
def close_expired_warnings(state):
    """Close deals warned 48+ hours ago that haven't been updated.

    Args:
        state: dict mapping deal_id -> {"name": ..., "warned_at": ISO-8601 timestamp}.

    Returns (via the trailing return) a list of closed deal names and the
    dict of deals still inside their grace window. Deals that were deleted,
    updated since the warning, or already closed are intentionally dropped
    from state — they no longer need tracking (an updated deal can be
    re-warned later if it goes stale again).
    """
    now = datetime.now(timezone.utc)
    closed = []         # names of deals closed this run
    still_waiting = {}  # deals still inside the grace window, carried forward
    for deal_id, info in state.items():
        warned_at = datetime.fromisoformat(info["warned_at"])
        hours_since_warning = (now - warned_at).total_seconds() / 3600
        if hours_since_warning < GRACE_HOURS:
            # Grace period not over yet — keep the entry for the next run.
            still_waiting[deal_id] = info
            continue
        # Re-fetch the deal to check if it was updated
        resp = requests.get(
            f"https://api.hubapi.com/crm/v3/objects/deals/{deal_id}",
            headers=HEADERS,
            params={"properties": "hs_lastmodifieddate,dealstage"}
        )
        if resp.status_code == 404:
            continue  # deal was deleted
        resp.raise_for_status()
        deal = resp.json()
        # Skip if the deal was updated after the warning.
        # HubSpot returns a trailing "Z"; fromisoformat needs "+00:00" (pre-3.11).
        last_mod = datetime.fromisoformat(
            deal["properties"]["hs_lastmodifieddate"].replace("Z", "+00:00"))
        if last_mod > warned_at:
            print(f" Skipping {info['name']} — updated since warning")
            continue
        # Skip if already closed
        if deal["properties"]["dealstage"] in ("closedwon", "closedlost"):
            continue
        # Close it.
        # NOTE(review): "closedlost" / "closed_lost_reason" assume the default
        # pipeline's internal stage ID and property — confirm for portals with
        # custom pipelines.
        close_resp = requests.patch(
            f"https://api.hubapi.com/crm/v3/objects/deals/{deal_id}",
            headers=HEADERS,
            json={"properties": {
                "dealstage": "closedlost",
                "closed_lost_reason": "Stale — auto-archived after 60 days"
            }}
        )
        close_resp.raise_for_status()
        closed.append(info["name"])
        print(f" Closed: {info['name']}")
    return closed, still_waiting
Step 4: Phase 1 — warn new stale deals
def warn_new_deals(deals, state):
    """Send Slack warnings for newly discovered stale deals.

    Args:
        deals: deal objects as returned by search_stale_deals().
        state: warned-deals dict; MUTATED in place — an entry with the
            warning timestamp is added for every deal warned here.

    Returns (via the trailing return) the list of warned deal names.
    """
    warned = []
    for deal in deals:
        deal_id = deal["id"]
        if deal_id in state:
            continue  # already warned
        props = deal["properties"]
        name = props["dealname"]
        # amount may be absent or an empty string; treat both as 0.
        amount = float(props.get("amount") or 0)
        last_mod = datetime.fromisoformat(
            props["hs_lastmodifieddate"].replace("Z", "+00:00"))
        days_stale = (datetime.now(timezone.utc) - last_mod).days
        slack.chat_postMessage(
            channel=CHANNEL,
            # `text` is the plain-text fallback shown in notifications.
            text=f"Stale deal warning: {name}",
            blocks=[
                {"type": "section", "text": {"type": "mrkdwn", "text":
                    f":warning: *Stale Deal Warning*\n\n"
                    f"*{name}* (${amount:,.0f}) has had no activity "
                    f"for {days_stale} days.\n\n"
                    f"This deal will be moved to *Closed Lost* in 48 hours "
                    f"unless someone updates it in HubSpot."
                }},
            ],
            unfurl_links=False,
        )
        # Record the warning so Phase 2 can enforce the grace period.
        state[deal_id] = {
            "name": name,
            "warned_at": datetime.now(timezone.utc).isoformat(),
        }
        warned.append(name)
        print(f" Warned: {name} ({days_stale}d stale)")
    return warned
Step 5: Wire it all together
def main():
    """Run both phases: close expired warnings, then warn newly stale deals.

    State is saved in a ``finally`` block so that warnings already sent to
    Slack are recorded even if a later step raises — otherwise deal owners
    would be warned again (and the 48-hour grace clock silently reset) on
    the next run.
    """
    print(f"=== Stale Deal Archiver — {datetime.now().isoformat()} ===")
    state = load_state()
    closed, warned = [], []
    remaining = state
    try:
        # Phase 2 first: process deals warned on a previous run.
        print(f"\nPhase 2: Checking {len(state)} previously warned deals...")
        closed, remaining = close_expired_warnings(state)
        # Phase 1: find and warn new stale deals (mutates `remaining` in place).
        print("\nPhase 1: Searching for stale deals...")
        stale_deals = search_stale_deals()
        print(f" Found {len(stale_deals)} stale deals")
        warned = warn_new_deals(stale_deals, remaining)
    finally:
        # Persist whatever progress was made, even on failure.
        save_state(remaining)
    # Summary
    print(f"\nDone — warned: {len(warned)}, closed: {len(closed)}, "
          f"waiting: {len(remaining)}")
    # Post summary to Slack if any deals were closed
    if closed:
        slack.chat_postMessage(
            channel=CHANNEL,
            text=f"Auto-archived {len(closed)} stale deals",
            blocks=[
                {"type": "section", "text": {"type": "mrkdwn", "text":
                    f"🗄️ *{len(closed)} Deals Auto-Archived*\n"
                    + "\n".join(f"• {name}" for name in closed)
                    + "\n\nThese deals had no activity for 60+ days and "
                    "no objection during the 48-hour grace period."
                }},
            ],
        )
if __name__ == "__main__":
    main()
The warned_deals.json file must persist between runs. If you use GitHub Actions, the state file won't survive across runs since each run gets a fresh checkout. Use a GitHub Artifact, an S3 bucket, or a database (SQLite, Redis) instead. For a simple fix, commit the state file to the repo — the action can push updates back.
Step 6: Schedule
Cron (server-based):
# Run daily at 7 AM
0 7 * * * cd /path/to/stale-deal-archiver && python archiver.py >> /var/log/stale-archiver.log 2>&1
GitHub Actions:
name: Stale Deal Archiver
on:
schedule:
- cron: '0 12 * * *' # 7 AM ET = 12 PM UTC
workflow_dispatch: {}
jobs:
archive:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: '3.12'
- run: pip install requests slack_sdk
- name: Download state
uses: actions/download-artifact@v4
with:
name: archiver-state
path: .
continue-on-error: true # first run has no state
- run: python archiver.py
env:
HUBSPOT_TOKEN: ${{ secrets.HUBSPOT_TOKEN }}
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
SLACK_CHANNEL_ID: ${{ secrets.SLACK_CHANNEL_ID }}
- name: Upload state
uses: actions/upload-artifact@v4
with:
name: archiver-state
path: warned_deals.json
        overwrite: true
GitHub Actions runs in a fresh environment each time. The artifact upload/download steps persist the warned_deals.json state file between runs, so Phase 2 can check for deals warned in previous executions. Note: actions/download-artifact@v4 only downloads artifacts uploaded within the same workflow run by default — to restore state from a previous run, pass a github-token and run-id to the download step, or use actions/cache instead.
Cost
- Free — GitHub Actions provides 2,000 minutes/month on the free tier. This script runs in under 30 seconds per execution.
Need help implementing this?
We build and optimize automation systems for mid-market businesses. Let's discuss the right approach for your team.