From f4f05519a072aa5399f50aa559f60fca358cd5b7 Mon Sep 17 00:00:00 2001
From: precondition <57645186+precondition@users.noreply.github.com>
Date: Mon, 13 Jan 2025 14:31:46 +0100
Subject: [PATCH] Set unlimited depth within the domain and reduce verbosity

---
 .github/workflows/broken-links-crawler.yml | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/.github/workflows/broken-links-crawler.yml b/.github/workflows/broken-links-crawler.yml
index f19282a8..aa24db3e 100644
--- a/.github/workflows/broken-links-crawler.yml
+++ b/.github/workflows/broken-links-crawler.yml
@@ -18,10 +18,10 @@ jobs:
         id: check-broken-links
         uses: ScholliYT/Broken-Links-Crawler-Action@v3.3.1
         with:
-          # We would need a high `max_depth` for the crawler to naturally find all the keymapdb pages but we do not want the crawler to go deep
-          # in unrelated websites, so in order to keep the `max_depth` low but the keymapdb page coverage high, we manually list them here.
-          website_url: "https://keymapdb.com,http://keymapdb.com/page/2/,http://keymapdb.com/page/3/,http://keymapdb.com/page/3/,http://keymapdb.com/page/4/,http://keymapdb.com/page/5/,http://keymapdb.com/page/6/,http://keymapdb.com/page/7/"
+          website_url: "https://keymapdb.com"
           exclude_url_prefix: "/assets,https://mechdb.net"
           max_retries: 2
-          max_depth: 3
-          verbose: debug
+          # The crawler stops going deeper once it leaves the keymapdb domain
+          # so it won't accidentally crawl the whole web even if you put -1.
+          max_depth: -1
+          verbose: true
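
The new comment's safety argument is that depth only compounds inside the keymapdb domain: off-domain links are still checked, but never expanded, so an unlimited max_depth cannot escape into the wider web. A minimal Python sketch of that domain-bounded traversal follows; it is a hypothetical illustration, not the action's actual implementation, and the requests dependency plus the LinkExtractor/crawl names are assumptions made for the example.

from collections import deque
from html.parser import HTMLParser
from urllib.parse import urljoin, urlparse

import requests  # assumption: any HTTP client works; the real action ships its own


class LinkExtractor(HTMLParser):
    """Collect href attributes from <a> tags on a fetched page."""

    def __init__(self) -> None:
        super().__init__()
        self.links: list[str] = []

    def handle_starttag(self, tag, attrs):
        if tag == "a":
            for name, value in attrs:
                if name == "href" and value:
                    self.links.append(value)


def crawl(start_url: str, max_depth: int = -1) -> list[str]:
    """Breadth-first link check. Every discovered URL is fetched once,
    but only pages on start_url's own domain are expanded further, so
    max_depth = -1 (unlimited) can never wander off the target site."""
    domain = urlparse(start_url).netloc
    seen = {start_url}
    broken: list[str] = []
    queue = deque([(start_url, 0)])
    while queue:
        url, depth = queue.popleft()
        try:
            response = requests.get(url, timeout=10)
        except requests.RequestException:
            broken.append(url)
            continue
        if response.status_code >= 400:
            broken.append(url)
        # Off-domain pages are status-checked above but never expanded;
        # this is the bound that makes unlimited depth safe.
        if urlparse(url).netloc != domain:
            continue
        if max_depth != -1 and depth >= max_depth:
            continue
        extractor = LinkExtractor()
        extractor.feed(response.text)
        for href in extractor.links:
            absolute = urljoin(url, href)
            if absolute.startswith("http") and absolute not in seen:
                seen.add(absolute)
                queue.append((absolute, depth + 1))
    return broken

With this shape, crawl("https://keymapdb.com") reaches every paginated page (/page/2/, /page/3/, ...) without the hand-maintained URL list the patch removes.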