From 0726376955f38f60db7a51b862aafb6992dab866 Mon Sep 17 00:00:00 2001 From: epi052 <43392618+epi052@users.noreply.github.com> Date: Sat, 26 Dec 2020 19:11:58 -0600 Subject: [PATCH] started documentation, fixed scanner option/result --- .github/workflows/build.yml | 2 +- README.md | 5 +++++ ferox-config.toml.example | 1 + src/scanner.rs | 2 +- 4 files changed, 8 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 8d1bbee..c7480ea 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -5,7 +5,7 @@ on: [push] jobs: build-nix: runs-on: ${{ matrix.os }} - if: github.ref == 'refs/heads/master' +# if: github.ref == 'refs/heads/master' strategy: matrix: type: [ubuntu-x64, ubuntu-x86] diff --git a/README.md b/README.md index 2a35050..084c7b0 100644 --- a/README.md +++ b/README.md @@ -89,6 +89,8 @@ This attack is also known as Predictable Resource Location, File Enumeration, Di - [Filter Response Using a Regular Expression (new in `v1.8.0`)](#filter-response-using-a-regular-expression-new-in-v180) - [Stop and Resume Scans (save scan's state to disk) (new in `v1.9.0`)](#stop-and-resume-scans---resume-from-file-new-in-v190) - [Enforce a Time Limit on Your Scan (new in `v1.10.0`)](#enforce-a-time-limit-on-your-scan-new-in-v1100) + - [Extract Links from robots.txt (New in `v1.10.2`)](#extract-links-from-robotstxt-new-in-v1102) + - [Filter Response by Similarity to A Given Page (new in `v1.11.0`)](#filter-response-by-similarity-to-a-given-page-new-in-v1110) - [Comparison w/ Similar Tools](#-comparison-w-similar-tools) - [Common Problems/Issues (FAQ)](#-common-problemsissues-faq) - [No file descriptors available](#no-file-descriptors-available) @@ -352,6 +354,7 @@ A pre-made configuration file with examples of all available settings can be fou # depth = 1 # filter_size = [5174] # filter_regex = ["^ignore me$"] +# filter_similar = ["https://somesite.com/soft404"] # filter_word_count = [993] # 
filter_line_count = [35, 36] # queries = [["name","value"], ["rick", "astley"]] @@ -658,6 +661,8 @@ In addition to [extracting links from the response body](#extract-links-from-res `--extract-links` makes a request to `/robots.txt` and examines all `Allow` and `Disallow` entries. Directory entries are added to the scan queue, while file entries are requested and then reported if appropriate. +### Filter Response by Similarity to A Given Page (new in `v1.11.0`) + ## 🧐 Comparison w/ Similar Tools There are quite a few similar tools for forced browsing/content discovery. Burp Suite Pro, Dirb, Dirbuster, etc... diff --git a/ferox-config.toml.example b/ferox-config.toml.example index 59a3d56..bfc801c 100644 --- a/ferox-config.toml.example +++ b/ferox-config.toml.example @@ -33,6 +33,7 @@ # depth = 1 # filter_size = [5174] # filter_regex = ["^ignore me$"] +# filter_similar = ["https://somesite.com/soft404"] # filter_word_count = [993] # filter_line_count = [35, 36] # queries = [["name","value"], ["rick", "astley"]] diff --git a/src/scanner.rs b/src/scanner.rs index 0e745ce..da5d8bb 100644 --- a/src/scanner.rs +++ b/src/scanner.rs @@ -676,7 +676,7 @@ pub async fn initialize(num_words: usize, config: &Configuration) { // if successful, create a filter based on the response's body let fr = FeroxResponse::from(resp, true).await; - if let Ok(hash) = ssdeep::hash(fr.text().as_bytes()) { + if let Some(hash) = ssdeep::hash(fr.text().as_bytes()) { // hash the response body and store the resulting hash in the filter object let filter = SimilarityFilter { text: hash,