---
name: 'Cairn Zig Fuzz (AFL++)'
description: 'Build and run Zig AFL++ fuzz targets, reporting crashes and corpus to Cairn. Each target is built twice: `zig build fuzz -Dfuzz-target=<name>` (AFL-instrumented) and `zig build fuzz-replay -Dfuzz-target=<name>` (plain, for crash replay).'

inputs:
  cairn_server:
    description: 'Cairn server URL'
    required: true
  repo:
    description: 'Repository name'
    required: true
  owner:
    description: 'Repository owner'
    required: true
  commit:
    description: 'Commit SHA'
    required: false
    default: '${{ github.sha }}'
  targets:
    description: |
      Line-delimited fuzz target names. Each line is passed as -Dfuzz-target=<name>.
      Example:
        lexer
        parser
        varint_decode
    required: true
  fuzz_binary:
    description: 'Binary name in the build output bin/ directory (auto-detected if only one)'
    required: false
    default: ''
  seed_dir:
    description: |
      Directory containing per-target seed inputs. Each subdirectory should match
      a target name from the targets input. Files are merged with the downloaded corpus.
      Example layout:
        seeds/lexer/valid-token.txt
        seeds/parser/minimal-program.txt
    required: false
    default: ''
  duration:
    description: 'Fuzz duration per target in seconds'
    required: false
    default: '600'
  afl_args:
    description: 'Extra afl-fuzz arguments'
    required: false
    default: ''
  target:
    description: 'Target platform metadata'
    required: false
    default: ''
  cairn_version:
    description: 'Cairn CLI version'
    required: false
    default: 'latest'
|
runs:
  using: 'composite'
  steps:
    - name: Setup AFL++
      uses: https://git.ts.mattnite.net/mattnite/cairn/actions/setup-afl@main

    # Fail early with actionable messages if the toolchain is incomplete.
    - name: Check prerequisites
      shell: sh
      run: |
        set -eu
        command -v zig >/dev/null 2>&1 || { echo "ERROR: zig not found in PATH. Install Zig before using this action (e.g. https://codeberg.org/mlugg/setup-zig@v2)."; exit 1; }
        command -v afl-cc >/dev/null 2>&1 || { echo "ERROR: afl-cc not found in PATH after setup-afl step."; exit 1; }
        echo "zig $(zig version), afl-cc found"

    - name: Setup Cairn CLI
      uses: https://git.ts.mattnite.net/mattnite/cairn/actions/setup-cairn@main
      with:
        version: ${{ inputs.cairn_version }}
        server_url: ${{ inputs.cairn_server }}

    # All inputs are passed via env so the shell script never interpolates
    # ${{ }} expressions directly (avoids injection through input values).
    - name: Build and fuzz with AFL++
      shell: bash
      env:
        CAIRN_SERVER: ${{ inputs.cairn_server }}
        REPO: ${{ inputs.repo }}
        OWNER: ${{ inputs.owner }}
        COMMIT: ${{ inputs.commit }}
        TARGETS: ${{ inputs.targets }}
        FUZZ_BINARY: ${{ inputs.fuzz_binary }}
        SEED_DIR: ${{ inputs.seed_dir }}
        DURATION: ${{ inputs.duration }}
        EXTRA_AFL_ARGS: ${{ inputs.afl_args }}
        TARGET_PLATFORM: ${{ inputs.target }}
      run: |
        set -eu

        echo "Cairn CLI version: $(cairn version)"
        NCPU=$(nproc 2>/dev/null || echo 1)
        echo "Available cores: ${NCPU}"

        RESULTS_DIR=$(mktemp -d)
        trap 'rm -rf "${RESULTS_DIR}"' EXIT

        # ── Collect target names ──
        # Strip comments and whitespace; skip blank lines.
        TARGET_NAMES=()
        while IFS= read -r line; do
          line=$(echo "${line}" | sed 's/#.*//' | xargs)
          [ -z "${line}" ] && continue
          TARGET_NAMES+=("${line}")
        done <<EOF
        ${TARGETS}
        EOF

        TARGET_COUNT=${#TARGET_NAMES[@]}
        # Fail fast on an effectively-empty targets input: every later phase
        # assumes at least one target, and expanding "${!TARGET_NAMES[@]}" on
        # an empty array is an unbound-variable error under `set -u` on
        # bash < 4.4. Without this guard the action would "fuzz 0 targets"
        # and report success.
        if [ "${TARGET_COUNT}" -eq 0 ]; then
          echo "ERROR: targets input contained no target names after stripping comments/blank lines"
          exit 1
        fi
        echo "Targets: ${TARGET_COUNT}, parallel slots: ${NCPU}"

        # ── Phase 1: Build all targets sequentially ──
        echo ""
        echo "=========================================="
        echo "Building ${TARGET_COUNT} target(s)"
        echo "=========================================="

        # Helper to find the single executable in a bin/ directory.
        # Honors FUZZ_BINARY when set; otherwise requires exactly one
        # executable to be present.
        find_bin() {
          local BIN_DIR="$1"
          local LABEL="$2"
          if [ -n "${FUZZ_BINARY}" ]; then
            echo "${BIN_DIR}/${FUZZ_BINARY}"
            return
          fi
          local BIN
          BIN=$(find "${BIN_DIR}" -maxdepth 1 -type f -executable)
          local COUNT
          COUNT=$(echo "${BIN}" | wc -l)
          if [ "${COUNT}" -eq 0 ] || [ -z "${BIN}" ]; then
            echo "ERROR: No executable found in ${BIN_DIR} (${LABEL})" >&2
            return 1
          elif [ "${COUNT}" -gt 1 ]; then
            echo "ERROR: Multiple executables in ${BIN_DIR} (${LABEL}), specify fuzz_binary input" >&2
            return 1
          fi
          echo "${BIN}"
        }

        declare -A TARGET_BINS
        declare -A REPLAY_BINS
        for i in "${!TARGET_NAMES[@]}"; do
          FUZZ_TARGET="${TARGET_NAMES[$i]}"
          NUM=$((i + 1))
          WORK="work/${FUZZ_TARGET}"
          mkdir -p "${WORK}"

          echo "[${NUM}/${TARGET_COUNT}] Building ${FUZZ_TARGET}..."

          # Each target gets its own output dir to avoid binary name collisions.
          # 1) AFL-instrumented binary for fuzzing.
          AFL_OUT="${WORK}/afl-out"
          rm -rf "${AFL_OUT}"
          zig build fuzz -Dfuzz-target="${FUZZ_TARGET}" --prefix "${AFL_OUT}"

          FUZZ_BIN=$(find_bin "${AFL_OUT}/bin" "afl")
          if [ ! -x "${FUZZ_BIN}" ]; then
            echo "ERROR: Fuzz binary not found or not executable: ${FUZZ_BIN}"
            exit 1
          fi
          TARGET_BINS["${FUZZ_TARGET}"]="${FUZZ_BIN}"
          echo "  AFL binary: ${FUZZ_BIN}"

          # 2) Plain binary for crash replay (no AFL instrumentation).
          REPLAY_OUT="${WORK}/replay-out"
          rm -rf "${REPLAY_OUT}"
          zig build fuzz-replay -Dfuzz-target="${FUZZ_TARGET}" --prefix "${REPLAY_OUT}"

          REPLAY_BIN=$(find_bin "${REPLAY_OUT}/bin" "replay")
          if [ ! -x "${REPLAY_BIN}" ]; then
            echo "ERROR: Replay binary not found or not executable: ${REPLAY_BIN}"
            exit 1
          fi
          REPLAY_BINS["${FUZZ_TARGET}"]="${REPLAY_BIN}"
          echo "  Replay binary: ${REPLAY_BIN}"
        done

        # ── Phase 2: Prepare seeds and Cairn targets (sequential, network I/O) ──
        echo ""
        echo "=========================================="
        echo "Preparing corpus for ${TARGET_COUNT} target(s)"
        echo "=========================================="

        declare -A CAIRN_TARGET_IDS
        declare -A RUN_IDS

        for i in "${!TARGET_NAMES[@]}"; do
          FUZZ_TARGET="${TARGET_NAMES[$i]}"
          NUM=$((i + 1))
          FUZZ_BIN="${TARGET_BINS[${FUZZ_TARGET}]}"
          SEEDS="work/${FUZZ_TARGET}/seeds"
          mkdir -p "${SEEDS}"

          echo "[${NUM}/${TARGET_COUNT}] Preparing ${FUZZ_TARGET}..."

          # Ensure Cairn target.
          CAIRN_TARGET_ID=$(cairn target ensure \
            -server "${CAIRN_SERVER}" \
            -repo "${REPO}" \
            -owner "${OWNER}" \
            -name "${FUZZ_TARGET}" \
            -type fuzz)
          CAIRN_TARGET_IDS["${FUZZ_TARGET}"]="${CAIRN_TARGET_ID}"

          # Start a run.
          RUN_ID=$(cairn run start \
            -server "${CAIRN_SERVER}" \
            -target-id "${CAIRN_TARGET_ID}" \
            -commit "${COMMIT}")
          RUN_IDS["${FUZZ_TARGET}"]="${RUN_ID}"
          echo "  Target ID: ${CAIRN_TARGET_ID}, Run ID: ${RUN_ID}"

          # Copy per-target seeds from repo.
          if [ -n "${SEED_DIR}" ] && [ -d "${SEED_DIR}/${FUZZ_TARGET}" ]; then
            REPO_SEED_COUNT=$(find "${SEED_DIR}/${FUZZ_TARGET}" -maxdepth 1 -type f | wc -l)
            if [ "${REPO_SEED_COUNT}" -gt 0 ]; then
              cp "${SEED_DIR}/${FUZZ_TARGET}"/* "${SEEDS}/" 2>/dev/null || true
              echo "  Copied ${REPO_SEED_COUNT} repo seeds"
            fi
          fi

          # Download existing corpus (best-effort; a fresh target has none).
          DL_START=$(date +%s)
          cairn corpus download \
            -server "${CAIRN_SERVER}" \
            -target-id "${CAIRN_TARGET_ID}" \
            -dir "${SEEDS}" || true
          DL_ELAPSED=$(( $(date +%s) - DL_START ))
          DL_COUNT=$(find "${SEEDS}" -maxdepth 1 -type f | wc -l)
          echo "  Corpus: ${DL_COUNT} entries (${DL_ELAPSED}s)"

          # afl-fuzz requires at least one seed input.
          if [ "$(find "${SEEDS}" -maxdepth 1 -type f | wc -l)" -eq 0 ]; then
            printf 'A' > "${SEEDS}/seed-0"
          fi

          # Minimize corpus before fuzzing (best-effort; failure keeps the
          # unminimized corpus).
          SEED_COUNT=$(find "${SEEDS}" -maxdepth 1 -type f | wc -l)
          if [ "${SEED_COUNT}" -gt 1 ]; then
            echo "  Minimizing corpus (${SEED_COUNT} inputs)..."
            CMIN_START=$(date +%s)
            MINIMIZED="work/${FUZZ_TARGET}/minimized"
            rm -rf "${MINIMIZED}"
            mkdir -p "${MINIMIZED}"
            if afl-cmin -i "${SEEDS}" -o "${MINIMIZED}" -- "${FUZZ_BIN}" >/dev/null 2>&1; then
              CMIN_ELAPSED=$(( $(date +%s) - CMIN_START ))
              MINIMIZED_COUNT=$(find "${MINIMIZED}" -maxdepth 1 -type f | wc -l)
              echo "  Minimized: ${SEED_COUNT} -> ${MINIMIZED_COUNT} inputs (${CMIN_ELAPSED}s)"
              rm -rf "${SEEDS}"
              mv "${MINIMIZED}" "${SEEDS}"
            else
              echo "  afl-cmin failed, using unminimized corpus"
              rm -rf "${MINIMIZED}"
            fi
          fi
        done

        # ── Phase 3: Fuzz all targets in parallel ──
        echo ""
        echo "=========================================="
        echo "Fuzzing ${TARGET_COUNT} target(s) in parallel (${NCPU} cores, ${DURATION}s each)"
        echo "=========================================="

        # Function that runs in a background subshell per target. Always
        # returns 0 (afl-fuzz's exit code is captured, not propagated) so a
        # failing fuzzer does not kill the whole step under `set -e`.
        fuzz_target() {
          local FUZZ_TARGET="$1"
          local FUZZ_BIN="$2"
          local SEEDS="$3"
          local FINDINGS="$4"
          local RESULT_FILE="$5"
          local SEED_COUNT="$6"

          mkdir -p "${FINDINGS}"
          echo "[${FUZZ_TARGET}] Starting AFL++ (${SEED_COUNT} seeds, ${DURATION}s)"

          local FUZZ_START=$(date +%s)
          local AFL_EXIT=0
          {
            AFL_NO_UI=1 \
            AFL_SKIP_CPUFREQ=1 \
            AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES=1 \
            AFL_NO_CRASH_README=1 \
            afl-fuzz \
              -V "${DURATION}" \
              -i "${SEEDS}" \
              -o "${FINDINGS}" \
              ${EXTRA_AFL_ARGS} \
              -- "${FUZZ_BIN}"
          } >/dev/null 2>&1 || AFL_EXIT=$?
          local FUZZ_ELAPSED=$(( $(date +%s) - FUZZ_START ))

          # Summary from fuzzer_stats.
          local STATS_FILE="${FINDINGS}/default/fuzzer_stats"
          if [ -f "${STATS_FILE}" ]; then
            local EXECS=$(grep -oP 'execs_done\s*:\s*\K\d+' "${STATS_FILE}" || echo "?")
            local PATHS=$(grep -oP 'corpus_count\s*:\s*\K\d+' "${STATS_FILE}" || echo "?")
            local CRASHES=$(grep -oP 'saved_crashes\s*:\s*\K\d+' "${STATS_FILE}" || echo "?")
            local SPEED=$(grep -oP 'execs_per_sec\s*:\s*\K[\d.]+' "${STATS_FILE}" || echo "?")
            local COVERAGE=$(grep -oP 'bitmap_cvg\s*:\s*\K[\d.]+%' "${STATS_FILE}" || echo "?")
            echo "[${FUZZ_TARGET}] Finished in ${FUZZ_ELAPSED}s: ${EXECS} execs (${SPEED}/s), ${PATHS} paths, ${CRASHES} crashes, ${COVERAGE} coverage (exit ${AFL_EXIT})"
          else
            echo "[${FUZZ_TARGET}] AFL++ exited with code ${AFL_EXIT} after ${FUZZ_ELAPSED}s"
          fi

          # Count crashes for the result file.
          local CRASH_COUNT=0
          local CRASH_DIR="${FINDINGS}/default/crashes"
          if [ -d "${CRASH_DIR}" ]; then
            CRASH_COUNT=$(find "${CRASH_DIR}" -maxdepth 1 -type f -name 'id:*' | wc -l)
          fi
          echo "${CRASH_COUNT}" > "${RESULT_FILE}"
        }

        FUZZ_PHASE_START=$(date +%s)
        PIDS=()
        for i in "${!TARGET_NAMES[@]}"; do
          FUZZ_TARGET="${TARGET_NAMES[$i]}"
          FUZZ_BIN="${TARGET_BINS[${FUZZ_TARGET}]}"
          SEEDS="work/${FUZZ_TARGET}/seeds"
          FINDINGS="work/${FUZZ_TARGET}/findings"
          RESULT_FILE="${RESULTS_DIR}/${FUZZ_TARGET}"
          SEED_COUNT=$(find "${SEEDS}" -maxdepth 1 -type f | wc -l)

          fuzz_target "${FUZZ_TARGET}" "${FUZZ_BIN}" "${SEEDS}" "${FINDINGS}" "${RESULT_FILE}" "${SEED_COUNT}" &
          PIDS+=($!)

          # Limit parallelism to available cores (waits for the oldest job,
          # which is the likeliest to finish first since all run for DURATION).
          if [ "${#PIDS[@]}" -ge "${NCPU}" ]; then
            wait "${PIDS[0]}"
            PIDS=("${PIDS[@]:1}")
          fi
        done

        # Wait for remaining jobs.
        for pid in "${PIDS[@]}"; do
          wait "${pid}"
        done
        FUZZ_PHASE_ELAPSED=$(( $(date +%s) - FUZZ_PHASE_START ))
        echo "Fuzzing phase completed in ${FUZZ_PHASE_ELAPSED}s"

        # ── Phase 4: Upload results sequentially ──
        echo ""
        echo "=========================================="
        echo "Uploading results"
        echo "=========================================="

        TOTAL_CRASHES=0
        for i in "${!TARGET_NAMES[@]}"; do
          FUZZ_TARGET="${TARGET_NAMES[$i]}"
          NUM=$((i + 1))
          FUZZ_BIN="${TARGET_BINS[${FUZZ_TARGET}]}"
          REPLAY_BIN="${REPLAY_BINS[${FUZZ_TARGET}]}"
          RUN_ID="${RUN_IDS[${FUZZ_TARGET}]}"
          CAIRN_TARGET_ID="${CAIRN_TARGET_IDS[${FUZZ_TARGET}]}"
          FINDINGS="work/${FUZZ_TARGET}/findings"

          echo "[${NUM}/${TARGET_COUNT}] ${FUZZ_TARGET}..."

          # Upload crashes.
          CRASH_DIR="${FINDINGS}/default/crashes"
          if [ -d "${CRASH_DIR}" ]; then
            for crash_file in "${CRASH_DIR}"/id:*; do
              [ -f "${crash_file}" ] || continue

              CRASH_NAME=$(basename "${crash_file}")
              SIG=""
              case "${CRASH_NAME}" in
                *,sig:*)
                  SIG=$(echo "${CRASH_NAME}" | sed 's/.*,sig:\([0-9]*\).*/\1/')
                  ;;
              esac

              # Replay using the non-AFL binary to get a proper stack trace.
              STACK_TRACE=""
              CRASH_MSG="AFL++ crash (${FUZZ_TARGET}): ${CRASH_NAME}"
              REPLAY_OUTPUT=$(timeout 10 "${REPLAY_BIN}" < "${crash_file}" 2>&1 || true)
              if [ -n "${REPLAY_OUTPUT}" ]; then
                STACK_TRACE="${REPLAY_OUTPUT}"
                FIRST_LINE=$(echo "${REPLAY_OUTPUT}" | grep -m1 -iE 'panic|error|fault|abort|overflow|undefined|sanitizer|SUMMARY' || true)
                if [ -n "${FIRST_LINE}" ]; then
                  CRASH_MSG="${FIRST_LINE}"
                fi
                echo "  Replay (${CRASH_NAME}):"
                echo "${REPLAY_OUTPUT}" | head -10 | sed 's/^/    /'
              else
                echo "  Replay produced no output for ${CRASH_NAME}"
              fi

              echo "  Uploading crash: ${CRASH_NAME}"
              # Build the upload argument list in the positional parameters so
              # optional flags are only added when their value is non-empty.
              set -- -server "${CAIRN_SERVER}" -repo "${REPO}" -owner "${OWNER}" \
                -commit "${COMMIT}" -run-id "${RUN_ID}" -type fuzz -file "${crash_file}" \
                -kind crash \
                -crash-message "${CRASH_MSG}"

              if [ -n "${STACK_TRACE}" ]; then
                set -- "$@" -stack-trace "${STACK_TRACE}"
              fi
              if [ -n "${TARGET_PLATFORM}" ]; then
                set -- "$@" -target "${TARGET_PLATFORM}"
              fi
              if [ -n "${SIG}" ]; then
                set -- "$@" -signal "${SIG}"
              fi

              cairn upload "$@"
              TOTAL_CRASHES=$((TOTAL_CRASHES + 1))
            done
          fi

          # Minimize and upload corpus.
          QUEUE_DIR="${FINDINGS}/default/queue"
          if [ -d "${QUEUE_DIR}" ]; then
            QUEUE_COUNT=$(find "${QUEUE_DIR}" -maxdepth 1 -type f -name 'id:*' | wc -l)
            if [ "${QUEUE_COUNT}" -gt 0 ]; then
              UPLOAD_DIR="work/${FUZZ_TARGET}/corpus-upload"
              rm -rf "${UPLOAD_DIR}"
              mkdir -p "${UPLOAD_DIR}"
              CMIN_START=$(date +%s)
              if afl-cmin -i "${QUEUE_DIR}" -o "${UPLOAD_DIR}" -- "${FUZZ_BIN}" >/dev/null 2>&1; then
                CMIN_ELAPSED=$(( $(date +%s) - CMIN_START ))
                UPLOAD_COUNT=$(find "${UPLOAD_DIR}" -maxdepth 1 -type f | wc -l)
                echo "  Corpus minimized: ${QUEUE_COUNT} -> ${UPLOAD_COUNT} entries (${CMIN_ELAPSED}s)"
              else
                echo "  afl-cmin failed, uploading full queue"
                rm -rf "${UPLOAD_DIR}"
                UPLOAD_DIR="${QUEUE_DIR}"
                UPLOAD_COUNT="${QUEUE_COUNT}"
              fi
              echo "  Uploading corpus (${UPLOAD_COUNT} entries)..."
              cairn corpus upload \
                -server "${CAIRN_SERVER}" \
                -target-id "${CAIRN_TARGET_ID}" \
                -run-id "${RUN_ID}" \
                -dir "${UPLOAD_DIR}"
            fi
          fi

          # Finish run (best-effort; a finish failure should not fail the job).
          cairn run finish -server "${CAIRN_SERVER}" -id "${RUN_ID}" || true
        done

        # ── Final report ──
        echo ""
        echo "=========================================="
        echo "Fuzzed ${TARGET_COUNT} target(s), ${TOTAL_CRASHES} total crash(es)"
        echo "=========================================="

        if [ "${TOTAL_CRASHES}" -gt 0 ]; then
          echo "WARNING: ${TOTAL_CRASHES} crash(es) found and uploaded to Cairn"
        else
          echo "OK: No crashes found"
        fi