about | summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
-rwxr-xr-x  .claude/hooks/validate-el.sh     87
-rw-r--r--  .claude/rules/elisp-testing.md   81
-rw-r--r--  .claude/rules/elisp.md           75
-rw-r--r--  .claude/rules/testing.md        153
-rw-r--r--  .claude/rules/verification.md    42
-rw-r--r--  .claude/settings.json            70
-rw-r--r--  .gitignore                        6
-rw-r--r--  CLAUDE.md                        66
-rwxr-xr-x  githooks/pre-commit              50
9 files changed, 630 insertions(+), 0 deletions(-)
diff --git a/.claude/hooks/validate-el.sh b/.claude/hooks/validate-el.sh
new file mode 100755
index 0000000..6f93d48
--- /dev/null
+++ b/.claude/hooks/validate-el.sh
@@ -0,0 +1,87 @@
+#!/usr/bin/env bash
+# Validate and test .el files after Edit/Write/MultiEdit.
+# PostToolUse hook: receives tool-call JSON on stdin.
+# Silent on success; on failure, prints emacs output and exits 2
+# so Claude sees the error and can correct it.
+#
+# Phase 1: check-parens + byte-compile
+# Phase 2: for modules/*.el, run matching tests/test-<stem>*.el
+
+set -u
+
+# Portable project root: prefer Claude Code's env var, fall back to deriving
+# from this script's location ($project/.claude/hooks/validate-el.sh).
+PROJECT_ROOT="${CLAUDE_PROJECT_DIR:-$(cd "$(dirname "$0")/../.." && pwd)}"
+
+f="$(jq -r '.tool_input.file_path // .tool_response.filePath // empty')"
+[ -z "$f" ] && exit 0
+[ "${f##*.}" = "el" ] || exit 0
+
+MAX_AUTO_TEST_FILES=20 # skip if more matches than this (large test suites)
+
+# --- Phase 1: syntax + byte-compile ---
+case "$f" in
+ */init.el|*/early-init.el)
+ # Byte-compile here would load the full package graph. Parens only.
+ if ! output="$(emacs --batch --no-site-file --no-site-lisp "$f" \
+ --eval '(check-parens)' 2>&1)"; then
+ printf 'PAREN CHECK FAILED: %s\n%s\n' "$f" "$output" >&2
+ exit 2
+ fi
+ ;;
+ *.el)
+ if ! output="$(emacs --batch --no-site-file --no-site-lisp \
+ -L "$PROJECT_ROOT" \
+ -L "$PROJECT_ROOT/modules" \
+ -L "$PROJECT_ROOT/tests" \
+ --eval '(package-initialize)' \
+ "$f" \
+ --eval '(check-parens)' \
+ --eval "(or (byte-compile-file \"$f\") (kill-emacs 1))" 2>&1)"; then
+ printf 'VALIDATION FAILED: %s\n%s\n' "$f" "$output" >&2
+ exit 2
+ fi
+ ;;
+esac
+
+# --- Phase 2: test runner ---
+# Determine which tests (if any) apply to this edit. Works for projects with
+# source at root, in modules/, or elsewhere — stem-based test lookup is the
+# common pattern.
+tests=()
+case "$f" in
+ */init.el|*/early-init.el)
+ : # Phase 1 handled it; skip test runner
+ ;;
+ "$PROJECT_ROOT/tests/testutil-"*.el)
+ stem="$(basename "${f%.el}")"
+ stem="${stem#testutil-}"
+ mapfile -t tests < <(find "$PROJECT_ROOT/tests" -maxdepth 1 -name "test-${stem}*.el" 2>/dev/null | sort)
+ ;;
+ "$PROJECT_ROOT/tests/test-"*.el)
+ tests=("$f")
+ ;;
+ *.el)
+ # Any other .el under the project — find matching tests by stem
+ stem="$(basename "${f%.el}")"
+ mapfile -t tests < <(find "$PROJECT_ROOT/tests" -maxdepth 1 -name "test-${stem}*.el" 2>/dev/null | sort)
+ ;;
+esac
+
+count="${#tests[@]}"
+if [ "$count" -ge 1 ] && [ "$count" -le "$MAX_AUTO_TEST_FILES" ]; then
+ load_args=()
+ for t in "${tests[@]}"; do load_args+=("-l" "$t"); done
+ if ! output="$(emacs --batch --no-site-file --no-site-lisp \
+ -L "$PROJECT_ROOT" \
+ -L "$PROJECT_ROOT/modules" \
+ -L "$PROJECT_ROOT/tests" \
+ --eval '(package-initialize)' \
+ -l ert "${load_args[@]}" \
+ --eval "(ert-run-tests-batch-and-exit '(not (tag :slow)))" 2>&1)"; then
+ printf 'TESTS FAILED for %s (%d test file(s)):\n%s\n' "$f" "$count" "$output" >&2
+ exit 2
+ fi
+fi
+
+exit 0
diff --git a/.claude/rules/elisp-testing.md b/.claude/rules/elisp-testing.md
new file mode 100644
index 0000000..6cb59b1
--- /dev/null
+++ b/.claude/rules/elisp-testing.md
@@ -0,0 +1,81 @@
+# Elisp Testing Rules
+
+Applies to: `**/tests/*.el`
+
+Implements the core principles from `testing.md`. All rules there apply here —
+this file covers Elisp-specific patterns.
+
+## Framework: ERT
+
+Use `ert-deftest` for all tests. One test = one scenario.
+
+## File Layout
+
+- `tests/test-<module>.el` — tests for `modules/<module>.el`
+- `tests/test-<module>--<helper>.el` — tests for a specific private helper (matches `<module>--<helper>` function naming)
+- `tests/testutil-<module>.el` — fixtures and mocks for one module
+- `tests/testutil-general.el`, `testutil-filesystem.el`, `testutil-org.el` — cross-module helpers
+
+Tests must `(require 'module-name)` before the testutil file that stubs its internals, unless documented otherwise. Order matters — a testutil that defines a stub can be shadowed by a later `require` of the real module.
+
+## Test Naming
+
+```elisp
+(ert-deftest test-<module>-<function>-<scenario> ()
+ "Normal/Boundary/Error: brief description."
+ ...)
+```
+
+Put the category (Normal, Boundary, Error) in the docstring so the category is grep-able.
+
+## Required Coverage
+
+Every non-trivial function needs at least:
+- One **Normal** case (happy path)
+- One **Boundary** case (empty, nil, min, max, unicode, long string)
+- One **Error** case (invalid input, missing resource, failure mode)
+
+Missing a category is a test gap. If three cases look near-identical, parametrize with a loop or `dolist` rather than copy-pasting.
+
+## TDD Workflow
+
+Write the failing test first. A failing test proves you understand the change. Assume the bug is in production code until the test proves otherwise — never fix the test before proving the test is wrong.
+
+For untested code, write a **characterization test** that captures current behavior before you change anything. It becomes the safety net for the refactor.
+
+## Mocking
+
+Mock at boundaries:
+- Shell: `cl-letf` on `shell-command`, `shell-command-to-string`, `call-process`
+- File I/O when tests shouldn't touch disk
+- Network: URL retrievers, HTTP clients
+- Time: `cl-letf` on `current-time`, `format-time-string`
+
+Never mock:
+- The code under test
+- Core Emacs primitives (buffer ops, string ops, lists)
+- Your own domain logic — restructure it to be testable instead
+
+## Idioms
+
+- `cl-letf` for scoped overrides (self-cleaning)
+- `with-temp-buffer` for buffer manipulation tests
+- `make-temp-file` with `.el` suffix for on-disk fixtures
+- Tests must run in any order; no shared mutable state
+
+## Running Tests
+
+```bash
+make test # All
+make test-file FILE=tests/test-foo.el # One file
+make test-name TEST=pattern # Match by test name pattern
+```
+
+A PostToolUse hook runs matching tests automatically after edits to a module, when the match count is small enough to be fast.
+
+## Anti-Patterns
+
+- Hardcoded timestamps — generate relative to `current-time` or mock
+- Testing implementation details (private storage structure) instead of behavior
+- Mocking the thing you're testing
+- Skipping a failing test without an issue to track it
diff --git a/.claude/rules/elisp.md b/.claude/rules/elisp.md
new file mode 100644
index 0000000..e641058
--- /dev/null
+++ b/.claude/rules/elisp.md
@@ -0,0 +1,75 @@
+# Elisp / Emacs Rules
+
+Applies to: `**/*.el`
+
+## Style
+
+- 2-space indent, no tabs
+- Hyphen-case for identifiers: `cj/do-thing`, not `cj/doThing`
+- Naming prefixes:
+ - `cj/name` — user-facing functions and commands (bound to keys, called from init)
+ - `cj/--name` — private helpers (double-dash signals "internal")
+ - `<module>/name` — module-scoped where appropriate (e.g., `calendar-sync/parse-ics`)
+- File header: `;;; foo-config.el --- brief description -*- lexical-binding: t -*-`
+- `(provide 'foo-config)` at the bottom of every module
+- `lexical-binding: t` is mandatory — no file without it
+
+## Function Design
+
+- Keep functions under 15 lines where possible
+- One responsibility per function
+- Extract helpers instead of nesting deeply — 5+ levels of nesting is a refactor signal
+- Prefer named helpers over lambdas for anything nontrivial
+- No premature abstraction — three similar lines beats a clever macro
+
+Small functions are the single strongest defense against paren errors. Deeply nested code is where AI and humans both fail.
+
+## Requires and Loading
+
+- Every `(require 'foo)` must correspond to a loadable file on the load-path
+- Byte-compile warnings about free variables usually indicate a missing `require` or a typo in a symbol name — read them
+- Use `use-package` for external (MELPA/ELPA) packages
+- Use plain `(require 'foo-config)` for internal modules
+- For optional features, `(when (require 'foo nil t) ...)` degrades gracefully if absent
+
+## Lexical-Binding Traps
+
+- `(boundp 'x)` where `x` is a lexical variable always returns nil. Bind with `defvar` at top level if you need `boundp` to work, or use the value directly.
+- `setq` on an undeclared free variable is a warning — use `let` for locals or `defvar` for module-level state
+- Closures capture by reference. Avoid capturing mutating loop variables in nested defuns.
+
+## Regex Gotchas
+
+- `\s` is NOT whitespace in Emacs regex. Use `[ \t]` or `\\s-` (syntax class).
+- `^` in `string-match` matches after `\n` OR at position 0 — use `(= (match-beginning 0) start)` for positional checks when that matters.
+- `replace-regexp-in-string` interprets backslashes in the replacement. Pass `t t` (FIXEDCASE LITERAL) when the replacement contains literal backslashes.
+
+## Keybindings
+
+- `keymap-global-set` for global; `keymap-set KEYMAP ...` for mode-local
+- Group module-specific bindings inside the module's file
+- Autoload cookies (`;;;###autoload`) don't activate through plain `(require ...)` — use the form directly, not an autoloaded wrapper
+
+## Module Template
+
+```elisp
+;;; foo-config.el --- Foo feature configuration -*- lexical-binding: t -*-
+
+;;; Commentary:
+;; One-line description.
+
+;;; Code:
+
+;; ... code ...
+
+(provide 'foo-config)
+;;; foo-config.el ends here
+```
+
+Then `(require 'foo-config)` in `init.el` (or a config aggregator).
+
+## Editing Workflow
+
+- A PostToolUse hook runs `check-parens` and `byte-compile-file` on every `.el` save
+- If it blocks, read the error — don't retry blindly
+- Prefer Write over repeated Edits for nontrivial new code; incremental edits accumulate subtle paren mismatches
diff --git a/.claude/rules/testing.md b/.claude/rules/testing.md
new file mode 100644
index 0000000..42cc528
--- /dev/null
+++ b/.claude/rules/testing.md
@@ -0,0 +1,153 @@
+# Testing Standards
+
+Applies to: `**/*`
+
+Core TDD discipline and test quality rules. Language-specific patterns
+(frameworks, fixture idioms, mocking tools) live in per-language testing files
+under `languages/<lang>/claude/rules/`.
+
+## Test-Driven Development (Default)
+
+TDD is the default workflow for all code, including demos and prototypes. **Write tests first, before any implementation code.** Tests are how you prove you understand the problem — if you can't write a failing test, you don't yet understand what needs to change.
+
+1. **Red**: Write a failing test that defines the desired behavior
+2. **Green**: Write the minimal code to make the test pass
+3. **Refactor**: Clean up while keeping tests green
+
+Do not skip TDD for demo code. Demos build muscle memory — the habit carries into production.
+
+### Understand Before You Test
+
+Before writing tests, invest time in understanding the code:
+
+1. **Explore the codebase** — Read the module under test, its callers, and its dependencies. Understand the data flow end to end.
+2. **Identify the root cause** — If fixing a bug, trace the problem to its origin. Don't test (or fix) surface symptoms when the real issue is deeper in the call chain.
+3. **Reason through edge cases** — Consider boundary conditions, error states, concurrent access, and interactions with adjacent modules. Your tests should cover what could actually go wrong, not just the obvious happy path.
+
+### Adding Tests to Existing Untested Code
+
+When working in a codebase without tests:
+
+1. Write a **characterization test** that captures current behavior before making changes
+2. Use the characterization test as a safety net while refactoring
+3. Then follow normal TDD for the new change
+
+## Test Categories (Required for All Code)
+
+Every unit under test requires coverage across three categories:
+
+### 1. Normal Cases (Happy Path)
+- Standard inputs and expected use cases
+- Common workflows and default configurations
+- Typical data volumes
+
+### 2. Boundary Cases
+- Minimum/maximum values (0, 1, -1, MAX_INT)
+- Empty vs null vs undefined (language-appropriate)
+- Single-element collections
+- Unicode and internationalization (emoji, RTL text, combining characters)
+- Very long strings, deeply nested structures
+- Timezone boundaries (midnight, DST transitions)
+- Date edge cases (leap years, month boundaries)
+
+### 3. Error Cases
+- Invalid inputs and type mismatches
+- Network failures and timeouts
+- Missing required parameters
+- Permission denied scenarios
+- Resource exhaustion
+- Malformed data
+
+## Test Organization
+
+Typical layout:
+
+```
+tests/
+ unit/ # One test file per source file
+ integration/ # Multi-component workflows
+ e2e/ # Full system tests
+```
+
+Per-language files may adjust this (e.g. Elisp collates ERT tests into
+`tests/test-<module>*.el` without subdirectories).
+
+## Naming Convention
+
+- Unit: `test_<module>_<function>_<scenario>_<expected>`
+- Integration: `test_integration_<workflow>_<scenario>_<outcome>`
+
+Examples:
+- `test_cart_apply_discount_expired_coupon_raises_error`
+- `test_integration_order_sync_network_timeout_retries_three_times`
+
+Languages that prefer camelCase, kebab-case, or other conventions keep the
+structure but use their idiom. Consistency within a project matters more than
+the specific case choice.
+
+## Test Quality
+
+### Independence
+- No shared mutable state between tests
+- Each test runs successfully in isolation
+- Explicit setup and teardown
+
+### Determinism
+- Never hardcode dates or times — generate them relative to `now()`
+- No reliance on test execution order
+- No flaky network calls in unit tests
+
+### Performance
+- Unit tests: <100ms each
+- Integration tests: <1s each
+- E2E tests: <10s each
+- Mark slow tests with appropriate decorators/tags
+
+### Mocking Boundaries
+Mock external dependencies at the system boundary:
+- Network calls (HTTP, gRPC, WebSocket)
+- File I/O and cloud storage
+- Time and dates
+- Third-party service clients
+
+Never mock:
+- The code under test
+- Internal domain logic
+- Framework behavior (ORM queries, middleware, hooks, buffer primitives)
+
+## Coverage Targets
+
+- Business logic and domain services: **90%+**
+- API endpoints and views: **80%+**
+- UI components: **70%+**
+- Utilities and helpers: **90%+**
+- Overall project minimum: **80%+**
+
+New code must not decrease coverage. PRs that lower coverage require justification.
+
+## TDD Discipline
+
+TDD is non-negotiable. These are the rationalizations agents use to skip it — don't fall for them:
+
+| Excuse | Why It's Wrong |
+|--------|----------------|
+| "This is too simple to need a test" | Simple code breaks too. The test takes 30 seconds. Write it. |
+| "I'll add tests after the implementation" | You won't, and even if you do, they'll test what you wrote rather than what was needed. Test-after validates implementation, not behavior. |
+| "Let me just get it working first" | That's not TDD. If you can't write a failing test, you don't understand the requirement yet. |
+| "This is just a refactor" | Refactors without tests are guesses. Write a characterization test first, then refactor while it stays green. |
+| "I'm only changing one line" | One-line changes cause production outages. Write a test that covers the line you're changing. |
+| "The existing code has no tests" | Start with a characterization test. Don't make the problem worse. |
+| "This is demo/prototype code" | Demos build habits. Untested demo code becomes untested production code. |
+| "I need to spike first" | Spikes are fine — then throw away the spike, write the test, and implement properly. |
+
+If you catch yourself thinking any of these, stop and write the test.
+
+## Anti-Patterns (Do Not Do)
+
+- Hardcoded dates or timestamps (they rot)
+- Testing implementation details instead of behavior
+- Mocking the thing you're testing
+- Shared mutable state between tests
+- Non-deterministic tests (random without seed, network in unit tests)
+- Testing framework behavior instead of your code
+- Ignoring or skipping failing tests without a tracking issue
diff --git a/.claude/rules/verification.md b/.claude/rules/verification.md
new file mode 100644
index 0000000..8993736
--- /dev/null
+++ b/.claude/rules/verification.md
@@ -0,0 +1,42 @@
+# Verification Before Completion
+
+Applies to: `**/*`
+
+## The Rule
+
+Do not claim work is done without fresh verification evidence. Run the command, read the output, confirm it matches the claim, then — and only then — declare success.
+
+This applies to every completion claim:
+- "Tests pass" → Run the test suite. Read the output. Confirm all green.
+- "Linter is clean" → Run the linter. Read the output. Confirm no warnings.
+- "Build succeeds" → Run the build. Read the output. Confirm no errors.
+- "Bug is fixed" → Run the reproduction steps. Confirm the bug is gone.
+- "No regressions" → Run the full test suite, not just the tests you added.
+
+## What Fresh Means
+
+- Run the verification command **now**, in the current session
+- Do not rely on a previous run from before your changes
+- Do not assume your changes didn't break something unrelated
+- Do not extrapolate from partial output — read the whole result
+
+## Red Flags
+
+If you find yourself using these words, you haven't verified:
+
+- "should" ("tests should pass")
+- "probably" ("this probably works")
+- "I believe" ("I believe the build is clean")
+- "based on the changes" ("based on the changes, nothing should break")
+
+Replace beliefs with evidence. Run the command.
+
+## Before Committing
+
+Before any commit:
+1. Run the test suite — confirm all tests pass
+2. Run the linter — confirm no new warnings
+3. Run the type checker — confirm no new errors
+4. Review the diff — confirm only intended changes are staged
+
+Do not commit based on the assumption that nothing broke. Verify.
diff --git a/.claude/settings.json b/.claude/settings.json
new file mode 100644
index 0000000..cca0eaa
--- /dev/null
+++ b/.claude/settings.json
@@ -0,0 +1,70 @@
+{
+ "permissions": {
+ "allow": [
+ "Bash(make)",
+ "Bash(make help)",
+ "Bash(make targets)",
+ "Bash(make test)",
+ "Bash(make test *)",
+ "Bash(make test-all)",
+ "Bash(make test-unit)",
+ "Bash(make test-integration)",
+ "Bash(make test-file *)",
+ "Bash(make test-name *)",
+ "Bash(make validate-parens)",
+ "Bash(make validate-modules)",
+ "Bash(make compile)",
+ "Bash(make lint)",
+ "Bash(make profile)",
+ "Bash(emacs --batch *)",
+ "Bash(emacs -Q --batch *)",
+ "Bash(git status)",
+ "Bash(git status *)",
+ "Bash(git diff)",
+ "Bash(git diff *)",
+ "Bash(git log)",
+ "Bash(git log *)",
+ "Bash(git show)",
+ "Bash(git show *)",
+ "Bash(git blame *)",
+ "Bash(git branch)",
+ "Bash(git branch -v)",
+ "Bash(git branch -a)",
+ "Bash(git branch --list *)",
+ "Bash(git remote)",
+ "Bash(git remote -v)",
+ "Bash(git remote show *)",
+ "Bash(git ls-files *)",
+ "Bash(git rev-parse *)",
+ "Bash(git cat-file *)",
+ "Bash(git stash list)",
+ "Bash(git stash show *)",
+ "Bash(jq *)",
+ "Bash(date)",
+ "Bash(date *)",
+ "Bash(which *)",
+ "Bash(file *)",
+ "Bash(ls)",
+ "Bash(ls *)",
+ "Bash(wc *)",
+ "Bash(du *)",
+ "Bash(readlink *)",
+ "Bash(realpath *)",
+ "Bash(basename *)",
+ "Bash(dirname *)"
+ ]
+ },
+ "hooks": {
+ "PostToolUse": [
+ {
+ "matcher": "Edit|Write|MultiEdit",
+ "hooks": [
+ {
+ "type": "command",
+ "command": "$CLAUDE_PROJECT_DIR/.claude/hooks/validate-el.sh"
+ }
+ ]
+ }
+ ]
+ }
+}
diff --git a/.gitignore b/.gitignore
index 95e126a..f48a36a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,9 @@
/todo.org
/docs/
/tests/*-output.log
+
+# --- elisp ruleset ---
+/.claude/settings.local.json
+/.claude/.cache/
+*.elc
+*.eln
diff --git a/CLAUDE.md b/CLAUDE.md
new file mode 100644
index 0000000..9b95e4f
--- /dev/null
+++ b/CLAUDE.md
@@ -0,0 +1,66 @@
+# CLAUDE.md
+
+## Project
+
+Elisp project. Customize this section with your own description, layout, and conventions.
+
+**Typical layout:**
+- `init.el`, `early-init.el` — entry points (Emacs config projects)
+- `modules/*.el` — feature modules
+- `tests/test-*.el` — ERT unit tests
+- `tests/testutil-*.el` — shared test fixtures and mocks
+
+## Build & Test Commands
+
+If the project has a Makefile, document targets here. Common pattern:
+
+```bash
+make test # All tests
+make test-file FILE=tests/test-foo.el # One file
+make test-name TEST=pattern # Match test names
+make validate-parens # Balanced parens in modules
+make validate-modules # Load all modules to verify they compile
+make compile # Byte-compile (writes .elc)
+make lint # checkdoc + package-lint + elisp-lint
+```
+
+Alternative build tools: `eldev`, `cask`, or direct `emacs --batch` invocations.
+
+## Language Rules
+
+See rule files in `.claude/rules/`:
+- `elisp.md` — code style and patterns
+- `elisp-testing.md` — ERT conventions
+- `verification.md` — verify-before-claim-done discipline
+
+## Git Workflow
+
+- Conventional commit prefixes: `feat:`, `fix:`, `refactor:`, `test:`, `docs:`, `chore:`
+- Pre-commit hook in `githooks/` scans for secrets and runs `check-parens` on staged `.el` files
+- Activate on fresh clone: `git config core.hooksPath githooks`
+
+## Problem-Solving Approach
+
+Investigate before fixing. When diagnosing a bug:
+1. Read the relevant module and trace what actually happens
+2. Identify the root cause, not a surface symptom
+3. Write a failing test that captures the correct behavior
+4. Fix, then re-run tests
+
+## Testing Discipline
+
+TDD is the default: write a failing test before any implementation. If you can't write the test, you don't yet understand the change. Details in `.claude/rules/elisp-testing.md`.
+
+## Editing Discipline
+
+A PostToolUse hook runs `check-parens` + `byte-compile-file` on every `.el` file after Edit/Write/MultiEdit. Byte-compile warnings (free variables, wrong argument counts) are signal — read them.
+
+Prefer Write over cumulative Edits for nontrivial new code. Small functions (under 15 lines) are near-impossible to get wrong; deeply nested code is where paren errors hide.
+
+## What Not to Do
+
+- Don't add features beyond what was asked
+- Don't refactor surrounding code when fixing a bug
+- Don't add comments to code you didn't change
+- Don't create abstractions for one-time operations
+- Don't commit `.env` files, credentials, or API keys — pre-commit hook catches common patterns but isn't a substitute for care
diff --git a/githooks/pre-commit b/githooks/pre-commit
new file mode 100755
index 0000000..909cde2
--- /dev/null
+++ b/githooks/pre-commit
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+# Pre-commit hook: secret scan + paren validation on staged .el files.
+# Use `git commit --no-verify` to bypass for confirmed false positives.
+
+set -u
+
+REPO_ROOT="$(git rev-parse --show-toplevel)"
+cd "$REPO_ROOT"
+
+# --- 1. Secret scan ---
+# Patterns for common credentials. Scans only added lines in the staged diff.
+SECRET_PATTERNS='(AKIA[0-9A-Z]{16}|sk-[a-zA-Z0-9_-]{20,}|-----BEGIN (RSA|DSA|EC|OPENSSH|PGP)( PRIVATE)?( KEY| KEY BLOCK)?-----|(api[_-]?key|api[_-]?secret|auth[_-]?token|secret[_-]?key|bearer[_-]?token|access[_-]?token|password)[[:space:]]*[:=][[:space:]]*["'"'"'][^"'"'"']{16,}["'"'"'])'
+
+secret_hits="$(git diff --cached -U0 --diff-filter=AM \
+ | grep '^+' | grep -v '^+++' \
+ | grep -iEn "$SECRET_PATTERNS" || true)"
+
+if [ -n "$secret_hits" ]; then
+ echo "pre-commit: potential secret in staged changes:" >&2
+ echo "$secret_hits" >&2
+ echo "" >&2
+ echo "Review the lines above. If this is a false positive (test fixture, documentation)," >&2
+ echo "bypass with: git commit --no-verify" >&2
+ exit 1
+fi
+
+# --- 2. Paren check on staged .el files ---
+staged_el="$(git diff --cached --name-only --diff-filter=AM | grep '\.el$' || true)"
+
+if [ -n "$staged_el" ]; then
+ paren_fail=""
+ while IFS= read -r f; do
+ [ -z "$f" ] && continue
+ [ -f "$f" ] || continue
+ if ! out="$(emacs --batch --no-site-file --no-site-lisp "$f" \
+ --eval '(check-parens)' 2>&1)"; then
+ paren_fail="${paren_fail}${f}:
+${out}
+
+"
+ fi
+ done <<< "$staged_el"
+
+ if [ -n "$paren_fail" ]; then
+ printf 'pre-commit: paren check failed:\n\n%s' "$paren_fail" >&2
+ exit 1
+ fi
+fi
+
+exit 0