summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGene Pasquet <dev@etenil.net>2026-02-08 15:50:56 +0000
committerGene Pasquet <dev@etenil.net>2026-02-08 15:50:56 +0000
commit0d83af279438397992d87ef268c4e6922d84ed77 (patch)
tree94136c95485b0095dd9f915035f47a9786790cf1
First version with annotations
-rw-r--r--algo1.lisp294
1 file changed, 294 insertions, 0 deletions
diff --git a/algo1.lisp b/algo1.lisp
new file mode 100644
index 0000000..4b678a9
--- /dev/null
+++ b/algo1.lisp
@@ -0,0 +1,294 @@
+;;;; ==========================================================================
+;;;; SYMBOLIC FEED-FORWARD NEURAL NETWORK (ASSOCIATIVE MEMORY)
+;;;; ==========================================================================
+;;;;
+;;;; OVERVIEW: This module implements a neural network designed for
+;;;; pattern recognition within symbolic grids. Instead of using
+;;;; traditional "Dense" layers of floating-point weights, it uses
+;;;; "Memory Blocks".
+;;;;
+;;;; HOW IT WORKS:
+;;;; 1. WINDOWING: The system slides a "lens" (the memory block size) over
+;;;; the input data (the input grid).
+;;;;
+;;;; 2. CORRELATION: It performs a symbolic comparison between the memory
+;;;; and the input. It checks for "Co-occurrence":
+;;;; - If the feature is present in BOTH, it's a positive match.
+;;;; - If the feature is absent in BOTH, it's also a positive match.
+;;;; - Mismatches generate negative signals.
+;;;;
+;;;; 3. NON-LINEAR ACTIVATION: The evidence sums (both for and against)
+;;;;    are passed through a "Rectified Polynomial": positive sums are
+;;;;    raised to a power (squared, cubed, ...), amplifying strong
+;;;;    patterns and suppressing weak noise; non-positive sums are
+;;;;    clamped to zero.
+;;;;
+;;;; 4. INFERENCE: The system aggregates the "votes" of all memories. If the
+;;;;    net signal is non-negative, the network confirms the presence of the
+;;;;    requested item (T); otherwise, it rejects it (NIL).
+;;;;
+;;;; PERSPECTIVE:
+;;;; Think of this as a "Fuzzy Grep" for 2D grids. It doesn't look for
+;;;; exact matches, but rather "statistical resonance" between a known
+;;;; template and a raw input patch.
+;;;; ==========================================================================
+
+
+
+;;; --- 1. THE ACTIVATION FUNCTION ---
+;;;
+;;; This defines how a "neuron" scales its output. By squaring the
+;;; result (degree 2), we make strong matches significantly more
+;;; influential than weak ones. The DEGREE is the knob that can be
+;;; cranked to 11 to control how the NN behaves.
(defun make-rectified-polynomial (degree)
  "Return a one-argument closure implementing a rectified polynomial
activation: for positive X it returns X raised to DEGREE, for zero or
negative X it returns 0.  Raising the signal to a power amplifies
strong matches relative to weak ones; DEGREE is the knob that controls
how sharply the network discriminates."
  (lambda (x)
    ;; "Rectified": only positive signals pass; the rest are clamped to 0.
    ;; (The original cond re-tested (not (plusp x)), which is redundant:
    ;; plusp is a boolean, so a plain if is exhaustive.)
    (if (plusp x)
        (expt x degree)
        0)))
+
+;;; --- 2. THE MAGNITUDE CALCULATOR ---
+;;;
+;;; This compares a row of input to a row of memory. It's essentially
+;;; calculating "Overlap" and "Divergence."
(defun sum-one-row
    (item row-in mem-in idx
     &key (predicate 'member) predicate-keys &allow-other-keys)
  "Compare one input row (ROW-IN) against one memory row (MEM-IN),
cell by cell, and return the list (PLUSES MINUSES): the accumulated
evidence for and against ITEM belonging at column IDX of this row.
Each cell is encoded as +1 when PREDICATE finds ITEM in it and -1
otherwise.  IDX may be NIL, in which case every column is treated as
context."
  (let ((pluses 0)
        (minuses 0))
    (loop :for mem-cell :in mem-in   ; walk memory and input rows
          :for in-cell :in row-in    ; in lockstep, column by column
          :for col :from 0
          ;; Encode presence of ITEM as a signed signal.
          :for m := (if (apply predicate item mem-cell predicate-keys) +1 -1)
          :for c := (if (apply predicate item in-cell predicate-keys) +1 -1)
          :do (cond
                ;; Target column: the memory's signal counts directly
                ;; towards the pluses and inverted towards the minuses.
                ((and idx (= col idx))
                 (incf pluses m)
                 (decf minuses m))
                ;; Context column: correlate input with memory.  Both
                ;; present (+1*+1) or both absent (-1*-1) yields +1;
                ;; the same correlation term feeds both tallies.
                (t
                 (incf pluses (* c m))
                 (incf minuses (* c m)))))
    (list pluses minuses)))
+
(when nil
  ;; Tests to understand how sum-one-row works.  A plus is added when
  ;; the symbol is in the memory cell; when the symbol occurs at the
  ;; target index, the memory signal counts fully towards the pluses
  ;; and against the minuses.  Eval each form with SLIME (C-x C-e at
  ;; the closing paren).
  (sum-one-row 'foo
               '((foo) () (bar))
               '((foo) (foo) (bar))
               1) ; (3 1) - strong sign that foo belongs there
  (sum-one-row 'foo
               '((bar) () (bar))
               '((bar) (bar) (foo))
               1) ; (-1 1) - undecided?
  (sum-one-row 'foo
               '((bar) () (bar))
               '((bar) (bar) (bar))
               1) ; (1 3) - allergy!
  (sum-one-row 'foo
               '((foo) () (bar))
               '((foo) (foo) (bar))
               nil) ; (1 1) - with no target index, every column is context
  )
+
+;;; --- 3. THE SPATIAL AGGREGATOR ---
+;;;
+;;; This loops through the rows of a single memory "block." Think of a
+;;; memory block.
(defun sum-one-memory
    (item input memory
     &rest keys
     &key target-row target-col
       (start-row 0) (start-col 0)
       rectified-polynomial &allow-other-keys)
  "Score one memory block against the patch of INPUT anchored at
\(START-ROW, START-COL), both defaulting to 0.  Each memory row is
compared with the corresponding input row via SUM-ONE-ROW; the
per-row evidence is accumulated, both totals are passed through
RECTIFIED-POLYNOMIAL, and the negative evidence is subtracted from
the positive evidence to yield a single signed score.
TARGET-ROW/TARGET-COL name the cell whose contents we are predicting;
remaining KEYS (e.g. :predicate) are forwarded to SUM-ONE-ROW."
  (loop :for mem-row :in memory
        :for count :from 0
        ;; nthcdr offsets into the input matrix so the block examines
        ;; the specific "patch" starting at (START-ROW, START-COL).
        ;; Defaulting the offsets to 0 makes omitting them safe
        ;; (previously nil offsets crashed nthcdr).
        :for whole-input-row :in (nthcdr start-row input)
        :for input-row := (nthcdr start-col whole-input-row)
        ;; Only the row being predicted carries a target column; the
        ;; TARGET-ROW guard keeps a missing target harmless.
        :for idx := (when (and target-row (= count target-row)) target-col)
        ;; Execute the row-level comparison.
        :for (row-plus row-minus)
          := (apply 'sum-one-row item input-row mem-row idx keys)
        ;; Accumulate the results for the entire memory block.
        :summing row-plus :into row-pluses
        :summing row-minus :into row-minuses
        :finally
           ;; Amp up strong signals, then subtract the 'negative
           ;; evidence' from the 'positive evidence'.
           (return
             (- (funcall rectified-polynomial row-pluses)
                (funcall rectified-polynomial row-minuses)))))
+
(when nil
  ;;; Tests for sum-one-memory.  Eval each form with SLIME.

  ;; Identical input and memory: every row resonates.  Evaluates to 12.
  (sum-one-memory 'foo
                  '(((foo) (foo))
                    ((bar) (bar))) ;; Input
                  '(((foo) (foo))
                    ((bar) (bar))) ;; Memory
                  :rectified-polynomial (make-rectified-polynomial 2)
                  :target-row 0 :target-col 0
                  :start-row 0 :start-col 0)

  ;; The above is like calling (you can try!):
  ;; This outputs (2 0)
  (sum-one-row 'foo
               '((foo) (foo))
               '((foo) (foo))
               0)
  ;; This outputs (2 2)
  (sum-one-row 'foo
               '((bar) (bar))
               '((bar) (bar))
               nil)
  ;; final output: 12
  (- (funcall (make-rectified-polynomial 2) 4)
     (funcall (make-rectified-polynomial 2) 2))

  ;; The mismatching second row cancels the matching first row:
  ;; pluses 0, minuses -2, so the result is poly(0) - poly(-2) = 0.
  (sum-one-memory 'foo
                  '(((foo) (foo))
                    ((baz) (baz)))
                  '(((foo) (foo))
                    ((foo) (foo)))
                  :rectified-polynomial (make-rectified-polynomial 2)
                  :target-row 0 :target-col 0
                  :start-row 0 :start-col 0)
  )
+
+;;; --- 4. Decision making ---
+;;;
+;;; This is the final layer. It asks every memory block for its
+;;; opinion on the input and combines them into a binary T/NIL.
(defun sum-memories (item input memories
                     &rest keys &key &allow-other-keys)
  "Final decision layer: poll every memory block in MEMORIES about
ITEM at the target cell of INPUT and reduce their votes to a boolean.
Returns two values: T when the net score is non-negative (ITEM is
confirmed) and NIL otherwise, plus the current contents of the target
cell.  KEYS are forwarded to SUM-ONE-MEMORY and should include
:rectified-polynomial; :start-row/:start-col/:target-row/:target-col
default to 0 when omitted (previously a missing key crashed the
coordinate arithmetic with nil)."
  ;; Identify the 'old' value that currently exists at the target
  ;; coordinates, so callers can see what the cell held before.
  (loop :with old := (elt (elt input
                               (+ (getf keys :start-row 0)
                                  (getf keys :target-row 0)))
                          (+ (getf keys :start-col 0)
                             (getf keys :target-col 0)))
        :for memory :in memories
        ;; Sum up the scores from every memory block in the "brain".
        :sum (apply 'sum-one-memory item input memory keys)
          :into total-score
        ;; Final threshold: any non-negative net signal counts as T.
        :finally (return (if (minusp total-score)
                             (values nil old)
                             (values t old)))))
+
(when nil
  ;;; Test cases for sum-memories -- eval the defparameters first!

  ;; A small "brain" of three 2x2 memory templates.
  (defparameter *memories*
    '((((foo) (foo)) ((bar) (bar)))
      (((baz) (baz)) ((foo) (foo)))
      (((qux) (qux)) ((qux) (qux)))))

  ;; Degree-2 activation shared by all the calls below.
  (defparameter *poly-2* (make-rectified-polynomial 2))

  ;; Returns T: the input matches the first template exactly.
  (sum-memories 'foo
                '(((foo) (foo)) ((bar) (bar))) ;; Input
                *memories* ;; The Library of templates
                :rectified-polynomial *poly-2*
                :start-row 0 :start-col 0
                :target-row 0 :target-col 0)

  ;; Returns NIL: the input resembles only the foo-free third template.
  (sum-memories 'foo
                '(((qux) (qux)) ((qux) (qux)))
                *memories*
                :rectified-polynomial *poly-2*
                :start-row 0 :start-col 0
                :target-row 0 :target-col 0)
  ;; Returns NIL: foo is not expected at (0, 0) of this pattern.
  (sum-memories 'foo
                '(((baz) (baz)) ((foo) (foo)))
                *memories*
                :rectified-polynomial *poly-2*
                :start-row 0 :start-col 0
                :target-row 0 :target-col 0)
  ;; Returns T (targetting the second row, where foo does occur).
  (sum-memories 'foo
                '(((baz) (baz)) ((foo) (foo)))
                *memories*
                :rectified-polynomial *poly-2*
                :start-row 0 :start-col 0
                :target-row 1 :target-col 0)
  )
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; Game AI for a tic-tac-toe board! ;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+
(defun ttt-candidates (board training-data)
  "Return a list of (ROW COL VERDICT) for every empty cell of the 3x3
BOARD, where VERDICT is SUM-MEMORIES' primary value (T/NIL): whether
placing the 'FOO marker there resonates with the winning patterns in
TRAINING-DATA."
  ;; Hoisted out of the loop: the activation closure is loop-invariant,
  ;; so there is no need to rebuild it for every candidate square.
  (let ((poly (make-rectified-polynomial 2)))
    (loop :for r :from 0 :to 2
          :append (loop
                    :for c :from 0 :to 2
                    ;; BUG FIX: inspect the BOARD argument, not the
                    ;; undefined free variable *CURRENT-BOARD*.
                    :when (null (elt (elt board r) c)) ;; spot is empty
                    :collect
                    (list r c (sum-memories
                               'foo
                               board
                               training-data
                               :rectified-polynomial poly
                               :start-row 0 :start-col 0
                               :target-row r :target-col c))))))
+
(defun play-ttt (board training-data)
  "Suggest moves for BOARD: rate every empty cell with TTT-CANDIDATES
and return the approved ones as a list of (ROW . COL) pairs."
  (let ((approved (remove-if-not #'third
                                 (ttt-candidates board training-data))))
    (mapcar (lambda (move) (cons (first move) (second move)))
            approved)))
+
(when nil
  ;; End-to-end demo: "train" on four winning 'foo patterns, then ask
  ;; the network which empty squares of BOARD look like winning moves.
  (let ((training-data
          '(;; 1. Horizontal Win (Top Row)
            (((foo) (foo) (foo))
             (( ) ( ) ( ))
             (( ) ( ) ( )))

            ;; 2. Vertical Win (Left Column)
            (((foo) ( ) ( ))
             ((foo) ( ) ( ))
             ((foo) ( ) ( )))

            ;; 3. Diagonal Win (Top-Left to Bottom-Right)
            (((foo) ( ) ( ))
             (( ) (foo) ( ))
             (( ) ( ) (foo)))

            ;; 4. Diagonal Win (Top-Right to Bottom-Left)
            ((( ) ( ) (foo))
             (( ) (foo) ( ))
             ((foo) ( ) ( )))))
        (board
          '(((foo) (foo) ( )) ;; Potential win at (0, 2)
            ((bar) ( ) ( ))
            (( ) (bar) ( )))))
    (play-ttt board training-data))
  ;; Output: '((0 . 2))
  )