From 77d1b104115d8e29a5217d82ba47fd9e542fbc8a Mon Sep 17 00:00:00 2001 From: Etenil Date: Sun, 15 Feb 2026 12:24:45 +0000 Subject: Set up ASDF and tests --- algo1.lisp | 305 ------------------------------------------------------- ffnn.asd | 24 +++++ package.lisp | 4 + src/algo1.lisp | 224 ++++++++++++++++++++++++++++++++++++++++ src/package.lisp | 2 + tests/algo1.lisp | 70 +++++++++++++ 6 files changed, 324 insertions(+), 305 deletions(-) delete mode 100644 algo1.lisp create mode 100644 ffnn.asd create mode 100644 package.lisp create mode 100644 src/algo1.lisp create mode 100644 src/package.lisp create mode 100644 tests/algo1.lisp diff --git a/algo1.lisp b/algo1.lisp deleted file mode 100644 index 5fd4882..0000000 --- a/algo1.lisp +++ /dev/null @@ -1,305 +0,0 @@ -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;;;; SYMBOLIC FEED-FORWARD NEURAL NETWORK ;;; -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;;;; -;;;; (C) 2026 Screwlisp, CC-By-SA -;;;; -;;;; Originally published on https://screwlisp.small-web.org/fundamental/a-better-deep-learning-algorithm/ -;;;; -;;;; OVERVIEW: This implements a neural network designed for pattern -;;;; recognition within symbolic grids. Instead of using traditional -;;;; "Dense" layers of floating-point weights, it uses "Memory -;;;; Blocks". -;;;; -;;;; HOW IT WORKS: -;;;; 1. WINDOWING: The system slides a "lens" (the memory block size) over -;;;; the input data (the input grid). -;;;; -;;;; 2. CORRELATION: It performs a symbolic comparison between the memory -;;;; and the input. It checks for "Co-occurrence": -;;;; - If the feature is present in BOTH, it's a positive match. -;;;; - If the feature is absent in BOTH, it's also a positive match. -;;;; - Mismatches generate negative signals. -;;;; -;;;; 3. NON-LINEAR ACTIVATION: The resulting sums are passed through a -;;;; "Rectified Polynomial." This is the "secret sauce" of Deep Learning. 
-;;;; By squaring or cubing the positive signals (the pluses), we amplify -;;;; strong patterns and suppress weak noise. -;;;; -;;;; 4. INFERENCE: The system aggregates the "votes" of all memories. If the -;;;; net signal is positive, the network confirms the presence of the -;;;; requested item (T); otherwise, it rejects it (NIL). -;;;; -;;;; PERSPECTIVE: -;;;; Think of this as a "Fuzzy Grep" for 2D grids. It doesn't look for -;;;; exact matches, but rather "statistical resonance" between a known -;;;; template and a raw input patch. -;;;; ========================================================================== - - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;;; 1. THE ACTIVATION FUNCTION ;;; -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;;; -;;; This defines how a "neuron" scales its output. By squaring the -;;; result (degree 2), we make strong matches significantly more -;;; influential than weak ones. The DEGREE is the knob that can be -;;; cranked to 11 to control how the NN behaves. -(defun make-rectified-polynomial (degree) - (lambda (x) - ;; "Rectified": Only positive values pass. - (cond ((plusp x) (expt x degree)) - ;; Negative or zero signals are killed. - ((not (plusp x)) '0)))) - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;;; 2. THE MAGNITUDE CALCULATOR ;;; -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;;; -;;; This compares a row of input to a row of memory. It's essentially -;;; calculating "Overlap" and "Divergence." -(defun sum-one-row - (item row-in mem-in idx - &key (predicate 'member) predicate-keys &allow-other-keys) - (loop :for me :in mem-in ; Walk the memory column-to-column and - :for ro :in row-in ; row-to-row, _simultaneously_! - :for count :from 0 - ;; Transform presence of 'item' into +1 or -1 - :for m := (if (apply predicate item me predicate-keys) +1 -1) - :for c := (if (apply predicate item ro predicate-keys) +1 -1) - - ;; CASE A: We are at the 'Target' - ;; If it matches, we sum the memory signal (+m or -m) directly. 
- :when (and idx (= count idx)) - :sum (* +1 m) :into pluses - :when (and idx (= count idx)) - :sum (* -1 m) :into minuses - - ;; CASE B: We are at any other column (The context) - ;; - ;; Multiply the input (c) by the memory (m). If both match (+1 - ;; * +1) or both lack the item (-1 * -1), result is +1. This - ;; is the "correlation" or "weighting" step. - :unless (and idx (= count idx)) - :sum (* c m) :into pluses - :unless (and idx (= count idx)) - :sum (* c m) :into minuses - :finally - ;; Returns the total "evidence" for and against this row. - (return (list pluses minuses)))) - -(when nil - ;; Test to understand how sum-one-row works. A plus is added when - ;; the symbol is in the memory row, if the symbol occurs on the - ;; index, 2 pluses are added. Eval with SLIME (C-x C-e at the - ;; closing brace.) - (sum-one-row 'foo - '((foo) () (bar)) - '((foo) (foo) (bar)) - 1) ; (3 1) - strong sign that foo belongs there - (sum-one-row 'foo - '((bar) () (bar)) - '((bar) (bar) (foo)) - 1) ; (-1 1) - undecided? - (sum-one-row 'foo - '((bar) () (bar)) - '((bar) (bar) (bar)) - 1) ; (1 3) - allergy! - (sum-one-row 'foo - '((foo) () (bar)) - '((foo) (foo) (bar)) - nil) ; (3 1) - strong sign that foo belongs there - ) - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;;; 3. THE SPATIAL AGGREGATOR ;;; -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;;; -;;; This loops through the rows of a single memory "block." Think of a -;;; memory block. -(defun sum-one-memory - (item input memory - &rest keys - &key target-row target-col - start-row start-col - rectified-polynomial &allow-other-keys) - (loop :for mem-row :in memory - :for count :from 0 - ;; 'nthcdr' is used to offset into the input matrix, - ;; allowing the network to look at specific "patches" of data. - :for whole-input-row :in (nthcdr start-row input) - :for input-row := (nthcdr start-col whole-input-row) - - ;; If the current row matches the row we are predicting, - ;; pass that column index down to sum-one-row. 
- :for idx := (when (= count target-row) target-col) - - ;; Execute the row-level comparison. - :for (row-plus row-minus) - := (apply 'sum-one-row item input-row mem-row idx keys) - - ;; Accumulate the results for the entire memory block. - :summing row-plus :into row-pluses - :summing row-minus :into row-minuses - :finally - ;; Apply the "Polynomial" to amp up strong signals, - ;; then subtract the 'negative evidence' from the 'positive evidence'. - (return - (- (funcall rectified-polynomial row-pluses) - (funcall rectified-polynomial row-minuses))))) - -(when nil - ;;; Tests for sum-one-memory - (sum-one-memory 'foo - '(((foo) (foo)) - ((bar) (bar))) ;; Input - '(((foo) (foo)) - ((bar) (bar))) ;; Memory - :rectified-polynomial (make-rectified-polynomial 2) - :target-row 0 :target-col 0 - :start-row 0 :start-col 0) - - ;; The above is like calling (you can try!): - ;; This outputs (2 0) - (sum-one-row 'foo - '((foo) (foo)) - '((foo) (foo)) - 0) - ;; This outputs (2 2) - (sum-one-row 'foo - '((bar) (bar)) - '((bar) (bar)) - nil) - ;; final output: 12 - (- (funcall (make-rectified-polynomial 2) 4) - (funcall (make-rectified-polynomial 2) 2)) - - - (sum-one-memory 'foo - '(((foo) (foo)) - ((baz) (baz))) - '(((foo) (foo)) - ((foo) (foo))) - :rectified-polynomial (make-rectified-polynomial 2) - :target-row 0 :target-col 0 - :start-row 0 :start-col 0) - ) - -;;;;;;;;;;;;;;;;;;;;;;;;;; -;;; 4. Decision making ;;; -;;;;;;;;;;;;;;;;;;;;;;;;;; -;;; -;;; This is the final layer. It asks every memory block for its -;;; opinion on the input and combines them into a binary T/NIL. -(defun sum-memories (item input memories - &rest keys &key &allow-other-keys) - ;; Identify the 'old' value that currently exists at the target coordinates. - (loop :with old := (elt (elt input - (+ (getf keys :start-row) - (getf keys :target-row))) - (+ (getf keys :start-col) - (getf keys :target-col))) - :for memory :in memories - ;; Sum up the scores from every memory block in the "brain". 
- :sum (apply 'sum-one-memory item input memory keys) - :into best-memory-yes - ;; Final threshold: If the total signal is positive, return True (T). - :finally (return (if (minusp best-memory-yes) - (values NIL old) - (values t old))))) - -(when nil - ;;; Test cases for sum-memories, eval the defparameters first! - - (defparameter *memories* - '((((foo) (foo)) ((bar) (bar))) - (((baz) (baz)) ((foo) (foo))) - (((qux) (qux)) ((qux) (qux))))) - - (defparameter *poly-2* (make-rectified-polynomial 2)) - - ;; Return T - (sum-memories 'foo - '(((foo) (foo)) ((bar) (bar))) ;; Input - *memories* ;; The Library of templates - :rectified-polynomial *poly-2* - :start-row 0 :start-col 0 - :target-row 0 :target-col 0) - - ;; Returns NIL - (sum-memories 'foo - '(((qux) (qux)) ((qux) (qux))) - *memories* - :rectified-polynomial *poly-2* - :start-row 0 :start-col 0 - :target-row 0 :target-col 0) - ;; returns NIL - (sum-memories 'foo - '(((baz) (baz)) ((foo) (foo))) - *memories* - :rectified-polynomial *poly-2* - :start-row 0 :start-col 0 - :target-row 0 :target-col 0) - ;; returns T (targetting second row) - (sum-memories 'foo - '(((baz) (baz)) ((foo) (foo))) - *memories* - :rectified-polynomial *poly-2* - :start-row 0 :start-col 0 - :target-row 1 :target-col 0) - ) - - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;;; Game AI for a tic-tac-toe board! 
;;; -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - - -(defun ttt-candidates (board training-data) - (loop :for r :from 0 :to 2 - :append (loop - :for c :from 0 :to 2 - :when (null (elt (elt *current-board* r) c)) ;; If the spot is empty - :collect - (list r c (sum-memories - 'foo - board - training-data - :rectified-polynomial (make-rectified-polynomial 2) - :start-row 0 :start-col 0 - :target-row r :target-col c))))) - -(defun play-ttt (board training-data) - (let* ((rated-positions (ttt-candidates board training-data)) - (candidates (remove-if-not (lambda (x) (nth 2 x)) rated-positions))) - (loop :for candidate :in candidates - :collect (cons (car candidate) (cadr candidate))))) - -(when nil - (let ((training-data - '(;; 1. Horizontal Win (Top Row) - (((foo) (foo) (foo)) - (( ) ( ) ( )) - (( ) ( ) ( ))) - - ;; 2. Vertical Win (Left Column) - (((foo) ( ) ( )) - ((foo) ( ) ( )) - ((foo) ( ) ( ))) - - ;; 3. Diagonal Win (Top-Left to Bottom-Right) - (((foo) ( ) ( )) - (( ) (foo) ( )) - (( ) ( ) (foo))) - - ;; 4. Diagonal Win (Top-Right to Bottom-Left) - ((( ) ( ) (foo)) - (( ) (foo) ( )) - ((foo) ( ) ( ))))) - (board - '(((foo) (foo) ( )) ;; Potential win at (0, 2) - ((bar) ( ) ( )) - (( ) (bar) ( ))))) - (play-ttt board training-data)) - ;; Output: '((0 . 
2)) - ) diff --git a/ffnn.asd b/ffnn.asd new file mode 100644 index 0000000..1e5ac64 --- /dev/null +++ b/ffnn.asd @@ -0,0 +1,24 @@ +(defsystem "ffnn" + :version "0.0.1" + :author "" + :license "" + :depends-on () + :components ((:module "src" + :components + ((:file "package") + (:file "algo1") + (:file "algo2")))) + :description "" + :in-order-to ((test-op (test-op "ffnn/tests")))) + +(defsystem "ffnn/tests" + :author "" + :license "" + :depends-on ("ffnn" + "rove") + :components ((:module "tests" + :components + ((:file "algo1") + (:file "algo2")))) + :description "Test system for ffnn" + :perform (test-op (op c) (symbol-call :rove :run c))) diff --git a/package.lisp b/package.lisp new file mode 100644 index 0000000..4bbd1d2 --- /dev/null +++ b/package.lisp @@ -0,0 +1,4 @@ +(defpackage ffnn + (:use :cl)) +(in-package :ffnn) + diff --git a/src/algo1.lisp b/src/algo1.lisp new file mode 100644 index 0000000..c3ef924 --- /dev/null +++ b/src/algo1.lisp @@ -0,0 +1,224 @@ +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;; SYMBOLIC FEED-FORWARD NEURAL NETWORK ;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;; +;;;; (C) 2026 Screwlisp, CC-By-SA +;;;; +;;;; Originally published on https://screwlisp.small-web.org/fundamental/a-better-deep-learning-algorithm/ +;;;; +;;;; OVERVIEW: This implements a neural network designed for pattern +;;;; recognition within symbolic grids. Instead of using traditional +;;;; "Dense" layers of floating-point weights, it uses "Memory +;;;; Blocks". +;;;; +;;;; HOW IT WORKS: +;;;; 1. WINDOWING: The system slides a "lens" (the memory block size) over +;;;; the input data (the input grid). +;;;; +;;;; 2. CORRELATION: It performs a symbolic comparison between the memory +;;;; and the input. It checks for "Co-occurrence": +;;;; - If the feature is present in BOTH, it's a positive match. +;;;; - If the feature is absent in BOTH, it's also a positive match. +;;;; - Mismatches generate negative signals. +;;;; +;;;; 3. 
NON-LINEAR ACTIVATION: The resulting sums are passed through a +;;;; "Rectified Polynomial." This is the "secret sauce" of Deep Learning. +;;;; By squaring or cubing the positive signals (the pluses), we amplify +;;;; strong patterns and suppress weak noise. +;;;; +;;;; 4. INFERENCE: The system aggregates the "votes" of all memories. If the +;;;; net signal is positive, the network confirms the presence of the +;;;; requested item (T); otherwise, it rejects it (NIL). +;;;; +;;;; PERSPECTIVE: +;;;; Think of this as a "Fuzzy Grep" for 2D grids. It doesn't look for +;;;; exact matches, but rather "statistical resonance" between a known +;;;; template and a raw input patch. +;;;; ========================================================================== + +(defpackage ffnn.algo1 + (:use :cl) + (:export #:make-rectified-polynomial) + (:export #:sum-memories)) +(in-package :ffnn.algo1) + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;; 1. THE ACTIVATION FUNCTION ;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;; +;;; This defines how a "neuron" scales its output. By squaring the +;;; result (degree 2), we make strong matches significantly more +;;; influential than weak ones. The DEGREE is the knob that can be +;;; cranked to 11 to control how the NN behaves. +(defun make-rectified-polynomial (degree) + (lambda (x) + ;; "Rectified": Only positive values pass. + (cond ((plusp x) (expt x degree)) + ;; Negative or zero signals are killed. + ((not (plusp x)) '0)))) + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;; 2. THE MAGNITUDE CALCULATOR ;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;; +;;; This compares a row of input to a row of memory. It's essentially +;;; calculating "Overlap" and "Divergence." +(defun sum-one-row + (item row-in mem-in idx + &key (predicate 'member) predicate-keys &allow-other-keys) + (loop :for me :in mem-in ; Walk the memory column-to-column and + :for ro :in row-in ; row-to-row, _simultaneously_! 
+ :for count :from 0 + ;; Transform presence of 'item' into +1 or -1 + :for m := (if (apply predicate item me predicate-keys) +1 -1) + :for c := (if (apply predicate item ro predicate-keys) +1 -1) + + ;; CASE A: We are at the 'Target' + ;; If it matches, we sum the memory signal (+m or -m) directly. + :when (and idx (= count idx)) + :sum (* +1 m) :into pluses + :when (and idx (= count idx)) + :sum (* -1 m) :into minuses + + ;; CASE B: We are at any other column (The context) + ;; + ;; Multiply the input (c) by the memory (m). If both match (+1 + ;; * +1) or both lack the item (-1 * -1), result is +1. This + ;; is the "correlation" or "weighting" step. + :unless (and idx (= count idx)) + :sum (* c m) :into pluses + :unless (and idx (= count idx)) + :sum (* c m) :into minuses + :finally + ;; Returns the total "evidence" for and against this row. + (return (list pluses minuses)))) + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;; 3. THE SPATIAL AGGREGATOR ;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;; +;;; This loops through the rows of a single memory "block." Think of a +;;; memory block. +;;; +;;; This is like calling +;;; This outputs (2 0) +;;; (sum-one-row 'foo +;;; '((foo) (foo)) +;;; '((foo) (foo)) +;;; 0) +;;; ;; This outputs (2 2) +;;; (sum-one-row 'foo +;;; '((bar) (bar)) +;;; '((bar) (bar)) +;;; nil) +;;; ;; final output: 12 +;;; (- (funcall (make-rectified-polynomial 2) 4) +;;; (funcall (make-rectified-polynomial 2) 2)) +(defun sum-one-memory + (item input memory + &rest keys + &key target-row target-col + start-row start-col + rectified-polynomial &allow-other-keys) + (loop :for mem-row :in memory + :for count :from 0 + ;; 'nthcdr' is used to offset into the input matrix, + ;; allowing the network to look at specific "patches" of data. + :for whole-input-row :in (nthcdr start-row input) + :for input-row := (nthcdr start-col whole-input-row) + + ;; If the current row matches the row we are predicting, + ;; pass that column index down to sum-one-row. 
+ :for idx := (when (= count target-row) target-col) + + ;; Execute the row-level comparison. + :for (row-plus row-minus) + := (apply 'sum-one-row item input-row mem-row idx keys) + + ;; Accumulate the results for the entire memory block. + :summing row-plus :into row-pluses + :summing row-minus :into row-minuses + :finally + ;; Apply the "Polynomial" to amp up strong signals, + ;; then subtract the 'negative evidence' from the 'positive evidence'. + (return + (- (funcall rectified-polynomial row-pluses) + (funcall rectified-polynomial row-minuses))))) + +;;;;;;;;;;;;;;;;;;;;;;;;;; +;;; 4. Decision making ;;; +;;;;;;;;;;;;;;;;;;;;;;;;;; +;;; +;;; This is the final layer. It asks every memory block for its +;;; opinion on the input and combines them into a binary T/NIL. +(defun sum-memories (item input memories + &rest keys &key &allow-other-keys) + ;; Identify the 'old' value that currently exists at the target coordinates. + (loop :with old := (elt (elt input + (+ (getf keys :start-row) + (getf keys :target-row))) + (+ (getf keys :start-col) + (getf keys :target-col))) + :for memory :in memories + ;; Sum up the scores from every memory block in the "brain". + :sum (apply 'sum-one-memory item input memory keys) + :into best-memory-yes + ;; Final threshold: If the total signal is positive, return True (T). + :finally (return (if (minusp best-memory-yes) + (values NIL old) + (values t old))))) + + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;; Game AI for a tic-tac-toe board! 
;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + + +(defun ttt-candidates (board training-data) + (loop :for r :from 0 :to 2 + :append (loop + :for c :from 0 :to 2 + :when (null (elt (elt board r) c)) ;; If the spot is empty + :collect + (list r c (sum-memories + 'foo + board + training-data + :rectified-polynomial (make-rectified-polynomial 2) + :start-row 0 :start-col 0 + :target-row r :target-col c))))) + +(defun play-ttt (board training-data) + (let* ((rated-positions (ttt-candidates board training-data)) + (candidates (remove-if-not (lambda (x) (nth 2 x)) rated-positions))) + (loop :for candidate :in candidates + :collect (cons (car candidate) (cadr candidate))))) + +(when nil + (let ((training-data + '(;; 1. Horizontal Win (Top Row) + (((foo) (foo) (foo)) + (( ) ( ) ( )) + (( ) ( ) ( ))) + + ;; 2. Vertical Win (Left Column) + (((foo) ( ) ( )) + ((foo) ( ) ( )) + ((foo) ( ) ( ))) + + ;; 3. Diagonal Win (Top-Left to Bottom-Right) + (((foo) ( ) ( )) + (( ) (foo) ( )) + (( ) ( ) (foo))) + + ;; 4. Diagonal Win (Top-Right to Bottom-Left) + ((( ) ( ) (foo)) + (( ) (foo) ( )) + ((foo) ( ) ( ))))) + (board + '(((foo) (foo) ( )) ;; Potential win at (0, 2) + ((bar) ( ) ( )) + (( ) (bar) ( ))))) + (play-ttt board training-data)) + ;; Output: '((0 . 
2)) + ) diff --git a/src/package.lisp b/src/package.lisp new file mode 100644 index 0000000..615c02e --- /dev/null +++ b/src/package.lisp @@ -0,0 +1,2 @@ +(defpackage ffnn + (:use :cl)) diff --git a/tests/algo1.lisp b/tests/algo1.lisp new file mode 100644 index 0000000..5d5f340 --- /dev/null +++ b/tests/algo1.lisp @@ -0,0 +1,70 @@ +(defpackage ffnn/tests/algo1 + (:use :cl + :ffnn + :ffnn.algo1 + :rove)) + +(in-package :ffnn/tests/algo1) + + +(deftest check-sum-one-row + (testing "sum-one-row" + (dolist (case '((foo ((foo) () (bar)) ((foo) (foo) (bar)) 1 (3 1)) + (foo ((bar) () (bar)) ((bar) (bar) (foo)) 1 (-1 1)) + (foo ((bar) () (bar)) ((bar) (bar) (bar)) 1 (1 3)) + (foo ((foo) () (bar)) ((foo) (foo) (bar)) nil (1 1)))) + (destructuring-bind (item input memory index expected) case + (ok (equal (ffnn.algo1::sum-one-row item input memory index) + expected)))))) + +(deftest check-sum-one-memory + (testing "Sum one memory" + (ok (equal (ffnn.algo1::sum-one-memory 'foo + '(((foo) (foo)) + ((bar) (bar))) ;; Input + '(((foo) (foo)) + ((bar) (bar))) ;; Memory + :rectified-polynomial (ffnn.algo1::make-rectified-polynomial 2) + :target-row 0 :target-col 0 + :start-row 0 :start-col 0) + 12)) + (ok (equal (ffnn.algo1::sum-one-memory 'foo + '(((foo) (foo)) + ((baz) (baz))) + '(((foo) (foo)) + ((foo) (foo))) + :rectified-polynomial (ffnn.algo1::make-rectified-polynomial 2) + :target-row 0 :target-col 0 + :start-row 0 :start-col 0) + 0)))) + +(deftest check-sum-memories + (testing "Sum memories" + (let ((memories '((((foo) (foo)) ((bar) (bar))) + (((baz) (baz)) ((foo) (foo))) + (((qux) (qux)) ((qux) (qux))))) + (poly-2 (ffnn.algo1::make-rectified-polynomial 2))) + (ok (sum-memories 'foo + '(((foo) (foo)) ((bar) (bar))) ;; Input + memories ;; The Library of templates + :rectified-polynomial poly-2 + :start-row 0 :start-col 0 + :target-row 0 :target-col 0)) + (ng (sum-memories 'foo + '(((qux) (qux)) ((qux) (qux))) + memories + :rectified-polynomial poly-2 + :start-row 0 :start-col 
0 + :target-row 0 :target-col 0)) + (ng (sum-memories 'foo + '(((baz) (baz)) ((foo) (foo))) + memories + :rectified-polynomial poly-2 + :start-row 0 :start-col 0 + :target-row 0 :target-col 0)) + (ok (sum-memories 'foo + '(((baz) (baz)) ((foo) (foo))) + memories + :rectified-polynomial poly-2 + :start-row 0 :start-col 0 + :target-row 1 :target-col 0))))) -- cgit v1.2.3