1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
|
;;;; ==========================================================================
;;;; SYMBOLIC FEED-FORWARD NEURAL NETWORK (ASSOCIATIVE MEMORY)
;;;; ==========================================================================
;;;;
;;;; OVERVIEW: This module implements a neural network designed for
;;;; pattern recognition within symbolic grids. Instead of using
;;;; traditional "Dense" layers of floating-point weights, it uses
;;;; "Memory Blocks".
;;;;
;;;; HOW IT WORKS:
;;;; 1. WINDOWING: The system slides a "lens" (the memory block size) over
;;;; the input data (the input grid).
;;;;
;;;; 2. CORRELATION: It performs a symbolic comparison between the memory
;;;; and the input. It checks for "Co-occurrence":
;;;; - If the feature is present in BOTH, it's a positive match.
;;;; - If the feature is absent in BOTH, it's also a positive match.
;;;; - Mismatches generate negative signals.
;;;;
;;;; 3. NON-LINEAR ACTIVATION: The resulting sums are passed through a
;;;; "Rectified Polynomial." This is the "secret sauce" of Deep Learning.
;;;; By squaring or cubing the positive signals (the pluses), we amplify
;;;; strong patterns and suppress weak noise.
;;;;
;;;; 4. INFERENCE: The system aggregates the "votes" of all memories. If the
;;;; net signal is positive, the network confirms the presence of the
;;;; requested item (T); otherwise, it rejects it (NIL).
;;;;
;;;; PERSPECTIVE:
;;;; Think of this as a "Fuzzy Grep" for 2D grids. It doesn't look for
;;;; exact matches, but rather "statistical resonance" between a known
;;;; template and a raw input patch.
;;;; ==========================================================================
;;; --- 1. THE ACTIVATION FUNCTION ---
;;;
;;; This defines how a "neuron" scales its output. By squaring the
;;; result (degree 2), we make strong matches significantly more
;;; influential than weak ones. The DEGREE is the knob that can be
;;; cranked to 11 to control how the NN behaves.
(defun make-rectified-polynomial (degree)
  "Return a one-argument activation function: X^DEGREE when X is
positive, 0 otherwise.  This is a 'rectified polynomial' -- like ReLU,
but with the positive branch raised to DEGREE so that strong matches
are amplified relative to weak ones."
  (lambda (x)
    ;; Rectification: only positive signals pass; zero and negative
    ;; signals are clamped to 0.  (The original cond redundantly
    ;; re-tested (not (plusp x)) and quoted the literal 0.)
    (if (plusp x)
        (expt x degree)
        0)))
;;; --- 2. THE MAGNITUDE CALCULATOR ---
;;;
;;; This compares a row of input to a row of memory. It's essentially
;;; calculating "Overlap" and "Divergence."
(defun sum-one-row
    (item row-in mem-in idx
     &key (predicate 'member) predicate-keys &allow-other-keys)
  "Compare one input row (ROW-IN) against one memory row (MEM-IN).
Each cell is mapped to +1 when ITEM is found in it by PREDICATE (with
PREDICATE-KEYS), -1 otherwise.  The cell at column IDX -- the
prediction target, when IDX is non-NIL -- contributes the memory
signal M directly: +M to the first total and -M to the second.  Every
other cell contributes its correlation (* C M) to both totals, so
co-presence and co-absence both count as agreement.
Returns a two-element list (PLUSES MINUSES): the evidence for and
against ITEM in this row."
  (let ((pluses 0)
        (minuses 0))
    (loop :for me :in mem-in
          :for ro :in row-in
          :for col :from 0
          :do (let ((m (if (apply predicate item me predicate-keys) +1 -1))
                    (c (if (apply predicate item ro predicate-keys) +1 -1)))
                (cond ((and idx (= col idx))
                       ;; Target column: the memory votes directly.
                       (incf pluses m)
                       (decf minuses m))
                      (t
                       ;; Context column: correlate input with memory.
                       (incf pluses (* c m))
                       (incf minuses (* c m))))))
    (list pluses minuses)))
(when nil
  ;; Tests to understand how sum-one-row works.  A plus is added when
  ;; the symbol co-occurs (or is co-absent) in input and memory; at
  ;; the target index the memory signal counts into the pluses and
  ;; against the minuses.  Eval each form with SLIME (C-x C-e at the
  ;; closing paren).
  (sum-one-row 'foo
               '((foo) () (bar))
               '((foo) (foo) (bar))
               1) ; (3 1) - strong sign that foo belongs there
  (sum-one-row 'foo
               '((bar) () (bar))
               '((bar) (bar) (foo))
               1) ; (-1 1) - undecided?
  (sum-one-row 'foo
               '((bar) () (bar))
               '((bar) (bar) (bar))
               1) ; (1 3) - allergy!
  (sum-one-row 'foo
               '((foo) () (bar))
               '((foo) (foo) (bar))
               nil) ; (1 1) - with no target index every column is
                    ; plain context (+1, -1, +1 into both totals), so
                    ; the result is NOT (3 1) as the target-1 case was
  )
;;; --- 3. THE SPATIAL AGGREGATOR ---
;;;
;;; This loops through the rows of a single memory "block", comparing
;;; each memory row against the matching row of the input patch.
(defun sum-one-memory
    (item input memory
     &rest keys
     &key target-row target-col
       (start-row 0) (start-col 0)
       rectified-polynomial &allow-other-keys)
  "Score one MEMORY block against the patch of INPUT whose top-left
corner is (START-ROW, START-COL).  TARGET-ROW/TARGET-COL (relative to
the patch) mark the cell being predicted; when TARGET-ROW is NIL every
cell is scored as context.  RECTIFIED-POLYNOMIAL is the activation
function applied to each accumulated total (see
MAKE-RECTIFIED-POLYNOMIAL).  Extra KEYS are forwarded to SUM-ONE-ROW.
Returns the net score (- (poly pluses) (poly minuses)).
Robustness fixes: START-ROW/START-COL now default to 0 instead of
erroring in NTHCDR when omitted, and a NIL TARGET-ROW no longer
crashes in the = comparison."
  (loop :for mem-row :in memory
        :for count :from 0
        ;; 'nthcdr' offsets into the input matrix so the memory is
        ;; compared against a specific "patch" of the data.
        :for whole-input-row :in (nthcdr start-row input)
        :for input-row := (nthcdr start-col whole-input-row)
        ;; Only the prediction row hands a target column down to
        ;; sum-one-row; the TARGET-ROW guard permits "no target".
        :for idx := (when (and target-row (= count target-row)) target-col)
        ;; Execute the row-level comparison.
        :for (row-plus row-minus)
          := (apply 'sum-one-row item input-row mem-row idx keys)
        ;; Accumulate the results for the entire memory block.
        :summing row-plus :into row-pluses
        :summing row-minus :into row-minuses
        :finally
           ;; Apply the polynomial to amplify strong totals, then
           ;; subtract the negative evidence from the positive.
           (return
             (- (funcall rectified-polynomial row-pluses)
                (funcall rectified-polynomial row-minuses)))))
(when nil
  ;;; Tests for sum-one-memory.
  (sum-one-memory 'foo
                  '(((foo) (foo))
                    ((bar) (bar))) ;; Input
                  '(((foo) (foo))
                    ((bar) (bar))) ;; Memory
                  :rectified-polynomial (make-rectified-polynomial 2)
                  :target-row 0 :target-col 0
                  :start-row 0 :start-col 0)
  ;; The above is like calling (you can try!):
  ;; This outputs (2 0)
  (sum-one-row 'foo
               '((foo) (foo))
               '((foo) (foo))
               0)
  ;; This outputs (2 2)
  (sum-one-row 'foo
               '((bar) (bar))
               '((bar) (bar))
               nil)
  ;; final output: 12  -- i.e. (- (poly (+ 2 2)) (poly (+ 0 2)))
  (- (funcall (make-rectified-polynomial 2) 4)
     (funcall (make-rectified-polynomial 2) 2))
  ;; Returns 0: row 0 matches (2 0) but row 1 contributes (-2 -2), so
  ;; both totals (0 and -2) are clamped to 0 by the rectification.
  (sum-one-memory 'foo
                  '(((foo) (foo))
                    ((baz) (baz)))
                  '(((foo) (foo))
                    ((foo) (foo)))
                  :rectified-polynomial (make-rectified-polynomial 2)
                  :target-row 0 :target-col 0
                  :start-row 0 :start-col 0)
  )
;;; --- 4. Decision making ---
;;;
;;; This is the final layer. It asks every memory block for its
;;; opinion on the input and combines them into a binary T/NIL.
(defun sum-memories (item input memories
                     &rest keys &key &allow-other-keys)
  "The decision layer: ask every memory block in MEMORIES for its
score on INPUT (via SUM-ONE-MEMORY, which receives all of KEYS) and
sum the votes.  Returns two values: T when the net signal is
non-negative (ITEM is believed to belong at the target cell), NIL
otherwise; and OLD, the value currently stored at the absolute target
coordinates (start + target offsets read from KEYS)."
  (let ((old (elt (elt input
                       (+ (getf keys :start-row)
                          (getf keys :target-row)))
                  (+ (getf keys :start-col)
                     (getf keys :target-col))))
        (total-score 0))
    ;; Every memory block votes; the votes are simply summed.
    (dolist (memory memories)
      (incf total-score (apply 'sum-one-memory item input memory keys)))
    ;; Threshold at zero: a non-negative total means "yes".
    (values (not (minusp total-score)) old)))
(when nil
  ;;; Test cases for sum-memories -- eval the defparameters first!
  (defparameter *memories*
    '((((foo) (foo)) ((bar) (bar)))
      (((baz) (baz)) ((foo) (foo)))
      (((qux) (qux)) ((qux) (qux)))))
  (defparameter *poly-2* (make-rectified-polynomial 2))
  ;; Returns T: the input is an exact copy of the first memory, whose
  ;; strong positive vote dominates the sum.
  (sum-memories 'foo
                '(((foo) (foo)) ((bar) (bar))) ;; Input
                *memories* ;; The Library of templates
                :rectified-polynomial *poly-2*
                :start-row 0 :start-col 0
                :target-row 0 :target-col 0)
  ;; Returns NIL: the all-qux input resonates with the qux memory,
  ;; which votes against foo at (0, 0).
  (sum-memories 'foo
                '(((qux) (qux)) ((qux) (qux)))
                *memories*
                :rectified-polynomial *poly-2*
                :start-row 0 :start-col 0
                :target-row 0 :target-col 0)
  ;; Returns NIL: the second memory matches, and it has no foo at (0, 0).
  (sum-memories 'foo
                '(((baz) (baz)) ((foo) (foo)))
                *memories*
                :rectified-polynomial *poly-2*
                :start-row 0 :start-col 0
                :target-row 0 :target-col 0)
  ;; Returns T (targeting second row): the second memory DOES have foo
  ;; at (1, 0).
  (sum-memories 'foo
                '(((baz) (baz)) ((foo) (foo)))
                *memories*
                :rectified-polynomial *poly-2*
                :start-row 0 :start-col 0
                :target-row 1 :target-col 0)
  )
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;; Game AI for a tic-tac-toe board! ;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ttt-candidates (board training-data)
  "Rate every empty square of the 3x3 BOARD.
For each empty cell (R C), ask the associative memory (TRAINING-DATA)
whether 'FOO belongs there.  Returns a list of (R C VERDICT) triples,
where VERDICT is the primary T/NIL value from SUM-MEMORIES.
BUG FIX: the empty-square test previously read the free variable
*CURRENT-BOARD* instead of the BOARD parameter, so the function
ignored its own argument (and errored unless that global existed)."
  (loop :for r :from 0 :to 2
        :append (loop
                  :for c :from 0 :to 2
                  ;; Only squares that are still empty on BOARD are
                  ;; candidates.
                  :when (null (elt (elt board r) c))
                    :collect
                    (list r c (sum-memories
                               'foo
                               board
                               training-data
                               :rectified-polynomial (make-rectified-polynomial 2)
                               :start-row 0 :start-col 0
                               :target-row r :target-col c)))))
(defun play-ttt (board training-data)
  "Return the (ROW . COL) pairs of every empty square that the network
endorses for 'FOO on BOARD, judged against TRAINING-DATA.  Order
follows the row-major scan performed by TTT-CANDIDATES."
  (loop :for (row col verdict) :in (ttt-candidates board training-data)
        ;; Keep only the squares the memory voted T for.
        :when verdict
          :collect (cons row col)))
(when nil
  ;; End-to-end demo: the training data encodes the four canonical
  ;; tic-tac-toe win patterns for 'foo; the board has two foos on the
  ;; top row, so the network should endorse completing it at (0, 2).
  (let ((training-data
          '(;; 1. Horizontal Win (Top Row)
            (((foo) (foo) (foo))
             (( )   ( )   ( ))
             (( )   ( )   ( )))
            ;; 2. Vertical Win (Left Column)
            (((foo) ( )   ( ))
             ((foo) ( )   ( ))
             ((foo) ( )   ( )))
            ;; 3. Diagonal Win (Top-Left to Bottom-Right)
            (((foo) ( )   ( ))
             (( )   (foo) ( ))
             (( )   ( )   (foo)))
            ;; 4. Diagonal Win (Top-Right to Bottom-Left)
            ((( )   ( )   (foo))
             (( )   (foo) ( ))
             ((foo) ( )   ( )))))
        (board
          '(((foo) (foo) ( )) ;; Potential win at (0, 2)
            ((bar) ( )   ( ))
            (( )   (bar) ( )))))
    (play-ttt board training-data))
  ;; Output: '((0 . 2))
  )
|