-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathGraphOfLife.py
More file actions
1785 lines (1489 loc) · 75.6 KB
/
GraphOfLife.py
File metadata and controls
1785 lines (1489 loc) · 75.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
GraphOfLife — Open-Ended Evolution on a Mutable Graph
==============================================================================
"""
from __future__ import annotations
from typing import Any, Dict, List, Tuple
from datetime import datetime
import hashlib
import json
import os
import shutil
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
# ----------------------------------------------------------------------------
# Configuration & Constants
# ----------------------------------------------------------------------------
# Total amount of tokens in the system (intended to be conserved).
TOTAL_TOKENS = 50_000
# ---- Token Creation ----
# Fresh tokens minted each phase, on top of the conserved pool.
CREATE_X_NEW_TOKENS_EACH_PHASE: int = 10
# Determinism: when False, all yes/no gates use argmax instead of sampling.
PROBABILISTIC_DECISIONS: bool = False
# ---- Blotto rule variants ----
# "ALLOCATE_AND_CONQUER" : highest allocator at node v implants its brain into v
# "ALLOCATE_AND_ROB" : winner-for-v *collects* all tokens allocated to v; no brain copying
# "ALLOCATE_AND_DO_NOTHING" : No Brain gets overwritten. But they dont want the links to disappear
BLOTTO_MODE: str = "ALLOCATE_AND_CONQUER"
# Revolutions: coalitions of smaller allocators may overthrow the top allocator.
ALLOW_REVOLUTIONS = True
FORCE_REVOLUTIONS = False
# ---- Blotto allocation scheduling ----
# "FULL_ALLOCATION" : one observation; each agent allocates all its tokens at once by relative scores
# "STEP_ALLOCATION_WEAKEST_FIRST": (current behavior) every agent with >=1 token allocates 1 token each round
# "STEP_ALLOCATION_STRONGEST_FIRST": only agents with the current per-round max remaining tokens allocate 1 token
# "STEP_ALLOCATION_AS_WISHED": they can choose when to allocate
BLOTTO_ALLOCATION_MODE: str = "STEP_ALLOCATION_STRONGEST_FIRST"
# ---- Reproduction decision variant ----
# When True, skip the binary gate and derive child tokens directly from fractions.
REPRO_CORE_FRACTIONS_ONLY: bool = False
# ---- Mutation policy toggles ----
MUTATE_ON_REPRO_COPY: bool = False
MUTATE_ON_BLOTTO_COPY: bool = False
MUTATE_ALL_AFTER_BLOTTO: bool = True
# ---- Mutation Hyperparameters ----
MUTATION_PROBABILITY: float = 0.25
MUTATION_NOISE_STD: float = 0.2
MUTATION_SPARSITY: float = 0.1
# ---- Input Noise (New) ----
# Amount of random inputs (Uniform -2 to 2) added to every observation.
RANDOM_INPUT_AMOUNT: int = 5
# ---- Walkermode ----
# PSEUDO_RANDOM_ONE_PER_ITERATION -> one per iteration
# PSEUDO_RANDOM_ONE_PER_TOKEN -> one per token
# PSEUDO_RANDOM_ONE_PER_TOKEN_LOG -> one per log2(token)+1
# PSEUDO_RANDOM_ONE_PER_TOKEN_RANDOM_WALK -> one per token random walk
# PSEUDO_RANDOM_ONE_PER_TOKEN_RANDOM_WALK_LOG -> one per log2(token)+1 token random walk
# WALK_ON_OWN_PER_TOKEN -> Active navigation driven by Brain, steps = tokens
# WALK_ON_OWN_PER_LOG_TOKEN -> Active navigation driven by Brain, steps = log2(tokens)+1
# NO_WALKER -> no walker
WALKER_MODE: str = "PSEUDO_RANDOM_ONE_PER_ITERATION"
# ---- Walker/reach reset policy ----
RESET_REACH_ON_CONQUER: bool = True
# ---- Exchange Messages ----
EXCHANGE_MESSAGES: bool = True
# Number of scalar channels per message vector.
MESSAGE_NUMBER_AMOUNT = 5
# ---- Token Redistribution Physics----
# "GLOBAL_UNIFORM": Tokens from removed nodes are distributed to all survivors (Manna from Heaven).
# "LOCAL_SCAVENGING": Tokens from removed nodes go to surviving neighbors. Isolated tokens go global. #TODO wrongly implemented
TOKEN_REDISTRIBUTION_MODE: str = "GLOBAL_UNIFORM"
# ---- Brain Architecture ----
BRAIN_HIDDEN_LAYERS: List[int] = [50, 45, 40, 35, 30]
# Visualization
DRAW: bool = True
DRAW_EVERY_X_ITERATIONS = 50
# Output directory (created eagerly so run folders can be enumerated).
BASE_DIR = os.path.join(os.path.dirname(__file__), "GraphOfLifeOutputs")
os.makedirs(BASE_DIR, exist_ok=True)
# Output Heads: named slices into the Brain's output vector.
# Layout is positional — later heads are offset by MESSAGE_NUMBER_AMOUNT.
HEAD = {
    "REPRO": slice(0, 4),
    "LINK": slice(4, 6),
    "SHIFT": slice(6, 8),
    "RECONNECT": slice(8, 12),
    "BLOTTO": 12,
    "WALKER": slice(13, 15),
    "MESSAGE": slice(15, 15 + MESSAGE_NUMBER_AMOUNT),
    "WALKER_DIR": 15 + MESSAGE_NUMBER_AMOUNT,
    "ALLOCATE_YN": slice(15 + MESSAGE_NUMBER_AMOUNT + 1, 15 + MESSAGE_NUMBER_AMOUNT + 3),
    "REVOLUTION_YN": slice(15 + MESSAGE_NUMBER_AMOUNT + 3, 15 + MESSAGE_NUMBER_AMOUNT + 5),
}
# ----------------------------------------------------------------------------
# Mathematical Utilities
# ----------------------------------------------------------------------------
def _six_quantiles(sorted_vals: List[float]) -> List[float]:
"""
Compresses a distribution into 6 representative quantiles.
Used to give the brain a summary of neighborhood statistics.
"""
if not sorted_vals:
return [0.0] * 6
qs = [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]
idx = [(len(sorted_vals) - 1) * q for q in qs]
out: List[float] = []
for x in idx:
i0 = int(np.floor(x))
i1 = int(np.ceil(x))
if i0 == i1:
out.append(float(sorted_vals[i0]))
else:
w = x - i0
out.append(float((1 - w) * sorted_vals[i0] + w * sorted_vals[i1]))
return out
def _sigmoid(x: np.ndarray) -> np.ndarray:
"""Stable Sigmoid activation."""
pos = x >= 0
neg = ~pos
z = np.empty_like(x, dtype=float)
z[pos] = 1.0 / (1.0 + np.exp(-x[pos]))
ex = np.exp(x[neg])
z[neg] = ex / (1.0 + ex)
return z
# ----------------------------------------------------------------------------
# The Brain (Neural Substrate)
# ----------------------------------------------------------------------------
class Brain:
    """Feed-forward neural controller; one per agent, copied on reproduction."""
    # Monotonically increasing id shared across all Brain instances.
    _next_brain_id = 1
    # Optional lineage recorder (callable taking an event dict); installed by the arena.
    rec = None
    def __init__(self) -> None:
        """
        Initializes a Feed-Forward Neural Network.
        Input Layer Anatomy:
        - Base (29): Local topology and wealth (Log-Scaled).
        - Blotto Context (25): Competition metrics + Revolution Metrics.
        - Walker Context (4): Spatial-temporal navigation data.
        - Messages (20): Signals from neighbors.
        - Noise (4): Entropy injection.
        """
        # Base breakdown: 1 (obs_is_self) + 4 (scalars) + 24 (quantiles) = 29
        # Contexts: 25 (Blotto) + 4 (Walker) = 29
        # Total Static Inputs = 58
        n_inputs = 58 + 4 * MESSAGE_NUMBER_AMOUNT + RANDOM_INPUT_AMOUNT
        hidden_sizes = BRAIN_HIDDEN_LAYERS
        # Outputs: 15 (Standard) + Msg + 3 (WalkerDir/AllocYN) + 2 (RevYN)
        n_outputs = 15 + MESSAGE_NUMBER_AMOUNT + 3 + 2
        assert n_inputs > 0 and n_outputs > 0
        self.layer_sizes = [int(n_inputs)] + [int(h) for h in hidden_sizes] + [int(n_outputs)]
        self.weights: List[np.ndarray] = []
        self.biases: List[np.ndarray] = []
        # Fan-in-scaled Gaussian init (std = 1/sqrt(fan_in)) keeps activations bounded.
        for fan_in, fan_out in zip(self.layer_sizes[:-1], self.layer_sizes[1:]):
            W = np.random.normal(0.0, 1.0 / np.sqrt(fan_in), size=(fan_out, fan_in))
            b = np.zeros((fan_out, 1))
            self.weights.append(W)
            self.biases.append(b)
        self.brain_id: int = Brain._next_brain_id
        self.parent_brain_id = None
        Brain._next_brain_id += 1
    def forward(self, x: np.ndarray | List[float]) -> np.ndarray:
        """Run the network: sigmoid hidden layers, linear (identity) output layer.

        Accepts a single input vector or a matrix whose columns are separate
        inputs; a single-column result is squeezed back to 1-D.
        """
        a = np.asarray(x, dtype=float)
        if a.ndim == 1:
            a = a.reshape(-1, 1)
        for li, (W, b) in enumerate(zip(self.weights, self.biases)):
            z = W @ a + b
            is_last = li == len(self.weights) - 1
            a = z if is_last else _sigmoid(z)
        return a.squeeze() if a.shape[1] == 1 else a
    def copy(self) -> "Brain":
        """Deep-copy weights/biases into a fresh Brain (new id, lineage recorded)."""
        new_brain = Brain()
        new_brain.weights = [w.copy() for w in self.weights]
        new_brain.biases = [b.copy() for b in self.biases]
        new_brain.parent_brain_id = self.brain_id
        if Brain.rec: Brain.rec({"t": "copy", "from": int(self.brain_id), "to": int(new_brain.brain_id)})
        return new_brain
    def mutate(self) -> None:
        """Applies Gaussian noise and sparsity to weights.

        With probability MUTATION_PROBABILITY the brain receives a sparse
        Gaussian perturbation plus a rare "structural reset" that redraws a
        random subset of entries. A mutated brain is assigned a fresh id and
        its old id becomes parent_brain_id (recorded via Brain.rec).
        """
        if np.random.random() > MUTATION_PROBABILITY:
            return
        old_id = self.brain_id
        reset_fraction = float(np.clip(MUTATION_SPARSITY, 0.0, 1.0))
        for i, (W, b) in enumerate(zip(self.weights, self.biases)):
            fan_in = W.shape[1]
            base_scale = 1.0 / np.sqrt(fan_in)
            w_std = float(MUTATION_NOISE_STD) * base_scale
            b_std = float(MUTATION_NOISE_STD) * base_scale
            # Weight Perturbation: only ~MUTATION_SPARSITY of entries get noise.
            if MUTATION_SPARSITY > 0.0 and w_std > 0.0:
                mask = np.random.random(W.shape) < MUTATION_SPARSITY
                W = W + np.random.normal(0.0, w_std, size=W.shape) * mask
            # Structural Reset (Rarely replace weights entirely)
            if (MUTATION_SPARSITY > 0.0) and (reset_fraction > 0.0):
                if np.random.random() < MUTATION_SPARSITY:
                    reset_mask = np.random.random(W.shape) < reset_fraction
                    if np.any(reset_mask):
                        W_new = np.random.normal(0.0, base_scale, size=W.shape)
                        W = np.where(reset_mask, W_new, W)
            self.weights[i] = W.astype(float, copy=False)
            # Bias Perturbation
            if MUTATION_SPARSITY > 0.0 and b_std > 0.0:
                maskb = np.random.random(b.shape) < MUTATION_SPARSITY
                b = b + np.random.normal(0.0, b_std, size=b.shape) * maskb
            if (MUTATION_SPARSITY > 0.0) and (reset_fraction > 0.0):
                if np.random.random() < MUTATION_SPARSITY:
                    reset_maskb = np.random.random(b.shape) < reset_fraction
                    if np.any(reset_maskb):
                        # NOTE(review): bias resets draw from b_std (0.01 floor),
                        # not base_scale as weights do — presumably intentional.
                        b_new = np.random.normal(0.0, b_std if b_std > 0 else 0.01, size=b.shape)
                        b = np.where(reset_maskb, b_new, b)
            self.biases[i] = b.astype(float, copy=False)
        self.parent_brain_id = old_id
        self.brain_id = Brain._next_brain_id
        Brain._next_brain_id += 1
        if Brain.rec:
            Brain.rec({"t": "mut", "from": int(self.parent_brain_id), "to": int(self.brain_id)})
# ----------------------------------------------------------------------------
# The Arena (GraphOfLife)
# ----------------------------------------------------------------------------
class GraphOfLife:
def __init__(self, G_init: nx.Graph, total_tokens: int) -> None:
    """
    Build the arena from a seed topology and endow every agent.

    Parameters
    ----------
    G_init : nx.Graph
        Seed topology; its nodes are relabeled to dense integer agent ids.
    total_tokens : int
        Total wealth in the system (declared conserved at module level).

    Side effects: creates a fresh run directory under BASE_DIR, snapshots
    the source and configuration, and installs the genotype-event recorder
    on the Brain class.
    """
    self.G = nx.Graph()
    self.next_agent_id = 0
    old2new: Dict[Any, int] = {}
    # Relabel the seed graph onto fresh, dense agent ids.
    for n in G_init.nodes():
        aid = self.next_agent_id
        self.next_agent_id += 1
        old2new[n] = aid
        self.G.add_node(aid)
    for u, v in G_init.edges():
        self.G.add_edge(old2new[u], old2new[v])
    # Initialize State
    self.total_tokens = int(total_tokens)
    self.tokens: Dict[int, int] = {aid: 0 for aid in self.G.nodes()}
    self.brains: Dict[int, Brain] = {aid: Brain() for aid in self.G.nodes()}
    self.reach_counts: Dict[int, Dict[int, int]] = {aid: {aid: 1} for aid in self.G.nodes()}
    self.messages: Dict[int, Dict[int, List[float]]] = {aid: {} for aid in self.G.nodes()}
    # Distribute Initial Wealth.
    # BUGFIX: the previous `int(self.total_tokens / N)` used float division
    # (precision risk for large totals) and silently dropped the remainder,
    # so the initial token sum fell short of total_tokens even though tokens
    # are declared conserved. Use integer divmod and hand the remainder out
    # one token each to the first nodes so the books balance exactly.
    # Also guards N == 0 (empty seed graph) instead of dividing by zero.
    N = self.G.number_of_nodes()
    if N > 0:
        base_share, remainder = divmod(self.total_tokens, N)
        for i, aid in enumerate(self.G.nodes()):
            self.tokens[aid] = base_share + (1 if i < remainder else 0)
    # Setup Logging: pick the next free GOL_<date>__NNN run folder.
    date_str = datetime.now().strftime("%Y_%m_%d")
    prefix = f"GOL_{date_str}__"
    existing_runs = []
    for name in os.listdir(BASE_DIR):
        full_path = os.path.join(BASE_DIR, name)
        if not os.path.isdir(full_path): continue
        if not name.startswith(prefix): continue
        suffix = name[len(prefix):]
        if len(suffix) == 3 and suffix.isdigit():
            existing_runs.append(int(suffix))
    next_idx = (max(existing_runs) + 1) if existing_runs else 1
    folder_name = f"{prefix}{next_idx:03d}"
    self.run_dir = os.path.join(BASE_DIR, folder_name)
    os.makedirs(self.run_dir, exist_ok=True)
    self._snapshot_source()
    self._save_configuration()
    # Genotype lineage log; Brain copy/mutate events are appended via the hook.
    self.genotype_events: List[Dict[str, int]] = []
    Brain.rec = self.genotype_events.append
def _neighbors(self, u: int) -> List[int]:
return list(self.G.neighbors(u))
def _precompute_features(self) -> Tuple[
    Dict[int, float],
    Dict[int, List[int]],
    Dict[int, List[float]],
    Dict[int, List[float]],
    Dict[int, float]
]:
    """
    Build the global sensory manifold in one O(N + E) pass.

    All wealth and degree values are log1p-scaled so the brain can perceive
    order-of-magnitude differences. Each node additionally gets 6-quantile
    summaries of its neighborhood's (already logged) tokens and degrees.

    Returns
    -------
    (log_degrees, neighbor_map, token_quantiles, degree_quantiles, log_tokens)
    """
    # Log-scale the global per-node scalars (negative tokens clamp to 0).
    log_tokens = {node: np.log1p(max(0, amount)) for node, amount in self.tokens.items()}
    log_degrees = {node: np.log1p(float(self.G.degree[node])) for node in self.G.nodes()}
    neighbor_map = {node: list(self.G.neighbors(node)) for node in self.G.nodes()}
    tok_quants: Dict[int, List[float]] = {}
    deg_quants: Dict[int, List[float]] = {}
    # Per-node neighborhood statistics over the logged values.
    for node, nbrs in neighbor_map.items():
        if nbrs:
            tok_quants[node] = _six_quantiles(sorted(log_tokens[w] for w in nbrs))
            deg_quants[node] = _six_quantiles(sorted(log_degrees[w] for w in nbrs))
        else:
            # Isolated node: neutral (all-zero) neighborhood summary.
            tok_quants[node] = [0.0] * 6
            deg_quants[node] = [0.0] * 6
    return log_degrees, neighbor_map, tok_quants, deg_quants, log_tokens
def _input_vec_fast(
    self,
    u: int,
    v: int,
    log_deg: Dict[int, float],
    q_tok: Dict[int, List[float]],
    q_deg: Dict[int, List[float]],
    log_tok_map: Dict[int, float],
    blotto_feats: List[float] | None = None,  # 25 dimensions now
    walker_feats: List[float] | None = None,  # 4 dimensions
    scale: float = 0.1,
) -> np.ndarray:
    """
    Assemble the full sensory vector the Brain observes for the pair (u, v).

    Stream order: self-flag (1), base topology (28), blotto context (25),
    walker context (4), four message slots (4*M), then uniform noise.
    Blotto and walker contexts are strictly separated: absent contexts are
    zero-filled. `scale` is accepted for interface compatibility but unused.
    """
    # 1. Base topology: own/target wealth and degree plus four quantile blocks.
    is_self = int(u == v)
    base = (
        [log_tok_map.get(u, 0.0), log_tok_map.get(v, 0.0), log_deg[u], log_deg[v]]
        + q_tok[u] + q_tok[v] + q_deg[u] + q_deg[v]
    )
    # 2./3. Optional contexts, log1p-compressed (negatives clamp to 0).
    def _log_block(feats: List[float] | None, width: int) -> List[float]:
        if feats is None:
            return [0.0] * width
        return [np.log1p(max(0.0, float(f))) for f in feats]
    blotto_vec = _log_block(blotto_feats, 25)   # competition + revolution metrics
    walker_vec = _log_block(walker_feats, 4)    # spatial-temporal awareness
    # 4. Social messaging: (u->u, u->v, v->u, v->v), padded/truncated to M.
    M = MESSAGE_NUMBER_AMOUNT
    def _msg(src: int, dst: int) -> List[float]:
        stored = self.messages.get(src, {}).get(dst, None)
        if stored is None:
            return [0.0] * M
        padded = list(stored[:M])
        padded += [0.0] * (M - len(padded))
        return padded
    msg_feats = _msg(u, u) + _msg(u, v) + _msg(v, u) + _msg(v, v)
    # 5. Thermodynamic noise for symmetry breaking.
    if RANDOM_INPUT_AMOUNT > 0:
        noise = np.random.uniform(-2.0, 2.0, size=RANDOM_INPUT_AMOUNT).tolist()
    else:
        noise = []
    return np.array([is_self] + base + blotto_vec + walker_vec + msg_feats + noise, dtype=float)
def _emit_messages(self, u: int, targets: List[int], Y: np.ndarray) -> None:
    """Store tanh-squashed message vectors from u toward each target agent."""
    if not EXCHANGE_MESSAGES:
        return
    M = MESSAGE_NUMBER_AMOUNT
    rows = Y[HEAD["MESSAGE"], :]
    # With a single message channel the sliced row may collapse to 1-D.
    if M == 1 and rows.ndim == 1:
        rows = np.array([rows])
    rows = np.tanh(rows)
    for j, v in enumerate(targets):
        payload = rows[:, j].astype(float).tolist()
        # Normalize the payload length to exactly M channels.
        if len(payload) < M:
            payload = payload + [0.0] * (M - len(payload))
        elif len(payload) > M:
            payload = payload[:M]
        self.messages.setdefault(u, {})[int(v)] = payload
# ------------------------------------------------------------------------
# Phase 1: The Walker & Reproduction
# ------------------------------------------------------------------------
def _update_reach_counts_passive(self) -> None:
    """
    Restored legacy logic for passive diffusion and random walks.
    This handles all WALKER_MODE variants except the new 'WALK_ON_OWN'.

    Rebuilds self.reach_counts from scratch each call. Each mode below
    produces, per node u, a map {reachable_node: weight} used later to
    sample long-distance link candidates.
    """
    G = self.G
    new_maps: Dict[int, Dict[int, int]] = {}
    nodes_list = list(G.nodes())
    # --------------------------------------------------------------------
    # Mode 1: Persistent Diffusion (One step per iteration)
    # --------------------------------------------------------------------
    if WALKER_MODE == "PSEUDO_RANDOM_ONE_PER_ITERATION":
        for u in nodes_list:
            # 1. Prune dead nodes from history
            prev = self.reach_counts.get(u, {u: 1})
            prev = {w: c for w, c in prev.items() if G.has_node(w)}
            # 2. If empty after prune, re-init basic
            if not prev:
                new_maps[u] = {u: 1}
                continue
            # 3. Single diffusion step: mass at r flows to r and its neighbors.
            acc: Dict[int, int] = {}
            for r, c in prev.items():
                # Flow to neighbors
                for w in list(G.neighbors(r)):
                    acc[w] = acc.get(w, 0) + int(c)
                # Stay (self-loop)
                acc[r] = acc.get(r, 0) + int(c)
            new_maps[u] = acc or {u: 1}
        self.reach_counts = new_maps
    # --------------------------------------------------------------------
    # Mode 2: Multi-step Diffusion (Renormalized 'Soft Attention')
    # --------------------------------------------------------------------
    elif WALKER_MODE == "PSEUDO_RANDOM_ONE_PER_TOKEN" or WALKER_MODE == "PSEUDO_RANDOM_ONE_PER_TOKEN_LOG":
        # Pre-fetch neighbors to avoid repeated G.neighbors calls
        adj_cache = {u: list(G.neighbors(u)) for u in nodes_list}
        for u in nodes_list:
            # 1. Initialize with FLOATs to support continuous diffusion
            # All core neighbors start with max intensity (1.0).
            current_counts: Dict[int, float] = {u: 1.0}
            for v in adj_cache[u]:
                current_counts[v] = 1.0
            # 2. Determine horizon based on tokens
            tok_count = int(self.tokens.get(u, 0))
            if WALKER_MODE == "PSEUDO_RANDOM_ONE_PER_TOKEN_LOG":
                steps = int(np.log2(tok_count)) + 1 if tok_count > 0 else 0
            else:
                steps = tok_count
            # 3. Iterate diffusion 'steps' times
            for _ in range(steps):
                next_counts: Dict[int, float] = {}
                # Standard Diffusion: Mass at r spreads to r and neighbors
                for r, count in current_counts.items():
                    # Optimization: Skip negligible contributions
                    if count < 1e-9:
                        continue
                    # Retrieve neighbors (safe fallback if node is new/distant)
                    r_neighbors = adj_cache.get(r)
                    if r_neighbors is None:
                        if G.has_node(r):
                            r_neighbors = list(G.neighbors(r))
                        else:
                            r_neighbors = []
                    # Add mass (accumulate)
                    # Self-loop
                    next_counts[r] = next_counts.get(r, 0.0) + count
                    # Neighbors
                    for w in r_neighbors:
                        next_counts[w] = next_counts.get(w, 0.0) + count
                # --- RENORMALIZATION STEP ---
                # Normalize so the 'hottest' node has value 1.0 to prevent overflow
                if next_counts:
                    max_val = max(next_counts.values())
                    if max_val > 0:
                        scale = 1.0 / max_val
                        for k in next_counts:
                            next_counts[k] *= scale
                current_counts = next_counts
            new_maps[u] = current_counts
        self.reach_counts = new_maps
    # --------------------------------------------------------------------
    # Mode 3: Monte Carlo Random Walk (Dirac Delta)
    # --------------------------------------------------------------------
    elif WALKER_MODE == "PSEUDO_RANDOM_ONE_PER_TOKEN_RANDOM_WALK" or WALKER_MODE == "PSEUDO_RANDOM_ONE_PER_TOKEN_RANDOM_WALK_LOG":
        # 1. Pre-fetch adjacency
        adj_cache = {n: list(G.neighbors(n)) for n in nodes_list}
        for u in nodes_list:
            # 2. Determine Walk Length (note: +2/+1 offsets differ from Mode 2)
            tokens = int(self.tokens.get(u, 0))
            if WALKER_MODE == "PSEUDO_RANDOM_ONE_PER_TOKEN_RANDOM_WALK_LOG":
                steps = int(np.log2(tokens)) + 2 if tokens > 0 else 1
            else:
                steps = tokens + 1
            curr = u
            # 3. Perform the Monte Carlo Random Walk
            for _ in range(steps):
                neighbors = adj_cache.get(curr, [])
                if not neighbors:
                    break  # Trapped
                # Uniform random selection of neighbor
                idx = np.random.randint(len(neighbors))
                curr = neighbors[idx]
            # 4. Record the final destination (a single Dirac point, weight 1)
            new_maps[u] = {curr: 1}
        self.reach_counts = new_maps
    # --------------------------------------------------------------------
    # Mode 4: Disabled
    # --------------------------------------------------------------------
    elif WALKER_MODE == "NO_WALKER":
        for u in nodes_list:
            new_maps[u] = {u: 1}
        self.reach_counts = new_maps
    else:
        # Fallback for unknown modes (or if ACTIVE mode is set but this was called by mistake)
        # Default to self-reach only to prevent crashes
        for u in nodes_list:
            new_maps[u] = {u: 1}
        self.reach_counts = new_maps
def _perform_active_walk(
    self,
    u: int,
    log_deg: Dict[int, float],
    q_tok: Dict[int, List[float]],
    q_deg: Dict[int, List[float]],
    log_tok_map: Dict[int, float]
) -> List[int]:
    """
    The Homunculus: An active, brain-driven walker.
    The agent projects a viewpoint to traverse the graph, accumulating
    spatial-temporal context to decide where to form new links.

    Returns a single-element list holding the walker's final position.
    The step budget is the agent's token count (or log2(tokens)+1 for
    LOG modes); each step the brain scores "stay here" plus each
    neighbor via the WALKER_DIR head and moves to the best candidate.
    """
    curr = u
    tokens = int(self.tokens.get(u, 0))
    # Calculate exploration budget (Log Wealth)
    if "LOG" in WALKER_MODE:
        total_steps = int(np.log2(tokens)) + 1 if tokens > 0 else 1
    else:
        total_steps = tokens
    # Distance helper (robust to disconnected components)
    # NOTE: returns int 0 for self, 9999.0 sentinel when unreachable.
    def get_dist(source, target):
        if source == target: return 0
        try:
            return nx.shortest_path_length(self.G, source=source, target=target)
        except nx.NetworkXNoPath:
            return 9999.0
    for step_idx in range(total_steps):
        if not self.G.has_node(curr): break
        neighbors = list(self.G.neighbors(curr))
        # Candidate 0 is always "stay put"; the rest are adjacent moves.
        candidates = [curr] + neighbors
        # --- Spatial-Temporal Context ---
        steps_taken = step_idx
        steps_left = total_steps - step_idx
        dist_from_home_current = get_dist(u, curr)
        X_cols = []
        for cand in candidates:
            if cand == curr:
                dist_from_home_cand = dist_from_home_current
            else:
                dist_from_home_cand = get_dist(u, cand)
            # Pack Context for the Brain
            # 1. Past Effort, 2. Future Budget, 3. Current Range, 4. Projected Range
            walker_context = [
                float(steps_taken),
                float(steps_left),
                float(dist_from_home_current),
                float(dist_from_home_cand)
            ]
            # Activate Walker Cortex (blotto_feats=None)
            vec = self._input_vec_fast(
                curr, cand, log_deg, q_tok, q_deg, log_tok_map,
                blotto_feats=None,
                walker_feats=walker_context
            )
            X_cols.append(vec)
        X = np.column_stack(X_cols)
        Y = self.brains[u].forward(X)
        # Decision: Where to step next?
        dir_scores = Y[HEAD["WALKER_DIR"], :]
        if PROBABILISTIC_DECISIONS:
            # Sample proportionally to the clipped scores (uniform fallback).
            vals = np.maximum(0.0, dir_scores)
            s = float(vals.sum())
            probs = (vals / s) if s > 0 else np.full_like(vals, 1.0 / len(vals))
            idx = int(np.random.choice(len(candidates), p=probs))
        else:
            idx = int(np.argmax(dir_scores))
        curr = candidates[idx]
    # Return the final destination found by the walker
    return [curr]
def reproduction_phase(self, t: int) -> str:
    """
    Agents use their wealth to reproduce, modify topology, and create links
    to distant nodes discovered by the Active Walker.

    Pipeline: (1) walker discovery (active or passive diffusion sampling),
    (2) per-agent neural decisions (reproduce, link child, shift edges,
    reconnect, long-distance links), (3) batched topology edits + cleanup.
    Returns the path of the saved step-log file.
    """
    log: Dict[str, Any] = {"phase": "reproduction", "pre_state": self._snapshot_graph(), "decisions": []}
    # O(N) Precomputation of Sensory Manifold
    log_deg, neighs, q_tok, q_deg, log_tok_map = self._precompute_features()
    # 1. The Walker (Discovery)
    walker_candidates_map: Dict[int, List[int]] = {}
    if "WALK_ON_OWN" in WALKER_MODE:
        # Active mode: only funded agents get to walk.
        for u in list(self.G.nodes()):
            if self.tokens.get(u, 0) > 0:
                cands = self._perform_active_walk(u, log_deg, q_tok, q_deg, log_tok_map)
                walker_candidates_map[u] = cands
    else:
        # Passive mode: diffuse reach maps, then sample one far (non-neighbor)
        # node per agent, weighted by its diffusion mass.
        self._update_reach_counts_passive()
        for u in list(self.G.nodes()):
            rc_map = self.reach_counts.get(u, {u: 1})
            neighbors_u = set(neighs[u])
            rc_items_far = [
                (v, c) for v, c in rc_map.items()
                if v != u and v not in neighbors_u and self.G.has_node(v) and c > 0
            ]
            if rc_items_far:
                nodes_far, weights_far = zip(*rc_items_far)
                weights_arr = np.asarray(weights_far, dtype=float)
                wsum = float(weights_arr.sum())
                if wsum > 0.0:
                    probs_far = weights_arr / wsum
                    idx_far = int(np.random.choice(len(nodes_far), p=probs_far))
                    walker_candidates_map[u] = [int(nodes_far[idx_far])]
    # 2. Decision Making (edits are queued and applied after the loop so every
    # agent decides against the same pre-phase topology).
    shifts_to_apply: List[Tuple[int, int, int]] = []
    reconns_to_apply: List[Tuple[int, int, int]] = []
    new_links_to_apply: List[Tuple[int, int]] = []
    for u in list(self.G.nodes()):
        t_u = int(self.tokens.get(u, 0))
        if t_u <= 0: continue
        # Candidates: Self + Neighbors + Walker Discovery
        neighbors_u = list(neighs[u])
        core_candidates = [u] + neighbors_u
        w_cands = walker_candidates_map.get(u, [])
        w_cands = [wc for wc in w_cands if self.G.has_node(wc)]
        all_candidates = core_candidates + w_cands
        # Observe Environment (Standard Cortex, No Special Context)
        X_cols = [self._input_vec_fast(
            u, v, log_deg, q_tok, q_deg, log_tok_map,
            blotto_feats=None,
            walker_feats=None
        ) for v in all_candidates]
        X = np.column_stack(X_cols)
        Y = self.brains[u].forward(X)
        self._emit_messages(u, all_candidates, Y)
        # Parse Neural Outputs (one column per candidate)
        repro_logits_all = Y[HEAD["REPRO"], :]
        link_logits_all = Y[HEAD["LINK"], :]
        shift_logits_all = Y[HEAD["SHIFT"], :]
        reconn_logits_all = Y[HEAD["RECONNECT"], :]
        walker_logits_all = Y[HEAD["WALKER"], :]
        # A. Reproduction (Gate) — only the core (self+neighbors) columns vote.
        repro_core = repro_logits_all[:, :len(core_candidates)]
        if REPRO_CORE_FRACTIONS_ONLY:
            # Direct investment calculation (Logic from Old Code)
            frac_vec = np.mean(repro_core[2:4, :], axis=1)
            vals = np.maximum(0.0, frac_vec)
            s = float(np.sum(vals))
            probs = (vals / s) if s > 0.0 else np.full_like(vals, 1.0 / len(vals))
            child_tokens = int(np.floor(probs[0] * t_u))
        else:
            # Binary Gate Decision (Logic from Old Code)
            yes_logit = float(np.mean(repro_core[0, :]))
            no_logit = float(np.mean(repro_core[1, :]))
            if PROBABILISTIC_DECISIONS:
                y = max(0.0, yes_logit)
                n = max(0.0, no_logit)
                s = y + n
                p_yes = (y / s) if s > 0.0 else 0.0
                will_reproduce = (np.random.rand() < p_yes)
            else:
                will_reproduce = (yes_logit > no_logit)
            if will_reproduce:
                # Calculate Investment Fraction
                frac_vec = np.mean(repro_core[2:4, :], axis=1)
                vals = np.maximum(0.0, frac_vec)
                s = float(np.sum(vals))
                probs = (vals / s) if s > 0.0 else np.full_like(vals, 1.0 / len(vals))
                child_tokens = int(np.floor(probs[0] * t_u))
            else:
                child_tokens = 0
        child_tokens = max(0, min(int(t_u), int(child_tokens)))
        rec: Dict[str, Any] = {
            "agent_id": int(u), "tokens_before": int(t_u), "repro_tokens": int(child_tokens),
            "child_created": False, "link_choices": [], "shift_choices": [],
            "reconnect_choices": [], "walker_decisions": []
        }
        if child_tokens >= 1:
            # B. Create Child (endowed with child_tokens; parent keeps the rest)
            child_tokens = max(1, min(child_tokens, t_u))
            keep_tokens = t_u - child_tokens
            child_brain = self.brains[u].copy()
            if MUTATE_ON_REPRO_COPY: child_brain.mutate()
            cid = self.next_agent_id
            self.next_agent_id += 1
            self.G.add_node(cid)
            # Link Child to Neighbors (per-candidate yes/no from the LINK head)
            chosen_links = []
            for col_idx, v in enumerate(core_candidates):
                yes_l = float(link_logits_all[0, col_idx])
                no_l = float(link_logits_all[1, col_idx])
                choose = bool(yes_l > no_l)
                if choose: chosen_links.append(v)
                rec["link_choices"].append({"candidate": int(v), "yes": yes_l, "no": no_l, "chosen": choose})
            for v in chosen_links:
                if v != cid and self.G.has_node(v): self.G.add_edge(cid, v)
            self.tokens[cid] = child_tokens
            self.brains[cid] = child_brain
            #self.reach_counts[cid] = {int(cid): 1}
            neighbors_cid = [int(w) for w in self.G.neighbors(cid)]
            self.reach_counts[cid] = {int(cid): 1, **{nv: 1 for nv in neighbors_cid}}
            self.messages[cid] = {}
            self.tokens[u] = keep_tokens
            rec.update({"child_created": True, "child_id": int(cid)})
            # C. Shift Edges (Handover relations to child)
            # Column 0 is self, so neighbor idx maps to column 1+idx.
            for idx, v in enumerate(neighbors_u):
                col_idx = 1 + idx
                sy, sn = float(shift_logits_all[0, col_idx]), float(shift_logits_all[1, col_idx])
                shifted = bool(sy > sn)
                rec["shift_choices"].append({"edge": (u, v), "shifted": shifted})
                if shifted: shifts_to_apply.append((u, v, cid))
        # D. Reconnect (Rewire existing edges) — independent of reproduction.
        reconnect_votes: List[Dict[str, float]] = []
        for idx, v in enumerate(neighbors_u):
            col_idx = 1 + idx
            reconnect_votes.append({
                "edge": (int(u), int(v)),
                "no": float(reconn_logits_all[0, col_idx]),
                "yes": float(reconn_logits_all[1, col_idx]),
                "link": float(reconn_logits_all[2, col_idx]),
                "target": float(reconn_logits_all[3, col_idx])
            })
        if reconnect_votes:
            # Gate: aggregate yes/no votes over all incident edges.
            sum_no = sum(rv["no"] for rv in reconnect_votes)
            sum_yes = sum(rv["yes"] for rv in reconnect_votes)
            if PROBABILISTIC_DECISIONS:
                y, n = max(0.0, sum_yes), max(0.0, sum_no)
                p = (y / (y + n)) if (y + n) > 0 else 0.0
                do_rec = bool(np.random.rand() < p)
            else:
                do_rec = bool(sum_yes > sum_no)
            if do_rec:
                # Pick the edge to drop ("link" scores)...
                scores = [rv["link"] for rv in reconnect_votes]
                if PROBABILISTIC_DECISIONS:
                    vals = np.maximum(0.0, scores)
                    s = vals.sum()
                    probs = vals / s if s > 0 else np.ones(len(scores)) / len(scores)
                    idx_e = int(np.random.choice(len(reconnect_votes), p=probs))
                else:
                    idx_e = int(np.argmax(scores))
                old_v = int(reconnect_votes[idx_e]["edge"][1])
                # ...and the neighbor to rewire toward ("target" scores).
                t_scores = [rv["target"] for rv in reconnect_votes]
                if PROBABILISTIC_DECISIONS:
                    vals = np.maximum(0.0, t_scores)
                    s = vals.sum()
                    probs = vals / s if s > 0 else np.ones(len(t_scores)) / len(t_scores)
                    idx_t = int(np.random.choice(len(reconnect_votes), p=probs))
                else:
                    idx_t = int(np.argmax(t_scores))
                new_v = int(reconnect_votes[idx_t]["edge"][1])
                if new_v != u and new_v != old_v and self.G.has_node(new_v):
                    reconns_to_apply.append((u, old_v, new_v))
                    rec["reconnect_choices"].append({"old": (int(u), int(old_v)), "new": int(new_v)})
        # E. Walker Link Creation (Long-distance connections)
        # Walker candidates occupy the columns after the core candidates.
        offset = len(core_candidates)
        for i, far_node in enumerate(w_cands):
            col_idx = offset + i
            wy = float(walker_logits_all[0, col_idx])
            wn = float(walker_logits_all[1, col_idx])
            want_link = bool(wy > wn)
            already = self.G.has_edge(u, far_node) or (u == far_node)
            if want_link and not already and self.G.has_node(far_node):
                new_links_to_apply.append((u, far_node))
            rec["walker_decisions"].append({
                "candidate": int(far_node), "created_link": want_link and not already
            })
        log["decisions"].append(rec)
    # 3. Apply Topology Edits (queued above; guarded against stale state)
    for (u, v, cid) in shifts_to_apply:
        if self.G.has_node(cid) and self.G.has_edge(u, v):
            if not self.G.has_edge(cid, v) and cid != v: self.G.add_edge(cid, v)
            self.G.remove_edge(u, v)
    for (u, old_v, new_v) in reconns_to_apply:
        if self.G.has_edge(u, old_v) and u != new_v:
            self.G.remove_edge(u, old_v)
            if not self.G.has_edge(u, new_v): self.G.add_edge(u, new_v)
    for (u, v) in new_links_to_apply:
        if self.G.has_node(u) and self.G.has_node(v) and u != v:
            self.G.add_edge(u, v)
    self.G.remove_edges_from(list(nx.selfloop_edges(self.G)))
    log["cleanup"] = self._cleanup_and_redistribute()
    log["post_state"] = self._snapshot_graph()
    # Flush the lineage events accumulated via the Brain.rec hook this phase.
    log["genotype_events"] = list(self.genotype_events)
    self.genotype_events.clear()
    if t % DRAW_EVERY_X_ITERATIONS == 0 and DRAW:
        self._draw(f"Round {t} — After Phase 1", f"step_{2 * t :05d}_phase1.png")
    return self._save_step_file(2 * t, log)
# ------------------------------------------------------------------------
# Phase 2: Blotto (Competition)
# ------------------------------------------------------------------------
def blotto_phase(self, t: int) -> str:
"""
Agents compete for node dominance.
They observe neighbors and 'bid' tokens to conquer or rob them.
"""
from collections import defaultdict
log: Dict[str, Any] = {
"phase": "blotto",
"pre_state": self._snapshot_graph(),
"allocations": [],
"incoming_offers": {},
"winners": {},
"pruned_edges": [],
}
tokens_before_phase = dict(self.tokens)
# --------------------------------------------------------------------
# 1. Precompute Sensory Manifold (New Code Logic)
# --------------------------------------------------------------------
log_deg, neighs, q_tok, q_deg, log_tok_map = self._precompute_features()
# --------------------------------------------------------------------
# 2. Observation Pass (Generate Messages)
# --------------------------------------------------------------------
for u in list(self.G.nodes()):
targets = [u] + neighs[u]
# Use New Code input vector (walker_feats=None)
X_cols = [self._input_vec_fast(
u, v, log_deg, q_tok, q_deg, log_tok_map,
blotto_feats=None,
walker_feats=None
) for v in targets]
X = np.column_stack(X_cols)
Y = self.brains[u].forward(X)
self._emit_messages(u, targets, Y)
# --------------------------------------------------------------------
# 3. Setup Allocation State
# --------------------------------------------------------------------
remaining = {u: int(self.tokens.get(u, 0)) for u in self.G.nodes()}
incoming_totals = {v: 0 for v in self.G.nodes()}
per_target_allocators: Dict[int, Dict[int, int]] = {v: {} for v in self.G.nodes()}
revolution_allocators: Dict[int, Dict[int, int]] = {v: {} for v in self.G.nodes()} # NEW: Revolution pool
u_sent_to_v = defaultdict(int)
edge_flow = {tuple(sorted(e)): 0 for e in self.G.edges()}
allocation_sequence = {u: [] for u in self.G.nodes()}
# Helper: Determine "King" of a node
def leader_info(v: int) -> Tuple[int, set[int]]:
allocs = per_target_allocators[v]
if not allocs: return 0, set()
m = max(allocs.values())
return m, {s for s, a in allocs.items() if a == m}
# Helper: Calculate hypothetical Revolution Winner (for Context)
def check_revolution_status(v: int) -> Tuple[bool, int | None]:
"""Returns (Did Revolution Win?, Winner ID) based on CURRENT state."""
if not ALLOW_REVOLUTIONS: return False, None
# 1. Identify the "Hegemon" / Standard King (Global Max Allocator)
allocs = per_target_allocators.get(v, {})
if not allocs: return False, None
max_val = max(allocs.values())
# Deterministic tie-breaking for input stability: pick largest Agent ID
hegemon_candidates = [a for a, amt in allocs.items() if amt == max_val]
hegemon = max(hegemon_candidates)
hegemon_tokens = max_val
# 2. Get Revolutionaries
revs = revolution_allocators.get(v, {})
if not revs: return False, None
# 3. Build the "Mob"
# Mob = All revolutionaries EXCEPT the Hegemon
# (Even if Hegemon is in revs, they are the target, so they are removed from the mob)
mob = []
for ag, tok in revs.items():
if ag != hegemon:
mob.append((ag, tok))
if not mob: return False, None
# 4. Sort Mob by tokens (Weakest -> Strongest)
mob.sort(key=lambda x: x[1])
# 5. The Mutiny Logic
current_lower_sum = 0
total_mob_tokens = sum(t for a, t in mob)
processed_index = 0
while processed_index < len(mob):
# Handle groups of identical token amounts
current_amount = mob[processed_index][1]
current_group = []
while processed_index < len(mob) and mob[processed_index][1] == current_amount:
current_group.append(mob[processed_index])
processed_index += 1