diff --git a/.idea/.gitignore b/.idea/.gitignore
new file mode 100644
index 0000000..13566b8
--- /dev/null
+++ b/.idea/.gitignore
@@ -0,0 +1,8 @@
+# Default ignored files
+/shelf/
+/workspace.xml
+# Editor-based HTTP Client requests
+/httpRequests/
+# Datasource local storage ignored files
+/dataSources/
+/dataSources.local.xml
diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml
new file mode 100644
index 0000000..105ce2d
--- /dev/null
+++ b/.idea/inspectionProfiles/profiles_settings.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
new file mode 100644
index 0000000..b464c49
--- /dev/null
+++ b/.idea/misc.xml
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/modules.xml b/.idea/modules.xml
new file mode 100644
index 0000000..26747a4
--- /dev/null
+++ b/.idea/modules.xml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/neuroevolution.iml b/.idea/neuroevolution.iml
new file mode 100644
index 0000000..7f80a0b
--- /dev/null
+++ b/.idea/neuroevolution.iml
@@ -0,0 +1,14 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
new file mode 100644
index 0000000..35eb1dd
--- /dev/null
+++ b/.idea/vcs.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/experiments/car_racing_test/run.py b/experiments/car_racing_test/run.py
new file mode 100644
index 0000000..ecae6bd
--- /dev/null
+++ b/experiments/car_racing_test/run.py
@@ -0,0 +1,12 @@
+import gymnasium as gym
+
+env = gym.make("CarRacing-v3", render_mode="human")
+obs, info = env.reset()
+
+for _ in range(1000):
+ action = env.action_space.sample()
+ _, reward, terminated, truncated, _ = env.step(action)
+ if terminated or truncated:
+ obs, info = env.reset()
+
+env.close()
diff --git a/experiments/genotype.json b/experiments/genotype_mapper/genotype.json
similarity index 100%
rename from experiments/genotype.json
rename to experiments/genotype_mapper/genotype.json
diff --git a/experiments/genotype_mapper.py b/experiments/genotype_mapper/genotype_mapper.py
similarity index 95%
rename from experiments/genotype_mapper.py
rename to experiments/genotype_mapper/genotype_mapper.py
index fcbf0d9..270a9fd 100644
--- a/experiments/genotype_mapper.py
+++ b/experiments/genotype_mapper/genotype_mapper.py
@@ -6,7 +6,7 @@ from typing import List, Dict, Tuple
# ---- Hilfsfunktionen ----
def generate_id():
- """Generiert eine eindeutige ID basierend auf Zufallszahlen."""
+ """generate random number as id. TODO: this should be uuidv4 instead of float"""
return random.random()
diff --git a/experiments/phenotype_genotype_map/__pycache__/pehno_geno_map.cpython-312.pyc b/experiments/phenotype_genotype_map/__pycache__/pehno_geno_map.cpython-312.pyc
new file mode 100644
index 0000000..0e7fd4c
Binary files /dev/null and b/experiments/phenotype_genotype_map/__pycache__/pehno_geno_map.cpython-312.pyc differ
diff --git a/experiments/phenotype_genotype_map/genotype.json b/experiments/phenotype_genotype_map/genotype.json
new file mode 100644
index 0000000..f762020
--- /dev/null
+++ b/experiments/phenotype_genotype_map/genotype.json
@@ -0,0 +1,258 @@
+{
+ "cortex": {
+ "id": 0.38985127736117664,
+ "sensor_ids": [
+ 0.5481697495393968
+ ],
+ "actuator_ids": [
+ 0.300862433948894
+ ],
+ "neuron_ids": [
+ 0.7892266733003815,
+ 0.08993126065168999,
+ 0.7353679413901013,
+ 0.7986671199471203,
+ 0.2619274118215478,
+ 0.1878355172703,
+ 0.5460586524022121,
+ 0.0325156080072021
+ ]
+ },
+ "sensor": {
+ "id": 0.5481697495393968,
+ "name": "rng",
+ "vector_length": 2,
+ "cx_id": 0.37250552293263184,
+ "fanout_ids": [
+ 0.7892266733003815,
+ 0.08993126065168999,
+ 0.7353679413901013,
+ 0.7986671199471203
+ ]
+ },
+ "actuator": {
+ "id": 0.300862433948894,
+ "name": "pts",
+ "vector_length": 1,
+ "cx_id": 0.37250552293263184,
+ "fanin_ids": [
+ 0.0325156080072021
+ ]
+ },
+ "neurons": [
+ {
+ "id": 0.7892266733003815,
+ "layer_index": 0,
+ "cx_id": 0.37250552293263184,
+ "activation_function": "tanh",
+ "input_weights": [
+ {
+ "input_id": 0.5481697495393968,
+ "weights": [
+ -0.2010596737648036,
+ -0.017559575650012982
+ ]
+ }
+ ],
+ "output_ids": [
+ 0.11787440750922895,
+ 0.9473188607259506,
+ 0.9045741659912035
+ ]
+ },
+ {
+ "id": 0.08993126065168999,
+ "layer_index": 0,
+ "cx_id": 0.37250552293263184,
+ "activation_function": "tanh",
+ "input_weights": [
+ {
+ "input_id": 0.5481697495393968,
+ "weights": [
+ 0.49425647147649876,
+ 0.09556856915703738
+ ]
+ }
+ ],
+ "output_ids": [
+ 0.11787440750922895,
+ 0.9473188607259506,
+ 0.9045741659912035
+ ]
+ },
+ {
+ "id": 0.7353679413901013,
+ "layer_index": 0,
+ "cx_id": 0.37250552293263184,
+ "activation_function": "tanh",
+ "input_weights": [
+ {
+ "input_id": 0.5481697495393968,
+ "weights": [
+ -0.3432015569352376,
+ -0.15663876804924903
+ ]
+ }
+ ],
+ "output_ids": [
+ 0.11787440750922895,
+ 0.9473188607259506,
+ 0.9045741659912035
+ ]
+ },
+ {
+ "id": 0.7986671199471203,
+ "layer_index": 0,
+ "cx_id": 0.37250552293263184,
+ "activation_function": "tanh",
+ "input_weights": [
+ {
+ "input_id": 0.5481697495393968,
+ "weights": [
+ 0.44235413103542676,
+ -0.3014661028473905
+ ]
+ }
+ ],
+ "output_ids": [
+ 0.11787440750922895,
+ 0.9473188607259506,
+ 0.9045741659912035
+ ]
+ },
+ {
+ "id": 0.2619274118215478,
+ "layer_index": 1,
+ "cx_id": 0.37250552293263184,
+ "activation_function": "tanh",
+ "input_weights": [
+ {
+ "input_id": 0.7892266733003815,
+ "weights": [
+ 0.4815260544600901
+ ]
+ },
+ {
+ "input_id": 0.08993126065168999,
+ "weights": [
+ -0.34742595611872107
+ ]
+ },
+ {
+ "input_id": 0.7353679413901013,
+ "weights": [
+ 0.1955465593022997
+ ]
+ },
+ {
+ "input_id": 0.7986671199471203,
+ "weights": [
+ 0.1420046463445398
+ ]
+ }
+ ],
+ "output_ids": [
+ 0.804289241732289
+ ]
+ },
+ {
+ "id": 0.1878355172703,
+ "layer_index": 1,
+ "cx_id": 0.37250552293263184,
+ "activation_function": "tanh",
+ "input_weights": [
+ {
+ "input_id": 0.7892266733003815,
+ "weights": [
+ -0.26492297905129614
+ ]
+ },
+ {
+ "input_id": 0.08993126065168999,
+ "weights": [
+ -0.483071194536965
+ ]
+ },
+ {
+ "input_id": 0.7353679413901013,
+ "weights": [
+ 0.0016581996680702371
+ ]
+ },
+ {
+ "input_id": 0.7986671199471203,
+ "weights": [
+ 0.47010344086613354
+ ]
+ }
+ ],
+ "output_ids": [
+ 0.804289241732289
+ ]
+ },
+ {
+ "id": 0.5460586524022121,
+ "layer_index": 1,
+ "cx_id": 0.37250552293263184,
+ "activation_function": "tanh",
+ "input_weights": [
+ {
+ "input_id": 0.7892266733003815,
+ "weights": [
+ 0.22583100100162312
+ ]
+ },
+ {
+ "input_id": 0.08993126065168999,
+ "weights": [
+ 0.22079487426614341
+ ]
+ },
+ {
+ "input_id": 0.7353679413901013,
+ "weights": [
+ 0.3514718848950448
+ ]
+ },
+ {
+ "input_id": 0.7986671199471203,
+ "weights": [
+ 0.03653443587296967
+ ]
+ }
+ ],
+ "output_ids": [
+ 0.804289241732289
+ ]
+ },
+ {
+ "id": 0.0325156080072021,
+ "layer_index": 2,
+ "cx_id": 0.37250552293263184,
+ "activation_function": "tanh",
+ "input_weights": [
+ {
+ "input_id": 0.2619274118215478,
+ "weights": [
+ 0.4397546856440344
+ ]
+ },
+ {
+ "input_id": 0.1878355172703,
+ "weights": [
+ 0.024524621225712195
+ ]
+ },
+ {
+ "input_id": 0.5460586524022121,
+ "weights": [
+ 0.19908558570374346
+ ]
+ }
+ ],
+ "output_ids": [
+ 0.300862433948894
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/experiments/phenotype_genotype_map/genotype_updated.json b/experiments/phenotype_genotype_map/genotype_updated.json
new file mode 100644
index 0000000..719275e
--- /dev/null
+++ b/experiments/phenotype_genotype_map/genotype_updated.json
@@ -0,0 +1,258 @@
+{
+ "cortex": {
+ "id": 0.38985127736117664,
+ "sensor_ids": [
+ 0.5481697495393968
+ ],
+ "actuator_ids": [
+ 0.300862433948894
+ ],
+ "neuron_ids": [
+ 0.7892266733003815,
+ 0.08993126065168999,
+ 0.7353679413901013,
+ 0.7986671199471203,
+ 0.2619274118215478,
+ 0.1878355172703,
+ 0.5460586524022121,
+ 0.0325156080072021
+ ]
+ },
+ "sensor": {
+ "id": 0.5481697495393968,
+ "name": "rng",
+ "vector_length": 2,
+ "cx_id": 0.37250552293263184,
+ "fanout_ids": [
+ 0.7892266733003815,
+ 0.08993126065168999,
+ 0.7353679413901013,
+ 0.7986671199471203
+ ]
+ },
+ "actuator": {
+ "id": 0.300862433948894,
+ "name": "pts",
+ "vector_length": 1,
+ "cx_id": 0.37250552293263184,
+ "fanin_ids": [
+ 0.0325156080072021
+ ]
+ },
+ "neurons": [
+ {
+ "id": 0.7892266733003815,
+ "layer_index": 0,
+ "cx_id": 0.37250552293263184,
+ "activation_function": "tanh",
+ "input_weights": [
+ {
+ "input_id": 0.5481697495393968,
+ "weights": [
+ -0.2010596737648036,
+ -0.017559575650012982
+ ]
+ }
+ ],
+ "output_ids": [
+ 0.11787440750922895,
+ 0.9473188607259506,
+ 0.9045741659912035
+ ]
+ },
+ {
+ "id": 0.08993126065168999,
+ "layer_index": 0,
+ "cx_id": 0.37250552293263184,
+ "activation_function": "tanh",
+ "input_weights": [
+ {
+ "input_id": 0.5481697495393968,
+ "weights": [
+ 0.49425647147649876,
+ 0.09556856915703738
+ ]
+ }
+ ],
+ "output_ids": [
+ 0.11787440750922895,
+ 0.9473188607259506,
+ 0.9045741659912035
+ ]
+ },
+ {
+ "id": 0.7353679413901013,
+ "layer_index": 0,
+ "cx_id": 0.37250552293263184,
+ "activation_function": "tanh",
+ "input_weights": [
+ {
+ "input_id": 0.5481697495393968,
+ "weights": [
+ -0.3432015569352376,
+ -0.15663876804924903
+ ]
+ }
+ ],
+ "output_ids": [
+ 0.11787440750922895,
+ 0.9473188607259506,
+ 0.9045741659912035
+ ]
+ },
+ {
+ "id": 0.7986671199471203,
+ "layer_index": 0,
+ "cx_id": 0.37250552293263184,
+ "activation_function": "tanh",
+ "input_weights": [
+ {
+ "input_id": 0.5481697495393968,
+ "weights": [
+ 0.44235413103542676,
+ -0.3014661028473905
+ ]
+ }
+ ],
+ "output_ids": [
+ 0.11787440750922895,
+ 0.9473188607259506,
+ 0.9045741659912035
+ ]
+ },
+ {
+ "id": 0.2619274118215478,
+ "layer_index": 1,
+ "cx_id": 0.37250552293263184,
+ "activation_function": "tanh",
+ "input_weights": [
+ {
+ "input_id": 0.7892266733003815,
+ "weights": [
+ 0.4815260544600901
+ ]
+ },
+ {
+ "input_id": 0.08993126065168999,
+ "weights": [
+ -0.34742595611872107
+ ]
+ },
+ {
+ "input_id": 0.7353679413901013,
+ "weights": [
+ 0.1955465593022997
+ ]
+ },
+ {
+ "input_id": 0.7986671199471203,
+ "weights": [
+ 0.1420046463445398
+ ]
+ }
+ ],
+ "output_ids": [
+ 0.804289241732289
+ ]
+ },
+ {
+ "id": 0.1878355172703,
+ "layer_index": 1,
+ "cx_id": 0.37250552293263184,
+ "activation_function": "tanh",
+ "input_weights": [
+ {
+ "input_id": 0.7892266733003815,
+ "weights": [
+ -0.26492297905129614
+ ]
+ },
+ {
+ "input_id": 0.08993126065168999,
+ "weights": [
+ -0.483071194536965
+ ]
+ },
+ {
+ "input_id": 0.7353679413901013,
+ "weights": [
+ 0.0016581996680702371
+ ]
+ },
+ {
+ "input_id": 0.7986671199471203,
+ "weights": [
+ 0.47010344086613354
+ ]
+ }
+ ],
+ "output_ids": [
+ 0.804289241732289
+ ]
+ },
+ {
+ "id": 0.5460586524022121,
+ "layer_index": 1,
+ "cx_id": 0.37250552293263184,
+ "activation_function": "tanh",
+ "input_weights": [
+ {
+ "input_id": 0.7892266733003815,
+ "weights": [
+ 0.22583100100162312
+ ]
+ },
+ {
+ "input_id": 0.08993126065168999,
+ "weights": [
+ 0.22079487426614341
+ ]
+ },
+ {
+ "input_id": 0.7353679413901013,
+ "weights": [
+ 0.3514718848950448
+ ]
+ },
+ {
+ "input_id": 0.7986671199471203,
+ "weights": [
+ 0.03653443587296967
+ ]
+ }
+ ],
+ "output_ids": [
+ 0.804289241732289
+ ]
+ },
+ {
+ "id": 0.0325156080072021,
+ "layer_index": 2,
+ "cx_id": 0.37250552293263184,
+ "activation_function": "tanh",
+ "input_weights": [
+ {
+ "input_id": 0.2619274118215478,
+ "weights": [
+ 0.4397546856440344
+ ]
+ },
+ {
+ "input_id": 0.1878355172703,
+ "weights": [
+ 0.024524621225712195
+ ]
+ },
+ {
+ "input_id": 0.5460586524022121,
+ "weights": [
+ 0.19908558570374346
+ ]
+ }
+ ],
+ "output_ids": [
+ 0.300862433948894
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/experiments/phenotype_genotype_map/pehno_geno_map.py b/experiments/phenotype_genotype_map/pehno_geno_map.py
new file mode 100644
index 0000000..80274dc
--- /dev/null
+++ b/experiments/phenotype_genotype_map/pehno_geno_map.py
@@ -0,0 +1,410 @@
+import asyncio
+import json
+import math
+import random
+from collections import defaultdict
+from typing import Dict, List, Tuple, Any
+
+
+# utils
+def tanh(x: float) -> float:
+ return math.tanh(x)
+
+
+def rng_vector(vl: int) -> List[float]:
+    # like sensor:rng(VL) in the Erlang original
+ return [random.random() for _ in range(vl)]
+
+
+# actor base class
+class Actor:
+ """
+ base class for all our actors: sensors, actuators, neurons, cortex.
+ """
+ def __init__(self, name: str):
+ self.name = name
+ self.inbox: asyncio.Queue = asyncio.Queue()
+
+ async def send(self, msg: Tuple[Any, ...]):
+ """
+        Enqueue *msg* into this actor's inbox.
+ """
+ await self.inbox.put(msg)
+
+ async def run(self):
+ """
+ this needs to be overridden in subclasses.
+ """
+ raise NotImplementedError
+
+
+# sensor
+class Sensor(Actor):
+ def __init__(self, sid: Any, cx_pid: "Actor", name: str, vl: int, fanout_pids: List["Actor"]):
+ super().__init__(f"Sensor-{sid}")
+ self.sid = sid
+ self.cx_pid = cx_pid
+ self.sname = name
+ self.vl = vl
+ self.fanout = fanout_pids
+
+ async def run(self):
+ while True:
+ msg = await self.inbox.get()
+ tag = msg[0]
+ if tag == "sync": # {Cx, sync}
+ # if there is a sync command, we sense from the environment
+ # and put the result in the inbox of the neurons connected to sensor.
+ print("sensing...")
+            # choose sensor function
+ if self.sname == "rng":
+ vec = rng_vector(self.vl)
+ else:
+ # place for own sensors (this will be replaced by scapes down the road)
+ vec = [0.0] * self.vl
+            # forward to all fanout neurons
+ for pid in self.fanout:
+ await pid.send(("forward", self.sid, vec))
+ elif tag == "terminate":
+ # terminate.
+ return
+
+
+# neuron
+class Neuron(Actor):
+ def __init__(self, nid: Any, cx_pid: "Actor", af_name: str,
+ input_idps: List[Tuple[Any, List[float]]], # [(input_id, weights)] (+ optional bias am Ende)
+ output_pids: List["Actor"]):
+ super().__init__(f"Neuron-{nid}")
+ self.nid = nid
+ self.cx_pid = cx_pid
+ self.af = tanh if af_name == "tanh" else tanh
+ # input-id → (weights, received_flag, last_value_vector_or_scalar)
+ self.inputs: Dict[Any, Dict[str, Any]] = {}
+ self.order: List[Any] = [] # for deterministic order
+ for entry in input_idps:
+ self.order.append(entry[0])
+ self.inputs[entry[0]] = {"weights": entry[1], "got": False, "val": None}
+ self.bias: float = 0.0 # optional bias
+ self.outputs = output_pids
+
+ async def run(self):
+ while True:
+ msg = await self.inbox.get()
+ tag = msg[0]
+ if tag == "forward":
+ # ("forward", from_id, value_vec_or_scalar)
+ _tag, from_id, data = msg
+ if from_id not in self.inputs:
+ # unexpected source - we continue and ignore it
+ continue
+ # we mark that we have values from this source to keep sequence
+ self.inputs[from_id]["got"] = True
+ self.inputs[from_id]["val"] = data
+
+ # when we have a signal from all sources: calculate dot product.
+ if all(self.inputs[i]["got"] for i in self.order):
+ acc = 0.0
+ for i in self.order:
+ w = self.inputs[i]["weights"]
+ v = self.inputs[i]["val"]
+ # first layer: v is vector -> dotproduct
+ if isinstance(v, list):
+ acc += sum(wj * vj for wj, vj in zip(w, v))
+ else:
+ # from layer 1: scalar -> 1 weight
+ acc += w[0] * float(v)
+ out = self.af(acc + self.bias)
+                    # to all outputs as a vector (like Erlang: [Output])
+ for pid in self.outputs:
+ await pid.send(("forward", self.nid, [out]))
+ # reset "got"-flags so the next cycle can start
+ for i in self.order:
+ self.inputs[i]["got"] = False
+ self.inputs[i]["val"] = None
+
+ elif tag == "get_backup":
+ # ("get_backup", reply_to)
+ _tag, reply_to = msg
+ # we create (nid, [(input_id, weights), ..., ('bias', bias)])
+ idps = [(i, self.inputs[i]["weights"]) for i in self.order]
+ idps.append(("bias", self.bias))
+ # send back to cortex
+ await reply_to.send(("backup_from_neuron", self.nid, idps))
+
+ elif tag == "terminate":
+ return
+
+
+# actuator
+class Actuator(Actor):
+ def __init__(self, aid: Any, cx_pid: "Actor", name: str, fanin_ids: List[Any], expect_count: int):
+ super().__init__(f"Actuator-{aid}")
+ self.aid = aid
+ self.cx_pid = cx_pid
+ self.aname = name
+ self.fanin_ids = fanin_ids[:] # order matters!
+ self.expect = expect_count
+ self.received: Dict[Any, List[float]] = {}
+
+ async def run(self):
+ while True:
+ msg = await self.inbox.get()
+ tag = msg[0]
+ if tag == "forward":
+ _tag, from_id, vec = msg
+ self.received[from_id] = vec
+ if len(self.received) == self.expect:
+ # collect in order of fanin_ids
+ result: List[float] = []
+ for fid in self.fanin_ids:
+ result.extend(self.received[fid])
+ # actuator functions (here: pts → print)
+ if self.aname == "pts":
+ print(f"actuator:pts(Result): {result}")
+ # send sync message to cortex
+ await self.cx_pid.send(("sync_from_actuator", self.aid))
+ self.received.clear()
+ elif tag == "terminate":
+ return
+
+
+# cortex with report queue
+class Cortex(Actor):
+ def __init__(self, cid: Any, sensor_pids: List["Actor"], actuator_pids: List["Actor"],
+ neuron_pids: List["Actor"], total_steps: int, report_queue: asyncio.Queue):
+ super().__init__(f"Cortex-{cid}")
+ self.cid = cid
+ self.sensors = sensor_pids
+ self.actuators = actuator_pids
+ self.neurons = neuron_pids
+ self.total_steps = total_steps
+ self.awaiting_sync: set = set()
+ self.actuator_ids: List[Any] = []
+ self.report_queue = report_queue
+
+ async def run(self):
+ # start: trigger all sensors
+ for s in self.sensors:
+ await s.send(("sync",))
+
+ while True:
+ msg = await self.inbox.get()
+ tag = msg[0]
+
+ if tag == "register_actuators":
+ _, actuator_ids = msg
+ self.actuator_ids = list(actuator_ids)
+ self.awaiting_sync = set(self.actuator_ids)
+
+ elif tag == "sync_from_actuator":
+ _, aid = msg
+ if aid in self.awaiting_sync:
+ self.awaiting_sync.remove(aid)
+
+ if not self.awaiting_sync:
+ self.total_steps -= 1
+ if self.total_steps <= 0:
+ # collect backup
+ weights = []
+ for n in self.neurons:
+ await n.send(("get_backup", self))
+ remaining = len(self.neurons)
+ while remaining > 0:
+ bmsg = await self.inbox.get()
+ if bmsg[0] == "backup_from_neuron":
+ _, nid, idps = bmsg
+ weights.append((nid, idps))
+ remaining -= 1
+
+ # report result to exoself
+ await self.report_queue.put(("completed_backup", weights))
+
+ # terminate
+ for s in self.sensors:
+ await s.send(("terminate",))
+ for a in self.actuators:
+ await a.send(("terminate",))
+ for n in self.neurons:
+ await n.send(("terminate",))
+ return
+
+                    # next cycle
+ self.awaiting_sync = set(self.actuator_ids)
+ for s in self.sensors:
+ await s.send(("sync",))
+
+ elif tag == "completed_backup":
+ # placeholder
+ pass
+
+
+# exoself
+class Exoself:
+ def __init__(self, genotype: Dict[str, Any], total_steps: int = 10):
+ self.g = genotype
+ self.total_steps = total_steps
+ self.cx_actor: Cortex = None
+ self.pid_by_id: Dict[Any, Actor] = {}
+ self.report_queue: asyncio.Queue = asyncio.Queue()
+
+ # --- helpers for arrays of sensors and actuators ---
+ @staticmethod
+ def _as_list(maybe_list_or_item):
+ if maybe_list_or_item is None:
+ return []
+ if isinstance(maybe_list_or_item, list):
+ return maybe_list_or_item
+ return [maybe_list_or_item]
+
+ def _get_sensors_json(self) -> List[Dict[str, Any]]:
+ if "sensors" in self.g and isinstance(self.g["sensors"], list):
+ return self.g["sensors"]
+ elif "sensor" in self.g and isinstance(self.g["sensor"], dict):
+ return [self.g["sensor"]]
+ else:
+ return []
+
+ def _get_actuators_json(self) -> List[Dict[str, Any]]:
+ if "actuators" in self.g and isinstance(self.g["actuators"], list):
+ return self.g["actuators"]
+ elif "actuator" in self.g and isinstance(self.g["actuator"], dict):
+ return [self.g["actuator"]]
+ else:
+ return []
+
+ def _build_pid_map(self):
+ cx = self.g["cortex"]
+ self.cx_actor = Cortex(cx["id"], [], [], [], self.total_steps, self.report_queue)
+ self.pid_by_id[cx["id"]] = self.cx_actor
+
+ # placeholder for all known ids
+ for s in self._get_sensors_json():
+ self.pid_by_id[s["id"]] = None
+ for a in self._get_actuators_json():
+ self.pid_by_id[a["id"]] = None
+ for n in self.g["neurons"]:
+ self.pid_by_id[n["id"]] = None
+
+ def _link_and_spawn(self):
+ # order neurons by layers
+ layers = defaultdict(list)
+ for n in self.g["neurons"]:
+ layers[n["layer_index"]].append(n)
+ ordered_layers = [layers[i] for i in sorted(layers)]
+ if not ordered_layers:
+ raise ValueError("Keine Neuronen im Genotyp gefunden.")
+
+ # build actuators
+ actuators_json = self._get_actuators_json()
+ actuator_pid_by_id: Dict[Any, Actuator] = {}
+
+ # for last layer: we need list of neuron-ids (for matching)
+ last_layer = ordered_layers[-1]
+ last_layer_ids = {n["id"] for n in last_layer}
+
+ for a in actuators_json:
+ fanin_ids = self._as_list(a.get("fanin_ids", []))
+ # Safety: if genotype is empty at this place, take neurons of last layer
+ if not fanin_ids:
+ fanin_ids = list(last_layer_ids)
+ actuator = Actuator(a["id"], self.cx_actor, a["name"], fanin_ids, expect_count=len(fanin_ids))
+ self.pid_by_id[a["id"]] = actuator
+ actuator_pid_by_id[a["id"]] = actuator
+
+ actuator_actors = list(actuator_pid_by_id.values())
+
+ # build the neurons
+ neuron_pid_by_id: Dict[Any, Neuron] = {}
+ for layer in ordered_layers:
+ for n in layer:
+ input_idps = [(iw["input_id"], iw["weights"]) for iw in n["input_weights"]]
+ neuron = Neuron(n["id"], self.cx_actor, n["activation_function"], input_idps, [])
+ neuron_pid_by_id[n["id"]] = neuron
+ self.pid_by_id[n["id"]] = neuron
+
+ # set next layer as output for non output neurons
+ for li, layer in enumerate(ordered_layers[:-1]):
+ next_layer = ordered_layers[li + 1]
+ next_pids = [neuron_pid_by_id[nx["id"]] for nx in next_layer]
+ for n in layer:
+ neuron_pid_by_id[n["id"]].outputs = next_pids
+
+ # outputs for output layer: actuators
+ for n in ordered_layers[-1]:
+ outs: List[Actor] = []
+ nid = n["id"]
+ for a in actuators_json:
+ fanin_ids = set(self._as_list(a.get("fanin_ids", [])))
+ # default: if fanin empty, all last layer feed this actuator
+ if not fanin_ids or nid in fanin_ids:
+ outs.append(actuator_pid_by_id[a["id"]])
+ neuron_pid_by_id[nid].outputs = outs
+
+ # build sensors
+ sensors_json = self._get_sensors_json()
+ sensor_actors: List[Sensor] = []
+ first_layer = ordered_layers[0]
+ first_layer_pid_by_id = {n["id"]: neuron_pid_by_id[n["id"]] for n in first_layer}
+
+ for s in sensors_json:
+ # if the genotype contains fanout_ids we use exactly these
+ fanout_ids = self._as_list(s.get("fanout_ids", []))
+ if fanout_ids:
+ fanout_pids = [first_layer_pid_by_id[fid] for fid in fanout_ids if fid in first_layer_pid_by_id]
+ # if fanout_ids point to non-first-layer (unclean genotype), fallback:
+ if not fanout_pids:
+ fanout_pids = list(first_layer_pid_by_id.values())
+ else:
+ # default: all neurons in first layer
+ fanout_pids = list(first_layer_pid_by_id.values())
+
+ sensor = Sensor(s["id"], self.cx_actor, s["name"], s["vector_length"], fanout_pids)
+ self.pid_by_id[s["id"]] = sensor
+ sensor_actors.append(sensor)
+
+ # fill cortex
+ self.cx_actor.sensors = sensor_actors
+ self.cx_actor.actuators = actuator_actors
+ self.cx_actor.neurons = [neuron_pid_by_id[n["id"]] for n in self.g["neurons"]]
+
+ async def run(self) -> List[Tuple[Any, List[Tuple[Any, List[float]]]]]:
+ self._build_pid_map()
+ self._link_and_spawn()
+
+ # start tasks
+ tasks = [asyncio.create_task(self.cx_actor.run())]
+ for v in self.pid_by_id.values():
+ if isinstance(v, Actor) and v is not self.cx_actor:
+ tasks.append(asyncio.create_task(v.run()))
+
+ # register all actuator ids
+ actuator_ids = [a["id"] for a in self._get_actuators_json()]
+ await self.cx_actor.send(("register_actuators", actuator_ids))
+
+ # wait for result in report queue
+ tag, weights = await self.report_queue.get()
+ assert tag == "completed_backup"
+
+ # cleanup
+ for t in tasks:
+ if not t.done():
+ t.cancel()
+ return weights
+
+ @staticmethod
+ def update_genotype_with_backup(geno: Dict[str, Any],
+ backup: List[Tuple[Any, List[Tuple[Any, List[float]]]]]) -> Dict[str, Any]:
+ by_id = {n["id"]: n for n in geno["neurons"]}
+ for nid, idps in backup:
+ if nid not in by_id:
+ continue
+ new_iw = []
+ for item in idps:
+ if isinstance(item[0], str) and item[0] == "bias":
+ continue
+ input_id, weights = item
+ new_iw.append({"input_id": input_id, "weights": weights})
+ by_id[nid]["input_weights"] = new_iw
+ return geno
+
diff --git a/experiments/phenotype_genotype_map/run.py b/experiments/phenotype_genotype_map/run.py
new file mode 100644
index 0000000..74ca059
--- /dev/null
+++ b/experiments/phenotype_genotype_map/run.py
@@ -0,0 +1,19 @@
+import asyncio
+import json
+from pehno_geno_map import Exoself
+
+# load genotype from json
+with open("genotype.json") as f:
+ geno = json.load(f)
+
+# map phenotype and run for 5 cycles
+ex = Exoself(geno, total_steps=5)
+# get backup
+backup = asyncio.run(ex.run())
+
+print("Backup erhalten (gekürzt):", [(nid, len(idps)) for nid, idps in backup])
+
+# update genotype and save
+u_geno = Exoself.update_genotype_with_backup(geno, backup)
+with open("genotype_updated.json", "w") as f:
+ json.dump(u_geno, f, indent=2)
diff --git a/experiments/neuron.py b/experiments/simple_nn/neuron.py
similarity index 100%
rename from experiments/neuron.py
rename to experiments/simple_nn/neuron.py
diff --git a/experiments/simplest_nn.py b/experiments/simple_nn/simplest_nn.py
similarity index 96%
rename from experiments/simplest_nn.py
rename to experiments/simple_nn/simplest_nn.py
index a7f0216..281e487 100644
--- a/experiments/simplest_nn.py
+++ b/experiments/simple_nn/simplest_nn.py
@@ -58,6 +58,7 @@ class Sensor(threading.Thread):
except queue.Empty:
continue
+
class Actuator(threading.Thread):
def __init__(self, actuator_queue):
super().__init__()
@@ -80,6 +81,7 @@ class Actuator(threading.Thread):
except queue.Empty:
continue
+
class Cortex(threading.Thread):
def __init__(self, sensor_queue, neuron_queue, actuator_queue):
super().__init__()
@@ -102,6 +104,7 @@ class Cortex(threading.Thread):
print("Unknown command. Please use 'sense_think_act' or 'terminate'.")
print("Cortex terminated.")
+
if __name__ == "__main__":
sensor_queue = queue.Queue()
neuron_queue = queue.Queue()
@@ -127,4 +130,4 @@ if __name__ == "__main__":
neuron.join()
actuator.join()
- print("System terminated.")
\ No newline at end of file
+ print("System terminated.")
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..b2707d9
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,4 @@
+gymnasium==1.2.1
+numpy>=1.23
+Box2D
+pygame
\ No newline at end of file