Last changes

.gitignore | 9 | vendored
@@ -1,2 +1,9 @@
 .DS_Store
-.idea
+.idea/
+.venv/
+
+mathema/runs/
+
+sac/tb_*/
+sac/td*/
+neo4j_db/
Archiv.zip | BIN | Normal file

README.md | 107 | Normal file
@@ -0,0 +1,107 @@
# Neuroevolution

`mathema` is an experimental neuroevolution framework in Python that combines agent-based architectures with evolutionary mechanisms and is architecturally modeled on Gene Sher's DXNN system.
The project is modular and allows evolving agents to be evaluated in different scapes (an adapted CarRacing environment).

The focus is on:
- evolutionary learning processes
- population-based training runs
- reproducible evaluation
- a clean separation of genotype, phenotype, and environment

---

## Project overview

The framework consists of several logically separated components:

- **core/**
  Core evolution logic (agents, genotypes, mutations, selection, populations)

- **scape/**
  Environments in which agents act (e.g. CarRacing)

- **eval / main scripts**
  Training, evaluation, comparison of multiple runs

- **utils/**
  Helper functions (logging, configuration, seed handling, I/O)

- **archive/**
  Older or experimental implementations (not actively used)

---

## Directory structure

    mathema/
    ├── core/                # Evolution core (agent, genotype, population, mutation)
    ├── scape/               # Environments / task spaces
    ├── utils/               # Helper functions
    ├── archive/             # Old / experimental modules
    ├── eval_main.py         # Evaluation & benchmark script
    ├── car_racing_main.py   # Entry point for CarRacing experiments

---

## Core concepts

### Agent
An agent represents an evolvable unit that:
- owns a genotype
- derives a phenotype from it (e.g. a neural network)
- executes actions in a scape

### Genotype / phenotype
- The genotype describes the structure (e.g. neurons, edges, parameters)
- The phenotype is the executable representation (e.g. policy / controller), as the sketch below illustrates
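To make the distinction concrete, here is a minimal, hypothetical sketch of how a genotype dictionary could be compiled into a callable phenotype. It is not taken from the repository; the field names (`neurons`, `connections`, `weight`, `bias`) are illustrative assumptions only.

```python
import math

# Hypothetical genotype: structure and parameters, but no executable behaviour.
genotype = {
    "neurons": [{"id": "n1", "af": "tanh", "bias": 0.1}],
    "connections": [{"from": "s1", "to": "n1", "weight": 0.8}],
}

def build_phenotype(g):
    """Compile the genotype into a callable controller (the phenotype)."""
    weight = g["connections"][0]["weight"]
    bias = g["neurons"][0]["bias"]
    return lambda x: math.tanh(weight * x + bias)

policy = build_phenotype(genotype)
print(policy(0.5))  # the phenotype maps observations to outputs
```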
### Neuroevolution
- populations of multiple agents
- mutation (topology & parameters)
- selection based on fitness
- optionally population-based strategies

### Scapes
A scape defines (see the sketch below):
- the state space
- the action space
- the reward/fitness computation
- the episode logic
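A minimal sketch of what such a scape interface could look like in Python. The class and method names are assumptions for illustration, not the actual API of the `scape/` package.

```python
from abc import ABC, abstractmethod
from typing import List, Tuple

class Scape(ABC):
    """Hypothetical scape interface: state space, action space, fitness, episode logic."""

    @abstractmethod
    def reset(self) -> List[float]:
        """Start a new episode and return the initial observation vector."""

    @abstractmethod
    def step(self, action: List[float]) -> Tuple[List[float], float, bool]:
        """Apply an action; return (next observation, fitness/reward, episode done)."""

class ConstantScape(Scape):
    """Toy example: always returns a zero observation and a fixed reward."""

    def reset(self) -> List[float]:
        return [0.0]

    def step(self, action: List[float]) -> Tuple[List[float], float, bool]:
        return [0.0], 1.0, True
```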
---

## Installation

### Virtual environment and Neo4j DB

```bash
docker compose up -d          # start the neo4j db
python -m venv .venv
source .venv/bin/activate
```

Install the requirements:

```bash
pip install -r requirements.txt
```

## Quickstart

### Start a CarRacing experiment

```bash
python mathema/car_racing_main.py
```

This starts one evolution round with agents in the CarRacing environment.

### Multiple runs (thesis tests)

```bash
python mathema/eval_main.py
```

This runs several independent runs and aggregates fitness statistics.
@@ -2,6 +2,9 @@ import asyncio


 class Actor:
+    """
+    actor base class.
+    """
     def __init__(self, name: str):
         self.name = name
         self.inbox = asyncio.Queue()
@@ -7,6 +7,33 @@ log = logging.getLogger(__name__)


 class Actuator(Actor):
+    """
+    Actuator actor responsible for collecting outputs from upstream neurons
+    (fanin), assembling them into an action/output vector, interacting with
+    a scape (environment), and synchronizing the result back to the cortex.
+
+    Conceptually, an Actuator represents the *output layer* of a cortex/agent:
+    - It waits for `forward` messages from all expected fanin sources.
+    - Once all signals are received, they are concatenated in the order
+      defined by `fanin_ids` into a flat output vector.
+    - Depending on `aname`, the output is:
+      * used for debugging/testing ("pts"),
+      * sent directly as an action to a scape ("xor_SendOutput"),
+      * mapped to a car control action and sent to a CarRacing scape
+        ("car_ApplyAction"),
+      * or ignored with a default fitness.
+    - After the interaction, the actuator reports the resulting fitness
+      and halt flag back to the cortex via a `"sync"` message.
+
+    Inbox message protocol:
+    - ("forward", from_id, vec):
+      `from_id` is the ID of the sending fanin neuron,
+      `vec` is its output vector.
+    - ("result", fitness, halt_flag):
+      Response from the scape after an action was applied.
+    - ("terminate",):
+      Terminates the actor.
+    """
     def __init__(self, aid, cx_pid, name, fanin_ids, expect_count, scape=None):
         super().__init__(f"Actuator-{aid}")
         self.aid = aid
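For illustration, a minimal, self-contained sketch of the fanin-collection behaviour described in the docstring above: wait for one `("forward", from_id, vec)` message per fanin source, then concatenate the vectors in `fanin_ids` order. This is an assumption-based simplification, not the repository's `Actuator` implementation.

```python
import asyncio

async def collect_fanin(inbox: asyncio.Queue, fanin_ids: list[str]) -> list[float]:
    """Wait for one ("forward", from_id, vec) message per fanin id,
    then concatenate the vectors in the order given by fanin_ids."""
    received: dict[str, list[float]] = {}
    while len(received) < len(fanin_ids):
        tag, from_id, vec = await inbox.get()
        if tag == "forward":
            received[from_id] = vec
    out: list[float] = []
    for fid in fanin_ids:
        out.extend(received[fid])
    return out

async def demo() -> None:
    inbox: asyncio.Queue = asyncio.Queue()
    await inbox.put(("forward", "n2", [0.3]))
    await inbox.put(("forward", "n1", [0.9, -0.2]))
    print(await collect_fanin(inbox, ["n1", "n2"]))  # [0.9, -0.2, 0.3]

asyncio.run(demo())
```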
@@ -6,6 +6,41 @@ log = logging.getLogger(__name__)


 class Cortex(Actor):
+    """
+    Cortex actor coordinating a network of Sensors, Neurons, and Actuators.
+
+    The Cortex is responsible for driving the network forward in discrete
+    computation cycles, collecting fitness feedback from all actuators, and
+    reporting evaluation results to an Exoself (supervisor) actor.
+
+    High-level behavior:
+    - At the start of an episode, the cortex triggers a new cycle:
+      1) It tells all neurons to prepare recurrent state for the new cycle
+         via ("cycle_start",).
+      2) It optionally triggers neurons via ("tick",) (scheduler hook).
+      3) It tells all sensors to produce outputs via ("sync",).
+    - Actuators eventually send back ("sync", aid, fitness, halt_flag).
+    - Once all actuators have synchronized for the current cycle, the cortex
+      either:
+      * ends the evaluation if any actuator requested a halt (halt_flag > 0),
+        and reports ("evaluation_completed", total_fitness, cycles, elapsed)
+        to the exoself, or
+      * starts the next cycle.
+
+    Message protocol (inbox):
+    - ("register_actuators", aids):
+      Provide/replace the set of actuator IDs that must sync each cycle.
+      Used when actuators are created dynamically or not known at init.
+    - ("sync", aid, fitness, halt_flag):
+      Fitness feedback from an actuator for the current cycle.
+      The cortex accumulates fitness and checks halt conditions.
+    - ("reactivate",):
+      Restart a new evaluation episode (reset counters and kick sensors).
+    - ("terminate",):
+      Terminate the cortex and cascade termination to sensors/neurons/actuators.
+    - ("backup_from_neuron", nid, idps...):
+      Forward neuron backup data upstream to the exoself.
+    """
     def __init__(self, cid, exoself_pid, sensor_pids, neuron_pids, actuator_pids):
         super().__init__(f"Cortex-{cid}")
         self.cid = cid
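A hedged sketch of the cycle-synchronization idea described in the docstring: count actuator `sync` messages, accumulate fitness, and either halt or start the next cycle. Names and data structure are illustrative assumptions, not the actual `Cortex` code.

```python
def handle_sync(state: dict, aid: str, fitness: float, halt_flag: int) -> str:
    """Accumulate one actuator's feedback for the current cycle.

    state is assumed to hold: 'actuators' (all actuator ids), 'pending'
    (actuator ids still expected this cycle), 'total_fitness', and 'cycles'.
    Returns "waiting", "next_cycle", or "evaluation_completed".
    """
    state["total_fitness"] += fitness
    state["pending"].discard(aid)
    if halt_flag > 0:
        state["halt"] = True
    if state["pending"]:
        return "waiting"                        # still missing some actuators
    state["cycles"] += 1
    if state.get("halt"):
        return "evaluation_completed"           # would be reported to the exoself
    state["pending"] = set(state["actuators"])  # re-arm for the next cycle
    return "next_cycle"

state = {"actuators": {"a1", "a2"}, "pending": {"a1", "a2"},
         "total_fitness": 0.0, "cycles": 0}
print(handle_sync(state, "a1", 1.5, 0))  # waiting
print(handle_sync(state, "a2", 0.5, 1))  # evaluation_completed
```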
@@ -12,6 +12,24 @@ def tanh(x): return math.tanh(x)


 class Neuron(Actor):
+    """
+    Neuron actor implementing a weighted-sum neuron with an activation function
+    and optional recurrent inputs.
+
+    The Neuron receives input vectors from upstream neurons, accumulates them
+    according to its weight configuration, applies an activation function,
+    and forwards the resulting output to downstream actors.
+
+    It supports:
+    - feed-forward and recurrent connections
+    - bias handling
+    - asynchronous message-based execution
+    - weight backup, restoration, and stochastic perturbation (mutation)
+    - cycle-based updates for recurrent networks
+
+    This actor is designed to be used inside a cortex/agent actor network,
+    where synchronization and evolution are coordinated externally.
+    """
     def __init__(self, nid, cx_pid, af_name, input_idps, output_pids, bias: Optional[float] = None):
         super().__init__(f"Neuron-{nid}")
         self.nid = nid
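A minimal sketch of the weighted-sum-plus-activation behaviour the docstring describes (feed-forward only, with a bias). It is illustrative, not the actual `Neuron` actor.

```python
import math

def neuron_output(inputs: dict[str, list[float]],
                  weights: dict[str, list[float]],
                  bias: float) -> float:
    """Dot product of each input vector with its weight vector, plus bias, through tanh."""
    acc = bias
    for src_id, vec in inputs.items():
        acc += sum(w * x for w, x in zip(weights[src_id], vec))
    return math.tanh(acc)

# One upstream source "s1" sending a 2-element vector.
print(neuron_output({"s1": [0.5, -1.0]}, {"s1": [0.8, 0.2]}, bias=0.1))
```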
@@ -6,6 +6,42 @@ log = logging.getLogger(__name__)


 class Sensor(Actor):
+    """
+    Sensor actor that produces an input vector for the network and forwards it
+    to downstream actors (fanout).
+
+    A Sensor is an *input node* in the actor-based neural architecture. It does
+    not compute from other neurons; instead, it generates observations either
+    from:
+    - a local source (e.g., random numbers), or
+    - an external scape/environment actor.
+
+    When the cortex triggers a sensor with a ("sync",) message, the sensor:
+    1) calls `_sense()` to obtain a vector,
+    2) broadcasts that vector to all downstream targets in `fanout` via
+       ("forward", sid, vec).
+
+    Supported sensor types (controlled by `sname`):
+    - "rng":
+      Produces `vl` random floats in [0, 1).
+    - "xor_GetInput" (requires `scape`):
+      Requests an input vector from the scape and expects a ("percept", vec)
+      reply on its own inbox.
+    - "car_GetFeatures" (requires `scape`):
+      Requests a feature vector from the scape and normalizes it:
+      * clamps values to [-1, 1],
+      * pads with zeros or truncates to exactly `vl` elements.
+    - default:
+      Returns a zero vector of length `vl`.
+
+    Inbox message protocol:
+    - ("sync",):
+      Trigger sensing and forwarding to fanout.
+    - ("percept", vec):
+      Scape reply to a previous ("sense", sid, self) request (handled inside `_sense()`).
+    - ("terminate",):
+      Stop the actor.
+    """
     def __init__(self, sid, cx_pid, name, vector_length, fanout_pids, scape=None):
         super().__init__(f"Sensor-{sid}")
         self.sid = sid
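The "car_GetFeatures" normalization described above (clamp to [-1, 1], then pad or truncate to `vl`) could look like the following sketch, written under those assumptions rather than copied from the repository.

```python
def normalize_features(raw: list[float], vl: int) -> list[float]:
    """Clamp every value to [-1, 1] and force the vector length to exactly vl."""
    clamped = [max(-1.0, min(1.0, x)) for x in raw]
    if len(clamped) < vl:
        clamped += [0.0] * (vl - len(clamped))  # pad with zeros
    return clamped[:vl]                          # or truncate

print(normalize_features([2.0, -3.5, 0.25], vl=5))  # [1.0, -1.0, 0.25, 0.0, 0.0]
```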
@@ -12,7 +12,7 @@ log = logging.getLogger(__name__)


 async def run_car_test(
-    pop_id: str = "car_pop",
+    pop_id: str = "car_pop_transaction_test23",
     gens: int = 200
 ):
     monitor = await init_population((
@@ -1,4 +1,6 @@
|
|||||||
from neo4j import AsyncGraphDatabase
|
from typing import LiteralString, cast
|
||||||
|
|
||||||
|
from neo4j import AsyncGraphDatabase, Query
|
||||||
|
|
||||||
NEO4J_CONSTRAINTS = [
|
NEO4J_CONSTRAINTS = [
|
||||||
"CREATE CONSTRAINT cortex_id IF NOT EXISTS FOR (n:cortex) REQUIRE n.id IS UNIQUE",
|
"CREATE CONSTRAINT cortex_id IF NOT EXISTS FOR (n:cortex) REQUIRE n.id IS UNIQUE",
|
||||||
@@ -30,37 +32,129 @@ class Neo4jDB:
            return await s.run(cypher, **params)
    """

-    async def run_read(self, cypher: str, **params):
+    async def execute_write(self, work):
+        """
+        Method: execute_write
+
+        Description:
+        This method is used to execute a write operation on the database using the provided work.
+
+        Parameters:
+        self: The current instance of the class.
+        work: The work to be executed as a write operation on the database.
+        """
+        async with self._driver.session(database=self._database) as session:
+            return await session.execute_write(work)
+
+    async def run_read(self, cypher: LiteralString | Query, **params):
+        """
+        Method: run_read
+
+        Description:
+        This method allows running a read operation with the provided cypher query and parameters using the underlying
+        driver session. It returns the result of the read operation.
+
+        Parameters:
+        - cypher: str or Query object representing the cypher query to be executed.
+        - **params: Additional keyword arguments that represent parameters to be passed to the cypher query.
+
+        Returns:
+        Result of the read operation based on the provided cypher query and parameters.
+        """
         async with self._driver.session(database=self._database) as s:
             return await s.run(cypher, **params)

-    async def read_single(self, cypher: str, **params):
+    async def read_single(self, cypher: LiteralString | Query, **params):
+        """
+        Reads a single record from the database using the provided Cypher query and parameters.
+
+        :param cypher: The Cypher query to execute.
+        :param params: Additional parameters to be passed to the query.
+
+        :return: A single record retrieved from the database based on the given Cypher query and parameters.
+        """
         async with self._driver.session(database=self._database) as s:
             res = await s.run(cypher, **params)
             return await res.single()

-    async def read_all(self, cypher: str, **params):
+    async def read_all(self, cypher: LiteralString | Query, **params):
+        """
+        Reads all records from the database based on the given cypher query and parameters.
+
+        Parameters:
+        cypher (str): The Cypher query to execute.
+        **params: Additional parameters to pass to the Cypher query.
+
+        Return Type:
+        list: A list of retrieved records from the database.
+        """
         async with self._driver.session(database=self._database) as s:
             res = await s.run(cypher, **params)
             return [r async for r in res]

-    async def run_consume(self, cypher: str, **params):
+    async def run_consume(self, cypher: LiteralString | Query, **params):
+        """
+        Run a Cypher query and consume the result.
+
+        Parameters:
+        cypher : Union[LiteralString, Query]
+            The Cypher query to be executed.
+        **params : Any
+            Keyword arguments for query parameters.
+
+        Returns:
+        None
+        """
         async with self._driver.session(database=self._database) as s:
             res = await s.run(cypher, **params)
             return await res.consume()

     async def create_schema(self):
+        """
+        Creates database schema by running Neo4j constraints queries.
+
+        Parameters:
+        - self: instance of the class
+        - database: name of the database to be used for creating schema
+
+        Returns:
+        - None
+        """
         async with self._driver.session(database=self._database) as s:
             for stmt in NEO4J_CONSTRAINTS:
-                await s.run(stmt)
+                await s.run(cast(LiteralString, stmt))

     async def purge_all_nodes(self):
+        """
+        Purge all nodes in the database.
+
+        Method parameters:
+        - None
+
+        Returns:
+        - None
+        """
         async with self._driver.session(database=self._database) as s:
             await s.run("MATCH (n) DETACH DELETE n")

     async def drop_schema(self):
+        """
+        Drop the current schema by dropping all constraints in the database.
+
+        Parameters:
+        None
+
+        Return Type:
+        None
+        """
         async with self._driver.session(database=self._database) as s:
             res = await s.run("SHOW CONSTRAINTS")
             async for record in res:
                 name = record["name"]
-                await s.run(f"DROP CONSTRAINT {name} IF EXISTS")
+                await s.run(cast(LiteralString, f"DROP CONSTRAINT {name} IF EXISTS"))
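A short usage sketch of the read helpers shown in this hunk. It assumes an already-constructed `Neo4jDB` instance (the constructor is not part of the diff), and the Cypher queries and IDs are placeholders.

```python
import asyncio

async def main(db) -> None:
    # db is assumed to be an initialized Neo4jDB instance (construction not shown in the diff).
    await db.create_schema()

    rec = await db.read_single(
        "MATCH (a:agent {id:$aid}) RETURN a.fitness AS fitness",
        aid="agent-1",
    )
    print(rec["fitness"] if rec else None)

    rows = await db.read_all("MATCH (a:agent) RETURN a.id AS id")
    print([r["id"] for r in rows])

# asyncio.run(main(db))  # run with a concrete Neo4jDB instance
```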
@@ -12,7 +12,6 @@ from mathema.actors.cortex import Cortex
|
|||||||
from mathema.actors.sensor import Sensor
|
from mathema.actors.sensor import Sensor
|
||||||
from mathema.actors.neuron import Neuron
|
from mathema.actors.neuron import Neuron
|
||||||
from mathema.actors.actuator import Actuator
|
from mathema.actors.actuator import Actuator
|
||||||
from mathema.scape.scape import XorScape
|
|
||||||
from mathema.scape.car_racing import CarRacingScape
|
from mathema.scape.car_racing import CarRacingScape
|
||||||
from mathema.envs.openai_car_racing import CarRacing
|
from mathema.envs.openai_car_racing import CarRacing
|
||||||
|
|
||||||
@@ -20,6 +19,24 @@ log = logging.getLogger(__name__)


 class Exoself(Actor):
+    """
+    Exoself actor coordinating genotype-driven agent evaluation and learning.
+
+    The Exoself represents the *outer control loop* of an agent in the mathema
+    framework. It is responsible for:
+    - loading a genotype snapshot from persistent storage (Neo4j),
+    - constructing the executable phenotype (Sensors, Neurons, Actuators,
+      Cortex, and Scape),
+    - running repeated evaluation episodes,
+    - applying evolutionary weight perturbations,
+    - tracking and reporting fitness statistics,
+    - persisting improved parameters back to the genotype store.
+
+    Conceptually, Exoself corresponds to the “body/executive self” around a
+    cortex:
+    - the Cortex handles step-by-step execution and fitness accumulation,
+    - the Exoself handles episode-level control, learning, and persistence.
+    """
     def __init__(self, genotype: Dict[str, Any], file_name: Optional[str] = None):
         super().__init__("Exoself")
         self.monitor = None
@@ -46,7 +63,14 @@ class Exoself(Actor):
         self._perturbed: List[Neuron] = []

     @classmethod
-    async def start(cls, agent_id: str, monitor) -> "Exoself":
+    async def start(cls, agent_id: str, monitor):
+        """
+        Method start takes agent_id and monitor as parameters and is a class method. It initializes some attributes of
+        the class and creates a task to run the _runner coroutine. If an exception is caught during execution, a placeholder
+        _Dummy class is returned.
+        """
         try:
             g = await load_genotype_snapshot(agent_id)
         except Exception as e:
@@ -73,8 +97,8 @@ class Exoself(Actor):
             elapsed = 0.0
             try:
                 fitness, evals, cycles, elapsed = await self.train_until_stop()
-            except Exception as e:
-                log.error(f"[Exoself {self.agent_id}] CRASH in train_until_stop(): {e!r}")
+            except Exception as err:
+                log.error(f"[Exoself {self.agent_id}] CRASH in train_until_stop(): {err!r}")
                 fitness = float("-inf")
                 evals = int(self.eval_acc)
                 cycles = int(self.cycle_acc)
@@ -82,8 +106,8 @@ class Exoself(Actor):
             finally:
                 try:
                     await monitor.cast(("terminated", self.agent_id, fitness, evals, cycles, elapsed))
-                except Exception as e:
-                    log.error(f"[Exoself {self.agent_id}] FAILED to notify monitor: {e!r}")
+                except Exception as err:
+                    log.error(f"[Exoself {self.agent_id}] FAILED to notify monitor: {err!r}")

         loop = asyncio.get_running_loop()
         self._runner_task = loop.create_task(_runner(), name=f"Exoself-{self.agent_id}")
@@ -96,6 +120,10 @@ class Exoself(Actor):
         return Exoself(g, file_name=path)

     async def run(self):
+        """
+        Run loop of the exoself. Builds the network (mapping the genotype to its phenotype)
+        and waits for messages from the cortex.
+        """
         self.build_pid_map_and_spawn()

         self._link_cortex()
@@ -116,6 +144,20 @@ class Exoself(Actor):
             return

     async def run_evaluation(self):
+        """
+        Description:
+        Method to run evaluation of exoself by building network and linking the components,
+        spawning PID map, linking cortex, and running sensor, neuron, actuator actors.
+        It processes messages from the inbox and terminates upon specific tags.
+
+        Parameters:
+        None
+
+        Return:
+        Tuple containing evaluation results in the format (fitness: float, flag: int, cycles: int, elapsed: float)
+        """
         log.debug(f"exoself: build network and link...")
         self.build_pid_map_and_spawn()
         log.debug(f"exoself: link cortex...")
@@ -140,6 +182,17 @@ class Exoself(Actor):
             return float("-inf"), 0, 0, 0.0

     def build_pid_map_and_spawn(self):
+        """
+        Builds the PID map for the Cortex actor and spawns Neuron, Actuator, and Sensor actors.
+
+        Parameters:
+        - self: reference to the class instance
+
+        Returns:
+        - None
+        """
         cx = self.g["cortex"]
         self.cx_actor = Cortex(
             cid=cx["id"],
@@ -254,6 +307,7 @@ class Exoself(Actor):
             return []

     def _link_cortex(self):
+
         self.cx_actor.sensors = [a for a in self.sensor_actors if a]
         self.cx_actor.neurons = [a for a in self.neuron_actors if a]
         self.cx_actor.actuators = [a for a in self.actuator_actors if a]
@@ -263,6 +317,23 @@ class Exoself(Actor):
         self.tasks.append(asyncio.create_task(self.cx_actor.run()))

     async def train_until_stop(self):
+        """
+        train_until_stop method runs the training until the stop condition is met. It builds the PID map and spawns
+        necessary components, including sensor actors, neuron actors, and actuator actors. If an actuator scape is present,
+        it runs the actuator scape as well.
+
+        The method continuously waits for incoming messages from the inbox and processes them based on the message tag.
+        If the tag is "evaluation_completed," it calls the _on_evaluation_completed method with the received fitness, cycles,
+        and elapsed time. If the _on_evaluation_completed method returns a dictionary, the method returns a tuple containing
+        the highest fitness, evaluation accuracy, cycle accuracy, and time accuracy.
+
+        If the message tag is "terminate," the method calls the _terminate_all method to stop the training process and
+        returns a tuple with negative infinity for fitness and zeros for other metrics.
+
+        This method does not return any value explicitly during normal training execution.
+        """
         self.build_pid_map_and_spawn()
         self._link_cortex()

@@ -291,6 +362,30 @@ class Exoself(Actor):
             return float("-inf"), 0, 0, 0.0

     async def _on_evaluation_completed(self, fitness: float, cycles: int, elapsed: float):
+        """
+        This method _on_evaluation_completed is an asynchronous function that handles the completion
+        of an evaluation process.
+
+        Parameters:
+        - fitness: a float representing the fitness value obtained from the evaluation process.
+        - cycles: an integer indicating the number of cycles involved in the evaluation.
+        - elapsed: a float representing the elapsed time for the evaluation process.
+
+        This method updates internal counters and logs information about the evaluation process. It also performs
+        actions based on the evaluation results, such as updating the highest fitness value, backing up weights,
+        or restoring weights of neuron actors.
+
+        If the number of attempts reaches the maximum allowed attempts, it stops the evaluation process,
+        backs up the genotype, terminates all actors, and returns a dictionary containing information
+        about the best fitness value, evaluation count, cycle count, and accumulated time.
+
+        Finally, it calculates the perturbation probability, selects a subset of neuron actors for weight perturbation,
+        sends perturbation commands to selected neuron actors, and reactivates the cx_actor.
+
+        Note: This method does not have a return statement for successful execution. If an error occurs during the
+        episode_done message sending, it will ignore the exception. No additional
+        errors or exceptions are caught or handled in this method.
+        """
         self.eval_acc += 1
         self.cycle_acc += int(cycles)
         self.time_acc += float(elapsed)
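The docstring above mentions computing a perturbation probability and perturbing a random subset of neurons. A common scheme in DXNN-style systems is to perturb each neuron with probability 1/sqrt(N); the sketch below follows that assumption and is not the repository's exact rule.

```python
import math
import random

def select_for_perturbation(neuron_ids: list[str]) -> list[str]:
    """Pick a random subset of neurons, each chosen with probability 1/sqrt(N)
    (assumed scheme), but always at least one so every attempt changes something."""
    n = len(neuron_ids)
    p = 1.0 / math.sqrt(n) if n else 0.0
    chosen = [nid for nid in neuron_ids if random.random() < p]
    return chosen or ([random.choice(neuron_ids)] if neuron_ids else [])

random.seed(0)
print(select_for_perturbation([f"n{i}" for i in range(9)]))
```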
@@ -12,6 +12,16 @@ def generate_id() -> str:


 def get_InitSensor(morphology: MorphologyType):
+    """
+    Return the initial sensor configuration for a given morphology.
+
+    This helper selects a minimal starting set of sensors used to bootstrap a
+    new agent's morphology. It resolves the full sensor list for the provided
+    morphology and returns a list containing only the first sensor entry.
+
+    Raises:
+        ValueError: If the resolved morphology provides no sensors.
+    """
     sensors = get_Sensors(morphology)
     if not sensors:
         log.error("Morphology has no sensors.")
@@ -20,6 +30,17 @@ def get_InitSensor(morphology: MorphologyType):


 def get_InitActuator(morphology: MorphologyType):
+    """
+    Return the initial actuator configuration for a given morphology.
+
+    This helper selects a minimal starting set of actuators used to bootstrap
+    a new agent's morphology. It resolves the full actuator list for the
+    provided morphology and returns a list containing only the first actuator
+    entry.
+
+    Raises:
+        ValueError: If the resolved morphology provides no actuators.
+    """
     actuators = get_Actuators(morphology)
     if not actuators:
         log.error("Morphology has no actuators.")
@@ -28,22 +49,74 @@ def get_InitActuator(morphology: MorphologyType):


 def get_Sensors(morphology: MorphologyType) -> List[Dict[str, Any]]:
+    """
+    Resolve and return the list of sensor specifications for a morphology.
+
+    The morphology may be provided as:
+    - a callable that accepts a kind string ("sensors" or "actuators"),
+    - a registered string key mapping to a known morphology implementation,
+    - or a module-like object exposing a callable 'xor_mimic' function.
+
+    Args:
+        morphology: Morphology selector (callable, string key, or module-like).
+
+    Returns:
+        A list of sensor specification dictionaries, each describing a sensor
+        actor (e.g., name, vector length, and associated scape).
+    """
     fn = _resolve_morphology(morphology)
     return fn("sensors")


 def get_Actuators(morphology: MorphologyType) -> List[Dict[str, Any]]:
+    """
+    Resolve and return the list of actuator specifications for a morphology.
+
+    The morphology may be provided as:
+    - a callable that accepts a kind string ("sensors" or "actuators"),
+    - a registered string key mapping to a known morphology implementation,
+    - or a module-like object exposing a callable 'xor_mimic' function.
+
+    Args:
+        morphology: Morphology selector (callable, string key, or module-like).
+
+    Returns:
+        A list of actuator specification dictionaries, each describing an
+        actuator actor (e.g., name, vector length, and associated scape).
+    """
     fn = _resolve_morphology(morphology)
     return fn("actuators")


 def _resolve_morphology(morphology: MorphologyType) -> Callable[[str], List[Dict[str, Any]]]:
+    """
+    Resolve a morphology selector into a callable that can produce sensor or actuator specs.
+
+    This function normalizes different morphology representations into a
+    single callable interface: fn(kind) -> List[Dict[str, Any]]. Supported
+    inputs are:
+
+    - A callable: returned as-is.
+    - A string key: looked up in a registry of known morphologies.
+    - A module-like object: if it exposes a callable attribute 'xor_mimic',
+      that callable is used.
+
+    Args:
+        morphology: Morphology selector (callable, string key, or module-like).
+
+    Returns:
+        A callable that accepts "sensors" or "actuators" and returns the
+        corresponding specification list.
+
+    Raises:
+        ValueError: If a string key is provided but not registered.
+        TypeError: If morphology cannot be resolved to a valid callable.
+    """
     if callable(morphology):
         return morphology

     if isinstance(morphology, str):
         reg = {
-            "xor_mimic": xor_mimic,
             "car_racing_features": car_racing_features
         }
         if morphology in reg:
@@ -62,31 +135,23 @@ def _resolve_morphology(morphology: MorphologyType) -> Callable[[str], List[Dict
     raise TypeError("morphology must be a callable, a module with 'xor_mimic', or a registered string key")


-def xor_mimic(kind: str) -> List[Dict[str, Any]]:
-    if kind == "sensors":
-        return [
-            {
-                "name": "xor_GetInput",
-                "vector_length": 2,
-                "scape": "xor_sim"
-            }
-        ]
-    elif kind == "actuators":
-        return [
-            {
-                "name": "xor_SendOutput",
-                "vector_length": 1,
-                "scape": "xor_sim"
-            }
-        ]
-    else:
-        log.error(f"xor_mimic: unsupported kind '{kind}', expected 'sensors' or 'actuators'")
-        raise ValueError(f"xor_mimic: unsupported kind '{kind}', expected 'sensors' or 'actuators'")
-
-
 def car_racing_features(kind: str) -> List[Dict[str, Any]]:
     """
-    car racing morphology
+    Provide a feature-based CarRacing morphology specification.
+
+    This morphology exposes:
+    - One sensor ("car_GetFeatures") producing a fixed-length feature vector
+      derived from a look-ahead horizon plus additional scalar features.
+    - One actuator ("car_ApplyAction") consuming a 3-element action vector.
+
+    Args:
+        kind: Either "sensors" or "actuators".
+
+    Returns:
+        A list containing a single specification dictionary for the requested kind.
+
+    Raises:
+        ValueError: If kind is not "sensors" or "actuators".
     """
     LOOK_AHEAD = 10
     feature_len = LOOK_AHEAD + 6
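Given the docstring and the constants visible in the hunk (LOOK_AHEAD = 10, feature_len = LOOK_AHEAD + 6), the returned specification lists plausibly look like the following sketch. The dictionary keys mirror the removed `xor_mimic` helper; the `"scape"` value is an assumption.

```python
def car_racing_features_sketch(kind: str):
    """Illustrative reconstruction of the CarRacing spec lists (keys follow xor_mimic above)."""
    LOOK_AHEAD = 10
    feature_len = LOOK_AHEAD + 6  # 16 features: look-ahead horizon plus extra scalars
    if kind == "sensors":
        return [{"name": "car_GetFeatures", "vector_length": feature_len, "scape": "car_racing"}]
    if kind == "actuators":
        return [{"name": "car_ApplyAction", "vector_length": 3, "scape": "car_racing"}]
    raise ValueError(f"unsupported kind '{kind}'")

print(car_racing_features_sketch("sensors"))
```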
@@ -21,31 +21,17 @@ from mathema.genotype.neo4j.genotype import (
     delete_agent,
     update_fingerprint,
 )
-from mathema.genotype.neo4j.genotype_mutator import GenotypeMutator
+from mathema.genotype.neo4j.genotype_mutator_tx import GenotypeMutator
 from mathema.utils import stats
 from mathema.core.exoself import Exoself
+from mathema.settings import (EFF, SURVIVAL_PERCENTAGE, SPECIE_SIZE_LIMIT,
+                              INIT_SPECIE_SIZE, GENERATION_LIMIT, EVALUATIONS_LIMIT,
+                              FITNESS_GOAL, INIT_POPULATION_ID)

 log = logging.getLogger(__name__)

 OpTag = Literal["continue", "pause", "done"]
 SelectionAlgorithm = Literal["competition", "top3"]

-EFF: float = 0.05
-SURVIVAL_PERCENTAGE: float = 0.5
-SPECIE_SIZE_LIMIT: int = 10
-INIT_SPECIE_SIZE: int = 10
-
-GENERATION_LIMIT: int = 1000
-EVALUATIONS_LIMIT: int = 100_000
-FITNESS_GOAL: float = 6000
-
-INIT_POPULATION_ID: str = "test"
-INIT_OP_MODE: str = "gt"
-INIT_SELECTION_ALGO: SelectionAlgorithm = "competition"
-INIT_CONSTRAINTS: list[dict] = [
-    {"morphology": "xor_mimic", "neural_afs": ["tanh"]},
-]
-
 EXOSELF_START: Optional[Callable[[str, "PopulationMonitor"], Awaitable[Any]]] = None


@@ -92,6 +78,23 @@ class MonitorState:


 async def _population_aggregate(population_id: str) -> dict:
+    """
+    Retrieve fitness values for agents in a population and calculate aggregate statistics.
+
+    Parameters:
+    population_id (str): The unique identifier of the population.
+
+    Returns:
+    Dictionary: A dictionary containing the following aggregate statistics:
+    - "cum_fitness": Sum of all fitness values.
+    - "avg": Average fitness value.
+    - "std": Standard deviation of fitness values.
+    - "best": Maximum fitness value.
+    - "min": Minimum fitness value.
+    - "n": Number of fitness values.
+    - "agents": Number of agents in the population.
+    """
     rows = await _read_all("""
         MATCH (a:agent {population_id:$pid})
         RETURN collect(coalesce(toFloat(a.fitness),0.0)) AS fs
@@ -116,12 +119,170 @@ async def _population_aggregate(population_id: str) -> dict:
     }


-class PopulationMonitor:
-    """
-    Orchestrates generations: spawn -> wait -> select/mutate -> next.
-    Expects exoself.start(agent_id, monitor) and exoself will cast(("terminated", aid, fitness, eval_acc, cycle_acc, time_acc)).
+async def _best_fitness_in_population(population_id: str) -> float:
     """
+    Get the best fitness score in a given population based on the maximum fitness value of all agents.
+
+    Parameters:
+    - population_id (str): The ID of the population to search for.
+
+    Returns:
+    - float: The best fitness value found in the population, or 0.0 if no fitness values are found.
+    """
+    rows = await _read_all("""
+        MATCH (a:agent {population_id:$pid})
+        RETURN max(toFloat(a.fitness)) AS best
+    """, pid=str(population_id))
+    return float(rows[0]["best"] or 0.0) if rows else 0.0
+
+
+async def _calculate_energy_cost(population_id: str) -> float:
+    """
+    Calculate the energy cost based on the fitness of agents and the number of neurons
+    in the cortex associated with the given population ID.
+
+    Parameters:
+    population_id (str): The ID of the population for which the energy cost needs to be calculated.
+
+    Returns:
+    float: The calculated energy cost.
+    """
+    rows = await _read_all("""
+        MATCH (a:agent {population_id:$pid})-[:OWNS]->(cx:cortex)
+        OPTIONAL MATCH (cx)-[:HAS]->(n:neuron)
+        RETURN sum(coalesce(toFloat(a.fitness),0.0)) AS totE,
+               count(n) AS totN
+    """, pid=str(population_id))
+    if not rows:
+        return 0.0
+    totE = float(rows[0]["totE"] or 0.0)
+    totN = int(rows[0]["totN"] or 0)
+    return (totE / totN) if totN > 0 else 0.0
+
+
+async def _construct_agent_summaries(agent_ids: Sequence[str]):
+    """
+    Constructs summaries for the given list of agent IDs.
+
+    Parameters:
+    agent_ids (Sequence[str]): A list of agent IDs for which summaries are to be constructed.
+
+    Return Type:
+    List[Tuple[float, int, str]]: A list of tuples where each tuple contains the agent's fitness as a float,
+    the count of neurons as an integer, and the agent ID as a string.
+    """
+    if not agent_ids:
+        return []
+    rows = await _read_all("""
+        UNWIND $ids AS aid
+        MATCH (a:agent {id:aid})-[:OWNS]->(cx:cortex)
+        OPTIONAL MATCH (cx)-[:HAS]->(n:neuron)
+        RETURN aid AS id,
+               toFloat(a.fitness) AS f,
+               count(n) AS k
+    """, ids=[str(x) for x in agent_ids])
+    out: List[Tuple[float, int, str]] = []
+    for r in rows:
+        f = float(r["f"]) if r["f"] is not None else 0.0
+        k = int(r["k"])
+        out.append((f, k, str(r["id"])))
+    return out
+
+
+async def _calculate_alotments(valid: List[Tuple[float, int, str]],
+                               neural_energy_cost: float
+                               ):
+    """
+    Calculate allotments based on fitness values and neural energy cost.
+
+    Parameters:
+    valid (List[Tuple[float, int, str]]): A list of tuples containing fitness, total neurons, and agent ID.
+    neural_energy_cost (float): The energy cost for neural activity.
+
+    Returns:
+    Tuple[List[Tuple[float, float, int, str]], float]: A tuple containing a list of tuples with allotments,
+    fitness values, total neurons, and agent IDs, and the total allotments for the new population.
+    """
+    acc: List[Tuple[float, float, int, str]] = []
+    new_pop_acc = 0.0
+    for (fit, tn, aid) in valid:
+        neural_alot = (fit / neural_energy_cost) if neural_energy_cost > 0 else 0.0
+        mutant_alot = (neural_alot / max(tn, 1))
+        new_pop_acc += mutant_alot
+        acc.append((mutant_alot, fit, tn, aid))
+    log.debug(f"NewPopAcc: {new_pop_acc:.4f}")
+    return acc, new_pop_acc
+
+
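A quick worked example of the allotment formula in `_calculate_alotments` above, with made-up numbers: each agent's fitness is divided by the population-wide energy cost, then by its neuron count.

```python
valid = [(10.0, 5, "agent-a"), (6.0, 2, "agent-b")]  # (fitness, neuron count, agent id)
neural_energy_cost = 2.0

for fit, tn, aid in valid:
    neural_alot = fit / neural_energy_cost if neural_energy_cost > 0 else 0.0
    mutant_alot = neural_alot / max(tn, 1)
    print(aid, mutant_alot)  # agent-a 1.0, agent-b 1.5
```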
+async def _extract_specie_agent_ids(specie_id: str) -> List[str]:
+    """
+    Extracts the IDs of agents associated with a given specie.
+
+    :param specie_id: The ID of the specie for which to retrieve agent IDs
+    :type specie_id: str
+    :return: List of agent IDs associated with the specie
+    :rtype: List[str]
+    """
+    rows = await _read_all("""
+        MATCH (:specie {id:$sid})-[:HAS]->(a:agent) RETURN a.id AS id
+    """, sid=str(specie_id))
+    return [str(r["id"]) for r in rows]
+
+
+async def _extract_specie_ids(population_id: str) -> List[str]:
+    """
+    Extract specie IDs from Neo4j database for a given population ID.
+
+    :param population_id: str - The population ID for which to extract specie IDs.
+    :return: List[str] - A list of specie IDs associated with the given population ID.
+    """
+    rows = await _read_all("""
+        MATCH (s:specie {population_id:$pid}) RETURN s.id AS id ORDER BY id
+    """, pid=str(population_id))
+    return [str(r["id"]) for r in rows]
+
+
+async def _ensure_specie_node(specie_id: str, population_id: str, constraint_json: str):
+    """
+    Ensure that the specie node exists. Used to keep the agent database coherent.
+
+    Parameters:
+    specie_id (str): The ID of the specie to create.
+    constraint_json (str): The JSON string of the constraints for the specie.
+
+    Returns:
+    None
+    """
+    await _run("""
+        MERGE (s:specie {id:$sid})
+        SET s.population_id = $pid,
+            s.constraint_json = $cjson
+    """, sid=str(specie_id), pid=str(population_id), cjson=str(constraint_json))
+
+
+async def _extract_agent_ids(population_id: str) -> List[str]:
+    """
+    Extracts the agent IDs associated with a given population ID.
+
+    Parameters:
+    population_id (str): The ID of the population to extract agent IDs for.
+
+    Returns:
+    List[str]: A list of agent IDs as strings extracted from the database based on the provided population ID.
+    """
+    rows = await _read_all("MATCH (a:agent {population_id:$pid}) RETURN a.id AS id ORDER BY id",
+                           pid=str(population_id))
+    return [str(r["id"]) for r in rows]
+
+
+async def _ensure_population_node(population_id: str) -> None:
+    await _run("MERGE (:population {id:$pid})", pid=str(population_id))
+
+
+class PopulationMonitor:
     def __init__(self, op_mode: str, population_id: str, selection_algorithm: SelectionAlgorithm):
         self.state = MonitorState(op_mode, population_id, selection_algorithm)
         self.inbox: asyncio.Queue = asyncio.Queue()
@@ -136,6 +297,7 @@ class PopulationMonitor:
         self._t0 = None
         self._best_so_far = float("-inf")
         self.train_time_sec = 30*60
+        self._deadline_task = None

         # logging file handles
         self._episodes_f = None
@@ -144,6 +306,29 @@ class PopulationMonitor:
     @classmethod
     async def start(cls, op_mode: str, population_id: str,
                     selection_algorithm: SelectionAlgorithm) -> "PopulationMonitor":
+        """
+        Create and start a new population monitor instance.
+
+        This class method initializes a PopulationMonitor for the given
+        population and selection algorithm, starts its asynchronous message
+        processing loop, and prepares all logging and bookkeeping required for a
+        training run.
+
+        Specifically, it:
+        1. Instantiates the monitor with the specified operation mode, population
+           identifier, and selection algorithm.
+        2. Launches the internal actor-style run loop as an asyncio task.
+        3. Registers an atexit hook to collect generation-level statistics for
+           post-run analysis.
+        4. Initializes timing, assigns a unique run identifier, and creates the
+           corresponding output directory.
+        5. Opens and initializes episode-level and progress-level CSV log files.
+        6. Starts a deadline task to enforce the configured training time limit.
+        7. Initializes and activates the first generation of the population.
+
+        The method returns the fully initialized and running PopulationMonitor
+        instance, which can then be controlled via its public interface.
+        """
         self = cls(op_mode, population_id, selection_algorithm)
         self._task = asyncio.create_task(self._run(), name=f"PopulationMonitor-{population_id}")
         stats.register_atexit(population_id, lambda: list(self.state.rows))
@@ -173,11 +358,48 @@ class PopulationMonitor:
     async def cast(self, msg: tuple) -> None:
         await self.inbox.put(msg)

-    async def stop(self, mode: Literal["normal", "shutdown"] = "normal") -> None:
+    async def stop(self, mode: Literal["normal", "shutdown"] = "normal"):
+        """
+        Request termination of the population monitor and await shutdown.
+
+        This method sends a stop command to the monitor's internal message loop
+        and blocks until the monitor has completed its graceful shutdown
+        procedure. The optional mode parameter indicates the reason for stopping
+        (e.g. normal completion versus external shutdown) and is forwarded to
+        the internal stop handler.
+
+        It provides a synchronous-style interface for external components to
+        ensure that all agents are terminated, logs are flushed, and final
+        results are persisted before control returns to the caller.
+        """
         await self.inbox.put(("stop", mode))
         await self._stopped_evt.wait()

-    async def _run(self) -> None:
+    async def _run(self):
+        """
+        Main asynchronous message-processing loop of the population monitor.
+
+        This method continuously consumes messages from the internal inbox and
+        dispatches them to the appropriate handlers, coordinating the lifecycle
+        of an evolutionary run. It implements an actor-style control loop with
+        the following responsibilities:
+
+        1. Reacts to control messages:
+           - "stop": triggers graceful shutdown and finalization of the run.
+           - "pause": requests a pause after the current generation completes.
+           - "continue": resumes execution and initializes a new generation
+             after a pause.
+        2. Handles evaluation-related events:
+           - "episode_done": logs completion of a single evaluation episode.
+           - "terminated": processes termination of an agent and updates
+             generation-level state.
+        3. Maintains correct sequencing of generations and respects the current
+           operational mode (continue, pause, done).
+
+        The loop runs until a stop message is received or the task is cancelled.
+        On exit, it signals completion via an internal stopped-event to allow
+        other components to await full shutdown.
+        """
         try:
             while True:
                 msg = await self.inbox.get()
@@ -199,14 +421,24 @@ class PopulationMonitor:
                 elif tag == "terminated":
                     await self._handle_agent_terminated(*msg[1:])
                 else:

                     pass
         finally:
             self._stopped_evt.set()

     async def _init_generation(self) -> None:
+        """
+        Initialize the agent generation process by extracting agent ids, setting initial values for the state object,
+        and starting agents asynchronously. If any errors occur during the agent initialization process,
+        the corresponding agent's fitness is set to negative infinity.
+
+        Parameters:
+        - self: The reference to the current object instance.
+
+        Returns:
+        - None
+        """
         s = self.state
-        agent_ids = await self._extract_agent_ids(s.population_id)
+        agent_ids = await _extract_agent_ids(s.population_id)
         s.agent_ids = agent_ids
         s.tot_agents = len(agent_ids)
         s.agents_left = 0
@@ -227,7 +459,26 @@ class PopulationMonitor:
         log.info(f"*** Population monitor started: pop={s.population_id}, mode={s.op_mode}, "
                  f"selection={s.selection_algorithm}, agents={s.tot_agents}")

-    async def _handle_stop(self, mode: str) -> None:
+    async def _handle_stop(self, mode: str):
+        """
+        Gracefully stop and finalize the population monitoring run.
+
+        This method is invoked when the monitoring loop receives a stop signal.
+        It performs an orderly shutdown of the ongoing evolutionary run by:
+
+        1. Terminating all currently active agent handlers or actors.
+        2. Flushing and closing episode-level and progress-level log files.
+        3. Cancelling any active deadline or time-limit task.
+        4. Finalizing the global best-so-far fitness value based on the current
+           population state.
+        5. Writing a run summary to disk, including identifiers, training
+           duration, generation count, accumulated evaluation statistics, and
+           final best-so-far fitness.
+        6. Clearing internal file handles and logging the shutdown event.
+
+        The method ensures that partial results are safely persisted and that
+        all asynchronous resources are released before the run terminates.
+        """
         s = self.state

         for (_aid, h) in list(s.active):
@@ -246,20 +497,76 @@ class PopulationMonitor:
|
|||||||
f.close()
|
f.close()
|
||||||
except Exception:
|
except Exception:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
try:
|
||||||
|
if getattr(self, "_deadline_task", None):
|
||||||
|
self._deadline_task.cancel()
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
try:
|
||||||
|
best = await _best_fitness_in_population(self.state.population_id)
|
||||||
|
self._best_so_far = max(self._best_so_far, float(best))
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
summary = {
|
||||||
|
"run_id": self.run_id,
|
||||||
|
"population_id": self.state.population_id,
|
||||||
|
"train_time_sec": self.train_time_sec,
|
||||||
|
"gens": self.state.pop_gen,
|
||||||
|
"eval_acc": self.state.eval_acc,
|
||||||
|
"best_so_far": self._best_so_far,
|
||||||
|
"op_tag": self.state.op_tag,
|
||||||
|
}
|
||||||
|
with open(f"runs/{self.run_id}/summary.json", "w") as f:
|
||||||
|
json.dump(summary, f, indent=2)
|
||||||
|
|
||||||
self._episodes_f = None
|
self._episodes_f = None
|
||||||
self._progress_f = None
|
self._progress_f = None
|
||||||
log.info(f"*** Population_Monitor:{s.population_id} shutdown. op_tag={s.op_tag}, op_mode={s.op_mode}")
|
log.info(f"*** Population_Monitor:{s.population_id} shutdown. op_tag={s.op_tag}, op_mode={s.op_mode}")
|
||||||
|
|
||||||
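For post-hoc analysis it can help to read those summaries back; a minimal sketch, assuming the `runs/<run_id>/summary.json` layout and the keys written above:

```python
import json
from pathlib import Path

def load_run_summary(run_id: str, runs_dir: str = "runs") -> dict:
    """Load the summary.json written by _handle_stop for one run."""
    path = Path(runs_dir) / run_id / "summary.json"
    with path.open() as f:
        summary = json.load(f)
    # Keys as written above: run_id, population_id, train_time_sec,
    # gens, eval_acc, best_so_far, op_tag.
    return summary

# e.g. collect best_so_far across all runs:
# bests = [load_run_summary(p.name)["best_so_far"] for p in Path("runs").iterdir()]
```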
-await neo4j.close()

-async def _handle_episode_done(self, agent_id: str, ep_return: float, eval_idx: int) -> None:
+async def _handle_episode_done(self, agent_id: str, ep_return: float, eval_idx: int):
+"""
+Handle completion of a single evaluation episode for an agent.
+
+This method is called whenever an agent finishes one evaluation episode
+within the current generation. It records episode-level information for
+later analysis and monitoring but does not alter population-level state
+or control flow.
+
+Specifically, it:
+1. Computes the elapsed wall-clock time since the start of the run.
+2. Logs the episode result (time, generation index, agent identifier,
+   evaluation index, and episode return) to the episode-level log file,
+   if enabled.
+"""
s = self.state
t_sec = asyncio.get_running_loop().time() - (self._t0 or 0.0)
if self._episodes_f is not None:
-self._episodes_f.write(f"{t_sec:.6f},{s.pop_gen+1},{agent_id},{eval_idx},{ep_return:.10f}\n")
+self._episodes_f.write(f"{t_sec:.6f},{s.pop_gen},{agent_id},{eval_idx},{ep_return:.10f}\n")
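The episode log written above is a plain CSV line per evaluation episode (`t_sec,generation,agent_id,eval_idx,ep_return`); a small, hypothetical reader for that format:

```python
import csv

def read_episode_log(path: str) -> list[dict]:
    """Parse rows written as: t_sec,gen,agent_id,eval_idx,ep_return."""
    rows = []
    with open(path, newline="") as f:
        for t_sec, gen, agent_id, eval_idx, ep_return in csv.reader(f):
            rows.append({
                "t_sec": float(t_sec),
                "gen": int(gen),
                "agent_id": agent_id,
                "eval_idx": int(eval_idx),
                "ep_return": float(ep_return),
            })
    return rows
```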
async def _handle_agent_terminated(self, agent_id: str, fitness: float, agent_eval: int, agent_cycle: int,
-agent_time: int) -> None:
+agent_time: int):
+"""
+Handle termination of a single agent evaluation.
+
+This method is invoked when an agent finishes its evaluation within the
+current generation. It performs the following actions:
+
+1. Accumulates per-agent evaluation statistics (evaluation count, cycle
+   count, and execution time) into generation-level counters.
+2. Decrements the number of remaining active agents in the generation.
+3. Persists the agent's final fitness value to the underlying genotype
+   storage.
+4. Removes the terminated agent from the list of currently active agents.
+5. Logs progress information, including how many agents are still active.
+6. Triggers generation finalization once all agents in the generation
+   have completed their evaluations.
+
+The method is fully asynchronous and is typically called by the actor
+supervision or monitoring component when an agent signals termination.
+"""
log.info(f"agent terminated: , {agent_id}, {fitness}, {agent_eval}, {agent_cycle}, {agent_time}")
s = self.state

@@ -280,6 +587,31 @@ class PopulationMonitor:
await self._generation_finished()

async def _generation_finished(self) -> None:
+"""
+Handle the end of a population generation.
+
+This method is called once all agents of the current generation have
+completed their evaluations. It performs the following steps:
+
+1. Mutates and selects the next generation according to the configured
+   selection algorithm and species size limit.
+2. Increments the generation counter and logs aggregated population
+   statistics (best, average, standard deviation of fitness).
+3. Updates time-based metrics, including elapsed wall-clock time,
+   normalized training time, and the global best-so-far fitness.
+4. Writes a progress entry to the progress log (CSV-style) and appends
+   a detailed statistics record to the in-memory history.
+5. Signals the end of the generation via an asyncio.Event to unblock
+   dependent tasks.
+6. Checks termination conditions, including generation limit,
+   evaluation limit, fitness goal, and wall-clock time limit.
+7. Depending on the current operation tag and termination conditions,
+   either stops the run, pauses execution, or initializes the next
+   generation.
+
+The method is fully asynchronous and intended to be called from the
+population monitoring loop that coordinates evolutionary training.
+"""
s = self.state
await self._mutate_population(s.population_id, SPECIE_SIZE_LIMIT, s.selection_algorithm)
s.pop_gen += 1
@@ -326,7 +658,7 @@ class PopulationMonitor:
f"({elapsed:.1f}s >= {self.train_time_sec}s), stopping run"
)

-best = await self._best_fitness_in_population(s.population_id)
+best = await _best_fitness_in_population(s.population_id)
end_condition = (s.pop_gen >= GENERATION_LIMIT) or (s.eval_acc >= EVALUATIONS_LIMIT) or (best > FITNESS_GOAL) or time_limit_reached
if s.pop_gen >= GENERATION_LIMIT:
log.info(f"reached generation limit {GENERATION_LIMIT}, stopping")
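The end condition combines four independent stop criteria; a standalone sketch of the same predicate (the limit values below are illustrative, not the project's configured constants):

```python
def should_stop(pop_gen: int, eval_acc: int, best: float, elapsed: float,
                gen_limit: int = 100, eval_limit: int = 50_000,
                fitness_goal: float = 900.0, time_limit: float = 1800.0) -> bool:
    """Mirror of end_condition above: any single criterion ends the run."""
    return (
        pop_gen >= gen_limit
        or eval_acc >= eval_limit
        or best > fitness_goal
        or elapsed >= time_limit
    )
```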
@@ -345,45 +677,63 @@ class PopulationMonitor:

await self._init_generation()

-async def _ensure_population_node(self, population_id: str) -> None:
-await _run("MERGE (:population {id:$pid})", pid=str(population_id))
-
-async def _ensure_specie_node(self, specie_id: str, population_id: str, constraint_json: str) -> None:
-await _run("""
-MERGE (s:specie {id:$sid})
-SET s.population_id = $pid,
-s.constraint_json = $cjson
-""", sid=str(specie_id), pid=str(population_id), cjson=str(constraint_json))
-
-async def _extract_agent_ids(self, population_id: str) -> List[str]:
-rows = await _read_all("MATCH (a:agent {population_id:$pid}) RETURN a.id AS id ORDER BY id",
-pid=str(population_id))
-return [str(r["id"]) for r in rows]
-
-async def _extract_specie_ids(self, population_id: str) -> List[str]:
-rows = await _read_all("""
-MATCH (s:specie {population_id:$pid}) RETURN s.id AS id ORDER BY id
-""", pid=str(population_id))
-return [str(r["id"]) for r in rows]
-
-async def _extract_specie_agent_ids(self, specie_id: str) -> List[str]:
-rows = await _read_all("""
-MATCH (:specie {id:$sid})-[:HAS]->(a:agent) RETURN a.id AS id
-""", sid=str(specie_id))
-return [str(r["id"]) for r in rows]
-
async def _mutate_population(self, population_id: str, keep_tot: int,
-selection_algorithm: SelectionAlgorithm) -> None:
-energy_cost = await self._calculate_energy_cost(population_id)
-specie_ids = await self._extract_specie_ids(population_id)
+selection_algorithm: SelectionAlgorithm):
+"""
+Apply evolutionary mutation and selection to an entire population.
+
+This method orchestrates the mutation step at the population level at the
+end of a generation. It first computes shared contextual information,
+such as the current energy cost of the population, and then iterates over
+all species belonging to the population.
+
+For each species, it delegates the actual selection and mutation process
+to the species-level mutation routine, ensuring that the total number of
+individuals retained respects the configured population size limit and
+the chosen selection algorithm.
+
+The method is asynchronous and intended to be invoked as part of the
+generation finalization phase of the evolutionary loop.
+"""
+energy_cost = await _calculate_energy_cost(population_id)
+specie_ids = await _extract_specie_ids(population_id)
for sid in specie_ids:
await self._mutate_specie(sid, keep_tot, energy_cost, selection_algorithm)

async def _mutate_specie(self, specie_id: str, population_limit: int, neural_energy_cost: float,
-selection_algorithm: SelectionAlgorithm) -> None:
-agent_ids = await self._extract_specie_agent_ids(specie_id)
-summaries = await self._construct_agent_summaries(agent_ids)
+selection_algorithm: SelectionAlgorithm):
+"""
+Mutate and repopulate a single species according to the chosen selection strategy.
+
+This method performs the end-of-generation evolutionary step for one
+species ("specie") within a population. It:
+
+1. Loads all agents belonging to the species and constructs fitness
+   summaries, sorting agents by fitness in descending order.
+2. Selects survivors and removes non-survivors from both the genotype
+   store and the species membership relationship.
+3. Creates new offspring agents to refill the species up to the target
+   population limit, using one of two selection algorithms:
+   - "competition": keeps a fraction of the best agents (SURVIVAL_PERCENTAGE),
+     ranks them by an efficiency-weighted score (fitness adjusted by
+     network size/complexity), deletes the rest, and produces offspring
+     via the competition routine.
+   - "top3": keeps only the top 3 agents, deletes all others, and produces
+     the required number of offspring from these champions.
+4. Computes aggregate fitness statistics for the species (mean, standard
+   deviation, min, max) and updates the species node with these values as
+   well as the list of champion agent IDs.
+5. Updates the species' innovation factor based on whether the current
+   best fitness exceeds the stored innovation threshold.
+6. Refreshes fingerprints for newly created agents to keep derived
+   metadata consistent.
+
+The method is asynchronous and is intended to be called from the
+population-level mutation step after a generation has completed.
+"""
+agent_ids = await _extract_specie_agent_ids(specie_id)
+
+summaries = await _construct_agent_summaries(agent_ids)
summaries.sort(key=lambda t: t[0], reverse=True)

if not summaries:
@@ -452,31 +802,63 @@ class PopulationMonitor:

async def _competition(self, valid: List[Tuple[float, int, str]], population_limit: int,
neural_energy_cost: float, specie_id: str) -> List[str]:
-alot, est = await self._calculate_alotments(valid, neural_energy_cost)
+"""
+Perform competition-based reproduction for a species.
+
+This method implements the competition selection strategy for a single
+species. Given a set of valid (surviving) agents, it:
+
+1. Computes reproduction allotments for each agent based on fitness and
+   neural energy cost, yielding a total estimated population size.
+2. Derives a normalization factor to scale these allotments so that the
+   resulting number of survivors and offspring respects the configured
+   population size limit.
+3. Delegates to the survivor-gathering routine to retain parent agents
+   and generate the appropriate number of mutant offspring.
+
+The method returns a list of agent identifiers representing the new
+species population after competition-based selection and mutation. It is
+asynchronous and intended to be used during the species-level mutation
+phase of the evolutionary cycle.
+"""
+alot, est = await _calculate_alotments(valid, neural_energy_cost)
normalizer = (est / population_limit) if population_limit > 0 else 1.0
log.debug(f"Population size normalizer: {normalizer:.4f}")
return await self._gather_survivors(alot, normalizer, specie_id)

-async def _calculate_alotments(self, valid: List[Tuple[float, int, str]], neural_energy_cost: float
-) -> Tuple[List[Tuple[float, float, int, str]], float]:
-acc: List[Tuple[float, float, int, str]] = []
-new_pop_acc = 0.0
-for (fit, tn, aid) in valid:
-neural_alot = (fit / neural_energy_cost) if neural_energy_cost > 0 else 0.0
-mutant_alot = (neural_alot / max(tn, 1))
-new_pop_acc += mutant_alot
-acc.append((mutant_alot, fit, tn, aid))
-log.debug(f"NewPopAcc: {new_pop_acc:.4f}")
-return acc, new_pop_acc
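The removed `_calculate_alotments` helper encodes the competition strategy's efficiency weighting: fitness divided by the population-wide energy cost, divided again by the agent's neuron count, then scaled so the species stays within its size limit. A self-contained sketch of that arithmetic, assuming plain `(fitness, n_neurons, agent_id)` tuples:

```python
def allotments(valid, neural_energy_cost, population_limit):
    """valid: list of (fitness, n_neurons, agent_id) tuples, best first."""
    alot, total = [], 0.0
    for fit, tn, aid in valid:
        neural_alot = fit / neural_energy_cost if neural_energy_cost > 0 else 0.0
        mutant_alot = neural_alot / max(tn, 1)  # penalize larger networks
        total += mutant_alot
        alot.append((mutant_alot, fit, tn, aid))
    normalizer = total / population_limit if population_limit > 0 else 1.0
    # offspring count per agent, with the same "at least one survivor" floor
    return {aid: max(1, int(round(ma / normalizer))) if normalizer > 0 else 1
            for ma, _, _, aid in alot}

# allotments([(90.0, 3, "a"), (60.0, 6, "b")], neural_energy_cost=10.0, population_limit=5)
# -> {"a": 4, "b": 1}
```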
async def _gather_survivors(self, alot: List[Tuple[float, float, int, str]], normalizer: float, specie_id: str) -> \
List[str]:
+"""
+Collect survivors and generate offspring for a species based on normalized allotments.
+
+This method takes a list of agents with precomputed allotment scores and
+determines how many times each agent should survive or reproduce into
+the next generation. For each agent, it:
+
+1. Computes a reproduction count by normalizing the agent's allotment
+   value against a global normalizer.
+2. Enforces a hard safety constraint to ensure that each listed agent
+   contributes at least one survivor to the next generation.
+3. Ensures that the surviving agent remains linked to the species.
+4. Creates the required number of mutant offspring clones for the agent
+   to satisfy its allotted reproduction count.
+5. Removes agents that would otherwise receive zero allotment from the
+   species and deletes their genotype representation.
+
+The method returns a list of agent identifiers representing the newly
+formed species population, including both surviving parents and newly
+created offspring. It is asynchronous and intended to be used as part of
+the species-level reproduction process.
+"""
new_ids: List[str] = []
for (ma, fit, tn, aid) in alot:
count = int(round(ma / normalizer)) if normalizer > 0 else 0
+# hard safety: keep at least one survivor
+if count <= 0:
+count = 1
log.info(f"Agent {aid}: normalized allotment = {count}")
+# TODO: this is redundant!
if count >= 1:

await _run("""
MATCH (s:specie {id:$sid}), (a:agent {id:$aid})
MERGE (s)-[:HAS]->(a)
@@ -497,6 +879,26 @@ class PopulationMonitor:
return new_ids

async def _create_mutant_offspring(self, parent_id: str, specie_id: str) -> str:
+"""
+Create a mutated offspring from a parent agent.
+
+This method clones an existing agent to create a new offspring and
+integrates it into the evolutionary population. It performs the
+following steps:
+
+1. Generates a new unique agent identifier and clones the parent agent's
+   genotype into the offspring.
+2. Inherits and sets species and population identifiers for the cloned
+   agent and clears its fitness value to mark it as unevaluated.
+3. Establishes the species membership relationship between the offspring
+   agent and the corresponding species.
+4. Applies a single mutation step to the offspring's genotype to
+   introduce variation.
+
+The method returns the identifier of the newly created mutant offspring.
+It is asynchronous and intended to be used during the reproduction phase
+of species-level evolution.
+"""
clone_id = _new_id()

await clone_agent(parent_id, clone_id)
@@ -520,17 +922,32 @@ class PopulationMonitor:
await self._mutate_one_step(clone_id)
return clone_id

-async def _mutate_one_step(self, agent_id: str) -> None:
+async def _mutate_one_step(self, agent_id: str):
+"""
+Apply a single random mutation step to an agent.
+
+This method selects one mutation operator at random from the available
+set of genotype mutation operations (e.g. weight mutation, bias
+insertion/removal, structural link or neuron changes) and applies it to
+the specified agent.
+
+If the selected mutation operation fails, the method falls back to a
+simple weight-mutation step as a safety measure. Any further failure is
+silently ignored to ensure that the evolutionary process can continue
+without interrupting the run.
+
+The method is asynchronous and intended to introduce variation during
+offspring creation.
+"""
ops = [
-self.mutator.mutate_weights,
-self.mutator.add_bias,
-self.mutator.remove_bias,
-self.mutator.add_inlink,
-self.mutator.add_outlink,
-self.mutator.add_neuron,
-self.mutator.outsplice,
-self.mutator.add_actuator,
+self.mutator.mutate_weights_tx,
+self.mutator.add_bias_tx,
+self.mutator.remove_bias_tx,
+self.mutator.add_inlink_tx,
+self.mutator.add_outlink_tx,
+self.mutator.add_neuron_tx,
+self.mutator.outsplice_tx,
+self.mutator.add_actuator_tx,
]
op = random.choice(ops)
try:
@@ -538,65 +955,57 @@ class PopulationMonitor:
except Exception:

try:
-await self.mutator.mutate_weights(agent_id)
+await self.mutator.mutate_weights_tx(agent_id)
except Exception:
pass
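The pattern is: draw one operator uniformly at random, fall back to a plain weight mutation if it raises; the same idea in isolation (operator callables here are placeholders, not the mutator's actual API):

```python
import random

async def apply_one_mutation(agent_id, ops, fallback):
    """Try one randomly chosen mutation op; fall back to a weight mutation."""
    op = random.choice(ops)
    try:
        await op(agent_id)
    except Exception:
        try:
            await fallback(agent_id)
        except Exception:
            pass  # keep the evolutionary run alive even if mutation fails
```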
async def _top3(self, valid_ids: List[str], offspring_needed: int, specie_id: str) -> List[str]:
+"""
+Generate offspring using the top-3 selection strategy.
+
+This method implements the "top3" reproduction strategy for a species.
+It retains the three best-performing agents unchanged and fills the
+remaining population slots by repeatedly selecting one of these top
+agents at random and creating a mutated offspring from it.
+
+The method returns a list of agent identifiers representing the new
+species population, consisting of the original top agents and their
+offspring. It is asynchronous and intended to be used during the
+species-level mutation phase of the evolutionary cycle.
+"""
new_ids = list(valid_ids)
for _ in range(offspring_needed):
parent = random.choice(valid_ids)
cid = await self._create_mutant_offspring(parent, specie_id)
new_ids.append(cid)

return new_ids
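A minimal sketch of the top-3 strategy described above, with reproduction abstracted behind a caller-supplied coroutine (`make_offspring` stands in for `_create_mutant_offspring`):

```python
import random

async def top3(valid_ids, offspring_needed, make_offspring):
    """Keep the champions, refill the species with mutated copies of them."""
    new_ids = list(valid_ids)  # the (up to) three champions survive unchanged
    for _ in range(offspring_needed):
        parent = random.choice(valid_ids)
        new_ids.append(await make_offspring(parent))
    return new_ids
```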
-async def _construct_agent_summaries(self, agent_ids: Sequence[str]) -> List[Tuple[float, int, str]]:
-if not agent_ids:
-return []
-rows = await _read_all("""
-UNWIND $ids AS aid
-MATCH (a:agent {id:aid})-[:OWNS]->(cx:cortex)
-OPTIONAL MATCH (cx)-[:HAS]->(n:neuron)
-RETURN aid AS id,
-toFloat(a.fitness) AS f,
-count(n) AS k
-""", ids=[str(x) for x in agent_ids])
-out: List[Tuple[float, int, str]] = []
-for r in rows:
-f = float(r["f"]) if r["f"] is not None else 0.0
-k = int(r["k"])
-out.append((f, k, str(r["id"])))
-return out
-
-async def _calculate_energy_cost(self, population_id: str) -> float:
-rows = await _read_all("""
-MATCH (a:agent {population_id:$pid})-[:OWNS]->(cx:cortex)
-OPTIONAL MATCH (cx)-[:HAS]->(n:neuron)
-RETURN sum(coalesce(toFloat(a.fitness),0.0)) AS totE,
-count(n) AS totN
-""", pid=str(population_id))
-if not rows:
-return 0.0
-totE = float(rows[0]["totE"] or 0.0)
-totN = int(rows[0]["totN"] or 0)
-return (totE / totN) if totN > 0 else 0.0
-
-async def _best_fitness_in_population(self, population_id: str) -> float:
-rows = await _read_all("""
-MATCH (a:agent {population_id:$pid})
-RETURN max(toFloat(a.fitness)) AS best
-""", pid=str(population_id))
-return float(rows[0]["best"] or 0.0) if rows else 0.0
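The removed `_calculate_energy_cost` reduces to a single ratio, total population fitness over total neuron count; a plain-Python equivalent without the Neo4j query:

```python
def energy_cost(agents):
    """agents: iterable of (fitness, n_neurons); returns mean fitness per neuron."""
    tot_e = sum(f for f, _ in agents)
    tot_n = sum(n for _, n in agents)
    return tot_e / tot_n if tot_n > 0 else 0.0

# energy_cost([(90.0, 3), (60.0, 6)]) == 150.0 / 9 ≈ 16.67
```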
async def init_population(params: Tuple[str, List[dict], str, SelectionAlgorithm]) -> PopulationMonitor:
"""
-params = (Population_Id, Specie_Constraints, OpMode, Selection_Algorithm)
-- creates population/species nodes
-- constructs agents (via construct_agent)
-- links specie->agent, sets agent.population_id
-- starts the monitor
+Initialize a new evolutionary population and start its monitor.
+
+This function creates a fresh population in the genotype store based on
+the provided species constraints and launches a PopulationMonitor to
+manage its evolutionary process. It performs the following steps:
+
+1. Deletes any existing population with the given identifier to ensure a
+   clean initialization.
+2. Creates a new population node in the datastore.
+3. For each specified species constraint:
+   - Creates a new species node associated with the population.
+   - Stores the species constraint configuration and initializes its
+     innovation factor.
+   - Creates an initial set of agents for the species, constructing their
+     genotypes according to the given constraints.
+   - Assigns population and species identifiers to each agent, clears
+     fitness values, and establishes species membership relationships.
+4. Starts a PopulationMonitor for the initialized population, using the
+   specified operation mode and selection algorithm.
+
+The function returns the running PopulationMonitor instance, which
+coordinates evaluation, mutation, and logging for the newly created
+population.
"""
population_id, specie_constraints, op_mode, selection_algorithm = params
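A hedged usage sketch of `init_population`: it takes a single `(population_id, specie_constraints, op_mode, selection_algorithm)` tuple. The constraint dict and the op_mode value below are made-up placeholders; only the tuple shape and the "competition" selection name are taken from this changeset:

```python
from mathema.core.population_monitor import init_population

async def start_run():
    # hypothetical constraint dict, shown only to illustrate the tuple shape
    specie_constraints = [{"morphology": "car_racing", "agents_per_specie": 10}]
    monitor = await init_population((
        "pop_demo",          # population_id
        specie_constraints,  # one dict per species
        "gt",                # op_mode (placeholder value)
        "competition",       # selection_algorithm
    ))
    await monitor._stopped_evt.wait()  # block until the monitor stops itself

# asyncio.run(start_run())
```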
@@ -639,10 +1048,39 @@ async def init_population(params: Tuple[str, List[dict], str, SelectionAlgorithm

async def continue_(op_mode: str, selection_algorithm: SelectionAlgorithm,
population_id: str = INIT_POPULATION_ID) -> PopulationMonitor:
+"""
+Resume or start a population monitor for an existing population.
+
+This convenience function starts a PopulationMonitor for the specified
+population using the given operation mode and selection algorithm,
+without reinitializing or modifying the underlying population structure.
+
+It is intended to continue an existing evolutionary run or to attach a
+new monitor to a pre-existing population state. The function returns the
+running PopulationMonitor instance, which immediately begins coordinating
+evaluation and evolution according to the provided parameters.
+"""
return await PopulationMonitor.start(op_mode, population_id, selection_algorithm)

-async def delete_population(population_id: str) -> None:
+async def delete_population(population_id: str):
+"""
+Delete a population and all associated evolutionary entities.
+
+This function removes a population and all data linked to it from the
+genotype store. It performs a cascading deletion that includes:
+
+1. The population node itself.
+2. All species belonging to the population.
+3. All agents within those species.
+4. All cortical structures owned by the agents, including neurons,
+   sensors, and actuators.
+
+All relationships are detached prior to deletion to ensure referential
+integrity. The operation is destructive and intended to be used during
+population reinitialization or cleanup before starting a new evolutionary
+run.
+"""
await _run("""
MATCH (p:population {id:$pid})
OPTIONAL MATCH (s:specie {population_id:$pid})
@@ -652,7 +1090,3 @@ async def delete_population(population_id: str) -> None:
OPTIONAL MATCH (cx)-[:HAS]->(act:actuator)
DETACH DELETE p, s, a, cx, n, sen, act
""", pid=str(population_id))

-async def test() -> PopulationMonitor:
-return await init_population((INIT_POPULATION_ID, INIT_CONSTRAINTS, INIT_OP_MODE, INIT_SELECTION_ALGO))
@@ -1,111 +0,0 @@
-import asyncio
-import os
-from typing import Any, Dict, List, Tuple, Optional
-
-from mathema.core import morphology
-from mathema.genotype.neo4j.genotype import construct, print_genotype
-from mathema.core.exoself import Exoself
-
-
-class Trainer:
-def __init__(
-self,
-morphology_spec=morphology,
-hidden_layer_densities: List[int] = None,
-*,
-max_attempts: int = 5,
-eval_limit: float = float("inf"),
-fitness_target: float = float("inf"),
-experimental_file: Optional[str] = "experimental.json",
-best_file: Optional[str] = "best.json",
-exoself_steps_per_eval: int = 0,
-):
-self.morphology_spec = morphology_spec
-self.hds = hidden_layer_densities or []
-self.max_attempts = max_attempts
-self.eval_limit = eval_limit
-self.fitness_target = fitness_target
-self.experimental_file = experimental_file
-self.best_file = best_file
-self.exoself_steps_per_eval = exoself_steps_per_eval
-
-self.best_fitness = float("-inf")
-self.best_genotype: Optional[Dict[str, Any]] = None
-
-self.eval_acc = 0
-self.cycle_acc = 0
-self.time_acc = 0.0
-
-async def _run_one_attempt(self) -> Tuple[float, int, int, float]:
-print("constructing genotype...")
-geno = construct(
-self.morphology_spec,
-self.hds,
-file_name=self.experimental_file,  # <-- writes the initial network to experimental.json
-add_bias=True
-)
-fitness, evals, cycles, elapsed = await self._evaluate_with_exoself(geno)
-return fitness, evals, cycles, elapsed
-
-async def _evaluate_with_exoself(self, genotype: Dict[str, Any]) -> Tuple[float, int, int, float]:
-print("creating exoself...")
-ex = Exoself(genotype, file_name=self.experimental_file)
-best_fitness, evals, cycles, elapsed = await ex.train_until_stop()
-return best_fitness, evals, cycles, elapsed
-
-async def go(self):
-attempt = 1
-while True:
-print(".........")
-print("current attempt: ", attempt)
-print(".........")
-
-if attempt > self.max_attempts or self.eval_acc >= self.eval_limit or self.best_fitness >= self.fitness_target:
-# final output as in the book
-if self.best_file and os.path.exists(self.best_file):
-print_genotype(self.best_file)
-print(
-f" Morphology: {getattr(self.morphology_spec, '__name__', str(self.morphology_spec))} | "
-f"Best Fitness: {self.best_fitness} | EvalAcc: {self.eval_acc}"
-)
-return {
-"best_fitness": self.best_fitness,
-"eval_acc": self.eval_acc,
-"cycle_acc": self.cycle_acc,
-"time_acc": self.time_acc,
-"best_file": self.best_file,
-}
-
-print("RUN ONE ATTEMPT!")
-fitness, evals, cycles, elapsed = await self._run_one_attempt()
-
-print("update akkus...")
-
-self.eval_acc += evals
-self.cycle_acc += cycles
-self.time_acc += elapsed
-
-# better than before?
-if fitness > self.best_fitness:
-self.best_fitness = fitness
-if self.best_file and self.experimental_file and os.path.exists(self.experimental_file):
-os.replace(self.experimental_file, self.best_file)
-attempt = 1
-else:
-attempt += 1
-
-
-if __name__ == "__main__":
-trainer = Trainer(
-morphology_spec=morphology,
-hidden_layer_densities=[2],
-max_attempts=200,
-eval_limit=float("inf"),
-fitness_target=99.9,
-experimental_file="experimental.json",
-best_file="best.json",
-exoself_steps_per_eval=0,
-)
-
-asyncio.run(trainer.go())
@@ -1,6 +1,7 @@
import math
import numpy as np
import logging
+import pygame

import Box2D
from Box2D import (b2FixtureDef, b2PolygonShape, b2ContactListener)
@@ -20,8 +21,6 @@ except Exception:

from gymnasium.envs.box2d.car_dynamics import Car

-import pygame
-
DEBUG_DRAWING = False
LOOK_AHEAD = 10

@@ -98,9 +97,9 @@ class MyState:


class FrictionDetector(b2ContactListener):
-def __init__(self, env):
+def __init__(self, car_env):
super().__init__()
-self.env = env
+self.env = car_env

def BeginContact(self, contact):
self._contact(contact, True)
@@ -142,6 +141,17 @@ class FrictionDetector(b2ContactListener):
self.env.on_road = len(obj.tiles) > 0


+def _world_to_screen(x, y, zoom, angle, scroll_x, scroll_y):
+ca, sa = math.cos(angle), math.sin(angle)
+
+rx = (x - scroll_x) * ca + (y - scroll_y) * sa
+ry = -(x - scroll_x) * sa + (y - scroll_y) * ca
+
+sx = int(WINDOW_W / 2 + rx * zoom)
+sy = int(WINDOW_H / 4 + ry * zoom)
+return sx, sy
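The new module-level `_world_to_screen` is a rotate, scale, and offset camera transform; a small worked check under assumed window constants (the WINDOW_W/WINDOW_H values here are illustrative only):

```python
import math

WINDOW_W, WINDOW_H = 1000, 800  # illustrative values, not the project's constants

def world_to_screen(x, y, zoom, angle, scroll_x, scroll_y):
    ca, sa = math.cos(angle), math.sin(angle)
    rx = (x - scroll_x) * ca + (y - scroll_y) * sa
    ry = -(x - scroll_x) * sa + (y - scroll_y) * ca
    return int(WINDOW_W / 2 + rx * zoom), int(WINDOW_H / 4 + ry * zoom)

# with no rotation and unit zoom, the scroll point maps to the fixed screen anchor:
assert world_to_screen(5.0, 3.0, zoom=1.0, angle=0.0, scroll_x=5.0, scroll_y=3.0) == (500, 200)
```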
class CarRacing:
metadata = {
"render_modes": ["human", "rgb_array", None],
@@ -150,6 +160,7 @@ class CarRacing:

def __init__(self, seed_value: int = 5, render_mode: str | None = "human"):

+self.road_poly = None
self.offroad_frames = None
if seeding is not None:
self.np_random, _ = seeding.np_random(seed_value)
@@ -216,9 +227,9 @@ class CarRacing:
if self._pg is None:
self._pg = self._PygameCtx()
if not self._pg.initialized:
+import pygame
if not pygame.get_init():
pygame.init()
-flags = 0
if self.render_mode == "human":
self._pg.screen = pygame.display.set_mode((WINDOW_W, WINDOW_H))
else:
@@ -232,23 +243,13 @@ class CarRacing:
self._pg.font = None
self._pg.initialized = True

-def _world_to_screen(self, x, y, zoom, angle, scroll_x, scroll_y):
-ca, sa = math.cos(angle), math.sin(angle)
-
-rx = (x - scroll_x) * ca + (y - scroll_y) * sa
-ry = -(x - scroll_x) * sa + (y - scroll_y) * ca
-
-sx = int(WINDOW_W / 2 + rx * zoom)
-sy = int(WINDOW_H / 4 + ry * zoom)
-return sx, sy
-
def get_feature_vector(self, lookahead: int = LOOK_AHEAD) -> list[float]:
my_s: MyState = self.my_state
vec = my_s.as_feature_vector(lookahead).tolist()
return vec

def _draw_polygon_world(self, poly, color, zoom, angle, scroll_x, scroll_y):
-pts = [self._world_to_screen(px, py, zoom, angle, scroll_x, scroll_y) for (px, py) in poly]
+pts = [_world_to_screen(px, py, zoom, angle, scroll_x, scroll_y) for (px, py) in poly]
pygame.draw.polygon(self._pg.screen, f2c(color), pts)

def _draw_body(self, body, color=(0.7, 0.7, 0.7), zoom=1.0, angle=0.0, scroll_x=0.0, scroll_y=0.0):
@@ -258,7 +259,7 @@ class CarRacing:
shape = fixture.shape
if isinstance(shape, b2PolygonShape):
verts = [body.transform * v for v in shape.vertices]
-pts = [self._world_to_screen(v[0], v[1], zoom, angle, scroll_x, scroll_y) for v in verts]
+pts = [_world_to_screen(v[0], v[1], zoom, angle, scroll_x, scroll_y) for v in verts]
pygame.draw.polygon(self._pg.screen, col, pts, width=0)

def _destroy(self):
@@ -432,8 +433,8 @@ class CarRacing:
self.track = track

self.original_road_poly = [((list(poly)), list(color)) for (poly, color) in self.road_poly]
-self.ctrl_pts = np.array(list(map(lambda x: x[2:], self.track)))
-self.angles = np.array(list(map(lambda x: x[1], self.track)))
+self.ctrl_pts = np.array(list(map(lambda x_coord: x_coord[2:], self.track)))
+self.angles = np.array(list(map(lambda x_coord: x_coord[1], self.track)))
self.outward_vectors = [np.array([np.cos(theta), np.sin(theta)]) for theta in self.angles]
angle_deltas = self.angles - np.roll(self.angles, 1)
self.angle_deltas = np.array(list(map(standardize_angle, angle_deltas)))
@@ -468,7 +469,7 @@ class CarRacing:
self._no_progress_steps = 0
self._stall_steps = 0

-def reset(self, *, seed: int | None = None, options: dict | None = None):
+def reset(self, *, seed: int | None = None):
if seed is not None:

if seeding is not None:
@@ -476,8 +477,9 @@ class CarRacing:
else:
self.np_random = np.random.RandomState(seed)
self._build_new_episode()
-obs = self._get_observation()
-info = {}
+obs, _, _, _, info = self.step(
+np.array([0.0, 0.0, 0.0], dtype=np.float32)
+)
return obs, info
def fast_reset(self):
@@ -509,12 +511,12 @@ class CarRacing:

return self.step(np.array([0.0, 0.0, 0.0], dtype=np.float32))

-def step(self, action):
+def step(self, env_action):

-if action is not None:
-self.car.steer(-float(action[0]))
-self.car.gas(float(action[1]))
-self.car.brake(float(action[2]))
+if env_action is not None:
+self.car.steer(-float(env_action[0]))
+self.car.gas(float(env_action[1]))
+self.car.brake(float(env_action[2]))

self.car.step(1.0 / FPS)
self.world.Step(1.0 / FPS, 6 * 30, 2 * 30)
@@ -522,27 +524,27 @@ class CarRacing:

self.steps += 1

-terminated = False
-truncated = False
+env_terminated = False
+env_truncated = False

-if action is not None:
+if env_action is not None:

self.reward -= 5.0 / FPS

if self.tile_visited_count == len(self.track):
-terminated = True
+env_terminated = True

x, y = self.car.hull.position
if abs(x) > PLAYFIELD or abs(y) > PLAYFIELD:
self.reward -= 100.0
-terminated = True
+env_terminated = True

if not self.on_road:
self.offroad_frames += 1
self.reward -= self.offroad_penalty_per_frame / FPS
if self.offroad_frames > self.offroad_grace_frames:
self.reward -= 20.0
-terminated = True
+env_terminated = True
else:
self.offroad_frames = 0

@@ -552,7 +554,7 @@ class CarRacing:
else:
self._no_progress_steps += 1
if self._no_progress_steps >= NO_PROGRESS_STEPS:
-truncated = True
+env_truncated = True

step_reward = self.reward - self.prev_reward
self.prev_reward = self.reward
@@ -605,11 +607,10 @@ class CarRacing:

obs = self._get_observation()
info = {"features": self.my_state}
-return obs, step_reward, terminated, truncated, info
+return obs, step_reward, env_terminated, env_truncated, info

def _get_observation(self):
-return None
+return np.array(self.get_feature_vector(), dtype=np.float32)

def render(self):
self._ensure_pygame()
@@ -636,10 +637,10 @@ class CarRacing:
for y in range(-20, 20, 2):
x0, y0 = k * x + 0, k * y + 0
x1, y1 = k * x + k, k * y + k
-p0 = self._world_to_screen(x0, y0, zoom, angle, scroll_x, scroll_y)
-p1 = self._world_to_screen(x1, y0, zoom, angle, scroll_x, scroll_y)
-p2 = self._world_to_screen(x1, y1, zoom, angle, scroll_x, scroll_y)
-p3 = self._world_to_screen(x0, y1, zoom, angle, scroll_x, scroll_y)
+p0 = _world_to_screen(x0, y0, zoom, angle, scroll_x, scroll_y)
+p1 = _world_to_screen(x1, y0, zoom, angle, scroll_x, scroll_y)
+p2 = _world_to_screen(x1, y1, zoom, angle, scroll_x, scroll_y)
+p3 = _world_to_screen(x0, y1, zoom, angle, scroll_x, scroll_y)
pygame.draw.polygon(self._pg.screen, grid_color, [p0, p1, p2, p3])

for poly, color in self.road_poly:
@@ -3,12 +3,13 @@ import logging
from dotenv import load_dotenv

from mathema.core.population_monitor import init_population
+from mathema.genotype.neo4j.genotype import neo4j
from mathema.utils.logging_config import setup_logging

setup_logging()
log = logging.getLogger(__name__)

-N_RUNS = 10
+N_RUNS = 20


async def run_single_car_experiment(run_idx: int):
@@ -23,12 +24,32 @@ async def run_single_car_experiment(run_idx: int):
"competition",
))

-# 👉 wait until the monitor shuts itself down
-await monitor._stopped_evt.wait()
+try:
+# ⏱️ wait at most 35 minutes (30 min training + buffer)
+await asyncio.wait_for(
+monitor._stopped_evt.wait(),
+timeout=35 * 60
+)

-# optional: log final stats
+except asyncio.TimeoutError:
+log.error(
+f"[RUN {run_idx:02d}] TIMEOUT after 35min – forcing shutdown"
+)
+
+try:
+await monitor.stop("shutdown")
+except Exception as e:
+log.exception(
+f"[RUN {run_idx:02d}] failed to stop monitor cleanly: {e}"
+)
+
+# --- Post-run logging ---
s = monitor.state
-best = await monitor._best_fitness_in_population(s.population_id)
+try:
+best = await monitor._best_fitness_in_population(s.population_id)
+except Exception:
+best = float("nan")

log.info(
f"=== END RUN {run_idx + 1}/{N_RUNS} "
f"gens={s.pop_gen} best_fitness={best:.6f} evals={s.eval_acc} ==="
@@ -37,11 +58,15 @@ async def run_single_car_experiment(run_idx: int):

async def main():
load_dotenv()
+try:
for i in range(N_RUNS):
await run_single_car_experiment(i)
+log.info("=== ALL RUNS FINISHED ===")
-log.info("=== ALL RUNS FINISHED ===")
+finally:
+try:
+await neo4j.close()
+except Exception:
+pass


if __name__ == "__main__":
@@ -1,21 +0,0 @@
-import asyncio
-
-from mathema.core.exoself import Exoself
-from mathema.genotype.neo4j.genotype import load_genotype_snapshot
-
-
-async def main():
-print("i am here!")
-snapshot = await load_genotype_snapshot("08bf4d92d8c0438295399f8f2a8fef1a")
-
-print("gathered snapshot")
-print(snapshot)
-
-print("------- build exoself ---------")
-exo = Exoself(snapshot)
-
-print("-------- building processes ---------")
-exo.build_pid_map_and_spawn()
-
-if __name__ == "__main__":
-asyncio.run(main())
@@ -10,7 +10,6 @@ import uuid
|
|||||||
from typing import Dict, Any, List
|
from typing import Dict, Any, List
|
||||||
|
|
||||||
from mathema.core.db import Neo4jDB
|
from mathema.core.db import Neo4jDB
|
||||||
from mathema.genotype.neo4j.genotype_mutator import GenotypeMutator
|
|
||||||
|
|
||||||
neo4j = Neo4jDB(
|
neo4j = Neo4jDB(
|
||||||
user="neo4j",
|
user="neo4j",
|
||||||
@@ -25,6 +24,17 @@ def now_unique():
|
|||||||
|
|
||||||
|
|
||||||
async def construct_agent(specie_id: any, agent_id: any, spec_con: Dict[str, Any]):
|
async def construct_agent(specie_id: any, agent_id: any, spec_con: Dict[str, Any]):
|
||||||
|
"""
|
||||||
|
|
||||||
|
Constructs an agent with the provided parameters and stores it in the database.
|
||||||
|
|
||||||
|
:param specie_id: Identifier of the species the agent belongs to.
|
||||||
|
:param agent_id: Identifier of the agent being constructed.
|
||||||
|
:param spec_con: Dictionary containing specifications for the agent creation.
|
||||||
|
|
||||||
|
:return: Dictionary representing the constructed agent.
|
||||||
|
|
||||||
|
"""
|
||||||
random.seed(time.time())
|
random.seed(time.time())
|
||||||
generation = 0
|
generation = 0
|
||||||
cx_id = await construct_cortex(agent_id, generation, spec_con)
|
cx_id = await construct_cortex(agent_id, generation, spec_con)
|
||||||
@@ -89,7 +99,20 @@ async def construct_agent(specie_id: any, agent_id: any, spec_con: Dict[str, Any
|
|||||||
return agent
|
return agent
|
||||||
|
|
||||||
|
|
||||||
async def construct_cortex(agent_id, generation: any, spec_con: Dict[str, Any]):
|
async def construct_cortex(_, generation: any, spec_con: Dict[str, Any]):
|
||||||
|
"""
|
||||||
|
|
||||||
|
Async method to construct a cortex with given generation and specification configuration.
|
||||||
|
|
||||||
|
Parameters:
|
||||||
|
_ : Ignored parameter
|
||||||
|
generation: any - The generation of the cortex
|
||||||
|
spec_con: Dict[str, Any] - The specification configuration for the cortex
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str - The unique identifier of the constructed cortex
|
||||||
|
|
||||||
|
"""
|
||||||
from importlib import import_module
|
from importlib import import_module
|
||||||
|
|
||||||
morphology_mod = import_module("mathema.core.morphology")
|
morphology_mod = import_module("mathema.core.morphology")
|
||||||
@@ -122,8 +145,9 @@ async def construct_cortex(agent_id, generation: any, spec_con: Dict[str, Any]):
|
|||||||
})
|
})
|
||||||
|
|
||||||
neuron_ids, neurons = await construct_initial_neurolayer(cx_uid, generation, spec_con, sensors, actuators)
|
neuron_ids, neurons = await construct_initial_neurolayer(cx_uid, generation, spec_con, sensors, actuators)
|
||||||
sensor_ids = [s["id"] for s in sensors]
|
|
||||||
actuator_ids = [a["id"] for a in actuators]
|
# sensor_ids = [s["id"] for s in sensors]
|
||||||
|
# actuator_ids = [a["id"] for a in actuators]
|
||||||
|
|
||||||
await write_cortex({"id": str(cx_uid)})
|
await write_cortex({"id": str(cx_uid)})
|
||||||
await write_neurons(neurons)
|
await write_neurons(neurons)
|
||||||
@@ -136,6 +160,24 @@ async def construct_cortex(agent_id, generation: any, spec_con: Dict[str, Any]):
|
|||||||
|
|
||||||
|
|
||||||
async def construct_initial_neurolayer(cx_id, generation, spec_con, sensors, actuators):
|
async def construct_initial_neurolayer(cx_id, generation, spec_con, sensors, actuators):
|
||||||
|
"""
|
||||||
|
Async function to construct initial neural layer.
|
||||||
|
The initial neuro layer is constructed as follows:
|
||||||
|
choose a random sensor with free capacity and connect it to
|
||||||
|
a given neuron. Choose a random actuator with free capacity and
|
||||||
|
connect the neuron to it.
|
||||||
|
|
||||||
|
Params:
|
||||||
|
- cx_id (str): The ID of the context.
|
||||||
|
- generation (int): The generation of the layer.
|
||||||
|
- spec_con (dict): The specification of the layer.
|
||||||
|
- sensors (list): List of sensors.
|
||||||
|
- actuators (list): List of actuators.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
- neuron_ids (list): List of neuron IDs.
|
||||||
|
- neurons (list): List of constructed neurons.
|
||||||
|
"""
|
||||||
neuron_ids = []
|
neuron_ids = []
|
||||||
neurons = []
|
neurons = []
|
||||||
for actuator in actuators:
|
for actuator in actuators:
|
||||||
@@ -168,7 +210,14 @@ async def construct_initial_neurolayer(cx_id, generation, spec_con, sensors, act
|
|||||||
|
|
||||||
async def link_units(sensors, neurons, actuators, cortex):
|
async def link_units(sensors, neurons, actuators, cortex):
|
||||||
"""
|
"""
|
||||||
link all units of the network with correct weights
|
Link units in the brain model with sensors, neurons, actuators, and the cortex.
|
||||||
|
|
||||||
|
Parameters:
|
||||||
|
- sensors (List[dict]): List of sensor data dictionaries.
|
||||||
|
- neurons (List[dict]): List of neuron data dictionaries.
|
||||||
|
- actuators (List[dict]): List of actuator data dictionaries.
|
||||||
|
- cortex (dict): Cortex data dictionary.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
s2n_rows = []
|
s2n_rows = []
|
||||||
for n in neurons:
|
for n in neurons:
|
||||||
@@ -235,13 +284,26 @@ async def link_units(sensors, neurons, actuators, cortex):
|
|||||||
MERGE (cx)-[:HAS]->(a)
|
MERGE (cx)-[:HAS]->(a)
|
||||||
""", rows=[{"id": str(a["id"])} for a in actuators], cx_id=str(cortex["id"]))
|
""", rows=[{"id": str(a["id"])} for a in actuators], cx_id=str(cortex["id"]))
|
||||||
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
async def construct_neuron(_, generation, spec_con, n_id, input_specs, output_specs, output_ids, layer_index):
|
||||||
async def construct_neuron(cx_id, generation, spec_con, n_id, input_specs, output_specs, output_ids, layer_index):
|
|
||||||
"""
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
This method constructs a neuron object with the given parameters.
|
||||||
|
|
||||||
|
Parameters:
|
||||||
|
- _: placeholder for the global activation function shared between neurons
|
||||||
|
- generation: the current generation of the neuron
|
||||||
|
- spec_con: dictionary containing configuration specifications
|
||||||
|
- n_id: unique identifier for the neuron
|
||||||
|
- input_specs: list of input specifications for the neuron
|
||||||
|
- output_specs: list of output specifications for the neuron
|
||||||
|
- output_ids: list of unique identifiers for the neuron's outputs
|
||||||
|
- layer_index: index of the layer where the neuron is located
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
- neuron: dictionary representing the constructed neuron with the provided parameters
|
||||||
|
|
||||||
|
"""
|
||||||
bias = None
|
bias = None
|
||||||
|
|
||||||
neuron = {
|
neuron = {
|
||||||
@@ -283,7 +345,14 @@ async def write_actuators(actuators):
|
|||||||
|
|
||||||
async def write_neuron(neuron):
|
async def write_neuron(neuron):
|
||||||
"""
|
"""
|
||||||
write neuron to database
|
|
||||||
|
Write data of a neuron to the Neo4j database.
|
||||||
|
|
||||||
|
:param neuron: Dictionary representing the neuron data to be written.
|
||||||
|
:type neuron: dict
|
||||||
|
|
||||||
|
:returns: None
|
||||||
|
|
||||||
"""
|
"""
|
||||||
await neo4j.run_consume("""
|
await neo4j.run_consume("""
|
||||||
MERGE (n:neuron {id: $id})
|
MERGE (n:neuron {id: $id})
|
||||||
@@ -302,7 +371,10 @@ async def write_neuron(neuron):
|
|||||||
|
|
||||||
async def write_sensor(sensor):
|
async def write_sensor(sensor):
|
||||||
"""
|
"""
|
||||||
write sensor to database
|
Method to write sensor data to Neo4j.
|
||||||
|
|
||||||
|
:param sensor: dictionary containing sensor data
|
||||||
|
:type sensor: dict
|
||||||
"""
|
"""
|
||||||
await neo4j.run_consume("""
|
await neo4j.run_consume("""
|
||||||
MERGE (s:sensor {id: $id})
|
MERGE (s:sensor {id: $id})
|
||||||
@@ -321,7 +393,16 @@ async def write_sensor(sensor):
|
|||||||
|
|
||||||
async def write_actuator(actuator):
|
async def write_actuator(actuator):
|
||||||
"""
|
"""
|
||||||
write actuator to database
|
Write actuator node in the graph database.
|
||||||
|
|
||||||
|
Parameters:
|
||||||
|
- actuator (dict): A dictionary containing information about the actuator.
|
||||||
|
It should have the following keys:
|
||||||
|
- id (str): The unique identifier of the actuator.
|
||||||
|
- name (str): The name of the actuator.
|
||||||
|
- scape (str): The scape of the actuator.
|
||||||
|
- vector_length (int): The length of the vector.
|
||||||
|
- generation (int): The generation information of the actuator.
|
||||||
"""
|
"""
|
||||||
await neo4j.run_consume("""
|
await neo4j.run_consume("""
|
||||||
MERGE (a:actuator {id: $id})
|
MERGE (a:actuator {id: $id})
|
||||||
@@ -340,9 +421,16 @@ async def write_actuator(actuator):

async def compute_pattern(cx_id: str):
"""
- Liefert (pattern_ids, pattern_counts):
- - pattern_ids: [{ "layer_index": L, "neuron_ids": [..] }, ...] (IDs als Strings, stabil sortiert)
- - pattern: [{ "layer_index": L, "count": K }, ...]
+ Compute pattern for a given cortex ID.
+
+ Arguments:
+ - cx_id (str): The ID of the cortex for which to compute the pattern.
+
+ Returns:
+ - pattern_ids (list): A list of dictionaries containing the layer index and neuron IDs.
+ - pattern (list): A list of dictionaries containing the layer index and the count of neuron IDs in that layer.
"""
rows = await neo4j.read_all("""
MATCH (cx:cortex {id:$cx_id})-[:HAS]->(n:neuron)
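For orientation, a minimal sketch of the two structures named in the new `compute_pattern` docstring; the neuron IDs are placeholders, not values from the database:

```python
# Assumed shapes only, derived from the docstring above.
pattern_ids = [
    {"layer_index": 0, "neuron_ids": ["n1", "n2"]},
    {"layer_index": 1, "neuron_ids": ["n3"]},
]

# The per-layer counts drop the IDs and keep only the layer sizes.
pattern_counts = [
    {"layer_index": p["layer_index"], "count": len(p["neuron_ids"])}
    for p in pattern_ids
]
print(pattern_counts)  # [{'layer_index': 0, 'count': 2}, {'layer_index': 1, 'count': 1}]
```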
@@ -362,8 +450,18 @@ async def compute_pattern(cx_id: str):

async def compute_generalized_io(cx_id: str):
"""
- Liefert zwei Listen (generalized_sensors, generalized_actuators) ohne IDs/Cortex-Bezug.
- Je Element: nur (name, scape, vector_length). Stabil sortiert.
+ Async method to compute the generalized input and output for a given cortex ID.
+
+ Parameters:
+ cx_id (str): ID of the cortex.
+
+ Returns:
+ Tuple[List[Dict[str, Union[str, int]]], List[Dict[str, Union[str, int]]]]:
+ A tuple containing a list of dictionaries representing the normalized sensor data
+ and a list of dictionaries representing the normalized actuator data. Each dictionary
+ contains keys 'name' (str), 'scape' (str), and 'vector_length' (int).
"""
s_rows = await neo4j.read_all("""
MATCH (cx:cortex {id:$cx_id})-[:HAS]->(s:sensor)
@@ -384,7 +482,7 @@ async def compute_generalized_io(cx_id: str):
return normalize(s_rows), normalize(a_rows)


- async def compute_generalized_evo_hist(agent_id: str):
+ async def compute_generalized_evo_hist(_):
"""
place holder until mutator works
"""
@@ -392,6 +490,15 @@ async def compute_generalized_evo_hist(agent_id: str):


async def update_fingerprint(agent_id: str):
+ """
+ Update fingerprint data for a given agent in the Neo4j database.
+
+ Parameters:
+ agent_id (str): The unique identifier for the agent.
+
+ Returns:
+ None
+ """
rows = await neo4j.read_all("""
MATCH (a:agent {id:$aid})-[:OWNS]->(cx:cortex)
RETURN cx.id AS cx_id
@@ -439,15 +546,21 @@ async def update_fingerprint(agent_id: str):
)


- async def speciate(self, agent_id):
- pass


async def clone_agent(agent_id: Any, clone_agent_id: Any) -> Any:
"""
- Klont den kompletten Genotyp (Cortex, Sensoren, Neuronen, Aktuatoren + Kanten)
- eines Agents unter neuer Agent-ID `clone_agent_id`.
- Gibt die Clone-Agent-ID zurück.
+ Async method to clone an existing agent in Neo4j database.
+
+ Parameters:
+ - agent_id: Identifier of the existing agent to be cloned.
+ - clone_agent_id: Identifier of the cloned agent.
+
+ Returns:
+ - None
+
+ This method clones the specified agent along with its associated data in the Neo4j database.
+ If the specified agent is not found, a ValueError will be raised.
"""
aid = str(agent_id)
cid = str(clone_agent_id)
@@ -476,19 +589,21 @@ async def clone_agent(agent_id: Any, clone_agent_id: Any) -> Any:
pattern_json = arow.get("pattern_json") or "[]"
pattern_layers = arow.get("pattern_layers") or []
pattern_counts = arow.get("pattern_counts") or []
- evo_hist = arow.get("evo_hist") or []
+ evo_hist = [json.dumps({"op": "clone_from", "parent": aid}, separators=(",", ":"))]
population_id = arow.get("population_id")
fitness = arow.get("fitness")
src_cx = str(arow["cxid"])

srows = await neo4j.read_all("""
MATCH (:cortex {id:$cx})-[:HAS]->(s:sensor)
- RETURN s.id AS id, s.name AS name, s.scape AS scape, toInteger(s.vector_length) AS vl, toInteger(s.generation) AS gen
+ RETURN s.id AS id, s.name AS name, s.scape AS scape, toInteger(s.vector_length) AS vl,
+ toInteger(s.generation) AS gen
""", cx=src_cx)

arows2 = await neo4j.read_all("""
MATCH (:cortex {id:$cx})-[:HAS]->(a:actuator)
- RETURN a.id AS id, a.name AS name, a.scape AS scape, toInteger(a.vector_length) AS vl, toInteger(a.generation) AS gen
+ RETURN a.id AS id, a.name AS name, a.scape AS scape, toInteger(a.vector_length) AS vl,
+ toInteger(a.generation) AS gen
""", cx=src_cx)

nrows = await neo4j.read_all("""
@@ -510,6 +625,11 @@ async def clone_agent(agent_id: Any, clone_agent_id: Any) -> Any:
RETURN n.id AS nid, a.id AS aid, r.weights AS weights
""", cx=src_cx)

+ n2n = await neo4j.read_all("""
+ MATCH (:cortex {id:$cx})-[:HAS]->(src:neuron)-[r:FORWARD]->(dst:neuron)<-[:HAS]-(:cortex {id:$cx})
+ RETURN src.id AS sid, dst.id AS did, r.weights AS weights, coalesce(r.recurrent,false) AS recurrent
+ """, cx=src_cx)

def new_id() -> str:
return uuid.uuid4().hex

@@ -625,6 +745,20 @@ async def clone_agent(agent_id: Any, clone_agent_id: Any) -> Any:
"weights": [float(x) for x in (r.get("weights") or [])],
} for r in n2a])

+ if n2n:
+ await neo4j.run_consume("""
+ UNWIND $rows AS row
+ MATCH (src:neuron {id: row.from_id}), (dst:neuron {id: row.to_id})
+ MERGE (src)-[r:FORWARD]->(dst)
+ SET r.weights = [x IN row.weights | toFloat(x)],
+ r.recurrent = row.recurrent
+ """, rows=[{
+ "from_id": id_map[str(r["sid"])],
+ "to_id": id_map[str(r["did"])],
+ "weights": [float(x) for x in (r.get("weights") or [])],
+ "recurrent": bool(r.get("recurrent", False)),
+ } for r in n2n])

await neo4j.run_consume("""
MERGE (a:agent {id:$aid})
SET a.generation = toInteger($generation),
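The copied FORWARD edges above depend on an `id_map` that translates source neuron IDs into freshly generated clone IDs. A small self-contained sketch of that remapping step (made-up IDs; `new_id` mirrors the helper shown in this hunk):

```python
import uuid

def new_id() -> str:
    return uuid.uuid4().hex

# Source-side edge rows as returned by the n2n query above (illustrative values).
n2n = [{"sid": "n-old-1", "did": "n-old-2", "weights": [0.3], "recurrent": False}]

# One fresh ID per original neuron; edges are then rewritten against the clones.
id_map = {"n-old-1": new_id(), "n-old-2": new_id()}

rows = [{
    "from_id": id_map[str(r["sid"])],
    "to_id": id_map[str(r["did"])],
    "weights": [float(x) for x in (r.get("weights") or [])],
    "recurrent": bool(r.get("recurrent", False)),
} for r in n2n]
print(rows)
```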
@@ -661,6 +795,14 @@ async def clone_agent(agent_id: Any, clone_agent_id: Any) -> Any:


async def _get_cortex_id_of_agent(agent_id: str) -> str:
+ """
+ This method retrieves the cortex ID of a given agent ID.
+
+ :param agent_id: A string representing the ID of the agent.
+ :return: A string representing the cortex ID of the agent.
+ """
rows = await neo4j.read_all(
"""
MATCH (a:agent {id:$aid})-[:OWNS]->(cx:cortex)
@@ -674,6 +816,15 @@ async def _get_cortex_id_of_agent(agent_id: str) -> str:


async def _list_ids_under_cortex(cx_id: str):
+ """
+ List all the IDs under the given cortex ID.
+
+ :param cx_id: The ID of the cortex.
+ :type cx_id: str
+
+ :returns: A tuple containing three lists, each list is a collection of IDs for sensors, neurons, and actuators under the given cortex ID respectively.
+ :rtype: tuple
+ """
s_rows = await neo4j.read_all(
"MATCH (cx:cortex {id:$cx})-[:HAS]->(s:sensor) RETURN s.id AS id ORDER BY id",
cx=str(cx_id),
@@ -690,6 +841,12 @@ async def _list_ids_under_cortex(cx_id: str):


async def _count_n2a(aid: str) -> int:
+ """
+ Count the number of FORWARD relationships from a neuron to an actuator with the specified ID.
+
+ :param aid: The ID of the actuator.
+ :return: The count of FORWARD relationships (int).
+ """
rows = await neo4j.read_all("""
MATCH (:neuron)-[r:FORWARD]->(a:actuator {id:$aid})
RETURN count(r) AS k
@@ -698,6 +855,15 @@ async def _count_n2a(aid: str) -> int:


async def _get_one_n2a_link(aid: str):
+ """
+ Async method to get one Neuron to Actuator link based on the provided Actuator ID.
+
+ :param aid: str - The ID of the actuator for which the neuron to actuator link is to be retrieved.
+
+ :return: The neuron ID associated with the actuator ID provided. Returns None if no link is found.
+ """
rows = await neo4j.read_all(
"""
MATCH (n:neuron)-[:FORWARD]->(a:actuator {id:$aid})
@@ -710,153 +876,6 @@ async def _get_one_n2a_link(aid: str):
|
|||||||
return rows[0]["nid"] if rows else None
|
return rows[0]["nid"] if rows else None
|
||||||
|
|
||||||
|
|
||||||
async def test():
|
|
||||||
specie_id = "test"
|
|
||||||
agent_id = "test"
|
|
||||||
clone_agent_id = "test_clone"
|
|
||||||
spec_con = {"morphology": "xor_mimic", "neural_afs": ["tanh", "cos", "gauss", "abs"]}
|
|
||||||
|
|
||||||
await construct_agent(specie_id, agent_id, spec_con)
|
|
||||||
await clone_agent(agent_id, clone_agent_id)
|
|
||||||
|
|
||||||
await neo4j.close()
|
|
||||||
|
|
||||||
|
|
||||||
async def test_mut_operators():
|
|
||||||
specie_id = "test"
|
|
||||||
agent_id = "test"
|
|
||||||
clone_agent_id = "test_clone"
|
|
||||||
spec_con = {"morphology": "xor_mimic", "neural_afs": ["tanh", "cos", "gauss", "abs"]}
|
|
||||||
|
|
||||||
genotype_mutator = GenotypeMutator(neo4j)
|
|
||||||
|
|
||||||
print("[TEST] construct_agent")
|
|
||||||
await construct_agent(specie_id, agent_id, spec_con)
|
|
||||||
|
|
||||||
cx_id = await _get_cortex_id_of_agent(agent_id)
|
|
||||||
sensors, neurons, actuators = await _list_ids_under_cortex(cx_id)
|
|
||||||
print(f"[TEST] cortex={cx_id} | S={len(sensors)} N={len(neurons)} A={len(actuators)}")
|
|
||||||
|
|
||||||
print("[TEST] link neuron->neuron (self-loop)")
|
|
||||||
n0 = neurons[0]
|
|
||||||
await genotype_mutator.link_from_element_to_element(agent_id, n0, n0)
|
|
||||||
rows = await neo4j.read_all(
|
|
||||||
"""
|
|
||||||
MATCH (:neuron {id:$nid})-[r:FORWARD]->(:neuron {id:$nid})
|
|
||||||
RETURN count(r) AS k
|
|
||||||
""",
|
|
||||||
nid=str(n0),
|
|
||||||
)
|
|
||||||
assert int(rows[0]["k"]) == 1, "Expected self-loop to exist"
|
|
||||||
|
|
||||||
print("[TEST] link sensor->neuron (first non-duplicate)")
|
|
||||||
linked_ok = False
|
|
||||||
for sid in sensors:
|
|
||||||
for nid in neurons[::-1]:
|
|
||||||
try:
|
|
||||||
await genotype_mutator.link_from_element_to_element(agent_id, sid, nid)
|
|
||||||
linked_ok = True
|
|
||||||
chosen_s, chosen_n = sid, nid
|
|
||||||
break
|
|
||||||
except ValueError as e:
|
|
||||||
|
|
||||||
if "already exists" in str(e):
|
|
||||||
continue
|
|
||||||
else:
|
|
||||||
raise
|
|
||||||
if linked_ok:
|
|
||||||
break
|
|
||||||
print(
|
|
||||||
f"[TEST] S->N linked: {linked_ok} ({chosen_s} -> {chosen_n})" if linked_ok else "[TEST] no new S->N link possible")
|
|
||||||
|
|
||||||
print("[TEST] link neuron->actuator (expect full, then free one slot)")
|
|
||||||
a0 = actuators[0]
|
|
||||||
n_try = neurons[-1]
|
|
||||||
try:
|
|
||||||
await genotype_mutator.link_from_element_to_element(agent_id, n_try, a0)
|
|
||||||
|
|
||||||
print("[TEST] N->A linked without freeing capacity (actuator had space)")
|
|
||||||
except ValueError as e:
|
|
||||||
if "fully connected" in str(e):
|
|
||||||
print("[TEST] actuator full as expected; cutting one existing N->A to free capacity")
|
|
||||||
victim_n = await _get_one_n2a_link(a0)
|
|
||||||
assert victim_n is not None, "No existing N->A to cut, unexpected"
|
|
||||||
await genotype_mutator.cut_link(victim_n, a0)
|
|
||||||
|
|
||||||
k_after = await _count_n2a(a0)
|
|
||||||
rows_vl = await neo4j.read_all(
|
|
||||||
"MATCH (a:actuator {id:$aid}) RETURN toInteger(a.vector_length) AS vl",
|
|
||||||
aid=str(a0),
|
|
||||||
)
|
|
||||||
vl = int(rows_vl[0]["vl"])
|
|
||||||
assert k_after < vl, "Capacity not freed as expected"
|
|
||||||
|
|
||||||
await genotype_mutator.link_from_element_to_element(agent_id, n_try, a0)
|
|
||||||
print(f"[TEST] N->A linked after freeing capacity ({n_try} -> {a0})")
|
|
||||||
else:
|
|
||||||
raise
|
|
||||||
|
|
||||||
print("[TEST] mutate weights")
|
|
||||||
n_id = await genotype_mutator.mutate_weights(agent_id)
|
|
||||||
print(f"[TEST] mutated weights: {n_id}")
|
|
||||||
|
|
||||||
print("[TEST] get activation function from neuron")
|
|
||||||
print(await genotype_mutator.get_spec_neural_afs(agent_id))
|
|
||||||
print("------------------------------------------")
|
|
||||||
|
|
||||||
print("[TEST] cut neuron->neuron (self-loop)")
|
|
||||||
await genotype_mutator.cut_link(n0, n0)
|
|
||||||
rows = await neo4j.read_all(
|
|
||||||
"""
|
|
||||||
MATCH (:neuron {id:$nid})-[r:FORWARD]->(:neuron {id:$nid})
|
|
||||||
RETURN count(r) AS k
|
|
||||||
""",
|
|
||||||
nid=str(n0),
|
|
||||||
)
|
|
||||||
assert int(rows[0]["k"]) == 0, "Expected self-loop to be cut"
|
|
||||||
|
|
||||||
await update_fingerprint(agent_id)
|
|
||||||
|
|
||||||
print("[TEST] clone_agent")
|
|
||||||
await clone_agent(agent_id, clone_agent_id)
|
|
||||||
|
|
||||||
await neo4j.close()
|
|
||||||
print("[TEST] done.")
|
|
||||||
|
|
||||||
|
|
||||||
async def test_add_neuron():
|
|
||||||
specie_id = "test"
|
|
||||||
agent_id = "test"
|
|
||||||
clone_agent_id = "test_clone"
|
|
||||||
spec_con = {"morphology": "xor_mimic", "neural_afs": ["tanh", "cos", "gauss", "abs"]}
|
|
||||||
|
|
||||||
genotype_mutator = GenotypeMutator(neo4j)
|
|
||||||
|
|
||||||
print("[TEST] construct_agent")
|
|
||||||
await construct_agent(specie_id, agent_id, spec_con)
|
|
||||||
|
|
||||||
print("cloning agent")
|
|
||||||
await clone_agent(agent_id, clone_agent_id)
|
|
||||||
|
|
||||||
print("mutating cloned agent: adding neuron")
|
|
||||||
await genotype_mutator.outsplice(clone_agent_id)
|
|
||||||
|
|
||||||
print("mutating weights")
|
|
||||||
await genotype_mutator.mutate_weights(clone_agent_id)
|
|
||||||
|
|
||||||
print("add bias")
|
|
||||||
await genotype_mutator.add_bias(clone_agent_id)
|
|
||||||
|
|
||||||
print("add sensor")
|
|
||||||
await genotype_mutator.add_sensor(clone_agent_id)
|
|
||||||
|
|
||||||
await neo4j.close()
|
|
||||||
|
|
||||||
|
|
||||||
async def create_test():
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
def generate_ids(vector_length):
|
def generate_ids(vector_length):
|
||||||
return [uuid.uuid4().hex for _ in range(vector_length)]
|
return [uuid.uuid4().hex for _ in range(vector_length)]
|
||||||
|
|
||||||
@@ -876,8 +895,13 @@ def create_input_weights(input_specs):

async def delete_agent(agent_id: Any):
"""
- Löscht einen Agent und seinen gesamten Genotyp (Cortex, Neuronen, Sensoren, Aktuatoren + Kanten)
- anhand der Agent-ID.
+ Delete agent from the graph database along with related cortex, neurons, sensors, and actuators.
+
+ Parameters:
+ agent_id (Any): The unique identifier of the agent to be deleted.
+
+ Return Type:
+ None
"""
aid = str(agent_id)

@@ -901,18 +925,24 @@ def _generate_neuron_af(afs: List[Any]):
return random.choice(afs)


- def _generalize_evo_hist(evo_hist):
+ def _generalize_evo_hist(_):
pass


async def print_agent(agent_id: Any):
"""
- Gibt den kompletten Genotyp eines Agenten formatiert aus:
- - Agent-Props
- - Cortex
- - Sensoren
- - Neuronen
- - Aktuatoren
+ Async method print_agent to print details of an agent based on the provided agent_id.
+
+ Parameters:
+ - agent_id: Any - the identifier of the agent to print details for
+
+ The method executes various queries to retrieve information about the agent, its cortex, sensors, neurons,
+ actuators, as well as the connections between sensors/neurons and neurons/actuators.
+ It then prints the retrieved data in a formatted manner.
+
+ This method does not return a value, it directly prints the information to the console.
"""
aid = str(agent_id)

@@ -987,17 +1017,15 @@ async def print_agent(agent_id: Any):

async def load_genotype_snapshot(agent_id: str) -> Dict[str, Any]:
"""
- Liefert eine JSON-ähnliche Struktur für das Exoself:
- {
- "cortex": {"id": ...},
- "sensors": [{id, name, scape, vector_length}],
- "actuators": [{id, name, scape, vector_length, fanin_ids}],
- "neurons": [{
- "id", "activation_function", "layer_index",
- "bias": float|None,
- "input_weights": [{ "input_id": str, "weights": [float,...] }]
- }]
- }
+ Method to load genotype snapshot for a given agent.
+
+ Parameters:
+ - agent_id (str): The identifier of the agent.
+
+ Returns:
+ - Dict[str, Any]: A dictionary containing the genotype snapshot information for the agent.
"""
rows = await neo4j.read_all("""
MATCH (a:agent {id:$aid})-[:OWNS]->(cx:cortex)
@@ -1078,9 +1106,19 @@ async def persist_neuron_backups(
edge_rows: List[Dict[str, Any]],
) -> None:
"""
- Schreibt die von Exoself/Neuronen gelieferten Backups:
- - bias_rows: [{ "nid": str, "bias": float }]
- - edge_rows: [{ "from_id": str, "to_id": str, "weights": [float,...] }]
+ This method persists neuron backups to a Neo4j database by updating the bias
+ and edge weights of neurons based on the provided input data.
+
+ Parameters:
+ - bias_rows: A list of dictionaries where each dictionary represents a
+ neuron's ID and its corresponding bias value.
+ - edge_rows: A list of dictionaries where each dictionary represents the source neuron ID,
+ destination neuron ID, and the weights of the edges between them.
+
+ Return Type:
+ None
"""
if bias_rows:
await neo4j.run_consume("""
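The removed German docstring spelled out the exact row shapes; for reference, a sketch of such payloads with placeholder IDs (the keyword-argument call at the end is an assumption about the function's full signature):

```python
# Illustrative payloads matching the shapes described above (IDs are placeholders).
bias_rows = [
    {"nid": "neuron-1", "bias": 0.12},
]
edge_rows = [
    {"from_id": "neuron-1", "to_id": "neuron-2", "weights": [0.4, -1.3]},
]

# They would be handed to the coroutine roughly like this:
# await persist_neuron_backups(bias_rows=bias_rows, edge_rows=edge_rows)
```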
@@ -1,3 +0,0 @@
- """
- read and write api for exoself
- """
@@ -56,7 +56,20 @@ class GenotypeMutator:
|
|||||||
self.neo4j = neo4j
|
self.neo4j = neo4j
|
||||||
|
|
||||||
async def _pick_random_neuron_id(self, agent_id: str) -> str:
|
async def _pick_random_neuron_id(self, agent_id: str) -> str:
|
||||||
"""Wählt zufällig ein Neuron unter dem Cortex des Agents aus."""
|
"""
|
||||||
|
|
||||||
|
This method picks a random neuron ID related to a specific agent from the Neo4j database.
|
||||||
|
|
||||||
|
Parameters:
|
||||||
|
- agent_id (str): The ID of the agent for which a random neuron ID will be selected.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
- str: A randomly selected neuron ID related to the specified agent.
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
- ValueError: If no neurons are found for the specified agent ID.
|
||||||
|
|
||||||
|
"""
|
||||||
rows = await self.neo4j.read_all(
|
rows = await self.neo4j.read_all(
|
||||||
"""
|
"""
|
||||||
MATCH (a:agent {id:$aid})-[:OWNS]->(cx:cortex)-[:HAS]->(n:neuron)
|
MATCH (a:agent {id:$aid})-[:OWNS]->(cx:cortex)-[:HAS]->(n:neuron)
|
||||||
@@ -69,6 +82,16 @@ class GenotypeMutator:
|
|||||||
return random.choice([r["nid"] for r in rows])
|
return random.choice([r["nid"] for r in rows])
|
||||||
|
|
||||||
async def _append_evo(self, agent_id: str, entry: dict):
|
async def _append_evo(self, agent_id: str, entry: dict):
|
||||||
|
"""
|
||||||
|
Append a new entry to the evolution history of a specific agent.
|
||||||
|
|
||||||
|
Parameters:
|
||||||
|
- agent_id (str): The unique identifier of the agent.
|
||||||
|
- entry (dict): The entry to be appended to the evolution history.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
None
|
||||||
|
"""
|
||||||
s = json.dumps(entry, separators=(",", ":"))
|
s = json.dumps(entry, separators=(",", ":"))
|
||||||
await self.neo4j.run_consume(
|
await self.neo4j.run_consume(
|
||||||
"""
|
"""
|
||||||
@@ -80,6 +103,17 @@ class GenotypeMutator:
|
|||||||
)
|
)
|
||||||
|
|
||||||
async def _get_cortex_id_of_neuron(self, nid: str) -> str:
|
async def _get_cortex_id_of_neuron(self, nid: str) -> str:
|
||||||
|
"""
|
||||||
|
|
||||||
|
This method retrieves the cortex ID of a neuron based on the provided neuron ID.
|
||||||
|
|
||||||
|
Parameters:
|
||||||
|
- nid (str): The ID of the neuron to query for cortex ID.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
- str: The cortex ID associated with the provided neuron ID.
|
||||||
|
|
||||||
|
"""
|
||||||
rows = await self.neo4j.read_all(
|
rows = await self.neo4j.read_all(
|
||||||
"MATCH (cx:cortex)-[:HAS]->(n:neuron {id:$nid}) RETURN cx.id AS cxid",
|
"MATCH (cx:cortex)-[:HAS]->(n:neuron {id:$nid}) RETURN cx.id AS cxid",
|
||||||
nid=str(nid),
|
nid=str(nid),
|
||||||
@@ -89,7 +123,17 @@ class GenotypeMutator:
|
|||||||
return rows[0]["cxid"]
|
return rows[0]["cxid"]
|
||||||
|
|
||||||
async def get_spec_neural_afs(self, agent_id: str) -> list[str]:
|
async def get_spec_neural_afs(self, agent_id: str) -> list[str]:
|
||||||
"""Holt die Liste erlaubter Aktivierungsfunktionen aus a.spec_con_json.neural_afs."""
|
"""
|
||||||
|
|
||||||
|
async def get_spec_neural_afs(self, agent_id: str) -> list[str]:
|
||||||
|
Retrieve the list of specified neural activation functions for a given agent.
|
||||||
|
|
||||||
|
Parameters:
|
||||||
|
agent_id (str): The unique identifier of the agent.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
list[str]: A list of neural activation functions specified for the agent, as strings.
|
||||||
|
"""
|
||||||
rows = await self.neo4j.read_all(
|
rows = await self.neo4j.read_all(
|
||||||
"MATCH (a:agent {id:$aid}) RETURN a.spec_con_json AS scj",
|
"MATCH (a:agent {id:$aid}) RETURN a.spec_con_json AS scj",
|
||||||
aid=str(agent_id),
|
aid=str(agent_id),
|
||||||
@@ -105,6 +149,20 @@ class GenotypeMutator:
|
|||||||
return list({str(x) for x in afs})
|
return list({str(x) for x in afs})
|
||||||
|
|
||||||
async def _get_elem_type(self, elem_id: str):
|
async def _get_elem_type(self, elem_id: str):
|
||||||
|
"""
|
||||||
|
|
||||||
|
Retrieve the type of element based on the given element ID.
|
||||||
|
|
||||||
|
Parameters:
|
||||||
|
- elem_id (str): The unique identifier of the element
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
- str: The type of the element ('neuron', 'sensor', 'actuator', or 'unknown')
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
- ValueError: If the element with the specified ID is not found or unlabeled
|
||||||
|
|
||||||
|
"""
|
||||||
rows = await self.neo4j.read_all("""
|
rows = await self.neo4j.read_all("""
|
||||||
MATCH (e {id:$id}) RETURN
|
MATCH (e {id:$id}) RETURN
|
||||||
CASE
|
CASE
|
||||||
@@ -120,7 +178,15 @@ class GenotypeMutator:
|
|||||||
return rows[0]["t"]
|
return rows[0]["t"]
|
||||||
|
|
||||||
async def _get_layer_index_or_none(self, elem_id: str):
|
async def _get_layer_index_or_none(self, elem_id: str):
|
||||||
|
"""
|
||||||
|
Get the layer index of the neuron with the specified element ID.
|
||||||
|
|
||||||
|
Parameters:
|
||||||
|
- elem_id: str, the ID of the neuron element.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
- int or None, the layer index of the neuron if found, otherwise None.
|
||||||
|
"""
|
||||||
rows = await self.neo4j.read_all("""
|
rows = await self.neo4j.read_all("""
|
||||||
MATCH (n:neuron {id:$id}) RETURN toInteger(n.layer_index) AS li
|
MATCH (n:neuron {id:$id}) RETURN toInteger(n.layer_index) AS li
|
||||||
""", id=str(elem_id))
|
""", id=str(elem_id))
|
||||||
@@ -130,6 +196,18 @@ class GenotypeMutator:
|
|||||||
return int(rows[0]["li"])
|
return int(rows[0]["li"])
|
||||||
|
|
||||||
async def _get_agent_generation(self, agent_id: str):
|
async def _get_agent_generation(self, agent_id: str):
|
||||||
|
"""
|
||||||
|
This method fetches the generation value of a given agent ID from the Neo4j database.
|
||||||
|
|
||||||
|
Parameters:
|
||||||
|
- agent_id: a string representing the ID of the agent whose generation value needs to be fetched.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
- An integer value representing the generation number of the specified agent.
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
- ValueError: If the specified agent ID is not found in the database.
|
||||||
|
"""
|
||||||
rows = await self.neo4j.read_all("""
|
rows = await self.neo4j.read_all("""
|
||||||
MATCH (a:agent {id:$aid}) RETURN toInteger(a.generation) AS g
|
MATCH (a:agent {id:$aid}) RETURN toInteger(a.generation) AS g
|
||||||
""", aid=str(agent_id))
|
""", aid=str(agent_id))
|
||||||
@@ -139,6 +217,19 @@ class GenotypeMutator:
|
|||||||
return int(rows[0]["g"])
|
return int(rows[0]["g"])
|
||||||
|
|
||||||
async def link_from_element_to_element(self, agent_id: Any, from_id: Any, to_id: Any):
|
async def link_from_element_to_element(self, agent_id: Any, from_id: Any, to_id: Any):
|
||||||
|
"""
|
||||||
|
Async method to create a link between two elements specified by their ids.
|
||||||
|
|
||||||
|
:param agent_id: The id of the agent performing the link operation
|
||||||
|
:param from_id: The id of the element to link from
|
||||||
|
:param to_id: The id of the element to link to
|
||||||
|
|
||||||
|
:return: The result of the link operation
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
ValueError: If the link type is not supported
|
||||||
|
|
||||||
|
"""
|
||||||
ft = await self._get_elem_type(str(from_id))
|
ft = await self._get_elem_type(str(from_id))
|
||||||
tt = await self._get_elem_type(str(to_id))
|
tt = await self._get_elem_type(str(to_id))
|
||||||
if ft == "neuron" and tt == "neuron":
|
if ft == "neuron" and tt == "neuron":
|
||||||
@@ -150,6 +241,13 @@ class GenotypeMutator:
|
|||||||
raise ValueError(f"Unsupported link {ft} -> {tt}")
|
raise ValueError(f"Unsupported link {ft} -> {tt}")
|
||||||
|
|
||||||
async def _link_neuron_to_neuron(self, agent_id: Any, from_nid: Any, to_nid: Any):
|
async def _link_neuron_to_neuron(self, agent_id: Any, from_nid: Any, to_nid: Any):
|
||||||
|
"""
|
||||||
|
Link one neuron to another neuron within the agent's neural network.
|
||||||
|
|
||||||
|
:param agent_id: The ID of the agent.
|
||||||
|
:param from_nid: The ID of the neuron to link from.
|
||||||
|
:param to_nid: The ID of the neuron to link to.
|
||||||
|
"""
|
||||||
from_li = await self._get_layer_index_or_none(str(from_nid))
|
from_li = await self._get_layer_index_or_none(str(from_nid))
|
||||||
to_li = await self._get_layer_index_or_none(str(to_nid))
|
to_li = await self._get_layer_index_or_none(str(to_nid))
|
||||||
if from_li is None or to_li is None:
|
if from_li is None or to_li is None:
|
||||||
@@ -178,6 +276,18 @@ class GenotypeMutator:
|
|||||||
""", from_id=str(from_nid), to_id=str(to_nid), g=int(gen))
|
""", from_id=str(from_nid), to_id=str(to_nid), g=int(gen))
|
||||||
|
|
||||||
async def _link_sensor_to_neuron(self, agent_id: Any, from_sid: Any, to_nid: Any):
|
async def _link_sensor_to_neuron(self, agent_id: Any, from_sid: Any, to_nid: Any):
|
||||||
|
"""
|
||||||
|
Async method to link a sensor to a neuron in the system.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
agent_id (Any): The ID of the agent to which the sensor and neuron belong.
|
||||||
|
from_sid (Any): The ID of the source sensor to link.
|
||||||
|
to_nid (Any): The ID of the target neuron to link the sensor to.
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
ValueError: If the source sensor is not found or if the link between the sensor and neuron already exists.
|
||||||
|
|
||||||
|
"""
|
||||||
srows = await self.neo4j.read_all("""
|
srows = await self.neo4j.read_all("""
|
||||||
MATCH (s:sensor {id:$sid}) RETURN toInteger(s.vector_length) AS vl
|
MATCH (s:sensor {id:$sid}) RETURN toInteger(s.vector_length) AS vl
|
||||||
""", sid=str(from_sid))
|
""", sid=str(from_sid))
|
||||||
@@ -206,6 +316,19 @@ class GenotypeMutator:
|
|||||||
""", nid=str(to_nid), g=int(gen))
|
""", nid=str(to_nid), g=int(gen))
|
||||||
|
|
||||||
async def _link_neuron_to_actuator(self, agent_id: Any, from_nid: Any, to_aid: Any):
|
async def _link_neuron_to_actuator(self, agent_id: Any, from_nid: Any, to_aid: Any):
|
||||||
|
"""
|
||||||
|
Links a neuron to an actuator in the system.
|
||||||
|
|
||||||
|
Parameters:
|
||||||
|
- agent_id: Represents the ID of the agent involved in the linking process.
|
||||||
|
- from_nid: Represents the ID of the neuron from which the link originates.
|
||||||
|
- to_aid: Represents the ID of the actuator to which the link is directed.
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
- ValueError: If the actuator specified by `to_aid` is not found or is already fully connected, or if the link between neuron `from_nid` and actuator `to_aid` already exists.
|
||||||
|
|
||||||
|
This method establishes a link between the specified neuron and actuator, setting the weights to an empty list and updating the generation of the neuron accordingly.
|
||||||
|
"""
|
||||||
rows = await self.neo4j.read_all(
|
rows = await self.neo4j.read_all(
|
||||||
"""
|
"""
|
||||||
MATCH (a:actuator {id:$aid})
|
MATCH (a:actuator {id:$aid})
|
||||||
@@ -258,6 +381,24 @@ class GenotypeMutator:
|
|||||||
""", from_id=str(from_nid), to_id=str(to_aid))
|
""", from_id=str(from_nid), to_id=str(to_aid))
|
||||||
|
|
||||||
async def cut_link(self, from_id: Any, to_id: Any):
|
async def cut_link(self, from_id: Any, to_id: Any):
|
||||||
|
"""
|
||||||
|
Async method that cuts a link between two elements based on their types.
|
||||||
|
|
||||||
|
Parameters:
|
||||||
|
from_id (Any): The ID of the element where the link originates.
|
||||||
|
to_id (Any): The ID of the element where the link terminates.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
The result of cutting the link between the elements.
|
||||||
|
The result depends on the types of the elements:
|
||||||
|
- If both elements are neurons, the link between them is cut using '_cut_n2n'.
|
||||||
|
- If the from element is a sensor and the to element is a neuron, the link is cut using '_cut_s2n'.
|
||||||
|
- If the from element is a neuron and the to element is an actuator, the link is cut using '_cut_n2a'.
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
ValueError: If the cut between the specified element types is not supported.
|
||||||
|
|
||||||
|
"""
|
||||||
ft = await self._get_elem_type(str(from_id))
|
ft = await self._get_elem_type(str(from_id))
|
||||||
tt = await self._get_elem_type(str(to_id))
|
tt = await self._get_elem_type(str(to_id))
|
||||||
if ft == "neuron" and tt == "neuron":
|
if ft == "neuron" and tt == "neuron":
|
||||||
@@ -269,7 +410,18 @@ class GenotypeMutator:
|
|||||||
raise ValueError(f"Unsupported cut {ft} -> {tt}")
|
raise ValueError(f"Unsupported cut {ft} -> {tt}")
|
||||||
|
|
||||||
async def mutate_weights(self, agent_id: str):
|
async def mutate_weights(self, agent_id: str):
|
||||||
|
"""
|
||||||
|
Mutates the weights of a neuron associated with a specified agent.
|
||||||
|
|
||||||
|
Parameters:
|
||||||
|
- agent_id: str - The identifier of the agent for which weights are to be mutated.
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
- ValueError: If the agent specified by agent_id is not found or has no cortex, if there are no neurons under the cortex or if the neuron has no inputs and no bias.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
- The identifier of the mutated neuron.
|
||||||
|
"""
|
||||||
rows = await self.neo4j.read_all(
|
rows = await self.neo4j.read_all(
|
||||||
"""
|
"""
|
||||||
MATCH (a:agent {id:$aid})-[:OWNS]->(cx:cortex)
|
MATCH (a:agent {id:$aid})-[:OWNS]->(cx:cortex)
|
||||||
@@ -362,9 +514,13 @@ class GenotypeMutator:
|
|||||||
|
|
||||||
async def add_bias(self, agent_id: str) -> str:
|
async def add_bias(self, agent_id: str) -> str:
|
||||||
"""
|
"""
|
||||||
Buch-Semantik: Wähle zufälliges Neuron. Wenn Bias schon existiert -> Fehler.
|
Add bias to a neuron.
|
||||||
Sonst Bias hinzufügen (in unserem Schema: n.bias setzen), Generation updaten,
|
|
||||||
EvoHist ergänzen.
|
Parameters:
|
||||||
|
agent_id (str): The ID of the agent requesting to add bias.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: The ID of the neuron to which bias has been added.
|
||||||
"""
|
"""
|
||||||
nid = await self._pick_random_neuron_id(agent_id)
|
nid = await self._pick_random_neuron_id(agent_id)
|
||||||
|
|
||||||
@@ -396,9 +552,27 @@ class GenotypeMutator:
|
|||||||
|
|
||||||
async def remove_bias(self, agent_id: str) -> str:
|
async def remove_bias(self, agent_id: str) -> str:
|
||||||
"""
|
"""
|
||||||
Buch-Semantik: Wähle zufälliges Neuron. Wenn kein Bias vorhanden -> Fehler.
|
async def remove_bias(self, agent_id: str) -> str:
|
||||||
Sonst Bias entfernen (in Neo4j: Property auf NULL -> wird gelöscht), Generation updaten,
|
'''
|
||||||
EvoHist ergänzen.
|
Remove bias of a randomly selected neuron belonging to the specified agent.
|
||||||
|
|
||||||
|
Parameters:
|
||||||
|
- agent_id (str): The ID of the agent to remove bias from.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
- str: The ID of the neuron that had its bias removed.
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
- ValueError: If the selected neuron does not have a bias set.
|
||||||
|
|
||||||
|
The method first picks a random neuron ID with '_pick_random_neuron_id' method.
|
||||||
|
Then it reads the bias value of the neuron from Neo4j database.
|
||||||
|
If the bias is None, it raises a ValueError.
|
||||||
|
It retrieves the generation of the agent with '_get_agent_generation' method.
|
||||||
|
Updates the neuron in the database by setting the bias to None and updating its generation.
|
||||||
|
Appends the action of 'remove_bias' to the agent's evolution history.
|
||||||
|
Finally, returns the ID of the neuron that had its bias removed.
|
||||||
|
'''
|
||||||
"""
|
"""
|
||||||
nid = await self._pick_random_neuron_id(agent_id)
|
nid = await self._pick_random_neuron_id(agent_id)
|
||||||
|
|
||||||
@@ -429,12 +603,22 @@ class GenotypeMutator:
|
|||||||
|
|
||||||
async def mutate_af(self, agent_id: str) -> tuple[str, str, str]:
|
async def mutate_af(self, agent_id: str) -> tuple[str, str, str]:
|
||||||
"""
|
"""
|
||||||
Wählt ein zufälliges Neuron und ersetzt seine Aktivierungsfunktion
|
Mutates the activation function of a neuron belonging to a given agent.
|
||||||
durch eine andere aus der Spec (neural_afs), ungleich der aktuellen.
|
|
||||||
Fallback: 'tanh', wenn keine Alternative verfügbar.
|
|
||||||
Rückgabe: (nid, old_af, new_af)
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
:param agent_id: The unique identifier of the agent to whom the neuron belongs.
|
||||||
|
:type agent_id: str
|
||||||
|
:return: A tuple containing the neuron ID, old activation function, and new activation function.
|
||||||
|
:rtype: tuple[str, str, str]
|
||||||
|
|
||||||
|
This method picks a random neuron ID for the specified agent and retrieves its current activation function.
|
||||||
|
It then determines the allowed activation functions for the agent and selects a new activation function different from the current one.
|
||||||
|
If there are no alternative activation functions available, it defaults to using "tanh".
|
||||||
|
|
||||||
|
If the new activation function is the same as the old one, it returns without making any changes.
|
||||||
|
Otherwise, it updates the neuron's activation function and generation in the database.
|
||||||
|
Finally, it logs the mutation operation in the evolutionary history of the agent.
|
||||||
|
|
||||||
|
"""
|
||||||
nid = await self._pick_random_neuron_id(agent_id)
|
nid = await self._pick_random_neuron_id(agent_id)
|
||||||
|
|
||||||
row = await self.neo4j.read_all(
|
row = await self.neo4j.read_all(
|
||||||
@@ -473,15 +657,18 @@ class GenotypeMutator:
|
|||||||
|
|
||||||
async def add_outlink(self, agent_id: str) -> tuple[str, str]:
|
async def add_outlink(self, agent_id: str) -> tuple[str, str]:
|
||||||
"""
|
"""
|
||||||
Buch-Äquivalent zu add_outlink/1:
|
Add an outbound link from a neuron identified by the given agent_id.
|
||||||
- Zufälliges Neuron A wählen.
|
|
||||||
- Kandidaten-Ziele = (alle Neuronen außer bereits per A→* verlinkte) ∪ (alle Aktuatoren mit freiem Slot, die A noch nicht verlinkt).
|
|
||||||
- Zufälliges Ziel B wählen.
|
|
||||||
- Link A→B herstellen (N→N mit |weights|=1 und ggf. recurrent, N→A ohne Gewichte mit Kapazitätsprüfung).
|
|
||||||
- Evo-Historie: {add_outlink, A, B}.
|
|
||||||
Rückgabe: (A, B)
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
Parameters:
|
||||||
|
- agent_id (str): The ID of the agent requesting the update.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
A tuple of two strings representing the IDs of the source and target neurons.
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
ValueError: If there are no available target neurons or actuators to link to.
|
||||||
|
|
||||||
|
"""
|
||||||
a_nid = await self._pick_random_neuron_id(agent_id)
|
a_nid = await self._pick_random_neuron_id(agent_id)
|
||||||
cx_id = await self._get_cortex_id_of_neuron(a_nid)
|
cx_id = await self._get_cortex_id_of_neuron(a_nid)
|
||||||
|
|
||||||
@@ -523,14 +710,18 @@ class GenotypeMutator:
|
|||||||
|
|
||||||
async def add_inlink(self, agent_id: str) -> tuple[str, str]:
|
async def add_inlink(self, agent_id: str) -> tuple[str, str]:
|
||||||
"""
|
"""
|
||||||
Wählt ein zufälliges Ziel-Neuron B und verlinkt eine neue Quelle A
|
|
||||||
(Sensor ODER Neuron) auf B, sofern noch nicht verbunden.
|
|
||||||
- S→N: Gewichte-Liste Länge = sensor.vl (Projekt: ±2π; Buch: ±π/2)
|
|
||||||
- N→N: Skalar-Gewicht (±π/2), recurrent wenn li(B) ≤ li(A)
|
|
||||||
EvoHist: {add_inlink, A, B}
|
|
||||||
Rückgabe: (A_id, B_id)
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
Add inlink method adds a new connection between a sensor or neuron to a specified neuron in the Cortex.
|
||||||
|
|
||||||
|
Parameters:
|
||||||
|
- agent_id: str - The ID of the agent requesting the connection.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
- tuple[str, str] - A Tuple containing the ID of the source (sensor or neuron) and the destination neuron ID connected.
|
||||||
|
|
||||||
|
Note: This method may raise a ValueError if the specified neuron is already connected to all available sensors/neurons.
|
||||||
|
|
||||||
|
"""
|
||||||
b_nid = await self._pick_random_neuron_id(agent_id)
|
b_nid = await self._pick_random_neuron_id(agent_id)
|
||||||
cx_id = await self._get_cortex_id_of_neuron(b_nid)
|
cx_id = await self._get_cortex_id_of_neuron(b_nid)
|
||||||
|
|
||||||
@@ -609,15 +800,17 @@ class GenotypeMutator:
|
|||||||
|
|
||||||
async def add_sensorlink(self, agent_id: str) -> tuple[str, str]:
|
async def add_sensorlink(self, agent_id: str) -> tuple[str, str]:
|
||||||
"""
|
"""
|
||||||
Wählt einen zufälligen Sensor S und verbindet ihn mit einem Neuron N,
|
add_sensorlink method adds a connection between a sensor and a neuron for a given agent.
|
||||||
das S noch nicht ansteuert (S -> N).
|
|
||||||
- Gewichtsvektor-Länge = sensor.vector_length
|
|
||||||
- Gewichte ~ U(-π/2, +π/2) (Buch)
|
|
||||||
- Neuron bekommt Generation des Agents
|
|
||||||
- EvoHist: {add_sensorlink, S, N}
|
|
||||||
Rückgabe: (S_id, N_id)
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
Args:
|
||||||
|
agent_id (str): The ID of the agent to which the sensor and neuron connection will be added.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
tuple[str, str]: A tuple containing the ID of the sensor and the ID of the neuron connected.
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
ValueError: If no sensors are found for the specified agent or if the sensor is already connected to all neurons.
|
||||||
|
"""
|
||||||
s_rows = await self.neo4j.read_all(
|
s_rows = await self.neo4j.read_all(
|
||||||
"""
|
"""
|
||||||
MATCH (ag:agent {id:$aid})-[:OWNS]->(cx:cortex)-[:HAS]->(s:sensor)
|
MATCH (ag:agent {id:$aid})-[:OWNS]->(cx:cortex)-[:HAS]->(s:sensor)
|
||||||
@@ -670,12 +863,15 @@ class GenotypeMutator:
|
|||||||
|
|
||||||
async def add_actuatorlink(self, agent_id: str) -> tuple[str, str]:
|
async def add_actuatorlink(self, agent_id: str) -> tuple[str, str]:
|
||||||
"""
|
"""
|
||||||
Wählt einen zufälligen Aktuator A mit freier Kapazität (k < A.vl),
|
Async method to add an actuator link for a given agent.
|
||||||
wählt ein Neuron N, das noch nicht auf A verlinkt ist,
|
|
||||||
erzeugt N->A (keine Gewichte), und loggt EvoHist.
|
|
||||||
Rückgabe: (N_id, A_id)
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
Parameters:
|
||||||
|
- agent_id: str - Identifier of the agent to add the actuator link to
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
- Tuple containing two strings: n_id and a_id, which represent the identifiers of the neuron and actuator linked, respectively
|
||||||
|
|
||||||
|
"""
|
||||||
arows = await self.neo4j.read_all(
|
arows = await self.neo4j.read_all(
|
||||||
"""
|
"""
|
||||||
MATCH (ag:agent {id:$aid})-[:OWNS]->(cx:cortex)-[:HAS]->(a:actuator)
|
MATCH (ag:agent {id:$aid})-[:OWNS]->(cx:cortex)-[:HAS]->(a:actuator)
|
||||||
@@ -715,15 +911,16 @@ class GenotypeMutator:
|
|||||||
|
|
||||||
async def add_neuron(self, agent_id: str) -> tuple[str, str, str]:
|
async def add_neuron(self, agent_id: str) -> tuple[str, str, str]:
|
||||||
"""
|
"""
|
||||||
Buch-Operator add_neuron:
|
Add a new neuron to the specified agent in the neural network.
|
||||||
- wähle random Target-Layer aus Agent.pattern
|
|
||||||
- erzeuge neuen Neuron-Knoten (ohne Eingänge/Ausgänge, bias=None)
|
|
||||||
- wähle From ∈ (Sensor ∪ Neuron_alt), To ∈ (Neuron_alt ∪ Actuator_mit_Platz)
|
|
||||||
- verlinke From->NewN und NewN->To
|
|
||||||
- logge EvoHist und aktualisiere Fingerprint
|
|
||||||
Rückgabe: (from_id, new_nid, to_id)
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
Parameters:
|
||||||
|
- agent_id (str): The ID of the agent to add the neuron to.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
- tuple[str, str, str]: A tuple containing the IDs of the elements involved in the neuron addition process.
|
||||||
|
The tuple includes the ID of the element the neuron is created from, the ID of the newly created neuron,
|
||||||
|
and the ID of the element the neuron is connected to.
|
||||||
|
"""
|
||||||
arows = await self.neo4j.read_all(
|
arows = await self.neo4j.read_all(
|
||||||
"""
|
"""
|
||||||
MATCH (a:agent {id:$aid})-[:OWNS]->(cx:cortex)
|
MATCH (a:agent {id:$aid})-[:OWNS]->(cx:cortex)
|
||||||
@@ -821,37 +1018,18 @@ class GenotypeMutator:
|
|||||||
await self._append_evo(agent_id,
|
await self._append_evo(agent_id,
|
||||||
{"op": "add_neuron", "from": str(from_id), "new": str(new_nid), "to": str(to_id)})
|
{"op": "add_neuron", "from": str(from_id), "new": str(new_nid), "to": str(to_id)})
|
||||||
|
|
||||||
""""
|
|
||||||
if hasattr(self, "update_fingerprint_fn"):
|
|
||||||
|
|
||||||
await self.update_fingerprint_fn(agent_id)
|
|
||||||
else:
|
|
||||||
|
|
||||||
try:
|
|
||||||
from genotype import update_fingerprint
|
|
||||||
await update_fingerprint(agent_id)
|
|
||||||
except Exception:
|
|
||||||
pass
|
|
||||||
"""
|
|
||||||
|
|
||||||
return from_id, new_nid, to_id
|
return from_id, new_nid, to_id
|
||||||
|
|
||||||
async def outsplice(self, agent_id: str) -> tuple[str, str, str]:
|
async def outsplice(self, agent_id: str) -> tuple[str, str, str]:
|
||||||
"""
|
"""
|
||||||
Buch-Operator: outsplice
|
Asynchronous method to perform outsplice operation for a given agent ID.
|
||||||
- wähle A (Neuron)
|
|
||||||
- wähle B aus A.output, aber nur feedforward:
|
|
||||||
* B:neuron mit layer(B) > layer(A) ODER
|
|
||||||
* B:actuator
|
|
||||||
- erzeuge neue Schicht zwischen A und B:
|
|
||||||
* wir nutzen integer-layers → insert layer A.layer+1:
|
|
||||||
SHIFT: alle Neuronen mit layer_index >= A.layer+1 um +1 erhöhen
|
|
||||||
* K in layer = A.layer+1 anlegen
|
|
||||||
- cut A->B, link A->K und K->B
|
|
||||||
- Generationen setzen, evo_hist anhängen, Fingerprint aktualisieren
|
|
||||||
Rückgabe: (A_id, K_id, B_id)
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
Parameters:
|
||||||
|
- agent_id (str): ID of the agent for which outsplice operation needs to be performed.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
- tuple containing IDs of three elements involved in the outsplice operation: a_id, k_id, b_id.
|
||||||
|
"""
|
||||||
arows = await self.neo4j.read_all(
|
arows = await self.neo4j.read_all(
|
||||||
"""
|
"""
|
||||||
MATCH (ag:agent {id:$aid})-[:OWNS]->(cx:cortex)
|
MATCH (ag:agent {id:$aid})-[:OWNS]->(cx:cortex)
|
||||||
@@ -969,29 +1147,24 @@ class GenotypeMutator:
|
|||||||
append=[s],
|
append=[s],
|
||||||
)
|
)
|
||||||
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
from neo4j_genotype import update_fingerprint
|
|
||||||
await update_fingerprint(agent_id)
|
|
||||||
except Exception:
|
|
||||||
pass
|
|
||||||
"""
|
|
||||||
|
|
||||||
return a_id, k_id, b_id
|
return a_id, k_id, b_id
|
||||||
|
|
||||||
async def add_sensor(self, agent_id: str) -> tuple[str, str]:
|
async def add_sensor(self, agent_id: str) -> tuple[str, str]:
|
||||||
"""
|
"""
|
||||||
Fügt einen neuen Sensor der Morphologie hinzu und verbindet ihn auf
|
Async method add_sensor: add_sensor(agent_id: str) -> tuple[str, str]
|
||||||
ein zufälliges Neuron (S -> N).
|
Description:
|
||||||
- Sensor-Template kommt aus morphology.get_Sensors(morph_name)
|
This method adds a new sensor to the specified agent's cortex in the Neo4j database.
|
||||||
- Already-used werden per (name, scape, vl) gegen den Cortex gefiltert
|
|
||||||
- Link S->N mit Gewichtsvektor-Länge = sensor.vector_length
|
|
||||||
(Buch-Semantik: Gewichte ~ U(-π/2, +π/2))
|
|
||||||
- Ziel-Neuron erhält generation des Agents
|
|
||||||
- EvoHist: {add_sensor, S, N}
|
|
||||||
Rückgabe: (S_id, N_id)
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
Parameters:
|
||||||
|
agent_id (str): The ID of the agent for which the sensor is being added.
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
ValueError: If the specified agent is not found, or if the sensor cannot be added for various reasons.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
tuple[str, str]: A tuple containing the ID of the added sensor and the ID of the connected neuron.
|
||||||
|
|
||||||
|
"""
|
||||||
arows = await self.neo4j.read_all(
|
arows = await self.neo4j.read_all(
|
||||||
"""
|
"""
|
||||||
MATCH (ag:agent {id:$aid})-[:OWNS]->(cx:cortex)
|
MATCH (ag:agent {id:$aid})-[:OWNS]->(cx:cortex)
|
||||||
@@ -1079,14 +1252,19 @@ class GenotypeMutator:
|
|||||||
|
|
||||||
async def add_actuator(self, agent_id: str) -> tuple[str, str]:
|
async def add_actuator(self, agent_id: str) -> tuple[str, str]:
|
||||||
"""
|
"""
|
||||||
Fügt einen neuen Aktuator der Morphologie hinzu und verbindet ihn von
|
Add actuator to an agent with the specified agent_id
|
||||||
einem zufälligen Neuron (N -> A).
|
|
||||||
- Aktuator-Template kommt aus morphology.get_Actuators(morph_name)
|
|
||||||
- Already-used werden per (name, scape, vl) gegen Cortex gefiltert
|
|
||||||
- EvoHist wird als JSON-String appended
|
|
||||||
Rückgabe: (neuron_id, actuator_id)
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
Parameters:
|
||||||
|
- agent_id (str): The ID of the agent to which the actuator will be added
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
- tuple[str, str]: A tuple containing the IDs of the neuron and actuator that have been connected
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
- ValueError: If the agent with the specified agent_id is not found, cannot read morphology from spec_con_json,
|
||||||
|
NN already uses all available actuators for this morphology, cortex has no neurons to connect from
|
||||||
|
|
||||||
|
"""
|
||||||
arows = await self.neo4j.read_all(
|
arows = await self.neo4j.read_all(
|
||||||
"""
|
"""
|
||||||
MATCH (ag:agent {id:$aid})-[:OWNS]->(cx:cortex)
|
MATCH (ag:agent {id:$aid})-[:OWNS]->(cx:cortex)
|
||||||
@@ -1165,12 +1343,4 @@ class GenotypeMutator:
|
|||||||
append=[s],
|
append=[s],
|
||||||
)
|
)
|
||||||
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
from neo4j_genotype import update_fingerprint
|
|
||||||
await update_fingerprint(agent_id)
|
|
||||||
except Exception:
|
|
||||||
pass
|
|
||||||
"""
|
|
||||||
|
|
||||||
return n_id, a_id
|
return n_id, a_id
|
||||||
|
|||||||
1539
mathema/genotype/neo4j/genotype_mutator_tx.py
Normal file
@@ -1,17 +0,0 @@
|
|||||||
import asyncio
|
|
||||||
|
|
||||||
from mathema.genotype.neo4j.genotype import test_add_neuron
|
|
||||||
|
|
||||||
|
|
||||||
async def main():
|
|
||||||
# polis = Polis()
|
|
||||||
# await polis.create()
|
|
||||||
# await polis.start()
|
|
||||||
|
|
||||||
# await polis.stop()
|
|
||||||
# await test_mut_operators()
|
|
||||||
await test_add_neuron()
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
asyncio.run(main())
|
|
||||||
@@ -1,49 +0,0 @@
|
|||||||
# tests/test_population_monitor_integration_lite.py
|
|
||||||
import asyncio
|
|
||||||
import random
|
|
||||||
|
|
||||||
import mathema.core.population_monitor as pm
|
|
||||||
|
|
||||||
|
|
||||||
class FakeExoself:
|
|
||||||
def __init__(self, agent_id, monitor):
|
|
||||||
self.agent_id = agent_id
|
|
||||||
self.monitor = monitor
|
|
||||||
self._task = asyncio.create_task(self._run())
|
|
||||||
|
|
||||||
async def _run(self):
|
|
||||||
try:
|
|
||||||
await asyncio.sleep(0.01)
|
|
||||||
seed = sum(ord(c) for c in str(self.agent_id)) % 1000
|
|
||||||
random.seed(seed)
|
|
||||||
fitness = 0.5 + random.random()
|
|
||||||
await self.monitor.cast(("terminated", self.agent_id, float(fitness), 4, 4, 10))
|
|
||||||
except asyncio.CancelledError:
|
|
||||||
pass
|
|
||||||
|
|
||||||
async def cast(self, msg):
|
|
||||||
if msg and msg[0] == "terminate":
|
|
||||||
if self._task:
|
|
||||||
self._task.cancel()
|
|
||||||
|
|
||||||
|
|
||||||
async def fake_exoself_start(agent_id, monitor):
|
|
||||||
return FakeExoself(agent_id, monitor)
|
|
||||||
|
|
||||||
|
|
||||||
pm.EXOSELF_START = fake_exoself_start # 👉 sauberer DI-Hook
|
|
||||||
|
|
||||||
|
|
||||||
async def main():
|
|
||||||
monitor = await pm.init_population(("pop_iso", pm.INIT_CONSTRAINTS, "gt", "competition"))
|
|
||||||
|
|
||||||
G = 3
|
|
||||||
for _ in range(G):
|
|
||||||
await monitor.gen_ended.wait()
|
|
||||||
|
|
||||||
# await monitor.gen_ended.wait() # gezielt auf Generationsende warten
|
|
||||||
await monitor.stop("normal")
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
asyncio.run(main())
|
|
||||||
@@ -1,3 +1,18 @@
+ """
+ Replay utility for visualizing the best evolved CarRacing agent.
+
+ This module loads the best-performing agent from a given population stored
+ in Neo4j, reconstructs its policy from a genotype snapshot, and replays the
+ agent in a human-rendered CarRacing environment using pygame.
+
+ High-level workflow:
+ 1. Query Neo4j for the agent with the highest recorded fitness in a population.
+ 2. Load the agent’s genotype snapshot.
+ 3. Build an executable policy from the snapshot.
+ 4. Run the policy in the CarRacing environment, step by step.
+ 5. Render the environment in real time and automatically handle episode resets.
+ """

import numpy as np
import pygame

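As a rough illustration of steps 4 and 5 of this workflow, a generic Gymnasium-style replay loop; the constant `policy` and the `CarRacing-v2` environment id are stand-ins, not the module's actual helpers or the project's own CarRacing wrapper:

```python
import gymnasium as gym
import numpy as np

def policy(obs: np.ndarray) -> np.ndarray:
    # Placeholder policy: constant gentle throttle. The real script would build
    # this function from the genotype snapshot loaded out of Neo4j.
    return np.array([0.0, 0.2, 0.0], dtype=np.float32)  # [steer, gas, brake]

env = gym.make("CarRacing-v2", render_mode="human")
obs, _ = env.reset()
for _ in range(1000):
    obs, reward, terminated, truncated, _ = env.step(policy(obs))
    if terminated or truncated:   # episode ended -> automatic reset
        obs, _ = env.reset()
env.close()
```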
@@ -6,6 +6,29 @@ log = logging.getLogger(__name__)


class CarRacingScape(Actor):
+ """
+ Scape (environment) actor wrapping a CarRacing-like Gymnasium environment.
+
+ This actor provides an asynchronous message interface for sensors and
+ actuators in the actor-based cortex architecture:
+
+ - Sensors request observations/features via ("sense", sid, sensor_pid).
+ The scape replies to the given sensor actor with ("percept", vec).
+
+ - Actuators apply actions via ("action", action, actuator_pid).
+ The scape performs an env.step(action) and replies with
+ ("result", step_reward, halt_flag) where halt_flag is 1 if the episode
+ terminated or was truncated.
+
+ In addition, the scape automatically resets the environment when an episode
+ ends (halt_flag == 1) using env.fast_reset().
+
+ Notes about `_stepped`:
+ - Some environments do not provide a meaningful feature vector immediately
+ after reset until at least one `step()` was executed.
+ - `_get_features()` ensures that the environment has been stepped once
+ (with a zero action) before calling `env.get_feature_vector()`.
+ """
def __init__(self, env, name: str = "CarRacingScape"):
super().__init__(name)
self.env = env
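To make the message protocol described above concrete, a self-contained toy round-trip; plain asyncio queues stand in for the project's Actor mailboxes and the environment is mocked:

```python
import asyncio

async def toy_scape(inbox: asyncio.Queue):
    # Mimics the documented protocol with a fake environment.
    while True:
        msg = await inbox.get()
        if msg[0] == "sense":
            _, sid, sensor_pid = msg
            await sensor_pid.put(("percept", [0.0, 1.0, 0.5]))  # fake feature vector
        elif msg[0] == "action":
            _, action, actuator_pid = msg
            await actuator_pid.put(("result", 0.1, 0))          # step_reward, halt_flag
        elif msg[0] == "terminate":
            return

async def main():
    scape_inbox, sensor_inbox, actuator_inbox = asyncio.Queue(), asyncio.Queue(), asyncio.Queue()
    task = asyncio.create_task(toy_scape(scape_inbox))
    await scape_inbox.put(("sense", "S1", sensor_inbox))
    print(await sensor_inbox.get())    # ("percept", [...])
    await scape_inbox.put(("action", [0.0, 0.2, 0.0], actuator_inbox))
    print(await actuator_inbox.get())  # ("result", 0.1, 0)
    await scape_inbox.put(("terminate",))
    await task

asyncio.run(main())
```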
@@ -1,3 +1,6 @@
+ """
+ this is a test scape for validation.
+ """
from mathema.actors.actor import Actor
import math
import logging
17
mathema/settings.py
Normal file
@@ -0,0 +1,17 @@
+ """
+ global parameters and settings for the
+ neuroevolutionary system.
+ """
+
+ # default if not otherwise specified
+ INIT_POPULATION_ID: str = "test"
+
+ EFF: float = 0.05
+ SURVIVAL_PERCENTAGE: float = 0.5
+ SPECIE_SIZE_LIMIT: int = 10
+ INIT_SPECIE_SIZE: int = 10
+
+ GENERATION_LIMIT: int = 1000
+ EVALUATIONS_LIMIT: int = 100_000
+ FITNESS_GOAL: float = 6000

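These constants are plain module-level values; a brief sketch of how a training loop might consult them as stopping criteria (the exact use is an assumption, and the import assumes the package is importable as `mathema.settings`):

```python
from mathema import settings  # assumes the package layout shown above

def should_stop(generation: int, evaluations: int, best_fitness: float) -> bool:
    # Stop when any configured budget or the fitness goal is reached.
    return (
        generation >= settings.GENERATION_LIMIT
        or evaluations >= settings.EVALUATIONS_LIMIT
        or best_fitness >= settings.FITNESS_GOAL
    )

print(should_stop(generation=10, evaluations=500, best_fitness=6123.0))  # True
```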
@@ -1,145 +0,0 @@
import asyncio
import logging
import numpy as np

from mathema.actors.actor import Actor
from mathema.actors.sensor import Sensor
from mathema.actors.actuator import Actuator
from mathema.scape.car_racing import CarRacingScape
from mathema.envs.openai_car_racing import CarRacing

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("smoke")

FEATURE_LEN = 10 + 6


class DummyCortex(Actor):
    """Minimal cortex: counts fitness and episodes. Expects Actuator -> ('sync', aid, fitness, halt_flag)."""

    def __init__(self, stop_after_episodes: int = 3):
        super().__init__("DummyCortex")
        self.total_fitness = 0.0
        self.episodes = 0
        self.stop_after = int(stop_after_episodes)
        self.sensors = []
        self.neurons = []
        self.actuators = []

    async def run(self):
        log.info("[Cortex] started. stop_after=%d", self.stop_after)
        try:
            while True:
                msg = await self.inbox.get()
                tag = msg[0]
                if tag == "sync":
                    _, aid, fitness_delta, halt_flag = msg
                    self.total_fitness += float(fitness_delta)
                    if halt_flag == 1:
                        self.episodes += 1
                        log.info("[Cortex] EPISODE done: %d cum_fitness=%.3f",
                                 self.episodes, self.total_fitness)
                        if self.episodes >= self.stop_after:
                            log.info("[Cortex] stopping smoke test...")
                            for a in (self.sensors + self.neurons + self.actuators):
                                try:
                                    await a.send(("terminate",))
                                except Exception:
                                    pass
                            return
                elif tag == "reactivate":
                    pass
                elif tag == "terminate":
                    return
        finally:
            log.info("[Cortex] terminated.")


class RelayNeuron(Actor):
    """
    Minimal neuron: receives sensor features via ("forward", sid, vec)
    and forwards a 3-dimensional actuator action ("forward", nid, [steer, gas, brake]).
    """

    def __init__(self, nid: str, out_actuator: Actuator):
        super().__init__(f"RelayNeuron-{nid}")
        self.nid = nid
        self.out = out_actuator

    async def run(self):
        try:
            while True:
                msg = await self.inbox.get()
                tag = msg[0]
                if tag == "forward":
                    _, _sid, features = msg
                    action_vec = [0.0, 0.2, -1.0]
                    await self.out.send(("forward", self.nid, action_vec))
                elif tag == "terminate":
                    return
        finally:
            pass


async def main():
    env = CarRacing(seed_value=5, render_mode=None)
    scape = CarRacingScape(env)

    cx = DummyCortex(stop_after_episodes=3)

    actuator = Actuator(
        aid="A1",
        cx_pid=cx,
        name="car_ApplyAction",
        fanin_ids=["N1"],
        expect_count=1,
        scape=scape
    )

    neuron = RelayNeuron("N1", actuator)

    sensor = Sensor(
        sid="S1",
        cx_pid=cx,
        name="car_GetFeatures",
        vector_length=FEATURE_LEN,
        fanout_pids=[neuron],
        scape=scape
    )

    cx.sensors = [sensor]
    cx.neurons = [neuron]
    cx.actuators = [actuator]

    tasks = [
        asyncio.create_task(scape.run(), name="CarScape"),
        asyncio.create_task(cx.run(), name="Cortex"),
        asyncio.create_task(sensor.run(), name="Sensor"),
        asyncio.create_task(neuron.run(), name="Neuron"),
        asyncio.create_task(actuator.run(), name="Actuator"),
    ]

    steps = 0
    try:
        while not tasks[1].done():
            await sensor.send(("sync",))
            steps += 1
            await asyncio.sleep(0.0)
        log.info("[SMOKE] finished after %d steps. ✅", steps)
    finally:
        try:
            await scape.send(("terminate",))
        except Exception:
            pass
        for t in tasks:
            if not t.done():
                t.cancel()


if __name__ == "__main__":
    asyncio.run(main())
13
mathema/stats/car_pop_transaction_test.jsonl
Normal file
@@ -0,0 +1,13 @@
{"ts":1765639613,"gen":1,"t_sec":367007,"cum_fitness":227.40101317121585,"best":197.72553191488373,"avg":25.266779241246205,"std":61.67146734447497,"agents":9,"eval_acc":277,"cycle_acc":159367.0,"time_acc":903.0}
{"ts":1765639613,"gen":2,"t_sec":367081,"cum_fitness":317.6296859169127,"best":150.03019250252797,"avg":28.87542599244661,"std":56.67916151134183,"agents":11,"eval_acc":479,"cycle_acc":243635.0,"time_acc":1371.0}
{"ts":1765639613,"gen":3,"t_sec":367169,"cum_fitness":613.0014690982745,"best":209.76859169199517,"avg":61.30014690982745,"std":87.56502883628107,"agents":10,"eval_acc":695,"cycle_acc":347287.0,"time_acc":2082.0}
{"ts":1765639613,"gen":4,"t_sec":367256,"cum_fitness":614.7368794326097,"best":200.78581560283587,"avg":61.47368794326097,"std":78.19851334990376,"agents":10,"eval_acc":890,"cycle_acc":436408.0,"time_acc":2519.0}
{"ts":1765639613,"gen":5,"t_sec":367331,"cum_fitness":771.1370820668635,"best":203.260536980748,"avg":77.11370820668635,"std":94.68909264303997,"agents":10,"eval_acc":1060,"cycle_acc":526977.0,"time_acc":2971.0}
{"ts":1765639613,"gen":6,"t_sec":367442,"cum_fitness":1005.2231003039412,"best":240.36803444782174,"avg":100.52231003039412,"std":101.71769730692468,"agents":10,"eval_acc":1266,"cycle_acc":632455.0,"time_acc":3474.0}
{"ts":1765639613,"gen":7,"t_sec":367520,"cum_fitness":1264.2317629179447,"best":382.49260385006573,"avg":114.9301602652677,"std":133.8757324454927,"agents":11,"eval_acc":1450,"cycle_acc":741490.0,"time_acc":3852.0}
{"ts":1765639613,"gen":8,"t_sec":367650,"cum_fitness":2647.7638804457683,"best":883.2604863221765,"avg":240.70580731325165,"std":313.94121501548653,"agents":11,"eval_acc":1673,"cycle_acc":854553.0,"time_acc":4653.0}
{"ts":1765639613,"gen":9,"t_sec":367771,"cum_fitness":3294.015704153962,"best":886.3604863221761,"avg":299.45597310490564,"std":357.0894275322936,"agents":11,"eval_acc":1892,"cycle_acc":1003231.0,"time_acc":5336.0}
{"ts":1765639613,"gen":10,"t_sec":367912,"cum_fitness":3991.0866261397696,"best":894.5938196555113,"avg":399.10866261397695,"std":399.7360856267592,"agents":10,"eval_acc":2111,"cycle_acc":1171051.0,"time_acc":6085.0}
{"ts":1765639613,"gen":11,"t_sec":367987,"cum_fitness":4355.802431610881,"best":898.543819655511,"avg":435.5802431610881,"std":435.8926029922781,"agents":10,"eval_acc":2262,"cycle_acc":1286687.0,"time_acc":6569.0}
{"ts":1765639613,"gen":12,"t_sec":368146,"cum_fitness":4405.635764944219,"best":893.5438196555116,"avg":440.5635764944219,"std":440.64533248937005,"agents":10,"eval_acc":2482,"cycle_acc":1442441.0,"time_acc":7519.0}
{"ts":1765639613,"gen":13,"t_sec":368243,"cum_fitness":4407.452431610885,"best":894.2271529888436,"avg":440.74524316108847,"std":440.8278024255607,"agents":10,"eval_acc":2662,"cycle_acc":1556196.0,"time_acc":8058.0}
BIN
mathema/stats/car_pop_transaction_test_agents.png
Normal file
BIN
mathema/stats/car_pop_transaction_test_avg.png
Normal file
BIN
mathema/stats/car_pop_transaction_test_best.png
Normal file
BIN
mathema/stats/car_pop_transaction_test_cum_fitness.png
Normal file
BIN
mathema/stats/car_pop_transaction_test_cycle_acc.png
Normal file
BIN
mathema/stats/car_pop_transaction_test_eval_acc.png
Normal file
BIN
mathema/stats/car_pop_transaction_test_std.png
Normal file
BIN
mathema/stats/car_pop_transaction_test_t_sec.png
Normal file
BIN
mathema/stats/car_pop_transaction_test_time_acc.png
Normal file
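The PNGs above are presumably rendered from the JSONL stats file added in this commit; a minimal sketch of how one of these curves could be reproduced (the actual plotting script is not part of this diff):

```python
# Hedged sketch: re-plot the per-generation best fitness from the JSONL stats.
import json
import matplotlib.pyplot as plt

with open("mathema/stats/car_pop_transaction_test.jsonl") as fh:
    records = [json.loads(line) for line in fh]

gens = [r["gen"] for r in records]
best = [r["best"] for r in records]

plt.plot(gens, best, marker="o")
plt.xlabel("generation")
plt.ylabel("best fitness")
plt.title("car_pop_transaction_test: best fitness per generation")
plt.savefig("car_pop_transaction_test_best.png")
```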
@@ -1,142 +0,0 @@
import asyncio
from mathema.actors.neuron import Neuron
from mathema.actors.actor import Actor


class Collector(Actor):
    def __init__(self, name="Collector"):
        super().__init__(name)
        self.events = []
        self._stop = asyncio.Event()

    async def run(self):
        while True:
            msg = await self.inbox.get()
            tag = msg[0]
            if tag == "forward":
                _, from_id, vec = msg
                self.events.append((from_id, vec))
            elif tag == "terminate":
                self._stop.set()
                return


async def start(actor):
    return asyncio.create_task(actor.run())


async def test_feedforward():
    col = Collector("COL-ff")
    N = Neuron(nid="N", cx_pid=None, af_name="tanh",
               input_idps=[("S", [1.0], False), ("bias", [0.0], False)],
               output_pids=[col], bias=None)

    tN = await start(N)
    tC = await start(col)

    await N.send(("cycle_start",))

    await N.send(("forward", "S", [1.0]))
    await asyncio.sleep(0.01)

    print("FF events:", col.events)

    await col.send(("terminate",))
    await N.send(("terminate",))
    await asyncio.gather(tN, tC)


async def test_lateral_nonrecurrent():
    col = Collector("COL-lat")
    N1 = Neuron("N1", None, "tanh",
                [("S", [1.0], False), ("bias", [0.0], False)], [], None)
    N2 = Neuron("N2", None, "tanh",
                [("S", [1.0], False), ("N1", [1.0], False), ("bias", [0.0], False)], [col], None)

    N1.outputs = [N2]

    t1 = await start(N1)
    t2 = await start(N2)
    tC = await start(col)

    await N1.send(("cycle_start",))
    await N2.send(("cycle_start",))

    await N1.send(("forward", "S", [1.0]))
    await N2.send(("forward", "S", [1.0]))
    await asyncio.sleep(0.01)

    await asyncio.sleep(0.01)

    print("LAT events:", col.events)

    await col.send(("terminate",))
    await N1.send(("terminate",))
    await N2.send(("terminate",))
    await asyncio.gather(t1, t2, tC)


async def test_recurrent_edge():
    col = Collector("COL-rec")
    N1 = Neuron("N1", None, "tanh",
                [("S", [1.0], False), ("bias", [0.0], False)], [], None)
    N2 = Neuron("N2", None, "tanh",
                [("S", [1.0], False), ("N1", [1.0], True), ("bias", [0.0], False)], [col], None)

    N1.outputs = [N2]

    t1 = await start(N1)
    t2 = await start(N2)
    tC = await start(col)

    await N1.send(("cycle_start",))
    await N2.send(("cycle_start",))

    await N1.send(("forward", "S", [1.0]))
    await N2.send(("forward", "S", [1.0]))
    await asyncio.sleep(0.02)

    await N1.send(("cycle_start",))
    await N2.send(("cycle_start",))
    await N1.send(("forward", "S", [1.0]))
    await N2.send(("forward", "S", [1.0]))
    await asyncio.sleep(0.02)

    print("REC events:", col.events)

    await col.send(("terminate",))
    await N1.send(("terminate",))
    await N2.send(("terminate",))
    await asyncio.gather(t1, t2, tC)


async def test_self_loop():
    col = Collector("COL-self")
    N = Neuron("N", None, "tanh",
               [("S", [1.0], False), ("N", [1.0], True), ("bias", [0.0], False)], [], None)

    N.outputs = [N, col]

    tN = await start(N)
    tC = await start(col)

    await N.send(("cycle_start",))
    await N.send(("forward", "S", [1.0]))
    await asyncio.sleep(0.02)

    await N.send(("cycle_start",))
    await N.send(("forward", "S", [1.0]))
    await asyncio.sleep(0.02)

    print("SELF events:", col.events)

    await col.send(("terminate",))
    await N.send(("terminate",))
    await asyncio.gather(tN, tC)
if __name__ == "__main__":
|
|
||||||
asyncio.run(test_feedforward())
|
|
||||||
asyncio.run(test_lateral_nonrecurrent())
|
|
||||||
asyncio.run(test_recurrent_edge())
|
|
||||||
asyncio.run(test_self_loop())
|
|
||||||
@@ -1,34 +0,0 @@
import asyncio
from dotenv import load_dotenv

from mathema.core.population_monitor import init_population, continue_
from mathema.utils.logging_config import setup_logging

setup_logging()

import logging

log = logging.getLogger(__name__)


async def run_xor_test(
    pop_id: str = "xor_pop",
    gens: int = 1000,
):
    monitor = await init_population((
        pop_id,
        [{"morphology": "xor_mimic", "neural_afs": ["tanh"]}],
        "gt",
        "competition",
    ))

    for _ in range(gens):
        await monitor.gen_ended.wait()
        s = monitor.state
        await monitor._best_fitness_in_population(s.population_id)

    await monitor.stop("normal")


if __name__ == "__main__":
    asyncio.run(run_xor_test())