# actors/neuron.py
"""Neuron actor for an actor-based neural network.

Each Neuron waits for a "forward" message from every one of its input
sources, computes ``activation(weighted_sum + bias)``, and fans the
result out to its output PIDs.  It also answers backup / restore /
perturb messages used by a stochastic hill-climbing trainer.
"""
import math
import random

from actor import Actor


def tanh(x):
    """Hyperbolic tangent activation function."""
    return math.tanh(x)


# Known activation functions by name.  Unknown names fall back to tanh,
# which preserves the original behavior (the original expression
# `tanh if af_name == "tanh" else tanh` always selected tanh) while
# making the mapping genuinely extensible.
_ACTIVATIONS = {"tanh": tanh}


def _sat(val, lo, hi):
    """Clamp ``val`` into the closed interval [lo, hi]."""
    return lo if val < lo else (hi if val > hi else val)


class Neuron(Actor):
    """One neuron actor.

    Message protocol (tuples read from ``self.inbox``):
      ("forward", from_id, data)  -- one input's value for this pass
      ("get_backup",)             -- send weights + bias to the cortex
      ("weight_backup",)          -- snapshot weights + bias locally
      ("weight_restore",)         -- roll back to the last snapshot
      ("weight_perturb",)         -- randomly perturb weights + bias
      ("terminate",)              -- stop the actor loop
    """

    def __init__(self, nid, cx_pid, af_name, input_idps, output_pids):
        """
        nid: neuron identifier (used in messages and logging).
        cx_pid: PID of the cortex actor that collects backups.
        af_name: activation-function name (looked up in _ACTIVATIONS).
        input_idps: [(input_id, [w1, w2, ...]), ...] -- one weight vector
            per input source; list order fixes the accumulation order.
        output_pids: PIDs that receive this neuron's output.
        """
        super().__init__(f"Neuron-{nid}")
        self.nid = nid
        self.cx_pid = cx_pid
        self.af = _ACTIVATIONS.get(af_name, tanh)
        # Per-input slot: weight vector plus the latest value received
        # during the current forward pass.
        self.inputs = {}
        self.order = []
        for inp_id, weights in input_idps:
            self.order.append(inp_id)
            self.inputs[inp_id] = {"weights": list(weights), "got": False, "val": None}
        # IMPORTANT: the bias is learnable -- it starts randomized and
        # participates in backup/restore/perturbation like the weights.
        self.bias = random.uniform(-2.0, 2.0)
        # Snapshot storage for weight_backup / weight_restore.
        self._backup_inputs = None
        self._backup_bias = None
        self.outputs = output_pids
        print(f"Neuron {nid}: inputs={list(self.inputs.keys())}, bias={self.bias}")

    async def run(self):
        """Main actor loop: dispatch on the message tag until terminated."""
        while True:
            msg = await self.inbox.get()
            tag = msg[0]
            if tag == "forward":
                _, from_id, data = msg
                await self._handle_forward(from_id, data)
            elif tag == "get_backup":
                # Ship *copies* of the weight vectors so the cortex cannot
                # alias and mutate this neuron's live state.
                idps = [(i, list(self.inputs[i]["weights"])) for i in self.order]
                idps.append(("bias", self.bias))
                await self.cx_pid.send(("backup_from_neuron", self.nid, idps))
            elif tag == "weight_backup":
                # Deep-copy the weights + bias into the snapshot.
                print(f"Neuron {self.nid}: backing up weights")
                self._backup_inputs = {k: {"weights": v["weights"][:]} for k, v in self.inputs.items()}
                self._backup_bias = self.bias
            elif tag == "weight_restore":
                if self._backup_inputs is not None:
                    for k in self.inputs:
                        self.inputs[k]["weights"] = self._backup_inputs[k]["weights"][:]
                    self.bias = self._backup_bias
            elif tag == "weight_perturb":
                self._perturb()
            elif tag == "terminate":
                return

    async def _handle_forward(self, from_id, data):
        """Record one input's value; fire once every input has arrived."""
        if from_id not in self.inputs:
            return  # ignore values from unknown senders
        slot = self.inputs[from_id]
        slot["got"] = True
        slot["val"] = data
        if not all(self.inputs[i]["got"] for i in self.order):
            return
        # All inputs present: accumulate the weighted sum.
        acc = 0.0
        for i in self.order:
            w = self.inputs[i]["weights"]
            v = self.inputs[i]["val"]
            if isinstance(v, list):
                # NOTE(review): zip truncates silently if len(v) != len(w);
                # assumes senders always match the weight-vector length.
                acc += sum(wj * vj for wj, vj in zip(w, v))
            else:
                acc += w[0] * float(v)
        out = self.af(acc + self.bias)
        for pid in self.outputs:
            await pid.send(("forward", self.nid, [out]))
        # Reset the slots for the next forward pass.
        for i in self.order:
            self.inputs[i]["got"] = False
            self.inputs[i]["val"] = None
        print(f"Neuron {self.nid}: input_sum={acc + self.bias:.3f}, output={out:.3f}")

    def _perturb(self):
        """Randomly perturb a subset of the weights (and maybe the bias)."""
        print(f"Neuron {self.nid}: perturbing {len([w for i in self.order for w in self.inputs[i]['weights']])} weights")
        # MP as in the book: 1/sqrt(total weights including the bias).
        tot_w = sum(len(self.inputs[i]["weights"]) for i in self.order) + 1
        mp = 1 / math.sqrt(tot_w)
        delta_mag = 2.0 * math.pi
        sat_lim = 2.0 * math.pi
        # Perturb the weights.
        for i in self.order:
            ws = self.inputs[i]["weights"]
            for j in range(len(ws)):
                if random.random() < mp:
                    ws[j] = _sat(ws[j] + (random.random() - 0.5) * delta_mag, -sat_lim, sat_lim)
        # Perturb the bias.
        if random.random() < mp:
            self.bias = _sat(self.bias + (random.random() - 0.5) * delta_mag, -sat_lim, sat_lim)