diff --git a/ui/hud.py b/ui/hud.py
index ef7dff7..f7b73d1 100644
--- a/ui/hud.py
+++ b/ui/hud.py
@@ -378,7 +378,8 @@ class HUD:
         info_lines = [
             f"Layers: {info['total_layers']}",
             f"Neurons: {info['total_neurons']}",
-            f"Connections: {info['total_connections']}"
+            f"Connections: {info['total_connections']}",
+            f"Network Cost: {info['network_cost']}",
         ]
 
         for i, line in enumerate(info_lines):
diff --git a/world/base/brain.py b/world/base/brain.py
index fadf7fb..782571f 100644
--- a/world/base/brain.py
+++ b/world/base/brain.py
@@ -21,6 +21,8 @@ class FlexibleNeuralNetwork:
         # Initialize with just input and output layers (no hidden layers)
         self._initialize_basic_network()
 
+        self.network_cost = self.calculate_network_cost()
+
     def _initialize_basic_network(self):
         """Initialize a basic network with input->output connections only."""
         # Input layer (no actual neurons, just placeholders)
@@ -144,6 +146,8 @@ class FlexibleNeuralNetwork:
         # Ensure the network maintains basic connectivity
         mutated._ensure_network_connectivity()
 
+        mutated.network_cost = mutated.calculate_network_cost()
+
         return mutated
 
     def _mutate_weights(self):
@@ -411,7 +415,8 @@ class FlexibleNeuralNetwork:
             'total_layers': len(self.layers),
             'layer_sizes': [len(layer) for layer in self.layers],
             'total_connections': 0,
-            'total_neurons': sum(len(layer) for layer in self.layers)
+            'total_neurons': sum(len(layer) for layer in self.layers),
+            'network_cost': self.network_cost
         }
 
         for layer in self.layers[1:]:
@@ -421,6 +426,21 @@ class FlexibleNeuralNetwork:
 
         return info
 
+    def calculate_network_cost(self):
+        """
+        Estimate the computational cost of the network.
+        Cost is defined as the total number of connections plus the number of neurons
+        (i.e., total multiply-accumulate operations and activations per forward pass).
+        """
+        total_connections = 0
+        total_neurons = 0
+        for layer in self.layers[1:]:  # Skip input layer (no computation)
+            for neuron in layer:
+                total_neurons += 1
+                if 'connections' in neuron:
+                    total_connections += len(neuron['connections'])
+        return total_connections + total_neurons
+
 
 class CellBrain(BehavioralModel):
     """
diff --git a/world/objects.py b/world/objects.py
index b061af0..1c8e2e1 100644
--- a/world/objects.py
+++ b/world/objects.py
@@ -268,7 +268,7 @@ class DefaultCell(BaseEntity):
 
         self.behavioral_model = behavioral_model
 
-    def tick(self, interactable: Optional[List[BaseEntity]] = None) -> "DefaultCell":
+    def tick(self, interactable: Optional[List[BaseEntity]] = None) -> Union["DefaultCell", List["DefaultCell"]]:
         """
         Updates the cell according to its behavioral model.
 
@@ -276,17 +276,11 @@ class DefaultCell(BaseEntity):
-        :return: Self.
+        :return: Self, or a list of two offspring cells when the cell splits.
""" - if self.energy == 0: + if self.energy <= 0: # too hungry lmao self.flag_for_death() return self - self.energy -= 1 - - if self.tick_count > 2000: - # too old lmao - self.flag_for_death() - if interactable is None: interactable = [] @@ -303,25 +297,25 @@ class DefaultCell(BaseEntity): distance_to_food = get_distance_between_objects(self, food_object) if distance_to_food < self.max_visual_width and food_objects: - self.energy += 100 + self.energy += 110 food_object.flag_for_death() return self - if self.energy >= 1500: + if self.energy >= 1600: # too much energy, split duplicate_x, duplicate_y = self.position.get_position() - duplicate_x += random.randint(-self.interaction_radius, self.interaction_radius) - duplicate_y += random.randint(-self.interaction_radius, self.interaction_radius) + duplicate_x += random.randint(-self.max_visual_width, self.max_visual_width) + duplicate_y += random.randint(-self.max_visual_width, self.max_visual_width) duplicate_x_2, duplicate_y_2 = self.position.get_position() - duplicate_x_2 += random.randint(-self.interaction_radius, self.interaction_radius) - duplicate_y_2 += random.randint(-self.interaction_radius, self.interaction_radius) + duplicate_x_2 += random.randint(-self.max_visual_width, self.max_visual_width) + duplicate_y_2 += random.randint(-self.max_visual_width, self.max_visual_width) new_cell = DefaultCell(Position(x=int(duplicate_x), y=int(duplicate_y)), Rotation(angle=random.randint(0, 359))) - new_cell.set_brain(self.behavioral_model.mutate(1)) + new_cell.set_brain(self.behavioral_model.mutate(0.4)) new_cell_2 = DefaultCell(Position(x=int(duplicate_x_2), y=int(duplicate_y_2)), Rotation(angle=random.randint(0, 359))) - new_cell_2.set_brain(self.behavioral_model.mutate(1)) + new_cell_2.set_brain(self.behavioral_model.mutate(0.4)) return [new_cell, new_cell_2] @@ -388,6 +382,10 @@ class DefaultCell(BaseEntity): # tick rotational velocity self.rotation.set_rotation(self.rotation.get_rotation() + self.rotational_velocity) + movement_cost = abs(output_data["angular_acceleration"]) + abs(output_data["linear_acceleration"]) + + self.energy -= (self.behavioral_model.neural_network.network_cost * 0.01) + 1 + (0.2 * movement_cost) + return self @staticmethod