general tweaks
Some checks failed
Build Simulation and Test / Run All Tests (push) Failing after 36s
parent 848356a76e
commit bada025b33
@@ -44,7 +44,7 @@ SELECTION_THRESHOLD = 3 # pixels
 
 # Simulation settings
 FOOD_SPAWNING = True
-FOOD_OBJECTS_COUNT = 100
+FOOD_OBJECTS_COUNT = 500
 RANDOM_SEED = 0
 
 # Vector visualization settings
@@ -56,10 +56,10 @@ ANGULAR_TIP_SIZE = 2.5
 DIRECTION_TIP_SIZE = 3
 
 # Cell physics settings
-MAX_ACCELERATION = 0.1
+MAX_ACCELERATION = 0.125
 MAX_ANGULAR_ACCELERATION = 0.25
-MAX_VELOCITY = 0.5
-MAX_ROTATIONAL_VELOCITY = 3
+MAX_VELOCITY = 1
+MAX_ROTATIONAL_VELOCITY = 3 # degrees per tick
 
 KEYMAP_LEGEND = [
     ("WASD", "Move camera"),
@@ -76,7 +76,7 @@ class InputHandler:
         elif event.key == pygame.K_r:
             self.camera.reset_position()
         elif event.key == pygame.K_RSHIFT:
-            self.sprint_mode = True # Enter sprint mode
+            self.sprint_mode = not self.sprint_mode # Toggle sprint mode
 
         return running
 
@@ -84,8 +84,8 @@ class InputHandler:
         """Handle keyup events."""
         if event.key == pygame.K_LSHIFT:
             self.tps = self.default_tps
-        if event.key == pygame.K_RSHIFT:
-            self.sprint_mode = False # Exit sprint mode
+        # if event.key == pygame.K_RSHIFT:
+        #     self.sprint_mode = False # Exit sprint mode
 
     def _handle_mouse_down(self, event):
         """Handle mouse button down events."""
@@ -3,6 +3,7 @@ import time
 import random
 import sys
 
+from world.base.brain import CellBrain, FlexibleNeuralNetwork
 from world.world import World, Position, Rotation
 from world.objects import FoodObject, DefaultCell
 from world.simulation_interface import Camera
@@ -38,14 +39,21 @@ class SimulationEngine:
         world = World(CELL_SIZE, (CELL_SIZE * GRID_WIDTH, CELL_SIZE * GRID_HEIGHT))
         random.seed(RANDOM_SEED)
 
+        half_width = GRID_WIDTH * CELL_SIZE // 2
+        half_height = GRID_HEIGHT * CELL_SIZE // 2
+
         if FOOD_SPAWNING:
             for _ in range(FOOD_OBJECTS_COUNT):
-                x = random.randint(-100, 100)
-                y = random.randint(-100, 100)
+                x = random.randint(-half_width, half_width)
+                y = random.randint(-half_height, half_height)
                 world.add_object(FoodObject(Position(x=x, y=y)))
 
-        for _ in range(20):
-            world.add_object(DefaultCell(Position(x=random.randint(-100, 100), y=random.randint(-100, 100)), Rotation(angle=0)))
+        for _ in range(300):
+            new_cell = DefaultCell(Position(x=random.randint(-half_width, half_width), y=random.randint(-half_height, half_height)), Rotation(angle=0))
+
+            new_cell.behavioral_model = new_cell.behavioral_model.mutate(3)
+
+            world.add_object(new_cell)
 
         return world
 
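Note on the spawning change: food and cells are now scattered across the whole world instead of a fixed ±100 square. A minimal sketch of the bounds arithmetic, using made-up GRID_WIDTH / GRID_HEIGHT / CELL_SIZE values (the real ones live in the config and are not shown in this diff):

import random

GRID_WIDTH, GRID_HEIGHT, CELL_SIZE = 64, 48, 10   # illustrative values only

half_width = GRID_WIDTH * CELL_SIZE // 2    # 320 px: half the world width
half_height = GRID_HEIGHT * CELL_SIZE // 2  # 240 px: half the world height

# Spawn positions now cover [-half_width, half_width] x [-half_height, half_height]
# rather than the old hard-coded [-100, 100] square.
x = random.randint(-half_width, half_width)
y = random.randint(-half_height, half_height)
print(x, y)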
@@ -1,6 +1,8 @@
 import numpy as np
 import random
 from copy import deepcopy
+
+from config.constants import MAX_VELOCITY, MAX_ROTATIONAL_VELOCITY
 from world.behavioral import BehavioralModel
 
 
@@ -10,7 +12,7 @@ class FlexibleNeuralNetwork:
     Supports variable topology with cross-layer connections.
     """
 
-    def __init__(self, input_size=2, output_size=2):
+    def __init__(self, input_size=2, output_size=2, empty_start=True):
         self.input_size = input_size
         self.output_size = output_size
 
@@ -18,8 +20,11 @@ class FlexibleNeuralNetwork:
         # Each neuron is represented by its connections and bias
         self.layers = []
 
-        # Initialize with just input and output layers (no hidden layers)
-        self._initialize_basic_network()
+        # Initialize network based on empty_start parameter
+        if empty_start:
+            self._initialize_empty_network()
+        else:
+            self._initialize_basic_network()
 
         self.network_cost = self.calculate_network_cost()
 
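With empty_start the constructor can build either a blank brain or the previous default. A short usage sketch based only on the signature shown above (argument values are illustrative):

# empty_start defaults to True: output neurons start with no connections and zero bias.
blank_net = FlexibleNeuralNetwork(input_size=4, output_size=2)

# empty_start=False falls back to _initialize_basic_network (input and output layers, no hidden layers).
wired_net = FlexibleNeuralNetwork(input_size=4, output_size=2, empty_start=False)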
@@ -46,6 +51,24 @@ class FlexibleNeuralNetwork:
 
         self.layers = [input_layer, output_layer]
 
+    def _initialize_empty_network(self):
+        """Initialize an empty network with no connections or biases."""
+        # Input layer (no actual neurons, just placeholders)
+        input_layer = [{'type': 'input', 'id': i} for i in range(self.input_size)]
+
+        # Output layer with no connections and zero bias
+        output_layer = []
+        for i in range(self.output_size):
+            neuron = {
+                'type': 'output',
+                'id': f'out_{i}',
+                'bias': 0.0,
+                'connections': []  # Empty connections list
+            }
+            output_layer.append(neuron)
+
+        self.layers = [input_layer, output_layer]
+
     def _remove_duplicate_connections(self):
         """Remove duplicate connections and keep only the last weight for each unique connection."""
         for layer in self.layers[1:]:  # Skip input layer
@@ -99,14 +122,19 @@ class FlexibleNeuralNetwork:
                 continue  # Skip input neurons in hidden layers
 
             # Calculate weighted sum of inputs
-            weighted_sum = neuron['bias']
+            weighted_sum = 0.0  # Start with 0 instead of bias
 
-            for source_layer, source_neuron, weight in neuron['connections']:
-                if source_layer < len(activations):
-                    if source_neuron < len(activations[source_layer]):
-                        weighted_sum += activations[source_layer][source_neuron] * weight
+            # Only add bias if neuron has connections
+            if 'connections' in neuron and len(neuron['connections']) > 0:
+                weighted_sum = neuron['bias']
+
+                for source_layer, source_neuron, weight in neuron['connections']:
+                    if source_layer < len(activations):
+                        if source_neuron < len(activations[source_layer]):
+                            weighted_sum += activations[source_layer][source_neuron] * weight
 
             # Apply activation function (tanh for bounded output)
+            # If no connections and no bias applied, this will be tanh(0) = 0
             activation = np.tanh(weighted_sum)
             layer_activations.append(activation)
 
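Combined with _initialize_empty_network above, this means a brand-new empty network always outputs zero until mutation adds connections. A standalone sketch of the per-neuron rule (not the class's actual method, whose name is not shown in this diff):

import numpy as np

def neuron_activation(neuron, activations):
    # Mirrors the new rule: bias and weights only count if connections exist.
    weighted_sum = 0.0
    if neuron.get('connections'):
        weighted_sum = neuron['bias']
        for source_layer, source_neuron, weight in neuron['connections']:
            weighted_sum += activations[source_layer][source_neuron] * weight
    return np.tanh(weighted_sum)

empty = {'type': 'output', 'id': 'out_0', 'bias': 0.7, 'connections': []}
print(neuron_activation(empty, [[1.0, -0.5]]))  # 0.0 -- unconnected neurons stay silent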
@@ -451,7 +479,7 @@ class CellBrain(BehavioralModel):
         super().__init__()
 
         # Define input and output keys
-        self.input_keys = ['distance', 'angle']
+        self.input_keys = ['distance', 'angle', 'current_speed', 'current_angular_velocity']
         self.output_keys = ['linear_acceleration', 'angular_acceleration']
 
         # Initialize inputs and outputs
@@ -461,7 +489,9 @@ class CellBrain(BehavioralModel):
         # Set input ranges for normalization
         default_ranges = {
             'distance': (0, 50),
-            'angle': (-180, 180)
+            'angle': (-180, 180),
+            'current_speed': (-MAX_VELOCITY, MAX_VELOCITY),
+            'current_angular_velocity': (-MAX_ROTATIONAL_VELOCITY, MAX_ROTATIONAL_VELOCITY)
         }
         self.input_ranges = input_ranges if input_ranges is not None else default_ranges
 
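The new entries let the brain see its own motion, normalized against the physics limits from config/constants.py. The normalization code itself is not part of this diff; a hypothetical helper showing how such (low, high) ranges are typically mapped into [-1, 1]:

def normalize(value, value_range):
    # Hypothetical helper, not CellBrain's actual code: clamp, then rescale to [-1, 1].
    low, high = value_range
    clamped = max(low, min(high, value))
    return 2.0 * (clamped - low) / (high - low) - 1.0

MAX_VELOCITY = 1  # mirrors the updated constant above
print(normalize(0.5, (-MAX_VELOCITY, MAX_VELOCITY)))  # 0.5
print(normalize(-200, (-180, 180)))                   # -1.0 (clamped)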
@@ -322,6 +322,8 @@ class DefaultCell(BaseEntity):
         input_data = {
             "distance": distance_to_food,
             "angle": angle_between_food,
+            "current_speed": math.sqrt(self.velocity[0] ** 2 + self.velocity[1] ** 2),
+            "current_angular_velocity": self.rotational_velocity,
         }
 
         output_data = self.behavioral_model.tick(input_data)
@@ -384,7 +386,7 @@ class DefaultCell(BaseEntity):
 
         movement_cost = abs(output_data["angular_acceleration"]) + abs(output_data["linear_acceleration"])
 
-        self.energy -= (self.behavioral_model.neural_network.network_cost * 0.01) + 1 + (0.2 * movement_cost)
+        self.energy -= (self.behavioral_model.neural_network.network_cost * 0.01) + 1 + (0.5 * movement_cost)
 
         return self
 
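The stronger movement penalty makes acceleration more expensive relative to the flat per-tick drain. A quick worked example with made-up numbers (network_cost and the accelerations are illustrative):

network_cost = 12            # hypothetical structural cost of the brain
linear_acceleration = 0.1
angular_acceleration = 0.2

movement_cost = abs(angular_acceleration) + abs(linear_acceleration)   # 0.3
energy_drain = (network_cost * 0.01) + 1 + (0.5 * movement_cost)
print(energy_drain)  # 1.27 per tick (the old 0.2 multiplier gave 1.18)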