Implement duplicate connection removal and enhance mutation methods in neural network
CI checks failed: Build Simulation and Test / Run All Tests (push), failing after 30s

Sam 2025-06-16 16:09:23 -05:00
parent 2a04e7917a
commit 9f0d6a6925
2 changed files with 240 additions and 82 deletions


@@ -142,7 +142,7 @@ class HUD:
    BACKGROUND_COLOR = (30, 30, 30)  # Dark gray background color

    # Title positioning constants
-   TITLE_TOP_MARGIN = 30  # Distance above visualization for title
+   TITLE_TOP_MARGIN = 20  # Distance above visualization for title

    # Neuron appearance constants
    NEURON_RADIUS = 8  # Radius of neuron circles
@@ -153,7 +153,7 @@ class HUD:
    # Connection appearance constants
    WEIGHT_NORMALIZATION_DIVISOR = 2  # Divisor for normalizing weights to [-1, 1] range
-   MAX_CONNECTION_THICKNESS = 3  # Maximum thickness for connection lines
+   MAX_CONNECTION_THICKNESS = 4  # Maximum thickness for connection lines
    MIN_CONNECTION_THICKNESS = 1  # Minimum thickness for connection lines

    # Connection colors (RGB values)
@@ -291,25 +291,30 @@ class HUD:
            if not source_pos:
                continue

-           # Color based on weight: red for negative, green for positive
-           weight_normalized = max(ACTIVATION_CLAMP_MIN,
-                                   min(ACTIVATION_CLAMP_MAX, weight / WEIGHT_NORMALIZATION_DIVISOR))
-           if weight_normalized >= 0:
-               # Positive weight: interpolate from gray to green
-               intensity = int(weight_normalized * 255)
-               color = (max(0, CONNECTION_BASE_INTENSITY - intensity),
-                        CONNECTION_BASE_INTENSITY + intensity // 2,
-                        max(0, CONNECTION_BASE_INTENSITY - intensity))
-           else:
-               # Negative weight: interpolate from gray to red
-               intensity = int(-weight_normalized * 255)
-               color = (CONNECTION_BASE_INTENSITY + intensity // 2,
-                        max(0, CONNECTION_BASE_INTENSITY - intensity),
-                        max(0, CONNECTION_BASE_INTENSITY - intensity))
-           # Line thickness based on weight magnitude
-           thickness = max(MIN_CONNECTION_THICKNESS, int(abs(weight_normalized) * MAX_CONNECTION_THICKNESS))
+           # Get activation value of the source neuron
+           if source_layer < len(activations) and source_neuron < len(activations[source_layer]):
+               activation = activations[source_layer][source_neuron]
+           else:
+               activation = 0.0
+
+           # Clamp activation to [-1, 1]
+           activation = max(ACTIVATION_CLAMP_MIN, min(ACTIVATION_CLAMP_MAX, activation))
+
+           # Color: interpolate from red (-1) to yellow (0) to green (+1)
+           if activation <= 0:
+               # Red to yellow
+               r = 255
+               g = int(255 * (activation + 1))
+               b = 0
+           else:
+               # Yellow to green
+               r = int(255 * (1 - activation))
+               g = 255
+               b = 0
+           color = (r, g, b)
+
+           # Thickness: proportional to abs(weight)
+           thickness = max(MIN_CONNECTION_THICKNESS, int(abs(weight) * MAX_CONNECTION_THICKNESS))
            pygame.draw.line(screen, color, source_pos, target_pos, thickness)
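As an illustration (not part of the diff), the new red-to-yellow-to-green ramp can be checked in isolation; a minimal sketch, with activation_to_color as a hypothetical helper name:

def activation_to_color(activation: float) -> tuple[int, int, int]:
    """Map an activation in [-1, 1] to RGB: red (-1) -> yellow (0) -> green (+1)."""
    a = max(-1.0, min(1.0, activation))  # clamp, mirroring ACTIVATION_CLAMP_MIN/MAX
    if a <= 0:
        return (255, int(255 * (a + 1)), 0)  # red -> yellow
    return (int(255 * (1 - a)), 255, 0)      # yellow -> green

assert activation_to_color(-1.0) == (255, 0, 0)
assert activation_to_color(0.0) == (255, 255, 0)
assert activation_to_color(1.0) == (0, 255, 0)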


@@ -44,6 +44,37 @@ class FlexibleNeuralNetwork:
        self.layers = [input_layer, output_layer]

+    def _remove_duplicate_connections(self):
+        """Remove duplicate connections and keep only the last weight for each unique connection."""
+        for layer in self.layers[1:]:  # Skip input layer
+            for neuron in layer:
+                if 'connections' not in neuron:
+                    continue
+
+                # Use a dictionary to track unique connections by (source_layer, source_neuron)
+                unique_connections = {}
+                for source_layer, source_neuron, weight in neuron['connections']:
+                    connection_key = (source_layer, source_neuron)
+                    # Keep the last weight encountered for this connection
+                    unique_connections[connection_key] = weight
+
+                # Rebuild connections list without duplicates
+                neuron['connections'] = [
+                    (source_layer, source_neuron, weight)
+                    for (source_layer, source_neuron), weight in unique_connections.items()
+                ]
+
+    def _connection_exists(self, target_neuron, source_layer_idx, source_neuron_idx):
+        """Check if a connection already exists between two neurons."""
+        if 'connections' not in target_neuron:
+            return False
+        for source_layer, source_neuron, weight in target_neuron['connections']:
+            if source_layer == source_layer_idx and source_neuron == source_neuron_idx:
+                return True
+        return False
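As an aside (not part of the diff), the last-weight-wins behavior of _remove_duplicate_connections follows from dict insertion order; a minimal sketch on toy data:

# Toy connections list with a duplicate (0, 1) entry; the later weight (0.9)
# overwrites the earlier one (0.5) because dict writes keep the last value.
connections = [(0, 0, 0.2), (0, 1, 0.5), (0, 1, 0.9)]
unique = {}
for src_layer, src_neuron, weight in connections:
    unique[(src_layer, src_neuron)] = weight  # last write wins
deduped = [(sl, sn, w) for (sl, sn), w in unique.items()]
print(deduped)  # [(0, 0, 0.2), (0, 1, 0.9)]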
    def forward(self, inputs):
        """
        Forward pass through the network.
@@ -85,26 +116,33 @@ class FlexibleNeuralNetwork:
        """
        Create a mutated copy of this network.

-        :param mutation_rate: Probability of each type of mutation
+        :param mutation_rate: Base probability multiplied by specific mutation weights
        :return: New mutated FlexibleNeuralNetwork instance
        """
        mutated = deepcopy(self)

-        # Different types of mutations
+        # Weighted mutations (probability = mutation_rate * weight)
+        # Higher weights = more likely to occur
        mutations = [
-            mutated._mutate_weights,
-            mutated._mutate_biases,
-            mutated._add_connection,
-            mutated._remove_connection,
-            mutated._add_neuron,
-            mutated._remove_neuron
+            (mutated._mutate_weights, 5.0),     # Most common - fine-tune existing
+            (mutated._mutate_biases, 3.0),      # Common - adjust neuron thresholds
+            (mutated._add_connection, 1.5),     # Moderate - grow connectivity
+            (mutated._remove_connection, 0.8),  # Less common - reduce connectivity
+            (mutated._add_neuron, 0.3),         # Rare - structural growth
+            (mutated._remove_neuron, 0.1)       # Very rare - structural reduction
        ]

-        # Apply random mutations
-        for mutation_func in mutations:
-            if random.random() < mutation_rate:
+        # Apply weighted random mutations
+        for mutation_func, weight in mutations:
+            if random.random() < (mutation_rate * weight):
                mutation_func()

+        # Clean up any duplicate connections that might have been created
+        mutated._remove_duplicate_connections()
+
+        # Ensure the network maintains basic connectivity
+        mutated._ensure_network_connectivity()
+
        return mutated
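To make the weighting concrete (illustration only, not part of the diff): each mutation fires independently with probability mutation_rate * weight. A minimal sketch, assuming an example mutation_rate of 0.1:

mutation_rate = 0.1  # assumed example value
weights = {'mutate_weights': 5.0, 'mutate_biases': 3.0, 'add_connection': 1.5,
           'remove_connection': 0.8, 'add_neuron': 0.3, 'remove_neuron': 0.1}
for name, w in weights.items():
    p = min(1.0, mutation_rate * w)  # compared against random.random()
    print(f'{name}: p = {p:.2f}')
# mutate_weights: 0.50, mutate_biases: 0.30, add_connection: 0.15,
# remove_connection: 0.08, add_neuron: 0.03, remove_neuron: 0.01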
    def _mutate_weights(self):
@@ -131,25 +169,39 @@ class FlexibleNeuralNetwork:
        if len(self.layers) < 2:
            return

+        # Find layers with neurons
+        valid_target_layers = []
+        for i in range(1, len(self.layers)):
+            if len(self.layers[i]) > 0:
+                valid_target_layers.append(i)
+
+        if not valid_target_layers:
+            return
+
        # Pick a random target neuron (not in input layer)
-        target_layer_idx = random.randint(1, len(self.layers) - 1)
+        target_layer_idx = random.choice(valid_target_layers)
        target_neuron_idx = random.randint(0, len(self.layers[target_layer_idx]) - 1)
        target_neuron = self.layers[target_layer_idx][target_neuron_idx]

        if 'connections' not in target_neuron:
            return

-        # Pick a random source (from any previous layer)
-        source_layer_idx = random.randint(0, target_layer_idx - 1)
-        if len(self.layers[source_layer_idx]) == 0:
+        # Find valid source layers (must have neurons and be before target)
+        valid_source_layers = []
+        for i in range(target_layer_idx):
+            if len(self.layers[i]) > 0:
+                valid_source_layers.append(i)
+
+        if not valid_source_layers:
            return

+        # Pick a random source (from any previous layer with neurons)
+        source_layer_idx = random.choice(valid_source_layers)
        source_neuron_idx = random.randint(0, len(self.layers[source_layer_idx]) - 1)

-        # Check if connection already exists
-        for conn in target_neuron['connections']:
-            if conn[0] == source_layer_idx and conn[1] == source_neuron_idx:
-                return  # Connection already exists
+        # Check if connection already exists using the helper method
+        if self._connection_exists(target_neuron, source_layer_idx, source_neuron_idx):
+            return  # Connection already exists, don't add duplicate

        # Add new connection
        new_weight = random.uniform(-2, 2)
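For reference (not part of the diff), the early-return guard above hinges on _connection_exists; a minimal standalone sketch of the same check on a toy neuron dict:

target_neuron = {'connections': [(0, 0, 0.5), (0, 2, -1.1)]}

def connection_exists(neuron, src_layer, src_neuron):
    return any(sl == src_layer and sn == src_neuron
               for sl, sn, _w in neuron.get('connections', []))

print(connection_exists(target_neuron, 0, 2))  # True  -> _add_connection returns early
print(connection_exists(target_neuron, 0, 1))  # False -> a new connection is added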
@@ -165,7 +217,6 @@ class FlexibleNeuralNetwork:
    def _add_neuron(self):
        """Add a new neuron to a random hidden layer or create a new hidden layer."""
-        if random.random() < 0.05:  # 5% chance to add neuron
        if len(self.layers) == 2:  # Only input and output layers
            # Create a new hidden layer
            hidden_neuron = {
@@ -175,22 +226,33 @@ class FlexibleNeuralNetwork:
                'connections': []
            }

-            # Connect to some input neurons
+            # Connect to some input neurons (avoid duplicates)
            for i in range(self.input_size):
                if random.random() < 0.7:  # 70% chance to connect to each input
-                    hidden_neuron['connections'].append((0, i, random.uniform(-2, 2)))
+                    if not self._connection_exists(hidden_neuron, 0, i):
+                        hidden_neuron['connections'].append((0, i, random.uniform(-2, 2)))

            # Insert hidden layer
            self.layers.insert(1, [hidden_neuron])

            # Update output layer connections to potentially use new hidden neuron
-            for neuron in self.layers[-1]:  # Output layer
+            for neuron in self.layers[-1]:  # Output layer (now at index 2)
                if random.random() < 0.5:  # 50% chance to connect to new hidden neuron
-                    neuron['connections'].append((1, 0, random.uniform(-2, 2)))
+                    if not self._connection_exists(neuron, 1, 0):
+                        neuron['connections'].append((1, 0, random.uniform(-2, 2)))
        else:
            # Add neuron to existing hidden layer
-            hidden_layer_idx = random.randint(1, len(self.layers) - 2)
+            # Every index in range(1, len(self.layers) - 1) is a valid hidden
+            # layer, so no extra bounds check is needed here
+            hidden_layer_indices = list(range(1, len(self.layers) - 1))
+            if not hidden_layer_indices:
+                return
+            hidden_layer_idx = random.choice(hidden_layer_indices)
            new_neuron = {
                'type': 'hidden',
                'id': f'hidden_{random.randint(1000, 9999)}',
@@ -198,20 +260,44 @@ class FlexibleNeuralNetwork:
                'connections': []
            }

-            # Connect to some neurons from previous layers
+            # Connect to some neurons from previous layers (avoid duplicates)
            for layer_idx in range(hidden_layer_idx):
                if len(self.layers[layer_idx]) > 0:  # Only if layer has neurons
                    for neuron_idx in range(len(self.layers[layer_idx])):
                        if random.random() < 0.3:  # 30% chance to connect
-                            new_neuron['connections'].append((layer_idx, neuron_idx, random.uniform(-2, 2)))
+                            if not self._connection_exists(new_neuron, layer_idx, neuron_idx):
+                                new_neuron['connections'].append((layer_idx, neuron_idx, random.uniform(-2, 2)))

            self.layers[hidden_layer_idx].append(new_neuron)

+            # Update connections from later layers to potentially connect to this new neuron
+            new_neuron_idx = len(self.layers[hidden_layer_idx]) - 1
+            for later_layer_idx in range(hidden_layer_idx + 1, len(self.layers)):
+                if len(self.layers[later_layer_idx]) > 0:  # Only if layer has neurons
+                    for neuron in self.layers[later_layer_idx]:
+                        if random.random() < 0.2:  # 20% chance to connect to new neuron
+                            if not self._connection_exists(neuron, hidden_layer_idx, new_neuron_idx):
+                                neuron['connections'].append((hidden_layer_idx, new_neuron_idx, random.uniform(-2, 2)))
    def _remove_neuron(self):
        """Remove a random neuron from hidden layers."""
-        if len(self.layers) > 2:  # Has hidden layers
+        if len(self.layers) <= 2:  # No hidden layers
+            return
+
+        # Find hidden layers that have neurons
+        valid_hidden_layers = []
        for layer_idx in range(1, len(self.layers) - 1):  # Only hidden layers
-            if len(self.layers[layer_idx]) > 0 and random.random() < 0.02:  # 2% chance
+            if len(self.layers[layer_idx]) > 0:
+                valid_hidden_layers.append(layer_idx)
+
+        if not valid_hidden_layers:
+            return
+
+        # Pick a random hidden layer with neurons
+        layer_idx = random.choice(valid_hidden_layers)
        neuron_idx = random.randint(0, len(self.layers[layer_idx]) - 1)

        # Remove the neuron
        self.layers[layer_idx].pop(neuron_idx)

        # Remove connections to this neuron from later layers
@@ -223,7 +309,74 @@ class FlexibleNeuralNetwork:
                        for src_layer, src_neuron, weight in neuron['connections']
                        if not (src_layer == layer_idx and src_neuron == neuron_idx)
                    ]

+        # Adjust neuron indices for remaining neurons in the same layer
+        for later_layer_idx in range(layer_idx + 1, len(self.layers)):
+            for neuron in self.layers[later_layer_idx]:
+                if 'connections' in neuron:
+                    adjusted_connections = []
+                    for src_layer, src_neuron, weight in neuron['connections']:
+                        if src_layer == layer_idx and src_neuron > neuron_idx:
+                            # Adjust index down by 1 since we removed a neuron
+                            adjusted_connections.append((src_layer, src_neuron - 1, weight))
+                        else:
+                            adjusted_connections.append((src_layer, src_neuron, weight))
+                    neuron['connections'] = adjusted_connections
+
+        # Remove empty hidden layers to keep network clean; the layer-index
+        # adjustment must only run when a layer was actually popped
+        if len(self.layers[layer_idx]) == 0:
+            self.layers.pop(layer_idx)
+
+            # Adjust all layer indices in connections that reference layers after the removed one
+            for layer in self.layers:
+                for neuron in layer:
+                    if 'connections' in neuron:
+                        adjusted_connections = []
+                        for src_layer, src_neuron, weight in neuron['connections']:
+                            if src_layer > layer_idx:
+                                adjusted_connections.append((src_layer - 1, src_neuron, weight))
+                            else:
+                                adjusted_connections.append((src_layer, src_neuron, weight))
+                        neuron['connections'] = adjusted_connections
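The index shift above can be sanity-checked on toy data (illustration only, not part of the diff); assume neuron (layer 1, index 1) was just removed:

removed_layer, removed_idx = 1, 1
connections = [(1, 0, 0.4), (1, 2, -0.7), (0, 1, 0.9)]
adjusted = [
    (sl, sn - 1, w) if sl == removed_layer and sn > removed_idx else (sl, sn, w)
    for sl, sn, w in connections
]
print(adjusted)  # [(1, 0, 0.4), (1, 1, -0.7), (0, 1, 0.9)] - only (1, 2) shifts down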
+    def _ensure_network_connectivity(self):
+        """Ensure the network maintains basic connectivity from inputs to outputs."""
+        # Check if output neurons have any connections
+        output_layer = self.layers[-1]
+        for output_neuron in output_layer:
+            if 'connections' not in output_neuron or len(output_neuron['connections']) == 0:
+                # Output neuron has no connections - reconnect to input layer
+                output_neuron.setdefault('connections', [])  # guard against a missing key
+                for j in range(self.input_size):
+                    if not self._connection_exists(output_neuron, 0, j):
+                        output_neuron['connections'].append((0, j, random.uniform(-2, 2)))
+                        break  # Add at least one connection
+
+        # Ensure at least one path exists from input to output
+        if len(self.layers) > 2:  # Has hidden layers
+            # Check if any hidden neurons are connected to inputs
+            has_input_connection = False
+            for layer_idx in range(1, len(self.layers) - 1):  # Hidden layers
+                for neuron in self.layers[layer_idx]:
+                    if 'connections' in neuron:
+                        for src_layer, src_neuron, weight in neuron['connections']:
+                            if src_layer == 0:  # Connected to input
+                                has_input_connection = True
+                                break
+                    if has_input_connection:
+                        break
+                if has_input_connection:
+                    break
+
+            # If no hidden neuron connects to input, create one
+            if not has_input_connection:
+                first_hidden_layer = self.layers[1]
+                if len(first_hidden_layer) > 0:
+                    first_neuron = first_hidden_layer[0]
+                    first_neuron.setdefault('connections', [])  # guard against a missing key
+                    # Add connection to first input
+                    if not self._connection_exists(first_neuron, 0, 0):
+                        first_neuron['connections'].append((0, 0, random.uniform(-2, 2)))
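Putting it together (illustration only; the constructor arguments are assumed, since the diff does not show the __init__ signature):

net = FlexibleNeuralNetwork(input_size=4, output_size=2)  # hypothetical sizes
child = net.mutate(mutation_rate=0.1)
# After mutate(), cleanup has already run: no (source_layer, source_neuron)
# pair appears twice for any neuron, and every output neuron keeps at least
# one incoming connection.
print(child.get_structure_info())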
    def get_structure_info(self):
        """Return information about the network structure."""