Compare commits

bada025b33 ... 3b64ef62e1

2 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | 3b64ef62e1 |  |
|  | d604641453 |  |

@@ -8,6 +8,7 @@ dependencies = [
    "pre-commit>=4.2.0",
    "pydantic>=2.11.5",
    "pygame>=2.6.1",
    "pygame-gui>=0.6.14",
    "pytest>=8.3.5",
]

requirements.txt (new file, 53 lines)
@@ -0,0 +1,53 @@
# This file was autogenerated by uv via the following command:
#    uv pip compile -o requirements.txt pyproject.toml
annotated-types==0.7.0
    # via pydantic
cfgv==3.4.0
    # via pre-commit
distlib==0.3.9
    # via virtualenv
filelock==3.18.0
    # via virtualenv
identify==2.6.12
    # via pre-commit
iniconfig==2.1.0
    # via pytest
nodeenv==1.9.1
    # via pre-commit
numpy==2.3.0
    # via dynamicsystemabstraction (pyproject.toml)
packaging==25.0
    # via pytest
platformdirs==4.3.8
    # via virtualenv
pluggy==1.6.0
    # via pytest
pre-commit==4.2.0
    # via dynamicsystemabstraction (pyproject.toml)
pydantic==2.11.7
    # via dynamicsystemabstraction (pyproject.toml)
pydantic-core==2.33.2
    # via pydantic
pygame==2.6.1
    # via dynamicsystemabstraction (pyproject.toml)
pygame-ce==2.5.5
    # via pygame-gui
pygame-gui==0.6.14
    # via dynamicsystemabstraction (pyproject.toml)
pygments==2.19.1
    # via pytest
pytest==8.4.1
    # via dynamicsystemabstraction (pyproject.toml)
python-i18n==0.3.9
    # via pygame-gui
pyyaml==6.0.2
    # via pre-commit
typing-extensions==4.14.0
    # via
    #   pydantic
    #   pydantic-core
    #   typing-inspection
typing-inspection==0.4.1
    # via pydantic
virtualenv==20.31.2
    # via pre-commit

uv.lock (generated, 52 lines changed)
@@ -47,6 +47,7 @@ dependencies = [
{ name = "pre-commit" },
{ name = "pydantic" },
{ name = "pygame" },
{ name = "pygame-gui" },
{ name = "pytest" },
]

@@ -61,6 +62,7 @@ requires-dist = [
{ name = "pre-commit", specifier = ">=4.2.0" },
{ name = "pydantic", specifier = ">=2.11.5" },
{ name = "pygame", specifier = ">=2.6.1" },
{ name = "pygame-gui", specifier = ">=0.6.14" },
{ name = "pytest", specifier = ">=8.3.5" },
]

@@ -313,6 +315,47 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/7e/11/17f7f319ca91824b86557e9303e3b7a71991ef17fd45286bf47d7f0a38e6/pygame-2.6.1-cp313-cp313-win_amd64.whl", hash = "sha256:813af4fba5d0b2cb8e58f5d95f7910295c34067dcc290d34f1be59c48bd1ea6a", size = 10620084, upload-time = "2024-09-29T11:48:51.587Z" },
]

[[package]]
name = "pygame-ce"
version = "2.5.5"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/c9/be/af69521e694442dbde5db29069953f25367ddacaa50d9ae644745853d37c/pygame_ce-2.5.5.tar.gz", hash = "sha256:a7f297c223c6e35f16d65d47a19757005763ea7e90795ccc37c0bc562364ae6b", size = 5821935, upload-time = "2025-06-07T07:33:03.501Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/48/ff/65c13ed5a0ca7c186e054a416f74e1a73ab90e6fc62b8c133e07d1fe321b/pygame_ce-2.5.5-cp311-cp311-macosx_10_11_x86_64.whl", hash = "sha256:7697ca838c560e16a305de47a4ea3ad3152ebc7927b309f4251d5718b38db388", size = 12415712, upload-time = "2025-06-07T07:31:25.624Z" },
{ url = "https://files.pythonhosted.org/packages/22/17/80c6c64373d7488e5c15f70245093146880574cbb962222f03d30c7bbc4f/pygame_ce-2.5.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:683d91ce985192106014b1cc887f2f6d257f4e2329d49b6942d2cd46f030cc8a", size = 11654443, upload-time = "2025-06-07T07:31:28.212Z" },
{ url = "https://files.pythonhosted.org/packages/f6/34/adf751a50a85f61f42e80cbad684f74fe0f26cb030f16c04b45392000248/pygame_ce-2.5.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:524fbda5e132abfbeda9c296d523bc3adbe9ad33498d04762ff9c7912818c3f2", size = 12304561, upload-time = "2025-06-07T07:31:30.744Z" },
{ url = "https://files.pythonhosted.org/packages/15/19/1647079f2883454f0a0e532f8e76a784984a77a23bd1a9a6fbffe4746a9e/pygame_ce-2.5.5-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:81ea332ee9c51ccc66cbe73124a13264c303ab842aca093c2f5f2282f55c168c", size = 12835077, upload-time = "2025-06-07T07:31:33.811Z" },
{ url = "https://files.pythonhosted.org/packages/94/a6/5167c4ab343fe8ef62a360af1dacd585eb57ea42b62eb9339687e998bd39/pygame_ce-2.5.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d32e970b5f06a8710b098f178693456cc433170a7f94ef451e31d682d1a2f86", size = 12480565, upload-time = "2025-06-07T07:31:36.494Z" },
{ url = "https://files.pythonhosted.org/packages/d6/27/80e5d042c2be4ec2d1a41f42fd61ed8661cd780343511bd9a8e5edb34548/pygame_ce-2.5.5-cp311-cp311-win32.whl", hash = "sha256:8642add37c09cef881812bfb6e7f7771e7d666f870e5640e473841bf34fd771b", size = 9804082, upload-time = "2025-06-07T07:31:39.095Z" },
{ url = "https://files.pythonhosted.org/packages/da/fd/dff4d8b02bfda1be6a251747520de5e012fe56355d4fb44d4cae70290efe/pygame_ce-2.5.5-cp311-cp311-win_amd64.whl", hash = "sha256:13d1951bff009b2b1d593785fd43a9be6d3d6de0b1b0c0d85ad7d6e967d31c99", size = 10377304, upload-time = "2025-06-07T07:31:41.739Z" },
{ url = "https://files.pythonhosted.org/packages/60/2c/45729f86bd7d1b67062be9c44618d7a4269a3892b35d9677e09f3409e91e/pygame_ce-2.5.5-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:cb6801f224a806f883e5de558d3424a53e714abec7d6a47d32fc6b9cfa92e598", size = 12409360, upload-time = "2025-06-07T07:31:44.103Z" },
{ url = "https://files.pythonhosted.org/packages/6c/28/96d776b0c8efe9939c05ac15ea317b687cec6684ac9808d1c771029e308c/pygame_ce-2.5.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:adddaf63af2f5b5d0a3840aa73021b71766fa16c1ed3fc18756b54e90b66592f", size = 11660289, upload-time = "2025-06-07T07:31:46.725Z" },
{ url = "https://files.pythonhosted.org/packages/24/01/48bad8a2431507efc3b357816cbbd4815bba063f09becd54f8d9c5b04b20/pygame_ce-2.5.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1602ed3dd274e36d5c6008ff2c500223bea38aab0d4fa4a8eae0a0e5897bcb88", size = 12290133, upload-time = "2025-06-07T07:31:49.188Z" },
{ url = "https://files.pythonhosted.org/packages/ac/5d/0edb786e614bd8d5e2d34a864f0d0ac1ed6b035d7eea04976dbd35bb7e3f/pygame_ce-2.5.5-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb42884c847d54a40c98cf631ca8014406eaf8bbfeddde298269193c533a61eb", size = 12823259, upload-time = "2025-06-07T07:31:51.489Z" },
{ url = "https://files.pythonhosted.org/packages/d5/72/3ae43a335e80f55f3243d674cf47becf17e2a4c565d745b961c50aeec7a1/pygame_ce-2.5.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13dec639f227dd1092b161e696769f8b6dfbeb92e4e4fabc729dc689a4c63997", size = 12467026, upload-time = "2025-06-07T07:31:53.792Z" },
{ url = "https://files.pythonhosted.org/packages/0a/30/71e761df3d3567b59f7f73b938c682aaf2e6406d248d871ae5dd569d94e1/pygame_ce-2.5.5-cp312-cp312-win32.whl", hash = "sha256:356ba09a684a92b04330d3d6bb3136066a170aea15f300b078edda347b719fb2", size = 9802023, upload-time = "2025-06-07T07:31:55.983Z" },
{ url = "https://files.pythonhosted.org/packages/8a/77/adca7896a89d2a30d974bd337ac8adfa14e031f52319c5bfa379be092ade/pygame_ce-2.5.5-cp312-cp312-win_amd64.whl", hash = "sha256:7863dbaf5c33bbba878b6e5aa1b11592048a96b383b11dac15a0837a14c30b62", size = 10378199, upload-time = "2025-06-07T07:31:58.046Z" },
{ url = "https://files.pythonhosted.org/packages/13/d6/b72511c35d3c3ed07072cee07a1dea34e950375a225d181276c4b2316d3a/pygame_ce-2.5.5-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:32008bef532318c7f8d97fb087a16b44c0d5140969963b6afbf0f9521d18f3dc", size = 12403252, upload-time = "2025-06-07T07:32:00.325Z" },
{ url = "https://files.pythonhosted.org/packages/5d/0e/23b4ea53172d8841cfaee5b5274581b2b7fa96426e6029dda0e37b369f86/pygame_ce-2.5.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:00004956921641d9dc7e628be77ea7f23333b839e48ec981704b6516537bd67f", size = 11654505, upload-time = "2025-06-07T07:32:02.91Z" },
{ url = "https://files.pythonhosted.org/packages/5c/95/a58928af657fbba391d1691ce66f2afede3e8c65cb021b42e4488f6fd490/pygame_ce-2.5.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:255efe334dc69a78c3b7986b9c893ef86cead5aaf8c61cb6990949b4fca84142", size = 12286043, upload-time = "2025-06-07T07:32:05.147Z" },
{ url = "https://files.pythonhosted.org/packages/5a/61/499b2d078c62c9b91a6ccd2e63805ac884e1715d307d083e54bbbd75a24d/pygame_ce-2.5.5-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c4b90f01d3ae8ae0f84361480531323bbd06e10be75f4e67478b999b43f8a1f9", size = 12821547, upload-time = "2025-06-07T07:32:07.347Z" },
{ url = "https://files.pythonhosted.org/packages/d7/19/7f9a37b7ff55dc34a8f727b86b89800a1fdb4b1436e601dea66f89764472/pygame_ce-2.5.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e37bfd4545fb057ce24d06e13d1262989ca0ece3b12010c585130607e3c2bbf8", size = 12463330, upload-time = "2025-06-07T07:32:10.136Z" },
{ url = "https://files.pythonhosted.org/packages/fa/f5/5ad50c34f042bbc135312cd75d86d156bf18f54b72ae8947498acbda8cbd/pygame_ce-2.5.5-cp313-cp313-win32.whl", hash = "sha256:476a1b56b19f5023ddd0512716f11c413c3587b93dfd4aebd40869f261d3b8b7", size = 9799763, upload-time = "2025-06-07T07:32:12.332Z" },
{ url = "https://files.pythonhosted.org/packages/a8/88/89cfcaf55c8ccab5a2d59f206bf7b7d4336c4b27d9b63531a0e274cac817/pygame_ce-2.5.5-cp313-cp313-win_amd64.whl", hash = "sha256:8568fab6d43e23ca209fb860f7d387f2f89bd4047a4fa617ed0951fd9739109c", size = 10376053, upload-time = "2025-06-07T07:32:14.526Z" },
]

[[package]]
name = "pygame-gui"
version = "0.6.14"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "pygame-ce" },
{ name = "python-i18n" },
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/29/bf/d2589b06e39a6588480e6313ab530a6073a4ac6734d0f48a1883a5c46236/pygame_gui-0.6.14-py2.py3-none-any.whl", hash = "sha256:e351e88fab01756af6338d071c3cf6ce832a90c3b9f7db4fcb7b5216d5634482", size = 30896869, upload-time = "2025-05-30T18:25:53.938Z" },
]

[[package]]
name = "pytest"
version = "8.3.5"
@@ -328,6 +371,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/30/3d/64ad57c803f1fa1e963a7946b6e0fea4a70df53c1a7fed304586539c2bac/pytest-8.3.5-py3-none-any.whl", hash = "sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820", size = 343634, upload-time = "2025-03-02T12:54:52.069Z" },
]

[[package]]
name = "python-i18n"
version = "0.3.9"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/fe/32/d9ba976458c9503ec22db4eb677a5d919edaecd73d893effeaa92a67b84b/python-i18n-0.3.9.tar.gz", hash = "sha256:df97f3d2364bf3a7ebfbd6cbefe8e45483468e52a9e30b909c6078f5f471e4e8", size = 11778, upload-time = "2020-08-26T14:31:27.512Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/5a/73/9a0b2974dd9a3d50788d235f10c4d73c2efcd22926036309645fc2f0db0c/python_i18n-0.3.9-py3-none-any.whl", hash = "sha256:bda5b8d889ebd51973e22e53746417bd32783c9bd6780fd27cadbb733915651d", size = 13750, upload-time = "2020-08-26T14:31:26.266Z" },
]

[[package]]
name = "pyyaml"
version = "6.0.2"

@@ -1,475 +1,8 @@
import numpy as np
import random
from copy import deepcopy

from world.base.neural import FlexibleNeuralNetwork
from config.constants import MAX_VELOCITY, MAX_ROTATIONAL_VELOCITY
from world.behavioral import BehavioralModel


class FlexibleNeuralNetwork:
    """
    A flexible neural network that can mutate its structure and weights.
    Supports variable topology with cross-layer connections.
    """

    def __init__(self, input_size=2, output_size=2, empty_start=True):
        self.input_size = input_size
        self.output_size = output_size

        # Network structure: list of layers, each layer is a list of neurons
        # Each neuron is represented by its connections and bias
        self.layers = []

        # Initialize network based on empty_start parameter
        if empty_start:
            self._initialize_empty_network()
        else:
            self._initialize_basic_network()

        self.network_cost = self.calculate_network_cost()

    def _initialize_basic_network(self):
        """Initialize a basic network with input->output connections only."""
        # Input layer (no actual neurons, just placeholders)
        input_layer = [{'type': 'input', 'id': i} for i in range(self.input_size)]

        # Output layer with connections to all inputs
        output_layer = []
        for i in range(self.output_size):
            neuron = {
                'type': 'output',
                'id': f'out_{i}',
                'bias': random.uniform(-1, 1),
                'connections': []  # List of (source_layer, source_neuron, weight)
            }

            # Connect to all input neurons
            for j in range(self.input_size):
                neuron['connections'].append((0, j, random.uniform(-2, 2)))

            output_layer.append(neuron)

        self.layers = [input_layer, output_layer]

    def _initialize_empty_network(self):
        """Initialize an empty network with no connections or biases."""
        # Input layer (no actual neurons, just placeholders)
        input_layer = [{'type': 'input', 'id': i} for i in range(self.input_size)]

        # Output layer with no connections and zero bias
        output_layer = []
        for i in range(self.output_size):
            neuron = {
                'type': 'output',
                'id': f'out_{i}',
                'bias': 0.0,
                'connections': []  # Empty connections list
            }
            output_layer.append(neuron)

        self.layers = [input_layer, output_layer]

    def _remove_duplicate_connections(self):
        """Remove duplicate connections and keep only the last weight for each unique connection."""
        for layer in self.layers[1:]:  # Skip input layer
            for neuron in layer:
                if 'connections' not in neuron:
                    continue

                # Use a dictionary to track unique connections by (source_layer, source_neuron)
                unique_connections = {}

                for source_layer, source_neuron, weight in neuron['connections']:
                    connection_key = (source_layer, source_neuron)
                    # Keep the last weight encountered for this connection
                    unique_connections[connection_key] = weight

                # Rebuild connections list without duplicates
                neuron['connections'] = [
                    (source_layer, source_neuron, weight)
                    for (source_layer, source_neuron), weight in unique_connections.items()
                ]

    def _connection_exists(self, target_neuron, source_layer_idx, source_neuron_idx):
        """Check if a connection already exists between two neurons."""
        if 'connections' not in target_neuron:
            return False

        for source_layer, source_neuron, weight in target_neuron['connections']:
            if source_layer == source_layer_idx and source_neuron == source_neuron_idx:
                return True
        return False

    def forward(self, inputs):
        """
        Forward pass through the network.

        :param inputs: List or array of input values
        :return: List of output values
        """
        if len(inputs) != self.input_size:
            raise ValueError(f"Expected {self.input_size} inputs, got {len(inputs)}")

        # Store activations for each layer
        activations = [inputs]  # Input layer activations

        # Process each subsequent layer
        for layer_idx in range(1, len(self.layers)):
            layer_activations = []

            for neuron in self.layers[layer_idx]:
                if neuron['type'] == 'input':
                    continue  # Skip input neurons in hidden layers

                # Calculate weighted sum of inputs
                weighted_sum = 0.0  # Start with 0 instead of bias

                # Only add bias if neuron has connections
                if 'connections' in neuron and len(neuron['connections']) > 0:
                    weighted_sum = neuron['bias']

                    for source_layer, source_neuron, weight in neuron['connections']:
                        if source_layer < len(activations):
                            if source_neuron < len(activations[source_layer]):
                                weighted_sum += activations[source_layer][source_neuron] * weight

                # Apply activation function (tanh for bounded output)
                # If no connections and no bias applied, this will be tanh(0) = 0
                activation = np.tanh(weighted_sum)
                layer_activations.append(activation)

            activations.append(layer_activations)

        return activations[-1]  # Return output layer activations

    def mutate(self, mutation_rate=0.1):
        """
        Create a mutated copy of this network.

        :param mutation_rate: Base probability multiplied by specific mutation weights
        :return: New mutated FlexibleNeuralNetwork instance
        """
        mutated = deepcopy(self)

        # Weighted mutations (probability = mutation_rate * weight)
        # Higher weights = more likely to occur
        mutations = [
            (mutated._mutate_weights, 5.0),     # Most common - fine-tune existing
            (mutated._mutate_biases, 3.0),      # Common - adjust neuron thresholds
            (mutated._add_connection, 1.5),     # Moderate - grow connectivity
            (mutated._remove_connection, 0.8),  # Less common - reduce connectivity
            (mutated._add_neuron, 0.3),         # Rare - structural growth
            (mutated._remove_neuron, 0.1),      # Very rare - structural reduction
            (mutated._add_layer, 0.05),         # New: create a new layer (very rare)
        ]

        # Apply weighted random mutations
        for mutation_func, weight in mutations:
            if random.random() < (mutation_rate * weight):
                mutation_func()

        # Clean up any duplicate connections that might have been created
        mutated._remove_duplicate_connections()

        # Ensure the network maintains basic connectivity
        mutated._ensure_network_connectivity()

        mutated.network_cost = mutated.calculate_network_cost()

        return mutated

    def _mutate_weights(self):
        """Slightly modify existing connection weights."""
        for layer in self.layers[1:]:  # Skip input layer
            for neuron in layer:
                if 'connections' in neuron:
                    for i in range(len(neuron['connections'])):
                        if random.random() < 0.3:  # 30% chance to mutate each weight
                            source_layer, source_neuron, weight = neuron['connections'][i]
                            # Add small random change
                            new_weight = weight + random.uniform(-0.5, 0.5)
                            neuron['connections'][i] = (source_layer, source_neuron, new_weight)

    def _mutate_biases(self):
        """Slightly modify neuron biases."""
        for layer in self.layers[1:]:  # Skip input layer
            for neuron in layer:
                if 'bias' in neuron and random.random() < 0.3:
                    neuron['bias'] += random.uniform(-0.5, 0.5)

    def _add_connection(self):
        """Add a new random connection."""
        if len(self.layers) < 2:
            return

        # Find layers with neurons
        valid_target_layers = []
        for i in range(1, len(self.layers)):
            if len(self.layers[i]) > 0:
                valid_target_layers.append(i)

        if not valid_target_layers:
            return

        # Pick a random target neuron (not in input layer)
        target_layer_idx = random.choice(valid_target_layers)
        target_neuron_idx = random.randint(0, len(self.layers[target_layer_idx]) - 1)
        target_neuron = self.layers[target_layer_idx][target_neuron_idx]

        if 'connections' not in target_neuron:
            return

        # Find valid source layers (must have neurons and be before target)
        valid_source_layers = []
        for i in range(target_layer_idx):
            if len(self.layers[i]) > 0:
                valid_source_layers.append(i)

        if not valid_source_layers:
            return

        # Pick a random source (from any previous layer with neurons)
        source_layer_idx = random.choice(valid_source_layers)
        source_neuron_idx = random.randint(0, len(self.layers[source_layer_idx]) - 1)

        # Check if connection already exists using the helper method
        if self._connection_exists(target_neuron, source_layer_idx, source_neuron_idx):
            return  # Connection already exists, don't add duplicate

        # Add new connection
        new_weight = random.uniform(-2, 2)
        target_neuron['connections'].append((source_layer_idx, source_neuron_idx, new_weight))

    def _remove_connection(self):
        """Remove a random connection."""
        for layer in self.layers[1:]:
            for neuron in layer:
                if 'connections' in neuron and len(neuron['connections']) > 1:
                    if random.random() < 0.1:  # 10% chance to remove a connection
                        neuron['connections'].pop(random.randint(0, len(neuron['connections']) - 1))

    def _add_neuron(self):
        """Add a new neuron to a random hidden layer or create a new hidden layer."""
        if len(self.layers) == 2:  # Only input and output layers
            # Create a new hidden layer
            hidden_neuron = {
                'type': 'hidden',
                'id': f'hidden_{random.randint(1000, 9999)}',
                'bias': random.uniform(-1, 1),
                'connections': []
            }

            # Connect to some input neurons (avoid duplicates)
            for i in range(self.input_size):
                if random.random() < 0.7:  # 70% chance to connect to each input
                    if not self._connection_exists(hidden_neuron, 0, i):
                        hidden_neuron['connections'].append((0, i, random.uniform(-2, 2)))

            # Insert hidden layer
            self.layers.insert(1, [hidden_neuron])

            # Update output layer connections to potentially use new hidden neuron
            for neuron in self.layers[-1]:  # Output layer (now at index 2)
                if random.random() < 0.5:  # 50% chance to connect to new hidden neuron
                    if not self._connection_exists(neuron, 1, 0):
                        neuron['connections'].append((1, 0, random.uniform(-2, 2)))

        else:
            # Add neuron to existing hidden layer
            # Find hidden layers that exist
            hidden_layer_indices = []
            for i in range(1, len(self.layers) - 1):
                if i < len(self.layers):  # Safety check
                    hidden_layer_indices.append(i)

            if not hidden_layer_indices:
                return

            hidden_layer_idx = random.choice(hidden_layer_indices)
            new_neuron = {
                'type': 'hidden',
                'id': f'hidden_{random.randint(1000, 9999)}',
                'bias': random.uniform(-1, 1),
                'connections': []
            }

            # Connect to some neurons from previous layers (avoid duplicates)
            for layer_idx in range(hidden_layer_idx):
                if len(self.layers[layer_idx]) > 0:  # Only if layer has neurons
                    for neuron_idx in range(len(self.layers[layer_idx])):
                        if random.random() < 0.3:  # 30% chance to connect
                            if not self._connection_exists(new_neuron, layer_idx, neuron_idx):
                                new_neuron['connections'].append((layer_idx, neuron_idx, random.uniform(-2, 2)))

            self.layers[hidden_layer_idx].append(new_neuron)

            # Update connections from later layers to potentially connect to this new neuron
            new_neuron_idx = len(self.layers[hidden_layer_idx]) - 1
            for later_layer_idx in range(hidden_layer_idx + 1, len(self.layers)):
                if len(self.layers[later_layer_idx]) > 0:  # Only if layer has neurons
                    for neuron in self.layers[later_layer_idx]:
                        if random.random() < 0.2:  # 20% chance to connect to new neuron
                            if not self._connection_exists(neuron, hidden_layer_idx, new_neuron_idx):
                                neuron['connections'].append((hidden_layer_idx, new_neuron_idx, random.uniform(-2, 2)))

    def _remove_neuron(self):
        """Remove a random neuron from hidden layers."""
        if len(self.layers) <= 2:  # No hidden layers
            return

        # Find hidden layers that have neurons
        valid_hidden_layers = []
        for layer_idx in range(1, len(self.layers) - 1):  # Only hidden layers
            if len(self.layers[layer_idx]) > 0:
                valid_hidden_layers.append(layer_idx)

        if not valid_hidden_layers:
            return

        # Pick a random hidden layer with neurons
        layer_idx = random.choice(valid_hidden_layers)
        neuron_idx = random.randint(0, len(self.layers[layer_idx]) - 1)

        # Remove the neuron
        self.layers[layer_idx].pop(neuron_idx)

        # Remove connections to this neuron from later layers
        for later_layer_idx in range(layer_idx + 1, len(self.layers)):
            for neuron in self.layers[later_layer_idx]:
                if 'connections' in neuron:
                    neuron['connections'] = [
                        (src_layer, src_neuron, weight)
                        for src_layer, src_neuron, weight in neuron['connections']
                        if not (src_layer == layer_idx and src_neuron == neuron_idx)
                    ]

        # Adjust neuron indices for remaining neurons in the same layer
        for later_layer_idx in range(layer_idx + 1, len(self.layers)):
            for neuron in self.layers[later_layer_idx]:
                if 'connections' in neuron:
                    adjusted_connections = []
                    for src_layer, src_neuron, weight in neuron['connections']:
                        if src_layer == layer_idx and src_neuron > neuron_idx:
                            # Adjust index down by 1 since we removed a neuron
                            adjusted_connections.append((src_layer, src_neuron - 1, weight))
                        else:
                            adjusted_connections.append((src_layer, src_neuron, weight))
                    neuron['connections'] = adjusted_connections

        # Remove empty hidden layers to keep network clean
        if len(self.layers[layer_idx]) == 0:
            self.layers.pop(layer_idx)

            # Adjust all layer indices in connections that reference layers after the removed one
            for layer in self.layers:
                for neuron in layer:
                    if 'connections' in neuron:
                        adjusted_connections = []
                        for src_layer, src_neuron, weight in neuron['connections']:
                            if src_layer > layer_idx:
                                adjusted_connections.append((src_layer - 1, src_neuron, weight))
                            else:
                                adjusted_connections.append((src_layer, src_neuron, weight))
                        neuron['connections'] = adjusted_connections

    def _add_layer(self):
        """Add a new hidden layer at a random position with at least one neuron."""
        if len(self.layers) < 2:
            return  # Need at least input and output layers

        # Choose a position between input and output layers
        insert_idx = random.randint(1, len(self.layers) - 1)
        # Create a new hidden neuron
        new_neuron = {
            'type': 'hidden',
            'id': f'hidden_{random.randint(1000, 9999)}',
            'bias': random.uniform(-1, 1),
            'connections': []
        }
        # Connect to all neurons in the previous layer
        for prev_idx in range(len(self.layers[insert_idx - 1])):
            if random.random() < 0.5:
                new_neuron['connections'].append((insert_idx - 1, prev_idx, random.uniform(-2, 2)))
        # Insert the new layer
        self.layers.insert(insert_idx, [new_neuron])
        # Connect neurons in the next layer to the new neuron
        if insert_idx + 1 < len(self.layers):
            for neuron in self.layers[insert_idx + 1]:
                if 'connections' in neuron and random.random() < 0.5:
                    neuron['connections'].append((insert_idx, 0, random.uniform(-2, 2)))

    def _ensure_network_connectivity(self):
        """Ensure the network maintains basic connectivity from inputs to outputs."""
        # Check if output neurons have any connections
        output_layer = self.layers[-1]

        for i, output_neuron in enumerate(output_layer):
            if 'connections' not in output_neuron or len(output_neuron['connections']) == 0:
                # Output neuron has no connections - reconnect to input layer
                for j in range(self.input_size):
                    if not self._connection_exists(output_neuron, 0, j):
                        output_neuron['connections'].append((0, j, random.uniform(-2, 2)))
                        break  # Add at least one connection

        # Ensure at least one path exists from input to output
        if len(self.layers) > 2:  # Has hidden layers
            # Check if any hidden neurons are connected to inputs
            has_input_connection = False
            for layer_idx in range(1, len(self.layers) - 1):  # Hidden layers
                for neuron in self.layers[layer_idx]:
                    if 'connections' in neuron:
                        for src_layer, src_neuron, weight in neuron['connections']:
                            if src_layer == 0:  # Connected to input
                                has_input_connection = True
                                break
                    if has_input_connection:
                        break
                if has_input_connection:
                    break

            # If no hidden neuron connects to input, create one
            if not has_input_connection and len(self.layers) > 2:
                first_hidden_layer = self.layers[1]
                if len(first_hidden_layer) > 0:
                    first_neuron = first_hidden_layer[0]
                    if 'connections' in first_neuron:
                        # Add connection to first input
                        if not self._connection_exists(first_neuron, 0, 0):
                            first_neuron['connections'].append((0, 0, random.uniform(-2, 2)))

    def get_structure_info(self):
        """Return information about the network structure."""
        info = {
            'total_layers': len(self.layers),
            'layer_sizes': [len(layer) for layer in self.layers],
            'total_connections': 0,
            'total_neurons': sum(len(layer) for layer in self.layers),
            'network_cost': self.network_cost
        }

        for layer in self.layers[1:]:
            for neuron in layer:
                if 'connections' in neuron:
                    info['total_connections'] += len(neuron['connections'])

        return info

    def calculate_network_cost(self):
        """
        Estimate the computational cost of the network.
        Cost is defined as the total number of connections plus the number of neurons
        (i.e., total multiply-accumulate operations and activations per forward pass).
        """
        total_connections = 0
        total_neurons = 0
        for layer in self.layers[1:]:  # Skip input layer (no computation)
            for neuron in layer:
                total_neurons += 1
                if 'connections' in neuron:
                    total_connections += len(neuron['connections'])
        return total_connections + total_neurons


class CellBrain(BehavioralModel):
    """
    Enhanced CellBrain using a flexible neural network with input normalization.

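As an aside (illustrative only, not part of the change set): in mutate() above, each operator fires independently with probability mutation_rate * weight. A quick sketch of the effective per-call probabilities implied by the listed weights at the default rate of 0.1:

# Illustrative sketch: effective per-mutate() probabilities implied by the
# weights in FlexibleNeuralNetwork.mutate() at mutation_rate=0.1.
mutation_rate = 0.1
weights = {
    '_mutate_weights': 5.0,
    '_mutate_biases': 3.0,
    '_add_connection': 1.5,
    '_remove_connection': 0.8,
    '_add_neuron': 0.3,
    '_remove_neuron': 0.1,
    '_add_layer': 0.05,
}
for name, w in weights.items():
    # The check is `random.random() < rate * w`, so the probability caps at 1.0.
    print(f"{name}: {min(1.0, mutation_rate * w):.3f}")
# -> 0.500, 0.300, 0.150, 0.080, 0.030, 0.010, 0.005
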
@@ -0,0 +1,467 @@
import numpy as np
import random
from copy import deepcopy


class FlexibleNeuralNetwork:
    """
    A flexible neural network that can mutate its structure and weights.
    Supports variable topology with cross-layer connections.
    """

    def __init__(self, input_size=2, output_size=2, empty_start=True):
        self.input_size = input_size
        self.output_size = output_size

        # Network structure: list of layers, each layer is a list of neurons
        # Each neuron is represented by its connections and bias
        self.layers = []

        # Initialize network based on empty_start parameter
        if empty_start:
            self._initialize_empty_network()
        else:
            self._initialize_basic_network()

        self.network_cost = self.calculate_network_cost()

    def _initialize_basic_network(self):
        """Initialize a basic network with input->output connections only."""
        # Input layer (no actual neurons, just placeholders)
        input_layer = [{'type': 'input', 'id': i} for i in range(self.input_size)]

        # Output layer with connections to all inputs
        output_layer = []
        for i in range(self.output_size):
            neuron = {
                'type': 'output',
                'id': f'out_{i}',
                'bias': random.uniform(-1, 1),
                'connections': []  # List of (source_layer, source_neuron, weight)
            }

            # Connect to all input neurons
            for j in range(self.input_size):
                neuron['connections'].append((0, j, random.uniform(-2, 2)))

            output_layer.append(neuron)

        self.layers = [input_layer, output_layer]

    def _initialize_empty_network(self):
        """Initialize an empty network with no connections or biases."""
        # Input layer (no actual neurons, just placeholders)
        input_layer = [{'type': 'input', 'id': i} for i in range(self.input_size)]

        # Output layer with no connections and zero bias
        output_layer = []
        for i in range(self.output_size):
            neuron = {
                'type': 'output',
                'id': f'out_{i}',
                'bias': 0.0,
                'connections': []  # Empty connections list
            }
            output_layer.append(neuron)

        self.layers = [input_layer, output_layer]

    def _remove_duplicate_connections(self):
        """Remove duplicate connections and keep only the last weight for each unique connection."""
        for layer in self.layers[1:]:  # Skip input layer
            for neuron in layer:
                if 'connections' not in neuron:
                    continue

                # Use a dictionary to track unique connections by (source_layer, source_neuron)
                unique_connections = {}

                for source_layer, source_neuron, weight in neuron['connections']:
                    connection_key = (source_layer, source_neuron)
                    # Keep the last weight encountered for this connection
                    unique_connections[connection_key] = weight

                # Rebuild connections list without duplicates
                neuron['connections'] = [
                    (source_layer, source_neuron, weight)
                    for (source_layer, source_neuron), weight in unique_connections.items()
                ]

    def _connection_exists(self, target_neuron, source_layer_idx, source_neuron_idx):
        """Check if a connection already exists between two neurons."""
        if 'connections' not in target_neuron:
            return False

        for source_layer, source_neuron, weight in target_neuron['connections']:
            if source_layer == source_layer_idx and source_neuron == source_neuron_idx:
                return True
        return False

    def forward(self, inputs):
        """
        Forward pass through the network.

        :param inputs: List or array of input values
        :return: List of output values
        """
        if len(inputs) != self.input_size:
            raise ValueError(f"Expected {self.input_size} inputs, got {len(inputs)}")

        # Store activations for each layer
        activations = [inputs]  # Input layer activations

        # Process each subsequent layer
        for layer_idx in range(1, len(self.layers)):
            layer_activations = []

            for neuron in self.layers[layer_idx]:
                if neuron['type'] == 'input':
                    continue  # Skip input neurons in hidden layers

                # Calculate weighted sum of inputs
                weighted_sum = 0.0  # Start with 0 instead of bias

                # Only add bias if neuron has connections
                if 'connections' in neuron and len(neuron['connections']) > 0:
                    weighted_sum = neuron['bias']

                    for source_layer, source_neuron, weight in neuron['connections']:
                        if source_layer < len(activations):
                            if source_neuron < len(activations[source_layer]):
                                weighted_sum += activations[source_layer][source_neuron] * weight

                # Apply activation function (tanh for bounded output)
                # If no connections and no bias applied, this will be tanh(0) = 0
                activation = np.tanh(weighted_sum)
                layer_activations.append(activation)

            activations.append(layer_activations)

        return activations[-1]  # Return output layer activations

    def mutate(self, mutation_rate=0.1):
        """
        Create a mutated copy of this network.

        :param mutation_rate: Base probability multiplied by specific mutation weights
        :return: New mutated FlexibleNeuralNetwork instance
        """
        mutated = deepcopy(self)

        # Weighted mutations (probability = mutation_rate * weight)
        # Higher weights = more likely to occur
        mutations = [
            (mutated._mutate_weights, 5.0),     # Most common - fine-tune existing
            (mutated._mutate_biases, 3.0),      # Common - adjust neuron thresholds
            (mutated._add_connection, 1.5),     # Moderate - grow connectivity
            (mutated._remove_connection, 0.8),  # Less common - reduce connectivity
            (mutated._add_neuron, 0.3),         # Rare - structural growth
            (mutated._remove_neuron, 0.1),      # Very rare - structural reduction
            (mutated._add_layer, 0.05),         # New: create a new layer (very rare)
        ]

        # Apply weighted random mutations
        for mutation_func, weight in mutations:
            if random.random() < (mutation_rate * weight):
                mutation_func()

        # Clean up any duplicate connections that might have been created
        mutated._remove_duplicate_connections()

        # Ensure the network maintains basic connectivity
        mutated._ensure_network_connectivity()

        mutated.network_cost = mutated.calculate_network_cost()

        return mutated

    def _mutate_weights(self):
        """Slightly modify existing connection weights."""
        for layer in self.layers[1:]:  # Skip input layer
            for neuron in layer:
                if 'connections' in neuron:
                    for i in range(len(neuron['connections'])):
                        if random.random() < 0.3:  # 30% chance to mutate each weight
                            source_layer, source_neuron, weight = neuron['connections'][i]
                            # Add small random change
                            new_weight = weight + random.uniform(-0.5, 0.5)
                            neuron['connections'][i] = (source_layer, source_neuron, new_weight)

    def _mutate_biases(self):
        """Slightly modify neuron biases."""
        for layer in self.layers[1:]:  # Skip input layer
            for neuron in layer:
                if 'bias' in neuron and random.random() < 0.3:
                    neuron['bias'] += random.uniform(-0.5, 0.5)

    def _add_connection(self):
        """Add a new random connection."""
        if len(self.layers) < 2:
            return

        # Find layers with neurons
        valid_target_layers = []
        for i in range(1, len(self.layers)):
            if len(self.layers[i]) > 0:
                valid_target_layers.append(i)

        if not valid_target_layers:
            return

        # Pick a random target neuron (not in input layer)
        target_layer_idx = random.choice(valid_target_layers)
        target_neuron_idx = random.randint(0, len(self.layers[target_layer_idx]) - 1)
        target_neuron = self.layers[target_layer_idx][target_neuron_idx]

        if 'connections' not in target_neuron:
            return

        # Find valid source layers (must have neurons and be before target)
        valid_source_layers = []
        for i in range(target_layer_idx):
            if len(self.layers[i]) > 0:
                valid_source_layers.append(i)

        if not valid_source_layers:
            return

        # Pick a random source (from any previous layer with neurons)
        source_layer_idx = random.choice(valid_source_layers)
        source_neuron_idx = random.randint(0, len(self.layers[source_layer_idx]) - 1)

        # Check if connection already exists using the helper method
        if self._connection_exists(target_neuron, source_layer_idx, source_neuron_idx):
            return  # Connection already exists, don't add duplicate

        # Add new connection
        new_weight = random.uniform(-2, 2)
        target_neuron['connections'].append((source_layer_idx, source_neuron_idx, new_weight))

    def _remove_connection(self):
        """Remove a random connection."""
        for layer in self.layers[1:]:
            for neuron in layer:
                if 'connections' in neuron and len(neuron['connections']) > 1:
                    if random.random() < 0.1:  # 10% chance to remove a connection
                        neuron['connections'].pop(random.randint(0, len(neuron['connections']) - 1))

    def _add_neuron(self):
        """Add a new neuron to a random hidden layer or create a new hidden layer."""
        if len(self.layers) == 2:  # Only input and output layers
            # Create a new hidden layer
            hidden_neuron = {
                'type': 'hidden',
                'id': f'hidden_{random.randint(1000, 9999)}',
                'bias': random.uniform(-1, 1),
                'connections': []
            }

            # Connect to some input neurons (avoid duplicates)
            for i in range(self.input_size):
                if random.random() < 0.7:  # 70% chance to connect to each input
                    if not self._connection_exists(hidden_neuron, 0, i):
                        hidden_neuron['connections'].append((0, i, random.uniform(-2, 2)))

            # Insert hidden layer
            self.layers.insert(1, [hidden_neuron])

            # Update output layer connections to potentially use new hidden neuron
            for neuron in self.layers[-1]:  # Output layer (now at index 2)
                if random.random() < 0.5:  # 50% chance to connect to new hidden neuron
                    if not self._connection_exists(neuron, 1, 0):
                        neuron['connections'].append((1, 0, random.uniform(-2, 2)))

        else:
            # Add neuron to existing hidden layer
            # Find hidden layers that exist
            hidden_layer_indices = []
            for i in range(1, len(self.layers) - 1):
                if i < len(self.layers):  # Safety check
                    hidden_layer_indices.append(i)

            if not hidden_layer_indices:
                return

            hidden_layer_idx = random.choice(hidden_layer_indices)
            new_neuron = {
                'type': 'hidden',
                'id': f'hidden_{random.randint(1000, 9999)}',
                'bias': random.uniform(-1, 1),
                'connections': []
            }

            # Connect to some neurons from previous layers (avoid duplicates)
            for layer_idx in range(hidden_layer_idx):
                if len(self.layers[layer_idx]) > 0:  # Only if layer has neurons
                    for neuron_idx in range(len(self.layers[layer_idx])):
                        if random.random() < 0.3:  # 30% chance to connect
                            if not self._connection_exists(new_neuron, layer_idx, neuron_idx):
                                new_neuron['connections'].append((layer_idx, neuron_idx, random.uniform(-2, 2)))

            self.layers[hidden_layer_idx].append(new_neuron)

            # Update connections from later layers to potentially connect to this new neuron
            new_neuron_idx = len(self.layers[hidden_layer_idx]) - 1
            for later_layer_idx in range(hidden_layer_idx + 1, len(self.layers)):
                if len(self.layers[later_layer_idx]) > 0:  # Only if layer has neurons
                    for neuron in self.layers[later_layer_idx]:
                        if random.random() < 0.2:  # 20% chance to connect to new neuron
                            if not self._connection_exists(neuron, hidden_layer_idx, new_neuron_idx):
                                neuron['connections'].append((hidden_layer_idx, new_neuron_idx, random.uniform(-2, 2)))

    def _remove_neuron(self):
        """Remove a random neuron from hidden layers."""
        if len(self.layers) <= 2:  # No hidden layers
            return

        # Find hidden layers that have neurons
        valid_hidden_layers = []
        for layer_idx in range(1, len(self.layers) - 1):  # Only hidden layers
            if len(self.layers[layer_idx]) > 0:
                valid_hidden_layers.append(layer_idx)

        if not valid_hidden_layers:
            return

        # Pick a random hidden layer with neurons
        layer_idx = random.choice(valid_hidden_layers)
        neuron_idx = random.randint(0, len(self.layers[layer_idx]) - 1)

        # Remove the neuron
        self.layers[layer_idx].pop(neuron_idx)

        # Remove connections to this neuron from later layers
        for later_layer_idx in range(layer_idx + 1, len(self.layers)):
            for neuron in self.layers[later_layer_idx]:
                if 'connections' in neuron:
                    neuron['connections'] = [
                        (src_layer, src_neuron, weight)
                        for src_layer, src_neuron, weight in neuron['connections']
                        if not (src_layer == layer_idx and src_neuron == neuron_idx)
                    ]

        # Adjust neuron indices for remaining neurons in the same layer
        for later_layer_idx in range(layer_idx + 1, len(self.layers)):
            for neuron in self.layers[later_layer_idx]:
                if 'connections' in neuron:
                    adjusted_connections = []
                    for src_layer, src_neuron, weight in neuron['connections']:
                        if src_layer == layer_idx and src_neuron > neuron_idx:
                            # Adjust index down by 1 since we removed a neuron
                            adjusted_connections.append((src_layer, src_neuron - 1, weight))
                        else:
                            adjusted_connections.append((src_layer, src_neuron, weight))
                    neuron['connections'] = adjusted_connections

        # Remove empty hidden layers to keep network clean
        if len(self.layers[layer_idx]) == 0:
            self.layers.pop(layer_idx)

            # Adjust all layer indices in connections that reference layers after the removed one
            for layer in self.layers:
                for neuron in layer:
                    if 'connections' in neuron:
                        adjusted_connections = []
                        for src_layer, src_neuron, weight in neuron['connections']:
                            if src_layer > layer_idx:
                                adjusted_connections.append((src_layer - 1, src_neuron, weight))
                            else:
                                adjusted_connections.append((src_layer, src_neuron, weight))
                        neuron['connections'] = adjusted_connections

    def _add_layer(self):
        """Add a new hidden layer at a random position with at least one neuron."""
        if len(self.layers) < 2:
            return  # Need at least input and output layers

        # Choose a position between input and output layers
        insert_idx = random.randint(1, len(self.layers) - 1)
        # Create a new hidden neuron
        new_neuron = {
            'type': 'hidden',
            'id': f'hidden_{random.randint(1000, 9999)}',
            'bias': random.uniform(-1, 1),
            'connections': []
        }
        # Connect to all neurons in the previous layer
        for prev_idx in range(len(self.layers[insert_idx - 1])):
            if random.random() < 0.5:
                new_neuron['connections'].append((insert_idx - 1, prev_idx, random.uniform(-2, 2)))
        # Insert the new layer
        self.layers.insert(insert_idx, [new_neuron])
        # Connect neurons in the next layer to the new neuron
        if insert_idx + 1 < len(self.layers):
            for neuron in self.layers[insert_idx + 1]:
                if 'connections' in neuron and random.random() < 0.5:
                    neuron['connections'].append((insert_idx, 0, random.uniform(-2, 2)))

    def _ensure_network_connectivity(self):
        """Ensure the network maintains basic connectivity from inputs to outputs."""
        # Check if output neurons have any connections
        output_layer = self.layers[-1]

        for i, output_neuron in enumerate(output_layer):
            if 'connections' not in output_neuron or len(output_neuron['connections']) == 0:
                # Output neuron has no connections - reconnect to input layer
                for j in range(self.input_size):
                    if not self._connection_exists(output_neuron, 0, j):
                        output_neuron['connections'].append((0, j, random.uniform(-2, 2)))
                        break  # Add at least one connection

        # Ensure at least one path exists from input to output
        if len(self.layers) > 2:  # Has hidden layers
            # Check if any hidden neurons are connected to inputs
            has_input_connection = False
            for layer_idx in range(1, len(self.layers) - 1):  # Hidden layers
                for neuron in self.layers[layer_idx]:
                    if 'connections' in neuron:
                        for src_layer, src_neuron, weight in neuron['connections']:
                            if src_layer == 0:  # Connected to input
                                has_input_connection = True
                                break
                    if has_input_connection:
                        break
                if has_input_connection:
                    break

            # If no hidden neuron connects to input, create one
            if not has_input_connection and len(self.layers) > 2:
                first_hidden_layer = self.layers[1]
                if len(first_hidden_layer) > 0:
                    first_neuron = first_hidden_layer[0]
                    if 'connections' in first_neuron:
                        # Add connection to first input
                        if not self._connection_exists(first_neuron, 0, 0):
                            first_neuron['connections'].append((0, 0, random.uniform(-2, 2)))

    def get_structure_info(self):
        """Return information about the network structure."""
        info = {
            'total_layers': len(self.layers),
            'layer_sizes': [len(layer) for layer in self.layers],
            'total_connections': 0,
            'total_neurons': sum(len(layer) for layer in self.layers),
            'network_cost': self.network_cost
        }

        for layer in self.layers[1:]:
            for neuron in layer:
                if 'connections' in neuron:
                    info['total_connections'] += len(neuron['connections'])

        return info

    def calculate_network_cost(self):
        """
        Estimate the computational cost of the network.
        Cost is defined as the total number of connections plus the number of neurons
        (i.e., total multiply-accumulate operations and activations per forward pass).
        """
        total_connections = 0
        total_neurons = 0
        for layer in self.layers[1:]:  # Skip input layer (no computation)
            for neuron in layer:
                total_neurons += 1
                if 'connections' in neuron:
                    total_connections += len(neuron['connections'])
        return total_connections + total_neurons
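For orientation (illustrative only, not part of the change set): a minimal sketch of how the FlexibleNeuralNetwork API shown above can be exercised. The import path world.base.neural is an assumption, taken from the import line in the first file of this compare.

import random

# Assumed module path; adjust to wherever the class actually lives in the repo.
from world.base.neural import FlexibleNeuralNetwork

random.seed(42)  # initial weights and mutations are random

# empty_start=False builds a fully connected input->output network;
# empty_start=True (the default) starts with no connections and zero biases.
net = FlexibleNeuralNetwork(input_size=2, output_size=2, empty_start=False)

outputs = net.forward([0.5, -0.25])    # tanh-bounded outputs in [-1, 1]
child = net.mutate(mutation_rate=0.1)  # returns a mutated deep copy

print(outputs)
print(child.get_structure_info())      # layer sizes, connection count, network_cost
print(child.calculate_network_cost())  # connections + neurons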