diff --git a/.pylintrc b/.pylintrc index e6d41748..8a600ec4 100644 --- a/.pylintrc +++ b/.pylintrc @@ -149,7 +149,10 @@ disable=print-statement, no-self-use, duplicate-code, abstract-method, - super-with-arguments + super-with-arguments, + import-error, + unnecessary-pass, + redefined-builtin # Enable the message, report, category or checker with the given id(s). You can # either give multiple identifier separated by comma (,) or put this option @@ -571,7 +574,7 @@ valid-metaclass-classmethod-first-arg=cls max-args=10 # Maximum number of attributes for a class (see R0902). -max-attributes=20 +max-attributes=25 # Maximum number of boolean expressions in an if statement (see R0916). max-bool-expr=5 diff --git a/README.md b/README.md index c1651eac..63d2dbaf 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ # Opytimizer: A Nature-Inspired Python Optimizer [![Latest release](https://img.shields.io/github/release/gugarosa/opytimizer.svg)](https://github.com/gugarosa/opytimizer/releases) -[![DOI](https://zenodo.org/badge/109152650.svg)](https://zenodo.org/badge/latestdoi/109152650) +[![DOI](http://img.shields.io/badge/DOI-10.5281/zenodo.4594294-006DB9.svg)](https://doi.org/10.5281/zenodo.4594294) [![Build status](https://img.shields.io/travis/com/gugarosa/opytimizer/master.svg)](https://github.com/gugarosa/opytimizer/releases) [![Open issues](https://img.shields.io/github/issues/gugarosa/opytimizer.svg)](https://github.com/gugarosa/opytimizer/issues) [![License](https://img.shields.io/github/license/gugarosa/opytimizer.svg)](https://github.com/gugarosa/opytimizer/blob/master/LICENSE) @@ -65,6 +65,7 @@ Opytimizer is based on the following structure, and you should pay attention to - optimizer - space - functions + - constrained - weighted - math - distribution @@ -86,8 +87,8 @@ Opytimizer is based on the following structure, and you should pay attention to - search - tree - utils - - constants - - decorator + - callback + - constant - exception - history - logging @@ 
-106,11 +107,11 @@ Instead of using raw and straightforward functions, why not try this module? Com ### Math -Just because we are computing stuff, it does not means that we do not need math. Math is the mathematical package, containing low-level math implementations. From random numbers to distributions generation, you can find your needs on this module. +Just because we are computing stuff does not means that we do not need math. Math is the mathematical package containing low-level math implementations. From random numbers to distribution generation, you can find your needs on this module. ### Optimizers -This is why we are called Opytimizer. This is the heart of the heuristics, where you can find a large number of meta-heuristics, optimization techniques, anything that can be called as an optimizer. Please take a look on the [available optimizers](https://github.com/gugarosa/opytimizer/wiki/Types-of-Optimizers). +This is why we are called Opytimizer. This is the heart of heuristics, where you can find a large number of meta-heuristics, optimization techniques, anything that can be called an optimizer. Please take a look at the [available optimizers](https://github.com/gugarosa/opytimizer/wiki/Types-of-Optimizers). ### Spaces @@ -118,7 +119,7 @@ One can see the space as the place that agents will update their positions and e ### Utils -This is a utility package. Common things shared across the application should be implemented here. It is better to implement once and use as you wish than re-implementing the same thing over and over again. +This is a utility package. Common things shared across the application should be implemented here. It is better to implement once and use as you wish than re-implementing the same thing repeatedly. ### Visualization @@ -128,7 +129,7 @@ Everyone needs images and plots to help visualize what is happening, correct? Th ## Installation -We believe that everything has to be easy. 
Not tricky or daunting, Opytimizer will be the one-to-go package that you will need, from the very first installation to the daily-tasks implementing needs. If you may just run the following under your most preferred Python environment (raw, conda, virtualenv, whatever): +We believe that everything has to be easy. Not tricky or daunting, Opytimizer will be the one-to-go package that you will need, from the first installation to the daily tasks implementing needs. If you may just run the following under your most preferred Python environment (raw, conda, virtualenv, whatever): ```bash pip install opytimizer @@ -148,15 +149,15 @@ Note that sometimes, there is a need for additional implementation. If needed, f ### Ubuntu -No specific additional commands needed. +No specific additional commands are needed. ### Windows -No specific additional commands needed. +No specific additional commands are needed. ### MacOS -No specific additional commands needed. +No specific additional commands are needed. --- @@ -168,25 +169,24 @@ Take a look at a quick working example of Opytimizer. 
Note that we are not passi import numpy as np from opytimizer import Opytimizer -from opytimizer.core.function import Function -from opytimizer.optimizers.swarm.pso import PSO -from opytimizer.spaces.search import SearchSpace +from opytimizer.core import Function +from opytimizer.optimizers.swarm import PSO +from opytimizer.spaces import SearchSpace def sphere(x): return np.sum(x ** 2) n_agents = 20 n_variables = 2 -n_iterations = 1000 -lower_bound = (-10, -10) -upper_bound = (10, 10) +lower_bound = [-10, -10] +upper_bound = [10, 10] -s = SearchSpace(n_agents, n_iterations, n_variables, lower_bound, upper_bound) -p = PSO() -f = Function(sphere) +space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound) +optimizer = PSO() +function = Function(sphere) -o = Opytimizer(s, p, f) -o.start() +opt = Opytimizer(space, optimizer, function) +opt.start(n_iterations=1000) ``` --- diff --git a/docs/api/opytimizer.core.rst b/docs/api/opytimizer.core.rst index 73745235..fc975f31 100644 --- a/docs/api/opytimizer.core.rst +++ b/docs/api/opytimizer.core.rst @@ -1,7 +1,7 @@ opytimizer.core ========================= -Core is the core. Essentially, it is the parent of everything. You should find parent classes defining the basic of our structure. They should provide variables and methods that will help to construct other modules. +Core is the core. Essentially, it is the parent of everything. You should find parent classes defining the basis of our structure. They should provide variables and methods that will help to construct other modules. .. toctree:: opytimizer.core.agent diff --git a/docs/api/opytimizer.functions.constrained.rst b/docs/api/opytimizer.functions.constrained.rst new file mode 100644 index 00000000..0d426681 --- /dev/null +++ b/docs/api/opytimizer.functions.constrained.rst @@ -0,0 +1,7 @@ +opytimizer.functions.constrained +================================= + +.. 
automodule:: opytimizer.functions.constrained + :members: + :private-members: + :special-members: \ No newline at end of file diff --git a/docs/api/opytimizer.functions.rst b/docs/api/opytimizer.functions.rst index 30850a07..e8c4caa4 100644 --- a/docs/api/opytimizer.functions.rst +++ b/docs/api/opytimizer.functions.rst @@ -1,12 +1,12 @@ opytimizer.functions ========================= -Instead of using raw and simple functions, why not try this module? Compose high-level abstract functions or even new function-based ideas in order to solve your problems. Note that for now, we will only support multi-objective function strategies. +Instead of using raw and straightforward functions, why not try this module? Compose high-level abstract functions or even new function-based ideas in order to solve your problems. Note that for now, we will only support multi-objective function strategies. .. toctree:: + opytimizer.functions.constrained opytimizer.functions.weighted - .. automodule:: opytimizer.functions :members: :show-inheritance: \ No newline at end of file diff --git a/docs/api/opytimizer.math.rst b/docs/api/opytimizer.math.rst index 7f52dc4d..2f8ab322 100644 --- a/docs/api/opytimizer.math.rst +++ b/docs/api/opytimizer.math.rst @@ -1,7 +1,7 @@ opytimizer.math ========================= -Just because we are computing stuff, it does not means that we do not need math. Math is the mathematical package, containing low level math implementations. From random numbers to distributions generation, you can find your needs on this module. +Just because we are computing stuff does not means that we do not need math. Math is the mathematical package containing low-level math implementations. From random numbers to distribution generation, you can find your needs on this module. .. 
toctree:: opytimizer.math.distribution diff --git a/docs/api/opytimizer.optimizers.rst b/docs/api/opytimizer.optimizers.rst index 1d596f40..5ce0ca2c 100644 --- a/docs/api/opytimizer.optimizers.rst +++ b/docs/api/opytimizer.optimizers.rst @@ -1,7 +1,7 @@ opytimizer.optimizers ====================== -This is why we are called Opytimizer. This is the heart of the heuristics, where you can find a broad number of meta-heuristics, optimization techniques, anything that can be called as an optimizer. Investigate over any module for more information. +This is why we are called Opytimizer. This is the heart of heuristics, where you can find a large number of meta-heuristics, optimization techniques, anything that can be called an optimizer. Please take a look at the [available optimizers](https://github.com/gugarosa/opytimizer/wiki/Types-of-Optimizers). .. toctree:: opytimizer.optimizers.boolean diff --git a/docs/api/opytimizer.rst b/docs/api/opytimizer.rst index f1d85adc..edacb97c 100644 --- a/docs/api/opytimizer.rst +++ b/docs/api/opytimizer.rst @@ -1,5 +1,5 @@ opytimizer -========================= +=========== .. autoclass:: opytimizer.Opytimizer :members: diff --git a/docs/api/opytimizer.spaces.rst b/docs/api/opytimizer.spaces.rst index ce72b9bb..7827b973 100644 --- a/docs/api/opytimizer.spaces.rst +++ b/docs/api/opytimizer.spaces.rst @@ -1,7 +1,7 @@ opytimizer.spaces ========================= -One can see the space as the place that agents will update their positions and evaluate a fitness function. However, newest approaches may consider a different type of space. Thinking on that, we are glad to support diverse space implementations. +One can see the space as the place that agents will update their positions and evaluate a fitness function. However, the newest approaches may consider a different type of space. Thinking about that, we are glad to support diverse space implementations. .. 
toctree:: opytimizer.spaces.boolean diff --git a/docs/api/opytimizer.utils.decorator.rst b/docs/api/opytimizer.utils.callback.rst similarity index 55% rename from docs/api/opytimizer.utils.decorator.rst rename to docs/api/opytimizer.utils.callback.rst index 896dc43d..ff59131f 100644 --- a/docs/api/opytimizer.utils.decorator.rst +++ b/docs/api/opytimizer.utils.callback.rst @@ -1,7 +1,7 @@ -opytimizer.utils.decorator +opytimizer.utils.callback =========================== -.. automodule:: opytimizer.utils.decorator +.. automodule:: opytimizer.utils.callback :members: :private-members: :special-members: \ No newline at end of file diff --git a/docs/api/opytimizer.utils.constant.rst b/docs/api/opytimizer.utils.constant.rst new file mode 100644 index 00000000..c0c9a3e8 --- /dev/null +++ b/docs/api/opytimizer.utils.constant.rst @@ -0,0 +1,7 @@ +opytimizer.utils.constant +========================== + +.. automodule:: opytimizer.utils.constant + :members: + :private-members: + :special-members: \ No newline at end of file diff --git a/docs/api/opytimizer.utils.constants.rst b/docs/api/opytimizer.utils.constants.rst deleted file mode 100644 index 41aad3bc..00000000 --- a/docs/api/opytimizer.utils.constants.rst +++ /dev/null @@ -1,7 +0,0 @@ -opytimizer.utils.constants -============================== - -.. automodule:: opytimizer.utils.constants - :members: - :private-members: - :special-members: \ No newline at end of file diff --git a/docs/api/opytimizer.utils.rst b/docs/api/opytimizer.utils.rst index 2254f087..75f39d2c 100644 --- a/docs/api/opytimizer.utils.rst +++ b/docs/api/opytimizer.utils.rst @@ -1,11 +1,11 @@ opytimizer.utils ========================= -This is an utilities package. Common things shared across the application should be implemented here. It is better to implement once and use as you wish than re-implementing the same thing over and over again. +This is a utility package. Common things shared across the application should be implemented here. 
It is better to implement once and use as you wish than re-implementing the same thing repeatedly. .. toctree:: - opytimizer.utils.constants - opytimizer.utils.decorator + opytimizer.utils.callback + opytimizer.utils.constant opytimizer.utils.exception opytimizer.utils.history opytimizer.utils.logging diff --git a/docs/api/opytimizer.visualization.rst b/docs/api/opytimizer.visualization.rst index feb67f6f..062400a8 100644 --- a/docs/api/opytimizer.visualization.rst +++ b/docs/api/opytimizer.visualization.rst @@ -1,7 +1,7 @@ opytimizer.visualization ========================= -Everyone needs images and plots to help visualize what is happening, correct? This package will provide every visual-related method for you. Check a specific variable convergence, your fitness function convergence, plot benchmark function surfaces and much more! +Everyone needs images and plots to help visualize what is happening, correct? This package will provide every visual-related method for you. Check a specific variable convergence, your fitness function convergence, plot benchmark function surfaces, and much more! .. 
toctree:: opytimizer.visualization.convergence diff --git a/examples/applications/additional_features/create_optimization_checkpoints.py b/examples/applications/additional_features/create_optimization_checkpoints.py new file mode 100644 index 00000000..0f9235d2 --- /dev/null +++ b/examples/applications/additional_features/create_optimization_checkpoints.py @@ -0,0 +1,31 @@ +import numpy as np +from opytimark.markers.n_dimensional import Sphere + +from opytimizer import Opytimizer +from opytimizer.core import Function +from opytimizer.optimizers.swarm import PSO +from opytimizer.spaces import SearchSpace +from opytimizer.utils.callback import CheckpointCallback + +# Random seed for experimental consistency +np.random.seed(0) + +# Number of agents and decision variables +n_agents = 20 +n_variables = 2 + +# Lower and upper bounds (has to be the same size as `n_variables`) +lower_bound = [-10, -10] +upper_bound = [10, 10] + +# Creates the space, optimizer and function +space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound) +optimizer = PSO() +function = Function(Sphere()) + +# Bundles every piece into Opytimizer class +opt = Opytimizer(space, optimizer, function, save_agents=False) + +# Runs the optimization task +# CheckpointCallback will snapshot the optimization every `frequency` iterations +opt.start(n_iterations=10, callbacks=[CheckpointCallback(frequency=1)]) diff --git a/examples/applications/additional_features/multiple_optimization_runnings.py b/examples/applications/additional_features/multiple_optimization_runnings.py new file mode 100644 index 00000000..b56fc4ce --- /dev/null +++ b/examples/applications/additional_features/multiple_optimization_runnings.py @@ -0,0 +1,33 @@ +import numpy as np +from opytimark.markers.n_dimensional import Sphere + +from opytimizer import Opytimizer +from opytimizer.core import Function +from opytimizer.optimizers.swarm import PSO +from opytimizer.spaces import SearchSpace + +# Random seed for experimental 
consistency +np.random.seed(0) + +# Number of agents and decision variables +n_agents = 20 +n_variables = 2 + +# Lower and upper bounds (has to be the same size as `n_variables`) +lower_bound = [-10, -10] +upper_bound = [10, 10] + +# Creates the space, optimizer and function +space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound) +optimizer = PSO() +function = Function(Sphere()) + +# Bundles every piece into Opytimizer class +opt = Opytimizer(space, optimizer, function, save_agents=False) + +# Runs the optimization task +# Every call on `start` will the continue the optimization for `n_iterations` +# Note that the following lines achieves the same results as a 100-iteration running +opt.start(n_iterations=50) +opt.start(n_iterations=25) +opt.start(n_iterations=25) diff --git a/examples/applications/additional_features/resume_optimization_from_file.py b/examples/applications/additional_features/resume_optimization_from_file.py new file mode 100644 index 00000000..20e9000d --- /dev/null +++ b/examples/applications/additional_features/resume_optimization_from_file.py @@ -0,0 +1,38 @@ +import numpy as np +from opytimark.markers.n_dimensional import Sphere + +from opytimizer import Opytimizer +from opytimizer.core import Function +from opytimizer.optimizers.swarm import PSO +from opytimizer.spaces import SearchSpace +from opytimizer.utils.callback import CheckpointCallback + +# Random seed for experimental consistency +np.random.seed(0) + +# Number of agents and decision variables +n_agents = 20 +n_variables = 2 + +# Lower and upper bounds (has to be the same size as `n_variables`) +lower_bound = [-10, -10] +upper_bound = [10, 10] + +# Creates the space, optimizer and function +space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound) +optimizer = PSO() +function = Function(Sphere()) + +# Bundles every piece into Opytimizer class +opt = Opytimizer(space, optimizer, function, save_agents=False) + +# Runs the optimization task 
+opt.start(n_iterations=10, callbacks=[CheckpointCallback(frequency=10)]) + +# Deletes the optimization objecs +del opt + +# Loads the task from file and resumes it +# Note that the following lines achieves the same results as a 35-iteration running +opt = Opytimizer.load('iter_10_checkpoint.pkl') +opt.start(n_iterations=25) diff --git a/examples/applications/boolean_optimization.py b/examples/applications/boolean_optimization.py deleted file mode 100644 index 4f98ad3d..00000000 --- a/examples/applications/boolean_optimization.py +++ /dev/null @@ -1,37 +0,0 @@ -import numpy as np -from opytimark.markers.boolean import Knapsack - -import opytimizer.math.random as r -from opytimizer import Opytimizer -from opytimizer.core.function import Function -from opytimizer.optimizers.boolean.bpso import BPSO -from opytimizer.spaces.boolean import BooleanSpace - -# Random seed for experimental consistency -np.random.seed(0) - -# Number of agents, decision variables and iterations -n_agents = 5 -n_variables = 5 -n_iterations = 10 - -# Creating the BooleanSpace class -s = BooleanSpace(n_agents=n_agents, n_iterations=n_iterations, n_variables=n_variables) - -# Hyperparameters for the optimizer -hyperparams = { - 'c1': r.generate_binary_random_number(size=(n_variables, 1)), - 'c2': r.generate_binary_random_number(size=(n_variables, 1)) -} - -# Creating BPSO's optimizer -p = BPSO(hyperparams=hyperparams) - -# Creating Function's object -f = Function(pointer=Knapsack(values=(55, 10, 47, 5, 4), weights=(95, 4, 60, 32, 23), max_capacity=100)) - -# Finally, we can create an Opytimizer class -o = Opytimizer(space=s, optimizer=p, function=f) - -# Running the optimization task -history = o.start() diff --git a/examples/applications/constrained_single_objective_optimization.py b/examples/applications/constrained_single_objective_optimization.py deleted file mode 100644 index 10e09b71..00000000 --- a/examples/applications/constrained_single_objective_optimization.py +++ /dev/null @@ -1,50 
+0,0 @@ -import numpy as np -from opytimark.markers.n_dimensional import Sphere - -from opytimizer import Opytimizer -from opytimizer.core.function import Function -from opytimizer.optimizers.swarm.pso import PSO -from opytimizer.spaces.search import SearchSpace - - -# Defining a constraint function for further usage -# Note that it needs to return a boolean whether the -# constraint is valid or not -def c_1(x): - return x[0] + x[1] < 0 - -# Random seed for experimental consistency -np.random.seed(0) - -# Number of agents, decision variables and iterations -n_agents = 20 -n_variables = 2 -n_iterations = 1000 - -# Lower and upper bounds (has to be the same size as n_variables) -lower_bound = (-10, -10) -upper_bound = (10, 10) - -# Creating the SearchSpace class -s = SearchSpace(n_agents=n_agents, n_iterations=n_iterations, - n_variables=n_variables, lower_bound=lower_bound, - upper_bound=upper_bound) - -# Hyperparameters for the optimizer -hyperparams = { - 'w': 0.7, - 'c1': 1.7, - 'c2': 1.7 -} - -# Creating PSO's optimizer -p = PSO(hyperparams=hyperparams) - -# Creating Function's object -f = Function(pointer=Sphere(), constraints=[c_1], penalty=100.0) - -# Finally, we can create an Opytimizer class -o = Opytimizer(space=s, optimizer=p, function=f) - -# Running the optimization task -history = o.start() diff --git a/examples/applications/genetic_programming_optimization.py b/examples/applications/genetic_programming_optimization.py deleted file mode 100644 index a84f8dcb..00000000 --- a/examples/applications/genetic_programming_optimization.py +++ /dev/null @@ -1,53 +0,0 @@ -import numpy as np -from opytimark.markers.n_dimensional import Sphere - -from opytimizer import Opytimizer -from opytimizer.core.function import Function -from opytimizer.optimizers.evolutionary.gp import GP -from opytimizer.spaces.tree import TreeSpace - -# Random seed for experimental consistency -np.random.seed(0) - -# Number of trees, number of terminals, decision variables and iterations 
-n_trees = 10 -n_terminals = 2 -n_variables = 2 -n_iterations = 1000 - -# Minimum and maximum depths of the trees -min_depth = 2 -max_depth = 5 - -# List of functions nodes -functions = ['SUM', 'MUL', 'DIV'] - -# Finally, we define the lower and upper bounds -# Note that they have to be the same size as n_variables -lower_bound = (-10, -10) -upper_bound = (10, 10) - -# Creating the TreeSpace object -s = TreeSpace(n_trees=n_trees, n_terminals=n_terminals, n_variables=n_variables, - n_iterations=n_iterations, min_depth=min_depth, max_depth=max_depth, - functions=functions, lower_bound=lower_bound, upper_bound=upper_bound) - -# Hyperparameters for the optimizer -hyperparams = { - 'p_reproduction': 0.25, - 'p_mutation': 0.1, - 'p_crossover': 0.2, - 'prunning_ratio': 0.0 -} - -# Creating GP's optimizer -p = GP(hyperparams=hyperparams) - -# Creating Function's object -f = Function(pointer=Sphere()) - -# Finally, we can create an Opytimizer class -o = Opytimizer(space=s, optimizer=p, function=f) - -# Running the optimization task -history = o.start() diff --git a/examples/applications/grid_search_optimization.py b/examples/applications/grid_search_optimization.py deleted file mode 100644 index d1960f93..00000000 --- a/examples/applications/grid_search_optimization.py +++ /dev/null @@ -1,32 +0,0 @@ -from opytimark.markers.n_dimensional import Sphere - -from opytimizer import Opytimizer -from opytimizer.core.function import Function -from opytimizer.optimizers.misc.gs import GS -from opytimizer.spaces.grid import GridSpace - -# Number of decision variables -n_variables = 2 - -# And also the size of the step in the grid -step = (0.1, 1) - -# Lower and upper bounds (has to be the same size as n_variables) -lower_bound = (-10, -10) -upper_bound = (10, 10) - -# Creating the GridSpace class -s = GridSpace(n_variables=n_variables, step=step, - lower_bound=lower_bound, upper_bound=upper_bound) - -# Creating GS optimizer -p = GS() - -# Creating Function's object -f = 
Function(pointer=Sphere()) - -# Finally, we can create an Opytimizer class -o = Opytimizer(space=s, optimizer=p, function=f) - -# Running the optimization task -history = o.start() diff --git a/examples/applications/hyper_complex_space_optimization.py b/examples/applications/hyper_complex_space_optimization.py deleted file mode 100644 index 7fc5a3e9..00000000 --- a/examples/applications/hyper_complex_space_optimization.py +++ /dev/null @@ -1,53 +0,0 @@ -import numpy as np -from opytimark.markers.n_dimensional import Sphere - -import opytimizer.math.hyper as h -import opytimizer.utils.decorator as d -from opytimizer import Opytimizer -from opytimizer.core.function import Function -from opytimizer.optimizers.swarm.pso import PSO -from opytimizer.spaces.hyper_complex import HyperComplexSpace - -# Random seed for experimental consistency -np.random.seed(0) - -# Number of agents, decision variables, dimensions and iterations -n_agents = 20 -n_variables = 2 -n_dimensions = 4 -n_iterations = 10000 - -# Lower and upper bounds (has to be the same size as n_variables) -lower_bound = (-10, -10) -upper_bound = (10, 10) - -# Creating the HyperComplexSpace class -s = HyperComplexSpace(n_agents=n_agents, n_iterations=n_iterations, - n_variables=n_variables, n_dimensions=n_dimensions, - lower_bound=lower_bound, upper_bound=upper_bound) - -# Wrapping the objective function with a spanning decorator -# This decorator allows values to be spanned between lower and upper bounds -@d.hyper_spanning(lower_bound, upper_bound) -def wrapper(x): - z = Sphere() - return z(x) - -# Creating Function's object -f = Function(pointer=wrapper) - -# Hyperparameters for the optimizer -hyperparams = { - 'w': 0.7, - 'c1': 1.7, - 'c2': 1.7 -} - -# Creating PSO's optimizer -p = PSO(hyperparams=hyperparams) - -# Finally, we can create an Opytimizer class -o = Opytimizer(space=s, optimizer=p, function=f) - -# Running the optimization task -history = o.start() diff --git 
a/examples/applications/multi_objective/multi_objective_optimization.py b/examples/applications/multi_objective/multi_objective_optimization.py new file mode 100644 index 00000000..bb3e2987 --- /dev/null +++ b/examples/applications/multi_objective/multi_objective_optimization.py @@ -0,0 +1,29 @@ +import numpy as np +from opytimark.markers.n_dimensional import Rastrigin, Sphere + +from opytimizer import Opytimizer +from opytimizer.functions import WeightedFunction +from opytimizer.optimizers.swarm import PSO +from opytimizer.spaces import SearchSpace + +# Random seed for experimental consistency +np.random.seed(0) + +# Number of agents and decision variables +n_agents = 20 +n_variables = 2 + +# Lower and upper bounds (has to be the same size as `n_variables`) +lower_bound = [-10, -10] +upper_bound = [10, 10] + +# Creates the space, optimizer and function +space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound) +optimizer = PSO() +function = WeightedFunction([Rastrigin(), Sphere()], [0.5, 0.5]) + +# Bundles every piece into Opytimizer class +opt = Opytimizer(space, optimizer, function, save_agents=False) + +# Runs the optimization task +opt.start(n_iterations=1000) diff --git a/examples/applications/multi_objective_optimization.py b/examples/applications/multi_objective_optimization.py deleted file mode 100644 index 6473376d..00000000 --- a/examples/applications/multi_objective_optimization.py +++ /dev/null @@ -1,43 +0,0 @@ -import numpy as np -from opytimark.markers.n_dimensional import Exponential, Sphere - -from opytimizer import Opytimizer -from opytimizer.functions.weighted import WeightedFunction -from opytimizer.optimizers.swarm.fa import FA -from opytimizer.spaces.search import SearchSpace - -# Random seed for experimental consistency -np.random.seed(0) - -# Number of agents, decision variables and iterations -n_agents = 20 -n_variables = 2 -n_iterations = 1000 - -# Lower and upper bounds (has to be the same size as n_variables) -lower_bound = 
(-10, -10) -upper_bound = (10, 10) - -# Creating the SearchSpace class -s = SearchSpace(n_agents=n_agents, n_iterations=n_iterations, - n_variables=n_variables, lower_bound=lower_bound, - upper_bound=upper_bound) - -# Hyperparameters for the optimizer -hyperparams = { - 'alpha': 0.5, - 'beta': 0.2, - 'gamma': 1.0 -} - -# Creating FA's optimizer -p = FA(hyperparams=hyperparams) - -# Defining task's main function -z = WeightedFunction(functions=[Exponential(), Sphere()], weights=[0.5, 0.5]) - -# Finally, we can create an Opytimizer class -o = Opytimizer(space=s, optimizer=p, function=z) - -# Running the optimization task -history = o.start() diff --git a/examples/applications/single_objective/boolean_optimization.py b/examples/applications/single_objective/boolean_optimization.py new file mode 100644 index 00000000..54d72ddd --- /dev/null +++ b/examples/applications/single_objective/boolean_optimization.py @@ -0,0 +1,32 @@ +import numpy as np +from opytimark.markers.boolean import Knapsack + +import opytimizer.math.random as r +from opytimizer import Opytimizer +from opytimizer.core import Function +from opytimizer.optimizers.boolean import BPSO +from opytimizer.spaces import BooleanSpace + +# Random seed for experimental consistency +np.random.seed(0) + +# Number of agents and decision variables +n_agents = 5 +n_variables = 5 + +# Parameters for the optimizer +params = { + 'c1': r.generate_binary_random_number(size=(n_variables, 1)), + 'c2': r.generate_binary_random_number(size=(n_variables, 1)) +} + +# Creates the space, optimizer and function +space = BooleanSpace(n_agents, n_variables) +optimizer = BPSO(params) +function = Function(Knapsack(values=(55, 10, 47, 5, 4), weights=(95, 4, 60, 32, 23), max_capacity=100)) + +# Bundles every piece into Opytimizer class +opt = Opytimizer(space, optimizer, function, save_agents=False) + +# Runs the optimization task +opt.start(n_iterations=1000) diff --git 
a/examples/applications/single_objective/constrained_standard_optimization.py b/examples/applications/single_objective/constrained_standard_optimization.py new file mode 100644 index 00000000..866e4a49 --- /dev/null +++ b/examples/applications/single_objective/constrained_standard_optimization.py @@ -0,0 +1,36 @@ +import numpy as np +from opytimark.markers.n_dimensional import Sphere + +from opytimizer import Opytimizer +from opytimizer.functions import ConstrainedFunction +from opytimizer.optimizers.swarm import PSO +from opytimizer.spaces import SearchSpace + + +# Defines a constraint function that returns a boolean +# whether the constraint is valid or not +def c_1(x): + return x[0] + x[1] < 0 + + +# Random seed for experimental consistency +np.random.seed(0) + +# Number of agents and decision variables +n_agents = 20 +n_variables = 2 + +# Lower and upper bounds (has to be the same size as `n_variables`) +lower_bound = [-10, -10] +upper_bound = [10, 10] + +# Creates the space, optimizer and function +space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound) +optimizer = PSO() +function = ConstrainedFunction(Sphere(), [c_1], penalty=100.0) + +# Bundles every piece into Opytimizer class +opt = Opytimizer(space, optimizer, function, save_agents=False) + +# Runs the optimization task +opt.start(n_iterations=1000) diff --git a/examples/applications/single_objective/genetic_programming_optimization.py b/examples/applications/single_objective/genetic_programming_optimization.py new file mode 100644 index 00000000..fe74c158 --- /dev/null +++ b/examples/applications/single_objective/genetic_programming_optimization.py @@ -0,0 +1,36 @@ +import numpy as np +from opytimark.markers.n_dimensional import Sphere + +from opytimizer import Opytimizer +from opytimizer.core import Function +from opytimizer.optimizers.evolutionary import GP +from opytimizer.spaces import TreeSpace + +# Random seed for experimental consistency +np.random.seed(0) + +# Number of agents, 
terminals and decision variables +n_agents = 20 +n_terminals = 2 +n_variables = 2 + +# Minimum and maximum depths of the trees +min_depth = 2 +max_depth = 5 + +# Functions nodes, lower and upper bounds +functions = ['SUM', 'MUL', 'DIV'] +lower_bound = [-10, -10] +upper_bound = [10, 10] + +# Creates the space, optimizer and function +space = TreeSpace(n_agents, n_variables, lower_bound, upper_bound, + n_terminals, min_depth, max_depth, functions) +optimizer = GP() +function = Function(Sphere()) + +# Bundles every piece into Opytimizer class +opt = Opytimizer(space, optimizer, function, save_agents=False) + +# Runs the optimization task +opt.start(n_iterations=1000) diff --git a/examples/applications/single_objective/grid_search_optimization.py b/examples/applications/single_objective/grid_search_optimization.py new file mode 100644 index 00000000..8094b0ce --- /dev/null +++ b/examples/applications/single_objective/grid_search_optimization.py @@ -0,0 +1,25 @@ +from opytimark.markers.n_dimensional import Sphere + +from opytimizer import Opytimizer +from opytimizer.core import Function +from opytimizer.optimizers.misc import GS +from opytimizer.spaces import GridSpace + +# Number of decision variables and step size of the grid +n_variables = 2 +step = [0.1, 1] + +# Lower and upper bounds (has to be the same size as `n_variables`) +lower_bound = [-10, -10] +upper_bound = [10, 10] + +# Creates the space, optimizer and function +space = GridSpace(n_variables, step, lower_bound, upper_bound) +optimizer = GS() +function = Function(Sphere()) + +# Bundles every piece into Opytimizer class +opt = Opytimizer(space, optimizer, function, save_agents=False) + +# Runs the optimization task +opt.start() diff --git a/examples/applications/single_objective/hyper_complex_optimization.py b/examples/applications/single_objective/hyper_complex_optimization.py new file mode 100644 index 00000000..c406cfe2 --- /dev/null +++ 
b/examples/applications/single_objective/hyper_complex_optimization.py @@ -0,0 +1,39 @@ +import numpy as np +from opytimark.markers.n_dimensional import Sphere + +import opytimizer.math.hyper as h +from opytimizer import Opytimizer +from opytimizer.core import Function +from opytimizer.optimizers.swarm import PSO +from opytimizer.spaces import HyperComplexSpace + +# Random seed for experimental consistency +np.random.seed(0) + +# Number of agents, decision variables and dimensions +n_agents = 20 +n_variables = 2 +n_dimensions = 4 + +# Lower and upper bounds (has to be the same size as `n_variables`) +lower_bound = [-10, -10] +upper_bound = [10, 10] + +# Wraps the objective function with a spanning decorator, +# allowing values to be spanned between lower and upper bounds +@h.span_to_hyper_value(lower_bound, upper_bound) +def wrapper(x): + z = Sphere() + return z(x) + + +# Creates the space, optimizer and function +space = HyperComplexSpace(n_agents, n_variables, n_dimensions) +optimizer = PSO() +function = Function(wrapper) + +# Bundles every piece into Opytimizer class +opt = Opytimizer(space, optimizer, function, save_agents=False) + +# Runs the optimization task +opt.start(n_iterations=1000) diff --git a/examples/applications/single_objective/standard_optimization.py b/examples/applications/single_objective/standard_optimization.py new file mode 100644 index 00000000..75b3c2c0 --- /dev/null +++ b/examples/applications/single_objective/standard_optimization.py @@ -0,0 +1,29 @@ +import numpy as np +from opytimark.markers.n_dimensional import Sphere + +from opytimizer import Opytimizer +from opytimizer.core import Function +from opytimizer.optimizers.swarm import PSO +from opytimizer.spaces import SearchSpace + +# Random seed for experimental consistency +np.random.seed(0) + +# Number of agents and decision variables +n_agents = 20 +n_variables = 2 + +# Lower and upper bounds (has to be the same size as `n_variables`) +lower_bound = [-10, -10] +upper_bound = [10, 
10] + +# Creates the space, optimizer and function +space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound) +optimizer = PSO() +function = Function(Sphere()) + +# Bundles every piece into Opytimizer class +opt = Opytimizer(space, optimizer, function, save_agents=False) + +# Runs the optimization task +opt.start(n_iterations=1000) diff --git a/examples/applications/single_objective_optimization.py b/examples/applications/single_objective_optimization.py deleted file mode 100644 index 21829e93..00000000 --- a/examples/applications/single_objective_optimization.py +++ /dev/null @@ -1,43 +0,0 @@ -import numpy as np -from opytimark.markers.n_dimensional import Sphere - -from opytimizer import Opytimizer -from opytimizer.core.function import Function -from opytimizer.optimizers.swarm.pso import PSO -from opytimizer.spaces.search import SearchSpace - -# Random seed for experimental consistency -np.random.seed(0) - -# Number of agents, decision variables and iterations -n_agents = 20 -n_variables = 2 -n_iterations = 1000 - -# Lower and upper bounds (has to be the same size as n_variables) -lower_bound = (-10, -10) -upper_bound = (10, 10) - -# Creating the SearchSpace class -s = SearchSpace(n_agents=n_agents, n_iterations=n_iterations, - n_variables=n_variables, lower_bound=lower_bound, - upper_bound=upper_bound) - -# Hyperparameters for the optimizer -hyperparams = { - 'w': 0.7, - 'c1': 1.7, - 'c2': 1.7 -} - -# Creating PSO's optimizer -p = PSO(hyperparams=hyperparams) - -# Creating Function's object -f = Function(pointer=Sphere()) - -# Finally, we can create an Opytimizer class -o = Opytimizer(space=s, optimizer=p, function=f) - -# Running the optimization task -history = o.start() diff --git a/examples/core/create_agent.py b/examples/core/create_agent.py index c5372f33..c32b1f11 100644 --- a/examples/core/create_agent.py +++ b/examples/core/create_agent.py @@ -1,13 +1,17 @@ -from opytimizer.core.agent import Agent +from opytimizer.core import Agent # We need 
to define the amount of decision variables # and its dimension (single, complex, quaternion, octonion, sedenion) -n_variables = 1 +n_variables = 2 n_dimensions = 2 -# Creating a new Agent -a = Agent(n_variables=n_variables, n_dimensions=n_dimensions) +# We also need to define its bounds +lower_bound = [0, 0] +upper_bound = [1, 1] -# Printing out some agent's properties +# Creates a new Agent +a = Agent(n_variables, n_dimensions, lower_bound, upper_bound) + +# Prints out some properties print(a.n_variables, a.n_dimensions) print(a.position, a.fit) diff --git a/examples/core/create_function.py b/examples/core/create_function.py index 040d5350..83fdbea0 100644 --- a/examples/core/create_function.py +++ b/examples/core/create_function.py @@ -1,18 +1,18 @@ -from opytimizer.core.function import Function +from opytimizer.core import Function -# One should declare a function of x, where it should return a value -def test_function(x): - return x + 2 +# Defines a function with a single input and a float-based return +def test_function(z): + return z + 2 -# Declaring x variable for further use +# Declares `x` x = 0 -# Functions can be used if your objective -# function is an internal python code -f = Function(pointer=test_function) +# Any type of internal python-coded function +# can be used as a pointer +f = Function(test_function) -# Testing out your new Function class +# Prints out some properties print(f'x: {x}') print(f'f(x): {f(x)}') diff --git a/examples/core/create_node.py b/examples/core/create_node.py index 81d9e18f..8fc77dc5 100644 --- a/examples/core/create_node.py +++ b/examples/core/create_node.py @@ -2,18 +2,18 @@ from opytimizer.core.node import Node -# Creating two new Nodes -n1 = Node(name='0', node_type='TERMINAL', value=np.array(1)) -n2 = Node(name='1', node_type='TERMINAL', value=np.array(2)) +# Creates two new Nodes +n1 = Node(name='0', category='TERMINAL', value=np.array(1)) +n2 = Node(name='1', category='TERMINAL', value=np.array(2)) # Outputting 
information about one of the nodes print(n1) print(f'Post Order: {n1.post_order} | Size: {n1.n_nodes}.') # Additionally, one can stack nodes to create a tree -t = Node(name='SUM', node_type='FUNCTION', left=n1, right=n2) +t = Node(name='SUM', category='FUNCTION', left=n1, right=n2) -# Defining `n1` and `n2` parent as `t` +# Defines `n1` and `n2` parent as `t` n1.parent = t n2.parent = t diff --git a/examples/functions/create_constrained_function.py b/examples/functions/create_constrained_function.py new file mode 100644 index 00000000..5c8be603 --- /dev/null +++ b/examples/functions/create_constrained_function.py @@ -0,0 +1,22 @@ +from opytimizer.functions import ConstrainedFunction + + +# Defines a function with a single input and a float-based return +def test_function(z): + return z[0] + z[1] + + +# Defines a constraint where it returns a validity boolean +def c_1(z): + return z[0] + z[1] < 0 + + +# Declares `x` +x = [1, 1] + +# Creates a ConstrainedFunction +f = ConstrainedFunction(test_function, [c_1], 10000.0) + +# Prints out some properties +print(f'x: {x}') +print(f'f(x): {f(x)}') diff --git a/examples/functions/create_weighted_function.py b/examples/functions/create_weighted_function.py index a29ad9d6..ce70c4ae 100644 --- a/examples/functions/create_weighted_function.py +++ b/examples/functions/create_weighted_function.py @@ -1,24 +1,24 @@ -from opytimizer.functions.weighted import WeightedFunction +from opytimizer.functions import WeightedFunction -# One should declare a function of x, where it should return a value -def test_function1(x): - return x + 2 +# Defines some test functions +def test_function1(z): + return z + 2 -def test_function2(x): - return x + 5 +def test_function2(z): + return z + 5 -# Declaring x variable for further use +# Declares `x` x = 0 -# Functions can be used if your objective -# function is an internal python code -g = WeightedFunction(functions=[test_function1, test_function2], weights=[0.5, 0.5]) +# Any type of internal 
python-coded function +# can be used as a pointer +h = WeightedFunction([test_function1, test_function2], [0.5, 0.5]) # Testing out your new Function class print(f'x: {x}') -print(f'f(x): {g.functions[0](x)}') -print(f'g(x): {g.functions[1](x)}') -print(f'z(x) = 0.5f(x) + 0.5g(x): {g(x)}') +print(f'f(x): {h.functions[0](x)}') +print(f'g(x): {h.functions[1](x)}') +print(f'h(x) = 0.5f(x) + 0.5g(x): {h(x)}') diff --git a/examples/integrations/learnergy/dropout_rbm.py b/examples/integrations/learnergy/dropout_rbm.py index deb79e4b..8e2fca85 100644 --- a/examples/integrations/learnergy/dropout_rbm.py +++ b/examples/integrations/learnergy/dropout_rbm.py @@ -1,21 +1,21 @@ import torchvision -from learnergy.models.binary import DropoutRBM +from learnergy.models.bernoulli import DropoutRBM from opytimizer import Opytimizer -from opytimizer.core.function import Function -from opytimizer.optimizers.swarm.pso import PSO -from opytimizer.spaces.search import SearchSpace +from opytimizer.core import Function +from opytimizer.optimizers.swarm import PSO +from opytimizer.spaces import SearchSpace -# Creating training and testing dataset +# Creates training and testing dataset train = torchvision.datasets.MNIST( root='./data', train=True, download=True, transform=torchvision.transforms.ToTensor()) def dropout_rbm(opytimizer): - # Gathering hyperparams + # Gathers params dropout = opytimizer[0][0] - # Creating an RBM + # Creates an RBM model = DropoutRBM(n_visible=784, n_hidden=128, steps=1, learning_rate=0.1, momentum=0, decay=0, temperature=1, dropout=dropout, use_gpu=False) @@ -25,35 +25,21 @@ def dropout_rbm(opytimizer): return error -# Creating Function's object -f = Function(pointer=dropout_rbm) - -# Number of agents, decision variables and iterations +# Number of agents and decision variables n_agents = 5 n_variables = 1 -n_iterations = 5 - -# Lower and upper bounds (has to be the same size as n_variables) -lower_bound = (0,) -upper_bound = (1,) - -# Creating the SearchSpace 
class -s = SearchSpace(n_agents=n_agents, n_iterations=n_iterations, - n_variables=n_variables, lower_bound=lower_bound, - upper_bound=upper_bound) -# Hyperparameters for the optimizer -hyperparams = { - 'w': 0.7, - 'c1': 1.7, - 'c2': 1.7 -} +# Lower and upper bounds (has to be the same size as `n_variables`) +lower_bound = [0] +upper_bound = [1] -# Creating PSO's optimizer -p = PSO(hyperparams=hyperparams) +# Creates the space, optimizer and function +space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound) +optimizer = PSO() +function = Function(dropout_rbm) -# Finally, we can create an Opytimizer class -o = Opytimizer(space=s, optimizer=p, function=f) +# Bundles every piece into Opytimizer class +opt = Opytimizer(space, optimizer, function) -# Running the optimization task -history = o.start() +# Runs the optimization task +opt.start(n_iterations=5) diff --git a/examples/integrations/learnergy/rbm.py b/examples/integrations/learnergy/rbm.py index b1c3ff56..91576678 100644 --- a/examples/integrations/learnergy/rbm.py +++ b/examples/integrations/learnergy/rbm.py @@ -1,23 +1,23 @@ import torchvision -from learnergy.models.binary import RBM +from learnergy.models.bernoulli import RBM from opytimizer import Opytimizer -from opytimizer.core.function import Function -from opytimizer.optimizers.swarm.pso import PSO -from opytimizer.spaces.search import SearchSpace +from opytimizer.core import Function +from opytimizer.optimizers.swarm import PSO +from opytimizer.spaces import SearchSpace -# Creating training and testing dataset +# Creates training and testing dataset train = torchvision.datasets.MNIST( root='./data', train=True, download=True, transform=torchvision.transforms.ToTensor()) def rbm(opytimizer): - # Gathering hyperparams + # Gathers params lr = opytimizer[0][0] momentum = opytimizer[1][0] decay = opytimizer[2][0] - # Creating an RBM + # Creates an RBM model = RBM(n_visible=784, n_hidden=128, steps=1, learning_rate=lr, momentum=momentum, 
decay=decay, temperature=1, use_gpu=False) @@ -27,35 +27,21 @@ def rbm(opytimizer): return error -# Creating Function's object -f = Function(pointer=rbm) - -# Number of agents, decision variables and iterations +# Number of agents and decision variables n_agents = 10 n_variables = 3 -n_iterations = 10 - -# Lower and upper bounds (has to be the same size as n_variables) -lower_bound = (0, 0, 0) -upper_bound = (1, 1, 1) - -# Creating the SearchSpace class -s = SearchSpace(n_agents=n_agents, n_iterations=n_iterations, - n_variables=n_variables, lower_bound=lower_bound, - upper_bound=upper_bound) -# Hyperparameters for the optimizer -hyperparams = { - 'w': 0.7, - 'c1': 1.7, - 'c2': 1.7 -} +# Lower and upper bounds (has to be the same size as `n_variables`) +lower_bound = [0, 0, 0] +upper_bound = [1, 1, 1] -# Creating PSO's optimizer -p = PSO(hyperparams=hyperparams) +# Creates the space, optimizer and function +space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound) +optimizer = PSO() +function = Function(rbm) -# Finally, we can create an Opytimizer class -o = Opytimizer(space=s, optimizer=p, function=f) +# Bundles every piece into Opytimizer class +opt = Opytimizer(space, optimizer, function) -# Running the optimization task -history = o.start() +# Runs the optimization task +opt.start(n_iterations=10) diff --git a/examples/integrations/nalp/lstm.py b/examples/integrations/nalp/lstm.py index c833eb49..852b1f82 100644 --- a/examples/integrations/nalp/lstm.py +++ b/examples/integrations/nalp/lstm.py @@ -1,36 +1,32 @@ import tensorflow as tf -from nalp.corpus.text import TextCorpus -from nalp.datasets.language_modeling import LanguageModelingDataset -from nalp.encoders.integer import IntegerEncoder -from nalp.models.generators.lstm import LSTMGenerator +from nalp.corpus import TextCorpus +from nalp.datasets import LanguageModelingDataset +from nalp.encoders import IntegerEncoder +from nalp.models.generators import LSTMGenerator from opytimizer import 
Opytimizer -from opytimizer.core.function import Function -from opytimizer.optimizers.swarm.pso import PSO -from opytimizer.spaces.search import SearchSpace +from opytimizer.core import Function +from opytimizer.optimizers.swarm import PSO +from opytimizer.spaces import SearchSpace -# Creating a character TextCorpus from file +# Creates a character TextCorpus from file corpus = TextCorpus(from_file='examples/integrations/nalp/chapter1_harry.txt', corpus_type='char') -# Creating an IntegerEncoder +# Creating an IntegerEncoder, learning encoding and encoding tokens encoder = IntegerEncoder() - -# Learns the encoding based on the TextCorpus dictionary and reverse dictionary encoder.learn(corpus.vocab_index, corpus.index_vocab) - -# Applies the encoding on new data encoded_tokens = encoder.encode(corpus.tokens) # Creating Language Modeling Dataset -dataset = LanguageModelingDataset(encoded_tokens, max_length=10, batch_size=64) +dataset = LanguageModelingDataset(encoded_tokens, max_contiguous_pad_length=10, batch_size=64) def lstm(opytimizer): - # Gathering parameters from Opytimizer + # Gathers parameters from Opytimizer # Pay extremely attention to their order when declaring due to their bounds learning_rate = opytimizer[0][0] - # Creating the LSTM + # Creates the LSTM lstm = LSTMGenerator(vocab_size=corpus.vocab_size, embedding_size=256, hidden_size=512) # As NALP's LSTMs are stateful, we need to build it with a fixed batch size @@ -44,41 +40,27 @@ def lstm(opytimizer): # Fitting the LSTM history = lstm.fit(dataset.batches, epochs=100) - # Gathering last iteration's accuracy + # Gathers last iteration's accuracy acc = history.history['accuracy'][-1] return 1 - acc -# Creating Function's object -f = Function(pointer=lstm) - -# Number of agents, decision variables and iterations +# Number of agents and decision variables n_agents = 5 n_variables = 1 -n_iterations = 3 - -# Lower and upper bounds (has to be the same size as n_variables) -lower_bound = (0,) -upper_bound = 
(1,) - -# Creating the SearchSpace class -s = SearchSpace(n_agents=n_agents, n_iterations=n_iterations, - n_variables=n_variables, lower_bound=lower_bound, - upper_bound=upper_bound) -# Hyperparameters for the optimizer -hyperparams = { - 'w': 0.7, - 'c1': 1.7, - 'c2': 1.7 -} +# Lower and upper bounds (has to be the same size as `n_variables`) +lower_bound = [0] +upper_bound = [1] -# Creating PSO's optimizer -p = PSO(hyperparams=hyperparams) +# Creates the space, optimizer and function +space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound) +optimizer = PSO() +function = Function(lstm) -# Finally, we can create an Opytimizer class -o = Opytimizer(space=s, optimizer=p, function=f) +# Bundles every piece into Opytimizer class +opt = Opytimizer(space, optimizer, function) -# Running the optimization task -history = o.start() +# Runs the optimization task +opt.start(n_iterations=3) diff --git a/examples/integrations/nalp/rnn.py b/examples/integrations/nalp/rnn.py index 86e0659c..992debb0 100644 --- a/examples/integrations/nalp/rnn.py +++ b/examples/integrations/nalp/rnn.py @@ -1,36 +1,32 @@ import tensorflow as tf -from nalp.corpus.text import TextCorpus -from nalp.datasets.language_modeling import LanguageModelingDataset -from nalp.encoders.integer import IntegerEncoder -from nalp.models.generators.rnn import RNNGenerator +from nalp.corpus import TextCorpus +from nalp.datasets import LanguageModelingDataset +from nalp.encoders import IntegerEncoder +from nalp.models.generators import RNNGenerator from opytimizer import Opytimizer -from opytimizer.core.function import Function -from opytimizer.optimizers.swarm.pso import PSO -from opytimizer.spaces.search import SearchSpace +from opytimizer.core import Function +from opytimizer.optimizers.swarm import PSO +from opytimizer.spaces import SearchSpace -# Creating a character TextCorpus from file +# Creates a character TextCorpus from file corpus = 
TextCorpus(from_file='examples/integrations/nalp/chapter1_harry.txt', corpus_type='char') -# Creating an IntegerEncoder +# Creating an IntegerEncoder, learning encoding and encoding tokens encoder = IntegerEncoder() - -# Learns the encoding based on the TextCorpus dictionary and reverse dictionary encoder.learn(corpus.vocab_index, corpus.index_vocab) - -# Applies the encoding on new data encoded_tokens = encoder.encode(corpus.tokens) # Creating Language Modeling Dataset -dataset = LanguageModelingDataset(encoded_tokens, max_length=10, batch_size=64) +dataset = LanguageModelingDataset(encoded_tokens, max_contiguous_pad_length=10, batch_size=64) def rnn(opytimizer): - # Gathering parameters from Opytimizer + # Gathers parameters from Opytimizer # Pay extremely attention to their order when declaring due to their bounds learning_rate = opytimizer[0][0] - # Creating the RNN + # Creates the RNN rnn = RNNGenerator(vocab_size=corpus.vocab_size, embedding_size=256, hidden_size=512) # As NALP's RNNs are stateful, we need to build it with a fixed batch size @@ -44,41 +40,27 @@ def rnn(opytimizer): # Fitting the RNN history = rnn.fit(dataset.batches, epochs=100) - # Gathering last iteration's accuracy + # Gathers last iteration's accuracy acc = history.history['accuracy'][-1] return 1 - acc -# Creating Function's object -f = Function(pointer=rnn) - -# Number of agents, decision variables and iterations +# Number of agents and decision variables n_agents = 5 n_variables = 1 -n_iterations = 3 - -# Lower and upper bounds (has to be the same size as n_variables) -lower_bound = (0,) -upper_bound = (1,) - -# Creating the SearchSpace class -s = SearchSpace(n_agents=n_agents, n_iterations=n_iterations, - n_variables=n_variables, lower_bound=lower_bound, - upper_bound=upper_bound) -# Hyperparameters for the optimizer -hyperparams = { - 'w': 0.7, - 'c1': 1.7, - 'c2': 1.7 -} +# Lower and upper bounds (has to be the same size as `n_variables`) +lower_bound = [0] +upper_bound = [1] -# 
Creating PSO's optimizer -p = PSO(hyperparams=hyperparams) +# Creates the space, optimizer and function +space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound) +optimizer = PSO() +function = Function(rnn) -# Finally, we can create an Opytimizer class -o = Opytimizer(space=s, optimizer=p, function=f) +# Bundles every piece into Opytimizer class +opt = Opytimizer(space, optimizer, function) -# Running the optimization task -history = o.start() +# Runs the optimization task +opt.start(n_iterations=3) diff --git a/examples/integrations/opfython/supervised_opf_feature_selection.py b/examples/integrations/opfython/supervised_opf_feature_selection.py index 8a270587..26f6ae30 100644 --- a/examples/integrations/opfython/supervised_opf_feature_selection.py +++ b/examples/integrations/opfython/supervised_opf_feature_selection.py @@ -5,27 +5,27 @@ import opytimizer.math.random as r from opytimizer import Opytimizer -from opytimizer.core.function import Function -from opytimizer.optimizers.boolean.bpso import BPSO -from opytimizer.spaces.boolean import BooleanSpace +from opytimizer.core import Function +from opytimizer.optimizers.boolean import BPSO +from opytimizer.spaces import BooleanSpace -# Loading digits dataset +# Loads digits dataset digits = load_digits() -# Gathering samples and targets +# Gathers samples and targets X = digits.data Y = digits.target # Adding 1 to labels, i.e., OPF should have labels from 1+ Y += 1 -# Splitting data into training and testing sets +# Splits data into training and testing sets X_train, X_val, Y_train, Y_val = s.split( X, Y, percentage=0.5, random_state=1) def supervised_opf_feature_selection(opytimizer): - # Gathering features + # Gathers features features = opytimizer[:, 0].astype(bool) # Remaking training and validation subgraphs with selected features @@ -42,35 +42,29 @@ def supervised_opf_feature_selection(opytimizer): # Predicts new data preds = opf.predict(X_val_selected) - # Calculating accuracy + # Calculates 
accuracy acc = g.opf_accuracy(Y_val, preds) return 1 - acc -# Creating Function's object -f = Function(pointer=supervised_opf_feature_selection) - -# Number of agents, decision variables and iterations +# Number of agents and decision variables n_agents = 5 n_variables = 64 -n_iterations = 3 - -# Creating the SearchSpace class -b = BooleanSpace(n_agents=n_agents, n_iterations=n_iterations, - n_variables=n_variables) -# Hyperparameters for the optimizer -hyperparams = { +# Parameters for the optimizer +params = { 'c1': r.generate_binary_random_number(size=(n_variables, 1)), 'c2': r.generate_binary_random_number(size=(n_variables, 1)) } -# Creating BPSO's optimizer -p = BPSO(hyperparams=hyperparams) +# Creates the space, optimizer and function +space = BooleanSpace(n_agents, n_variables) +optimizer = BPSO() +function = Function(supervised_opf_feature_selection) -# Finally, we can create an Opytimizer class -o = Opytimizer(space=b, optimizer=p, function=f) +# Bundles every piece into Opytimizer class +opt = Opytimizer(space, optimizer, function) -# Running the optimization task -history = o.start() +# Runs the optimization task +opt.start(n_iterations=3) diff --git a/examples/integrations/opfython/unsupervised_opf_clustering.py b/examples/integrations/opfython/unsupervised_opf_clustering.py index cf640da3..c6216600 100644 --- a/examples/integrations/opfython/unsupervised_opf_clustering.py +++ b/examples/integrations/opfython/unsupervised_opf_clustering.py @@ -4,27 +4,27 @@ from sklearn.datasets import load_digits from opytimizer import Opytimizer -from opytimizer.core.function import Function -from opytimizer.optimizers.swarm.pso import PSO -from opytimizer.spaces.search import SearchSpace +from opytimizer.core import Function +from opytimizer.optimizers.swarm import PSO +from opytimizer.spaces import SearchSpace -# Loading digits dataset +# Loads digits dataset digits = load_digits() -# Gathering samples and targets +# Gathers samples and targets X = digits.data Y = 
digits.target # Adding 1 to labels, i.e., OPF should have labels from 1+ Y += 1 -# Splitting data into training and testing sets +# Splits data into training and testing sets X_train, X_test, Y_train, Y_test = s.split( X, Y, percentage=0.5, random_state=1) def unsupervised_opf_clustering(opytimizer): - # Gathering parameters from Opytimizer + # Gathers parameters from Opytimizer # Pay extremely attention to their order when declaring due to their bounds max_k = int(opytimizer[0][0]) @@ -41,41 +41,27 @@ def unsupervised_opf_clustering(opytimizer): # Predicts new data preds, _ = opf.predict(X_test) - # Calculating accuracy + # Calculates accuracy acc = g.opf_accuracy(Y_test, preds) return 1 - acc -# Creating Function's object -f = Function(pointer=unsupervised_opf_clustering) - -# Number of agents, decision variables and iterations +# Number of agents and decision variables n_agents = 5 n_variables = 1 -n_iterations = 3 - -# Lower and upper bounds (has to be the same size as n_variables) -lower_bound = (1,) -upper_bound = (15,) - -# Creating the SearchSpace class -s = SearchSpace(n_agents=n_agents, n_iterations=n_iterations, - n_variables=n_variables, lower_bound=lower_bound, - upper_bound=upper_bound) -# Hyperparameters for the optimizer -hyperparams = { - 'w': 0.7, - 'c1': 1.7, - 'c2': 1.7 -} +# Lower and upper bounds (has to be the same size as `n_variables`) +lower_bound = [1] +upper_bound = [15] -# Creating PSO's optimizer -p = PSO(hyperparams=hyperparams) +# Creates the space, optimizer and function +space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound) +optimizer = PSO() +function = Function(unsupervised_opf_clustering) -# Finally, we can create an Opytimizer class -o = Opytimizer(space=s, optimizer=p, function=f) +# Bundles every piece into Opytimizer class +opt = Opytimizer(space, optimizer, function) -# Running the optimization task -history = o.start() +# Runs the optimization task +opt.start(n_iterations=3) diff --git 
a/examples/integrations/pytorch/cnn.py b/examples/integrations/pytorch/cnn.py index e9de4946..3e6b3161 100644 --- a/examples/integrations/pytorch/cnn.py +++ b/examples/integrations/pytorch/cnn.py @@ -6,14 +6,14 @@ from torch.autograd import Variable from opytimizer import Opytimizer -from opytimizer.core.function import Function -from opytimizer.optimizers.swarm.pso import PSO -from opytimizer.spaces.search import SearchSpace +from opytimizer.core import Function +from opytimizer.optimizers.swarm import PSO +from opytimizer.spaces import SearchSpace # Loading digits dataset digits = load_digits() -# Gathering samples and targets +# Gathers samples and targets X = digits.data Y = digits.target @@ -36,7 +36,7 @@ def __init__(self, n_classes): # Overriding initial class super(CNN, self).__init__() - # Creating sequential model for convolutional part + # Creates sequential model for convolutional part self.conv = torch.nn.Sequential() # First convolutional block @@ -53,7 +53,7 @@ def __init__(self, n_classes): self.conv.add_module("maxpool_2", torch.nn.MaxPool2d(kernel_size=2)) self.conv.add_module("relu_2", torch.nn.ReLU()) - # Creating sequential model for fully connected part + # Creates sequential model for fully connected part self.fc = torch.nn.Sequential() # Fully connected block @@ -63,7 +63,7 @@ def __init__(self, n_classes): self.fc.add_module("fc2", torch.nn.Linear(32, n_classes)) def forward(self, x): - # Performing first block forward step + # Performs first block forward step x = self.conv.forward(x) # Flattening tensor @@ -72,31 +72,31 @@ def forward(self, x): def fit(model, loss, opt, x, y): - # Declaring initial variables + # Declares initial variables x = Variable(x, requires_grad=False) y = Variable(y, requires_grad=False) # Resetting the gradient opt.zero_grad() - # Performing the foward pass + # Performs the foward pass fw_x = model.forward(x) output = loss.forward(fw_x, y) - # Performing backward pass + # Performs backward pass output.backward() - 
# Updating parameters + # Updates parameters opt.step() return output.item() def predict(model, x_val): - # Declaring validation variable + # Declares validation variable x = Variable(x_val, requires_grad=False) - # Performing backward pass with this variable + # Performs backward pass with this variable output = model.forward(x) # Getting the index of the prediction @@ -116,28 +116,28 @@ def cnn(opytimizer): batch_size = 100 epochs = 50 - # Gathering parameters from Opytimizer + # Gathers parameters from Opytimizer # Pay extremely attention to their order when declaring due to their bounds learning_rate = opytimizer[0][0] momentum = opytimizer[1][0] - # Declaring the loss function + # Declares the loss function loss = torch.nn.CrossEntropyLoss(reduction='mean') - # Declaring the optimization algorithm + # Declares the optimization algorithm opt = optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum) - # Performing training loop + # Performs training loop for _ in range(epochs): # Initial cost as 0.0 cost = 0.0 - # Calculating the number of batches + # Calculates the number of batches num_batches = len(X_train) // batch_size # For every batch for k in range(num_batches): - # Declaring initial and ending for each batch + # Declares initial and ending for each batch start, end = k * batch_size, (k + 1) * batch_size # Cost will be the loss accumulated from model's fitting @@ -147,41 +147,27 @@ def cnn(opytimizer): # Predicting samples from evaluating set preds = predict(model, X_val) - # Calculating accuracy + # Calculates accuracy acc = np.mean(preds == Y_val) return 1 - acc -# Creating Function's object -f = Function(pointer=cnn) - -# Number of agents, decision variables and iterations +# Number of agents and decision variables n_agents = 10 n_variables = 2 -n_iterations = 100 - -# Lower and upper bounds (has to be the same size as n_variables) -lower_bound = (0, 0) -upper_bound = (1, 1) - -# Creating the SearchSpace class -s = 
SearchSpace(n_agents=n_agents, n_iterations=n_iterations, - n_variables=n_variables, lower_bound=lower_bound, - upper_bound=upper_bound) -# Hyperparameters for the optimizer -hyperparams = { - 'w': 0.7, - 'c1': 1.7, - 'c2': 1.7 -} +# Lower and upper bounds (has to be the same size as `n_variables`) +lower_bound = [0, 0] +upper_bound = [1, 1] -# Creating PSO's optimizer -p = PSO(hyperparams=hyperparams) +# Creates the space, optimizer and function +space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound) +optimizer = PSO() +function = Function(cnn) -# Finally, we can create an Opytimizer class -o = Opytimizer(space=s, optimizer=p, function=f) +# Bundles every piece into Opytimizer class +opt = Opytimizer(space, optimizer, function) -# Running the optimization task -history = o.start() +# Runs the optimization task +opt.start(n_iterations=100) diff --git a/examples/integrations/pytorch/enhanced_neural_network.py b/examples/integrations/pytorch/enhanced_neural_network.py index 906c1d3c..2f582999 100644 --- a/examples/integrations/pytorch/enhanced_neural_network.py +++ b/examples/integrations/pytorch/enhanced_neural_network.py @@ -6,56 +6,56 @@ from torch.autograd import Variable from opytimizer import Opytimizer -from opytimizer.core.function import Function -from opytimizer.optimizers.swarm.pso import PSO -from opytimizer.spaces.search import SearchSpace +from opytimizer.core import Function +from opytimizer.optimizers.swarm import PSO +from opytimizer.spaces import SearchSpace -# Loading digits dataset +# Loads digits dataset digits = load_digits() -# Gathering samples and targets +# Gathers samples and targets X = digits.data Y = digits.target -# Splitting the data +# Splits the data X_train, X_val, Y_train, Y_val = train_test_split( X, Y, test_size=0.5, random_state=42) -# Converting from numpy array to torch tensors +# Converts from numpy array to torch tensors X_train = torch.from_numpy(X_train).float() X_val = torch.from_numpy(X_val).float() Y_train 
= torch.from_numpy(Y_train).long() def fit(model, loss, opt, x, y): - # Declaring initial variables + # Declares initial variables x = Variable(x, requires_grad=False) y = Variable(y, requires_grad=False) - # Resetting the gradient + # Resets the gradient opt.zero_grad() - # Performing the foward pass + # Performs the foward pass fw_x = model.forward(x) output = loss.forward(fw_x, y) - # Performing backward pass + # Performs backward pass output.backward() - # Updating parameters + # Updates parameters opt.step() return output.item() def predict(model, x_val): - # Declaring validation variable + # Declares validation variable x = Variable(x_val, requires_grad=False) - # Performing backward pass with this variable + # Performs backward pass with this variable output = model.forward(x) - # Getting the index of the prediction + # Gets the index of the prediction y_val = output.data.numpy().argmax(axis=1) return y_val @@ -98,30 +98,30 @@ def enhanced_neural_network(opytimizer): batch_size = 100 epochs = 100 - # Gathering parameters from Opytimizer + # Gathers parameters from Opytimizer # Pay extremely attention to their order when declaring due to their bounds learning_rate = opytimizer[0][0] eps = opytimizer[1][0] weight_decay = opytimizer[2][0] - # Declaring the loss function + # Declares the loss function loss = torch.nn.CrossEntropyLoss(reduction='mean') - # Declaring the optimization algorithm + # Declares the optimization algorithm opt = optim.Adam(model.parameters(), lr=learning_rate, eps=eps, weight_decay=weight_decay) - # Performing training loop + # Performs training loop for _ in range(epochs): # Initial cost as 0.0 cost = 0.0 - # Calculating the number of batches + # Calculates the number of batches num_batches = len(X_train) // batch_size # For every batch for k in range(num_batches): - # Declaring initial and ending for each batch + # Declares initial and ending for each batch start, end = k * batch_size, (k + 1) * batch_size # Cost will be the loss 
accumulated from model's fitting @@ -131,41 +131,27 @@ def enhanced_neural_network(opytimizer): # Predicting samples from evaluating set preds = predict(model, X_val) - # Calculating accuracy + # Calculates accuracy acc = np.mean(preds == Y_val) return 1 - acc -# Creating Function's object -f = Function(pointer=enhanced_neural_network) - -# Number of agents, decision variables and iterations +# Number of agents and decision variables n_agents = 10 n_variables = 3 -n_iterations = 100 - -# Lower and upper bounds (has to be the same size as n_variables) -lower_bound = (0, 0, 0) -upper_bound = (1, 1, 1) - -# Creating the SearchSpace class -s = SearchSpace(n_agents=n_agents, n_iterations=n_iterations, - n_variables=n_variables, lower_bound=lower_bound, - upper_bound=upper_bound) -# Hyperparameters for the optimizer -hyperparams = { - 'w': 0.7, - 'c1': 1.7, - 'c2': 1.7 -} +# Lower and upper bounds (has to be the same size as `n_variables`) +lower_bound = [0, 0, 0] +upper_bound = [1, 1, 1] -# Creating PSO's optimizer -p = PSO(hyperparams=hyperparams) +# Creates the space, optimizer and function +space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound) +optimizer = PSO() +function = Function(enhanced_neural_network) -# Finally, we can create an Opytimizer class -o = Opytimizer(space=s, optimizer=p, function=f) +# Bundles every piece into Opytimizer class +opt = Opytimizer(space, optimizer, function) -# Running the optimization task -history = o.start() +# Runs the optimization task +opt.start(n_iterations=100) diff --git a/examples/integrations/pytorch/linear_regression.py b/examples/integrations/pytorch/linear_regression.py index 0765dfed..01ed43d8 100644 --- a/examples/integrations/pytorch/linear_regression.py +++ b/examples/integrations/pytorch/linear_regression.py @@ -4,34 +4,34 @@ from torch.autograd import Variable from opytimizer import Opytimizer -from opytimizer.core.function import Function -from opytimizer.optimizers.swarm.pso import PSO -from 
opytimizer.spaces.search import SearchSpace +from opytimizer.core import Function +from opytimizer.optimizers.swarm import PSO +from opytimizer.spaces import SearchSpace # Setting up a random seed torch.manual_seed(42) -# Creating X and Y data +# Creates X and Y data # Note that it is important to mantain consistency during opytimizer tasks X = torch.linspace(-1, 1, 101) Y = 2 * X + torch.randn(X.size()) * 0.33 def fit(model, loss, opt, x, y): - # Declaring initial variables + # Declares initial variables x = Variable(x, requires_grad=False) y = Variable(y, requires_grad=False) - # Resetting the gradient + # Resets the gradient opt.zero_grad() - # Performing the foward pass + # Performs the foward pass fw_x = model.forward(x.view(len(x), 1)).squeeze() output = loss.forward(fw_x, y) - # Performing backward pass + # Performs backward pass output.backward() - # Updating parameters + # Updates parameters opt.step() return output.item() @@ -47,67 +47,54 @@ def linear_regression(opytimizer): batch_size = 10 epochs = 100 - # Gathering parameters from Opytimizer + # Gathers parameters from Opytimizer # Pay extremely attention to their order when declaring due to their bounds learning_rate = opytimizer[0][0] momentum = opytimizer[1][0] - # Declaring the loss function + # Declares the loss function loss = torch.nn.MSELoss(reduction='mean') - # Declaring the optimization algorithm + # Declares the optimization algorithm opt = optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum) - # Performing training loop + # Performs training loop for _ in range(epochs): # Initial cost as 0.0 cost = 0.0 - # Calculating the number of batches + # Calculates the number of batches num_batches = len(X) // batch_size # For every batch for k in range(num_batches): - # Declaring initial and ending for each batch + # Declares initial and ending for each batch start, end = k * batch_size, (k + 1) * batch_size # Cost will be the loss accumulated from model's fitting cost += fit(model, 
loss, opt, X[start:end], Y[start:end]) - # Calculating final cost + # Calculates final cost final_cost = cost / num_batches return final_cost -# Creating Function's object -f = Function(pointer=linear_regression) -# Number of agents, decision variables and iterations +# Number of agents and decision variables n_agents = 10 n_variables = 2 -n_iterations = 100 -# Lower and upper bounds (has to be the same size as n_variables) -lower_bound = (0, 0) -upper_bound = (1, 1) +# Lower and upper bounds (has to be the same size as `n_variables`) +lower_bound = [0, 0] +upper_bound = [1, 1] -# Creating the SearchSpace class -s = SearchSpace(n_agents=n_agents, n_iterations=n_iterations, - n_variables=n_variables, lower_bound=lower_bound, - upper_bound=upper_bound) +# Creates the space, optimizer and function +space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound) +optimizer = PSO() +function = Function(linear_regression) -# Hyperparameters for the optimizer -hyperparams = { - 'w': 0.7, - 'c1': 1.7, - 'c2': 1.7 -} +# Bundles every piece into Opytimizer class +opt = Opytimizer(space, optimizer, function) -# Creating PSO's optimizer -p = PSO(hyperparams=hyperparams) - -# Finally, we can create an Opytimizer class -o = Opytimizer(space=s, optimizer=p, function=f) - -# Running the optimization task -history = o.start() +# Runs the optimization task +opt.start(n_iterations=100) diff --git a/examples/integrations/pytorch/logistic_regression.py b/examples/integrations/pytorch/logistic_regression.py index b48d642c..9da27253 100644 --- a/examples/integrations/pytorch/logistic_regression.py +++ b/examples/integrations/pytorch/logistic_regression.py @@ -6,56 +6,56 @@ from torch.autograd import Variable from opytimizer import Opytimizer -from opytimizer.core.function import Function -from opytimizer.optimizers.swarm.pso import PSO -from opytimizer.spaces.search import SearchSpace +from opytimizer.core import Function +from opytimizer.optimizers.swarm import PSO +from 
opytimizer.spaces import SearchSpace -# Loading digits dataset +# Loads digits dataset digits = load_digits() -# Gathering samples and targets +# Gathers samples and targets X = digits.data Y = digits.target -# Splitting the data +# Splits the data X_train, X_val, Y_train, Y_val = train_test_split( X, Y, test_size=0.5, random_state=42) -# Converting from numpy array to torch tensors +# Converts from numpy array to torch tensors X_train = torch.from_numpy(X_train).float() X_val = torch.from_numpy(X_val).float() Y_train = torch.from_numpy(Y_train).long() def fit(model, loss, opt, x, y): - # Declaring initial variables + # Declares initial variables x = Variable(x, requires_grad=False) y = Variable(y, requires_grad=False) - # Resetting the gradient + # Resets the gradient opt.zero_grad() - # Performing the foward pass + # Performs the foward pass fw_x = model.forward(x) output = loss.forward(fw_x, y) - # Performing backward pass + # Performs backward pass output.backward() - # Updating parameters + # Updates parameters opt.step() return output.item() def predict(model, x_val): - # Declaring validation variable + # Declares validation variable x = Variable(x_val, requires_grad=False) - # Performing backward pass with this variable + # Performs backward pass with this variable output = model.forward(x) - # Getting the index of the prediction + # Gets the index of the prediction y_val = output.data.numpy().argmax(axis=1) return y_val @@ -77,28 +77,28 @@ def logistic_regression(opytimizer): batch_size = 100 epochs = 100 - # Gathering parameters from Opytimizer + # Gathers parameters from Opytimizer # Pay extremely attention to their order when declaring due to their bounds learning_rate = opytimizer[0][0] momentum = opytimizer[1][0] - # Declaring the loss function + # Declares the loss function loss = torch.nn.CrossEntropyLoss(reduction='mean') - # Declaring the optimization algorithm + # Declares the optimization algorithm opt = optim.SGD(model.parameters(), 
lr=learning_rate, momentum=momentum) - # Performing training loop + # Performs training loop for _ in range(epochs): # Initial cost as 0.0 cost = 0.0 - # Calculating the number of batches + # Calculates the number of batches num_batches = len(X_train) // batch_size # For every batch for k in range(num_batches): - # Declaring initial and ending for each batch + # Declares initial and ending for each batch start, end = k * batch_size, (k + 1) * batch_size # Cost will be the loss accumulated from model's fitting @@ -108,41 +108,27 @@ def logistic_regression(opytimizer): # Predicting samples from evaluating set preds = predict(model, X_val) - # Calculating accuracy + # Calculates accuracy acc = np.mean(preds == Y_val) return 1 - acc -# Creating Function's object -f = Function(pointer=logistic_regression) - -# Number of agents, decision variables and iterations +# Number of agents and decision variables n_agents = 10 n_variables = 2 -n_iterations = 100 - -# Lower and upper bounds (has to be the same size as n_variables) -lower_bound = (0, 0) -upper_bound = (1, 1) - -# Creating the SearchSpace class -s = SearchSpace(n_agents=n_agents, n_iterations=n_iterations, - n_variables=n_variables, lower_bound=lower_bound, - upper_bound=upper_bound) -# Hyperparameters for the optimizer -hyperparams = { - 'w': 0.7, - 'c1': 1.7, - 'c2': 1.7 -} +# Lower and upper bounds (has to be the same size as `n_variables`) +lower_bound = [0, 0] +upper_bound = [1, 1] -# Creating PSO's optimizer -p = PSO(hyperparams=hyperparams) +# Creates the space, optimizer and function +space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound) +optimizer = PSO() +function = Function(logistic_regression) -# Finally, we can create an Opytimizer class -o = Opytimizer(space=s, optimizer=p, function=f) +# Bundles every piece into Opytimizer class +opt = Opytimizer(space, optimizer, function) -# Running the optimization task -history = o.start() +# Runs the optimization task +opt.start(n_iterations=100) 
diff --git a/examples/integrations/pytorch/lstm.py b/examples/integrations/pytorch/lstm.py index 2a04a5fe..11bc02f9 100644 --- a/examples/integrations/pytorch/lstm.py +++ b/examples/integrations/pytorch/lstm.py @@ -6,30 +6,30 @@ from torch.autograd import Variable from opytimizer import Opytimizer -from opytimizer.core.function import Function -from opytimizer.optimizers.swarm.pso import PSO -from opytimizer.spaces.search import SearchSpace +from opytimizer.core import Function +from opytimizer.optimizers.swarm import PSO +from opytimizer.spaces import SearchSpace -# Loading digits dataset +# Loads digits dataset digits = load_digits() -# Gathering samples and targets +# Gathers samples and targets X = digits.data Y = digits.target -# Splitting the data +# Splits the data X_train, X_val, Y_train, Y_val = train_test_split( X, Y, test_size=0.5, random_state=42) -# Reshaping the data +# Reshapes the data X_train = X_train.reshape(-1, 8, 8) X_val = X_val.reshape(-1, 8, 8) -# Converting to sequence shape +# Converts to sequence shape X_train = np.swapaxes(X_train, 0, 1) X_val = np.swapaxes(X_val, 0, 1) -# Converting from numpy array to torch tensors +# Converts from numpy array to torch tensors X_train = torch.from_numpy(X_train).float() X_val = torch.from_numpy(X_val).float() Y_train = torch.from_numpy(Y_train).long() @@ -43,14 +43,14 @@ def __init__(self, n_features, n_hidden, n_classes): # Saving number of hidden units as a property self.n_hidden = n_hidden - # Creating LSTM cell + # Creates LSTM cell self.lstm = torch.nn.LSTM(n_features, n_hidden) - # Creating linear layer + # Creates linear layer self.linear = torch.nn.Linear(n_hidden, n_classes, bias=False) def forward(self, x): - # Gathering batch size + # Gathers batch size batch_size = x.size()[1] # Variable to hold hidden state @@ -61,41 +61,41 @@ def forward(self, x): c0 = Variable(torch.zeros( [1, batch_size, self.n_hidden]), requires_grad=False) - # Performing forward pass + # Performs forward pass fx, _ = 
self.lstm.forward(x, (h0, c0)) return self.linear.forward(fx[-1]) def fit(model, loss, opt, x, y): - # Declaring initial variables + # Declares initial variables x = Variable(x, requires_grad=False) y = Variable(y, requires_grad=False) - # Resetting the gradient + # Resets the gradient opt.zero_grad() - # Performing the foward pass + # Performs the foward pass fw_x = model.forward(x) output = loss.forward(fw_x, y) - # Performing backward pass + # Performs backward pass output.backward() - # Updating parameters + # Updates parameters opt.step() return output.item() def predict(model, x_val): - # Declaring validation variable + # Declares validation variable x = Variable(x_val, requires_grad=False) - # Performing backward pass with this variable + # Performs backward pass with this variable output = model.forward(x) - # Getting the index of the prediction + # Gets the index of the prediction y_val = output.data.numpy().argmax(axis=1) return y_val @@ -114,28 +114,28 @@ def lstm(opytimizer): batch_size = 100 epochs = 5 - # Gathering parameters from Opytimizer + # Gathers parameters from Opytimizer # Pay extremely attention to their order when declaring due to their bounds learning_rate = opytimizer[0][0] momentum = opytimizer[1][0] - # Declaring the loss function + # Declares the loss function loss = torch.nn.CrossEntropyLoss(reduction='mean') - # Declaring the optimization algorithm + # Declares the optimization algorithm opt = optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum) - # Performing training loop + # Performs training loop for _ in range(epochs): # Initial cost as 0.0 cost = 0.0 - # Calculating the number of batches + # Calculates the number of batches num_batches = len(Y_train) // batch_size # For every batch for k in range(num_batches): - # Declaring initial and ending for each batch + # Declares initial and ending for each batch start, end = k * batch_size, (k + 1) * batch_size # Cost will be the loss accumulated from model's fitting @@ 
-145,41 +145,27 @@ def lstm(opytimizer): # Predicting samples from evaluating set preds = predict(model, X_val) - # Calculating accuracy + # Calculates accuracy acc = np.mean(preds == Y_val) return 1 - acc -# Creating Function's object -f = Function(pointer=lstm) - -# Number of agents, decision variables and iterations +# Number of agents and decision variables n_agents = 10 n_variables = 2 -n_iterations = 100 - -# Lower and upper bounds (has to be the same size as n_variables) -lower_bound = (0, 0) -upper_bound = (1, 1) - -# Creating the SearchSpace class -s = SearchSpace(n_agents=n_agents, n_iterations=n_iterations, - n_variables=n_variables, lower_bound=lower_bound, - upper_bound=upper_bound) -# Hyperparameters for the optimizer -hyperparams = { - 'w': 0.7, - 'c1': 1.7, - 'c2': 1.7 -} +# Lower and upper bounds (has to be the same size as `n_variables`) +lower_bound = [0, 0] +upper_bound = [1, 1] -# Creating PSO's optimizer -p = PSO(hyperparams=hyperparams) +# Creates the space, optimizer and function +space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound) +optimizer = PSO() +function = Function(lstm) -# Finally, we can create an Opytimizer class -o = Opytimizer(space=s, optimizer=p, function=f) +# Bundles every piece into Opytimizer class +opt = Opytimizer(space, optimizer, function) -# Running the optimization task -history = o.start() +# Runs the optimization task +opt.start(n_iterations=100) diff --git a/examples/integrations/pytorch/neural_network.py b/examples/integrations/pytorch/neural_network.py index 19e3fc08..1db0db61 100644 --- a/examples/integrations/pytorch/neural_network.py +++ b/examples/integrations/pytorch/neural_network.py @@ -6,56 +6,56 @@ from torch.autograd import Variable from opytimizer import Opytimizer -from opytimizer.core.function import Function -from opytimizer.optimizers.swarm.pso import PSO -from opytimizer.spaces.search import SearchSpace +from opytimizer.core import Function +from opytimizer.optimizers.swarm import 
PSO +from opytimizer.spaces import SearchSpace -# Loading digits dataset +# Loads digits dataset digits = load_digits() -# Gathering samples and targets +# Gathers samples and targets X = digits.data Y = digits.target -# Splitting the data +# Splits the data X_train, X_val, Y_train, Y_val = train_test_split( X, Y, test_size=0.5, random_state=42) -# Converting from numpy array to torch tensors +# Converts from numpy array to torch tensors X_train = torch.from_numpy(X_train).float() X_val = torch.from_numpy(X_val).float() Y_train = torch.from_numpy(Y_train).long() def fit(model, loss, opt, x, y): - # Declaring initial variables + # Declares initial variables x = Variable(x, requires_grad=False) y = Variable(y, requires_grad=False) - # Resetting the gradient + # Resets the gradient opt.zero_grad() - # Performing the foward pass + # Performs the foward pass fw_x = model.forward(x) output = loss.forward(fw_x, y) - # Performing backward pass + # Performs backward pass output.backward() - # Updating parameters + # Updates parameters opt.step() return output.item() def predict(model, x_val): - # Declaring validation variable + # Declares validation variable x = Variable(x_val, requires_grad=False) - # Performing backward pass with this variable + # Performs backward pass with this variable output = model.forward(x) - # Getting the index of the prediction + # Gets the index of the prediction y_val = output.data.numpy().argmax(axis=1) return y_val @@ -84,28 +84,28 @@ def neural_network(opytimizer): batch_size = 100 epochs = 100 - # Gathering parameters from Opytimizer + # Gathers parameters from Opytimizer # Pay extremely attention to their order when declaring due to their bounds learning_rate = opytimizer[0][0] momentum = opytimizer[1][0] - # Declaring the loss function + # Declares the loss function loss = torch.nn.CrossEntropyLoss(reduction='mean') - # Declaring the optimization algorithm + # Declares the optimization algorithm opt = optim.SGD(model.parameters(), 
lr=learning_rate, momentum=momentum) - # Performing training loop + # Performs training loop for _ in range(epochs): # Initial cost as 0.0 cost = 0.0 - # Calculating the number of batches + # Calculates the number of batches num_batches = len(X_train) // batch_size # For every batch for k in range(num_batches): - # Declaring initial and ending for each batch + # Declares initial and ending for each batch start, end = k * batch_size, (k + 1) * batch_size # Cost will be the loss accumulated from model's fitting @@ -115,41 +115,27 @@ def neural_network(opytimizer): # Predicting samples from evaluating set preds = predict(model, X_val) - # Calculating accuracy + # Calculates accuracy acc = np.mean(preds == Y_val) return 1 - acc -# Creating Function's object -f = Function(pointer=neural_network) - -# Number of agents, decision variables and iterations +# Number of agents and decision variables n_agents = 10 n_variables = 2 -n_iterations = 100 - -# Lower and upper bounds (has to be the same size as n_variables) -lower_bound = (0, 0) -upper_bound = (1, 1) - -# Creating the SearchSpace class -s = SearchSpace(n_agents=n_agents, n_iterations=n_iterations, - n_variables=n_variables, lower_bound=lower_bound, - upper_bound=upper_bound) -# Hyperparameters for the optimizer -hyperparams = { - 'w': 0.7, - 'c1': 1.7, - 'c2': 1.7 -} +# Lower and upper bounds (has to be the same size as `n_variables`) +lower_bound = [0, 0] +upper_bound = [1, 1] -# Creating PSO's optimizer -p = PSO(hyperparams=hyperparams) +# Creates the space, optimizer and function +space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound) +optimizer = PSO() +function = Function(neural_network) -# Finally, we can create an Opytimizer class -o = Opytimizer(space=s, optimizer=p, function=f) +# Bundles every piece into Opytimizer class +opt = Opytimizer(space, optimizer, function) -# Running the optimization task -history = o.start() +# Runs the optimization task +opt.start(n_iterations=100) diff --git 
a/examples/integrations/sklearn/k_means_clustering.py b/examples/integrations/sklearn/k_means_clustering.py index d984419b..5fe01e57 100644 --- a/examples/integrations/sklearn/k_means_clustering.py +++ b/examples/integrations/sklearn/k_means_clustering.py @@ -4,62 +4,48 @@ from sklearn.datasets import load_digits from opytimizer import Opytimizer -from opytimizer.core.function import Function -from opytimizer.optimizers.swarm.pso import PSO -from opytimizer.spaces.search import SearchSpace +from opytimizer.core import Function +from opytimizer.optimizers.swarm import PSO +from opytimizer.spaces import SearchSpace -# Loading digits dataset +# Loads digits dataset digits = load_digits() -# Gathering samples and targets +# Gathers samples and targets X = digits.data Y = digits.target def k_means_clustering(opytimizer): - # Gathering hyperparams + # Gathers params n_clusters = int(opytimizer[0][0]) # Instanciating an KMeans class kmeans = KMeans(n_clusters=n_clusters, random_state=1).fit(X) - # Gathering predicitions + # Gathers predicitions preds = kmeans.labels_ - # Calculating adjusted rand index + # Calculates adjusted rand index ari = metrics.adjusted_rand_score(Y, preds) return 1 - ari -# Creating Function's object -f = Function(pointer=k_means_clustering) - -# Number of agents, decision variables and iterations +# Number of agents and decision variables n_agents = 10 n_variables = 1 -n_iterations = 100 - -# Lower and upper bounds (has to be the same size as n_variables) -lower_bound = (1,) -upper_bound = (100,) - -# Creating the SearchSpace class -s = SearchSpace(n_agents=n_agents, n_iterations=n_iterations, - n_variables=n_variables, lower_bound=lower_bound, - upper_bound=upper_bound) -# Hyperparameters for the optimizer -hyperparams = { - 'w': 0.7, - 'c1': 1.7, - 'c2': 1.7 -} +# Lower and upper bounds (has to be the same size as `n_variables`) +lower_bound = [1] +upper_bound = [100] -# Creating PSO's optimizer -p = PSO(hyperparams=hyperparams) +# Creates the 
space, optimizer and function +space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound) +optimizer = PSO() +function = Function(k_means_clustering) -# Finally, we can create an Opytimizer class -o = Opytimizer(space=s, optimizer=p, function=f) +# Bundles every piece into Opytimizer class +opt = Opytimizer(space, optimizer, function) -# Running the optimization task -history = o.start() +# Runs the optimization task +opt.start(n_iterations=100) diff --git a/examples/integrations/sklearn/svm.py b/examples/integrations/sklearn/svm.py index e801b5fc..cb1f75b2 100644 --- a/examples/integrations/sklearn/svm.py +++ b/examples/integrations/sklearn/svm.py @@ -4,65 +4,51 @@ from sklearn.model_selection import KFold, cross_val_score from opytimizer import Opytimizer -from opytimizer.core.function import Function -from opytimizer.optimizers.swarm.pso import PSO -from opytimizer.spaces.search import SearchSpace +from opytimizer.core import Function +from opytimizer.optimizers.swarm import PSO +from opytimizer.spaces import SearchSpace -# Loading digits dataset +# Loads digits dataset digits = load_digits() -# Gathering samples and targets +# Gathers samples and targets X = digits.data Y = digits.target def _svm(opytimizer): - # Gathering hyperparams + # Gathers params C = opytimizer[0][0] # Instanciating an SVC class svc = svm.SVC(C=C, kernel='linear') - # Creating a cross-validation holder + # Creates a cross-validation holder k_fold = KFold(n_splits=5) # Fitting model using cross-validation scores = cross_val_score(svc, X, Y, cv=k_fold, n_jobs=-1) - # Calculating scores mean + # Calculates scores mean mean_score = np.mean(scores) return 1 - mean_score -# Creating Function's object -f = Function(pointer=_svm) - -# Number of agents, decision variables and iterations +# Number of agents and decision variables n_agents = 10 n_variables = 1 -n_iterations = 100 - -# Lower and upper bounds (has to be the same size as n_variables) -lower_bound = (0.00001,) -upper_bound = 
(10,) - -# Creating the SearchSpace class -s = SearchSpace(n_agents=n_agents, n_iterations=n_iterations, - n_variables=n_variables, lower_bound=lower_bound, - upper_bound=upper_bound) -# Hyperparameters for the optimizer -hyperparams = { - 'w': 0.7, - 'c1': 1.7, - 'c2': 1.7 -} +# Lower and upper bounds (has to be the same size as `n_variables`) +lower_bound = [0.000001] +upper_bound = [10] -# Creating PSO's optimizer -p = PSO(hyperparams=hyperparams) +# Creates the space, optimizer and function +space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound) +optimizer = PSO() +function = Function(_svm) -# Finally, we can create an Opytimizer class -o = Opytimizer(space=s, optimizer=p, function=f) +# Bundles every piece into Opytimizer class +opt = Opytimizer(space, optimizer, function) -# Running the optimization task -history = o.start() +# Runs the optimization task +opt.start(n_iterations=100) diff --git a/examples/integrations/tensorflow/cnn.py b/examples/integrations/tensorflow/cnn.py index e1ee940e..926c6810 100644 --- a/examples/integrations/tensorflow/cnn.py +++ b/examples/integrations/tensorflow/cnn.py @@ -4,19 +4,19 @@ from tensorflow.keras import datasets, layers, models, optimizers from opytimizer import Opytimizer -from opytimizer.core.function import Function -from opytimizer.optimizers.swarm.pso import PSO -from opytimizer.spaces.search import SearchSpace +from opytimizer.core import Function +from opytimizer.optimizers.swarm import PSO +from opytimizer.spaces import SearchSpace -# Loading CIFAR-10 data +# Loads CIFAR-10 data (X_train, Y_train), (X_val, Y_val) = datasets.cifar10.load_data() -# Normalizing inputs between 0 and 1 +# Normalizes inputs between 0 and 1 X_train, X_val = X_train / 255.0, X_val / 255.0 def cnn(opytimizer): - # Gathering parameters from Opytimizer + # Gathers parameters from Opytimizer # Pay extremely attention to their order when declaring due to their bounds learning_rate = opytimizer[0][0] beta_1 = opytimizer[1][0] 
@@ -43,7 +43,7 @@ def cnn(opytimizer): # Fitting the model history = model.fit(X_train, Y_train, epochs=3, validation_data=(X_val, Y_val)) - # Gathering validation accuracy + # Gathers validation accuracy val_acc = history.history['val_accuracy'][-1] # Cleaning up memory @@ -56,35 +56,21 @@ def cnn(opytimizer): return 1 - val_acc -# Creating Function's object -f = Function(pointer=cnn) - -# Number of agents, decision variables and iterations +# Number of agents and decision variables n_agents = 5 n_variables = 2 -n_iterations = 3 - -# Lower and upper bounds (has to be the same size as n_variables) -lower_bound = (0, 0) -upper_bound = (0.001, 1) - -# Creating the SearchSpace class -s = SearchSpace(n_agents=n_agents, n_iterations=n_iterations, - n_variables=n_variables, lower_bound=lower_bound, - upper_bound=upper_bound) -# Hyperparameters for the optimizer -hyperparams = { - 'w': 0.7, - 'c1': 1.7, - 'c2': 1.7 -} +# Lower and upper bounds (has to be the same size as `n_variables`) +lower_bound = [0, 0] +upper_bound = [0.001, 1] -# Creating PSO's optimizer -p = PSO(hyperparams=hyperparams) +# Creates the space, optimizer and function +space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound) +optimizer = PSO() +function = Function(cnn) -# Finally, we can create an Opytimizer class -o = Opytimizer(space=s, optimizer=p, function=f) +# Bundles every piece into Opytimizer class +opt = Opytimizer(space, optimizer, function) -# Running the optimization task -history = o.start() +# Runs the optimization task +opt.start(n_iterations=3) diff --git a/examples/math/calculate_hypercomplex_numbers.py b/examples/math/calculate_hypercomplex_numbers.py index dc8dfa9c..14488eaa 100644 --- a/examples/math/calculate_hypercomplex_numbers.py +++ b/examples/math/calculate_hypercomplex_numbers.py @@ -2,15 +2,15 @@ import opytimizer.math.hyper as h -# Creating an array with ones +# Creates an array with ones a = np.ones((2, 4)) print(f'Array: {a}') -# Declaring lower and upper 
bounds +# Declares lower and upper bounds lb = np.array([-5, -5]) ub = np.array([-2, -2]) -# Calculating the hypercomplex number norm +# Calculates the hypercomplex number norm norm = h.norm(a) print(f'Norm Array: {norm}') diff --git a/examples/math/general_purpose.py b/examples/math/general_purpose.py index 8c51250d..966f9029 100644 --- a/examples/math/general_purpose.py +++ b/examples/math/general_purpose.py @@ -1,14 +1,14 @@ import opytimizer.math.general as g -# Creating a list for pairwising +# Creates a list for pairwising individuals = [1, 2, 3, 4] -# Creating pairwise from list +# Creates pairwise from list for pair in g.n_wise(individuals, 2): # Outputting pairs print(f'Pair: {pair}') -# Performing a tournmanet selection over list +# Performs a tournmanet selection over list selected = g.tournament_selection(individuals, 2) # Outputting selected individuals diff --git a/examples/math/generate_distributions.py b/examples/math/generate_distributions.py index ad1d03e8..3ed395a9 100644 --- a/examples/math/generate_distributions.py +++ b/examples/math/generate_distributions.py @@ -1,13 +1,13 @@ import opytimizer.math.distribution as d -# Generating a Bernoulli distribution +# Generates a Bernoulli distribution b = d.generate_bernoulli_distribution(prob=0.5, size=10) print(b) -# Generating a choice distribution +# Generates a choice distribution c = d.generate_choice_distribution(n=10, probs=None, size=10) print(c) -# Generating a Lévy distribution +# Generates a Lévy distribution l = d.generate_levy_distribution(beta=0.5, size=10) print(l) diff --git a/examples/math/generate_random_numbers.py b/examples/math/generate_random_numbers.py index 15497f4a..e576657a 100644 --- a/examples/math/generate_random_numbers.py +++ b/examples/math/generate_random_numbers.py @@ -1,21 +1,21 @@ import opytimizer.math.random as r -# Generating a binary random number array +# Generates a binary random number array b = r.generate_binary_random_number(size=10) print(b) -# Generating 
an Erlang/gamma random number array +# Generates an Erlang/gamma random number array e = r.generate_gamma_random_number(shape=1.0, scale=1.0, size=10) print(e) -# Generating an integer random number array +# Generates an integer random number array i = r.generate_integer_random_number(low=0, high=1, size=1) print(i) -# Generating a random uniform number array +# Generates a random uniform number array u = r.generate_uniform_random_number(low=0.0, high=1.0, size=1) print(u) -# Generating a random gaussian number array +# Generates a random gaussian number array g = r.generate_gaussian_random_number(mean=0.5, variance=1.0, size=10) print(g) diff --git a/examples/optimizers/boolean/create_bmrfo.py b/examples/optimizers/boolean/create_bmrfo.py index 625fe87e..afdc569f 100644 --- a/examples/optimizers/boolean/create_bmrfo.py +++ b/examples/optimizers/boolean/create_bmrfo.py @@ -4,9 +4,9 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'S': np.array([1]) } -# Creating a BMRFO optimizer -o = BMRFO(hyperparams=hyperparams) +# Creates a BMRFO optimizer +o = BMRFO(params=params) diff --git a/examples/optimizers/boolean/create_bpso.py b/examples/optimizers/boolean/create_bpso.py index a05b0062..4a13dd0d 100644 --- a/examples/optimizers/boolean/create_bpso.py +++ b/examples/optimizers/boolean/create_bpso.py @@ -4,11 +4,11 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'w': np.array([1]), 'c1': np.array([0]), 'c2': np.array([1]) } -# Creating a BPSO optimizer -o = BPSO(hyperparams=hyperparams) +# Creates a BPSO optimizer +o = BPSO(params=params) diff --git a/examples/optimizers/boolean/create_umda.py b/examples/optimizers/boolean/create_umda.py index 30cf4558..a3dfad60 100644 --- a/examples/optimizers/boolean/create_umda.py +++ b/examples/optimizers/boolean/create_umda.py @@ -2,11 +2,11 @@ # One should declare a 
hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'p_selection': 0.75, 'lower_bound': 0.05, 'upper_bound': 0.95 } -# Creating a UMDA optimizer -o = UMDA(hyperparams=hyperparams) +# Creates a UMDA optimizer +o = UMDA(params=params) diff --git a/examples/optimizers/evolutionary/create_bsa.py b/examples/optimizers/evolutionary/create_bsa.py index 63b045da..a6f4f973 100644 --- a/examples/optimizers/evolutionary/create_bsa.py +++ b/examples/optimizers/evolutionary/create_bsa.py @@ -2,10 +2,10 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'F': 3.0, 'mix_rate': 1 } -# Creating a BSA optimizer -o = BSA(hyperparams=hyperparams) +# Creates a BSA optimizer +o = BSA(params=params) diff --git a/examples/optimizers/evolutionary/create_cro.py b/examples/optimizers/evolutionary/create_cro.py index ac0aa426..b2b1e1e5 100644 --- a/examples/optimizers/evolutionary/create_cro.py +++ b/examples/optimizers/evolutionary/create_cro.py @@ -2,9 +2,9 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { } -# Creating a CRO optimizer -o = CRO(hyperparams=hyperparams) +# Creates a CRO optimizer +o = CRO(params=params) diff --git a/examples/optimizers/evolutionary/create_de.py b/examples/optimizers/evolutionary/create_de.py index b92b265d..fd7d1294 100644 --- a/examples/optimizers/evolutionary/create_de.py +++ b/examples/optimizers/evolutionary/create_de.py @@ -2,10 +2,10 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'CR': 0.9, 'F': 0.7 } -# Creating a DE optimizer -o = DE(hyperparams=hyperparams) +# Creates a DE optimizer +o = DE(params=params) diff --git a/examples/optimizers/evolutionary/create_ep.py b/examples/optimizers/evolutionary/create_ep.py index 63458909..720d64ba 100644 --- 
a/examples/optimizers/evolutionary/create_ep.py +++ b/examples/optimizers/evolutionary/create_ep.py @@ -2,10 +2,10 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'bout_size': 0.1, 'clip_ratio': 0.05 } -# Creating an EP optimizer -o = EP(hyperparams=hyperparams) +# Creates an EP optimizer +o = EP(params=params) diff --git a/examples/optimizers/evolutionary/create_es.py b/examples/optimizers/evolutionary/create_es.py index 8df7f835..7e30e23a 100644 --- a/examples/optimizers/evolutionary/create_es.py +++ b/examples/optimizers/evolutionary/create_es.py @@ -2,9 +2,9 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'child_ratio': 0.5 } -# Creating an ES optimizer -o = ES(hyperparams=hyperparams) +# Creates an ES optimizer +o = ES(params=params) diff --git a/examples/optimizers/evolutionary/create_foa.py b/examples/optimizers/evolutionary/create_foa.py index 337bf51a..8e5d8b8d 100644 --- a/examples/optimizers/evolutionary/create_foa.py +++ b/examples/optimizers/evolutionary/create_foa.py @@ -2,9 +2,9 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { } -# Creating a FOA optimizer -o = FOA(hyperparams=hyperparams) +# Creates a FOA optimizer +o = FOA(params=params) diff --git a/examples/optimizers/evolutionary/create_ga.py b/examples/optimizers/evolutionary/create_ga.py index 3a7437d4..c214f709 100644 --- a/examples/optimizers/evolutionary/create_ga.py +++ b/examples/optimizers/evolutionary/create_ga.py @@ -2,11 +2,11 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'p_selection': 0.75, 'p_mutation': 0.25, 'p_crossover': 0.5, } -# Creating a GA optimizer -o = GA(hyperparams=hyperparams) +# Creates a GA optimizer +o = GA(params=params) diff --git 
a/examples/optimizers/evolutionary/create_ghs.py b/examples/optimizers/evolutionary/create_ghs.py index 43215752..dc776599 100644 --- a/examples/optimizers/evolutionary/create_ghs.py +++ b/examples/optimizers/evolutionary/create_ghs.py @@ -2,7 +2,7 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'HMCR': 0.7, 'PAR_min': 0.0, 'PAR_max': 1.0, @@ -10,5 +10,5 @@ 'bw_max': 10.0 } -# Creating an GHS optimizer -o = GHS(hyperparams=hyperparams) +# Creates an GHS optimizer +o = GHS(params=params) diff --git a/examples/optimizers/evolutionary/create_goghs.py b/examples/optimizers/evolutionary/create_goghs.py index a3c5f764..eadbdb74 100644 --- a/examples/optimizers/evolutionary/create_goghs.py +++ b/examples/optimizers/evolutionary/create_goghs.py @@ -2,9 +2,9 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'pm': 0.1 } -# Creating a GOGHS optimizer -o = GOGHS(hyperparams=hyperparams) +# Creates a GOGHS optimizer +o = GOGHS(params=params) diff --git a/examples/optimizers/evolutionary/create_gp.py b/examples/optimizers/evolutionary/create_gp.py index 7aad9cd8..6eeffcf4 100644 --- a/examples/optimizers/evolutionary/create_gp.py +++ b/examples/optimizers/evolutionary/create_gp.py @@ -2,12 +2,12 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'p_reproduction': 0.25, 'p_mutation': 0.1, 'p_crossover': 0.2, 'prunning_ratio': 0.0 } -# Creating a GP optimizer -o = GP(hyperparams=hyperparams) +# Creates a GP optimizer +o = GP(params=params) diff --git a/examples/optimizers/evolutionary/create_hs.py b/examples/optimizers/evolutionary/create_hs.py index a6ba1398..e25e0e23 100644 --- a/examples/optimizers/evolutionary/create_hs.py +++ b/examples/optimizers/evolutionary/create_hs.py @@ -2,11 +2,11 @@ # One should declare a hyperparameters object based 
# on the desired algorithm that will be used -hyperparams = { +params = { 'HMCR': 0.7, 'PAR': 0.7, 'bw': 1.0 } -# Creating a HS optimizer -o = HS(hyperparams=hyperparams) +# Creates a HS optimizer +o = HS(params=params) diff --git a/examples/optimizers/evolutionary/create_ihs.py b/examples/optimizers/evolutionary/create_ihs.py index fe3babfd..7f64e98c 100644 --- a/examples/optimizers/evolutionary/create_ihs.py +++ b/examples/optimizers/evolutionary/create_ihs.py @@ -2,7 +2,7 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'HMCR': 0.7, 'PAR_min': 0.0, 'PAR_max': 1.0, @@ -10,5 +10,5 @@ 'bw_max': 10.0 } -# Creating an IHS optimizer -o = IHS(hyperparams=hyperparams) +# Creates an IHS optimizer +o = IHS(params=params) diff --git a/examples/optimizers/evolutionary/create_iwo.py b/examples/optimizers/evolutionary/create_iwo.py index e4011a18..4ab38fc9 100644 --- a/examples/optimizers/evolutionary/create_iwo.py +++ b/examples/optimizers/evolutionary/create_iwo.py @@ -2,7 +2,7 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'min_seeds': 0, 'max_seeds': 5, 'e': 2, @@ -10,5 +10,5 @@ 'final_sigma': 0.001 } -# Creating an IWO optimizer -o = IWO(hyperparams=hyperparams) +# Creates an IWO optimizer +o = IWO(params=params) diff --git a/examples/optimizers/evolutionary/create_nghs.py b/examples/optimizers/evolutionary/create_nghs.py index a6d1f4fe..ec1daa58 100644 --- a/examples/optimizers/evolutionary/create_nghs.py +++ b/examples/optimizers/evolutionary/create_nghs.py @@ -2,9 +2,9 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'pm': 0.1 } -# Creating a NGHS optimizer -o = NGHS(hyperparams=hyperparams) +# Creates a NGHS optimizer +o = NGHS(params=params) diff --git a/examples/optimizers/evolutionary/create_rra.py 
b/examples/optimizers/evolutionary/create_rra.py index 51827543..21fb754d 100644 --- a/examples/optimizers/evolutionary/create_rra.py +++ b/examples/optimizers/evolutionary/create_rra.py @@ -2,9 +2,9 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { } -# Creating an RRA optimizer -o = RRA(hyperparams=hyperparams) +# Creates an RRA optimizer +o = RRA(params=params) diff --git a/examples/optimizers/evolutionary/create_sghs.py b/examples/optimizers/evolutionary/create_sghs.py index 893d14a4..c54b9471 100644 --- a/examples/optimizers/evolutionary/create_sghs.py +++ b/examples/optimizers/evolutionary/create_sghs.py @@ -2,7 +2,7 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'HMCR': 0.7, 'PAR': 0.7, 'LP': 100, @@ -12,5 +12,5 @@ 'bw_max': 10.0 } -# Creating a SGHS optimizer -o = SGHS(hyperparams=hyperparams) +# Creates a SGHS optimizer +o = SGHS(params=params) diff --git a/examples/optimizers/misc/create_aoa.py b/examples/optimizers/misc/create_aoa.py index 0a6595ae..e994bef5 100644 --- a/examples/optimizers/misc/create_aoa.py +++ b/examples/optimizers/misc/create_aoa.py @@ -2,12 +2,12 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'a_min': 0.2, 'a_max': 1.0, 'alpha': 5, 'mu': 0.499 } -# Creating an AOA optimizer -o = AOA(hyperparams=hyperparams) +# Creates an AOA optimizer +o = AOA(params=params) diff --git a/examples/optimizers/misc/create_cem.py b/examples/optimizers/misc/create_cem.py index b393ef11..5d3a0636 100644 --- a/examples/optimizers/misc/create_cem.py +++ b/examples/optimizers/misc/create_cem.py @@ -2,10 +2,10 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'n_updates': 5, 'alpha': 0.7 } -# Creating a CEM optimizer -o = 
CEM(hyperparams=hyperparams) +# Creates a CEM optimizer +o = CEM(params=params) diff --git a/examples/optimizers/misc/create_doa.py b/examples/optimizers/misc/create_doa.py index aa794499..4b39dac9 100644 --- a/examples/optimizers/misc/create_doa.py +++ b/examples/optimizers/misc/create_doa.py @@ -2,9 +2,9 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'r': 1.0 } -# Creating a DOA optimizer -o = DOA(hyperparams=hyperparams) +# Creates a DOA optimizer +o = DOA(params=params) diff --git a/examples/optimizers/misc/create_gs.py b/examples/optimizers/misc/create_gs.py index 0e30d2cb..929128f7 100644 --- a/examples/optimizers/misc/create_gs.py +++ b/examples/optimizers/misc/create_gs.py @@ -1,4 +1,4 @@ from opytimizer.optimizers.misc.gs import GS -# Creating a grid-search optimizer +# Creates a grid-search optimizer o = GS() diff --git a/examples/optimizers/misc/create_hc.py b/examples/optimizers/misc/create_hc.py index 18bcf29c..6c356ead 100644 --- a/examples/optimizers/misc/create_hc.py +++ b/examples/optimizers/misc/create_hc.py @@ -2,10 +2,10 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'r_mean': 0, 'r_var': 0.1 } -# Creating a HC optimizer -o = HC(hyperparams=hyperparams) +# Creates a HC optimizer +o = HC(params=params) diff --git a/examples/optimizers/population/create_aeo.py b/examples/optimizers/population/create_aeo.py index 33c89fa4..35abf4c2 100644 --- a/examples/optimizers/population/create_aeo.py +++ b/examples/optimizers/population/create_aeo.py @@ -1,4 +1,4 @@ from opytimizer.optimizers.population.aeo import AEO -# Creating an AEO optimizer +# Creates an AEO optimizer o = AEO() diff --git a/examples/optimizers/population/create_ao.py b/examples/optimizers/population/create_ao.py index 79a79bab..4ed45da7 100644 --- a/examples/optimizers/population/create_ao.py +++ 
b/examples/optimizers/population/create_ao.py @@ -2,13 +2,13 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { - 'alpha': 0.1 +params = { + 'alpha': 0.1, 'delta': 0.1, 'n_cycles': 10, 'U': 0.00565, 'w': 0.005 } -# Creating an AO optimizer -o = AO(hyperparams=hyperparams) +# Creates an AO optimizer +o = AO(params=params) diff --git a/examples/optimizers/population/create_coa.py b/examples/optimizers/population/create_coa.py index 0e0d3be0..7c5b7c36 100644 --- a/examples/optimizers/population/create_coa.py +++ b/examples/optimizers/population/create_coa.py @@ -2,9 +2,9 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'n_p': 2 } -# Creating a COA optimizer -o = COA(hyperparams=hyperparams) +# Creates a COA optimizer +o = COA(params=params) diff --git a/examples/optimizers/population/create_epo.py b/examples/optimizers/population/create_epo.py index 3f90371a..2d22795b 100644 --- a/examples/optimizers/population/create_epo.py +++ b/examples/optimizers/population/create_epo.py @@ -2,10 +2,10 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'f': 2.0, 'l': 1.5 } -# Creating an EPO optimizer -o = EPO(hyperparams=hyperparams) +# Creates an EPO optimizer +o = EPO(params=params) diff --git a/examples/optimizers/population/create_gco.py b/examples/optimizers/population/create_gco.py index e53f829e..538b2fa8 100644 --- a/examples/optimizers/population/create_gco.py +++ b/examples/optimizers/population/create_gco.py @@ -2,10 +2,10 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'CR': 0.7, 'F': 1.25 } -# Creating a GCO optimizer -o = GCO(hyperparams=hyperparams) +# Creates a GCO optimizer +o = GCO(params=params) diff --git a/examples/optimizers/population/create_gwo.py 
b/examples/optimizers/population/create_gwo.py index ead9a48c..e780f131 100644 --- a/examples/optimizers/population/create_gwo.py +++ b/examples/optimizers/population/create_gwo.py @@ -1,4 +1,4 @@ from opytimizer.optimizers.population.gwo import GWO -# Creating a GWO optimizer +# Creates a GWO optimizer o = GWO() diff --git a/examples/optimizers/population/create_hho.py b/examples/optimizers/population/create_hho.py index 4091779d..213de66b 100644 --- a/examples/optimizers/population/create_hho.py +++ b/examples/optimizers/population/create_hho.py @@ -1,4 +1,4 @@ from opytimizer.optimizers.population.hho import HHO -# Creating an HHO optimizer +# Creates an HHO optimizer o = HHO() diff --git a/examples/optimizers/population/create_loa.py b/examples/optimizers/population/create_loa.py index e0928593..f1d82977 100644 --- a/examples/optimizers/population/create_loa.py +++ b/examples/optimizers/population/create_loa.py @@ -2,8 +2,8 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { } -# Creating an LOA optimizer -o = LOA(hyperparams=hyperparams) +# Creates an LOA optimizer +o = LOA(params=params) diff --git a/examples/optimizers/population/create_lpoa.py b/examples/optimizers/population/create_lpoa.py index 0e06e1f7..07277370 100644 --- a/examples/optimizers/population/create_lpoa.py +++ b/examples/optimizers/population/create_lpoa.py @@ -2,8 +2,8 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { } -# Creating an LPOA optimizer -o = LPOA(hyperparams=hyperparams) +# Creates an LPOA optimizer +o = LPOA(params=params) diff --git a/examples/optimizers/population/create_ppa.py b/examples/optimizers/population/create_ppa.py index 7d74148e..8cd1c64b 100644 --- a/examples/optimizers/population/create_ppa.py +++ b/examples/optimizers/population/create_ppa.py @@ -2,8 +2,8 @@ # One should declare a hyperparameters object 
based # on the desired algorithm that will be used -hyperparams = { +params = { } -# Creating a PPA optimizer -o = PPA(hyperparams=hyperparams) +# Creates a PPA optimizer +o = PPA(params=params) diff --git a/examples/optimizers/population/create_pvs.py b/examples/optimizers/population/create_pvs.py index fe0b5e89..71e6d139 100644 --- a/examples/optimizers/population/create_pvs.py +++ b/examples/optimizers/population/create_pvs.py @@ -2,8 +2,8 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { } -# Creating a PVS optimizer -o = PVS(hyperparams=hyperparams) +# Creates a PVS optimizer +o = PVS(params=params) diff --git a/examples/optimizers/science/create_aig.py b/examples/optimizers/science/create_aig.py index 29313dcd..3beb6e5d 100644 --- a/examples/optimizers/science/create_aig.py +++ b/examples/optimizers/science/create_aig.py @@ -2,9 +2,9 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { } -# Creating an AIG optimizer -o = AIG(hyperparams=hyperparams) +# Creates an AIG optimizer +o = AIG(params=params) diff --git a/examples/optimizers/science/create_aso.py b/examples/optimizers/science/create_aso.py index 40988e4a..2241ef97 100644 --- a/examples/optimizers/science/create_aso.py +++ b/examples/optimizers/science/create_aso.py @@ -2,10 +2,10 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'alpha': 50.0, 'beta': 0.2 } -# Creating an ASO optimizer -o = ASO(hyperparams=hyperparams) +# Creates an ASO optimizer +o = ASO(params=params) diff --git a/examples/optimizers/science/create_bh.py b/examples/optimizers/science/create_bh.py index 367f6d73..671c5f6f 100644 --- a/examples/optimizers/science/create_bh.py +++ b/examples/optimizers/science/create_bh.py @@ -1,4 +1,4 @@ from opytimizer.optimizers.science.bh import BH -# Creating a BH 
optimizer +# Creates a BH optimizer o = BH() diff --git a/examples/optimizers/science/create_efo.py b/examples/optimizers/science/create_efo.py index a4f8abdd..2de3ad7b 100644 --- a/examples/optimizers/science/create_efo.py +++ b/examples/optimizers/science/create_efo.py @@ -2,12 +2,12 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'positive_field': 0.1, 'negative_field': 0.5, 'ps_ratio': 0.1, 'r_ratio': 0.4, } -# Creating an EFO optimizer -o = EFO(hyperparams=hyperparams) +# Creates an EFO optimizer +o = EFO(params=params) diff --git a/examples/optimizers/science/create_eo.py b/examples/optimizers/science/create_eo.py index c3bda75b..50bb9535 100644 --- a/examples/optimizers/science/create_eo.py +++ b/examples/optimizers/science/create_eo.py @@ -2,12 +2,12 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'a1': 2, 'a2': 1, 'GP': 0.5, 'V': 1 } -# Creating an EO optimizer -o = EO(hyperparams=hyperparams) +# Creates an EO optimizer +o = EO(params=params) diff --git a/examples/optimizers/science/create_esa.py b/examples/optimizers/science/create_esa.py index a965df25..6f5f285c 100644 --- a/examples/optimizers/science/create_esa.py +++ b/examples/optimizers/science/create_esa.py @@ -2,9 +2,9 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { } -# Creating an ESA optimizer -o = ESA(hyperparams=hyperparams) +# Creates an ESA optimizer +o = ESA(params=params) diff --git a/examples/optimizers/science/create_gsa.py b/examples/optimizers/science/create_gsa.py index b287dafe..3906f79f 100644 --- a/examples/optimizers/science/create_gsa.py +++ b/examples/optimizers/science/create_gsa.py @@ -2,9 +2,9 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'G': 2.467 } -# 
Creating a GSA optimizer -o = GSA(hyperparams=hyperparams) +# Creates a GSA optimizer +o = GSA(params=params) diff --git a/examples/optimizers/science/create_hgso.py b/examples/optimizers/science/create_hgso.py index b35efbc6..e588c24c 100644 --- a/examples/optimizers/science/create_hgso.py +++ b/examples/optimizers/science/create_hgso.py @@ -2,7 +2,7 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'n_clusters': 2, 'l1': 0.0005, 'l2': 100, @@ -12,5 +12,5 @@ 'K': 1.0 } -# Creating an HGSO optimizer -o = HGSO(hyperparams=hyperparams) +# Creates an HGSO optimizer +o = HGSO(params=params) diff --git a/examples/optimizers/science/create_lsa.py b/examples/optimizers/science/create_lsa.py index 1162e7c1..df03334c 100644 --- a/examples/optimizers/science/create_lsa.py +++ b/examples/optimizers/science/create_lsa.py @@ -2,9 +2,9 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { } -# Creating an LSA optimizer -o = LSA(hyperparams=hyperparams) +# Creates an LSA optimizer +o = LSA(params=params) diff --git a/examples/optimizers/science/create_moa.py b/examples/optimizers/science/create_moa.py index 05b96f16..a609ea01 100644 --- a/examples/optimizers/science/create_moa.py +++ b/examples/optimizers/science/create_moa.py @@ -2,9 +2,9 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { } -# Creating a MOA optimizer -o = MOA(hyperparams=hyperparams) +# Creates a MOA optimizer +o = MOA(params=params) diff --git a/examples/optimizers/science/create_mvo.py b/examples/optimizers/science/create_mvo.py index ff5001dd..e33a7a11 100644 --- a/examples/optimizers/science/create_mvo.py +++ b/examples/optimizers/science/create_mvo.py @@ -2,11 +2,11 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = 
{ +params = { 'WEP_min': 0.2, 'WEP_max': 1, 'p': 6 } -# Creating a MVO optimizer -o = MVO(hyperparams=hyperparams) +# Creates a MVO optimizer +o = MVO(params=params) diff --git a/examples/optimizers/science/create_sa.py b/examples/optimizers/science/create_sa.py index 53a2f2a4..ce1d2f46 100644 --- a/examples/optimizers/science/create_sa.py +++ b/examples/optimizers/science/create_sa.py @@ -2,10 +2,10 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'T': 100, 'beta': 0.999 } -# Creating a SA optimizer -o = SA(hyperparams=hyperparams) +# Creates a SA optimizer +o = SA(params=params) diff --git a/examples/optimizers/science/create_two.py b/examples/optimizers/science/create_two.py index 95107a34..da01782e 100644 --- a/examples/optimizers/science/create_two.py +++ b/examples/optimizers/science/create_two.py @@ -2,7 +2,7 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'mu_s': 1, 'mu_k': 1, 'delta_t': 1, @@ -10,5 +10,5 @@ 'beta': 0.05 } -# Creating a TWO optimizer -o = TWO(hyperparams=hyperparams) +# Creates a TWO optimizer +o = TWO(params=params) diff --git a/examples/optimizers/science/create_wca.py b/examples/optimizers/science/create_wca.py index 43083700..1ef843d4 100644 --- a/examples/optimizers/science/create_wca.py +++ b/examples/optimizers/science/create_wca.py @@ -2,10 +2,10 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'nsr': 10, 'd_max': 0.1 } -# Creating a WCA optimizer -o = WCA(hyperparams=hyperparams) +# Creates a WCA optimizer +o = WCA(params=params) diff --git a/examples/optimizers/science/create_wdo.py b/examples/optimizers/science/create_wdo.py index 72515145..9954ec3d 100644 --- a/examples/optimizers/science/create_wdo.py +++ b/examples/optimizers/science/create_wdo.py @@ -2,7 +2,7 @@ # One should declare a 
hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'v_max': 0.3, 'alpha': 0.8, 'g': 0.6, @@ -10,5 +10,5 @@ 'RT': 1.5 } -# Creating a WDO optimizer -o = WDO(hyperparams=hyperparams) +# Creates a WDO optimizer +o = WDO(params=params) diff --git a/examples/optimizers/science/create_weo.py b/examples/optimizers/science/create_weo.py index 0fb6ec2c..b10fb2fc 100644 --- a/examples/optimizers/science/create_weo.py +++ b/examples/optimizers/science/create_weo.py @@ -2,9 +2,9 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { } -# Creating a WEO optimizer -o = WEO(hyperparams=hyperparams) +# Creates a WEO optimizer +o = WEO(params=params) diff --git a/examples/optimizers/science/create_wwo.py b/examples/optimizers/science/create_wwo.py index 67c541b2..f10b3930 100644 --- a/examples/optimizers/science/create_wwo.py +++ b/examples/optimizers/science/create_wwo.py @@ -2,12 +2,12 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'h_max': 5, 'alpha': 1.001, 'beta': 0.001, 'k_max': 1 } -# Creating a WWO optimizer -o = WWO(hyperparams=hyperparams) +# Creates a WWO optimizer +o = WWO(params=params) diff --git a/examples/optimizers/social/create_bso.py b/examples/optimizers/social/create_bso.py index acc980e9..a7668c77 100644 --- a/examples/optimizers/social/create_bso.py +++ b/examples/optimizers/social/create_bso.py @@ -2,9 +2,9 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { } -# Creating an BSO optimizer -o = BSO(hyperparams=hyperparams) +# Creates an BSO optimizer +o = BSO(params=params) diff --git a/examples/optimizers/social/create_ci.py b/examples/optimizers/social/create_ci.py index a562bbe2..4a40a3b0 100644 --- a/examples/optimizers/social/create_ci.py +++ 
b/examples/optimizers/social/create_ci.py @@ -2,9 +2,9 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { } -# Creating an CI optimizer -o = CI(hyperparams=hyperparams) +# Creates an CI optimizer +o = CI(params=params) diff --git a/examples/optimizers/social/create_isa.py b/examples/optimizers/social/create_isa.py index 252b1890..7ba4ba95 100644 --- a/examples/optimizers/social/create_isa.py +++ b/examples/optimizers/social/create_isa.py @@ -2,9 +2,9 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { } -# Creating an ISA optimizer -o = ISA(hyperparams=hyperparams) +# Creates an ISA optimizer +o = ISA(params=params) diff --git a/examples/optimizers/social/create_mvpa.py b/examples/optimizers/social/create_mvpa.py index 5c880332..b8deaa8e 100644 --- a/examples/optimizers/social/create_mvpa.py +++ b/examples/optimizers/social/create_mvpa.py @@ -2,9 +2,9 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { } -# Creating a MVPA optimizer -o = MVPA(hyperparams=hyperparams) +# Creates a MVPA optimizer +o = MVPA(params=params) diff --git a/examples/optimizers/social/create_qsa.py b/examples/optimizers/social/create_qsa.py index b820d6cd..aceaeeec 100644 --- a/examples/optimizers/social/create_qsa.py +++ b/examples/optimizers/social/create_qsa.py @@ -1,4 +1,4 @@ from opytimizer.optimizers.social.qsa import QSA -# Creating an QSA optimizer +# Creates an QSA optimizer o = QSA() diff --git a/examples/optimizers/social/create_ssd.py b/examples/optimizers/social/create_ssd.py index 2a09268c..aebec29a 100644 --- a/examples/optimizers/social/create_ssd.py +++ b/examples/optimizers/social/create_ssd.py @@ -2,10 +2,10 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'c': 2.0, 
'decay': 0.99 } -# Creating an SSD optimizer -o = SSD(hyperparams=hyperparams) +# Creates an SSD optimizer +o = SSD(params=params) diff --git a/examples/optimizers/swarm/create_abc.py b/examples/optimizers/swarm/create_abc.py index 69b69549..019fb01b 100644 --- a/examples/optimizers/swarm/create_abc.py +++ b/examples/optimizers/swarm/create_abc.py @@ -2,9 +2,9 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'n_trials': 10 } -# Creating an ABC optimizer -o = ABC(hyperparams=hyperparams) +# Creates an ABC optimizer +o = ABC(params=params) diff --git a/examples/optimizers/swarm/create_abo.py b/examples/optimizers/swarm/create_abo.py index 79b9ee0e..c23a72f5 100644 --- a/examples/optimizers/swarm/create_abo.py +++ b/examples/optimizers/swarm/create_abo.py @@ -2,10 +2,10 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'sunspot_ratio': 0.9, 'a': 2.0 } -# Creating an ABO optimizer -o = ABO(hyperparams=hyperparams) +# Creates an ABO optimizer +o = ABO(params=params) diff --git a/examples/optimizers/swarm/create_af.py b/examples/optimizers/swarm/create_af.py index a78e3541..66ca8216 100644 --- a/examples/optimizers/swarm/create_af.py +++ b/examples/optimizers/swarm/create_af.py @@ -2,9 +2,9 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { } -# Creating an AF optimizer -o = AF(hyperparams=hyperparams) +# Creates an AF optimizer +o = AF(params=params) diff --git a/examples/optimizers/swarm/create_aiwpso.py b/examples/optimizers/swarm/create_aiwpso.py index c2a9ccc6..f0ab08e2 100644 --- a/examples/optimizers/swarm/create_aiwpso.py +++ b/examples/optimizers/swarm/create_aiwpso.py @@ -2,7 +2,7 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'w': 0.7, 'w_min': 
0.1, 'w_max': 0.9, @@ -10,5 +10,5 @@ 'c2': 2 } -# Creating an AIWPSO optimizer -o = AIWPSO(hyperparams=hyperparams) +# Creates an AIWPSO optimizer +o = AIWPSO(params=params) diff --git a/examples/optimizers/swarm/create_ba.py b/examples/optimizers/swarm/create_ba.py index 7e8c8af0..3acf4948 100644 --- a/examples/optimizers/swarm/create_ba.py +++ b/examples/optimizers/swarm/create_ba.py @@ -2,12 +2,12 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'f_min': 0, 'f_max': 2, 'A': 0.5, 'r': 0.5 } -# Creating a BA optimizer -o = BA(hyperparams=hyperparams) +# Creates a BA optimizer +o = BA(params=params) diff --git a/examples/optimizers/swarm/create_bfo.py b/examples/optimizers/swarm/create_bfo.py index 64338b29..59dfb98f 100644 --- a/examples/optimizers/swarm/create_bfo.py +++ b/examples/optimizers/swarm/create_bfo.py @@ -2,9 +2,9 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { } -# Creating an BFO optimizer -o = BFO(hyperparams=hyperparams) +# Creates an BFO optimizer +o = BFO(params=params) diff --git a/examples/optimizers/swarm/create_boa.py b/examples/optimizers/swarm/create_boa.py index cf08fa75..484bee32 100644 --- a/examples/optimizers/swarm/create_boa.py +++ b/examples/optimizers/swarm/create_boa.py @@ -2,11 +2,11 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'c': 0.01, 'a': 0.1, 'p': 0.8 } -# Creating a BOA optimizer -o = BOA(hyperparams=hyperparams) +# Creates a BOA optimizer +o = BOA(params=params) diff --git a/examples/optimizers/swarm/create_bwo.py b/examples/optimizers/swarm/create_bwo.py index c58f097d..1548a158 100644 --- a/examples/optimizers/swarm/create_bwo.py +++ b/examples/optimizers/swarm/create_bwo.py @@ -2,11 +2,11 @@ # One should declare a hyperparameters object based # on the desired algorithm that will 
be used -hyperparams = { +params = { 'pp': 0.6, 'cr': 0.44, 'pm': 0.4 } -# Creating a BWO optimizer -o = BWO(hyperparams=hyperparams) +# Creates a BWO optimizer +o = BWO(params=params) diff --git a/examples/optimizers/swarm/create_cs.py b/examples/optimizers/swarm/create_cs.py index e6a86f47..2d11aedb 100644 --- a/examples/optimizers/swarm/create_cs.py +++ b/examples/optimizers/swarm/create_cs.py @@ -2,11 +2,11 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'alpha': 0.3, 'beta': 1.5, 'p': 0.2 } -# Creating a CS optimizer -o = CS(hyperparams=hyperparams) +# Creates a CS optimizer +o = CS(params=params) diff --git a/examples/optimizers/swarm/create_csa.py b/examples/optimizers/swarm/create_csa.py index 25d5682f..16f593aa 100644 --- a/examples/optimizers/swarm/create_csa.py +++ b/examples/optimizers/swarm/create_csa.py @@ -2,10 +2,10 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'fl': 2.0, 'AP': 0.1 } -# Creating a CSA optimizer -o = CSA(hyperparams=hyperparams) +# Creates a CSA optimizer +o = CSA(params=params) diff --git a/examples/optimizers/swarm/create_eho.py b/examples/optimizers/swarm/create_eho.py index df5d4e91..31721f5b 100644 --- a/examples/optimizers/swarm/create_eho.py +++ b/examples/optimizers/swarm/create_eho.py @@ -2,11 +2,11 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'alpha': 0.5, 'beta': 0.1, 'n_clans': 10 } -# Creating an EHO optimizer -o = EHO(hyperparams=hyperparams) +# Creates an EHO optimizer +o = EHO(params=params) diff --git a/examples/optimizers/swarm/create_fa.py b/examples/optimizers/swarm/create_fa.py index 00e9c27f..ef22d569 100644 --- a/examples/optimizers/swarm/create_fa.py +++ b/examples/optimizers/swarm/create_fa.py @@ -2,11 +2,11 @@ # One should declare a hyperparameters object based # 
on the desired algorithm that will be used -hyperparams = { +params = { 'alpha': 0.5, 'beta': 0.2, 'gamma': 1.0 } -# Creating a FA optimizer -o = FA(hyperparams=hyperparams) +# Creates a FA optimizer +o = FA(params=params) diff --git a/examples/optimizers/swarm/create_ffoa.py b/examples/optimizers/swarm/create_ffoa.py index c46a434a..1ef8e7af 100644 --- a/examples/optimizers/swarm/create_ffoa.py +++ b/examples/optimizers/swarm/create_ffoa.py @@ -2,9 +2,9 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { } -# Creating a FFOA optimizer -o = FFOA(hyperparams=hyperparams) +# Creates a FFOA optimizer +o = FFOA(params=params) diff --git a/examples/optimizers/swarm/create_fpa.py b/examples/optimizers/swarm/create_fpa.py index 3ae6f402..5deb02e8 100644 --- a/examples/optimizers/swarm/create_fpa.py +++ b/examples/optimizers/swarm/create_fpa.py @@ -2,11 +2,11 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'beta': 1.5, 'eta': 0.2, 'p': 0.8 } -# Creating a FPA optimizer -o = FPA(hyperparams=hyperparams) +# Creates a FPA optimizer +o = FPA(params=params) diff --git a/examples/optimizers/swarm/create_fso.py b/examples/optimizers/swarm/create_fso.py index ad3f4ffb..020e4970 100644 --- a/examples/optimizers/swarm/create_fso.py +++ b/examples/optimizers/swarm/create_fso.py @@ -2,9 +2,9 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { } -# Creating a FSO optimizer -o = FSO(hyperparams=hyperparams) +# Creates a FSO optimizer +o = FSO(params=params) diff --git a/examples/optimizers/swarm/create_goa.py b/examples/optimizers/swarm/create_goa.py index 3edaac0f..2ea68dfb 100644 --- a/examples/optimizers/swarm/create_goa.py +++ b/examples/optimizers/swarm/create_goa.py @@ -2,12 +2,12 @@ # One should declare a hyperparameters object based # on the 
desired algorithm that will be used -hyperparams = { +params = { 'c_min': 0.00001, 'c_max': 1, 'f': 0.5, 'l': 1.5 } -# Creating a GOA optimizer -o = GOA(hyperparams=hyperparams) +# Creates a GOA optimizer +o = GOA(params=params) diff --git a/examples/optimizers/swarm/create_js.py b/examples/optimizers/swarm/create_js.py index 2b57bd37..91269914 100644 --- a/examples/optimizers/swarm/create_js.py +++ b/examples/optimizers/swarm/create_js.py @@ -2,11 +2,11 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'eta': 4.0, 'beta': 3.0, 'gamma': 0.1 } -# Creating a JS optimizer -o = JS(hyperparams=hyperparams) +# Creates a JS optimizer +o = JS(params=params) diff --git a/examples/optimizers/swarm/create_kh.py b/examples/optimizers/swarm/create_kh.py index 4141320b..f0d1ba53 100644 --- a/examples/optimizers/swarm/create_kh.py +++ b/examples/optimizers/swarm/create_kh.py @@ -2,7 +2,7 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'N_max': 0.01, 'w_n': 0.42, 'NN': 5, @@ -14,5 +14,5 @@ 'Mu': 0.05 } -# Creating a KH optimizer -o = KH(hyperparams=hyperparams) +# Creates a KH optimizer +o = KH(params=params) diff --git a/examples/optimizers/swarm/create_mfo.py b/examples/optimizers/swarm/create_mfo.py index c349669e..01d932a7 100644 --- a/examples/optimizers/swarm/create_mfo.py +++ b/examples/optimizers/swarm/create_mfo.py @@ -2,9 +2,9 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'b': 1 } -# Creating a MFO optimizer -o = MFO(hyperparams=hyperparams) +# Creates a MFO optimizer +o = MFO(params=params) diff --git a/examples/optimizers/swarm/create_mrfo.py b/examples/optimizers/swarm/create_mrfo.py index 8b826036..c9980b42 100644 --- a/examples/optimizers/swarm/create_mrfo.py +++ b/examples/optimizers/swarm/create_mrfo.py @@ -2,9 +2,9 @@ # One 
should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'S': 2.0 } -# Creating an MRFO optimizer -o = MRFO(hyperparams=hyperparams) +# Creates an MRFO optimizer +o = MRFO(params=params) diff --git a/examples/optimizers/swarm/create_nbjs.py b/examples/optimizers/swarm/create_nbjs.py index 760ee653..d92d3473 100644 --- a/examples/optimizers/swarm/create_nbjs.py +++ b/examples/optimizers/swarm/create_nbjs.py @@ -2,11 +2,11 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'eta': 4.0, 'beta': 3.0, 'gamma': 0.1 } -# Creating a NBJS optimizer -o = NBJS(hyperparams=hyperparams) +# Creates a NBJS optimizer +o = NBJS(params=params) diff --git a/examples/optimizers/swarm/create_pio.py b/examples/optimizers/swarm/create_pio.py index 71335c13..66a7c58f 100644 --- a/examples/optimizers/swarm/create_pio.py +++ b/examples/optimizers/swarm/create_pio.py @@ -2,11 +2,11 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'n_c1': 150, 'n_c2': 200, 'R': 0.2 } -# Creating an PIO optimizer -o = PIO(hyperparams=hyperparams) +# Creates an PIO optimizer +o = PIO(params=params) diff --git a/examples/optimizers/swarm/create_pso.py b/examples/optimizers/swarm/create_pso.py index c877dc9a..0035c6d9 100644 --- a/examples/optimizers/swarm/create_pso.py +++ b/examples/optimizers/swarm/create_pso.py @@ -2,11 +2,11 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'w': 0.7, 'c1': 1.7, 'c2': 1.7 } -# Creating a PSO optimizer -o = PSO(hyperparams=hyperparams) +# Creates a PSO optimizer +o = PSO(params=params) diff --git a/examples/optimizers/swarm/create_rpso.py b/examples/optimizers/swarm/create_rpso.py index e9e3d8fd..d5103da8 100644 --- a/examples/optimizers/swarm/create_rpso.py +++ 
b/examples/optimizers/swarm/create_rpso.py @@ -2,10 +2,10 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'c1': 1.7, 'c2': 1.7 } -# Creating an RPSO optimizer -o = RPSO(hyperparams=hyperparams) +# Creates an RPSO optimizer +o = RPSO(params=params) diff --git a/examples/optimizers/swarm/create_savpso.py b/examples/optimizers/swarm/create_savpso.py index 9577094f..6b4fc178 100644 --- a/examples/optimizers/swarm/create_savpso.py +++ b/examples/optimizers/swarm/create_savpso.py @@ -2,11 +2,11 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'w': 0.7, 'c1': 1.7, 'c2': 1.7 } -# Creating an SAVPSO optimizer -o = SAVPSO(hyperparams=hyperparams) +# Creates an SAVPSO optimizer +o = SAVPSO(params=params) diff --git a/examples/optimizers/swarm/create_sbo.py b/examples/optimizers/swarm/create_sbo.py index 2d0ed964..ab38a58a 100644 --- a/examples/optimizers/swarm/create_sbo.py +++ b/examples/optimizers/swarm/create_sbo.py @@ -2,11 +2,11 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'alpha': 0.9, 'p_mutation': 0.05, 'z': 0.02 } -# Creating a SBO optimizer -o = SBO(hyperparams=hyperparams) +# Creates a SBO optimizer +o = SBO(params=params) diff --git a/examples/optimizers/swarm/create_sca.py b/examples/optimizers/swarm/create_sca.py index 6d91e53d..6f16a7f9 100644 --- a/examples/optimizers/swarm/create_sca.py +++ b/examples/optimizers/swarm/create_sca.py @@ -2,11 +2,11 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'r_min': 0, 'r_max': 2, 'a': 3 } -# Creating a SCA optimizer -o = SCA(hyperparams=hyperparams) +# Creates a SCA optimizer +o = SCA(params=params) diff --git a/examples/optimizers/swarm/create_sfo.py b/examples/optimizers/swarm/create_sfo.py index 
36664b9e..0bfc7a9e 100644 --- a/examples/optimizers/swarm/create_sfo.py +++ b/examples/optimizers/swarm/create_sfo.py @@ -2,11 +2,11 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'PP': 0.1, 'A': 4, 'e': 0.001 } -# Creating a SFO optimizer -o = SFO(hyperparams=hyperparams) +# Creates a SFO optimizer +o = SFO(params=params) diff --git a/examples/optimizers/swarm/create_sos.py b/examples/optimizers/swarm/create_sos.py index 75048490..38028e2c 100644 --- a/examples/optimizers/swarm/create_sos.py +++ b/examples/optimizers/swarm/create_sos.py @@ -1,4 +1,4 @@ from opytimizer.optimizers.swarm.sos import SOS -# Creating an SOS optimizer +# Creates an SOS optimizer o = SOS() diff --git a/examples/optimizers/swarm/create_ssa.py b/examples/optimizers/swarm/create_ssa.py index 8f078d10..520d6b75 100644 --- a/examples/optimizers/swarm/create_ssa.py +++ b/examples/optimizers/swarm/create_ssa.py @@ -1,4 +1,4 @@ from opytimizer.optimizers.swarm.ssa import SSA -# Creating a SSA optimizer +# Creates a SSA optimizer o = SSA() diff --git a/examples/optimizers/swarm/create_sso.py b/examples/optimizers/swarm/create_sso.py index 721e2d25..7c840964 100644 --- a/examples/optimizers/swarm/create_sso.py +++ b/examples/optimizers/swarm/create_sso.py @@ -2,11 +2,11 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'C_w': 0.1, 'C_p': 0.4, 'C_g': 0.9 } -# Creating a SSO optimizer -o = SSO(hyperparams=hyperparams) +# Creates a SSO optimizer +o = SSO(params=params) diff --git a/examples/optimizers/swarm/create_stoa.py b/examples/optimizers/swarm/create_stoa.py index 94577e86..6422ca9d 100644 --- a/examples/optimizers/swarm/create_stoa.py +++ b/examples/optimizers/swarm/create_stoa.py @@ -2,9 +2,9 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { } -# Creating an 
STOA optimizer -o = STOA(hyperparams=hyperparams) +# Creates an STOA optimizer +o = STOA(params=params) diff --git a/examples/optimizers/swarm/create_vpso.py b/examples/optimizers/swarm/create_vpso.py index 99bc3d96..deeaa88b 100644 --- a/examples/optimizers/swarm/create_vpso.py +++ b/examples/optimizers/swarm/create_vpso.py @@ -2,11 +2,11 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'w': 0.7, 'c1': 1.7, 'c2': 1.7 } -# Creating an VPSO optimizer -o = VPSO(hyperparams=hyperparams) +# Creates an VPSO optimizer +o = VPSO(params=params) diff --git a/examples/optimizers/swarm/create_woa.py b/examples/optimizers/swarm/create_woa.py index d622f1f4..6199e077 100644 --- a/examples/optimizers/swarm/create_woa.py +++ b/examples/optimizers/swarm/create_woa.py @@ -2,9 +2,9 @@ # One should declare a hyperparameters object based # on the desired algorithm that will be used -hyperparams = { +params = { 'b': 1 } -# Creating an WOA optimizer -o = WOA(hyperparams=hyperparams) +# Creates an WOA optimizer +o = WOA(params=params) diff --git a/examples/spaces/create_boolean_space.py b/examples/spaces/create_boolean_space.py index f0f7212c..bebf88e6 100644 --- a/examples/spaces/create_boolean_space.py +++ b/examples/spaces/create_boolean_space.py @@ -1,9 +1,13 @@ -from opytimizer.spaces.boolean import BooleanSpace +from opytimizer.spaces import BooleanSpace -# We need to define the number of agents, decision variables and iterations +# Defines the number of agents and decision variables n_agents = 2 n_variables = 5 -n_iterations = 10 -# Creating the BooleanSpace object -s = BooleanSpace(n_agents=n_agents, n_variables=n_variables, n_iterations=n_iterations) +# Creates the BooleanSpace +s = BooleanSpace(n_agents, n_variables) + +# Prints out some properties +print(s.n_agents, s.n_variables) +print(s.agents, s.best_agent) +print(s.best_agent.position) diff --git a/examples/spaces/create_grid_space.py 
b/examples/spaces/create_grid_space.py index 2107cd11..7ea26519 100644 --- a/examples/spaces/create_grid_space.py +++ b/examples/spaces/create_grid_space.py @@ -1,15 +1,18 @@ -from opytimizer.spaces.grid import GridSpace +from opytimizer.spaces import GridSpace -# We need to define the number of decision variables +# Define the number of decision variables n_variables = 2 -# And also the size of the step in the grid -step = (0.1, 1) +# Also defines the step size of each variable +# and their corresponding lower and upper bounds +step = [0.1, 1] +lower_bound = [0.5, 1] +upper_bound = [2.0, 2] -# Finally, we define the lower and upper bounds -# Note that they have to be the same size as n_variables -lower_bound = (0.5, 1) -upper_bound = (2.0, 2) +# Creates the GridSpace +s = GridSpace(n_variables, step, lower_bound, upper_bound) -# Creating the GridSpace object -s = GridSpace(n_variables=n_variables, step=step, lower_bound=lower_bound, upper_bound=upper_bound) +# Prints out some properties +print(s.n_agents, s.n_variables) +print(s.agents, s.best_agent) +print(s.best_agent.position) diff --git a/examples/spaces/create_hypercomplex_space.py b/examples/spaces/create_hypercomplex_space.py index 655f4e32..1a420875 100644 --- a/examples/spaces/create_hypercomplex_space.py +++ b/examples/spaces/create_hypercomplex_space.py @@ -1,17 +1,15 @@ -from opytimizer.spaces.hyper_complex import HyperComplexSpace +from opytimizer.spaces import HyperComplexSpace -# We need to define the number of agents, decision variables, dimensions and iterations +# Defines the number of agents, decision variables, +# and search space dimensions n_agents = 2 n_variables = 5 n_dimensions = 4 -n_iterations = 10 -# Finally, we define the lower and upper bounds -# Note that they have to be the same size as n_variables -lower_bound = (0.1, 0.3, 0.5, 0.7, 0.9) -upper_bound = (0.2, 0.4, 0.6, 0.8, 1.0) +# Creates the HyperComplexSpace +s = HyperComplexSpace(n_agents=n_agents, n_variables=n_variables, 
n_dimensions=n_dimensions) -# Creating the HyperComplexSpace object -s = HyperComplexSpace(n_agents=n_agents, n_variables=n_variables, - n_dimensions=n_dimensions, n_iterations=n_iterations, - lower_bound=lower_bound, upper_bound=upper_bound) +# Prints out some properties +print(s.n_agents, s.n_variables, s.n_dimensions) +print(s.agents, s.best_agent) +print(s.best_agent.position) diff --git a/examples/spaces/create_search_space.py b/examples/spaces/create_search_space.py index cc248343..397e668b 100644 --- a/examples/spaces/create_search_space.py +++ b/examples/spaces/create_search_space.py @@ -1,16 +1,19 @@ from opytimizer.spaces.search import SearchSpace -# We need to define the number of agents, decision variables and iterations +# Define the number of agents and decision variables n_agents = 2 n_variables = 5 -n_iterations = 10 -# Finally, we define the lower and upper bounds -# Note that they have to be the same size as n_variables -lower_bound = (0.1, 0.3, 0.5, 0.7, 0.9) -upper_bound = (0.2, 0.4, 0.6, 0.8, 1.0) +# Also defines the corresponding lower and upper bounds +# Note that they have to be the same size as `n_variables` +lower_bound = [0.1, 0.3, 0.5, 0.7, 0.9] +upper_bound = [0.2, 0.4, 0.6, 0.8, 1.0] -# Creating the SearchSpace object +# Creates the SearchSpace s = SearchSpace(n_agents=n_agents, n_variables=n_variables, - n_iterations=n_iterations, lower_bound=lower_bound, - upper_bound=upper_bound) + lower_bound=lower_bound, upper_bound=upper_bound) + +# Prints out some properties +print(s.n_agents, s.n_variables) +print(s.agents, s.best_agent) +print(s.best_agent.position) diff --git a/examples/spaces/create_tree_space.py b/examples/spaces/create_tree_space.py index 0aa207da..22362cc0 100644 --- a/examples/spaces/create_tree_space.py +++ b/examples/spaces/create_tree_space.py @@ -1,35 +1,30 @@ from opytimizer.spaces.tree import TreeSpace -# We need to define the number of trees, number of terminals, decision variables and iterations -n_trees = 2 
-n_terminals = 2 +# Define the number of agents, decision variables and terminals +n_agents = 2 n_variables = 5 -n_iterations = 10 +n_terminals = 2 # Minimum and maximum depths of the trees min_depth = 2 max_depth = 5 -# List of functions nodes +# Function nodes func_nodes = ['SUM', 'SUB', 'MUL', 'DIV'] -# Finally, we define the lower and upper bounds -# Note that they have to be the same size as n_variables -lower_bound = (0.1, 0.3, 0.5, 0.7, 0.9) -upper_bound = (0.2, 0.4, 0.6, 0.8, 1.0) +# Also defines the corresponding lower and upper bounds +# Note that they have to be the same size as `n_variables` +lower_bound = [0.1, 0.3, 0.5, 0.7, 0.9] +upper_bound = [0.2, 0.4, 0.6, 0.8, 1.0] -# Creating the TreeSpace object -s = TreeSpace(n_trees=n_trees, n_terminals=n_terminals, n_variables=n_variables, - n_iterations=n_iterations, min_depth=min_depth, max_depth=max_depth, - functions=func_nodes, lower_bound=lower_bound, upper_bound=upper_bound) +# Creates the TreeSpace +s = TreeSpace(n_agents, n_variables, lower_bound, upper_bound, + n_terminals, min_depth, max_depth, func_nodes) -# Outputting the whole tree +# Prints out some properties print(s.trees[0]) - -# Outputting the tree's current position (solution) print(f'Position: {s.trees[0].position}') - -# Outputting valuable information about the tree print(f'\nPre Order: {s.trees[0].pre_order}') print(f'\nPost Order: {s.trees[0].post_order}') -print(f'\nNodes: {s.trees[0].n_nodes} | Leaves: {s.trees[0].n_leaves} | Minimum Depth: {s.trees[0].min_depth} | Maximum Depth: {s.trees[0].max_depth}') +print(f'\nNodes: {s.trees[0].n_nodes} | Leaves: {s.trees[0].n_leaves} | ' + f'Minimum Depth: {s.trees[0].min_depth} | Maximum Depth: {s.trees[0].max_depth}') diff --git a/examples/utils/custom_callbacks.py b/examples/utils/custom_callbacks.py new file mode 100644 index 00000000..952174ad --- /dev/null +++ b/examples/utils/custom_callbacks.py @@ -0,0 +1,58 @@ +from opytimizer.utils.callback import Callback + + +class 
CustomCallback(Callback): + """A CustomCallback can be created by override its parent `Callback` class + and by implementing the desired logic in its available methods. + + """ + + def __init__(self): + """Initialization method for the customized callback. + + """ + + # You only need to override its parent class + super(CustomCallback).__init__() + + def on_iteration_begin(self, iteration, opt_model): + """Called at the beginning of an iteration. + + """ + + pass + + def on_iteration_end(self, iteration, opt_model): + """Called at the end of an iteration. + + """ + + pass + + def on_evaluate_before(self, *evaluate_args): + """Called before the `evaluate` method. + + """ + + pass + + def on_evaluate_after(self, *evaluate_args): + """Called after the `evaluate` method. + + """ + + pass + + def on_update_before(self, *update_args): + """Called before the `update` method. + + """ + + pass + + def on_update_after(self, *update_args): + """Called after the `update` method. + + """ + + pass diff --git a/examples/utils/interact_with_history.py b/examples/utils/interact_with_history.py new file mode 100644 index 00000000..3131d73b --- /dev/null +++ b/examples/utils/interact_with_history.py @@ -0,0 +1,17 @@ +from opytimizer.utils.history import History + +# Instantiates the History +h = History() + +# Dumps a variable (it will be converted into a list) +h.dump(x=1) +h.dump(x=2) +h.dump(x=3) + +# Any variable will be converted into a list +# Even lists, dictionaries, etc +h.dump(y=[1]) + +# Access the variables +print(h.x) +print(h.y) diff --git a/examples/utils/load_history.py b/examples/utils/load_history.py deleted file mode 100644 index 45d785e0..00000000 --- a/examples/utils/load_history.py +++ /dev/null @@ -1,13 +0,0 @@ -from opytimizer.utils.history import History - -# File name to be loaded -file_name = '' - -# Creating an empty History object -h = History() - -# Loading history from pickle file -h.load(file_name) - -# Displaying content -print(h) diff --git 
a/examples/utils/load_mnist.py b/examples/utils/load_mnist.py deleted file mode 100644 index 3c224400..00000000 --- a/examples/utils/load_mnist.py +++ /dev/null @@ -1,94 +0,0 @@ -import gzip -import os -import urllib.request as request -from os import path - -import numpy as np - -FILES_MNIST = ["train-images-idx3-ubyte.gz", "train-labels-idx1-ubyte.gz", - "t10k-images-idx3-ubyte.gz", "t10k-labels-idx1-ubyte.gz"] - - -def download(url, file_name): - # Creates the directory path for further downloading - dir_name = path.dirname(file_name) - - # Checks if the path exists - if not path.exists(dir_name): - # If not, create its directory - os.makedirs(dir_name) - - # Retrieves the file - request.urlretrieve(url, file_name) - - -def download_mnist(file_name): - # URL for MNIST dataset - url = 'http://yann.lecun.com/exdb/mnist/' - - # For each possible file - for mnist in FILES_MNIST: - # We create a path to download the file - mnist_path = os.path.join(file_name, mnist) - - # If the path does not exists - if not path.exists(mnist_path): - # Downloads the file - download(url + mnist, mnist_path) - - -def load_single_mnist(dir_mnist, file_mnist, bits, shape): - # Trying to open desired file - with gzip.open(os.path.join(dir_mnist, file_mnist)) as fd: - # Reading to buffer - buf = fd.read() - - # From buffer, we actually load the file - loaded = np.frombuffer(buf, dtype=np.uint8) - - # Reshaping the data - data = loaded[bits:].reshape(shape) - - return data - - -def load_mnist(): - # Directory to MNIST dataset - dir_mnist = 'datasets/mnist/' - - # If there is no directory - if not path.exists(dir_mnist): - # Downloads the dataset - download_mnist(dir_mnist) - - # If there is a directory - else: - # Check if files have been downloaded - exists = [path.exists(os.path.join(dir_mnist, f)) for f in FILES_MNIST] - - # If they have not been downloaded - if not np.all(exists): - # Downloads the dataset - download_mnist(dir_mnist) - - # Loading training samples - X_train = 
load_single_mnist( - dir_mnist, 'train-images-idx3-ubyte.gz', 16, (60000, 28 * 28)).astype(float) - - # Loading training labels - Y_train = load_single_mnist( - dir_mnist, 'train-labels-idx1-ubyte.gz', 8, (60000)) - - # Loading validation samples - X_val = load_single_mnist( - dir_mnist, 't10k-images-idx3-ubyte.gz', 16, (10000, 28 * 28)).astype(float) - - # Loading validation labels - Y_val = load_single_mnist( - dir_mnist, 't10k-labels-idx1-ubyte.gz', 8, (10000)) - - # Normalizing samples - X_train /= 255. - X_val /= 255. - - return X_train, Y_train, X_val, Y_val diff --git a/examples/visualization/convergence_plotting.py b/examples/visualization/convergence_plotting.py index d7ac71e6..e5d459b8 100644 --- a/examples/visualization/convergence_plotting.py +++ b/examples/visualization/convergence_plotting.py @@ -1,20 +1,12 @@ import opytimizer.visualization.convergence as c -from opytimizer.utils.history import History -# Creating the history object -history = History() +# Defines agent's position and fitness +agent_pos = [[0.5, 0.4, 0.3], [0.5, 0.4, 0.3]] +agent_fit = [0.5, 0.32, 0.18] -# Loading saved optimization task -history.load('') - -# Gathering desired keys from the object -# In this case, we will the first agent's position and fitness -agent_pos = history.get(key='agents', index=(0, 0)) -agent_fit = history.get(key='agents', index=(0, 1)) - -# We will also gather the best agent's position and fitness -best_agent_pos = history.get(key='best_agent', index=(0,)) -best_agent_fit = history.get(key='best_agent', index=(1,)) +# Defines best agent's position and fitness +best_agent_pos = [[0.01, 0.005, 0.0001], [0.01, 0.005, 0.0001]] +best_agent_fit = [0.0002, 0.00005, 0.00002] # Plotting the convergence of agent's positions c.plot(agent_pos[0], agent_pos[1], labels=['$x_0$', '$x_1$'], diff --git a/examples/visualization/function_surface_plotting.py b/examples/visualization/function_surface_plotting.py index 407b70ff..3682b0d9 100644 --- 
a/examples/visualization/function_surface_plotting.py +++ b/examples/visualization/function_surface_plotting.py @@ -7,16 +7,16 @@ def f(x, y): return x**2 + y**2 -# Defining both `x` and `y` arrays +# Defines both `x` and `y` arrays x = y = np.linspace(-10, 10, 100) -# Creating a meshgrid from both `x` and `y` +# Creates a meshgrid from both `x` and `y` x, y = np.meshgrid(x, y) -# Calculating f(x, y) +# Calculates f(x, y) z = f(x, y) -# Creating final array for further plotting +# Creates final array for further plotting points = np.asarray([x, y, z]) # Plotting the surface diff --git a/opytimizer/__init__.py b/opytimizer/__init__.py index 9fcc6232..30348a43 100644 --- a/opytimizer/__init__.py +++ b/opytimizer/__init__.py @@ -1,7 +1,7 @@ -"""This is opytimizer main library. Note that this library consists +"""Opytimizer main library. Note that it consists of several modules and sub-modules. """ from opytimizer.opytimizer import Opytimizer -__version__ = '2.1.4' +__version__ = '3.0.0' diff --git a/opytimizer/core/__init__.py b/opytimizer/core/__init__.py index 296dab93..8574954e 100644 --- a/opytimizer/core/__init__.py +++ b/opytimizer/core/__init__.py @@ -1,2 +1,8 @@ -"""A core package for all common opytimizer modules. +"""Core package for all common opytimizer modules. """ + +from opytimizer.core.agent import Agent +from opytimizer.core.function import Function +from opytimizer.core.node import Node +from opytimizer.core.optimizer import Optimizer +from opytimizer.core.space import Space diff --git a/opytimizer/core/agent.py b/opytimizer/core/agent.py index e34d0555..dd731e31 100644 --- a/opytimizer/core/agent.py +++ b/opytimizer/core/agent.py @@ -1,9 +1,10 @@ -"""Agent structure. +"""Agent. 
""" import numpy as np -import opytimizer.utils.constants as c +import opytimizer.math.random as r +import opytimizer.utils.constant as c import opytimizer.utils.exception as e import opytimizer.utils.logging as l @@ -15,32 +16,34 @@ class Agent: """ - def __init__(self, n_variables=1, n_dimensions=1): + def __init__(self, n_variables, n_dimensions, lower_bound, upper_bound): """Initialization method. Args: n_variables (int): Number of decision variables. n_dimensions (int): Number of dimensions. + lower_bound (list, tuple, np.array): Minimum possible values. + upper_bound (list, tuple, np.array): Maximum possible values. """ - # Initially, an agent needs its number of variables + # Number of decision variables self.n_variables = n_variables - # And also, its number of dimensions + # Number of dimensions self.n_dimensions = n_dimensions - # Create the position vector based on the number of variables and dimensions + # N-dimensional array of positions self.position = np.zeros((n_variables, n_dimensions)) - # Fitness value is initialized with float's largest number + # Fitness value (largest float number) self.fit = c.FLOAT_MAX - # Lower bounds are initialized as zeros - self.lb = np.zeros(n_variables) + # Lower bounds + self.lb = np.asarray(lower_bound) - # Upper bounds are initialized as ones - self.ub = np.ones(n_variables) + # Upper bounds + self.ub = np.asarray(upper_bound) @property def n_variables(self): @@ -78,7 +81,7 @@ def n_dimensions(self, n_dimensions): @property def position(self): - """np.array: N-dimensional array of values. + """np.array: N-dimensional array of positions. 
""" @@ -118,6 +121,10 @@ def lb(self): def lb(self, lb): if not isinstance(lb, np.ndarray): raise e.TypeError('`lb` should be a numpy array') + if not lb.shape: + lb = np.expand_dims(lb, -1) + if lb.shape[0] != self.n_variables: + raise e.SizeError('`lb` should be the same size as `n_variables`') self._lb = lb @@ -133,15 +140,62 @@ def ub(self): def ub(self, ub): if not isinstance(ub, np.ndarray): raise e.TypeError('`ub` should be a numpy array') + if not ub.shape: + ub = np.expand_dims(ub, -1) + if ub.shape[0] != self.n_variables: + raise e.SizeError('`ub` should be the same size as `n_variables`') self._ub = ub - def clip_limits(self): + def clip_by_bound(self): """Clips the agent's decision variables to the bounds limits. """ # Iterates through all the decision variables for j, (lb, ub) in enumerate(zip(self.lb, self.ub)): - # Clips the array based on variables' lower and upper bounds + # Clips the array based on variable's lower and upper bounds self.position[j] = np.clip(self.position[j], lb, ub) + + def fill_with_binary(self): + """Fills the agent's decision variables with a binary distribution. + + """ + + # Iterates through all the decision variables + for j in range(self.n_variables): + # Fills the array based on a binary distribution + self.position[j] = r.generate_binary_random_number(self.n_dimensions) + + def fill_with_static(self, values): + """Fills the agent's decision variables with static values. Note that this + method ignore the agent's bounds, so use it carefully. + + Args: + values (list, tuple, np.array): Values to be filled. 
+ + """ + + # Makes sure that `values` is a numpy array + # and has the same size of `n_variables` + values = np.asarray(values) + if not values.shape: + values = np.expand_dims(values, -1) + if values.shape[0] != self.n_variables: + raise e.SizeError('`values` should be the same size as `n_variables`') + + # Iterates through all the decision variables + for j, value in enumerate(values): + # Fills the array based on a static value + self.position[j] = value + + def fill_with_uniform(self): + """Fills the agent's decision variables with a uniform distribution + based on bounds limits. + + """ + + # Iterates through all the decision variables + for j, (lb, ub) in enumerate(zip(self.lb, self.ub)): + # Fills the array based on a uniform distribution + self.position[j] = r.generate_uniform_random_number(lb, ub, self.n_dimensions) diff --git a/opytimizer/core/function.py b/opytimizer/core/function.py index 0aba4dac..88c1af17 100644 --- a/opytimizer/core/function.py +++ b/opytimizer/core/function.py @@ -1,4 +1,4 @@ -"""Standard objective function. +"""Single-objective functions. """ from inspect import signature @@ -10,119 +10,50 @@ class Function: - """A Function class for using with objective functions - that will be further evaluated. - - It serves as the basis class for holding in-code related - objective functions. + """A Function class used to hold single-objective functions. """ - def __init__(self, pointer=callable, constraints=None, penalty=0.0): + def __init__(self, pointer): """Initialization method. Args: - pointer (callable): This should be a pointer to a function that will return the fitness value. - constraints (list): List of constraints to be applied to the fitness function. - penalty (float): Penalization factor when a constraint is not valid. + pointer (callable): Pointer to a function that will return the fitness value. 
""" logger.info('Creating class: Function.') - # Checking if pointer is actually a callable + # Pointer's callable + self.pointer = pointer + + # Name of the pointer (derived from its method or class) if hasattr(pointer, '__name__'): - # If yes, applies the callable name self.name = pointer.__name__ - - # If pointer comes from a class else: - # Applies its name as the class' name self.name = pointer.__class__.__name__ - # Checks if constraints do not exist - if constraints is None: - # Creates an empty list for compatibility - self.constraints = [] - - # If constraints exist - else: - # Save the constraints for further inspection - self.constraints = constraints - - # Creates a property for holding the penalization factor - self.penalty = penalty - - # Also, we need to create a callable to point to the actual function - self._create_pointer(pointer) - - # Indicates whether the function is built or not + # If no errors were shown, we can declare the function as `built` self.built = True + # Logs the attributes + logger.debug('Function: %s | Built: %s.', + self.name, self.built) logger.info('Class created.') - logger.debug('Function: %s | Constraints: %s | Penalty: %s | Built: %s', - self.name, self.constraints, self.penalty, self.built) def __call__(self, x): - """Defines a callable to this class in order to avoid using directly the property. + """Callable to avoid using the `pointer` property. Args: - x (np.array): Array of positions to be calculated. + x (np.array): Array of positions. Returns: - The output of the objective function. + Single-objective function fitness. """ return self.pointer(x) - @property - def name(self): - """str: Name of the function. - - """ - - return self._name - - @name.setter - def name(self, name): - if not isinstance(name, str): - raise e.TypeError('`name` should be a string') - - self._name = name - - @property - def constraints(self): - """list: List of constraints to be applied to the fitness function. 
- - """ - - return self._constraints - - @constraints.setter - def constraints(self, constraints): - if not isinstance(constraints, list): - raise e.TypeError('`constraints` should be a list') - - self._constraints = constraints - - @property - def penalty(self): - """float: Constraint penalization factor. - - """ - - return self._penalty - - @penalty.setter - def penalty(self, penalty): - if not isinstance(penalty, (float, int)): - raise e.TypeError('`penalty` should be a float or integer') - if penalty < 0: - raise e.ValueError('`penalty` should be >= 0') - - self._penalty = penalty - @property def pointer(self): """callable: Points to the actual function. @@ -135,62 +66,34 @@ def pointer(self): def pointer(self, pointer): if not callable(pointer): raise e.TypeError('`pointer` should be a callable') + if len(signature(pointer).parameters) > 1: + raise e.ArgumentError('`pointer` should only have 1 argument') self._pointer = pointer @property - def built(self): - """bool: Indicate whether the function is built. + def name(self): + """str: Name of the function. """ - return self._built + return self._name - @built.setter - def built(self, built): - self._built = built + @name.setter + def name(self, name): + if not isinstance(name, str): + raise e.TypeError('`name` should be a string') - def _create_pointer(self, pointer): - """Wraps the fitness function if there are any constraints to be evaluated. + self._name = name - Args: - pointer (callable): Pointer to the actual function. + @property + def built(self): + """bool: Indicates whether the function is built. """ - # Checks if provided function has only one parameter - if len(signature(pointer).parameters) > 1: - # If not, raises an ArgumentError - raise e.ArgumentError('`pointer` should only have 1 argument') - - def _constrain_pointer(x): - """Applies the constraints and penalizes the fitness function if one of them are not valid. - - Args: - x (np.array): Array to be evaluated. 
- - Returns: - The value of the fitness function. - - """ - - # Calculates the fitness function - fitness = pointer(x) - - # For every possible constraint - for constraint in self.constraints: - # Check if constraint is valid - if constraint(x): - # If yes, just keep going - pass - - # If a constraint is not valid - else: - # Penalizes the objective function - fitness += self.penalty * fitness - - # If all constraints are satisfied, return the fitness function - return fitness + return self._built - # Applying to the pointer property the return of constrained function - self.pointer = _constrain_pointer + @built.setter + def built(self, built): + self._built = built diff --git a/opytimizer/core/node.py b/opytimizer/core/node.py index 11e5537c..b0f90089 100644 --- a/opytimizer/core/node.py +++ b/opytimizer/core/node.py @@ -1,9 +1,9 @@ -"""Node structure. +"""Node. """ import numpy as np -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e @@ -12,12 +12,12 @@ class Node: """ - def __init__(self, name, node_type, value=None, left=None, right=None, parent=None): + def __init__(self, name, category, value=None, left=None, right=None, parent=None): """Initialization method. Args: name (str, int): Name of the node (e.g., it should be the terminal identifier or function name). - node_type (str): Type of the node (e.g., TERMINAL or FUNCTION). + category (str): Category of the node (e.g., TERMINAL or FUNCTION). value (np.array): Value of the node (only used if it is a terminal). left (Node): Pointer to node's left child. right (Node): Pointer to node's right child. 
@@ -25,36 +25,32 @@ def __init__(self, name, node_type, value=None, left=None, right=None, parent=No """ - # Name of the node (e.g., it should be the terminal identifier or function name) + # Name of the node (terminal identifier or function name) self.name = name - # Type of the node (e.g., TERMINAL or FUNCTION) - self.node_type = node_type + # Category of the node (`TERMINAL` or `FUNCTION`) + self.category = category - # Value of the node (only if it is a terminal node) + # Value of the node (only for terminal nodes) self.value = value - # Pointer to node's left child + # Pointers to the node's children and parent self.left = left - - # Pointer to node's right child self.right = right - - # Pointer to node's parent self.parent = parent # Flag to identify whether the node is a left child self.flag = True def __repr__(self): - """Object representation as a formal string. + """Representation of a formal string. """ - return f'{self.node_type}:{self.name}:{self.flag}' + return f'{self.category}:{self.name}:{self.flag}' def __str__(self): - """Object representation as an informal string. + """Representation of an informal string. """ @@ -65,7 +61,7 @@ def __str__(self): @property def name(self): - """str: Node's identifier. + """str: Name of the node. """ @@ -79,23 +75,23 @@ def name(self, name): self._name = name @property - def node_type(self): - """str: Type of the node (e.g., TERMINAL or FUNCTION). + def category(self): + """str: Category of the node. 
""" - return self._node_type + return self._category - @node_type.setter - def node_type(self, node_type): - if node_type not in ['TERMINAL', 'FUNCTION']: - raise e.ValueError('`node_type` should be `TERMINAL` or `FUNCTION`') + @category.setter + def category(self, category): + if category not in ['TERMINAL', 'FUNCTION']: + raise e.ValueError('`category` should be `TERMINAL` or `FUNCTION`') - self._node_type = node_type + self._category = category @property def value(self): - """np.array: Value of the node (only if it is a terminal node). + """np.array: Value of the node. """ @@ -103,7 +99,7 @@ def value(self): @value.setter def value(self, value): - if self.node_type != 'TERMINAL': + if self.category != 'TERMINAL': self._value = None else: if not isinstance(value, np.ndarray): @@ -113,7 +109,7 @@ def value(self, value): @property def left(self): - """Node: Pointer to node's left child. + """Node: Pointer to the node's left child. """ @@ -121,15 +117,14 @@ def left(self): @left.setter def left(self, left): - if left: - if not isinstance(left, Node): - raise e.TypeError('`left` should be a Node') + if left and not isinstance(left, Node): + raise e.TypeError('`left` should be a Node') self._left = left @property def right(self): - """Node: Pointer to node's right child. + """Node: Pointer to the node's right child. """ @@ -137,15 +132,14 @@ def right(self): @right.setter def right(self, right): - if right: - if not isinstance(right, Node): - raise e.TypeError('`right` should be a Node') + if right and not isinstance(right, Node): + raise e.TypeError('`right` should be a Node') self._right = right @property def parent(self): - """Node: Pointer to node's parent. + """Node: Pointer to the node's parent. 
""" @@ -153,9 +147,8 @@ def parent(self): @parent.setter def parent(self, parent): - if parent: - if not isinstance(parent, Node): - raise e.TypeError('`parent` should be a Node') + if parent and not isinstance(parent, Node): + raise e.TypeError('`parent` should be a Node') self._parent = parent @@ -208,7 +201,7 @@ def n_nodes(self): @property def position(self): - """np.array: Position after traversing the nodes. + """np.array: Position after traversing the node. """ @@ -216,7 +209,7 @@ def position(self): @property def post_order(self): - """list: Traverses the nodes in post-order. + """list: Traverses the node in post-order. """ @@ -269,7 +262,7 @@ def post_order(self): @property def pre_order(self): - """list: Traverses the nodes in pre-order. + """list: Traverses the node in pre-order. """ @@ -303,7 +296,7 @@ def find_node(self, position): position (int): Position of the node. Returns: - The node at desired position. + Node at desired position. """ @@ -316,13 +309,13 @@ def find_node(self, position): node = pre_order[position] # If the node is a terminal - if node.node_type == 'TERMINAL': + if node.category == 'TERMINAL': return node.parent, node.flag # If the node is a function - if node.node_type == 'FUNCTION': + if node.category == 'FUNCTION': # If it is a function node, we need to return the parent of its parent - if node.parent.parent: + if node.parent and node.parent.parent: return node.parent.parent, node.parent.flag return None, False @@ -340,7 +333,7 @@ def _build_string(node): node (Node): An instance of the Node class (can be a tree of Nodes). Returns: - A formatted string ready to be printed. + Formatted string ready to be printed. """ @@ -453,7 +446,7 @@ def _evaluate(node): node (Node): An instance of the Node class (can be a tree of Nodes). Returns: - An output solution of size (n_variables x n_dimensions). + Output solution of size (n_variables x n_dimensions). 
""" @@ -464,7 +457,7 @@ def _evaluate(node): y = _evaluate(node.right) # If the node is an agent or constant - if node.node_type == 'TERMINAL': + if node.category == 'TERMINAL': return node.value # Checks if its a summation @@ -511,13 +504,13 @@ def _evaluate(node): def _properties(node): - """Traverses the nodes and returns some useful properties. + """Traverses the node and returns some useful properties. Args: node (Node): An instance of the Node class (can be a tree of Nodes). Returns: - A dictionary containing some useful properties: `min_depth`, `max_depth`, + Dictionary containing some useful properties: `min_depth`, `max_depth`, `n_leaves` and `n_nodes`. """ @@ -528,7 +521,7 @@ def _properties(node): # Initializing number of leaves and nodes as 0 n_leaves = n_nodes = 0 - # Gathering a list of possible nodes + # Gathers a list of possible nodes nodes = [node] # While there is a nonde diff --git a/opytimizer/core/optimizer.py b/opytimizer/core/optimizer.py index b7dc2051..caed7547 100644 --- a/opytimizer/core/optimizer.py +++ b/opytimizer/core/optimizer.py @@ -1,9 +1,8 @@ -"""Optimizer structure. +"""Optimizer. """ import copy -import opytimizer.utils.decorator as d import opytimizer.utils.exception as e import opytimizer.utils.logging as l @@ -11,30 +10,28 @@ class Optimizer: - """An Optimizer class that serves as meta-heuristics' parent. + """An Optimizer class that holds meta-heuristics-related properties + and methods. """ - def __init__(self, algorithm=''): + def __init__(self): """Initialization method. - Args: - algorithm (str): Indicates the algorithm name. - """ - # We define the algorithm's name - self.algorithm = algorithm + # Algorithm's name + self.algorithm = self.__class__.__name__ - # Also, we initialize hyperparameters as None - self.hyperparams = None + # Key-value parameters + self.params = {} # Indicates whether the optimizer is built or not self.built = False @property def algorithm(self): - """str: Indicates the algorithm name. 
+ """str: Algorithm's name. """ @@ -42,82 +39,84 @@ def algorithm(self): @algorithm.setter def algorithm(self, algorithm): + if not isinstance(algorithm, str): + raise e.TypeError('`algorithm` should be a string') + self._algorithm = algorithm @property - def hyperparams(self): - """dict: Contains the key-value parameters to meta-heuristics. + def built(self): + """bool: Indicates whether the optimizer is built. """ - return self._hyperparams + return self._built - @hyperparams.setter - def hyperparams(self, hyperparams): - if not (isinstance(hyperparams, dict) or hyperparams is None): - raise e.TypeError('`hyperparams` should be a dictionary') + @built.setter + def built(self, built): + if not isinstance(built, bool): + raise e.TypeError('`built` should be a boolean') - self._hyperparams = hyperparams + self._built = built @property - def built(self): - """bool: Indicates whether the optimizer is built. + def params(self): + """dict: Key-value parameters. """ - return self._built + return self._params - @built.setter - def built(self, built): - self._built = built + @params.setter + def params(self, params): + if not isinstance(params, dict): + raise e.TypeError('`params` should be a dictionary') - def _build(self, hyperparams): - """This method serves as the object building process. + self._params = params + + def build(self, params): + """Builds the object by creating its parameters. Args: - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Key-value parameters to the meta-heuristic. 
""" - logger.debug('Running private method: build().') - - # We need to save the hyperparams object for faster looking up - self.hyperparams = hyperparams + # Checks if `params` are really provided + if params: + # Saves the `params` for faster looking up + self.params = params - # Checks if hyperparams are really provided - if hyperparams: - # If one can find any hyperparam inside its object - for k, v in hyperparams.items(): - # Set it as the one that will be used + # Iterates through all parameters + for k, v in params.items(): + # Sets its key-value pair setattr(self, k, v) - # Set built variable to 'True' + # Sets the `built` variable to true self.built = True - # Logging attributes - logger.debug('Algorithm: %s | Hyperparameters: %s | ' - 'Built: %s.', - self.algorithm, str(hyperparams), - self.built) - - def _update(self): - """Updates the agents' position array. + # Logs the properties + logger.debug('Algorithm: %s | Custom Parameters: %s | Built: %s.', + self.algorithm, str(params), self.built) - As each optimizer child can have a different procedure of update, - you will need to implement it directly on child's class. + def create_additional_attrs(self, space): + """Creates additional attributes that are used by this optimizer. - Raises: - NotImplementedError. + This method is called before the optimization procedure and makes sure + that the additional variable is available as a property. """ - raise NotImplementedError + pass - @d.pre_evaluate - def _evaluate(self, space, function): + def evaluate(self, space, function): """Evaluates the search space according to the objective function. - If you need a specific evaluate method, please re-implement it on child's class. + If you need a specific evaluate method, please re-implement + it on child's class. + + Also, note that function only accept arguments that are + found on Opytimizer class. Args: space (Space): A Space object that will be evaluated. 
@@ -136,21 +135,15 @@ def _evaluate(self, space, function): space.best_agent.position = copy.deepcopy(agent.position) space.best_agent.fit = copy.deepcopy(agent.fit) - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - As each optimizer child can have a different optimization pipeline, - you will need to implement it directly on child's class. + def update(self): + """Updates the agents' position array. - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): Method to be executed before evaluating the `function` being optimized. + As each child has a different procedure of update, you will need + to implement it directly on its class. - Raises: - NotImplementedError. + Also, note that function only accept arguments that are + found on Opytimizer class. """ - raise NotImplementedError + pass diff --git a/opytimizer/core/space.py b/opytimizer/core/space.py index 321182fc..84317b61 100644 --- a/opytimizer/core/space.py +++ b/opytimizer/core/space.py @@ -1,13 +1,11 @@ -"""Standard search space. +"""Search space. """ -import copy - import numpy as np import opytimizer.utils.exception as e import opytimizer.utils.logging as l -from opytimizer.core.agent import Agent +from opytimizer.core import Agent logger = l.get_logger(__name__) @@ -18,14 +16,15 @@ class Space: """ - def __init__(self, n_agents=1, n_variables=1, n_dimensions=1, n_iterations=10): + def __init__(self, n_agents=1, n_variables=1, n_dimensions=1, lower_bound=0.0, upper_bound=1.0): """Initialization method. Args: n_agents (int): Number of agents. n_variables (int): Number of decision variables. n_dimensions (int): Dimension of search space. - n_iterations (int): Number of iterations. 
+ lower_bound (float, list, tuple, np.array): Minimum possible values. + upper_bound (float, list, tuple, np.array): Maximum possible values. """ @@ -38,20 +37,17 @@ def __init__(self, n_agents=1, n_variables=1, n_dimensions=1, n_iterations=10): # Number of dimensions self.n_dimensions = n_dimensions - # Number of iterations - self.n_iterations = n_iterations - - # List of agents - self.agents = [] - - # Best agent object - self.best_agent = Agent() - # Lower bounds - self.lb = np.zeros(n_variables) + self.lb = np.asarray(lower_bound) # Upper bounds - self.ub = np.ones(n_variables) + self.ub = np.asarray(upper_bound) + + # Agents + self.agents = [] + + # Best agent + self.best_agent = Agent(n_variables, n_dimensions, lower_bound, upper_bound) # Indicates whether the space is built or not self.built = False @@ -92,7 +88,7 @@ def n_variables(self, n_variables): @property def n_dimensions(self): - """int: Dimension of search space. + """int: Number of search space dimensions. """ @@ -108,25 +104,46 @@ def n_dimensions(self, n_dimensions): self._n_dimensions = n_dimensions @property - def n_iterations(self): - """int: Number of iterations. + def lb(self): + """np.array: Minimum possible values. + + """ + + return self._lb + + @lb.setter + def lb(self, lb): + if not isinstance(lb, np.ndarray): + raise e.TypeError('`lb` should be a numpy array') + if not lb.shape: + lb = np.expand_dims(lb, -1) + if lb.shape[0] != self.n_variables: + raise e.SizeError('`lb` should be the same size as `n_variables`') + + self._lb = lb + + @property + def ub(self): + """np.array: Maximum possible values. 
""" - return self._n_iterations + return self._ub - @n_iterations.setter - def n_iterations(self, n_iterations): - if not isinstance(n_iterations, int): - raise e.TypeError('`n_iterations` should be an integer') - if n_iterations <= 0: - raise e.ValueError('`n_iterations` should be > 0') + @ub.setter + def ub(self, ub): + if not isinstance(ub, np.ndarray): + raise e.TypeError('`ub` should be a numpy array') + if not ub.shape: + ub = np.expand_dims(ub, -1) + if not ub.shape or ub.shape[0] != self.n_variables: + raise e.SizeError('`ub` should be the same size as `n_variables`') - self._n_iterations = n_iterations + self._ub = ub @property def agents(self): - """list: List of agents that belongs to Space. + """list: Agents that belongs to the space. """ @@ -141,7 +158,7 @@ def agents(self, agents): @property def best_agent(self): - """Agent: A best agent object from Agent class. + """Agent: Best agent. """ @@ -154,43 +171,9 @@ def best_agent(self, best_agent): self._best_agent = best_agent - @property - def lb(self): - """np.array: Lower bound array with the minimum possible values. - - """ - - return self._lb - - @lb.setter - def lb(self, lb): - if not isinstance(lb, np.ndarray): - raise e.TypeError('`lb` should be a numpy array') - if lb.shape[0] != self.n_variables: - raise e.SizeError('`lb` should be the same size as `n_variables`') - - self._lb = lb - - @property - def ub(self): - """np.array: Upper bound array with the maximum possible values. - - """ - - return self._ub - - @ub.setter - def ub(self, ub): - if not isinstance(ub, np.ndarray): - raise e.TypeError('`ub` should be a numpy array') - if ub.shape[0] != self.n_variables: - raise e.SizeError('`ub` should be the same size as `n_variables`') - - self._ub = ub - @property def built(self): - """bool: A boolean to indicate whether the space is built. + """bool: Indicates whether the space is built. 
""" @@ -198,72 +181,56 @@ def built(self): @built.setter def built(self, built): - self._built = built + if not isinstance(built, bool): + raise e.TypeError('`built` should be a boolean') - def _build(self, lower_bound, upper_bound): - """This method serves as the object building process. - - One can define several commands here that does not necessarily - needs to be on its initialization. + self._built = built - Args: - lower_bound (tuple): Lower bound array with the minimum possible values. - upper_bound (tuple): Upper bound array with the maximum possible values. + def _create_agents(self): + """Creates a list of agents. """ - logger.debug('Running private method: build().') - - # Creating lower and upper bound arrays - self.lb = np.asarray(lower_bound) - self.ub = np.asarray(upper_bound) - - # Creating agents - self._create_agents() - - # If no errors were shown, we can declare the Space as built - self.built = True - - # Logging attributes - logger.debug('Agents: %d | Size: (%d, %d) | Iterations: %d | ' - 'Lower Bound: %s | Upper Bound: %s | Built: %s.', - self.n_agents, self.n_variables, self.n_dimensions, self.n_iterations, - self.lb, self.ub, self.built) + # List of agents + self.agents = [Agent(self.n_variables, self.n_dimensions, + self.lb, self.ub) for _ in range(self.n_agents)] - def _create_agents(self): - """Creates a list of agents and the best agent. + def _initialize_agents(self): + """Initializes agents with their positions and defines a best agent. - Also defines a random best agent, only for initialization purposes. + As each child has a different procedure of initialization, + you will need to implement it directly on its class. """ - logger.debug('Running private method: create_agents().') + pass - # Creating a list of agents - self.agents = [Agent(self.n_variables, self.n_dimensions) for _ in range(self.n_agents)] + def build(self): + """Builds the object by creating and initializing the agents. 
- # Apply the first agent as the best one - self.best_agent = copy.deepcopy(self.agents[0]) - - def _initialize_agents(self): - """Initialize agents' position array. + """ - As each space child can have a different procedure of initializing agents, - you will need to implement it directly on the child's class. + # Creates the agents + self._create_agents() - Raises: - NotImplementedError. + # Initializes the agents + self._initialize_agents() - """ + # If no errors were shown, we can declare the space as `built` + self.built = True - raise NotImplementedError + # Logs the properties + logger.debug('Agents: %d | Size: (%d, %d) | ' + 'Lower Bound: %s | Upper Bound: %s | Built: %s.', + self.n_agents, self.n_variables, self.n_dimensions, + self.lb, self.ub, self.built) - def clip_limits(self): - """Clips the space agents' position to the bounds limits. + def clip_by_bound(self): + """Clips the agents' decision variables to the bounds limits. """ # Iterates through all agents for agent in self.agents: - # Clips the agent's limits - agent.clip_limits() + # Clips its limits + agent.clip_by_bound() diff --git a/opytimizer/functions/__init__.py b/opytimizer/functions/__init__.py index 82aeb5ec..26d46f5f 100644 --- a/opytimizer/functions/__init__.py +++ b/opytimizer/functions/__init__.py @@ -1,2 +1,5 @@ -"""An own-idealized functions package for all common opytimizer modules. +"""Functions package for all common opytimizer modules. """ + +from opytimizer.functions.constrained import ConstrainedFunction +from opytimizer.functions.weighted import WeightedFunction diff --git a/opytimizer/functions/constrained.py b/opytimizer/functions/constrained.py new file mode 100644 index 00000000..659fee7e --- /dev/null +++ b/opytimizer/functions/constrained.py @@ -0,0 +1,99 @@ +"""Constrained single-objective functions. 
+""" + +import opytimizer.utils.exception as e +import opytimizer.utils.logging as l +from opytimizer.core import Function + +logger = l.get_logger(__name__) + + +class ConstrainedFunction(Function): + """A ConstrainedFunction class used to hold constrained single-objective functions. + + """ + + def __init__(self, pointer, constraints, penalty=0.0): + """Initialization method. + + Args: + pointer (callable): Pointer to a function that will return the fitness value. + constraints (list): Constraints to be applied to the fitness function. + penalty (float): Penalization factor when a constraint is not valid. + + """ + + logger.info('Overriding class: Function -> ConstrainedFunction.') + + # Overrides its parent class with the receiving arguments + super(ConstrainedFunction, self).__init__(pointer) + + # List of constraints + self.constraints = constraints or [] + + # Penalization factor + self.penalty = penalty + + # Logs the attributes + logger.debug('Constraints: %s | Penalty: %s.', + self.constraints, self.penalty) + logger.info('Class overrided.') + + @property + def constraints(self): + """list: Constraints to be applied to the fitness function. + + """ + + return self._constraints + + @constraints.setter + def constraints(self, constraints): + if not isinstance(constraints, list): + raise e.TypeError('`constraints` should be a list') + + self._constraints = constraints + + @property + def penalty(self): + """float: Penalization factor. + + """ + + return self._penalty + + @penalty.setter + def penalty(self, penalty): + if not isinstance(penalty, (float, int)): + raise e.TypeError('`penalty` should be a float or integer') + if penalty < 0: + raise e.ValueError('`penalty` should be >= 0') + + self._penalty = penalty + + def __call__(self, x): + """Callable to avoid using the `pointer` property. + + Args: + x (np.array): Array of positions. + + Returns: + Constrained single-objective function fitness. 
+ + """ + + # Calculates the fitness function + fitness = self.pointer(x) + + # For every possible constraint + for constraint in self.constraints: + # Checks if constraint is valid + if constraint(x): + pass + + # If the constraint is not valid + else: + # Penalizes the objective function + fitness += self.penalty * fitness + + return fitness diff --git a/opytimizer/functions/weighted.py b/opytimizer/functions/weighted.py index 2a3d2726..c4432a8a 100644 --- a/opytimizer/functions/weighted.py +++ b/opytimizer/functions/weighted.py @@ -1,69 +1,63 @@ -"""Weighted-based multi-objective functions. +"""Weighted multi-objective functions. """ import opytimizer.utils.exception as e import opytimizer.utils.logging as l -from opytimizer.core.function import Function +from opytimizer.core import Function logger = l.get_logger(__name__) class WeightedFunction: - """A WeightedFunction class for using with multi objective functions - based on the weight sum strategy. + """A WeightedFunction class used to hold weighted multi-objective functions. """ - def __init__(self, functions=None, weights=None, constraints=None, penalty=0.0): + def __init__(self, functions, weights): """Initialization method. Args: functions (list): Pointers to functions that will return the fitness value. - weights (list): Weights for weighted sum strategy. - constraints (list): List of constraints to be applied to the fitness functions. - penalty (float): Penalization factor when a constraint is not valid. + weights (list): Weights for weighted-sum strategy. 
""" logger.info('Creating class: WeightedFunction.') - # Checks if functions do not exist - if functions is None: - # Creates a list for compatibility - self.functions = [] + # List of functions + self.functions = [Function(f) for f in functions] or [] - # If functions really exist - else: - # Creating the functions property - self.functions = functions + # List of weights + self.weights = weights or [] - # Checks if weights do not exist - if weights is None: - # Creates a list for compatibility - self.weights = [] - - # If weights really exist - else: - # Creating the weights property - self.weights = weights - - # Now, we need to build this class up - self._build(constraints, penalty) + # Set built variable to 'True' + self.built = True + # Logging attributes + logger.debug('Functions: %s | Weights: %s | Built: %s', + [f.name for f in self.functions], self.weights, self.built) logger.info('Class created.') def __call__(self, x): - """Defines a callable to this class in order to avoid using directly the property. + """Callable to avoid using the `pointer` property. Args: - x (np.array): Array of positions to be calculated. + x (np.array): Array of positions. Returns: - The output of the objective function. + Weighted multi-objective function fitness. """ - return self.pointer(x) + # Defines a variable to hold the total fitness + z = 0 + + # Iterates through every function + for (f, w) in zip(self.functions, self.weights): + # Applies w * f(x) + z += w * f.pointer(x) + + return z @property def functions(self): @@ -92,61 +86,7 @@ def weights(self): def weights(self, weights): if not isinstance(weights, list): raise e.TypeError('`weights` should be a list') + if len(weights) != len(self.functions): + raise e.SizeError('`weights` should have the same size of `functions`') self._weights = weights - - def _create_multi_objective(self): - """Creates a multi-objective strategy as the real pointer. 
- - """ - - def _weighted_pointer(x): - """Weights and sums the functions according to their weights. - - Args: - x (np.array): Array to be evaluated. - - Returns: - The value of the weighted function. - - """ - # Defining value to hold strategy - z = 0 - - # Iterates through every function - for (f, w) in zip(self.functions, self.weights): - # Applies w * f(x) - z += w * f.pointer(x) - - return z - - # Applying to the pointer property the return of weighted method - self.pointer = _weighted_pointer - - def _build(self, constraints, penalty): - """This method serves as the object building process. - - One can define several commands here that does not necessarily - needs to be on its initialization. - - Args: - constraints (list): List of constraints to be applied to the fitness function. - penalty (float): Penalization factor when a constraint is not valid. - - """ - - logger.debug('Running private method: build().') - - # Populating pointers with real functions - self.functions = [Function(f, constraints, penalty) - for f in self.functions] - - # Creating a multi-objective method strategy as the real pointer - self._create_multi_objective() - - # Set built variable to 'True' - self.built = True - - # Logging attributes - logger.debug('Functions: %s | Weights: %s | Built: %s', - [f.name for f in self.functions], self.weights, self.built) diff --git a/opytimizer/math/__init__.py b/opytimizer/math/__init__.py index a7ecb15e..ddeba317 100644 --- a/opytimizer/math/__init__.py +++ b/opytimizer/math/__init__.py @@ -1,2 +1,2 @@ -"""A mathematical package for all common opytimizer modules. +"""Mathematical package for all common opytimizer modules. """ diff --git a/opytimizer/math/distribution.py b/opytimizer/math/distribution.py index 04b5f9b3..d0ec2fb0 100644 --- a/opytimizer/math/distribution.py +++ b/opytimizer/math/distribution.py @@ -16,17 +16,17 @@ def generate_bernoulli_distribution(prob=0.0, size=1): size (int): Size of array. 
Returns: - A Bernoulli distribution n-dimensional array. + Bernoulli distribution n-dimensional array. """ - # Creating bernoulli array + # Creates the bernoulli array bernoulli_array = np.zeros(size) - # Generating random number + # Generates a random number r1 = r.generate_uniform_random_number(0, 1, size) - # Masking the array + # Masks the array based on input probability bernoulli_array[r1 < prob] = 1 return bernoulli_array @@ -41,7 +41,7 @@ def generate_choice_distribution(n=1, probs=None, size=1): size (int): Size of array. Returns: - A choice distribution array. + Choice distribution array. """ @@ -63,7 +63,7 @@ def generate_levy_distribution(beta=0.1, size=1): size (int): Size of array. Returns: - A Lévy distribution n-dimensional array. + Lévy distribution n-dimensional array. """ @@ -71,14 +71,14 @@ def generate_levy_distribution(beta=0.1, size=1): num = gamma(1 + beta) * sin(pi * beta / 2) den = gamma((1 + beta) / 2) * beta * (2 ** ((beta - 1) / 2)) - # Calculates the sigma for further distribution generation + # Calculates `sigma` sigma = (num / den) ** (1 / beta) - # Calculates the 'u' and `v` distributions + # Calculates 'u' and `v` distributions u = r.generate_gaussian_random_number(size=size) * sigma v = r.generate_gaussian_random_number(size=size) - # Finally, we can calculate the Lévy distribution + # Calculates the Lévy distribution levy_array = u / np.fabs(v) ** (1 / beta) return levy_array diff --git a/opytimizer/math/general.py b/opytimizer/math/general.py index 3cfecb1e..7cf9f229 100644 --- a/opytimizer/math/general.py +++ b/opytimizer/math/general.py @@ -6,22 +6,22 @@ import numpy as np import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c def euclidean_distance(x, y): - """Calculates the euclidean distance between two n-dimensional points. + """Calculates the Euclidean distance between two n-dimensional points. Args: - x (np.array): First n-dimensional point. 
- y (np.array): Second n-dimensional point. + x (np.array): N-dimensional point. + y (np.array): N-dimensional point. Returns: - The euclidean distance between x and y. + Euclidean distance between `x` and `y`. """ - # Calculates the euclidean distance + # Calculates the Euclidean distance distance = np.linalg.norm(x - y) ** 2 return distance @@ -39,29 +39,28 @@ def n_wise(x, size=2): """ - # Creats an iterator from `x` + # Creates an iterator from `x` iterator = iter(x) - # Splits into pairs and returns a new iterator return iter(lambda: tuple(islice(iterator, size)), ()) def tournament_selection(fitness, n): - """Selects `n` individuals based on a tournament selection algorithm. + """Selects n-individuals based on a tournament selection. Args: fitness (list): List of individuals fitness. n (int): Number of individuals to be selected. Returns: - A list with the indexes of the selected individuals. + Indexes of selected individuals. """ - # Creating a list to append selected individuals + # Creates a list to append selected individuals selected = [] - # For every `n` individual to be selected + # For every n-individual to be selected for _ in range(n): # For every tournament round, we select `TOURNAMENT_SIZE` individuals step = [np.random.choice(fitness) for _ in range(c.TOURNAMENT_SIZE)] @@ -79,7 +78,7 @@ def weighted_wheel_selection(weights): weights (list): List of individuals weights. Returns: - A roulette selected individual. + Weight-based roulette individual. """ @@ -93,7 +92,6 @@ def weighted_wheel_selection(weights): for i, c_sum in enumerate(cumulative_sum): # If individual's cumulative sum is bigger than selection probability if c_sum > prob: - # Returns the individual return i return None diff --git a/opytimizer/math/hyper.py b/opytimizer/math/hyper.py index 3bc60b89..d538c031 100644 --- a/opytimizer/math/hyper.py +++ b/opytimizer/math/hyper.py @@ -1,6 +1,8 @@ """Hypercomplex-based mathematical helpers. 
""" +from functools import wraps + import numpy as np @@ -12,36 +14,80 @@ def norm(array): array (np.array): A 2-dimensional input array. Returns: - The norm calculated over the second axis, such as (2, 4) array shape + Norm calculated over the second axis, such as (2, 4) array shape will result in a norm (2, ) shape. """ - # Calculating the norm over the hypercomplex numbers + # Calculates the norm over a hypercomplex number array_norm = np.linalg.norm(array, axis=1) return array_norm -def span(array, lb, ub): +def span(array, lower_bound, upper_bound): """Spans a hypercomplex number between lower and upper bounds. Args: array (np.array): A 2-dimensional input array. - lb (tuple, np.array): Lower bounds to be spanned. - ub (tuple, np.array): Upper bounds to be spanned. + lb (list, tuple, np.array): Lower bounds to be spanned. + ub (list, tuple, np.array): Upper bounds to be spanned. Returns: - A spanned value that can be used as decision variable in order to - feed a fitness function. + Spanned values that can be used as decision variables. """ - # We need to force lower and upper bounds to be arrays - lb = np.array(lb) - ub = np.array(ub) + # Forces lower and upper bounds to be arrays + lb = np.asarray(lower_bound) + ub = np.asarray(upper_bound) - # Calculating span function + # Calculates the spanning function array_span = (ub - lb) * (norm(array) / np.sqrt(array.shape[1])) + lb return array_span + + +def span_to_hyper_value(lb, ub): + """Spans a hyper-value between lower and upper bounds. + + Args: + lb (list, tuple, np.array): Lower bounds. + ub (list, tuple, np.array): Upper bounds. + + Returns: + The output of the incoming objective function with a spanned input. + + """ + + def _span_to_hyper_value(f): + """Actually decorates the incoming objective function. + + Args: + f (callable): Incoming objective function. + + Returns: + The wrapped objective function. 
+ + """ + + @wraps(f) + def __span_to_hyper_value(x): + """Wraps the objective function for calculating its output. + + Args: + x (np.array): Array of hyper-values. + + Returns: + The objective function itself. + + """ + + # Spans `x` between lower and upper bounds + x = span(x, lb, ub) + + return f(x) + + return __span_to_hyper_value + + return _span_to_hyper_value diff --git a/opytimizer/math/random.py b/opytimizer/math/random.py index 2b81ca71..f04d9a9f 100644 --- a/opytimizer/math/random.py +++ b/opytimizer/math/random.py @@ -11,7 +11,7 @@ def generate_binary_random_number(size=1): size (int): Size of array. Returns: - An binary random number or array. + A binary random number or array. """ @@ -71,7 +71,7 @@ def generate_integer_random_number(low=0, high=1, exclude_value=None, size=None) def generate_uniform_random_number(low=0.0, high=1.0, size=1): - """Generates a random number or array based on an uniform distribution. + """Generates a random number or array based on a uniform distribution. Args: low (float): Lower interval. @@ -79,7 +79,7 @@ def generate_uniform_random_number(low=0.0, high=1.0, size=1): size (int): Size of array. Returns: - An uniform random number or array. + A uniform random number or array. """ diff --git a/opytimizer/optimizers/boolean/__init__.py b/opytimizer/optimizers/boolean/__init__.py index a6a293ab..8c3fc601 100644 --- a/opytimizer/optimizers/boolean/__init__.py +++ b/opytimizer/optimizers/boolean/__init__.py @@ -1,3 +1,7 @@ """A boolean package for all common opytimizer modules. It contains implementations of boolean-based optimizers. 
""" + +from opytimizer.optimizers.boolean.bmrfo import BMRFO +from opytimizer.optimizers.boolean.bpso import BPSO +from opytimizer.optimizers.boolean.umda import UMDA diff --git a/opytimizer/optimizers/boolean/bmrfo.py b/opytimizer/optimizers/boolean/bmrfo.py index 160d6c7a..8300ffb1 100644 --- a/opytimizer/optimizers/boolean/bmrfo.py +++ b/opytimizer/optimizers/boolean/bmrfo.py @@ -4,11 +4,9 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.random as r import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -26,25 +24,24 @@ class BMRFO(Optimizer): """ - def __init__(self, algorithm='BMRFO', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. 
""" logger.info('Overriding class: Optimizer -> BMRFO.') - # Override its parent class with the receiving hyperparams - super(BMRFO, self).__init__(algorithm=algorithm) + # Overrides its parent class with the receiving params + super(BMRFO, self).__init__() # Somersault foraging self.S = np.array([1]) - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -88,7 +85,8 @@ def _cyclone_foraging(self, agents, best_position, i, iteration, n_iterations): # Checks if current iteration proportion is smaller than random generated number if iteration / n_iterations < u: # Generates binary random positions - r_position = r.generate_binary_random_number(size=(agents[i].n_variables, agents[i].n_dimensions)) + r_position = r.generate_binary_random_number( + size=(agents[i].n_variables, agents[i].n_dimensions)) # Checks if the index is equal to zero if i == 0: @@ -177,93 +175,46 @@ def _somersault_foraging(self, position, best_position): return somersault_foraging - def _update(self, agents, best_agent, function, iteration, n_iterations): - """Method that wraps chain, cyclone and somersault foraging updates over all agents and variables. + def update(self, space, function, iteration, n_iterations): + """Wraps Boolean Manta Ray Foraging Optimization over all agents and variables. Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. + space (Space): Space containing agents and update-related information. function (Function): A Function object that will be used as the objective function. - iteration (int): Number of current iteration. + iteration (int): Current iteration. n_iterations (int): Maximum number of iterations. 
""" - # Iterate through all agents - for i, agent in enumerate(agents): + # Iterates through all agents + for i, agent in enumerate(space.agents): # Generates an uniform random number r1 = r.generate_uniform_random_number() # If random number is smaller than 1/2 if r1 < 0.5: # Performs the cyclone foraging - agent.position = self._cyclone_foraging(agents, best_agent.position, i, iteration, n_iterations) + agent.position = self._cyclone_foraging( + space.agents, space.best_agent.position, i, iteration, n_iterations) # If random number is bigger than 1/2 else: # Performs the chain foraging - agent.position = self._chain_foraging(agents, best_agent.position, i) + agent.position = self._chain_foraging(space.agents, space.best_agent.position, i) # Clips the agent's limits - agent.clip_limits() + agent.clip_by_bound() # Evaluates the agent agent.fit = function(agent.position) # If new agent's fitness is better than best - if agent.fit < best_agent.fit: + if agent.fit < space.best_agent.fit: # Replace the best agent's position and fitness with its copy - best_agent.position = copy.deepcopy(agent.position) - best_agent.fit = copy.deepcopy(agent.fit) + space.best_agent.position = copy.deepcopy(agent.position) + space.best_agent.fit = copy.deepcopy(agent.fit) - # Iterate through all agents - for agent in agents: + # Iterates through all agents + for agent in space.agents: # Performs the somersault foraging - agent.position = self._somersault_foraging(agent.position, best_agent.position) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. 
- - Returns: - A History object holding all agents' positions and fitness achieved during the task. - - """ - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, space.best_agent, function, t, space.n_iterations) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history + agent.position = self._somersault_foraging(agent.position, space.best_agent.position) diff --git a/opytimizer/optimizers/boolean/bpso.py b/opytimizer/optimizers/boolean/bpso.py index 1fabe221..dff4af51 100644 --- a/opytimizer/optimizers/boolean/bpso.py +++ b/opytimizer/optimizers/boolean/bpso.py @@ -4,12 +4,9 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.decorator as d import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -29,19 +26,18 @@ class BPSO(Optimizer): """ - def __init__(self, algorithm='BPSO', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. 
- hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> BPSO.') - # Override its parent class with the receiving hyperparams - super(BPSO, self).__init__(algorithm=algorithm) + # Overrides its parent class with the receiving params + super(BPSO, self).__init__() # Cognitive constant self.c1 = np.array([1]) @@ -49,8 +45,8 @@ def __init__(self, algorithm='BPSO', hyperparams=None): # Social constant self.c2 = np.array([1]) - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -84,82 +80,60 @@ def c2(self, c2): self._c2 = c2 - def _update_velocity(self, position, best_position, local_position): - """Updates a particle velocity (eq. 1). - - Args: - position (np.array): Agent's current position. - best_position (np.array): Global best position. - local_position (np.array): Agent's local best position. - - Returns: - The particle's new velocity. + @property + def local_position(self): + """np.array: Array of local positions. """ - # Defining random binary numbers - r1 = r.generate_binary_random_number(position.shape) - r2 = r.generate_binary_random_number(position.shape) - - # Calculating the local and global partials - local_partial = np.logical_and(self.c1, np.logical_xor(r1, np.logical_xor(local_position, position))) - global_partial = np.logical_and(self.c2, np.logical_xor(r2, np.logical_xor(best_position, position))) - - # Updating new velocity - new_velocity = np.logical_or(local_partial, global_partial) + return self._local_position - return new_velocity + @local_position.setter + def local_position(self, local_position): + if not isinstance(local_position, np.ndarray): + raise e.TypeError('`local_position` should be a numpy array') - def _update_position(self, position, velocity): - """Updates a particle position (eq. 2). 
+ self._local_position = local_position - Args: - position (np.array): Agent's current position. - velocity (np.array): Agent's current velocity. - - Returns: - The particle's new position. + @property + def velocity(self): + """np.array: Array of velocities. """ - # Calculates new position - new_position = np.logical_xor(position, velocity) + return self._velocity - return new_position + @velocity.setter + def velocity(self, velocity): + if not isinstance(velocity, np.ndarray): + raise e.TypeError('`velocity` should be a numpy array') - def _update(self, agents, best_agent, local_position, velocity): - """Method that wraps velocity and position updates over all agents and variables. + self._velocity = velocity + + def create_additional_attrs(self, space): + """Creates additional attributes that are used by this optimizer. Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. - local_position (np.array): Array of local best posisitons. - velocity (np.array): Array of current velocities. + space (Space): A Space object containing meta-information. """ - # Iterate through all agents - for i, agent in enumerate(agents): - # Updates current agent velocities - velocity[i] = self._update_velocity(agent.position, best_agent.position, local_position[i]) - - # Updates current agent positions - agent.position = self._update_position(agent.position, velocity[i]) + # Arrays of local positions and velocities + self.local_position = np.zeros((space.n_agents, space.n_variables, space.n_dimensions), dtype=bool) + self.velocity = np.zeros((space.n_agents, space.n_variables, space.n_dimensions), dtype=bool) - @d.pre_evaluate - def _evaluate(self, space, function, local_position): + def evaluate(self, space, function): """Evaluates the search space according to the objective function. Args: space (Space): A Space object that will be evaluated. function (Function): A Function object that will be used as the objective function. 
- local_position (np.array): Array of local best posisitons. """ - # Iterate through all agents + # Iterates through all agents for i, agent in enumerate(space.agents): - # Calculate the fitness value of current agent + # Calculates the fitness value of current agent fit = function(agent.position) # If fitness is better than agent's best fit @@ -168,61 +142,36 @@ def _evaluate(self, space, function, local_position): agent.fit = fit # Also updates the local best position to current's agent position - local_position[i] = copy.deepcopy(agent.position) + self.local_position[i] = copy.deepcopy(agent.position) # If agent's fitness is better than global fitness if agent.fit < space.best_agent.fit: # Makes a deep copy of agent's local best position and fitness to the best agent - space.best_agent.position = copy.deepcopy(local_position[i]) + space.best_agent.position = copy.deepcopy(self.local_position[i]) space.best_agent.fit = copy.deepcopy(agent.fit) - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. + def update(self, space): + """Wraps Boolean Particle Swarm Optimization over all agents and variables. Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. + space (Space): Space containing agents and update-related information. 
""" - # Instanciating array of local positions and velocity - local_position = np.zeros((space.n_agents, space.n_variables, space.n_dimensions), dtype=bool) - velocity = np.zeros((space.n_agents, space.n_variables, space.n_dimensions), dtype=bool) - - # Initial search space evaluation - self._evaluate(space, function, local_position, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, space.best_agent, local_position, velocity) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, local_position, hook=pre_evaluate) - - # Every iteration, we need to dump agents, local positions and best agent - history.dump(agents=space.agents, local=local_position, best_agent=space.best_agent) + # Iterates through all agents + for i, agent in enumerate(space.agents): + # Defines random binary numbers + r1 = r.generate_binary_random_number(agent.position.shape) + r2 = r.generate_binary_random_number(agent.position.shape) - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() + # Calculates the local and global partials + local_partial = np.logical_and(self.c1, np.logical_xor( + r1, np.logical_xor(self.local_position[i], agent.position))) + global_partial = np.logical_and(self.c2, np.logical_xor( + r2, np.logical_xor(space.best_agent.position, agent.position))) - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') + # Updates current agent velocities (eq. 
1) + self.velocity[i] = np.logical_or(local_partial, global_partial) - return history + # Updates current agent positions (eq. 2) + agent.position = np.logical_xor(agent.position, self.velocity[i]) diff --git a/opytimizer/optimizers/boolean/umda.py b/opytimizer/optimizers/boolean/umda.py index 1ab2653e..6c6c6733 100644 --- a/opytimizer/optimizers/boolean/umda.py +++ b/opytimizer/optimizers/boolean/umda.py @@ -2,11 +2,9 @@ """ import numpy as np -from tqdm import tqdm import opytimizer.math.random as r import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -24,17 +22,16 @@ class UMDA(Optimizer): """ - def __init__(self, algorithm='UMDA', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. 
""" - # Override its parent class with the receiving hyperparams - super(UMDA, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(UMDA, self).__init__() # Probability of selection self.p_selection = 0.75 @@ -45,8 +42,8 @@ def __init__(self, algorithm='UMDA', hyperparams=None): # Distribution upper bound self.upper_bound = 0.95 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -99,8 +96,7 @@ def upper_bound(self, upper_bound): if upper_bound < 0 or upper_bound > 1: raise e.ValueError('`upper_bound` should be between 0 and 1') if upper_bound < self.lower_bound: - raise e.ValueError( - '`upper_bound` should be greater than `lower_bound') + raise e.ValueError('`upper_bound` should be greater than `lower_bound') self._upper_bound = upper_bound @@ -150,77 +146,29 @@ def _sample_position(self, probs): return new_position - def _update(self, agents): - """Method that wraps selection, probability calculation and - position sampling over all agents and variables. + def update(self, space): + """Wraps Univariate Marginal Distribution Algorithm over all agents and variables. Args: - agents (list): List of agents. + space (Space): Space containing agents and update-related information. 
""" - # Retrieving the number of agents - n_agents = len(agents) + # Retrieves the number of agents + n_agents = len(space.agents) # Selects the individuals through ranking n_selected = int(n_agents * self.p_selection) - # Sorting agents - agents.sort(key=lambda x: x.fit) + # Sorts agents + space.agents.sort(key=lambda x: x.fit) # Calculates the probability of ocurrence from selected agents - probs = self._calculate_probability(agents[:n_selected]) + probs = self._calculate_probability(space.agents[:n_selected]) # Iterates through every agents - for agent in agents: + for agent in space.agents: # Samples new agent's position agent.position = self._sample_position(probs) - # Checking its limits - agent.clip_limits() - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. 
- - """ - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history + # Checks its limits + agent.clip_by_bound() diff --git a/opytimizer/optimizers/evolutionary/__init__.py b/opytimizer/optimizers/evolutionary/__init__.py index 4122e1ce..86445117 100644 --- a/opytimizer/optimizers/evolutionary/__init__.py +++ b/opytimizer/optimizers/evolutionary/__init__.py @@ -1,3 +1,15 @@ """An evolutionary package for all common opytimizer modules. It contains implementations of evolutionary-based optimizers. 
""" + +from opytimizer.optimizers.evolutionary.bsa import BSA +from opytimizer.optimizers.evolutionary.cro import CRO +from opytimizer.optimizers.evolutionary.de import DE +from opytimizer.optimizers.evolutionary.ep import EP +from opytimizer.optimizers.evolutionary.es import ES +from opytimizer.optimizers.evolutionary.foa import FOA +from opytimizer.optimizers.evolutionary.ga import GA +from opytimizer.optimizers.evolutionary.gp import GP +from opytimizer.optimizers.evolutionary.hs import HS, IHS, GHS, SGHS, NGHS, GOGHS +from opytimizer.optimizers.evolutionary.iwo import IWO +from opytimizer.optimizers.evolutionary.rra import RRA diff --git a/opytimizer/optimizers/evolutionary/bsa.py b/opytimizer/optimizers/evolutionary/bsa.py index 1928d2ef..b8cbd314 100644 --- a/opytimizer/optimizers/evolutionary/bsa.py +++ b/opytimizer/optimizers/evolutionary/bsa.py @@ -4,11 +4,9 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.random as r import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -27,19 +25,18 @@ class BSA(Optimizer): """ - def __init__(self, algorithm='BSA', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. 
""" logger.info('Overriding class: Optimizer -> BSA.') - # Override its parent class with the receiving hyperparams - super(BSA, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(BSA, self).__init__() # Experience from previous generation self.F = 3.0 @@ -47,8 +44,8 @@ def __init__(self, algorithm='BSA', hyperparams=None): # Number of non-crosses self.mix_rate = 1 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -84,12 +81,37 @@ def mix_rate(self, mix_rate): self._mix_rate = mix_rate - def _permute(self, agents, old_agents): + @property + def old_agents(self): + """list: List of historical agents. + + """ + + return self._old_agents + + @old_agents.setter + def old_agents(self, old_agents): + if not isinstance(old_agents, list): + raise e.TypeError('`old_agents` should be a list') + + self._old_agents = old_agents + + def create_additional_attrs(self, space): + """Creates additional attributes that are used by this optimizer. + + Args: + space (Space): A Space object containing meta-information. + + """ + + # Copies a list of agents into the historical population + self.old_agents = copy.deepcopy(space.agents) + + def _permute(self, agents): """Performs the permuting operator. Args: agents (list): List of agents. - old_agents (list): List of historical agents. 
""" @@ -100,21 +122,20 @@ def _permute(self, agents, old_agents): # If `a` is smaller than `b` if a < b: # Performs a full copy on the historical population - old_agents = copy.deepcopy(agents) + self.old_agents = copy.deepcopy(agents) # Generates two integers `i` and `j` i = r.generate_integer_random_number(high=len(agents)) j = r.generate_integer_random_number(high=len(agents), exclude_value=i) # Swap the agents - old_agents[i], old_agents[j] = copy.deepcopy(old_agents[j]), copy.deepcopy(old_agents[i]) + self.old_agents[i], self.old_agents[j] = copy.deepcopy(self.old_agents[j]), copy.deepcopy(self.old_agents[i]) - def _mutate(self, agents, old_agents): + def _mutate(self, agents): """Performs the mutation operator. Args: agents (list): List of agents. - old_agents (list): List of historical agents. Returns: A list holding the trial agents. @@ -128,12 +149,12 @@ def _mutate(self, agents, old_agents): r1 = r.generate_uniform_random_number() # Iterates through all populations - for (trial_agent, agent, old_agent) in zip(trial_agents, agents, old_agents): + for (trial_agent, agent, old_agent) in zip(trial_agents, agents, self.old_agents): # Updates the new trial agent's position trial_agent.position = agent.position + self.F * r1 * (old_agent.position - agent.position) # Clips its limits - trial_agent.clip_limits() + trial_agent.clip_by_bound() return trial_agents @@ -142,7 +163,7 @@ def _crossover(self, agents, trial_agents): Args: agents (list): List of agents. - old_agents (list): List of trial agents. + trial_agents (list): List of trial agents. """ @@ -194,27 +215,26 @@ def _crossover(self, agents, trial_agents): # Makes a deepcopy on such position trial_agents[i].position[j] = copy.deepcopy(agents[i].position[j]) - def _update(self, agents, function, old_agents): - """Method that wraps the update pipeline over all agents and variables. + def update(self, space, function): + """Wraps Backtracking Search Optimization Algorithm over all agents and variables. 
Args: - agents (list): List of agents. - function (Function): A function object. - old_agents (list): List of historical agents. + space (Space): Space containing agents and update-related information. + function (Function): A Function object that will be used as the objective function. """ # Performs the permuting operator - self._permute(agents, old_agents) + self._permute(space.agents) - # Calculate the trial agents based on the mutation operator - trial_agents = self._mutate(agents, old_agents) + # Calculates the trial agents based on the mutation operator + trial_agents = self._mutate(space.agents) # Performs the crossover - self._crossover(agents, trial_agents) + self._crossover(space.agents, trial_agents) # Iterates through all agents and trial agents - for (agent, trial_agent) in zip(agents, trial_agents): + for (agent, trial_agent) in zip(space.agents, trial_agents): # Calculates the trial agent's fitness trial_agent.fit = function(trial_agent.position) @@ -223,53 +243,3 @@ def _update(self, agents, function, old_agents): # Copies the trial agent's position and fitness to the agent's agent.position = copy.deepcopy(trial_agent.position) agent.fit = copy.deepcopy(trial_agent.fit) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. 
- - """ - - # Makes a deepcopy of agents into the historical population - old_agents = copy.deepcopy(space.agents) - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, function, old_agents) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history diff --git a/opytimizer/optimizers/evolutionary/cro.py b/opytimizer/optimizers/evolutionary/cro.py index 56d8ca62..b67c869a 100644 --- a/opytimizer/optimizers/evolutionary/cro.py +++ b/opytimizer/optimizers/evolutionary/cro.py @@ -7,7 +7,7 @@ from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e import opytimizer.utils.history as h import opytimizer.utils.logging as l @@ -29,21 +29,20 @@ class CRO(Optimizer): """ - def __init__(self, algorithm='CRO', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. 
""" logger.info('Overriding class: Optimizer -> CRO.') - # Override its parent class with the receiving hyperparams - super(CRO, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(CRO, self).__init__() - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') diff --git a/opytimizer/optimizers/evolutionary/de.py b/opytimizer/optimizers/evolutionary/de.py index 608f422b..baf84eef 100644 --- a/opytimizer/optimizers/evolutionary/de.py +++ b/opytimizer/optimizers/evolutionary/de.py @@ -4,12 +4,10 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.distribution as d import opytimizer.math.random as r import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -28,17 +26,16 @@ class DE(Optimizer): """ - def __init__(self, algorithm='DE', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ - # Override its parent class with the receiving hyperparams - super(DE, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(DE, self).__init__() # Crossover probability self.CR = 0.9 @@ -46,8 +43,8 @@ def __init__(self, algorithm='DE', hyperparams=None): # Differential weight self.F = 0.7 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -117,79 +114,31 @@ def _mutate_agent(self, agent, alpha, beta, gamma): return a - def _update(self, agents, function): - """Method that wraps selection and mutation updates over all - agents and variables (eq. 1-4). 
+ def update(self, space, function): + """Wraps Differential Evolution over all agents and variables (eq. 1-4). Args: - agents (list): List of agents. + space (Space): Space containing agents and update-related information. function (Function): A Function object that will be used as the objective function. """ - # Iterate through all agents - for i, agent in enumerate(agents): + # Iterates through all agents + for i, agent in enumerate(space.agents): # Randomly picks three distinct other agents, not including current one - C = d.generate_choice_distribution(np.setdiff1d(range(0, len(agents)), i), size=3) + C = d.generate_choice_distribution(np.setdiff1d(range(0, len(space.agents)), i), size=3) # Mutates the current agent - a = self._mutate_agent(agent, agents[C[0]], agents[C[1]], agents[C[2]]) + a = self._mutate_agent(agent, space.agents[C[0]], space.agents[C[1]], space.agents[C[2]]) - # Check agent limits - a.clip_limits() + # Checks agent's limits + a.clip_by_bound() # Calculates the fitness for the temporary position a.fit = function(a.position) # If new fitness is better than agent's fitness if a.fit < agent.fit: - # Copy its position and fitness to the agent + # Copies its position and fitness to the agent agent.position = copy.deepcopy(a.position) agent.fit = copy.deepcopy(a.fit) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. 
- - """ - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, function) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history diff --git a/opytimizer/optimizers/evolutionary/ep.py b/opytimizer/optimizers/evolutionary/ep.py index 091af292..c0769a3f 100644 --- a/opytimizer/optimizers/evolutionary/ep.py +++ b/opytimizer/optimizers/evolutionary/ep.py @@ -4,11 +4,9 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.random as r import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -27,17 +25,16 @@ class EP(Optimizer): """ - def __init__(self, algorithm='EP', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. 
""" - # Override its parent class with the receiving hyperparams - super(EP, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(EP, self).__init__() # Size of bout during the tournament selection self.bout_size = 0.1 @@ -45,8 +42,8 @@ def __init__(self, algorithm='EP', hyperparams=None): # Clipping ratio to helps the algorithm's convergence self.clip_ratio = 0.05 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -84,13 +81,47 @@ def clip_ratio(self, clip_ratio): self._clip_ratio = clip_ratio - def _mutate_parent(self, agent, function, strategy): + @property + def strategy(self): + """np.array: Array of strategies. + + """ + + return self._strategy + + @strategy.setter + def strategy(self, strategy): + if not isinstance(strategy, np.ndarray): + raise e.TypeError('`strategy` should be a numpy array') + + self._strategy = strategy + + def create_additional_attrs(self, space): + """Creates additional attributes that are used by this optimizer. + + Args: + space (Space): A Space object containing meta-information. + + """ + + # Array of strategies + self.strategy = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) + + # Iterates through all agents + for i in range(space.n_agents): + # For every decision variable + for j, (lb, ub) in enumerate(zip(space.lb, space.ub)): + # Initializes the strategy array with the proposed EP distance + self.strategy[i][j] = 0.05 * r.generate_uniform_random_number( + 0, ub - lb, size=space.agents[i].n_dimensions) + + def _mutate_parent(self, agent, index, function): """Mutates a parent into a new child (eq. 5.1). Args: agent (Agent): An agent instance to be reproduced. + index (int): Index of current agent. function (Function): A Function object that will be used as the objective function. - strategy (np.array): An array holding the strategies that conduct the searching process. 
Returns: A mutated child. @@ -104,21 +135,21 @@ def _mutate_parent(self, agent, function, strategy): r1 = r.generate_gaussian_random_number() # Updates its position - a.position += strategy * r1 + a.position += self.strategy[index] * r1 # Clips its limits - a.clip_limits() + a.clip_by_bound() # Calculates its fitness a.fit = function(a.position) return a - def _update_strategy(self, strategy, lower_bound, upper_bound): + def _update_strategy(self, index, lower_bound, upper_bound): """Updates the strategy and performs a clipping process to help its convergence (eq. 5.2). Args: - strategy (np.array): An strategy array to be updated. + index (int): Index of current agent. lower_bound (np.array): An array holding the lower bounds. upper_bound (np.array): An array holding the upper bounds. @@ -128,130 +159,69 @@ def _update_strategy(self, strategy, lower_bound, upper_bound): """ # Calculates the number of variables and dimensions - n_variables, n_dimensions = strategy.shape[0], strategy.shape[1] + n_variables, n_dimensions = self.strategy.shape[1], self.strategy.shape[2] # Generates a uniform random number r1 = r.generate_gaussian_random_number(size=(n_variables, n_dimensions)) # Calculates the new strategy - new_strategy = strategy + r1 * (np.sqrt(np.abs(strategy))) + self.strategy[index] += r1 * (np.sqrt(np.abs(self.strategy[index]))) # For every decision variable for j, (lb, ub) in enumerate(zip(lower_bound, upper_bound)): # Uses the clip ratio to help the convergence - new_strategy[j] = np.clip(new_strategy[j], lb, ub) * self.clip_ratio - - return new_strategy + self.strategy[index][j] = np.clip(self.strategy[index][j], lb, ub) * self.clip_ratio - def _update(self, agents, n_agents, function, strategy): - """Method that wraps evolution over all agents and variables. + def update(self, space, function): + """Wraps Evolutionary Programming over all agents and variables. Args: - agents (list): List of agents. - n_agents (int): Number of possible agents in the space. 
+ space (Space): Space containing agents and update-related information. function (Function): A Function object that will be used as the objective function. - strategy (np.array): An array of strategies. - - Returns: - A new population with more fitted individuals. """ - # Creating a list for the produced children + # Calculates the number of agents + n_agents = len(space.agents) + + # Creates a list for the produced children children = [] - # Iterate through all agents - for i, agent in enumerate(agents): + # Iterates through all agents + for i, agent in enumerate(space.agents): # Mutates a parent and generates a new child - a = self._mutate_parent(agent, function, strategy[i]) + a = self._mutate_parent(agent, i, function) # Updates the strategy - strategy[i] = self._update_strategy(strategy[i], agent.lb, agent.ub) + self._update_strategy(i, agent.lb, agent.ub) # Appends the mutated agent to the children children.append(a) # Joins both populations - agents += children + space.agents += children # Number of individuals to be selected n_individuals = int(n_agents * self.bout_size) # Creates an empty array of wins - wins = np.zeros(len(agents)) + wins = np.zeros(len(space.agents)) - # Iterate through all agents in the new population - for i, agent in enumerate(agents): + # Iterates through all agents in the new population + for i, agent in enumerate(space.agents): # Iterate through all tournament individuals for _ in range(n_individuals): # Gathers a random index - index = r.generate_integer_random_number(0, len(agents)) + index = r.generate_integer_random_number(0, len(space.agents)) # If current agent's fitness is smaller than selected one - if agent.fit < agents[index].fit: + if agent.fit < space.agents[index].fit: # Increases its winning by one wins[i] += 1 # Sorts the agents list based on its winnings - agents = [agents for _, agents in sorted( - zip(wins, agents), key=lambda pair: pair[0], reverse=True)] - - return agents[:n_agents] - - def run(self, space, 
function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. - - """ - - # Instantiate an array of strategies - strategy = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) - - # Iterate through all agents - for i in range(space.n_agents): - # For every decision variable - for j, (lb, ub) in enumerate(zip(space.lb, space.ub)): - # Initializes the strategy array with the proposed EP distance - strategy[i][j] = 0.05 * r.generate_uniform_random_number( - 0, ub - lb, size=space.agents[i].n_dimensions) - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - space.agents = self._update(space.agents, space.n_agents, function, strategy) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: 
{space.best_agent.position}') + space.agents = [agents for _, agents in sorted( + zip(wins, space.agents), key=lambda pair: pair[0], reverse=True)] - return history + # Gathers the best `n_agents` + space.agents = space.agents[:n_agents] diff --git a/opytimizer/optimizers/evolutionary/es.py b/opytimizer/optimizers/evolutionary/es.py index c81be96f..53ae8514 100644 --- a/opytimizer/optimizers/evolutionary/es.py +++ b/opytimizer/optimizers/evolutionary/es.py @@ -4,11 +4,9 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.random as r import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -27,23 +25,22 @@ class ES(Optimizer): """ - def __init__(self, algorithm='ES', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ - # Override its parent class with the receiving hyperparams - super(ES, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(ES, self).__init__() # Ratio of children in the population self.child_ratio = 0.5 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -64,13 +61,65 @@ def child_ratio(self, child_ratio): self._child_ratio = child_ratio - def _mutate_parent(self, agent, function, strategy): + @property + def n_children(self): + """int: Number of children. 
+ + """ + + return self._n_children + + @n_children.setter + def n_children(self, n_children): + if not isinstance(n_children, int): + raise e.TypeError('`n_children` should be an integer') + if n_children < 0: + raise e.ValueError('`n_children` should be >= 0') + + self._n_children = n_children + + @property + def strategy(self): + """np.array: Array of strategies. + + """ + + return self._strategy + + @strategy.setter + def strategy(self, strategy): + if not isinstance(strategy, np.ndarray): + raise e.TypeError('`strategy` should be a numpy array') + + self._strategy = strategy + + def create_additional_attrs(self, space): + """Creates additional attributes that are used by this optimizer. + + Args: + space (Space): A Space object containing meta-information. + + """ + + # Number of children and array of strategies + self.n_children = int(space.n_agents * self.child_ratio) + self.strategy = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) + + # Iterates through all agents + for i in range(self.n_children): + # For every decision variable + for j, (lb, ub) in enumerate(zip(space.lb, space.ub)): + # Initializes the strategy array with the proposed EP distance + self.strategy[i][j] = 0.05 * r.generate_uniform_random_number( + 0, ub - lb, size=space.agents[i].n_dimensions) + + def _mutate_parent(self, agent, index, function): """Mutates a parent into a new child (eq. 2). Args: agent (Agent): An agent instance to be reproduced. + index (int): Index of current agent. function (Function): A Function object that will be used as the objective function. - strategy (np.array): An array holding the strategies that conduct the searching process. Returns: A mutated child. 
@@ -84,21 +133,21 @@ def _mutate_parent(self, agent, function, strategy): r1 = r.generate_gaussian_random_number() # Updates its position - a.position += strategy * r1 + a.position += self.strategy[index] * r1 # Clips its limits - a.clip_limits() + a.clip_by_bound() # Calculates its fitness a.fit = function(a.position) return a - def _update_strategy(self, strategy): + def _update_strategy(self, index): """Updates the strategy (eq. 5-10). Args: - strategy (np.array): An strategy array to be updated. + index (int): Index of current agent. Returns: The updated strategy. @@ -106,7 +155,7 @@ def _update_strategy(self, strategy): """ # Calculates the number of variables and dimensions - n_variables, n_dimensions = strategy.shape[0], strategy.shape[1] + n_variables, n_dimensions = self.strategy.shape[1], self.strategy.shape[2] # Calculates the mutation strength and its complementary tau = 1 / np.sqrt(2 * n_variables) @@ -117,101 +166,35 @@ def _update_strategy(self, strategy): r2 = r.generate_gaussian_random_number(size=(n_variables, n_dimensions)) # Calculates the new strategy - new_strategy = strategy * np.exp(tau_p * r1 + tau * r2) - - return new_strategy + self.strategy[index] *= np.exp(tau_p * r1 + tau * r2) - def _update(self, agents, n_agents, function, n_children, strategy): - """Method that wraps evolution over all agents and variables. + def update(self, space, function): + """Wraps Evolution Strategies over all agents and variables. Args: - agents (list): List of agents. - n_agents (int): Number of possible agents in the space. + space (Space): Space containing agents and update-related information. function (Function): A Function object that will be used as the objective function. - n_children (int): Number of possible children in the space. - strategy (np.array): An strategy array. 
""" - # Creating a list for the produced children + # Calculates the number of agents + n_agents = len(space.agents) + + # Creates a list for the produced children children = [] # Iterate through all children - for i in range(n_children): + for i in range(self.n_children): # Mutates a parent and generates a new child - a = self._mutate_parent(agents[i], function, strategy[i]) + a = self._mutate_parent(space.agents[i], i, function) # Updates the strategy - strategy[i] = self._update_strategy(strategy[i]) + self._update_strategy(i) # Appends the mutated agent to the children children.append(a) - # Joins both populations - agents += children - - # Sorting agents - agents.sort(key=lambda x: x.fit) - - return agents[:n_agents] - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. 
- - """ - - # Calculates the number of possible children - n_children = int(space.n_agents * self.child_ratio) - - # Instantiate an array of strategies - strategy = np.zeros((n_children, space.n_variables, space.n_dimensions)) - - # Iterate through all possible children - for i in range(n_children): - # For every decision variable - for j, (lb, ub) in enumerate(zip(space.lb, space.ub)): - # Initializes the strategy array with the proposed ES distance - strategy[i][j] = 0.05 * r.generate_uniform_random_number( - 0, ub - lb, size=space.agents[i].n_dimensions) - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - space.agents = self._update(space.agents, space.n_agents, function, n_children, strategy) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history + # Joins both populations, sorts agents and gathers best `n_agents` + space.agents += children + space.agents.sort(key=lambda x: x.fit) + space.agents = space.agents[:n_agents] diff --git a/opytimizer/optimizers/evolutionary/foa.py b/opytimizer/optimizers/evolutionary/foa.py index f2928bc0..ec00e225 100644 --- a/opytimizer/optimizers/evolutionary/foa.py +++ 
b/opytimizer/optimizers/evolutionary/foa.py @@ -7,7 +7,7 @@ from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e import opytimizer.utils.history as h import opytimizer.utils.logging as l @@ -28,21 +28,20 @@ class FOA(Optimizer): """ - def __init__(self, algorithm='FOA', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> FOA.') - # Override its parent class with the receiving hyperparams - super(FOA, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(FOA, self).__init__() - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') diff --git a/opytimizer/optimizers/evolutionary/ga.py b/opytimizer/optimizers/evolutionary/ga.py index fc4167f6..0c7095fa 100644 --- a/opytimizer/optimizers/evolutionary/ga.py +++ b/opytimizer/optimizers/evolutionary/ga.py @@ -4,14 +4,12 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.distribution as d import opytimizer.math.general as g import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -29,17 +27,16 @@ class GA(Optimizer): """ - def __init__(self, algorithm='GA', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. 
+ params (dict): Contains key-value parameters to the meta-heuristics. """ - # Override its parent class with the receiving hyperparams - super(GA, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(GA, self).__init__() # Probability of selection self.p_selection = 0.75 @@ -50,8 +47,8 @@ def __init__(self, algorithm='GA', hyperparams=None): # Probability of crossover self.p_crossover = 0.5 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -208,23 +205,23 @@ def _mutation(self, alpha, beta): return alpha, beta - def _update(self, agents, function): - """Method that wraps selection, crossover and mutation over all agents and variables. + def update(self, space, function): + """Wraps Genetic Algorithm over all agents and variables. Args: - agents (list): List of agents. + space (Space): Space containing agents and update-related information. function (Function): A Function object that will be used as the objective function. 
""" - # Creating a list to hold the new population + # Creates a list to hold the new population new_agents = [] - # Retrieving the number of agents - n_agents = len(agents) + # Retrieves the number of agents + n_agents = len(space.agents) # Calculates a list of fitness from every agent - fitness = [agent.fit + c.EPSILON for agent in agents] + fitness = [agent.fit + c.EPSILON for agent in space.agents] # Selects the parents selected = self._roulette_selection(n_agents, fitness) @@ -232,12 +229,12 @@ def _update(self, agents, function): # For every pair of selected parents for s in g.n_wise(selected): # Performs the crossover and mutation - alpha, beta = self._crossover(agents[s[0]], agents[s[1]]) + alpha, beta = self._crossover(space.agents[s[0]], space.agents[s[1]]) alpha, beta = self._mutation(alpha, beta) # Checking `alpha` and `beta` limits - alpha.clip_limits() - beta.clip_limits() + alpha.clip_by_bound() + beta.clip_by_bound() # Calculates new fitness for `alpha` and `beta` alpha.fit = function(alpha.position) @@ -246,57 +243,7 @@ def _update(self, agents, function): # Appends the mutated agents to the children new_agents.extend([alpha, beta]) - # Joins both populations - agents += new_agents - - # Sorting agents - agents.sort(key=lambda x: x.fit) - - return agents[:n_agents] - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. 
- - """ - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - space.agents = self._update(space.agents, function) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history + # Joins both populations, sort agents and gathers best `n_agents` + space.agents += new_agents + space.agents.sort(key=lambda x: x.fit) + space.agents = space.agents[:n_agents] diff --git a/opytimizer/optimizers/evolutionary/gp.py b/opytimizer/optimizers/evolutionary/gp.py index 58c7ee31..9b9f282c 100644 --- a/opytimizer/optimizers/evolutionary/gp.py +++ b/opytimizer/optimizers/evolutionary/gp.py @@ -4,13 +4,10 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.general as g import opytimizer.math.random as r -import opytimizer.utils.decorator as d import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -28,19 +25,18 @@ class GP(Optimizer): """ - def __init__(self, algorithm='GP', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. 
- hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> GP.') - # Override its parent class with the receiving hyperparams - super(GP, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(GP, self).__init__() # Probability of reproduction self.p_reproduction = 0.25 @@ -54,8 +50,8 @@ def __init__(self, algorithm='GP', hyperparams=None): # Nodes' prunning ratio self.prunning_ratio = 0 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -159,7 +155,7 @@ def _reproduction(self, space): fitness = [agent.fit for agent in space.agents] # Number of individuals to be reproducted - n_individuals = int(space.n_trees * self.p_reproduction) + n_individuals = int(space.n_agents * self.p_reproduction) # Gathers a list of selected individuals to be replaced selected = g.tournament_selection(fitness, n_individuals) @@ -190,7 +186,7 @@ def _mutation(self, space): fitness = [agent.fit for agent in space.agents] # Number of individuals to be mutated - n_individuals = int(space.n_trees * self.p_mutation) + n_individuals = int(space.n_agents * self.p_mutation) # Gathers a list of selected individuals to be replaced selected = g.tournament_selection(fitness, n_individuals) @@ -229,7 +225,7 @@ def _mutate(self, space, tree, max_nodes): # Deep copying a new mutated tree from initial tree mutated_tree = copy.deepcopy(tree) - # Calculating mutation point + # Calculates mutation point mutation_point = int(r.generate_uniform_random_number(2, max_nodes)) # Finds the node at desired mutation point @@ -238,7 +234,7 @@ def _mutate(self, space, tree, max_nodes): # If the mutation point's parent is not a root (this may happen when the mutation point is a function), # and find_node() stops at a terminal node whose father is a root if 
sub_tree: - # Creating a new random sub-tree + # Creates a new random sub-tree branch = space.grow(space.min_depth, space.max_depth) # Checks if sub-tree should be positioned in the left @@ -281,7 +277,7 @@ def _crossover(self, space): fitness = [agent.fit for agent in space.agents] # Number of individuals to be crossovered - n_individuals = int(space.n_trees * self.p_crossover) + n_individuals = int(space.n_agents * self.p_crossover) # Checks if `n_individuals` is an odd number if n_individuals % 2 != 0: @@ -321,19 +317,19 @@ def _cross(self, father, mother, max_father, max_mother): """ - # Copying father tree to the father's offspring structure + # Copies father tree to the father's offspring structure father_offspring = copy.deepcopy(father) - # Calculating father's crossover point + # Calculates father's crossover point father_point = int(r.generate_uniform_random_number(2, max_father)) # Finds the node at desired crossover point sub_father, flag_father = father_offspring.find_node(father_point) - # Copying mother tree to the mother's offspring structure + # Copies mother tree to the mother's offspring structure mother_offspring = copy.deepcopy(mother) - # Calculating mother's crossover point + # Calculates mother's crossover point mother_point = int(r.generate_uniform_random_number(2, max_mother)) # Finds the node at desired crossover point @@ -408,25 +404,7 @@ def _cross(self, father, mother, max_father, max_mother): return father, mother - def _update(self, space): - """Method that wraps reproduction, crossover and mutation operators over all trees. - - Args: - space (TreeSpace): A TreeSpace object. - - """ - - # Performs the reproduction - self._reproduction(space) - - # Performs the crossover - self._crossover(space) - - # Performs the mutation - self._mutation(space) - - @d.pre_evaluate - def _evaluate(self, space, function): + def evaluate(self, space, function): """Evaluates the search space according to the objective function. 
Args: @@ -441,7 +419,7 @@ def _evaluate(self, space, function): agent.position = copy.deepcopy(tree.position) # Checks the agent limits - agent.clip_limits() + agent.clip_by_bound() # Calculates the fitness value of the agent agent.fit = function(agent.position) @@ -453,48 +431,19 @@ def _evaluate(self, space, function): space.best_agent.position = copy.deepcopy(agent.position) space.best_agent.fit = copy.deepcopy(agent.fit) - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. + def update(self, space): + """Wraps Genetic Programming over all trees and variables. Args: - space (TreeSpace): A TreeSpace object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. + space (TreeSpace): TreeSpace containing agents and update-related information. 
""" - # Initial tree space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating trees with designed operators - self._update(space) - - # After the update, we need to re-evaluate the tree space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, - best_agent=space.best_agent, - best_tree=space.best_tree) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() + # Performs the reproduction + self._reproduction(space) - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') + # Performs the crossover + self._crossover(space) - return history + # Performs the mutation + self._mutation(space) diff --git a/opytimizer/optimizers/evolutionary/hs.py b/opytimizer/optimizers/evolutionary/hs.py index 648856a1..89a1c90c 100644 --- a/opytimizer/optimizers/evolutionary/hs.py +++ b/opytimizer/optimizers/evolutionary/hs.py @@ -4,12 +4,10 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -28,19 +26,18 @@ class HS(Optimizer): """ - def __init__(self, algorithm='HS', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. 
+ params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> HS.') - # Override its parent class with the receiving hyperparams - super(HS, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(HS, self).__init__() # Harmony memory considering rate self.HMCR = 0.7 @@ -51,8 +48,8 @@ def __init__(self, algorithm='HS', hyperparams=None): # Bandwidth parameter self.bw = 1.0 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -152,79 +149,32 @@ def _generate_new_harmony(self, agents): return a - def _update(self, agents, function): - """Method that wraps the update pipeline over all agents and variables. + def update(self, space, function): + """Wraps Harmony Search over all agents and variables. Args: - agents (list): List of agents. - function (Function): A function object. + space (Space): Space containing agents and update-related information. + function (Function): A Function object that will be used as the objective function. """ # Generates a new harmony - agent = self._generate_new_harmony(agents) + agent = self._generate_new_harmony(space.agents) - # Checking agent limits - agent.clip_limits() + # Checks agent limits + agent.clip_by_bound() # Calculates the new harmony fitness agent.fit = function(agent.position) - # Sorting agents - agents.sort(key=lambda x: x.fit) + # Sorts agents + space.agents.sort(key=lambda x: x.fit) # If newly generated agent fitness is better - if agent.fit < agents[-1].fit: + if agent.fit < space.agents[-1].fit: # Updates the corresponding agent's position and fitness - agents[-1].position = copy.deepcopy(agent.position) - agents[-1].fit = copy.deepcopy(agent.fit) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. 
- function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. - - """ - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, function) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history + space.agents[-1].position = copy.deepcopy(agent.position) + space.agents[-1].fit = copy.deepcopy(agent.fit) class IHS(HS): @@ -240,12 +190,11 @@ class IHS(HS): """ - def __init__(self, algorithm='IHS', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. 
""" @@ -263,8 +212,8 @@ def __init__(self, algorithm='IHS', hyperparams=None): # Maximum bandwidth parameter self.bw_max = 10 - # Override its parent class with the receiving hyperparams - super(IHS, self).__init__(algorithm, hyperparams) + # Overrides its parent class with the receiving params + super(IHS, self).__init__(params) logger.info('Class overrided.') @@ -340,61 +289,40 @@ def bw_max(self, bw_max): self._bw_max = bw_max - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. + def update(self, space, function, iteration, n_iterations): + """Wraps Improved Harmony Search over all agents and variables. Args: - space (Space): A Space object that will be evaluated. + space (Space): Space containing agents and update-related information. function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. + iteration (int): Current iteration. + n_iterations (int): Maximum number of iterations. 
""" - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating pitch adjusting rate - self.PAR = self.PAR_min + \ - (((self.PAR_max - self.PAR_min) / space.n_iterations) * t) - - # Updating bandwidth parameter - self.bw = self.bw_max * \ - np.exp((np.log(self.bw_min / self.bw_max) / - space.n_iterations) * t) - - # Updating agents - self._update(space.agents, function) + # Updates pitch adjusting rate + self.PAR = self.PAR_min + (((self.PAR_max - self.PAR_min) / n_iterations) * iteration) - # Checking if agents meet the bounds limits - space.clip_limits() + # Updates bandwidth parameter + self.bw = self.bw_max * np.exp((np.log(self.bw_min / self.bw_max) / n_iterations) * iteration) - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) + # Generates a new harmony + agent = self._generate_new_harmony(space.agents) - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) + # Checks agent limits + agent.clip_by_bound() - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() + # Calculates the new harmony fitness + agent.fit = function(agent.position) - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') + # Sorts agents + space.agents.sort(key=lambda x: x.fit) - return history + # If newly generated agent fitness is better + if agent.fit < space.agents[-1].fit: + # Updates the corresponding agent's position and fitness + space.agents[-1].position = copy.deepcopy(agent.position) + 
space.agents[-1].fit = copy.deepcopy(agent.fit) class GHS(IHS): @@ -409,19 +337,18 @@ class GHS(IHS): """ - def __init__(self, algorithm='GHS', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: IHS -> GHS.') - # Override its parent class with the receiving hyperparams - super(GHS, self).__init__(algorithm, hyperparams) + # Overrides its parent class with the receiving params + super(GHS, self).__init__(params) logger.info('Class overrided.') @@ -484,12 +411,11 @@ class SGHS(HS): """ - def __init__(self, algorithm='SGHS', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ @@ -510,8 +436,8 @@ def __init__(self, algorithm='SGHS', hyperparams=None): # Maximum bandwidth parameter self.bw_max = 10 - # Override its parent class with the receiving hyperparams - super(SGHS, self).__init__(algorithm, hyperparams) + # Overrides its parent class with the receiving params + super(SGHS, self).__init__(params) logger.info('Class overrided.') @@ -564,7 +490,7 @@ def LP(self, LP): @property def HMCRm(self): - """float: Mean harmony memory considering rate + """float: Mean harmony memory considering rate. """ @@ -632,6 +558,68 @@ def bw_max(self, bw_max): self._bw_max = bw_max + @property + def lp(self): + """int: Current learning period. 
+ + """ + + return self._lp + + @lp.setter + def lp(self, lp): + if not isinstance(lp, int): + raise e.TypeError('`lp` should be a integer') + if lp <= 0: + raise e.ValueError('`lp` should be > 0') + + self._lp = lp + + @property + def HMCR_history(self): + """list: Historical harmony memory considering rates. + + """ + + return self._HMCR_history + + @HMCR_history.setter + def HMCR_history(self, HMCR_history): + if not isinstance(HMCR_history, list): + raise e.TypeError('`HMCR_history` should be a list') + + self._HMCR_history = HMCR_history + + @property + def PAR_history(self): + """list: Historical pitch adjusting rates. + + """ + + return self._PAR_history + + @PAR_history.setter + def PAR_history(self, PAR_history): + if not isinstance(PAR_history, list): + raise e.TypeError('`PAR_history` should be a list') + + self._PAR_history = PAR_history + + def create_additional_attrs(self, space): + """Creates additional attributes that are used by this optimizer. + + Args: + space (Space): A Space object containing meta-information. + + """ + + # Current learning period + self.lp = 1 + + # Historical HMCRs and PARs + self.HMCR_history = [] + self.PAR_history = [] + def _generate_new_harmony(self, agents): """It generates a new harmony. @@ -674,94 +662,60 @@ def _generate_new_harmony(self, agents): return a - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. + def update(self, space, function, iteration, n_iterations): + """Wraps Self-Adaptive Global-Best Harmony Search over all agents and variables. Args: - space (Space): A Space object that will be evaluated. + space (Space): Space containing agents and update-related information. function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. 
- - Returns: - A History object holding all agents' positions and fitness achieved during the task. + iteration (int): Current iteration. + n_iterations (int): Maximum number of iterations. """ - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) + # Updates harmony memory considering and pitch adjusting rates + self.HMCR = r.generate_gaussian_random_number(self.HMCRm, 0.01)[0] + self.PAR = r.generate_gaussian_random_number(self.PARm, 0.05)[0] - # Initializing lists of HMCR and PAR - HMCR, PAR = [], [] + # Stores updates values to lists + self.HMCR_history.append(self.HMCR) + self.PAR_history.append(self.PAR) - # Initializing the learning period - lp = 1 + # If current iteration is smaller than half + if iteration < n_iterations // 2: + # Updates the bandwidth parameter + self.bw = self.bw_max - ((self.bw_max - self.bw_min) / n_iterations) * 2 * iteration + else: + # Replaces by the minimum bandwidth + self.bw = self.bw_min - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating harmony memory considering rate - self.HMCR = r.generate_gaussian_random_number(self.HMCRm, 0.01)[0] - - # Updating pitch adjusting rate - self.PAR = r.generate_gaussian_random_number(self.PARm, 0.05)[0] - - # Storing both values - HMCR.append(self.HMCR) - PAR.append(self.PAR) - - # If current iteration is smaller than half - if t < space.n_iterations // 2: - # Updates the bandwidth parameter - self.bw = self.bw_max - ((self.bw_max - self.bw_min) / space.n_iterations) * 2 * t - - # If is bigger than half - else: - # Replaces by the minimum bandwidth - self.bw = self.bw_min - - # Updating agents - self._update(space.agents, function) - - # Checking if agents meet the bounds limits - 
space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Checks if learning period has reached its maximum - if lp == self.LP: - # Re-calculates the mean HMCR - self.HMCRm = np.mean(HMCR) - - # Re-calculates the mean PAR - self.PARm = np.mean(PAR) - - # Resets the learning period to one - lp = 1 + # Generates a new harmony + agent = self._generate_new_harmony(space.agents) - # If has not reached - else: - # Increase it by one - lp += 1 + # Checks agent limits + agent.clip_by_bound() - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) + # Calculates the new harmony fitness + agent.fit = function(agent.position) - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() + # Sorts agents + space.agents.sort(key=lambda x: x.fit) - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') + # If newly generated agent fitness is better + if agent.fit < space.agents[-1].fit: + # Updates the corresponding agent's position and fitness + space.agents[-1].position = copy.deepcopy(agent.position) + space.agents[-1].fit = copy.deepcopy(agent.fit) - return history + # Checks if learning period has reached its maximum + if self.lp == self.LP: + # Re-calculates the mean HMCR and PAR, and resets learning period + self.HMCRm = np.mean(self.HMCR_history) + self.PARm = np.mean(self.PAR_history) + self.lp = 1 + else: + # Increases learning period + self.lp += 1 class NGHS(HS): @@ -777,12 +731,11 @@ class NGHS(HS): """ - def __init__(self, algorithm='NGHS', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. 
""" @@ -791,8 +744,8 @@ def __init__(self, algorithm='NGHS', hyperparams=None): # Mutation probability self.pm = 0.1 - # Override its parent class with the receiving hyperparams - super(NGHS, self).__init__(algorithm, hyperparams) + # Overrides its parent class with the receiving params + super(NGHS, self).__init__(params) logger.info('Class overrided.') @@ -852,30 +805,30 @@ def _generate_new_harmony(self, best, worst): return a - def _update(self, agents, function): - """Method that wraps the update pipeline over all agents and variables. + def update(self, space, function): + """Wraps Novel Global Harmony Search over all agents and variables. Args: - agents (list): List of agents. - function (Function): A function object. + space (Space): Space containing agents and update-related information. + function (Function): A Function object that will be used as the objective function. """ # Generates a new harmony - agent = self._generate_new_harmony(agents[0], agents[-1]) + agent = self._generate_new_harmony(space.agents[0], space.agents[-1]) - # Checking agent limits - agent.clip_limits() + # Checks agent limits + agent.clip_by_bound() # Calculates the new harmony fitness agent.fit = function(agent.position) - # Sorting agents - agents.sort(key=lambda x: x.fit) + # Sorts agents + space.agents.sort(key=lambda x: x.fit) # Updates the worst agent's position and fitness - agents[-1].position = copy.deepcopy(agent.position) - agents[-1].fit = copy.deepcopy(agent.fit) + space.agents[-1].position = copy.deepcopy(agent.position) + space.agents[-1].fit = copy.deepcopy(agent.fit) class GOGHS(NGHS): @@ -891,19 +844,18 @@ class GOGHS(NGHS): """ - def __init__(self, algorithm='GOGHS', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. 
""" logger.info('Overriding class: NGHS -> GOGHS.') - # Override its parent class with the receiving hyperparams - super(GOGHS, self).__init__(algorithm, hyperparams) + # Overrides its parent class with the receiving params + super(GOGHS, self).__init__(params) logger.info('Class overrided.') @@ -922,7 +874,7 @@ def _generate_opposition_harmony(self, new_agent, agents): # Mimics an agent position a = copy.deepcopy(agents[0]) - # Creating pseudo-harmonies + # Creates pseudo-harmonies A = np.zeros((a.n_variables)) B = np.zeros((a.n_variables)) @@ -951,37 +903,37 @@ def _generate_opposition_harmony(self, new_agent, agents): return a - def _update(self, agents, function): - """Method that wraps the update pipeline over all agents and variables. + def update(self, space, function): + """Wraps Generalized Opposition Global-Best Harmony Search over all agents and variables. Args: - agents (list): List of agents. - function (Function): A function object. + space (Space): Space containing agents and update-related information. + function (Function): A Function object that will be used as the objective function. 
""" # Generates new harmonies - agent = self._generate_new_harmony(agents[0], agents[-1]) - opp_agent = self._generate_opposition_harmony(agent, agents) + agent = self._generate_new_harmony(space.agents[0], space.agents[-1]) + opp_agent = self._generate_opposition_harmony(agent, space.agents) - # Checking agents limits - agent.clip_limits() - opp_agent.clip_limits() + # Checks agents limits + agent.clip_by_bound() + opp_agent.clip_by_bound() # Calculates harmonies fitness agent.fit = function(agent.position) opp_agent.fit = function(opp_agent.position) - # Checking if oppisition-based is better than agent + # Checks if oppisition-based is better than agent if opp_agent.fit < agent.fit: # Copies the agent agent = copy.deepcopy(opp_agent) - # Sorting agents - agents.sort(key=lambda x: x.fit) + # Sorts agents + space.agents.sort(key=lambda x: x.fit) # If generated agent fitness is better - if agent.fit < agents[-1].fit: + if agent.fit < space.agents[-1].fit: # Updates the corresponding agent's position and fitness - agents[-1].position = copy.deepcopy(agent.position) - agents[-1].fit = copy.deepcopy(agent.fit) + space.agents[-1].position = copy.deepcopy(agent.position) + space.agents[-1].fit = copy.deepcopy(agent.fit) diff --git a/opytimizer/optimizers/evolutionary/iwo.py b/opytimizer/optimizers/evolutionary/iwo.py index 439e17ac..9d33a3e8 100644 --- a/opytimizer/optimizers/evolutionary/iwo.py +++ b/opytimizer/optimizers/evolutionary/iwo.py @@ -3,12 +3,9 @@ import copy -from tqdm import tqdm - import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as ex -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -27,17 +24,16 @@ class IWO(Optimizer): """ - def __init__(self, algorithm='IWO', hyperparams=None): + def __init__(self, params=None): """Initialization method. 
Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ - # Override its parent class with the receiving hyperparams - super(IWO, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(IWO, self).__init__() # Minimum number of seeds self.min_seeds = 0 @@ -54,8 +50,11 @@ def __init__(self, algorithm='IWO', hyperparams=None): # Initial standard deviation self.init_sigma = 3 - # Now, we need to build this class up - self._build(hyperparams) + # Standard deviation + self.sigma = 0 + + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -146,6 +145,21 @@ def init_sigma(self, init_sigma): self._init_sigma = init_sigma + @property + def sigma(self): + """float: Standard deviation. + + """ + + return self._sigma + + @sigma.setter + def sigma(self, sigma): + if not isinstance(sigma, (float, int)): + raise ex.TypeError('`sigma` should be a float or integer') + + self._sigma = sigma + def _spatial_dispersal(self, iteration, n_iterations): """Calculates the Spatial Dispersal coefficient (eq. 1). 
@@ -155,10 +169,10 @@ def _spatial_dispersal(self, iteration, n_iterations):

        """

-        # Calculating the iteration coefficient
+        # Calculates the iteration coefficient
        coef = ((n_iterations - iteration) ** self.e) / ((n_iterations + c.EPSILON) ** self.e)

-        # Updating the Spatial Dispersial
+        # Updates the Spatial Dispersal
        self.sigma = coef * (self.init_sigma - self.final_sigma) + self.final_sigma

    def _produce_offspring(self, agent, function):
@@ -182,36 +196,38 @@ def _produce_offspring(self, agent, function):
        a.position[j] += self.sigma * r.generate_uniform_random_number(lb, ub, a.n_dimensions)

        # Clips its limits
-        a.clip_limits()
+        a.clip_by_bound()

        # Calculates its fitness
        a.fit = function(a.position)

        return a

-    def _update(self, agents, n_agents, function):
-        """Method that wraps offsprings generations over all agents and variables.
+    def update(self, space, function, iteration, n_iterations):
+        """Wraps Invasive Weed Optimization over all agents and variables.

        Args:
-            agents (list): List of agents.
-            n_agents (int): Number of possible agents in the space.
+            space (Space): Space containing agents and update-related information.
            function (Function): A Function object that will be used as the objective function.
-
-        Returns:
-            A new population with more fitted individuals.
+            iteration (int): Current iteration.
+            n_iterations (int): Maximum number of iterations.
""" - # Creating a list for the produced offsprings + # Calculates the current Spatial Dispersal + self._spatial_dispersal(iteration, n_iterations) + + # Calculates the number of agents and creates list of offsprings + n_agents = len(space.agents) offsprings = [] - # Sorting agents - agents.sort(key=lambda x: x.fit) + # Sorts agents + space.agents.sort(key=lambda x: x.fit) - # Iterate through all agents - for agent in agents: - # Calculate the seeding ratio based on its fitness - ratio = (agent.fit - agents[-1].fit) / (agents[0].fit - agents[-1].fit + c.EPSILON) + # Iterates through all agents + for agent in space.agents: + # Calculates the seeding ratio based on its fitness + ratio = (agent.fit - space.agents[-1].fit) / (space.agents[0].fit - space.agents[-1].fit + c.EPSILON) # Calculates the number of produced seeds n_seeds = int(self.min_seeds + (self.max_seeds - self.min_seeds) * ratio) @@ -224,60 +240,7 @@ def _update(self, agents, n_agents, function): # Appends the agent to the offsprings offsprings.append(a) - # Joins both populations - agents += offsprings - - # Performs a new sort on the merged population - agents.sort(key=lambda x: x.fit) - - return agents[:n_agents] - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. 
- - """ - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Calculates the current Spatial Dispersal - self._spatial_dispersal(t, space.n_iterations) - - # Updating agents - space.agents = self._update(space.agents, space.n_agents, function) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history + # Joins both populations, sorts and gathers best `n_agents` + space.agents += offsprings + space.agents.sort(key=lambda x: x.fit) + space.agents = space.agents[:n_agents] diff --git a/opytimizer/optimizers/evolutionary/rra.py b/opytimizer/optimizers/evolutionary/rra.py index 480ed776..dac25171 100644 --- a/opytimizer/optimizers/evolutionary/rra.py +++ b/opytimizer/optimizers/evolutionary/rra.py @@ -7,7 +7,7 @@ from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e import opytimizer.utils.history as h import opytimizer.utils.logging as l @@ -30,21 +30,20 @@ class RRA(Optimizer): """ - def __init__(self, algorithm='RRA', hyperparams=None): + def __init__(self, params=None): """Initialization method. 
Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> RRA.') - # Override its parent class with the receiving hyperparams - super(RRA, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(RRA, self).__init__() - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') diff --git a/opytimizer/optimizers/misc/__init__.py b/opytimizer/optimizers/misc/__init__.py index 7ff01ef3..51fb2cb3 100644 --- a/opytimizer/optimizers/misc/__init__.py +++ b/opytimizer/optimizers/misc/__init__.py @@ -1,3 +1,9 @@ """An evolutionary package for all common opytimizer modules. It contains implementations of miscellaneous-based optimizers. """ + +from opytimizer.optimizers.misc.aoa import AOA +from opytimizer.optimizers.misc.cem import CEM +from opytimizer.optimizers.misc.doa import DOA +from opytimizer.optimizers.misc.gs import GS +from opytimizer.optimizers.misc.hc import HC diff --git a/opytimizer/optimizers/misc/aoa.py b/opytimizer/optimizers/misc/aoa.py index f07bb4ee..25dac19a 100644 --- a/opytimizer/optimizers/misc/aoa.py +++ b/opytimizer/optimizers/misc/aoa.py @@ -1,12 +1,9 @@ """Arithmetic Optimization Algorithm. """ -from tqdm import tqdm - import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -25,19 +22,18 @@ class AOA(Optimizer): """ - def __init__(self, algorithm='AOA', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. 
- hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> AOA.') - # Override its parent class with the receiving hyperparams - super(AOA, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(AOA, self).__init__() # Minimum accelerated function self.a_min = 0.2 @@ -51,8 +47,8 @@ def __init__(self, algorithm='AOA', hyperparams=None): # Control parameter self.mu = 0.499 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -126,28 +122,27 @@ def mu(self, mu): self._mu = mu - def _update(self, agents, best_agent, iteration, n_iterations): - """Method that wraps Arithmetic Optimization Algorithm over all agents and variables. + def update(self, space, iteration, n_iterations): + """Wraps Arithmetic Optimization Algorithm over all agents and variables. Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. - iteration (int): Current iteration value. + space (Space): Space containing agents and update-related information. + iteration (int): Current iteration. n_iterations (int): Maximum number of iterations. """ - # Calculating math optimizer accelarated coefficient (Eq. 2) + # Calculates math optimizer accelarated coefficient (eq. 2) MOA = self.a_min + iteration * ((self.a_max - self.a_min) / n_iterations) - # Calculating math optimizer probability (Eq. 4) + # Calculates math optimizer probability (eq. 
4) MOP = 1 - (iteration ** (1 / self.alpha) / n_iterations ** (1 / self.alpha)) # Iterates through all agents - for agent in agents: + for agent in space.agents: # Iterates through all variables for j in range(agent.n_variables): - # Generating random probability + # Generates random probability r1 = r.generate_uniform_random_number() # Calculates the search partition @@ -160,13 +155,13 @@ def _update(self, agents, best_agent, iteration, n_iterations): # If probability is bigger than 0.5 if r2 > 0.5: - # Updates position with (Eq. 3 - top) - agent.position[j] = best_agent.position[j] / (MOP + c.EPSILON) * search_partition + # Updates position with (eq. 3 - top) + agent.position[j] = space.best_agent.position[j] / (MOP + c.EPSILON) * search_partition # If probability is smaller than 0.5 else: - # Updates position with (Eq. 3 - bottom) - agent.position[j] = best_agent.position[j] * MOP * search_partition + # Updates position with (eq. 3 - bottom) + agent.position[j] = space.best_agent.position[j] * MOP * search_partition # If probability is smaller than MOA else: @@ -175,57 +170,10 @@ def _update(self, agents, best_agent, iteration, n_iterations): # If probability is bigger than 0.5 if r3 > 0.5: - # Updates position with (Eq. 5 - top) - agent.position[j] = best_agent.position[j] - MOP * search_partition + # Updates position with (eq. 5 - top) + agent.position[j] = space.best_agent.position[j] - MOP * search_partition # If probability is smaller than 0.5 else: - # Updates position with (Eq. 5 - bottom) - agent.position[j] = best_agent.position[j] + MOP * search_partition - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. 
- pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. - - """ - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, space.best_agent, t, space.n_iterations) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history + # Updates position with (eq. 5 - bottom) + agent.position[j] = space.best_agent.position[j] + MOP * search_partition diff --git a/opytimizer/optimizers/misc/cem.py b/opytimizer/optimizers/misc/cem.py index 13ce0911..ca702d48 100644 --- a/opytimizer/optimizers/misc/cem.py +++ b/opytimizer/optimizers/misc/cem.py @@ -2,11 +2,9 @@ """ import numpy as np -from tqdm import tqdm import opytimizer.math.random as r import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -25,17 +23,16 @@ class CEM(Optimizer): """ - def __init__(self, algorithm='CEM', hyperparams=None): + def __init__(self, params=None): """Initialization method. 
Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ - # Override its parent class with the receiving hyperparams - super(CEM, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(CEM, self).__init__() # Amount of positions to employ in mean and std updates self.n_updates = 5 @@ -43,8 +40,8 @@ def __init__(self, algorithm='CEM', hyperparams=None): # Learning rate self.alpha = 0.7 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -82,36 +79,81 @@ def alpha(self, alpha): self._alpha = alpha - def _create_new_samples(self, agents, function, mean, std): + @property + def mean(self): + """np.array: Array of means. + + """ + + return self._mean + + @mean.setter + def mean(self, mean): + if not isinstance(mean, np.ndarray): + raise e.TypeError('`mean` should be a numpy array') + + self._mean = mean + + @property + def std(self): + """np.array: Array of standard deviations. + + """ + + return self._std + + @std.setter + def std(self, std): + if not isinstance(std, np.ndarray): + raise e.TypeError('`std` should be a numpy array') + + self._std = std + + def create_additional_attrs(self, space): + """Creates additional attributes that are used by this optimizer. + + Args: + space (Space): A Space object containing meta-information. 
+ + """ + + # Arrays of means and standard deviations + self.mean = np.zeros(space.n_variables) + self.std = np.zeros(space.n_variables) + + # Iterates through all decision variables + for j, (lb, ub) in enumerate(zip(space.lb, space.ub)): + # Calculates the initial mean and standard deviation + self.mean[j] = r.generate_uniform_random_number(lb, ub) + self.std[j] = ub - lb + + def _create_new_samples(self, agents, function): """Creates new agents based on current mean and standard deviation. Args: agents (list): List of agents. function (Function): A Function object that will be used as the objective function. - mean (np.array): An array of means. - std (np.array): An array of standard deviations. """ - # Iterate through all agents + # Iterates through all agents for agent in agents: # Iterate through all decision variables - for j, (m, s) in enumerate(zip(mean, std)): + for j, (m, s) in enumerate(zip(self.mean, self.std)): # For each decision variable, we generate gaussian numbers based on mean and std agent.position[j] = r.generate_gaussian_random_number(m, s, agent.n_dimensions) # Clips the agent limits - agent.clip_limits() + agent.clip_by_bound() # Calculates its new fitness agent.fit = function(agent.position) - def _update_mean(self, updates, mean): + def _update_mean(self, updates): """Calculates and updates mean. Args: updates (np.array): An array of updates' positions. - mean (np.array): An array of means. Returns: The new mean values. @@ -119,17 +161,15 @@ def _update_mean(self, updates, mean): """ # Calculates the new mean based on update formula - new_mean = self.alpha * mean + (1 - self.alpha) * np.mean(updates) + new_mean = self.alpha * self.mean + (1 - self.alpha) * np.mean(updates) return new_mean - def _update_std(self, updates, mean, std): + def _update_std(self, updates): """Calculates and updates standard deviation. Args: updates (np.array): An array of updates' positions. - mean (np.array): An array of means. 
- std (np.array): An array of standard deviations. Returns: The new standard deviation values. @@ -137,89 +177,28 @@ def _update_std(self, updates, mean, std): """ # Calculates the new standard deviation based on update formula - new_std = self.alpha * std + (1 - self.alpha) * np.sqrt(np.mean((updates - mean) ** 2)) + new_std = self.alpha * self.std + (1 - self.alpha) * np.sqrt(np.mean((updates - self.mean) ** 2)) return new_std - def _update(self, agents, function, mean, std): - """Method that wraps sampling, mean, and standard deviation updates over all agents and variables. + def update(self, space, function): + """Wraps Cross-Entropy Method over all agents and variables. Args: - agents (list): List of agents. + space (Space): Space containing agents and update-related information. function (Function): A Function object that will be used as the objective function. - mean (np.array): An array of means. - std (np.array): An array of standard deviations. """ # Creates new agents based on current mean and standard deviation - self._create_new_samples(agents, function, mean, std) + self._create_new_samples(space.agents, function) # Sorts the agents - agents.sort(key=lambda x: x.fit) - - # Gathering the update positions - update_position = np.array([agent.position for agent in agents[:self.n_updates]]) - - # For every decision variable - for j in range(mean.shape[0]): - # Update its mean and standard deviation - mean[j] = self._update_mean(update_position[:, j, :], mean[j]) - std[j] = self._update_std(update_position[:, j, :], mean[j], std[j]) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. 
- pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. - - """ - - # Instantiating an array of means and standard deviations - mean = np.zeros(space.n_variables) - std = np.zeros(space.n_variables) - - # Iterates through all decision variables - for j, (lb, ub) in enumerate(zip(space.lb, space.ub)): - # Calculates the initial mean and standard deviation - mean[j] = r.generate_uniform_random_number(lb, ub) - std[j] = ub - lb - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, function, mean, std) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() + space.agents.sort(key=lambda x: x.fit) - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') + # Gathers the update positions + update_position = np.array([agent.position for agent in space.agents[:self.n_updates]]) - return history + # Updates its mean and standard deviation + self.mean = self._update_mean(update_position) + self.std = self._update_std(update_position) diff --git a/opytimizer/optimizers/misc/doa.py b/opytimizer/optimizers/misc/doa.py index 
3b9b2d57..963c1d84 100644 --- a/opytimizer/optimizers/misc/doa.py +++ b/opytimizer/optimizers/misc/doa.py @@ -2,11 +2,9 @@ """ import numpy as np -from tqdm import tqdm import opytimizer.math.random as rnd import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -26,25 +24,24 @@ class DOA(Optimizer): """ - def __init__(self, algorithm='DOA', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> DOA.') - # Override its parent class with the receiving hyperparams - super(DOA, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(DOA, self).__init__() # Chaos multiplier self.r = 1.0 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -65,8 +62,34 @@ def r(self, r): self._r = r - def _chaotic_map(self, lb, ub): - """Calculates the chaotic maps (eq. 3). + @property + def chaotic_map(self): + """np.array: Array of chaotic maps. + + """ + + return self._chaotic_map + + @chaotic_map.setter + def chaotic_map(self, chaotic_map): + if not isinstance(chaotic_map, np.ndarray): + raise e.TypeError('`chaotic_map` should be a numpy array') + + self._chaotic_map = chaotic_map + + def create_additional_attrs(self, space): + """Creates additional attributes that are used by this optimizer. + + Args: + space (Space): A Space object containing meta-information. + + """ + + # Array of chaotic maps + self.chaotic_map = np.zeros((space.n_agents, space.n_variables)) + + def _calculate_chaotic_map(self, lb, ub): + """Calculates the chaotic map (eq. 3). 
Args: lb (float): Lower bound value. @@ -85,81 +108,29 @@ def _chaotic_map(self, lb, ub): return c_map - def _update(self, agents, best_agent, chaotic_map): - """Method that wraps global and local pollination updates over all agents and variables. + def update(self, space): + """Wraps Darcy Optimization Algorithm over all agents and variables. Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. - chaotic_map (np.array): Array of current chaotic maps. + space (Space): Space containing agents and update-related information. """ # Iterates through all agents - for i, agent in enumerate(agents): + for i, agent in enumerate(space.agents): # Iterates through all decision variables for j, (lb, ub) in enumerate(zip(agent.lb, agent.ub)): # Generates a chaotic map - c_map = self._chaotic_map(lb, ub) + c_map = self._calculate_chaotic_map(lb, ub) # Updates the agent's position (eq. 6) - agent.position[j] += (2 * (best_agent.position[j] - agent.position[j]) / ( - c_map - chaotic_map[i][j])) * (ub - lb) / len(agents) + agent.position[j] += (2 * (space.best_agent.position[j] - agent.position[j]) / ( + c_map - self.chaotic_map[i][j])) * (ub - lb) / len(space.agents) # Updates current chaotic map with newer value - chaotic_map[i][j] = c_map + self.chaotic_map[i][j] = c_map # Checks if position has exceed the bounds if (agent.position[j] < lb) or (agent.position[j] > ub): # If yes, replace its value with the proposed equation (eq. 7) - agent.position[j] = best_agent.position[j] * c_map - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. 
- - Returns: - A History object holding all agents' positions and fitness achieved during the task. - - """ - - # Instantiates an array to hold the chaotic maps - chaotic_map = np.zeros((space.n_agents, space.n_variables)) - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, space.best_agent, chaotic_map) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history + agent.position[j] = space.best_agent.position[j] * c_map diff --git a/opytimizer/optimizers/misc/gs.py b/opytimizer/optimizers/misc/gs.py index 3218578e..eead2c52 100644 --- a/opytimizer/optimizers/misc/gs.py +++ b/opytimizer/optimizers/misc/gs.py @@ -1,9 +1,6 @@ """Grid-Search. """ -from tqdm import tqdm - -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -17,64 +14,25 @@ class GS(Optimizer): variables and methods. References: - J. Bergstra and Y. Bengio. Random search for hyper-parameter optimization. - Journal of machine learning research (2012). + J. Bergstra and Y. Bengio. Random search for hyper-parameter optimization. 
+ Journal of machine learning research (2012). """ - def __init__(self, algorithm='GS', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> GS.') - # Override its parent class with the receiving hyperparams - super(GS, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(GS, self).__init__() - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline (p. 282-283). - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. 
- - """ - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history diff --git a/opytimizer/optimizers/misc/hc.py b/opytimizer/optimizers/misc/hc.py index 9fdb2fa4..68f706a8 100644 --- a/opytimizer/optimizers/misc/hc.py +++ b/opytimizer/optimizers/misc/hc.py @@ -1,11 +1,8 @@ """Hill-Climbing. """ -from tqdm import tqdm - import opytimizer.math.random as r import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -13,7 +10,7 @@ class HC(Optimizer): - """A HC class, inherited from Optimizer. + """An HC class, inherited from Optimizer. This is the designed class to define HC-related variables and methods. @@ -23,19 +20,18 @@ class HC(Optimizer): """ - def __init__(self, algorithm='HC', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. 
""" logger.info('Overriding class: Optimizer -> HC.') - # Override its parent class with the receiving hyperparams - super(HC, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(HC, self).__init__() # Mean of noise distribution self.r_mean = 0 @@ -43,8 +39,8 @@ def __init__(self, algorithm='HC', hyperparams=None): # Variance of noise distribution self.r_var = 0.1 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -80,66 +76,19 @@ def r_var(self, r_var): self._r_var = r_var - def _update(self, agents): - """Method that wraps Hill Climbing over all agents and variables (p. 252). + def update(self, space): + """Wraps Hill Climbing over all agents and variables (p. 252). Args: - agents (list): List of agents. + space (Space): Space containing agents and update-related information. """ - # Iterate through all agents - for agent in agents: + # Iterates through all agents + for agent in space.agents: # Creates a gaussian noise vector noise = r.generate_gaussian_random_number( self.r_mean, self.r_var, size=(agent.n_variables, agent.n_dimensions)) - # Updating agent's position + # Updates agent's position agent.position += noise - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. 
- - """ - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history diff --git a/opytimizer/optimizers/population/__init__.py b/opytimizer/optimizers/population/__init__.py index 92710de4..cf591dbe 100644 --- a/opytimizer/optimizers/population/__init__.py +++ b/opytimizer/optimizers/population/__init__.py @@ -1,3 +1,15 @@ """An evolutionary package for all common opytimizer modules. It contains implementations of population-based optimizers. 
""" + +from opytimizer.optimizers.population.aeo import AEO +from opytimizer.optimizers.population.ao import AO +from opytimizer.optimizers.population.coa import COA +from opytimizer.optimizers.population.epo import EPO +from opytimizer.optimizers.population.gco import GCO +from opytimizer.optimizers.population.gwo import GWO +from opytimizer.optimizers.population.hho import HHO +from opytimizer.optimizers.population.loa import LOA +from opytimizer.optimizers.population.lpoa import LPOA +from opytimizer.optimizers.population.ppa import PPA +from opytimizer.optimizers.population.pvs import PVS diff --git a/opytimizer/optimizers/population/aeo.py b/opytimizer/optimizers/population/aeo.py index 4b005b23..bcc59bde 100644 --- a/opytimizer/optimizers/population/aeo.py +++ b/opytimizer/optimizers/population/aeo.py @@ -4,10 +4,8 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -27,20 +25,19 @@ class AEO(Optimizer): """ - def __init__(self, algorithm='AEO', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ - # Override its parent class with the receiving hyperparams - super(AEO, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(AEO, self).__init__() - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -50,7 +47,7 @@ def _production(self, agent, best_agent, iteration, n_iterations): Args: agent (Agent): Current agent. best_agent (Agent): Best agent. - iteration (int): Number of current iteration. + iteration (int): Current iteration. 
n_iterations (int): Maximum number of iterations. Returns: @@ -114,8 +111,7 @@ def _omnivore_consumption(self, agent, producer, consumer, C): r2 = r.generate_uniform_random_number() # Updates its position - a.position += C * r2 * (a.position - producer.position) + \ - (1 - r2) * (a.position - consumer.position) + a.position += C * r2 * (a.position - producer.position) + (1 - r2) * (a.position - consumer.position) return a @@ -141,22 +137,22 @@ def _carnivore_consumption(self, agent, consumer, C): return a def _update_composition(self, agents, best_agent, function, iteration, n_iterations): - """Method that wraps production and consumption updates over all + """Wraps production and consumption updates over all agents and variables (eq. 1-8). Args: agents (list): List of agents. best_agent (Agent): Global best agent. function (Function): A Function object that will be used as the objective function. - iteration (int): Number of current iteration. + iteration (int): Current iteration. n_iterations (int): Maximum number of iterations. 
""" - # Sorting agents according to their energy + # Sorts agents according to their energy agents.sort(key=lambda x: x.fit, reverse=True) - # Iterate through all agents + # Iterates through all agents for i, agent in enumerate(agents): # If it is the first agent if i == 0: @@ -196,20 +192,20 @@ def _update_composition(self, agents, best_agent, function, iteration, n_iterati # It will surely be a carnivore a = self._carnivore_consumption(agent, agents[j], C) - # Check agent limits - a.clip_limits() + # Checks agent's limits + a.clip_by_bound() # Calculates the fitness for the temporary position a.fit = function(a.position) # If new fitness is better than agent's fitness if a.fit < agent.fit: - # Copy its position and fitness to the agent + # Copies its position and fitness to the agent agent.position = copy.deepcopy(a.position) agent.fit = copy.deepcopy(a.fit) def _update_decomposition(self, agents, best_agent, function): - """Method that wraps decomposition updates over all + """Wraps decomposition updates over all agents and variables (eq. 9). 
Args: @@ -219,7 +215,7 @@ def _update_decomposition(self, agents, best_agent, function): """ - # Iterate through all agents + # Iterates through all agents for agent in agents: # Makes a deep copy of current agent a = copy.deepcopy(agent) @@ -239,79 +235,31 @@ def _update_decomposition(self, agents, best_agent, function): # Updates the new agent position a.position = best_agent.position + D * (e * best_agent.position - _h * agent.position) - # Check agent limits - a.clip_limits() + # Checks agent's limits + a.clip_by_bound() # Calculates the fitness for the temporary position a.fit = function(a.position) # If new fitness is better than agent's fitness if a.fit < agent.fit: - # Copy its position and fitness to the agent + # Copies its position and fitness to the agent agent.position = copy.deepcopy(a.position) agent.fit = copy.deepcopy(a.fit) - def _update(self, agents, best_agent, function, iteration, n_iterations): - """Method that wraps composition and decomposition. + def update(self, space, function, iteration, n_iterations): + """Wraps Artificial Ecosystem-based Optimization over all agents and variables. Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. + space (Space): Space containing agents and update-related information. function (Function): A Function object that will be used as the objective function. - iteration (int): Number of current iteration. + iteration (int): Current iteration. n_iterations (int): Maximum number of iterations. """ - # Updating agents within the composition step - self._update_composition(agents, best_agent, function, iteration, n_iterations) - - # Updating agents within the decomposition step - self._update_decomposition(agents, best_agent, function) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. 
- function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. - - """ - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, space.best_agent, function, t, space.n_iterations) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') + # Updates agents within the composition step + self._update_composition(space.agents, space.best_agent, function, iteration, n_iterations) - return history + # Updates agents within the decomposition step + self._update_decomposition(space.agents, space.best_agent, function) diff --git a/opytimizer/optimizers/population/ao.py b/opytimizer/optimizers/population/ao.py index 8512c390..83fc5023 100644 --- a/opytimizer/optimizers/population/ao.py +++ b/opytimizer/optimizers/population/ao.py @@ -4,12 +4,10 @@ import copy import numpy as np -from tqdm import tqdm import 
opytimizer.math.distribution as d import opytimizer.math.random as r import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -28,19 +26,18 @@ class AO(Optimizer): """ - def __init__(self, algorithm='AO', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> AO.') - # Override its parent class with the receiving hyperparams - super(AO, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(AO, self).__init__() # First exploitation adjustment coefficient self.alpha = 0.1 @@ -57,8 +54,8 @@ def __init__(self, algorithm='AO', hyperparams=None): # Angle regularizer self.w = 0.005 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -147,23 +144,22 @@ def w(self, w): self._w = w - def _update(self, agents, best_agent, function, iteration, n_iterations): - """Method that wraps Aquila Optimizer over all agents and variables. + def update(self, space, function, iteration, n_iterations): + """Wraps Aquila Optimizer over all agents and variables. Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. + space (Space): Space containing agents and update-related information. function (Function): A Function object that will be used as the objective function. - iteration (int): Current iteration value. + iteration (int): Current iteration. n_iterations (int): Maximum number of iterations. 
""" # Calculates the mean position of space - average = np.mean([agent.position for agent in agents], axis=0) + average = np.mean([agent.position for agent in space.agents], axis=0) # Iterates through all agents - for agent in agents: + for agent in space.agents: # Makes a deepcopy of current agent a = copy.deepcopy(agent) @@ -177,33 +173,34 @@ def _update(self, agents, best_agent, function, iteration, n_iterations): # If random number is smaller or equal to 0.5 if r1 <= 0.5: - # Updates temporary agent's position (Eq. 3) - a.position = best_agent.position * (1 - (iteration / n_iterations)) + \ - (average - best_agent.position * r2) + # Updates temporary agent's position (eq. 3) + a.position = space.best_agent.position * (1 - (iteration / n_iterations)) + \ + (average - space.best_agent.position * r2) # If random number is bigger than 0.5 else: # Generates a Lévy distirbution and a random integer levy = d.generate_levy_distribution(size=(agent.n_variables, agent.n_dimensions)) - idx = r.generate_integer_random_number(high=len(agents)) + idx = r.generate_integer_random_number(high=len(space.agents)) # Creates an evenly-space array of `n_variables` # Also broadcasts it to correct `n_dimensions` size D = np.linspace(1, agent.n_variables, agent.n_variables) D = np.repeat(np.expand_dims(D, -1), agent.n_dimensions, axis=1) - # Calculates current cycle value (Eq. 10) + # Calculates current cycle value (eq. 10) cycle = self.n_cycles + self.U * D - # Calculates `theta` (Eq. 11) + # Calculates `theta` (eq. 11) theta = -self.w * D + (3 * np.pi) / 2 - # Calculates `x` and `y` positioning (Eq. 8 and 9) + # Calculates `x` and `y` positioning (eq. 8 and 9) x = cycle * np.sin(theta) y = cycle * np.cos(theta) - # Updates temporary agent's position (Eq. 5) - a.position = best_agent.position * levy + agents[idx].position + (y - x) * r2 + # Updates temporary agent's position (eq. 
5) + a.position = space.best_agent.position * levy + \ + space.agents[idx].position + (y - x) * r2 # If current iteration is bigger than 2/3 of maximum iterations else: @@ -216,82 +213,34 @@ def _update(self, agents, best_agent, function, iteration, n_iterations): lb = np.expand_dims(agent.lb, -1) ub = np.expand_dims(agent.ub, -1) - # Updates temporary agent's position (Eq. 13) - a.position = (best_agent.position - average) * \ + # Updates temporary agent's position (eq. 13) + a.position = (space.best_agent.position - average) * \ self.alpha - r2 + ((ub - lb) * r2 + lb) * self.delta # If random number is bigger than 0.5 else: - # Calculates both motions (Eq. 16 and 17) + # Calculates both motions (eq. 16 and 17) G1 = 2 * r2 - 1 G2 = 2 * (1 - (iteration / n_iterations)) - # Calculates quality function (Eq. 15) + # Calculates quality function (eq. 15) QF = iteration ** (G1 / (1 - n_iterations) ** 2) # Generates a Lévy distribution levy = d.generate_levy_distribution(size=(agent.n_variables, agent.n_dimensions)) - # Updates temporary agent's position (Eq. 14) - a.position = QF * best_agent.position - \ + # Updates temporary agent's position (eq. 14) + a.position = QF * space.best_agent.position - \ (G1 * a.position * r2) - G2 * levy + r2 * G1 - # Check agent limits - a.clip_limits() + # Checks agent's limits + a.clip_by_bound() # Calculates the fitness for the temporary position a.fit = function(a.position) # If new fitness is better than agent's fitness if a.fit < agent.fit: - # Copy its position and fitness to the agent + # Copies its position and fitness to the agent agent.position = copy.deepcopy(a.position) agent.fit = copy.deepcopy(a.fit) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. 
- store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. - - """ - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, space.best_agent, - function, t, space.n_iterations) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history diff --git a/opytimizer/optimizers/population/coa.py b/opytimizer/optimizers/population/coa.py index 74b30f15..38edc1eb 100644 --- a/opytimizer/optimizers/population/coa.py +++ b/opytimizer/optimizers/population/coa.py @@ -4,11 +4,9 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.random as r import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -27,25 +25,24 @@ class COA(Optimizer): """ - def __init__(self, algorithm='COA', hyperparams=None): + def __init__(self, params=None): 
"""Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> COA.') - # Override its parent class with the receiving hyperparams - super(COA, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(COA, self).__init__() # Number of packs self.n_p = 2 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -66,13 +63,40 @@ def n_p(self, n_p): self._n_p = n_p - def _get_agents_from_pack(self, agents, index, n_c): + @property + def n_c(self): + """int: Number of coyotes per pack. + + """ + + return self._n_c + + @n_c.setter + def n_c(self, n_c): + if not isinstance(n_c, int): + raise e.TypeError('`n_c` should be an integer') + if n_c <= 0: + raise e.ValueError('`n_c` should be > 0') + + self._n_c = n_c + + def create_additional_attrs(self, space): + """Creates additional attributes that are used by this optimizer. + + Args: + space (Space): A Space object containing meta-information. + + """ + + # Calculates the number of coyotes per pack + self.n_c = space.n_agents // self.n_p + + def _get_agents_from_pack(self, agents, index): """Gets a set of agents from a specified pack. Args: agents (list): List of agents. index (int): Index of pack. - n_c (int): Number of agents per pack. Returns: A sorted list of agents that belongs to the specified pack. @@ -80,16 +104,15 @@ def _get_agents_from_pack(self, agents, index, n_c): """ # Defines the starting and ending points - start, end = index * n_c, (index + 1) * n_c + start, end = index * self.n_c, (index + 1) * self.n_c return sorted(agents[start:end], key=lambda x: x.fit) - def _transition_packs(self, agents, n_c): - """Transits coyotes between packs (Eq. 4). 
+ def _transition_packs(self, agents): + """Transits coyotes between packs (eq. 4). Args: agents (list): List of agents. - n_c (int): Number of coyotes per pack. """ @@ -106,38 +129,35 @@ def _transition_packs(self, agents, n_c): p2 = r.generate_integer_random_number(high=self.n_p) # Gathers two random coyotes - c1 = r.generate_integer_random_number(high=n_c) - c2 = r.generate_integer_random_number(high=n_c) + c1 = r.generate_integer_random_number(high=self.n_c) + c2 = r.generate_integer_random_number(high=self.n_c) # Calculates their indexes - i = n_c * p1 + c1 - j = n_c * p2 + c2 + i = self.n_c * p1 + c1 + j = self.n_c * p2 + c2 # Performs a swap betweeh them - agents[i], agents[j] = copy.deepcopy( - agents[j]), copy.deepcopy(agents[i]) + agents[i], agents[j] = copy.deepcopy(agents[j]), copy.deepcopy(agents[i]) - def _update(self, agents, function, n_c): - """Method that wraps Coyote Optimization Algorithm over all agents and variables. + def update(self, space, function): + """Wraps Coyote Optimization Algorithm over all agents and variables. Args: - agents (list): List of agents. + space (Space): Space containing agents and update-related information. function (Function): A Function object that will be used as the objective function. - n_c (int): Number of agents per pack. """ # Iterates through all packs for i in range(self.n_p): # Gets the agents for the specified pack - pack_agents = self._get_agents_from_pack(agents, i, n_c) + pack_agents = self._get_agents_from_pack(space.agents, i) - # Gathers the alpha coyote (Eq. 5) + # Gathers the alpha coyote (eq. 5) alpha = pack_agents[0] - # Computes the cultural tendency (Eq. 6) - tendency = np.median( - np.array([agent.position for agent in pack_agents]), axis=0) + # Computes the cultural tendency (eq. 
6) + tendency = np.median(np.array([agent.position for agent in pack_agents]), axis=0) # Iterates through all coyotes in the pack for agent in pack_agents: @@ -156,76 +176,20 @@ def _update(self, agents, function, n_c): r1 = r.generate_uniform_random_number() r2 = r.generate_uniform_random_number() - # Updates the social condition (Eq. 12) + # Updates the social condition (eq. 12) a.position += r1 * lambda_1 + r2 * lambda_2 # Checks the agent's limits - a.clip_limits() + a.clip_by_bound() - # Evaluates the agent (Eq. 13) + # Evaluates the agent (eq. 13) a.fit = function(a.position) - # If the new potision is better than current agent's position (Eq. 14) + # If the new potision is better than current agent's position (eq. 14) if a.fit < agent.fit: # Replaces the current agent's position and fitness agent.position = copy.deepcopy(a.position) agent.fit = copy.deepcopy(a.fit) - # Performs transition between packs (Eq. 4) - self._transition_packs(agents, n_c) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. 
- - """ - - # Calculates the number of coyotes per pack - n_c = space.n_agents // self.n_p - - # If number of coyotes per pack equals to zero - if n_c == 0: - # Throws an error - raise e.ValueError( - 'Number of agents should be divisible by number of packs') - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, function, n_c) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history + # Performs transition between packs (eq. 
4) + self._transition_packs(space.agents) diff --git a/opytimizer/optimizers/population/epo.py b/opytimizer/optimizers/population/epo.py index 8f168b4f..78a4d432 100644 --- a/opytimizer/optimizers/population/epo.py +++ b/opytimizer/optimizers/population/epo.py @@ -2,11 +2,9 @@ """ import numpy as np -from tqdm import tqdm import opytimizer.math.random as r import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as log from opytimizer.core.optimizer import Optimizer @@ -25,19 +23,18 @@ class EPO(Optimizer): """ - def __init__(self, algorithm='EPO', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> EPO.') - # Override its parent class with the receiving hyperparams - super(EPO, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(EPO, self).__init__() # Exploration control parameter self.f = 2.0 @@ -45,8 +42,8 @@ def __init__(self, algorithm='EPO', hyperparams=None): # Exploitation control parameter self.l = 1.5 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -80,18 +77,18 @@ def l(self, l): self._l = l - def _update(self, agents, best_agent, iteration, n_iterations): - """Method that wraps the Emperor Penguin Optimization over all agents and variables. + def update(self, space, iteration, n_iterations): + """Wraps Emperor Penguin Optimization over all agents and variables. + Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. + space (Space): Space containing agents and update-related information. iteration (int): Current iteration. 
n_iterations (int): Maximum number of iterations. """ # Iterates through every agent - for agent in agents: + for agent in space.agents: # Generates a radius constant R = r.generate_uniform_random_number() @@ -105,71 +102,24 @@ def _update(self, agents, best_agent, iteration, n_iterations): # Defines temperature as one T = 1 - # Calculates the temperature profile (Eq. 7) + # Calculates the temperature profile (eq. 7) T_p = T - n_iterations / (iteration - n_iterations) - # Calculates the polygon grid accuracy (Eq. 10) - P_grid = np.fabs(best_agent.position - agent.position) + # Calculates the polygon grid accuracy (eq. 10) + P_grid = np.fabs(space.best_agent.position - agent.position) # Generates a uniform random number and the `C` coefficient r1 = r.generate_uniform_random_number() C = r.generate_uniform_random_number(size=agent.n_variables) - # Calculates the avoidance coefficient (Eq. 9) + # Calculates the avoidance coefficient (eq. 9) A = 2 * (T_p + P_grid) * r1 - T_p - # Calculates the social forces of emperor penguin (Eq. 12) + # Calculates the social forces of emperor penguin (eq. 12) S = (np.fabs(self.f * np.exp(-iteration / self.l) - np.exp(-iteration))) ** 2 - # Calculates the distance between current agent and emperor penguin (Eq. 8) - D_ep = np.fabs(S * best_agent.position - C * agent.position) - - # Updates current agent's position (Eq. 13) - agent.position = best_agent.position - A * D_ep - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. 
- - """ - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, space.best_agent, t, space.n_iterations) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') + # Calculates the distance between current agent and emperor penguin (eq. 8) + D_ep = np.fabs(S * space.best_agent.position - C * agent.position) - return history + # Updates current agent's position (eq. 
13) + agent.position = space.best_agent.position - A * D_ep diff --git a/opytimizer/optimizers/population/gco.py b/opytimizer/optimizers/population/gco.py index c1e2f649..0cd3b85e 100644 --- a/opytimizer/optimizers/population/gco.py +++ b/opytimizer/optimizers/population/gco.py @@ -4,13 +4,11 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.distribution as d import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -29,17 +27,16 @@ class GCO(Optimizer): """ - def __init__(self, algorithm='GCO', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ - # Override its parent class with the receiving hyperparams - super(GCO, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(GCO, self).__init__() # Cross-ratio self.CR = 0.7 @@ -47,8 +44,8 @@ def __init__(self, algorithm='GCO', hyperparams=None): # Mutation factor self.F = 1.25 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -86,6 +83,48 @@ def F(self, F): self._F = F + @property + def life(self): + """np.array: Array of lives. + + """ + + return self._life + + @life.setter + def life(self, life): + if not isinstance(life, np.ndarray): + raise e.TypeError('`life` should be a numpy array') + + self._life = life + + @property + def counter(self): + """np.array: Array of counters. 
+ + """ + + return self._counter + + @counter.setter + def counter(self, counter): + if not isinstance(counter, np.ndarray): + raise e.TypeError('`counter` should be a numpy array') + + self._counter = counter + + def create_additional_attrs(self, space): + """Creates additional attributes that are used by this optimizer. + + Args: + space (Space): A Space object containing meta-information. + + """ + + # Array of lives and counters + self.life = r.generate_uniform_random_number(70, 70, space.n_agents) + self.counter = np.ones(space.n_agents) + def _mutate_cell(self, agent, alpha, beta, gamma): """Mutates a new cell based on distinct cells (alg. 2). @@ -115,59 +154,56 @@ def _mutate_cell(self, agent, alpha, beta, gamma): return a - def _dark_zone(self, agents, function, life, counter): + def _dark_zone(self, agents, function): """Performs the dark-zone update process (alg. 1). Args: agents (list): List of agents. function (Function): A Function object that will be used as the objective function. - life (np.array): An array holding each cell's current life. - counter (np.array): An array holding each cell's copy counter. 
""" - # Iterate through all agents + # Iterates through all agents for i, agent in enumerate(agents): # Generates the first random number, between 0 and 100 r1 = r.generate_uniform_random_number(0, 100) # If random number is smaller than cell's life - if r1 < life[i]: + if r1 < self.life[i]: # Increases it counter by one - counter[i] += 1 + self.counter[i] += 1 # If it is not smaller else: # Resets the counter to one - counter[i] = 1 + self.counter[i] = 1 # Generates the counting distribution and pick three cells - C = d.generate_choice_distribution(len(agents), counter / np.sum(counter), size=3) + C = d.generate_choice_distribution(len(agents), self.counter / np.sum(self.counter), size=3) # Mutates a new cell based on current and pre-picked cells a = self._mutate_cell(agent, agents[C[0]], agents[C[1]], agents[C[2]]) - # Check agent limits - a.clip_limits() + # Checks agent's limits + a.clip_by_bound() # Calculates the fitness for the temporary position a.fit = function(a.position) # If new fitness is better than agent's fitness if a.fit < agent.fit: - # Copy its position and fitness to the agent + # Copies its position and fitness to the agent agent.position = copy.deepcopy(a.position) agent.fit = copy.deepcopy(a.fit) # Increases the life of cell by ten - life[i] += 10 + self.life[i] += 10 - def _light_zone(self, agents, life): + def _light_zone(self, agents): """Performs the light-zone update process (alg. 1). Args: agents (list): List of agents. - life (np.array): An array holding each cell's current life. 
""" @@ -177,83 +213,28 @@ def _light_zone(self, agents, life): # Calculates the minimum and maximum fitness min_fit, max_fit = np.min(fits), np.max(fits) - # Iterate through all agents + # Iterates through all agents for i, agent in enumerate(agents): # Resets the cell life to 10 - life[i] = 10 + self.life[i] = 10 # Calculates the current cell new life fitness life_fit = (agent.fit - max_fit) / (min_fit - max_fit + c.EPSILON) # Adds 10 * new life fitness to cell's life - life[i] += 10 * life_fit + self.life[i] += 10 * life_fit - def _update(self, agents, function, life, counter): - """Method that wraps dark- and light-zone updates over all agents and variables. + def update(self, space, function): + """Wraps Germinal Center Optimization over all agents and variables. Args: - agents (list): List of agents. + space (Space): Space containing agents and update-related information. function (Function): A Function object that will be used as the objective function. - life (np.array): An array holding each cell's current life. - counter (np.array): An array holding each cell's copy counter. """ # Performs the dark-zone update process - self._dark_zone(agents, function, life, counter) + self._dark_zone(space.agents, function) # Performs the light-zone update process - self._light_zone(agents, life) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. 
- - """ - - # Instanciating array of lives - life = r.generate_uniform_random_number(70, 70, space.n_agents) - - # Instanciating array of counters - counter = np.ones(space.n_agents) - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, function, life, counter) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history + self._light_zone(space.agents) diff --git a/opytimizer/optimizers/population/gwo.py b/opytimizer/optimizers/population/gwo.py index c6935f80..67f80a96 100644 --- a/opytimizer/optimizers/population/gwo.py +++ b/opytimizer/optimizers/population/gwo.py @@ -4,10 +4,8 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -26,22 +24,21 @@ class GWO(Optimizer): """ - def __init__(self, algorithm='GWO', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. 
+ params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> GWO.') - # Override its parent class with the receiving hyperparams - super(GWO, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(GWO, self).__init__() - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -60,36 +57,36 @@ def _calculate_coefficients(self, a): r1 = r.generate_uniform_random_number() r2 = r.generate_uniform_random_number() - # Calculates the `A` coefficient (Eq. 3.3) + # Calculates the `A` coefficient (eq. 3.3) A = 2 * a * r1 - a - # Calculates the `C` coefficient (Eq. 3.4) + # Calculates the `C` coefficient (eq. 3.4) C = 2 * r2 return A, C - def _update(self, agents, function, iteration, n_iterations): - """Method that wraps the Grey Wolf Optimization over all agents and variables. + def update(self, space, function, iteration, n_iterations): + """Wraps Grey Wolf Optimization over all agents and variables. Args: - agents (list): List of agents. - function (Function): A function object. + space (Space): Space containing agents and update-related information. + function (Function): A Function object that will be used as the objective function. iteration (int): Current iteration. n_iterations (int): Maximum number of iterations. 
""" - # Sorting agents - agents.sort(key=lambda x: x.fit) + # Sorts agents + space.agents.sort(key=lambda x: x.fit) # Gathers the best three wolves - alpha, beta, delta = copy.deepcopy(agents[:3]) + alpha, beta, delta = copy.deepcopy(space.agents[:3]) # Defines the linear constant a = 2 - 2 * iteration / (n_iterations - 1) # Iterates through all agents - for agent in agents: + for agent in space.agents: # Makes a deepcopy of current agent X = copy.deepcopy(agent) @@ -103,11 +100,11 @@ def _update(self, agents, function, iteration, n_iterations): X_2 = beta.position - A_2 * np.fabs(C_2 * beta.position - agent.position) X_3 = delta.position - A_3 * np.fabs(C_3 * delta.position - agent.position) - # Calculates the temporary agent (Eq. 3.7) + # Calculates the temporary agent (eq. 3.7) X.position = (X_1 + X_2 + X_3) / 3 # Clips temporary agent's limits - X.clip_limits() + X.clip_by_bound() # Evaluates temporary agent's new position X.fit = function(X.position) @@ -117,50 +114,3 @@ def _update(self, agents, function, iteration, n_iterations): # Updates the corresponding agent's position and fitness agent.position = copy.deepcopy(X.position) agent.fit = copy.deepcopy(X.fit) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. 
- - """ - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, function, t, space.n_iterations) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history diff --git a/opytimizer/optimizers/population/hho.py b/opytimizer/optimizers/population/hho.py index 5dd8a453..9d72fca2 100644 --- a/opytimizer/optimizers/population/hho.py +++ b/opytimizer/optimizers/population/hho.py @@ -2,11 +2,9 @@ """ import numpy as np -from tqdm import tqdm import opytimizer.math.distribution as d import opytimizer.math.random as r -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -25,22 +23,21 @@ class HHO(Optimizer): """ - def __init__(self, algorithm='HHO', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. 
""" logger.info('Overriding class: Optimizer -> HHO.') - # Override its parent class with the receiving hyperparams - super(HHO, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(HHO, self).__init__() - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -65,7 +62,7 @@ def _calculate_initial_coefficients(self, iteration, n_iterations): # Calculates the jump strength J = 2 * (1 - r1) - # Calculates the energy (Eq. 3) + # Calculates the energy (eq. 3) E = 2 * E_0 * (1 - (iteration / n_iterations)) return np.fabs(E), J @@ -95,7 +92,7 @@ def _exploration_phase(self, agents, current_agent, best_agent): r1 = r.generate_uniform_random_number() r2 = r.generate_uniform_random_number() - # Updates the location vector (Eq. 1 - part 1) + # Updates the location vector (eq. 1 - part 1) location_vector = agents[j].position - r1 * \ np.fabs(agents[j].position - 2 * r2 * current_agent.position) @@ -112,7 +109,7 @@ def _exploration_phase(self, agents, current_agent, best_agent): lb = np.expand_dims(current_agent.lb, -1) ub = np.expand_dims(current_agent.ub, -1) - # Updates the location vector (Eq. 1 - part 2) + # Updates the location vector (eq. 1 - part 2) location_vector = (best_agent.position - average) - r3 * (lb + r4 * (ub - lb)) return location_vector @@ -136,153 +133,105 @@ def _exploitation_phase(self, energy, jump, agents, current_agent, best_agent, f # Generates a uniform random number w = r.generate_uniform_random_number() - # Soft besiege - if w >= 0.5 and energy >= 0.5: - # Calculates the delta's position - delta = best_agent.position - current_agent.position + # Without rapid dives + if w >= 0.5: + # Soft besiege + if energy >= 0.5: + # Calculates the delta's position + delta = best_agent.position - current_agent.position - # Calculates the location vector (Eq. 
4) - location_vector = delta - energy * \ - np.fabs(jump * best_agent.position - current_agent.position) + # Calculates the location vector (eq. 4) + location_vector = delta - energy * np.fabs(jump * best_agent.position - current_agent.position) - return location_vector + return location_vector - # Hard besiege - if w >= 0.5 and energy < 0.5: - # Calculates the delta's position - delta = best_agent.position - current_agent.position + # Hard besiege + else: + # Calculates the delta's position + delta = best_agent.position - current_agent.position - # Calculates the location vector (Eq. 6) - location_vector = best_agent.position - energy * np.fabs(delta) + # Calculates the location vector (eq. 6) + location_vector = best_agent.position - energy * np.fabs(delta) - return location_vector + return location_vector - # Soft besiege with rapid dives - if w < 0.5 and energy >= 0.5: - # Calculates the `Y` position (Eq. 7) - Y = best_agent.position - energy * \ - np.fabs(jump * best_agent.position - current_agent.position) + # With rapid dives + # Soft besiege + if energy >= 0.5: + # Calculates the `Y` position (eq. 7) + Y = best_agent.position - energy * np.fabs(jump * best_agent.position - current_agent.position) - # Generates the Lévy's flight and random array (Eq. 9) + # Generates the Lévy's flight and random array (eq. 9) LF = d.generate_levy_distribution(1.5, (current_agent.n_variables, current_agent.n_dimensions)) S = r.generate_uniform_random_number(size=(current_agent.n_variables, current_agent.n_dimensions)) - # Calculates the `Z` position (Eq. 8) + # Calculates the `Z` position (eq. 8) Z = Y + S * LF # Evaluates new positions Y_fit = function(Y) Z_fit = function(Z) - # If `Y` position is better than current agent's one (Eq. 10 - part 1) + # If `Y` position is better than current agent's one (eq. 10 - part 1) if Y_fit < current_agent.fit: return Y - # If `Z` position is better than current agent's one (Eq. 
10 - part 2) + # If `Z` position is better than current agent's one (eq. 10 - part 2) if Z_fit < current_agent.fit: return Z - # Hard besiege with rapid dives + # Hard besiege else: # Averages the population's position average = np.mean([x.position for x in agents], axis=0) - # Calculates the `Y` position (Eq. 12) - Y = best_agent.position - energy * \ - np.fabs(jump * best_agent.position - average) + # Calculates the `Y` position (eq. 12) + Y = best_agent.position - energy * np.fabs(jump * best_agent.position - average) - # Generates the Lévy's flight and random array (Eq. 9) + # Generates the Lévy's flight and random array (eq. 9) LF = d.generate_levy_distribution(1.5, (current_agent.n_variables, current_agent.n_dimensions)) S = r.generate_uniform_random_number(size=(current_agent.n_variables, current_agent.n_dimensions)) - # Calculates the `Z` position (Eq. 13) + # Calculates the `Z` position (eq. 13) Z = Y + S * LF # Evaluates new positions Y_fit = function(Y) Z_fit = function(Z) - # If `Y` position is better than current agent's one (Eq. 11 - part 1) + # If `Y` position is better than current agent's one (eq. 11 - part 1) if Y_fit < current_agent.fit: return Y - # If `Z` position is better than current agent's one (Eq. 11 - part 2) + # If `Z` position is better than current agent's one (eq. 11 - part 2) if Z_fit < current_agent.fit: return Z return current_agent.position - def _update(self, agents, best_agent, function, iteration, n_iterations): - """Method that wraps the Harris Hawks Optimization over all agents and variables. + def update(self, space, function, iteration, n_iterations): + """Wraps Harris Hawks Optimization over all agents and variables. Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. - function (Function): A function object. + space (Space): Space containing agents and update-related information. + function (Function): A Function object that will be used as the objective function. 
iteration (int): Current iteration. n_iterations (int): Maximum number of iterations. """ # Iterates through all agents - for agent in agents: + for agent in space.agents: # Calculates the prey's energy and jump's stength E, J = self._calculate_initial_coefficients(iteration, n_iterations) # Checks if energy is bigger or equal to one if E >= 1: # Performs the exploration phase - agent.position = self._exploration_phase(agents, agent, best_agent) + agent.position = self._exploration_phase(space.agents, agent, space.best_agent) # If energy is smaller than one else: # Performs the exploitation phase - agent.position = self._exploitation_phase(E, J, agents, agent, best_agent, function) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. 
- - """ - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, space.best_agent, - function, t, space.n_iterations) - - # Checking if agents meets the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history + agent.position = self._exploitation_phase(E, J, space.agents, agent, + space.best_agent, function) diff --git a/opytimizer/optimizers/population/loa.py b/opytimizer/optimizers/population/loa.py index f572f395..cc0428d6 100644 --- a/opytimizer/optimizers/population/loa.py +++ b/opytimizer/optimizers/population/loa.py @@ -7,7 +7,7 @@ from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e import opytimizer.utils.history as h import opytimizer.utils.logging as l @@ -28,21 +28,20 @@ class LOA(Optimizer): """ - def __init__(self, algorithm='LOA', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. 
+ params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> LOA.') - # Override its parent class with the receiving hyperparams - super(LOA, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(LOA, self).__init__() - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') diff --git a/opytimizer/optimizers/population/lpoa.py b/opytimizer/optimizers/population/lpoa.py index 731e04a6..4a6c3410 100644 --- a/opytimizer/optimizers/population/lpoa.py +++ b/opytimizer/optimizers/population/lpoa.py @@ -7,7 +7,7 @@ from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e import opytimizer.utils.history as h import opytimizer.utils.logging as l @@ -29,21 +29,20 @@ class LPOA(Optimizer): """ - def __init__(self, algorithm='LPOA', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. 
""" logger.info('Overriding class: Optimizer -> LPOA.') - # Override its parent class with the receiving hyperparams - super(LPOA, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(LPOA, self).__init__() - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') diff --git a/opytimizer/optimizers/population/ppa.py b/opytimizer/optimizers/population/ppa.py index 726b8c8e..7f40310b 100644 --- a/opytimizer/optimizers/population/ppa.py +++ b/opytimizer/optimizers/population/ppa.py @@ -7,7 +7,7 @@ from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e import opytimizer.utils.history as h import opytimizer.utils.logging as l @@ -28,21 +28,20 @@ class PPA(Optimizer): """ - def __init__(self, algorithm='PPA', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. 
""" logger.info('Overriding class: Optimizer -> PPA.') - # Override its parent class with the receiving hyperparams - super(PPA, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(PPA, self).__init__() - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') diff --git a/opytimizer/optimizers/population/pvs.py b/opytimizer/optimizers/population/pvs.py index ee9c374c..f9b88296 100644 --- a/opytimizer/optimizers/population/pvs.py +++ b/opytimizer/optimizers/population/pvs.py @@ -7,7 +7,7 @@ from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e import opytimizer.utils.history as h import opytimizer.utils.logging as l @@ -28,21 +28,20 @@ class PVS(Optimizer): """ - def __init__(self, algorithm='PVS', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> PVS.') - # Override its parent class with the receiving hyperparams - super(PVS, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(PVS, self).__init__() - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') diff --git a/opytimizer/optimizers/science/__init__.py b/opytimizer/optimizers/science/__init__.py index 51afb9e9..d3eb628c 100644 --- a/opytimizer/optimizers/science/__init__.py +++ b/opytimizer/optimizers/science/__init__.py @@ -1,3 +1,21 @@ """An evolutionary package for all common opytimizer modules. It contains implementations of science-based optimizers. 
""" + +from opytimizer.optimizers.science.aig import AIG +from opytimizer.optimizers.science.aso import ASO +from opytimizer.optimizers.science.bh import BH +from opytimizer.optimizers.science.efo import EFO +from opytimizer.optimizers.science.eo import EO +from opytimizer.optimizers.science.esa import ESA +from opytimizer.optimizers.science.gsa import GSA +from opytimizer.optimizers.science.hgso import HGSO +from opytimizer.optimizers.science.lsa import LSA +from opytimizer.optimizers.science.moa import MOA +from opytimizer.optimizers.science.mvo import MVO +from opytimizer.optimizers.science.sa import SA +from opytimizer.optimizers.science.two import TWO +from opytimizer.optimizers.science.wca import WCA +from opytimizer.optimizers.science.wdo import WDO +from opytimizer.optimizers.science.weo import WEO +from opytimizer.optimizers.science.wwo import WWO diff --git a/opytimizer/optimizers/science/aig.py b/opytimizer/optimizers/science/aig.py index 5ef2a5ae..147449ec 100644 --- a/opytimizer/optimizers/science/aig.py +++ b/opytimizer/optimizers/science/aig.py @@ -7,7 +7,7 @@ from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e import opytimizer.utils.history as h import opytimizer.utils.logging as l @@ -29,21 +29,20 @@ class AIG(Optimizer): """ - def __init__(self, algorithm='AIG', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. 
""" logger.info('Overriding class: Optimizer -> AIG.') - # Override its parent class with the receiving hyperparams - super(AIG, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(AIG, self).__init__() - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') diff --git a/opytimizer/optimizers/science/aso.py b/opytimizer/optimizers/science/aso.py index 50c80883..1acc6548 100644 --- a/opytimizer/optimizers/science/aso.py +++ b/opytimizer/optimizers/science/aso.py @@ -2,12 +2,10 @@ """ import numpy as np -from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -27,19 +25,18 @@ class ASO(Optimizer): """ - def __init__(self, algorithm='ASO', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> ASO.') - # Override its parent class with the receiving hyperparams - super(ASO, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(ASO, self).__init__() # Depth weight self.alpha = 50.0 @@ -47,8 +44,8 @@ def __init__(self, algorithm='ASO', hyperparams=None): # Multiplier weight self.beta = 0.2 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -84,8 +81,34 @@ def beta(self, beta): self._beta = beta + @property + def velocity(self): + """np.array: Array of velocities. 
+ + """ + + return self._velocity + + @velocity.setter + def velocity(self, velocity): + if not isinstance(velocity, np.ndarray): + raise e.TypeError('`velocity` should be a numpy array') + + self._velocity = velocity + + def create_additional_attrs(self, space): + """Creates additional attributes that are used by this optimizer. + + Args: + space (Space): A Space object containing meta-information. + + """ + + # Arrays of velocities + self.velocity = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) + def _calculate_mass(self, agents): - """Calculates the atoms' masses (Eq. 17 and 18). + """Calculates the atoms' masses (eq. 17 and 18). Args: agents (list): List of agents. @@ -106,7 +129,8 @@ def _calculate_mass(self, agents): total_fit = np.sum([np.exp(-(agent.fit - best) / (worst - best + c.EPSILON)) for agent in agents]) # Calculates the masses - mass = [np.exp(-(agent.fit - best) / (worst - best + c.EPSILON)) / total_fit for agent in agents] + mass = [np.exp(-(agent.fit - best) / (worst - best + + c.EPSILON)) / total_fit for agent in agents] return mass @@ -117,7 +141,7 @@ def _calculate_potential(self, agent, K_agent, average, iteration, n_iterations) agent (Agent): Agent to have its potential calculated. K_agent (Agent): Neighbour agent. average (np.array): Array of average positions. - iteration (int): Number of current iteration. + iteration (int): Current iteration. n_iterations (int): Maximum number of iterations. """ @@ -166,7 +190,7 @@ def _calculate_acceleration(self, agents, best_agent, mass, iteration, n_iterati agents (list): List of agents. best_agent (Agent): Global best agent. mass (np.array): Array of masses. - iteration (int): Number of current iteration. + iteration (int): Current iteration. n_iterations (int): Maximum number of iterations. 
Returns: @@ -199,121 +223,34 @@ def _calculate_acceleration(self, agents, best_agent, mass, iteration, n_iterati # Sums up the current potential to the total one total_potential += self._calculate_potential(agent, K_agent, average, iteration, n_iterations) - # Finally, calculates the acceleration (Eq. 16) + # Finally, calculates the acceleration (eq. 16) acceleration[i] = G * self.alpha * total_potential + \ self.beta * (best_agent.position - agent.position) / mass[i] return acceleration - def _update_velocity(self, velocity, acceleration): - """Updates an atom's velocity (Eq. 21). - - Args: - velocity (np.array): Agent's velocity. - acceleration (np.array): Agent's acceleration. - - Returns: - An updated velocity. - - """ - - # Generates a uniform random number - r1 = r.generate_uniform_random_number() - - # Calculates the new velocity - new_velocity = r1 * velocity + acceleration - - return new_velocity - - def _update_position(self, position, velocity): - """Updates an atom's position (Eq. 22). - - Args: - position (np.array): Agent's position. - velocity (np.array): Agent's velocity. - - Returns: - An updated position. - - """ - - # Calculates the new position - new_position = position + velocity - - return new_position - - def _update(self, agents, best_agent, velocity, iteration, n_iterations): - """Method that wraps the Atom Search Optimization over all agents and variables. + def update(self, space, iteration, n_iterations): + """Wraps Atom Search Optimization over all agents and variables. Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. - velocity (np.array): Array of velocities. - iteration (int): Number of current iteration. + space (Space): Space containing agents and update-related information. + iteration (int): Current iteration. n_iterations (int): Maximum number of iterations. """ - # Calculates the masses (Eq. 17 and 18) - mass = self._calculate_mass(agents) + # Calculates the masses (eq. 
17 and 18) + mass = self._calculate_mass(space.agents) - # Calculates the acceleration (Eq. 16) - acceleration = self._calculate_acceleration(agents, best_agent, mass, iteration, n_iterations) + # Calculates the acceleration (eq. 16) + acceleration = self._calculate_acceleration(space.agents, space.best_agent, mass, + iteration, n_iterations) # Iterates through all agents - for i, agent in enumerate(agents): - # Updates current agent's velocity (Eq. 21) - velocity[i] = self._update_velocity(velocity[i], acceleration[i]) - - # Updates current agent's position (Eq. 22) - agent.position = self._update_position(agent.position, velocity[i]) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. 
- - """ - - # Instantiates an array of velocities - velocity = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, space.best_agent, velocity, t, space.n_iterations) - - # Checking if agents meets the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') + for i, agent in enumerate(space.agents): + # Updates current agent's velocity (eq. 21) + r1 = r.generate_uniform_random_number() + self.velocity[i] = r1 * self.velocity[i] + acceleration[i] - return history + # Updates current agent's position (eq. 
22) + agent.position += self.velocity[i] diff --git a/opytimizer/optimizers/science/bh.py b/opytimizer/optimizers/science/bh.py index bcd877d1..d1e45fcd 100644 --- a/opytimizer/optimizers/science/bh.py +++ b/opytimizer/optimizers/science/bh.py @@ -2,13 +2,11 @@ """ import numpy as np -from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer -from opytimizer.utils import constants +from opytimizer.utils import constant logger = l.get_logger(__name__) @@ -25,22 +23,21 @@ class BH(Optimizer): """ - def __init__(self, algorithm='BH', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> BH.') - # Override its parent class with the receiving hyperparams - super(BH, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(BH, self).__init__() - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -60,7 +57,7 @@ def _update_position(self, agents, best_agent, function): # Event's horizon cost cost = 0 - # Iterate through all agents + # Iterates through all agents for agent in agents: # Generate an uniform random number r1 = r.generate_uniform_random_number() @@ -68,8 +65,8 @@ def _update_position(self, agents, best_agent, function): # Updates agent's position agent.position += r1 * (best_agent.position - agent.position) - # Checking agents limits - agent.clip_limits() + # Checks agents limits + agent.clip_by_bound() # Evaluates agent agent.fit = function(agent.position) @@ -96,7 +93,7 @@ def _event_horizon(self, agents, best_agent, cost): """ # Calculates the 
radius of the event horizon - radius = best_agent.fit / max(cost, constants.EPSILON) + radius = best_agent.fit / max(cost, constant.EPSILON) # Iterate through every agent for agent in agents: @@ -105,70 +102,20 @@ def _event_horizon(self, agents, best_agent, cost): # If distance is smaller than horizon's radius if distance < radius: - # Generates a new random star - for j, (lb, ub) in enumerate(zip(agent.lb, agent.ub)): - # For each decision variable, we generate uniform random numbers - agent.position[j] = r.generate_uniform_random_number(lb, ub, size=agent.n_dimensions) + # Fills agent with new random positions + agent.fill_with_uniform() - def _update(self, agents, best_agent, function): - """Method that wraps the update pipeline over all agents and variables. + def update(self, space, function): + """Wraps Black Hole over all agents and variables. Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. - function (Function): A function object. + space (Space): Space containing agents and update-related information. + function (Function): A Function object that will be used as the objective function. """ # Updates stars position and calculate their cost (eq. 3) - cost = self._update_position(agents, best_agent, function) + cost = self._update_position(space.agents, space.best_agent, function) # Performs the Event Horizon (eq. 4) - self._event_horizon(agents, best_agent, cost) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. 
- - """ - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, space.best_agent, function) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history + self._event_horizon(space.agents, space.best_agent, cost) diff --git a/opytimizer/optimizers/science/efo.py b/opytimizer/optimizers/science/efo.py index 8f9c0726..eac25a04 100644 --- a/opytimizer/optimizers/science/efo.py +++ b/opytimizer/optimizers/science/efo.py @@ -4,11 +4,9 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.random as r import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -28,17 +26,16 @@ class EFO(Optimizer): """ - def __init__(self, algorithm='EFO', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. 
""" - # Override its parent class with the receiving hyperparams - super(EFO, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(EFO, self).__init__() # Positive field proportion self.positive_field = 0.1 @@ -52,8 +49,14 @@ def __init__(self, algorithm='EFO', hyperparams=None): # Probability of selecting a random eletromagnet self.r_ratio = 0.4 - # Now, we need to build this class up - self._build(hyperparams) + # Golden ratio + self.phi = (1 + np.sqrt(5)) / 2 + + # Eletromagnetic index + self.RI = 0 + + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -128,6 +131,38 @@ def r_ratio(self, r_ratio): self._r_ratio = r_ratio + @property + def phi(self): + """float: Golden ratio. + + """ + + return self._phi + + @phi.setter + def phi(self, phi): + if not isinstance(phi, (float, int)): + raise e.TypeError('`phi` should be a float or integer') + + self._phi = phi + + @property + def RI(self): + """float: Eletromagnetic index. + + """ + + return self._RI + + @RI.setter + def RI(self, RI): + if not isinstance(RI, int): + raise e.TypeError('`RI` should be an integer') + if RI < 0: + raise e.TypeError('`RI` should be >= 0') + + self._RI = RI + def _calculate_indexes(self, n_agents): """Calculates the indexes of positive, negative and neutral particles. 
@@ -140,10 +175,12 @@ def _calculate_indexes(self, n_agents): """ # Calculates a positive particle's index - positive_index = int(r.generate_uniform_random_number(0, n_agents * self.positive_field)) + positive_index = int(r.generate_uniform_random_number( + 0, n_agents * self.positive_field)) # Calculates a negative particle's index - negative_index = int(r.generate_uniform_random_number(n_agents * (1 - self.negative_field), n_agents)) + negative_index = int(r.generate_uniform_random_number( + n_agents * (1 - self.negative_field), n_agents)) # Calculates a neutral particle's index neutral_index = int(r.generate_uniform_random_number( @@ -151,25 +188,23 @@ def _calculate_indexes(self, n_agents): return positive_index, negative_index, neutral_index - def _update(self, agents, function, phi, RI): - """Method that wraps updates over all agents and variables (eq. 1-4). + def update(self, space, function): + """Wraps Electromagnetic Field Optimization over all agents and variables (eq. 1-4). Args: - agents (list): List of agents. + space (Space): Space containing agents and update-related information. function (Function): A Function object that will be used as the objective function. - phi (float): Golden ratio constant. - RI (int): Index of particle's eletromagnet. 
""" # Sorts agents according to their fitness - agents.sort(key=lambda x: x.fit) + space.agents.sort(key=lambda x: x.fit) # Gathers the number of total agents - n_agents = len(agents) + n_agents = len(space.agents) # Making a deepcopy of current's best agent - agent = copy.deepcopy(agents[0]) + agent = copy.deepcopy(space.agents[0]) # Generates a uniform random number for the force force = r.generate_uniform_random_number() @@ -185,16 +220,17 @@ def _update(self, agents, function, phi, RI): # If random number is smaller than the probability of selecting eletromagnets if r1 < self.ps_ratio: # Applies agent's position as positive particle's position - agent.position[j] = agents[pos].position[j] + agent.position[j] = space.agents[pos].position[j] # If random number is bigger else: # Calculates the new agent's position - agent.position[j] = agents[neg].position[j] + phi * force * ( - agents[pos].position[j] - agents[neu].position[j]) - force * (agents[neg].position[j] - agents[neu].position[j]) + agent.position[j] = space.agents[neg].position[j] + self.phi * force * \ + (space.agents[pos].position[j] - space.agents[neu].position[j]) \ + - force * (space.agents[neg].position[j] - space.agents[neu].position[j]) # Clips the agent's position to its limits - agent.clip_limits() + agent.clip_by_bound() # Generates a third uniform random number r2 = r.generate_uniform_random_number() @@ -202,75 +238,20 @@ def _update(self, agents, function, phi, RI): # If random number is smaller than probability of changing a random eletromagnet if r2 < self.r_ratio: # Update agent's position based on RI - agent.position[RI] = r.generate_uniform_random_number(agent.lb[RI], agent.ub[RI]) + agent.position[self.RI] = r.generate_uniform_random_number(agent.lb[self.RI], agent.ub[self.RI]) # Increases RI by one - RI += 1 + self.RI += 1 # If RI exceeds the number of variables - if RI >= agent.n_variables: + if self.RI >= agent.n_variables: # Resets it to one - RI = 1 + self.RI = 1 # Calculates the 
agent's fitness agent.fit = function(agent.position) # If newly generated agent fitness is better than worst fitness - if agent.fit < agents[-1].fit: + if agent.fit < space.agents[-1].fit: # Updates the corresponding agent's object - agents[-1] = copy.deepcopy(agent) - - return RI - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. - - """ - - # Defines the golden ratio - phi = (1 + np.sqrt(5)) / 2 - - # Defines the eletromagnetic index - RI = 0 - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - RI = self._update(space.agents, function, phi, RI) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history + 
space.agents[-1] = copy.deepcopy(agent) diff --git a/opytimizer/optimizers/science/eo.py b/opytimizer/optimizers/science/eo.py index 9c9d6011..7bba401f 100644 --- a/opytimizer/optimizers/science/eo.py +++ b/opytimizer/optimizers/science/eo.py @@ -4,11 +4,9 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.random as rnd import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -27,19 +25,18 @@ class EO(Optimizer): """ - def __init__(self, algorithm='EO', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> EO.') - # Override its parent class with the receiving hyperparams - super(EO, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(EO, self).__init__() # Exploration constant self.a1 = 2.0 @@ -53,8 +50,8 @@ def __init__(self, algorithm='EO', hyperparams=None): # Velocity self.V = 1.0 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -126,48 +123,67 @@ def V(self, V): self._V = V - def _calculate_equilibrium(self, agents, C): + @property + def C(self): + """list: Concentrations (agents). + + """ + + return self._C + + @C.setter + def C(self, C): + if not isinstance(C, list): + raise e.TypeError('`C` should be a list') + + self._C = C + + def create_additional_attrs(self, space): + """Creates additional attributes that are used by this optimizer. + + Args: + space (Space): A Space object containing meta-information. 
+ + """ + + # List of concentrations (agents) + self.C = [copy.deepcopy(space.agents[0]) for _ in range(4)] + + def _calculate_equilibrium(self, agents): """Calculates the equilibrium concentrations. Args: agents (list): List of agents. - C (list): List of concentrations to be updated. - - Returns: - List of equilibrium concentrations. """ # Iterates through all agents for agent in agents: # If current agent's fitness is smaller than C0 - if agent.fit < C[0].fit: + if agent.fit < self.C[0].fit: # Replaces C0 object - C[0] = copy.deepcopy(agent) + self.C[0] = copy.deepcopy(agent) # If current agent's fitness is between C0 and C1 - elif agent.fit < C[1].fit: + elif agent.fit < self.C[1].fit: # Replaces C1 object - C[1] = copy.deepcopy(agent) + self.C[1] = copy.deepcopy(agent) # If current agent's fitness is between C1 and C2 - elif agent.fit < C[2].fit: + elif agent.fit < self.C[2].fit: # Replaces C2 object - C[2] = copy.deepcopy(agent) + self.C[2] = copy.deepcopy(agent) # If current agent's fitness is between C2 and C3 - elif agent.fit < C[3].fit: + elif agent.fit < self.C[3].fit: # Replaces C3 object - C[3] = copy.deepcopy(agent) + self.C[3] = copy.deepcopy(agent) - return C - - def _average_concentration(self, function, C): + def _average_concentration(self, function): """Averages the concentrations. Args: function (Function): A Function object that will be used as the objective function. - C (list): List of concentrations. Returns: Averaged concentration. 
@@ -175,43 +191,42 @@ def _average_concentration(self, function, C): """ # Makes a deepcopy to withhold the future update - C_avg = copy.deepcopy(C[0]) + C_avg = copy.deepcopy(self.C[0]) # Update the position with concentrations' averager - C_avg.position = np.mean([c.position for c in C], axis=0) + C_avg.position = np.mean([c.position for c in self.C], axis=0) # Clips its limits - C_avg.clip_limits() + C_avg.clip_by_bound() # Re-calculate its fitness C_avg.fit = function(C_avg.position) return C_avg - def _update(self, agents, function, C, iteration, n_iterations): - """Method that wraps Equilibrium Optimizer over all agents and variables. + def update(self, space, function, iteration, n_iterations): + """Wraps Equilibrium Optimizer over all agents and variables. Args: - agents (list): List of agents. + space (Space): Space containing agents and update-related information. function (Function): A Function object that will be used as the objective function. - C (list): List of concentrations. iteration (int): Current iteration. n_iterations (int): Maximum number of iterations. """ # Calculates the equilibrium and average concentrations - C = self._calculate_equilibrium(agents, C) - C_avg = self._average_concentration(function, C) + self._calculate_equilibrium(space.agents) + C_avg = self._average_concentration(function) # Makes a pool of both concentrations and their average (eq. 7) - C_pool = C + [C_avg] + C_pool = self.C + [C_avg] # Calculates the time (eq. 9) t = (1 - iteration / n_iterations) ** (self.a2 * iteration / n_iterations) # Iterates through all agents - for agent in agents: + for agent in space.agents: # Generates a integer between [0, 5) to select the concentration i = rnd.generate_integer_random_number(0, 5) @@ -245,53 +260,3 @@ def _update(self, agents, function, C, iteration, n_iterations): # Updates agent's position (eq. 
16) agent.position = C_pool[i].position + ( agent.position - C_pool[i].position) * F + (G / (lambd * self.V)) * (1 - F) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. - - """ - - # Creates a list of concentrations (agents) - C = [copy.deepcopy(space.agents[0]) for _ in range(4)] - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, function, C, t, space.n_iterations) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history diff --git a/opytimizer/optimizers/science/esa.py b/opytimizer/optimizers/science/esa.py index c68fb49c..914fcb58 100644 --- a/opytimizer/optimizers/science/esa.py 
+++ b/opytimizer/optimizers/science/esa.py @@ -7,7 +7,7 @@ from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e import opytimizer.utils.history as h import opytimizer.utils.logging as l @@ -28,21 +28,20 @@ class ESA(Optimizer): """ - def __init__(self, algorithm='ESA', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> ESA.') - # Override its parent class with the receiving hyperparams - super(ESA, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(ESA, self).__init__() - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') diff --git a/opytimizer/optimizers/science/gsa.py b/opytimizer/optimizers/science/gsa.py index 4154a083..2164f3e2 100644 --- a/opytimizer/optimizers/science/gsa.py +++ b/opytimizer/optimizers/science/gsa.py @@ -2,13 +2,11 @@ """ import numpy as np -from tqdm import tqdm import opytimizer.math.general as g import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -27,25 +25,24 @@ class GSA(Optimizer): """ - def __init__(self, algorithm='GSA', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. 
+ params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> GSA.') - # Override its parent class with the receiving hyperparams - super(GSA, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(GSA, self).__init__() # Initial gravity value self.G = 2.467 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -66,6 +63,32 @@ def G(self, G): self._G = G + @property + def velocity(self): + """np.array: Array of velocities. + + """ + + return self._velocity + + @velocity.setter + def velocity(self, velocity): + if not isinstance(velocity, np.ndarray): + raise e.TypeError('`velocity` should be a numpy array') + + self._velocity = velocity + + def create_additional_attrs(self, space): + """Creates additional attributes that are used by this optimizer. + + Args: + space (Space): A Space object containing meta-information. + + """ + + # Arrays of velocities + self.velocity = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) + def _calculate_mass(self, agents): """Calculates agents' mass (eq. 16). 
@@ -77,14 +100,14 @@ def _calculate_mass(self, agents): """ - # Gathering the best and worst agents + # Gathers the best and worst agents best, worst = agents[0].fit, agents[-1].fit - # Calculating agents' masses using equation 15 - mass = [(agent.fit - worst) / (best - worst) for agent in agents] + # Calculates agents' masses using equation 15 + mass = [(agent.fit - worst) / (best - worst + c.EPSILON) for agent in agents] - # Normalizing agents' masses - norm_mass = mass / np.sum(mass) + # Normalizes agents' masses + norm_mass = mass / (np.sum(mass) + c.EPSILON) return norm_mass @@ -108,125 +131,40 @@ def _calculate_force(self, agents, mass, gravity): # Transforms the force into an array force = np.asarray(force) - # Applying a stochastic trait to the force + # Applies a stochastic trait to the force force = np.sum(r.generate_uniform_random_number() * force, axis=1) return force - def _update_velocity(self, force, mass, velocity): - """Updates an agent velocity (eq. 11). + def update(self, space, iteration): + """Wraps Gravitational Search Algorithm over all agents and variables. Args: - force (np.array): Matrix of attraction forces. - mass (np.array): An array of agents' mass. - velocity (np.array): Agent's current velocity. - - Returns: - A new velocity. - - """ - - # Calculates the acceleration using paper's equation 10 - acceleration = force / (mass + c.EPSILON) - - # Calculates the new velocity - new_velocity = r.generate_uniform_random_number() * velocity + acceleration - - return new_velocity - - def _update_position(self, position, velocity): - """Updates an agent position (eq. 12). - - Args: - position (np.array): Agent's current position. - velocity (np.array): Agent's current velocity. - - Returns: - A new position. + space (Space): Space containing agents and update-related information. + iteration (int): Current iteration. 
""" - # Calculates new position - new_position = position + velocity + # Sorts agents + space.agents.sort(key=lambda x: x.fit) - return new_position - - def _update(self, agents, velocity, iteration): - """Method that wraps Gravitational Search Algorithm over all agents and variables. - - Args: - agents (list): List of agents. - velocity (np.array): Array of current velocities. - iteration (int): Current iteration value. - - """ - - # Sorting agents - agents.sort(key=lambda x: x.fit) - - # Calculating the current gravity + # Calculates the current gravity gravity = self.G / (iteration + 1) - # Calculating agents' mass - mass = self._calculate_mass(agents) - - # Calculating agents' attraction force - force = self._calculate_force(agents, mass, gravity) - - # Iterate through all agents - for i, agent in enumerate(agents): - # Updates current agent velocities - velocity[i] = self._update_velocity(force[i], mass[i], velocity[i]) - - # Updates current agent positions - agent.position = self._update_position(agent.position, velocity[i]) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. 
- - """ - - # Creates an array of velocities - velocity = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, velocity, t) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) + # Calculates agents' mass + mass = self._calculate_mass(space.agents) - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) + # Calculates agents' attraction force + force = self._calculate_force(space.agents, mass, gravity) - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() + # Iterates through all agents + for i, agent in enumerate(space.agents): + # Calculates the acceleration (eq. 10) + acceleration = force[i] / (mass[i] + c.EPSILON) - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') + # Updates current agent velocity (eq. 11) + r1 = r.generate_uniform_random_number() + self.velocity[i] = r1 * self.velocity[i] + acceleration - return history + # Updates current agent position (eq. 
12) + agent.position += self.velocity[i] diff --git a/opytimizer/optimizers/science/hgso.py b/opytimizer/optimizers/science/hgso.py index 76b00c5c..11305e40 100644 --- a/opytimizer/optimizers/science/hgso.py +++ b/opytimizer/optimizers/science/hgso.py @@ -2,12 +2,10 @@ """ import numpy as np -from tqdm import tqdm import opytimizer.math.general as g import opytimizer.math.random as r import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -26,19 +24,18 @@ class HGSO(Optimizer): """ - def __init__(self, algorithm='HGSO', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> HGSO.') - # Override its parent class with the receiving hyperparams - super(HGSO, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(HGSO, self).__init__() # Number of clusters self.n_clusters = 2 @@ -61,8 +58,8 @@ def __init__(self, algorithm='HGSO', hyperparams=None): # Solubility constant self.K = 1.0 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -185,6 +182,67 @@ def K(self, K): self._K = K + @property + def coefficient(self): + """np.array: Array of coefficients. + + """ + + return self._coefficient + + @coefficient.setter + def coefficient(self, coefficient): + if not isinstance(coefficient, np.ndarray): + raise e.TypeError('`coefficient` should be a numpy array') + + self._coefficient = coefficient + + @property + def pressure(self): + """np.array: Array of pressures. 
+ + """ + + return self._pressure + + @pressure.setter + def pressure(self, pressure): + if not isinstance(pressure, np.ndarray): + raise e.TypeError('`pressure` should be a numpy array') + + self._pressure = pressure + + @property + def constant(self): + """np.array: Array of constants. + + """ + + return self._constant + + @constant.setter + def constant(self, constant): + if not isinstance(constant, np.ndarray): + raise e.TypeError('`constant` should be a numpy array') + + self._constant = constant + + def create_additional_attrs(self, space): + """Creates additional attributes that are used by this optimizer. + + Args: + space (Space): A Space object containing meta-information. + + """ + + # Number of agents per cluster + n_agents_per_cluster = int(len(space.agents) / self.n_clusters) + + # Arrays of coefficients, pressures and constants + self.coefficient = self.l1 * r.generate_uniform_random_number(size=self.n_clusters) + self.pressure = self.l2 * r.generate_uniform_random_number(size=(self.n_clusters, n_agents_per_cluster)) + self.constant = self.l3 * r.generate_uniform_random_number(size=self.n_clusters) + def _update_position(self, agent, cluster_agent, best_agent, solubility): """Updates the position of a single gas (eq. 10). @@ -215,23 +273,19 @@ def _update_position(self, agent, cluster_agent, best_agent, solubility): return new_position - def _update(self, agents, best_agent, function, coefficient, pressure, constant, iteration, n_iterations): - """Method that wraps Henry Gas Solubility Optimization over all agents and variables. + def update(self, space, function, iteration, n_iterations): + """Wraps Henry Gas Solubility Optimization over all agents and variables. Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. + space (Space): Space containing agents and update-related information. function (Function): A Function object that will be used as the objective function. - coefficient (np.array): Henry's coefficient array. 
- pressure (np.array): Partial pressure array. - constant (np.array): Constants array. iteration (int): Current iteration. n_iterations (int): Maximum number of iterations. """ # Creates n-wise clusters - clusters = g.n_wise(agents, pressure.shape[1]) + clusters = g.n_wise(space.agents, self.pressure.shape[1]) # Iterates through all clusters for i, cluster in enumerate(clusters): @@ -239,7 +293,7 @@ def _update(self, agents, best_agent, function, coefficient, pressure, constant, T = np.exp(-iteration / n_iterations) # Updates Henry's coefficient (eq. 8) - coefficient[i] *= np.exp(-constant[i] * (1 / T - 1 / 298.15)) + self.coefficient[i] *= np.exp(-self.constant[i] * (1 / T - 1 / 298.15)) # Transforms the cluster into a list and sorts it cluster = list(cluster) @@ -248,86 +302,30 @@ def _update(self, agents, best_agent, function, coefficient, pressure, constant, # Iterates through all agents in cluster for j, agent in enumerate(cluster): # Calculates agent's solubility (eq. 9) - solubility = self.K * coefficient[i] * pressure[i][j] + solubility = self.K * self.coefficient[i] * self.pressure[i][j] # Updates agent's position (eq. 10) - agent.position = self._update_position(agent, cluster[0], best_agent, solubility) + agent.position = self._update_position(agent, cluster[0], space.best_agent, solubility) # Clips agent's limits - agent.clip_limits() + agent.clip_by_bound() # Re-calculates its fitness agent.fit = function(agent.position) # Re-sorts the whole space - agents.sort(key=lambda x: x.fit) + space.agents.sort(key=lambda x: x.fit) # Generates a uniform random number r1 = r.generate_uniform_random_number() # Calculates the number of worst agents (eq. 
11) - N = int(len(agents) * (r1 * (0.2 - 0.1) + 0.1)) + N = int(len(space.agents) * (r1 * (0.2 - 0.1) + 0.1)) # Iterates through every bad agent - for agent in agents[-N:]: + for agent in space.agents[-N:]: # Generates another uniform random number r2 = r.generate_uniform_random_number() # Updates bad agent's position (eq. 12) agent.position = agent.lb + r2 * (agent.ub - agent.lb) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. - - """ - - # Calculates the number of agents per cluster - n_agents_per_cluster = int(len(space.agents) / self.n_clusters) - - # Instantiates a coefficients', pressures' and constants' array - coefficient = self.l1 * r.generate_uniform_random_number(size=self.n_clusters) - pressure = self.l2 * r.generate_uniform_random_number(size=(self.n_clusters, n_agents_per_cluster)) - constant = self.l3 * r.generate_uniform_random_number(size=self.n_clusters) - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, space.best_agent, function, - coefficient, pressure, constant, t, space.n_iterations) - - # Checking if agents meet the bounds limits - 
space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history diff --git a/opytimizer/optimizers/science/lsa.py b/opytimizer/optimizers/science/lsa.py index 302d58e2..7a451f1f 100644 --- a/opytimizer/optimizers/science/lsa.py +++ b/opytimizer/optimizers/science/lsa.py @@ -7,7 +7,7 @@ from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e import opytimizer.utils.history as h import opytimizer.utils.logging as l @@ -28,21 +28,20 @@ class LSA(Optimizer): """ - def __init__(self, algorithm='LSA', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. 
""" logger.info('Overriding class: Optimizer -> LSA.') - # Override its parent class with the receiving hyperparams - super(LSA, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(LSA, self).__init__() - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') diff --git a/opytimizer/optimizers/science/moa.py b/opytimizer/optimizers/science/moa.py index fa84872b..8190b3ae 100644 --- a/opytimizer/optimizers/science/moa.py +++ b/opytimizer/optimizers/science/moa.py @@ -7,7 +7,7 @@ from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e import opytimizer.utils.history as h import opytimizer.utils.logging as l @@ -28,21 +28,20 @@ class MOA(Optimizer): """ - def __init__(self, algorithm='MOA', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. 
""" logger.info('Overriding class: Optimizer -> MOA.') - # Override its parent class with the receiving hyperparams - super(MOA, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(MOA, self).__init__() - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') diff --git a/opytimizer/optimizers/science/mvo.py b/opytimizer/optimizers/science/mvo.py index b203c25a..3da7e279 100644 --- a/opytimizer/optimizers/science/mvo.py +++ b/opytimizer/optimizers/science/mvo.py @@ -2,12 +2,10 @@ """ import numpy as np -from tqdm import tqdm import opytimizer.math.general as g import opytimizer.math.random as r import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -27,17 +25,16 @@ class MVO(Optimizer): """ - def __init__(self, algorithm='MVO', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ - # Override its parent class with the receiving hyperparams - super(MVO, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(MVO, self).__init__() # Minimum value for the Wormhole Existence Probability self.WEP_min = 0.2 @@ -48,8 +45,8 @@ def __init__(self, algorithm='MVO', hyperparams=None): # Exploitation accuracy self.p = 6.0 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -106,20 +103,25 @@ def p(self, p): self._p = p - def _update(self, agents, best_agent, function, WEP, TDR): - """Method that wraps updates over all agents and variables (eq. 3.1-3.4). 
+ def update(self, space, function, iteration, n_iterations): + """Wraps Multi-Verse Optimizer over all agents and variables (eq. 3.1-3.4). Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. + space (Space): Space containing agents and update-related information. function (Function): A Function object that will be used as the objective function. - WEP (float): Current iteration's Wormhole Existence Probability. - TDR (floar): Current iteration's Travelling Distance Rate. + iteration (int): Current iteration. + n_iterations (int): Maximum number of iterations. """ + # Calculates the Wormhole Existence Probability + WEP = self.WEP_min + (iteration + 1) * ((self.WEP_max - self.WEP_min) / n_iterations) + + # Calculates the Travelling Distance Rate + TDR = 1 - ((iteration + 1) ** (1 / self.p) / n_iterations ** (1 / self.p)) + # Gathers the fitness for each individual - fitness = [agent.fit for agent in agents] + fitness = [agent.fit for agent in space.agents] # Calculates the norm of the fitness norm = np.linalg.norm(fitness) @@ -127,8 +129,8 @@ def _update(self, agents, best_agent, function, WEP, TDR): # Normalizes every individual's fitness norm_fitness = fitness / norm - # Iterate through all agents - for i, agent in enumerate(agents): + # Iterates through all agents + for i, agent in enumerate(space.agents): # For every decision variable for j in range(agent.n_variables): # Generates a uniform random number @@ -140,7 +142,7 @@ def _update(self, agents, best_agent, function, WEP, TDR): white_hole = g.weighted_wheel_selection(norm_fitness) # Gathers current agent's position as white hole's position - agent.position[j] = agents[white_hole].position[j] + agent.position[j] = space.agents[white_hole].position[j] # Generates a second uniform random number r2 = r.generate_uniform_random_number() @@ -156,68 +158,15 @@ def _update(self, agents, best_agent, function, WEP, TDR): # If random number is smaller than 0.5 if r3 < 0.5: # Updates the 
agent's position with `+` - agent.position[j] = best_agent.position[j] + TDR * width + agent.position[j] = space.best_agent.position[j] + TDR * width # If not else: # Updates the agent's position with `-` - agent.position[j] = best_agent.position[j] - TDR * width + agent.position[j] = space.best_agent.position[j] - TDR * width # Clips the agent limits - agent.clip_limits() + agent.clip_by_bound() # Calculates its fitness agent.fit = function(agent.position) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. 
- - """ - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Calculates the Wormhole Existence Probability - WEP = self.WEP_min + (t + 1) * ((self.WEP_max - self.WEP_min) / space.n_iterations) - - # Calculates the Travelling Distance Rate - TDR = 1 - ((t + 1) ** (1 / self.p) / space.n_iterations ** (1 / self.p)) - - # Updating agents - self._update(space.agents, space.best_agent, function, WEP, TDR) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history diff --git a/opytimizer/optimizers/science/sa.py b/opytimizer/optimizers/science/sa.py index fac747d5..81d496cb 100644 --- a/opytimizer/optimizers/science/sa.py +++ b/opytimizer/optimizers/science/sa.py @@ -4,11 +4,9 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.random as r import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -28,19 +26,18 @@ class SA(Optimizer): """ - def __init__(self, algorithm='SA', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. 
- hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> SA.') - # Override its parent class with the receiving hyperparams - super(SA, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(SA, self).__init__() # System's temperature self.T = 100 @@ -48,8 +45,8 @@ def __init__(self, algorithm='SA', hyperparams=None): # Temperature decay self.beta = 0.999 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -87,28 +84,28 @@ def beta(self, beta): self._beta = beta - def _update(self, agents, function): - """Method that wraps Simulated Annealing over all agents and variables. + def update(self, space, function): + """Wraps Simulated Annealing over all agents and variables. Args: - agents (list): List of agents. + space (Space): Space containing agents and update-related information. function (Function): A function object. 
""" - # Iterate through all agents - for agent in agents: + # Iterates through all agents + for agent in space.agents: # Mimics its position a = copy.deepcopy(agent) - # Generating a random noise from a gaussian distribution + # Generates a random noise from a gaussian distribution noise = r.generate_gaussian_random_number(0, 0.1, size=((agent.n_variables, agent.n_dimensions))) - # Applying the noise + # Applies the noise a.position += noise - # Check agent limits - a.clip_limits() + # Checks agent's limits + a.clip_by_bound() # Calculates the fitness for the temporary position a.fit = function(a.position) @@ -118,62 +115,15 @@ def _update(self, agents, function): # If new fitness is better than agent's fitness if a.fit < agent.fit: - # Copy its position and fitness to the agent + # Copies its position and fitness to the agent agent.position = copy.deepcopy(a.position) agent.fit = copy.deepcopy(a.fit) # Checks if state should be updated or not elif r1 < np.exp(-(a.fit - agent.fit) / self.T): - # Copy its position and fitness to the agent + # Copies its position and fitness to the agent agent.position = copy.deepcopy(a.position) agent.fit = copy.deepcopy(a.fit) # Decay the temperature self.T *= self.beta - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. 
- - """ - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, function) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history diff --git a/opytimizer/optimizers/science/two.py b/opytimizer/optimizers/science/two.py index f5445db1..a1470d60 100644 --- a/opytimizer/optimizers/science/two.py +++ b/opytimizer/optimizers/science/two.py @@ -4,12 +4,10 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -28,19 +26,18 @@ class TWO(Optimizer): """ - def __init__(self, algorithm='TWO', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. 
""" logger.info('Overriding class: Optimizer -> TWO.') - # Override its parent class with the receiving hyperparams - super(TWO, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(TWO, self).__init__() # Static friction coefficient self.mu_s = 1 @@ -57,8 +54,8 @@ def __init__(self, algorithm='TWO', hyperparams=None): # Scaling factor self.beta = 0.05 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -143,7 +140,8 @@ def beta(self, beta): if not isinstance(beta, (float, int)): raise e.TypeError('`beta` should be a float or integer') if beta <= 0 or beta > 1: - raise e.ValueError('`beta` should be greater than 0 and less than 1') + raise e.ValueError( + '`beta` should be greater than 0 and less than 1') self._beta = beta @@ -169,49 +167,47 @@ def _constraint_handle(self, agents, best_agent, function, iteration): r2 = r.generate_gaussian_random_number() # Updates the agent's position - agent.position = best_agent.position + \ - (r2 / iteration) * (best_agent.position - agent.position) + agent.position = best_agent.position + (r2 / iteration) * (best_agent.position - agent.position) # Clips its limits - agent.clip_limits() + agent.clip_by_bound() # Re-calculates its fitness agent.fit = function(agent.position) - def _update(self, agents, best_agent, function, iteration, n_iterations): - """Method that wraps Tug of War Optimization over all agents and variables. + def update(self, space, function, iteration, n_iterations): + """Wraps Tug of War Optimization over all agents and variables. Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. + space (Space): Space containing agents and update-related information. function (Function): A Function object that will be used as the objective function. iteration (int): Current iteration. n_iterations (int): Maximum number of iterations. 
""" - # Sorting agents - agents.sort(key=lambda x: x.fit) + # Sorts agents + space.agents.sort(key=lambda x: x.fit) # Gathers best and worst fitness - best_fit, worst_fit = agents[0].fit, agents[-1].fit + best_fit, worst_fit = space.agents[0].fit, space.agents[-1].fit # Calculates the agents' weights weights = [(agent.fit - worst_fit) / - (best_fit - worst_fit + c.EPSILON) + 1 for agent in agents] + (best_fit - worst_fit + c.EPSILON) + 1 for agent in space.agents] # We copy a temporary list for iterating purposes - temp_agents = copy.deepcopy(agents) + temp_agents = copy.deepcopy(space.agents) # Linearly decreasing `mu_k` mu_k = self.mu_k - (self.mu_k - 0.1) * (iteration / n_iterations) - # Iterating through 'i' agents + # Iterates through 'i' agents for i, temp1 in enumerate(temp_agents): # Initializes `delta` as zero delta = 0.0 - # Iterating through 'j' agents + # Iterates through 'j' agents for j, temp2 in enumerate(temp_agents): # If weight from agent `i` is smaller than weight from agent `j` if weights[i] < weights[j]: @@ -228,66 +224,20 @@ def _update(self, agents, best_agent, function, iteration, n_iterations): r1 = r.generate_gaussian_random_number(size=(temp1.n_variables, temp1.n_dimensions)) # Calculates the displacement (eq. 9-10) - delta += 0.5 * acceleration * self.delta_t ** 2 + np.multiply(self.alpha ** iteration * self.beta * ( - np.expand_dims(temp1.ub, -1) - np.expand_dims(temp1.lb, -1)), r1) + delta += 0.5 * acceleration * self.delta_t ** 2 + \ + np.multiply(self.alpha ** iteration * self.beta * (np.expand_dims(temp1.ub, -1) \ + - np.expand_dims(temp1.lb, -1)), r1) # Updates the temporary agent's position (eq. 
11) temp1.position += delta # Performs the constraint handling - self._constraint_handle(temp_agents, best_agent, function, iteration) + self._constraint_handle(temp_agents, space.best_agent, function, iteration+1) # Iterates through real and temporary populations - for agent, temp in zip(agents, temp_agents): + for agent, temp in zip(space.agents, temp_agents): # If temporary agent is better than real one if temp.fit < agent.fit: # Updates its position and fitness agent.position = copy.deepcopy(temp.position) agent.fit = copy.deepcopy(temp.fit) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. 
- - """ - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, space.best_agent, function, t+1, space.n_iterations) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history diff --git a/opytimizer/optimizers/science/wca.py b/opytimizer/optimizers/science/wca.py index 69a04dcc..faf9b016 100644 --- a/opytimizer/optimizers/science/wca.py +++ b/opytimizer/optimizers/science/wca.py @@ -2,11 +2,9 @@ """ import numpy as np -from tqdm import tqdm import opytimizer.math.random as r import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -27,28 +25,27 @@ class WCA(Optimizer): """ - def __init__(self, algorithm='WCA', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. 
""" logger.info('Overriding class: Optimizer -> WCA.') - # Override its parent class with the receiving hyperparams - super(WCA, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(WCA, self).__init__() - # Number of rivers + sea + # Number of sea + rivers self.nsr = 2 # Maximum evaporation condition self.d_max = 0.1 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -86,37 +83,50 @@ def d_max(self, d_max): self._d_max = d_max + @property + def flows(self): + """np.array: Array of flows. + + """ + + return self._flows + + @flows.setter + def flows(self, flows): + if not isinstance(flows, np.ndarray): + raise e.TypeError('`flows` should be a numpy array') + + self._flows = flows + + def create_additional_attrs(self, space): + """Creates additional attributes that are used by this optimizer. + + Args: + space (Space): A Space object containing meta-information. + + """ + + # Array of flows + self.flows = np.zeros(self.nsr, dtype=int) + def _flow_intensity(self, agents): """Calculates the intensity of each possible flow (eq. 6). Args: agents (list): List of agents. - Returns: - It returns an array of flows' intensity. 
- """ - # Our initial cost will be 0 - cost = 0 - - # Creates an empty integer array of number of rivers + sea - flows = np.zeros(self.nsr, dtype=int) + # Calculates the cost + cost = np.sum([agents[i].fit for i in range(self.nsr)]) - # For every river + sea - for i in range(self.nsr): - # We accumulates its fitness - cost += agents[i].fit - - # Iterating again over rivers + sea + # Iterates again over sea + rivers for i in range(self.nsr): # Calculates its particular flow intensity - flows[i] = round(np.fabs(agents[i].fit / cost) * (len(agents) - self.nsr)) - - return flows + self.flows[i] = np.floor(np.fabs(agents[i].fit / cost) * (len(agents) - self.nsr)) def _raining_process(self, agents, best_agent): - """Performs the raining process (eq. 12). + """Performs the raining process (eq. 11-12). Args: agents (list): List of agents. @@ -124,132 +134,114 @@ def _raining_process(self, agents, best_agent): """ - # Iterate through every raindrop - for k in range(self.nsr, len(agents)): - # Calculate the euclidean distance between sea and raindrop / strream - distance = (np.linalg.norm(best_agent.position - agents[k].position)) - - # If distance if smaller than evaporation condition - if distance > self.d_max: - # Generates a new random gaussian number - r1 = r.generate_gaussian_random_number(1, agents[k].n_variables) - - # Changes the stream position - agents[k].position = best_agent.position + np.sqrt(0.1) * r1 - - def _update_stream(self, agents, flows): + # Iterates through all sea + rivers + for i in range(0, self.nsr): + # Iterates through all raindrops that belongs to specific sea or river + for j in range(self.nsr, self.flows[i] + self.nsr): + # Calculates the euclidean distance between sea and raindrop / stream + distance = (np.linalg.norm(best_agent.position - agents[j].position)) + + # If distance if smaller than evaporation condition + if distance < self.d_max: + # If it is supposed to replace the sea streams' position + if i == 0: + # Updates position (eq. 
12) + r1 = r.generate_gaussian_random_number(1, agents[j].n_variables) + agents[j].position = best_agent.position + np.sqrt(0.1) * r1 + + # If it is supposed to replace the river streams' position + else: + # Updates position (eq. 11) + agents[j].fill_with_uniform() + + def _update_stream(self, agents, function): """Updates every stream position (eq. 8). Args: agents (list): List of agents. - flows (np.array): Array of flows' intensity. + function (Function): A Function object that will be used as the objective function. """ - # Defining a counter to the summation of flows + # Defines a counter to the summation of flows n_flows = 0 # For every river, ignoring the sea - for k in range(1, self.nsr): - # Accumulate the number of flows - n_flows += flows[k] + for i in range(0, self.nsr): + # Accumulates the number of flows + n_flows += self.flows[i] - # Iterate through every possible flow - for i in range((n_flows - flows[k]), n_flows): - # Calculates a random uniform number between 0 and 1 + # Iterates through every possible flow + for j in range((self.nsr + n_flows - self.flows[i]), self.nsr + n_flows): + # Calculates a random uniform number r1 = r.generate_uniform_random_number() - # Updates stream position - agents[i].position += r1 * 2 * (agents[i].position - agents[k].position) + # Updates river position + agents[j].position += r1 * 2 * (agents[i].position - agents[j].position) + + # Clips its limits and recalculates its fitness + agents[j].clip_by_bound() + agents[j].fit = function(agents[j].position) - def _update_river(self, agents, best_agent): + def _update_river(self, agents, best_agent, function): """Updates every river position (eq. 9). Args: agents (list): List of agents. best_agent (Agent): Global best agent. + function (Function): A Function object that will be used as the objective function. 
""" # For every river, ignoring the sea - for k in range(1, self.nsr): - # Calculates a random uniform number between 0 and 1 + for i in range(1, self.nsr): + # Calculates a random uniform r1 = r.generate_uniform_random_number() # Updates river position - agents[k].position += r1 * 2 * (best_agent.position - agents[k].position) + agents[i].position += r1 * 2 * (best_agent.position - agents[i].position) - def _update(self, agents, best_agent, flows): - """Updates the agents position. + # Clips its limits and recalculates its fitness + agents[i].clip_by_bound() + agents[i].fit = function(agents[i].position) - Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. - flows (np.array): Array of flows' intensity. - - """ - - # Updates every stream position - self._update_stream(agents, flows) - - # Updates every river position - self._update_river(agents, best_agent) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. + def update(self, space, function, n_iterations): + """Wraps Water Cycle Algorithm over all agents and variables. Args: - space (Space): A Space object that will be evaluated. + space (Space): Space containing agents and update-related information. function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. + n_iterations (int): Maximum number of iterations. """ - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # Calculating the flow's intensity (eq. 
6) - flows = self._flow_intensity(space.agents) + # Calculates the flow intensity + self._flow_intensity(space.agents) - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, space.best_agent, flows) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Sorting agents - space.agents.sort(key=lambda x: x.fit) - - # Performs the raining process (eq. 12) - self._raining_process(space.agents, space.best_agent) - - # Updates the evaporation condition - self.d_max -= (self.d_max / space.n_iterations) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') + # Updates every stream position + self._update_stream(space.agents, function) - return history + # Updates every river position + self._update_river(space.agents, space.best_agent, function) + + # Iterates through all rivers + for i in range(1, self.nsr): + # Iterates through all raindrops + for j in range(self.nsr, len(space.agents)): + # If raindrop position is better than river's + if space.agents[j].fit < space.agents[i].fit: + # Exchanges their positions + space.agents[i], space.agents[j] = space.agents[j], space.agents[i] + + # Iterates through all rivers: + for i in range(1, self.nsr): + # If river position is better than seá's + if space.agents[i].fit < space.agents[0].fit: + # Exchanges their 
positions + space.agents[i], space.agents[0] = space.agents[0], space.agents[i] + + # Performs the raining process (eq. 12) + self._raining_process(space.agents, space.best_agent) + + # Updates the evaporation condition + self.d_max -= (self.d_max / n_iterations) diff --git a/opytimizer/optimizers/science/wdo.py b/opytimizer/optimizers/science/wdo.py index 867707f3..844094cd 100644 --- a/opytimizer/optimizers/science/wdo.py +++ b/opytimizer/optimizers/science/wdo.py @@ -2,11 +2,9 @@ """ import numpy as np -from tqdm import tqdm import opytimizer.math.random as r import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -25,19 +23,18 @@ class WDO(Optimizer): """ - def __init__(self, algorithm='WDO', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> WDO.') - # Override its parent class with the receiving hyperparams - super(WDO, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(WDO, self).__init__() # Maximum velocity self.v_max = 0.3 @@ -54,8 +51,8 @@ def __init__(self, algorithm='WDO', hyperparams=None): # Pressure constant self.RT = 1.5 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -144,121 +141,59 @@ def RT(self, RT): self._RT = RT - def _update_velocity(self, position, best_position, velocity, alt_velocity, index): - """Updates an agent velocity (eq. 15). - - Args: - position (np.array): Agent's current position. - best_position (np.array): Global best position. - velocity (np.array): Agent's current velocity. 
- alt_velocity (np.array): Random agent's current velocity. - index (int): Index of current agent. - - Returns: - A new velocity based. + @property + def velocity(self): + """np.array: Array of velocities. """ - # Calculates new velocity - new_velocity = (1 - self.alpha) * velocity - self.g * position + (self.RT * np.abs( - 1 / index - 1) * (best_position - position)) + (self.c * alt_velocity / index) + return self._velocity - return new_velocity + @velocity.setter + def velocity(self, velocity): + if not isinstance(velocity, np.ndarray): + raise e.TypeError('`velocity` should be a numpy array') - def _update_position(self, position, velocity): - """Updates an agent position (eq. 16). + self._velocity = velocity - Args: - position (np.array): Agent's current position. - velocity (np.array): Agent's current velocity. + def create_additional_attrs(self, space): + """Creates additional attributes that are used by this optimizer. - Returns: - A new position. + Args: + space (Space): A Space object containing meta-information. """ - # Calculates new position - new_position = position + velocity + # Arrays of velocities + self.velocity = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) - return new_position - - def _update(self, agents, best_agent, function, velocity): - """Method that wraps velocity and position updates over all agents and variables. + def update(self, space, function): + """Wraps Wind Driven Optimization over all agents and variables. Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. + space (Space): Space containing agents and update-related information. function (Function): A function object. - velocity (np.array): Array of current velocities. 
""" - # Iterate through all agents - for i, agent in enumerate(agents): + # Iterates through all agents + for i, agent in enumerate(space.agents): # Generates a random index based on the number of agents - index = r.generate_integer_random_number(0, len(agents)) + index = r.generate_integer_random_number(0, len(space.agents)) - # Updating velocity - velocity[i] = self._update_velocity(agent.position, best_agent.position, velocity[i], velocity[index], i + 1) + # Updates velocity (eq. 15) + self.velocity[i] = (1 - self.alpha) * self.velocity[i] - self.g * agent.position + \ + (self.RT * np.abs(1 / (index + 1) - 1) * (space.best_agent.position - agent.position)) + \ + (self.c * self.velocity[index] / (index + 1)) - # Clips the velocity values between (-v_max, v_max) - velocity = np.clip(velocity, -self.v_max, self.v_max) + # Clips the velocity values between [-v_max, v_max] + self.velocity = np.clip(self.velocity, -self.v_max, self.v_max) - # Updating agent's position - agent.position = self._update_position(agent.position, velocity[i]) + # Updates agent's position (eq. 16) + agent.position += self.velocity[i] # Checks agent limits - agent.clip_limits() + agent.clip_by_bound() # Evaluates agent agent.fit = function(agent.position) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. 
- - """ - - # Instanciating array of velocities - velocity = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, space.best_agent, function, velocity) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history diff --git a/opytimizer/optimizers/science/weo.py b/opytimizer/optimizers/science/weo.py index 2fbfaf73..a174e7dc 100644 --- a/opytimizer/optimizers/science/weo.py +++ b/opytimizer/optimizers/science/weo.py @@ -7,7 +7,7 @@ from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e import opytimizer.utils.history as h import opytimizer.utils.logging as l @@ -29,21 +29,20 @@ class WEO(Optimizer): """ - def __init__(self, algorithm='WEO', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. 
+ params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> WEO.') - # Override its parent class with the receiving hyperparams - super(WEO, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(WEO, self).__init__() - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') diff --git a/opytimizer/optimizers/science/wwo.py b/opytimizer/optimizers/science/wwo.py index 76922340..2ec0721f 100644 --- a/opytimizer/optimizers/science/wwo.py +++ b/opytimizer/optimizers/science/wwo.py @@ -4,12 +4,10 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -28,19 +26,18 @@ class WWO(Optimizer): """ - def __init__(self, algorithm='WWO', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. 
""" logger.info('Overriding class: Optimizer -> WWO.') - # Override its parent class with the receiving hyperparams - super(WWO, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(WWO, self).__init__() # Maximum wave height self.h_max = 5 @@ -54,8 +51,8 @@ def __init__(self, algorithm='WWO', hyperparams=None): # Maximum number of breakings self.k_max = 1 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -127,12 +124,54 @@ def k_max(self, k_max): self._k_max = k_max - def _propagate_wave(self, agent, function, length): + @property + def height(self): + """np.array: Array of heights. + + """ + + return self._height + + @height.setter + def height(self, height): + if not isinstance(height, np.ndarray): + raise e.TypeError('`height` should be a numpy array') + + self._height = height + + @property + def length(self): + """np.array: Array of lengths. + + """ + + return self._length + + @length.setter + def length(self, length): + if not isinstance(length, np.ndarray): + raise e.TypeError('`length` should be a numpy array') + + self._length = length + + def create_additional_attrs(self, space): + """Creates additional attributes that are used by this optimizer. + + Args: + space (Space): A Space object containing meta-information. + + """ + + # Arrays of heights and lengths + self.height = r.generate_uniform_random_number(self.h_max, self.h_max, space.n_agents) + self.length = r.generate_uniform_random_number(0.5, 0.5, space.n_agents) + + def _propagate_wave(self, agent, function, index): """Propagates wave into a new position (eq. 6). Args: function (Function): A function object. - length (np.array): Array of wave lengths. + index (int): Index of wave length. Returns: Propagated wave. 
@@ -148,24 +187,24 @@ def _propagate_wave(self, agent, function, length): r1 = r.generate_uniform_random_number(-1, 1) # Updates the wave's position - wave.position[j] += r1 * length * (j + 1) + wave.position[j] += r1 * self.length[index] * (j + 1) # Clips its limits - wave.clip_limits() + wave.clip_by_bound() # Re-calculates its fitness wave.fit = function(wave.position) return wave - def _refract_wave(self, agent, best_agent, function, length): + def _refract_wave(self, agent, best_agent, function, index): """Refract wave into a new position (eq. 8-9). Args: agent (Agent): Agent to be refracted. best_agent (Agent): Global best agent. function (Function): A function object. - length (np.array): Array of wave lengths. + index (int): Index of wave length. Returns: New height and length values. @@ -183,11 +222,11 @@ def _refract_wave(self, agent, best_agent, function, length): # Calculates the standard deviation std = np.fabs(best_agent.position[j] - agent.position[j]) / 2 - # Generates a new position (Eq. 8) + # Generates a new position (eq. 8) agent.position[j] = r.generate_gaussian_random_number(mean, std) # Clips its limits - agent.clip_limits() + agent.clip_by_bound() # Re-calculates its fitness agent.fit = function(agent.position) @@ -195,8 +234,8 @@ def _refract_wave(self, agent, best_agent, function, length): # Updates the new height to maximum height value new_height = self.h_max - # Re-calculates the new length (Eq. 9) - new_length = length * (current_fit / (agent.fit + c.EPSILON)) + # Re-calculates the new length (eq. 
9) + new_length = self.length[index] * (current_fit / (agent.fit + c.EPSILON)) return new_height, new_length @@ -223,19 +262,18 @@ def _break_wave(self, wave, function, j): broken_wave.position[j] += r1 * self.beta * (j + 1) # Clips its limits - broken_wave.clip_limits() + broken_wave.clip_by_bound() # Re-calculates its fitness broken_wave.fit = function(broken_wave.position) return broken_wave - def _update_wave_length(self, agents, length): + def _update_wave_length(self, agents): """Updates the wave length of current population. Args: agents (list): List of agents. - length (np.array): Array of wave lengths. """ @@ -245,117 +283,61 @@ def _update_wave_length(self, agents, length): # Iterates through all agents for i, agent in enumerate(agents): # Updates its length - length[i] *= self.alpha ** -((agent.fit - agents[-1].fit + c.EPSILON) / ( - agents[0].fit - agents[-1].fit + c.EPSILON)) + self.length[i] *= self.alpha ** -((agent.fit - agents[-1].fit + c.EPSILON) / \ + (agents[0].fit - agents[-1].fit + c.EPSILON)) - def _update(self, agents, best_agent, function, height, length): - """Method that wraps Water Wave Optimization over all agents and variables. + def update(self, space, function): + """Wraps Water Wave Optimization over all agents and variables. Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. + space (Space): Space containing agents and update-related information. function (Function): A function object. - height (np.array): Array of wave heights. - length (np.array): Array of wave lengths. """ # Iterates through all agents - for i, agent in enumerate(agents): - # Propagates a wave into a new temporary one (Eq. 6) - wave = self._propagate_wave(agent, function, length[i]) + for i, agent in enumerate(space.agents): + # Propagates a wave into a new temporary one (eq. 
6) + wave = self._propagate_wave(agent, function, i) # Checks if propagated wave is better than current one if wave.fit < agent.fit: # Also checks if propagated wave is better than global one - if wave.fit < best_agent.fit: + if wave.fit < space.best_agent.fit: # Replaces the best agent with propagated wave - best_agent.position = copy.deepcopy(wave.position) - best_agent.fit = copy.deepcopy(wave.fit) + space.best_agent.position = copy.deepcopy(wave.position) + space.best_agent.fit = copy.deepcopy(wave.fit) # Generates a `k` number of breaks k = r.generate_integer_random_number(1, self.k_max + 1) # Iterates through every possible break for j in range(k): - # Breaks the propagated wave (Eq. 10) + # Breaks the propagated wave (eq. 10) broken_wave = self._break_wave(wave, function, j) # Checks if broken wave is better than global one - if broken_wave.fit < best_agent.fit: + if broken_wave.fit < space.best_agent.fit: # Replaces the best agent with broken wave - best_agent.position = copy.deepcopy(broken_wave.position) - best_agent.fit = copy.deepcopy(broken_wave.fit) + space.best_agent.position = copy.deepcopy(broken_wave.position) + space.best_agent.fit = copy.deepcopy(broken_wave.fit) # Replaces current agent's with propagated wave agent.position = copy.deepcopy(wave.position) agent.fit = copy.deepcopy(wave.fit) # Sets its height to maximum height - height[i] = self.h_max + self.height[i] = self.h_max # If propagated wave is not better than current agent else: # Decreases its height by one - height[i] -= 1 + self.height[i] -= 1 # If its height reaches zero - if height[i] == 0: - # Refracts the wave and generates a new height and wave length (Eq. 8-9) - height[i], length[i] = self._refract_wave(agent, best_agent, function, length[i]) - - # Updates the wave length for all agents (Eq. 7) - self._update_wave_length(agents, length) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. 
- - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. - - """ - - # Creates a height vector with `h_max` values - height = r.generate_uniform_random_number(self.h_max, self.h_max, space.n_agents) - - # Creates a length vector with 0.5 values - length = r.generate_uniform_random_number(0.5, 0.5, space.n_agents) - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, space.best_agent, function, height, length) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') + if self.height[i] == 0: + # Refracts the wave and generates a new height and wave length (eq. 8-9) + self.height[i], self.length[i] = self._refract_wave(agent, space.best_agent, function, i) - return history + # Updates the wave length for all agents (eq. 
7) + self._update_wave_length(space.agents) diff --git a/opytimizer/optimizers/social/__init__.py b/opytimizer/optimizers/social/__init__.py index 1dfe7155..fe11d793 100644 --- a/opytimizer/optimizers/social/__init__.py +++ b/opytimizer/optimizers/social/__init__.py @@ -1,3 +1,10 @@ """An evolutionary package for all common opytimizer modules. It contains implementations of human social behavior-based optimizers. """ + +from opytimizer.optimizers.social.bso import BSO +from opytimizer.optimizers.social.ci import CI +from opytimizer.optimizers.social.isa import ISA +from opytimizer.optimizers.social.mvpa import MVPA +from opytimizer.optimizers.social.qsa import QSA +from opytimizer.optimizers.social.ssd import SSD diff --git a/opytimizer/optimizers/social/bso.py b/opytimizer/optimizers/social/bso.py index 859c7d74..7d366314 100644 --- a/opytimizer/optimizers/social/bso.py +++ b/opytimizer/optimizers/social/bso.py @@ -7,7 +7,7 @@ from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e import opytimizer.utils.history as h import opytimizer.utils.logging as l @@ -28,21 +28,20 @@ class BSO(Optimizer): """ - def __init__(self, algorithm='BSO', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. 
""" logger.info('Overriding class: Optimizer -> BSO.') - # Override its parent class with the receiving hyperparams - super(BSO, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(BSO, self).__init__() - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') diff --git a/opytimizer/optimizers/social/ci.py b/opytimizer/optimizers/social/ci.py index 58700dd7..e2464f4c 100644 --- a/opytimizer/optimizers/social/ci.py +++ b/opytimizer/optimizers/social/ci.py @@ -7,7 +7,7 @@ from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e import opytimizer.utils.history as h import opytimizer.utils.logging as l @@ -28,21 +28,20 @@ class CI(Optimizer): """ - def __init__(self, algorithm='CI', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. 
""" logger.info('Overriding class: Optimizer -> CI.') - # Override its parent class with the receiving hyperparams - super(CI, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(CI, self).__init__() - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') diff --git a/opytimizer/optimizers/social/isa.py b/opytimizer/optimizers/social/isa.py index 745bb938..a5b82720 100644 --- a/opytimizer/optimizers/social/isa.py +++ b/opytimizer/optimizers/social/isa.py @@ -7,7 +7,7 @@ from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e import opytimizer.utils.history as h import opytimizer.utils.logging as l @@ -29,21 +29,20 @@ class ISA(Optimizer): """ - def __init__(self, algorithm='ISA', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. 
""" logger.info('Overriding class: Optimizer -> ISA.') - # Override its parent class with the receiving hyperparams - super(ISA, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(ISA, self).__init__() - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') diff --git a/opytimizer/optimizers/social/mvpa.py b/opytimizer/optimizers/social/mvpa.py index 7b9f4a38..65cf2cff 100644 --- a/opytimizer/optimizers/social/mvpa.py +++ b/opytimizer/optimizers/social/mvpa.py @@ -7,7 +7,7 @@ from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e import opytimizer.utils.history as h import opytimizer.utils.logging as l @@ -28,21 +28,20 @@ class MVPA(Optimizer): """ - def __init__(self, algorithm='MVPA', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. 
""" logger.info('Overriding class: Optimizer -> MVPA.') - # Override its parent class with the receiving hyperparams - super(MVPA, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(MVPA, self).__init__() - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') diff --git a/opytimizer/optimizers/social/qsa.py b/opytimizer/optimizers/social/qsa.py index 90ddcf88..4dd83cf5 100644 --- a/opytimizer/optimizers/social/qsa.py +++ b/opytimizer/optimizers/social/qsa.py @@ -4,11 +4,9 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.constants as c -import opytimizer.utils.history as h +import opytimizer.utils.constant as c import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -28,22 +26,21 @@ class QSA(Optimizer): """ - def __init__(self, algorithm='QSA', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> QSA.') - # Override its parent class with the receiving hyperparams - super(QSA, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(QSA, self).__init__() - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -101,7 +98,7 @@ def _business_one(self, agents, function, beta): # Calculates the number of agents in each queue q_1, q_2, _ = self._calculate_queue(len(agents), A_1.fit, A_2.fit, A_3.fit) - # Represents the update patterns by Eq. 4 and Eq. 5 + # Represents the update patterns by eq. 4 and eq. 
5 case = None # Iterates through all agents @@ -150,10 +147,11 @@ def _business_one(self, agents, function, beta): # Generates an Erlang number e = r.generate_gamma_random_number(1, 0.5, 1) - # Calculates the fluctuation (Eq. 6) - F_1 = beta * alpha * (E * np.fabs(A.position - a.position)) + e * (A.position - a.position) + # Calculates the fluctuation (eq. 6) + F_1 = beta * alpha * (E * np.fabs(A.position - a.position)) + \ + e * (A.position - a.position) - # Updates the temporary agent's position (Eq. 4) + # Updates the temporary agent's position (eq. 4) a.position = A.position + F_1 # Evaluates the agent @@ -175,10 +173,10 @@ def _business_one(self, agents, function, beta): # If case is defined as two else: - # Calculates the fluctuation (Eq. 7) + # Calculates the fluctuation (eq. 7) F_2 = beta * alpha * (E * np.fabs(A.position - a.position)) - # Updates the temporary agent's position (Eq. 5) + # Updates the temporary agent's position (eq. 5) a.position += F_2 # Evaluates the agent @@ -258,18 +256,18 @@ def _business_two(self, agents, function): # If random number is smaller than confusion degree if r2 < cv: - # Calculates the fluctuation (Eq. 14) + # Calculates the fluctuation (eq. 14) F_1 = e * (A_1.position - A_2.position) - # Update agent's position (Eq. 12) + # Update agent's position (eq. 12) a.position += F_1 # If random number is bigger than confusion degree else: - # Calculates the fluctuation (Eq. 15) + # Calculates the fluctuation (eq. 15) F_2 = e * (A.position - A_1.position) - # Update agent's position (Eq. 13) + # Update agent's position (eq. 13) a.position += F_2 # Evaluates the agent @@ -314,7 +312,7 @@ def _business_three(self, agents, function): # Generates an Erlang number e = r.generate_gamma_random_number(1, 0.5, 1) - # Updates temporary agent's position (Eq. 17) + # Updates temporary agent's position (eq. 
17) a.position[j] = A_1.position[j] + e * (A_2.position[j] - a.position[j]) # Evaluates the agent @@ -326,11 +324,11 @@ def _business_three(self, agents, function): agent.position = copy.deepcopy(a.position) agent.fit = copy.deepcopy(a.fit) - def _update(self, agents, function, iteration, n_iterations): - """Method that wraps the Queue Search Algorithm over all agents and variables. + def update(self, space, function, iteration, n_iterations): + """Wraps Queue Search Algorithm over all agents and variables. Args: - agents (list): List of agents. + space (Space): Space containing agents and update-related information. function (Function): A Function object that will be used as the objective function. iteration (int): Current iteration. n_iterations (int): Maximum number of iterations. @@ -341,57 +339,10 @@ def _update(self, agents, function, iteration, n_iterations): beta = np.exp(np.log(1 / (iteration + c.EPSILON)) * np.sqrt(iteration / n_iterations)) # Performs the first business phase - self._business_one(agents, function, beta) + self._business_one(space.agents, function, beta) # Performs the second business phase - self._business_two(agents, function) + self._business_two(space.agents, function) # Performs the third business phase - self._business_three(agents, function) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. 
- - """ - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, function, t, space.n_iterations) - - # Checking if agents meets the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history + self._business_three(space.agents, function) diff --git a/opytimizer/optimizers/social/ssd.py b/opytimizer/optimizers/social/ssd.py index e0946a80..b9e2f1cc 100644 --- a/opytimizer/optimizers/social/ssd.py +++ b/opytimizer/optimizers/social/ssd.py @@ -4,12 +4,9 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.decorator as d import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -29,19 +26,18 @@ class SSD(Optimizer): """ - def __init__(self, algorithm='SSD', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. 
""" logger.info('Overriding class: Optimizer -> SSD.') - # Override its parent class with the receiving hyperparams - super(SSD, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(SSD, self).__init__() # Exploration parameter self.c = 2.0 @@ -49,8 +45,8 @@ def __init__(self, algorithm='SSD', hyperparams=None): # Decay rate self.decay = 0.99 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -87,6 +83,48 @@ def decay(self, decay): raise e.ValueError('`decay` should be between 0 and 1') self._decay = decay + @property + def local_position(self): + """np.array: Array of local positions. + + """ + + return self._local_position + + @local_position.setter + def local_position(self, local_position): + if not isinstance(local_position, np.ndarray): + raise e.TypeError('`local_position` should be a numpy array') + + self._local_position = local_position + + @property + def velocity(self): + """np.array: Array of velocities. + + """ + + return self._velocity + + @velocity.setter + def velocity(self, velocity): + if not isinstance(velocity, np.ndarray): + raise e.TypeError('`velocity` should be a numpy array') + + self._velocity = velocity + + def create_additional_attrs(self, space): + """Creates additional attributes that are used by this optimizer. + + Args: + space (Space): A Space object containing meta-information. + + """ + + # Arrays of local positions and velocities + self.local_position = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) + self.velocity = r.generate_uniform_random_number(size=(space.n_agents, space.n_variables, space.n_dimensions)) + def _mean_global_solution(self, alpha, beta, gamma): """Calculates the mean global solution (eq. 9). 
@@ -105,12 +143,12 @@ def _mean_global_solution(self, alpha, beta, gamma): return mean - def _update_position(self, position, velocity): + def _update_position(self, position, index): """Updates a particle position (eq. 10). Args: position (np.array): Agent's current position. - velocity (np.array): Agent's current velocity. + index (int): Index of current agent. Returns: A new position. @@ -118,92 +156,53 @@ def _update_position(self, position, velocity): """ # Calculates new position - new_position = position + velocity + new_position = position + self.velocity[index] return new_position - def _update_velocity(self, position, mean, local_position): + def _update_velocity(self, position, mean, index): """Updates a particle velocity (eq. 11). Args: position (np.array): Agent's current position. mean (np.array): Mean global best position. - local_position (np.array): Agent's local best position. + index (int): Index of current agent. Returns: A new velocity. """ - # Generating random numbers + # Generates random numbers r1 = r.generate_uniform_random_number() r2 = r.generate_uniform_random_number() # If random number is smaller than or equal to 0.5 if r2 <= 0.5: # Updates its velocity based on sine wave - new_velocity = self.c * np.sin(r1) * (local_position - position) + np.sin(r1) * (mean - position) + new_velocity = self.c * np.sin(r1) * (self.local_position[index] - position) \ + + np.sin(r1) * (mean - position) # If random number is bigger than 0.5 else: # Updates its velocity based on cosine wave - new_velocity = self.c * np.cos(r1) * (local_position - position) + np.cos(r1) * (mean - position) + new_velocity = self.c * np.cos(r1) * (self.local_position[index] - position) \ + + np.cos(r1) * (mean - position) return new_velocity - def _update(self, agents, function, local_position, velocity): - """Method that wraps mean global solution, position and velocity updates over all agents and variables. - - Args: - agents (list): List of agents. 
- function (Function): A Function object that will be used as the objective function. - local_position (np.array): Array of local best posisitons. - velocity (np.array): Array of current velocities. - - """ - - # Iterate through all agents - for i, agent in enumerate(agents): - # Calculates the new fitness - fit = function(agent.position) - - # If new fitness is better than agent's fitness - if fit < agent.fit: - # Updates its current fitness to the newer one - agent.fit = fit - - # Also updates the local best position to current agent's position - local_position[i] = copy.deepcopy(agent.position) - - # Sorting agents - agents.sort(key=lambda x: x.fit) - - # Calculates the mean global solution - mean = self._mean_global_solution(agents[0].position, agents[1].position, agents[2].position) - - # Updates current agent positions - agent.position = self._update_position(agent.position, velocity[i]) - - # Checking agent limits - agent.clip_limits() - - # Updates current agent velocities - velocity[i] = self._update_velocity(agent.position, mean, local_position[i]) - - @d.pre_evaluate - def _evaluate(self, space, function, local_position): + def evaluate(self, space, function): """Evaluates the search space according to the objective function. Args: space (Space): A Space object that will be evaluated. function (Function): A Function object that will be used as the objective function. - local_position (np.array): Array of local best posisitons. 
""" - # Iterate through all agents + # Iterates through all agents for i, agent in enumerate(space.agents): - # Calculate the fitness value of current agent + # Calculates the fitness value of current agent fit = function(agent.position) # If fitness is better than agent's best fit @@ -212,66 +211,51 @@ def _evaluate(self, space, function, local_position): agent.fit = fit # Also updates the local best position to current's agent position - local_position[i] = copy.deepcopy(agent.position) + self.local_position[i] = copy.deepcopy(agent.position) # If agent's fitness is better than global fitness if agent.fit < space.best_agent.fit: # Makes a deep copy of agent's local best position and fitness to the best agent - space.best_agent.position = copy.deepcopy(local_position[i]) + space.best_agent.position = copy.deepcopy(self.local_position[i]) space.best_agent.fit = copy.deepcopy(agent.fit) - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. + def update(self, space, function): + """Wraps Social Ski Driver over all agents and variables. Args: - space (Space): A Space object that will be evaluated. + space (Space): Space containing agents and update-related information. function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. 
""" - # Instanciating array of local positions - local_position = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) - - # And also an array of velocities - velocity = r.generate_uniform_random_number(size=(space.n_agents, space.n_variables, space.n_dimensions)) - - # Initial search space evaluation - self._evaluate(space, function, local_position, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') + # Iterates through all agents + for i, agent in enumerate(space.agents): + # Calculates the new fitness + fit = function(agent.position) - # Updating agents - self._update(space.agents, function, local_position, velocity) + # If new fitness is better than agent's fitness + if fit < agent.fit: + # Updates its current fitness to the newer one + agent.fit = fit - # Checking if agents meet the bounds limits - space.clip_limits() + # Also updates the local best position to current agent's position + self.local_position[i] = copy.deepcopy(agent.position) - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, local_position, hook=pre_evaluate) + # Sorts agents + space.agents.sort(key=lambda x: x.fit) - # Reducing exploration parameter - self.c *= self.decay + # Calculates the mean global solution + mean = self._mean_global_solution(space.agents[0].position, space.agents[1].position, + space.agents[2].position) - # Every iteration, we need to dump agents, local positions and best agent - history.dump(agents=space.agents, local=local_position, best_agent=space.best_agent) + # Updates current agent positions + agent.position = self._update_position(agent.position, i) - # Updates the `tqdm` status - 
b.set_postfix(fitness=space.best_agent.fit) - b.update() + # Checks agent limits + agent.clip_by_bound() - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') + # Updates current agent velocities + self.velocity[i] = self._update_velocity(agent.position, mean, i) - return history + # Reduces exploration parameter + self.c *= self.decay diff --git a/opytimizer/optimizers/swarm/__init__.py b/opytimizer/optimizers/swarm/__init__.py index e214c5e2..054cda4f 100644 --- a/opytimizer/optimizers/swarm/__init__.py +++ b/opytimizer/optimizers/swarm/__init__.py @@ -1,3 +1,33 @@ """An evolutionary package for all common opytimizer modules. It contains implementations of swarm-based optimizers. """ + +from opytimizer.optimizers.swarm.abc import ABC +from opytimizer.optimizers.swarm.abo import ABO +from opytimizer.optimizers.swarm.af import AF +from opytimizer.optimizers.swarm.ba import BA +from opytimizer.optimizers.swarm.bfo import BFO +from opytimizer.optimizers.swarm.boa import BOA +from opytimizer.optimizers.swarm.bwo import BWO +from opytimizer.optimizers.swarm.cs import CS +from opytimizer.optimizers.swarm.csa import CSA +from opytimizer.optimizers.swarm.eho import EHO +from opytimizer.optimizers.swarm.fa import FA +from opytimizer.optimizers.swarm.ffoa import FFOA +from opytimizer.optimizers.swarm.fpa import FPA +from opytimizer.optimizers.swarm.fso import FSO +from opytimizer.optimizers.swarm.goa import GOA +from opytimizer.optimizers.swarm.js import JS, NBJS +from opytimizer.optimizers.swarm.kh import KH +from opytimizer.optimizers.swarm.mfo import MFO +from opytimizer.optimizers.swarm.mrfo import MRFO +from opytimizer.optimizers.swarm.pio import PIO +from opytimizer.optimizers.swarm.pso import AIWPSO, PSO, RPSO, SAVPSO, VPSO +from opytimizer.optimizers.swarm.sbo import SBO +from opytimizer.optimizers.swarm.sca import SCA +from opytimizer.optimizers.swarm.sfo import SFO +from opytimizer.optimizers.swarm.sos 
import SOS +from opytimizer.optimizers.swarm.ssa import SSA +from opytimizer.optimizers.swarm.sso import SSO +from opytimizer.optimizers.swarm.stoa import STOA +from opytimizer.optimizers.swarm.woa import WOA diff --git a/opytimizer/optimizers/swarm/abc.py b/opytimizer/optimizers/swarm/abc.py index e06ed5e6..ef96d59d 100644 --- a/opytimizer/optimizers/swarm/abc.py +++ b/opytimizer/optimizers/swarm/abc.py @@ -4,12 +4,10 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -29,25 +27,24 @@ class ABC(Optimizer): """ - def __init__(self, algorithm='ABC', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> ABC.') - # Override its parent class with the receiving hyperparams - super(ABC, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(ABC, self).__init__() # Number of trial limits self.n_trials = 10 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -68,17 +65,40 @@ def n_trials(self, n_trials): self._n_trials = n_trials - def _evaluate_location(self, agent, neighbour, function, trial): + @property + def trial(self): + """np.array: Array of trial. 
+ + """ + + return self._trial + + @trial.setter + def trial(self, trial): + if not isinstance(trial, np.ndarray): + raise e.TypeError('`trial` should be a numpy array') + + self._trial = trial + + def create_additional_attrs(self, space): + """Creates additional attributes that are used by this optimizer. + + Args: + space (Space): A Space object containing meta-information. + + """ + + # Arrays of trials + self.trial = np.zeros(space.n_agents) + + def _evaluate_location(self, agent, neighbour, function, index): """Evaluates a food source location and update its value if possible (eq. 2.2). Args: agent (Agent): An agent. neighbour (Agent): A neightbour agent. function (Function): A function object. - trial (int): A trial counter. - - Returns: - The number of trials for the current food source. + index (int): Index of trial. """ @@ -91,8 +111,8 @@ def _evaluate_location(self, agent, neighbour, function, trial): # Change its location according to equation 2.2 a.position = agent.position + (agent.position - neighbour.position) * r1 - # Check agent limits - a.clip_limits() + # Checks agent's limits + a.clip_by_bound() # Evaluating its fitness a.fit = function(a.position) @@ -100,7 +120,7 @@ def _evaluate_location(self, agent, neighbour, function, trial): # Check if fitness is improved if a.fit < agent.fit: # If yes, reset the number of trials for this particular food source - trial = 0 + self.trial[index] = 0 # Copies the new position and fitness agent.position = copy.deepcopy(a.position) @@ -109,42 +129,38 @@ def _evaluate_location(self, agent, neighbour, function, trial): # If not else: # We increse the trials counter - trial += 1 - - return trial + self.trial[index] += 1 - def _send_employee(self, agents, function, trials): + def _send_employee(self, agents, function): """Sends employee bees onto food source to evaluate its nectar. Args: agents (list): List of agents. function (Function): A function object. - trials (np.array): Array of trials counter. 
""" # Iterate through all food sources for i, agent in enumerate(agents): - # Gathering a random source to be used + # Gathers a random source to be used source = r.generate_integer_random_number(0, len(agents)) # Measuring food source location - trials[i] = self._evaluate_location(agent, agents[source], function, trials[i]) + self._evaluate_location(agent, agents[source], function, i) - def _send_onlooker(self, agents, function, trials): + def _send_onlooker(self, agents, function): """Sends onlooker bees to select new food sources (eq. 2.1). Args: agents (list): List of agents. function (Function): A function object. - trials (np.array): Array of trials counter. """ - # Calculating the fitness somatory + # Calculates the fitness somatory total = sum(agent.fit for agent in agents) - # Defining food sources' counter + # Defines food sources' counter k = 0 # While counter is less than the amount of food sources @@ -163,28 +179,27 @@ def _send_onlooker(self, agents, function, trials): k += 1 # Gathers a random source to be used - source = int(r.generate_uniform_random_number(0, len(agents))) + source = r.generate_integer_random_number(0, len(agents)) # Evaluate its location - trials[i] = self._evaluate_location(agent, agents[source], function, trials[i]) + self._evaluate_location(agent, agents[source], function, i) - def _send_scout(self, agents, function, trials): + def _send_scout(self, agents, function): """Sends scout bees to scout for new possible food sources. Args: agents (list): List of agents. function (Function): A function object. - trials (np.array): Array of trials counter. 
""" - # Calculating the maximum trial counter value and index - max_trial, max_index = np.max(trials), np.argmax(trials) + # Calculates the maximum trial counter value and index + max_trial, max_index = np.max(self.trial), np.argmax(self.trial) # If maximum trial is bigger than number of possible trials if max_trial > self.n_trials: # Resets the trial counter - trials[max_index] = 0 + self.trial[max_index] = 0 # Copies the current agent a = copy.deepcopy(agents[max_index]) @@ -192,8 +207,8 @@ def _send_scout(self, agents, function, trials): # Updates its position with a random shakeness a.position += r.generate_uniform_random_number(-1, 1) - # Check agent limits - a.clip_limits() + # Checks agent's limits + a.clip_by_bound() # Recalculates its fitness a.fit = function(a.position) @@ -203,71 +218,20 @@ def _send_scout(self, agents, function, trials): # We copy the temporary agent to the current one agents[max_index] = copy.deepcopy(a) - def _update(self, agents, function, trials): - """Method that wraps the update pipeline over all agents and variables. - - Args: - agents (list): List of agents. - function (Function): A function object. - trials (np.array): Array of trials counter. - - """ - - # Sending employee bees step - self._send_employee(agents, function, trials) - - # Sending onlooker bees step - self._send_onlooker(agents, function, trials) - - # Sending scout bees step - self._send_scout(agents, function, trials) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. + def update(self, space, function): + """Wraps Artificial Bee Colony over all agents and variables. Args: - space (Space): A Space object that will be evaluated. + space (Space): Space containing agents and update-related information. function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. 
- pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. """ - # Instanciating array of trials counter - trials = np.zeros(space.n_agents) - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, function, trials) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() + # Sends employee bees step + self._send_employee(space.agents, function) - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') + # Sends onlooker bees step + self._send_onlooker(space.agents, function) - return history + # Sends scout bees step + self._send_scout(space.agents, function) diff --git a/opytimizer/optimizers/swarm/abo.py b/opytimizer/optimizers/swarm/abo.py index 0dbfc42d..d395d8c8 100644 --- a/opytimizer/optimizers/swarm/abo.py +++ b/opytimizer/optimizers/swarm/abo.py @@ -4,11 +4,9 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.random as r import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -27,19 
+25,18 @@ class ABO(Optimizer): """ - def __init__(self, algorithm='ABO', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> ABO.') - # Override its parent class with the receiving hyperparams - super(ABO, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(ABO, self).__init__() # Ratio of sunspot butterflies self.sunspot_ratio = 0.9 @@ -47,8 +44,8 @@ def __init__(self, algorithm='ABO', hyperparams=None): # Free flight constant self.a = 2.0 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -113,7 +110,7 @@ def _flight_mode(self, agent, neighbour, function): temp.position[j] = agent.position[j] + (agent.position[j] - neighbour.position[j]) * r1 # Clips its limits - temp.clip_limits() + temp.clip_by_bound() # Re-calculates its fitness temp.fit = function(temp.position) @@ -126,49 +123,49 @@ def _flight_mode(self, agent, neighbour, function): # Return current agent as well as a false variable return agent.position, agent.fit, False - def _update(self, agents, function, iteration, n_iterations): - """Method that wraps Artificial Butterfly Optimization over all agents and variables. + def update(self, space, function, iteration, n_iterations): + """Wraps Artificial Butterfly Optimization over all agents and variables. Args: - agents (list): List of agents. + space (Space): Space containing agents and update-related information. function (Function): A Function object that will be used as the objective function. iteration (int): Current iteration. n_iterations (int): Maximum number of iterations. 
""" - # Sorting agents - agents.sort(key=lambda x: x.fit) + # Sorts agents + space.agents.sort(key=lambda x: x.fit) # Calculates the number of sunspot butterflies - n_sunspots = int(self.sunspot_ratio * len(agents)) + n_sunspots = int(self.sunspot_ratio * len(space.agents)) # Iterates through all sunspot butterflies - for agent in agents[:n_sunspots]: + for agent in space.agents[:n_sunspots]: # Generates the index for a random sunspot butterfly - k = r.generate_integer_random_number(0, len(agents)) + k = r.generate_integer_random_number(0, len(space.agents)) # Performs a flight mode using sunspot butterflies (eq. 1) - agent.position, agent.fit, _ = self._flight_mode(agent, agents[k], function) + agent.position, agent.fit, _ = self._flight_mode(agent, space.agents[k], function) # Iterates through all canopy butterflies - for agent in agents[n_sunspots:]: + for agent in space.agents[n_sunspots:]: # Generates the index for a random canopy butterfly - k = r.generate_integer_random_number(0, len(agents) - n_sunspots) + k = r.generate_integer_random_number(0, len(space.agents) - n_sunspots) # Performs a flight mode using canopy butterflies (eq. 1) - agent.position, agent.fit, is_better = self._flight_mode(agent, agents[k], function) + agent.position, agent.fit, is_better = self._flight_mode(agent, space.agents[k], function) # If there was not fitness replacement if not is_better: # Generates the index for a random butterfly - k = r.generate_integer_random_number(0, len(agents)) + k = r.generate_integer_random_number(0, len(space.agents)) # Generates random uniform number r1 = r.generate_uniform_random_number() # Calculates `D` (eq. 
4) - D = np.fabs(2 * r1 * agents[k].position - agent.position) + D = np.fabs(2 * r1 * space.agents[k].position - agent.position) # Generates another random uniform number r2 = r.generate_uniform_random_number() @@ -177,57 +174,10 @@ def _update(self, agents, function, iteration, n_iterations): a = (self.a - self.a * (iteration / n_iterations)) # Updates the agent's position (eq. 3) - agent.position = agents[k].position - 2 * a * r2 - a * D + agent.position = space.agents[k].position - 2 * a * r2 - a * D # Clips its limits - agent.clip_limits() + agent.clip_by_bound() # Re-calculates its fitness agent.fit = function(agent.position) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. 
- - """ - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, function, t, space.n_iterations) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history diff --git a/opytimizer/optimizers/swarm/af.py b/opytimizer/optimizers/swarm/af.py index c86d6536..4d9df73d 100644 --- a/opytimizer/optimizers/swarm/af.py +++ b/opytimizer/optimizers/swarm/af.py @@ -7,7 +7,7 @@ from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e import opytimizer.utils.history as h import opytimizer.utils.logging as l @@ -28,21 +28,20 @@ class AF(Optimizer): """ - def __init__(self, algorithm='AF', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. 
""" logger.info('Overriding class: Optimizer -> AF.') - # Override its parent class with the receiving hyperparams - super(AF, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(AF, self).__init__() - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') diff --git a/opytimizer/optimizers/swarm/ba.py b/opytimizer/optimizers/swarm/ba.py index 69ff407e..8de02f56 100644 --- a/opytimizer/optimizers/swarm/ba.py +++ b/opytimizer/optimizers/swarm/ba.py @@ -4,11 +4,9 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.random as rnd import opytimizer.utils.exception as ex -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -27,19 +25,18 @@ class BA(Optimizer): """ - def __init__(self, algorithm='BA', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> BA.') - # Override its parent class with the receiving hyperparams - super(BA, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(BA, self).__init__() # Minimum frequency range self.f_min = 0 @@ -53,8 +50,8 @@ def __init__(self, algorithm='BA', hyperparams=None): # Pulse rate self.r = 0.5 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -128,170 +125,129 @@ def r(self, r): self._r = r - def _update_frequency(self, min_frequency, max_frequency): - """Updates an agent frequency (eq. 2). - - Args: - min_frequency (float): Minimum frequency range. - max_frequency (float): Maximum frequency range. 
- - Returns: - A new frequency. + @property + def frequency(self): + """np.array: Array of frequencies. """ - # Generating beta random number - beta = rnd.generate_uniform_random_number() - - # Calculating new frequency - # Note that we have to apply (min - max) instead of (max - min) or it will not converge - new_frequency = min_frequency + (min_frequency - max_frequency) * beta + return self._frequency - return new_frequency + @frequency.setter + def frequency(self, frequency): + if not isinstance(frequency, np.ndarray): + raise ex.TypeError('`frequency` should be a numpy array') - def _update_velocity(self, position, best_position, frequency, velocity): - """Updates an agent velocity (eq. 3). + self._frequency = frequency - Args: - position (np.array): Agent's current position. - best_position (np.array): Global best position. - frequency (float): Agent's frequency. - velocity (np.array): Agent's current velocity. - - Returns: - A new velocity. + @property + def velocity(self): + """np.array: Array of velocities. """ - # Calculates new velocity - new_velocity = velocity + (position - best_position) * frequency + return self._velocity - return new_velocity + @velocity.setter + def velocity(self, velocity): + if not isinstance(velocity, np.ndarray): + raise ex.TypeError('`velocity` should be a numpy array') - def _update_position(self, position, velocity): - """Updates an agent position (eq. 4). - - Args: - position (np.array): Agent's current position. - velocity (np.array): Agent's current velocity. + self._velocity = velocity - Returns: - A new position. + @property + def loudness(self): + """np.array: Array of loudnesses. 
""" - # Calculates new position - new_position = position + velocity + return self._loudness - return new_position + @loudness.setter + def loudness(self, loudness): + if not isinstance(loudness, np.ndarray): + raise ex.TypeError('`loudness` should be a numpy array') - def _update(self, agents, best_agent, function, iteration, frequency, velocity, loudness, pulse_rate): - """Method that wraps Bat Algorithm over all agents and variables. + self._loudness = loudness - Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. - function (Function): A function object. - iteration (int): Current iteration value. - frequency (np.array): Array of frequencies. - velocity (np.array): Array of current velocities. - loudness (np.array): Array of loudnesses. - pulse_rate (np.array): Array of pulse rates. + @property + def pulse_rate(self): + """np.array: Array of pulse rates. """ - # Declaring alpha constant - alpha = 0.9 + return self._pulse_rate - # Iterate through all agents - for i, agent in enumerate(agents): - # Updating frequency - frequency[i] = self._update_frequency(self.f_min, self.f_max) + @pulse_rate.setter + def pulse_rate(self, pulse_rate): + if not isinstance(pulse_rate, np.ndarray): + raise ex.TypeError('`pulse_rate` should be a numpy array') - # Updating velocity - velocity[i] = self._update_velocity(agent.position, best_agent.position, frequency[i], velocity[i]) + self._pulse_rate = pulse_rate - # Updating agent's position - agent.position = self._update_position(agent.position, velocity[i]) + def create_additional_attrs(self, space): + """Creates additional attributes that are used by this optimizer. - # Generating a random probability - p = rnd.generate_uniform_random_number() - - # Generating a random number - e = rnd.generate_gaussian_random_number() - - # Check if probability is bigger than current pulse rate - if p > pulse_rate[i]: - # Performing a local random walk (eq. 
5) - # We apply 0.001 to limit the step size - agent.position = best_agent.position + 0.001 * e * np.mean(loudness) - - # Checks agent limits - agent.clip_limits() - - # Evaluates agent - agent.fit = function(agent.position) - - # Checks if probability is smaller than loudness and if fit is better - if p < loudness[i] and agent.fit < best_agent.fit: - # Copying the new solution to space's best agent - best_agent = copy.deepcopy(agent) + Args: + space (Space): A Space object containing meta-information. - # Increasing pulse rate (eq. 6) - pulse_rate[i] = self.r * (1 - np.exp(-alpha * iteration)) + """ - # Decreasing loudness (eq. 6) - loudness[i] = self.A * alpha + # Arrays of frequencies, velocities, loudnesses and pulse rates + self.frequency = rnd.generate_uniform_random_number(self.f_min, self.f_max, space.n_agents) + self.velocity = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) + self.loudness = rnd.generate_uniform_random_number(0, self.A, space.n_agents) + self.pulse_rate = rnd.generate_uniform_random_number(0, self.r, space.n_agents) - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. + def update(self, space, function, iteration): + """Wraps Bat Algorithm over all agents and variables. Args: - space (Space): A Space object that will be evaluated. + space (Space): Space containing agents and update-related information. function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. + iteration (int): Current iteration. 
""" - # Instanciating array of frequencies, velocities, loudness and pulse rates - frequency = rnd.generate_uniform_random_number(self.f_min, self.f_max, space.n_agents) - velocity = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) - loudness = rnd.generate_uniform_random_number(0, self.A, space.n_agents) - pulse_rate = rnd.generate_uniform_random_number(0, self.r, space.n_agents) + # Declares alpha constant + alpha = 0.9 - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) + # Iterates through all agents + for i, agent in enumerate(space.agents): + # Updates frequency (eq. 2) + # Note that we have to apply (min - max) instead of (max - min) or it will not converge + beta = rnd.generate_uniform_random_number() + self.frequency[i] = self.f_min + (self.f_min - self.f_max) * beta - # We will define a History object for further dumping - history = h.History(store_best_only) + # Updates velocity (eq. 3) + self.velocity[i] += (agent.position - space.best_agent.position) * self.frequency[i] - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') + # Updates agent's position (eq. 4) + agent.position += self.velocity[i] - # Updating agents - self._update(space.agents, space.best_agent, function, t, frequency, velocity, loudness, pulse_rate) + # Generates random uniform and gaussian numbers + p = rnd.generate_uniform_random_number() + e = rnd.generate_gaussian_random_number() - # Checking if agents meet the bounds limits - space.clip_limits() + # Checks if probability is bigger than current pulse rate + if p > self.pulse_rate[i]: + # Performs a local random walk (eq. 
5) + # We apply 0.001 to limit the step size + agent.position = space.best_agent.position + 0.001 * e * np.mean(self.loudness) - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) + # Checks agent limits + agent.clip_by_bound() - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) + # Evaluates agent + agent.fit = function(agent.position) - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() + # Checks if probability is smaller than loudness and if fit is better + if p < self.loudness[i] and agent.fit < space.best_agent.fit: + # Copies the new solution to space's best agent + space.best_agent = copy.deepcopy(agent) - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') + # Increasing pulse rate (eq. 6 - left) + self.pulse_rate[i] = self.r * (1 - np.exp(-alpha * iteration)) - return history + # Decreasing loudness (eq. 6 - right) + self.loudness[i] = self.A * alpha diff --git a/opytimizer/optimizers/swarm/bfo.py b/opytimizer/optimizers/swarm/bfo.py index 68a3f72f..14c5b86e 100644 --- a/opytimizer/optimizers/swarm/bfo.py +++ b/opytimizer/optimizers/swarm/bfo.py @@ -7,7 +7,7 @@ from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e import opytimizer.utils.history as h import opytimizer.utils.logging as l @@ -28,21 +28,20 @@ class BFO(Optimizer): """ - def __init__(self, algorithm='BFO', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. 
""" logger.info('Overriding class: Optimizer -> BFO.') - # Override its parent class with the receiving hyperparams - super(BFO, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(BFO, self).__init__() - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') diff --git a/opytimizer/optimizers/swarm/boa.py b/opytimizer/optimizers/swarm/boa.py index ca659830..43d867f6 100644 --- a/opytimizer/optimizers/swarm/boa.py +++ b/opytimizer/optimizers/swarm/boa.py @@ -2,11 +2,9 @@ """ import numpy as np -from tqdm import tqdm import opytimizer.math.random as r import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -25,19 +23,18 @@ class BOA(Optimizer): """ - def __init__(self, algorithm='BOA', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> BOA.') - # Override its parent class with the receiving hyperparams - super(BOA, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(BOA, self).__init__() # Sensor modality self.c = 0.01 @@ -48,8 +45,8 @@ def __init__(self, algorithm='BOA', hyperparams=None): # Switch probability self.p = 0.8 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -104,6 +101,32 @@ def p(self, p): self._p = p + @property + def fragrance(self): + """np.array: Array of fragrances. 
+ + """ + + return self._fragrance + + @fragrance.setter + def fragrance(self, fragrance): + if not isinstance(fragrance, np.ndarray): + raise e.TypeError('`fragrance` should be a numpy array') + + self._fragrance = fragrance + + def create_additional_attrs(self, space): + """Creates additional attributes that are used by this optimizer. + + Args: + space (Space): A Space object containing meta-information. + + """ + + # Arrays of fragances + self.fragrance = np.zeros(space.n_agents) + def _best_movement(self, agent_position, best_position, fragrance, random): """Updates the agent's position towards the best butterfly (eq. 2). @@ -143,88 +166,37 @@ def _local_movement(self, agent_position, j_position, k_position, fragrance, ran return new_position - def _update(self, agents, best_agent, fragrance): - """Method that wraps global and local pollination updates over all agents and variables. + def update(self, space): + """Wraps Butterfly Optimization Algorithm over all agents and variables. Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. - fragrance (np.array): Array of fragrances. + space (Space): Space containing agents and update-related information. """ # Iterates through all agents - for i, agent in enumerate(agents): + for i, agent in enumerate(space.agents): # Calculates fragrance for current agent (eq. 1) - fragrance[i] = self.c * agent.fit ** self.a + self.fragrance[i] = self.c * agent.fit ** self.a # Iterates through all agents - for i, agent in enumerate(agents): + for i, agent in enumerate(space.agents): # Generates a uniform random number r1 = r.generate_uniform_random_number() # If random number is smaller than switch probability if r1 < self.p: # Moves current agent towards the best one (eq. 
2) - agent.position = self._best_movement( - agent.position, best_agent.position, fragrance[i], r1) + agent.position = self._best_movement(agent.position, space.best_agent.position, + self.fragrance[i], r1) # If random number is bigger than switch probability else: # Generates `j` and `k` indexes - j = r.generate_integer_random_number(0, len(agents)) - k = r.generate_integer_random_number(0, len(agents), exclude_value=j) + j = r.generate_integer_random_number(0, len(space.agents)) + k = r.generate_integer_random_number(0, len(space.agents), exclude_value=j) # Moves current agent using a local movement (eq. 3) - agent.position = self._local_movement( - agent.position, agents[j].position, agents[k].position, fragrance[i], r1) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. 
- - """ - - # Instantiates an array of fragrances - fragrance = np.zeros(space.n_agents) - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, space.best_agent, fragrance) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history + agent.position = self._local_movement(agent.position, space.agents[j].position, + space.agents[k].position, self.fragrance[i], + r1) diff --git a/opytimizer/optimizers/swarm/bwo.py b/opytimizer/optimizers/swarm/bwo.py index c9940eec..e39647fd 100644 --- a/opytimizer/optimizers/swarm/bwo.py +++ b/opytimizer/optimizers/swarm/bwo.py @@ -3,11 +3,8 @@ import copy -from tqdm import tqdm - import opytimizer.math.random as r import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -28,19 +25,18 @@ class BWO(Optimizer): """ - def __init__(self, algorithm='BWO', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. 
+ params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> BWO.') - # Override its parent class with the receiving hyperparams - super(BWO, self).__init__(algorithm=algorithm) + # Overrides its parent class with the receiving params + super(BWO, self).__init__() # Procreating rate self.pp = 0.6 @@ -51,8 +47,8 @@ def __init__(self, algorithm='BWO', hyperparams=None): # Mutation rate self.pm = 0.4 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -153,29 +149,31 @@ def _mutation(self, alpha): return alpha - def _update(self, agents, n_variables, function): - """Method that wraps procreation, cannibalism and mutation over all agents and variables. + def update(self, space, function): + """Wraps Black Widow Optimization over all agents and variables. Args: - agents (list): List of agents. - n_variables (int): Number of decision variables. + space (Space): Space containing agents and update-related information. function (Function): A Function object that will be used as the objective function. 
""" - # Retrieving the number of agents - n_agents = len(agents) + # Retrieves the number of agents + n_agents = len(space.agents) + n_variables = space.n_variables # Calculates the number agents that reproduces, are cannibals and mutates - n_reproduct, n_cannibals, n_mutate = int(n_agents * self.pp), int(n_agents * self.cr), int(n_agents * self.pm) + n_reproduct = int(n_agents * self.pp) + n_cannibals = int(n_agents * self.cr) + n_mutate = int(n_agents * self.pm) - # Sorting agents - agents.sort(key=lambda x: x.fit) + # Sorts agents + space.agents.sort(key=lambda x: x.fit) # Selecting the best solutions and saving in auxiliary population - agents1 = copy.deepcopy(agents[:n_reproduct]) + agents1 = copy.deepcopy(space.agents[:n_reproduct]) - # Creating an empty auxiliary population + # Creates an empty auxiliary population agents2 = [] # For every possible reproducting agent @@ -184,9 +182,9 @@ def _update(self, agents, n_variables, function): idx = r.generate_uniform_random_number(0, n_agents, size=2) # Making a deepcopy of father and mother - father, mother = copy.deepcopy(agents[int(idx[0])]), copy.deepcopy(agents[int(idx[1])]) + father, mother = copy.deepcopy(space.agents[int(idx[0])]), copy.deepcopy(space.agents[int(idx[1])]) - # Creating an empty list of auxiliary agents + # Creates an empty list of auxiliary agents new_agents = [] # For every possible pair of variables @@ -194,9 +192,9 @@ def _update(self, agents, n_variables, function): # Procreates parents into two new offsprings y1, y2 = self._procreating(father, mother) - # Checking `y1` and `y2` limits - y1.clip_limits() - y2.clip_limits() + # Checks `y1` and `y2` limits + y1.clip_by_bound() + y2.clip_by_bound() # Calculates new fitness for `y1` and `y2` y1.fit = function(y1.position) @@ -205,7 +203,7 @@ def _update(self, agents, n_variables, function): # Appends the mother and mutated agents to the new population new_agents.extend([mother, y1, y2]) - # Sorting new population + # Sorts new population 
new_agents.sort(key=lambda x: x.fit) # Extending auxiliary population with the number of cannibals (s. 3.3) @@ -219,8 +217,8 @@ def _update(self, agents, n_variables, function): # Performs the mutation alpha = self._mutation(agents1[idx]) - # Checking `alpha` limits - alpha.clip_limits() + # Checks `alpha` limits + alpha.clip_by_bound() # Calculates new fitness for `alpha` alpha.fit = function(alpha.position) @@ -228,57 +226,7 @@ def _update(self, agents, n_variables, function): # Appends the mutated agent to the auxiliary population agents2.extend([alpha]) - # Joins both populations - agents += agents2 - - # Sorting agents - agents.sort(key=lambda x: x.fit) - - return agents[:n_agents] - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. 
- - """ - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - space.agents = self._update(space.agents, space.n_variables, function) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents, local positions and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history + # Joins both populations, sorts them and retrieves `n_agents` + space.agents += agents2 + space.agents.sort(key=lambda x: x.fit) + space.agents = space.agents[:n_agents] diff --git a/opytimizer/optimizers/swarm/cs.py b/opytimizer/optimizers/swarm/cs.py index db52cada..54109615 100644 --- a/opytimizer/optimizers/swarm/cs.py +++ b/opytimizer/optimizers/swarm/cs.py @@ -4,12 +4,10 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.distribution as d import opytimizer.math.random as r import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as log from opytimizer.core.optimizer import Optimizer @@ -28,19 +26,18 @@ class CS(Optimizer): """ - def __init__(self, algorithm='CS', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. 
- hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> CS.') - # Override its parent class with the receiving hyperparams - super(CS, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(CS, self).__init__() # Step size self.alpha = 1 @@ -51,8 +48,8 @@ def __init__(self, algorithm='CS', hyperparams=None): # Probability of replacing worst nests self.p = 0.2 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -124,13 +121,13 @@ def _generate_new_nests(self, agents, best_agent): # Then, we iterate for every agent for new_agent in new_agents: - # Calculating the Lévy distribution + # Calculates the Lévy distribution step = d.generate_levy_distribution(self.beta, new_agent.n_variables) # Expanding its dimension to perform entrywise multiplication step = np.expand_dims(step, axis=1) - # Calculating the difference vector between local and best positions + # Calculates the difference vector between local and best positions # Alpha controls the intensity of the step size step_size = self.alpha * step * (new_agent.position - best_agent.position) @@ -164,7 +161,7 @@ def _generate_abandoned_nests(self, agents, prob): # It will be used to replace or not a certain nest b = d.generate_bernoulli_distribution(1 - prob, len(agents)) - # Iterating through every new agent + # Iterates through every new agent for j, new_agent in enumerate(new_agents): # Generates a uniform random number r1 = r.generate_uniform_random_number() @@ -173,7 +170,7 @@ def _generate_abandoned_nests(self, agents, prob): k = r.generate_integer_random_number(0, len(agents)-1) l = r.generate_integer_random_number(0, len(agents)-1, exclude_value=k) - # Calculating the random walk between these two nests + # Calculates the random walk between these 
two nests step_size = r1 * (agents[k].position - agents[l].position) # Finally, we replace the old nest @@ -192,10 +189,10 @@ def _evaluate_nests(self, agents, new_agents, function): """ - # Iterating through each agent and new agent + # Iterates through each agent and new agent for agent, new_agent in zip(agents, new_agents): - # Check agent limits - new_agent.clip_limits() + # Checks agent's limits + new_agent.clip_by_bound() # Calculates the new agent fitness new_agent.fit = function(new_agent.position) @@ -206,71 +203,23 @@ def _evaluate_nests(self, agents, new_agents, function): agent.position = copy.deepcopy(new_agent.position) agent.fit = copy.deepcopy(new_agent.fit) - def _update(self, agents, best_agent, function): - """Method that wraps Cuckoo Search algorithm over all agents and variables. + def update(self, space, function): + """Wraps Cuckoo Search over all agents and variables. Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. - function (Function): A function object. + space (Space): Space containing agents and update-related information. + function (Function): A Function object that will be used as the objective function. """ # Generate new nests - new_agents = self._generate_new_nests(agents, best_agent) + new_agents = self._generate_new_nests(space.agents, space.best_agent) # Evaluate new generated nests - self._evaluate_nests(agents, new_agents, function) + self._evaluate_nests(space.agents, new_agents, function) # Generate new nests to be replaced - new_agents = self._generate_abandoned_nests(agents, self.p) + new_agents = self._generate_abandoned_nests(space.agents, self.p) # Evaluate new generated nests for further replacement - self._evaluate_nests(agents, new_agents, function) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. 
- function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. - - """ - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, space.best_agent, function) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history + self._evaluate_nests(space.agents, new_agents, function) diff --git a/opytimizer/optimizers/swarm/csa.py b/opytimizer/optimizers/swarm/csa.py index 436d8582..f4b0400b 100644 --- a/opytimizer/optimizers/swarm/csa.py +++ b/opytimizer/optimizers/swarm/csa.py @@ -4,12 +4,9 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.decorator as d import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -29,19 
+26,18 @@ class CSA(Optimizer): """ - def __init__(self, algorithm='CSA', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> CSA.') - # Override its parent class with the receiving hyperparams - super(CSA, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(CSA, self).__init__() # Flight length self.fl = 2.0 @@ -49,8 +45,8 @@ def __init__(self, algorithm='CSA', hyperparams=None): # Awareness probability self.AP = 0.1 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -86,14 +82,38 @@ def AP(self, AP): self._AP = AP - @d.pre_evaluate - def _evaluate(self, space, function, memory): + @property + def memory(self): + """np.array: Array of memories. + + """ + + return self._memory + + @memory.setter + def memory(self, memory): + if not isinstance(memory, np.ndarray): + raise e.TypeError('`memory` should be a numpy array') + + self._memory = memory + + def create_additional_attrs(self, space): + """Creates additional attributes that are used by this optimizer. + + Args: + space (Space): A Space object containing meta-information. + + """ + + # Arrays of memories + self.memory = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) + + def evaluate(self, space, function): """Evaluates the search space according to the objective function. Args: space (Space): A Space object that will be evaluated. function (Function): A Function object that will be used as the objective function. - memory (np.array): Array of memories. 
""" @@ -107,91 +127,38 @@ def _evaluate(self, space, function, memory): # Updates its current fitness to the newer one agent.fit = fit - # Also updates the memory to current's agent position (Eq. 5) - memory[i] = copy.deepcopy(agent.position) + # Also updates the memory to current's agent position (eq. 5) + self.memory[i] = copy.deepcopy(agent.position) # If agent's fitness is better than global fitness if agent.fit < space.best_agent.fit: # Makes a deep copy of agent's local best position and fitness to the best agent - space.best_agent.position = copy.deepcopy(memory[i]) + space.best_agent.position = copy.deepcopy(self.memory[i]) space.best_agent.fit = copy.deepcopy(agent.fit) - def _update(self, agents, memory): - """Method that wraps the Crow Search Algorithm over all agents and variables. + def update(self, space): + """Wraps Crow Search Algorithm over all agents and variables. Args: - agents (list): List of agents. - memory (np.array): Array of memories. + space (Space): Space containing agents and update-related information. """ # Iterates through every agent - for agent in agents: + for agent in space.agents: # Generates uniform random numbers r1 = r.generate_uniform_random_number() r2 = r.generate_uniform_random_number() # Generates a random integer (e.g. selects the crow) - j = r.generate_integer_random_number(high=len(agents)) + j = r.generate_integer_random_number(high=len(space.agents)) # Checks if first random number is greater than awareness probability if r1 >= self.AP: - # Updates agent's position (Eq. 2) - agent.position += r2 * self.fl * (memory[j] - agent.position) + # Updates agent's position (eq. 
2) + agent.position += r2 * self.fl * (self.memory[j] - agent.position) # If random number is smaller than probability else: - # Generate a random position - for j, (lb, ub) in enumerate(zip(agent.lb, agent.ub)): - # For each decision variable, we generate uniform random numbers - agent.position[j] = r.generate_uniform_random_number(lb, ub, size=agent.n_dimensions) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. - - """ - - # Instanciates an array of memories - memory = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) - - # Initial search space evaluation - self._evaluate(space, function, memory, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, memory) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, memory, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: 
{space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history + # Fills agent with new random positions + agent.fill_with_uniform() diff --git a/opytimizer/optimizers/swarm/eho.py b/opytimizer/optimizers/swarm/eho.py index 38be7ffe..2c997798 100644 --- a/opytimizer/optimizers/swarm/eho.py +++ b/opytimizer/optimizers/swarm/eho.py @@ -4,11 +4,9 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.random as r import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -27,19 +25,18 @@ class EHO(Optimizer): """ - def __init__(self, algorithm='EHO', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> EHO.') - # Override its parent class with the receiving hyperparams - super(EHO, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(EHO, self).__init__() # Matriarch influence self.alpha = 0.5 @@ -50,8 +47,8 @@ def __init__(self, algorithm='EHO', hyperparams=None): # Maximum number of clans self.n_clans = 10 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -100,44 +97,71 @@ def n_clans(self): @n_clans.setter def n_clans(self, n_clans): if not isinstance(n_clans, int): - raise e.TypeError('`n_clans` should be integer') + raise e.TypeError('`n_clans` should be an integer') if n_clans < 1: raise e.ValueError('`n_clans` should be > 0') self._n_clans = n_clans - def _get_agents_from_clan(self, agents, index, n_ci): + @property + def n_ci(self): + """int: Number of elephants per clan. 
+ + """ + + return self._n_ci + + @n_ci.setter + def n_ci(self, n_ci): + if not isinstance(n_ci, int): + raise e.TypeError('`n_ci` should be an integer') + if n_ci < 1: + raise e.ValueError('`n_ci` should be > 0') + + self._n_ci = n_ci + + def create_additional_attrs(self, space): + """Creates additional attributes that are used by this optimizer. + + Args: + space (Space): A Space object containing meta-information. + + """ + + # Number of elephants per clan + self.n_ci = space.n_agents // self.n_clans + + def _get_agents_from_clan(self, agents, index): """Gets a set of agents from a specified clan. Args: agents (list): List of agents. index (int): Index of clan. - n_ci (int): Number of agents per clan. Returns: A sorted list of agents that belongs to the specified clan. + """ # Defines the starting and ending points - start, end = index * n_ci, (index + 1) * n_ci + start, end = index * self.n_ci, (index + 1) * self.n_ci return sorted(agents[start:end], key=lambda x: x.fit) - def _updating_operator(self, agents, centers, function, n_ci): + def _updating_operator(self, agents, centers, function): """Performs the separating operator. Args: agents (list): List of agents. centers (list): List of centers. function (Function): A Function object that will be used as the objective function. - n_ci (int): Number of agents per clan. """ # Iterates through every clan for i in range(self.n_clans): # Gets the agents for the specified clan - clan_agents = self._get_agents_from_clan(agents, i, n_ci) + clan_agents = self._get_agents_from_clan(agents, i) # Iterates through every agent in clan for j, agent in enumerate(clan_agents): @@ -149,16 +173,16 @@ def _updating_operator(self, agents, centers, function, n_ci): # If it is the first agent in clan if j == 0: - # Updates its position (Eq. 2) + # Updates its position (eq. 2) a.position = self.beta * centers[i] # If it is not the first (best) agent in clan else: - # Updates its position (Eq. 1) + # Updates its position (eq. 
1) a.position += self.alpha * (clan_agents[0].position - a.position) * r1 # Checks the agent's limits - a.clip_limits() + a.clip_by_bound() # Evaluates the agent a.fit = function(a.position) @@ -169,35 +193,31 @@ def _updating_operator(self, agents, centers, function, n_ci): agent.position = copy.deepcopy(a.position) agent.fit = copy.deepcopy(a.fit) - def _separating_operator(self, agents, n_ci): + def _separating_operator(self, agents): """Performs the separating operator. Args: agents (list): List of agents. - n_ci (int): Number of agents per clan. """ # Iterates through every clan for i in range(self.n_clans): # Gets the agents for the specified clan - clan_agents = self._get_agents_from_clan(agents, i, n_ci) + clan_agents = self._get_agents_from_clan(agents, i) # Gathers the worst agent in clan worst = clan_agents[-1] - # Generates a new position for the worst agent in clan (Eq. 4) - for j, (lb, ub) in enumerate(zip(worst.lb, worst.ub)): - # For each decision variable, we generate uniform random numbers - worst.position[j] = r.generate_uniform_random_number(lb, ub, size=worst.n_dimensions) + # Generates a new position for the worst agent in clan (eq. 4) + worst.fill_with_uniform() - def _update(self, agents, function, n_ci): - """Method that wraps Elephant Herd Optimization over all agents and variables. + def update(self, space, function): + """Wraps Elephant Herd Optimization over all agents and variables. Args: - agents (list): List of agents. + space (Space): Space containing agents and update-related information. function (Function): A Function object that will be used as the objective function. - n_ci (int): Number of agents per clan. 
""" @@ -207,7 +227,7 @@ def _update(self, agents, function, n_ci): # Iterates through every clan for i in range(self.n_clans): # Gets the agents for the specified clan - clan_agents = self._get_agents_from_clan(agents, i, n_ci) + clan_agents = self._get_agents_from_clan(space.agents, i) # Calculates the clan's center position clan_center = np.mean(np.array([agent.position for agent in clan_agents]), axis=0) @@ -216,63 +236,7 @@ def _update(self, agents, function, n_ci): centers.append(clan_center) # Performs the updating operator - self._updating_operator(agents, centers, function, n_ci) + self._updating_operator(space.agents, centers, function) # Performs the separating operators - self._separating_operator(agents, n_ci) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. 
- - """ - - # Calculates the number of elephants per clan - n_ci = space.n_agents // self.n_clans - - # If number of elephants per clan equals to zero - if n_ci == 0: - # Throws an error - raise e.ValueError( - 'Number of agents should be divisible by number of clans') - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, function, n_ci) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history + self._separating_operator(space.agents) diff --git a/opytimizer/optimizers/swarm/fa.py b/opytimizer/optimizers/swarm/fa.py index 797b2364..b032e3c6 100644 --- a/opytimizer/optimizers/swarm/fa.py +++ b/opytimizer/optimizers/swarm/fa.py @@ -4,12 +4,10 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.general as g import opytimizer.math.random as r import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -28,19 +26,18 @@ class FA(Optimizer): """ - def __init__(self, algorithm='FA', hyperparams=None): + def __init__(self, params=None): """Initialization method. 
Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> FA.') - # Override its parent class with the receiving hyperparams - super(FA, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(FA, self).__init__() # Randomization parameter self.alpha = 0.5 @@ -51,8 +48,8 @@ def __init__(self, algorithm='FA', hyperparams=None): # Light absorption coefficient self.gamma = 1.0 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -107,27 +104,27 @@ def gamma(self, gamma): self._gamma = gamma - def _update(self, agents, n_iterations): - """Method that wraps Firefly Algorithm over all agents and variables (eq. 3-9). + def update(self, space, n_iterations): + """Wraps Firefly Algorithm over all agents and variables (eq. 3-9). Args: - agents (list): List of agents. + space (Space): Space containing agents and update-related information. n_iterations (int): Maximum number of iterations. """ - # Calculating current iteration delta + # Calculates current iteration delta delta = 1 - ((10e-4) / 0.9) ** (1 / n_iterations) - # Applying update to alpha parameter + # Applies update to alpha parameter self.alpha *= (1 - delta) # We copy a temporary list for iterating purposes - temp_agents = copy.deepcopy(agents) + temp_agents = copy.deepcopy(space.agents) - # Iterating through 'i' agents - for agent in agents: - # Iterating through 'j' agents + # Iterates through 'i' agents + for agent in space.agents: + # Iterates through 'j' agents for temp in temp_agents: # Distance is calculated by an euclidean distance between 'i' and 'j' (eq. 
8) distance = g.euclidean_distance(agent.position, temp.position) @@ -142,50 +139,3 @@ def _update(self, agents, n_iterations): # Updates agent's position (eq. 9) agent.position = beta * (temp.position + agent.position) + self.alpha * (r1 - 0.5) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. - - """ - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, space.n_iterations) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history diff --git a/opytimizer/optimizers/swarm/ffoa.py b/opytimizer/optimizers/swarm/ffoa.py index 3be761f7..78c73ad4 100644 --- a/opytimizer/optimizers/swarm/ffoa.py 
+++ b/opytimizer/optimizers/swarm/ffoa.py @@ -7,7 +7,7 @@ from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e import opytimizer.utils.history as h import opytimizer.utils.logging as l @@ -28,21 +28,20 @@ class FFOA(Optimizer): """ - def __init__(self, algorithm='FFOA', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> FFOA.') - # Override its parent class with the receiving hyperparams - super(FFOA, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(FFOA, self).__init__() - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') diff --git a/opytimizer/optimizers/swarm/fpa.py b/opytimizer/optimizers/swarm/fpa.py index 3efce0a9..88081b5e 100644 --- a/opytimizer/optimizers/swarm/fpa.py +++ b/opytimizer/optimizers/swarm/fpa.py @@ -3,12 +3,9 @@ import copy -from tqdm import tqdm - import opytimizer.math.distribution as d import opytimizer.math.random as r import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as log from opytimizer.core.optimizer import Optimizer @@ -27,17 +24,16 @@ class FPA(Optimizer): """ - def __init__(self, algorithm='FPA', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. 
""" - # Override its parent class with the receiving hyperparams - super(FPA, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(FPA, self).__init__() # Lévy flight control parameter self.beta = 1.5 @@ -48,8 +44,8 @@ def __init__(self, algorithm='FPA', hyperparams=None): # Probability of local pollination self.p = 0.8 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -149,95 +145,49 @@ def _local_pollination(self, agent_position, k_position, l_position, epsilon): return new_position - def _update(self, agents, best_agent, function): - """Method that wraps global and local pollination updates over all agents and variables. + def update(self, space, function): + """Wraps Flower Pollination Algorithm over all agents and variables. Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. + space (Space): Space containing agents and update-related information. function (Function): A Function object that will be used as the objective function. 
""" # Iterates through all agents - for agent in agents: + for agent in space.agents: # Creates a temporary agent a = copy.deepcopy(agent) - # Generating an uniform random number + # Generates an uniform random number r1 = r.generate_uniform_random_number() # Check if generated random number is bigger than probability if r1 > self.p: - # Update a temporary position according to global pollination - a.position = self._global_pollination(agent.position, best_agent.position) + # Updates a temporary position according to global pollination + a.position = self._global_pollination( + agent.position, space.best_agent.position) else: # Generates an uniform random number epsilon = r.generate_uniform_random_number() # Generates an index for flower `k` and flower `l` - k = r.generate_integer_random_number(0, len(agents)) - l = r.generate_integer_random_number(0, len(agents), exclude_value=k) + k = r.generate_integer_random_number(0, len(space.agents)) + l = r.generate_integer_random_number(0, len(space.agents), exclude_value=k) - # Update a temporary position according to local pollination - a.position = self._local_pollination(agent.position, agents[k].position, agents[l].position, epsilon) + # Updates a temporary position according to local pollination + a.position = self._local_pollination(agent.position, space.agents[k].position, + space.agents[l].position, epsilon) - # Check agent limits - a.clip_limits() + # Checks agent's limits + a.clip_by_bound() # Calculates the fitness for the temporary position a.fit = function(a.position) # If new fitness is better than agent's fitness if a.fit < agent.fit: - # Copy its position and fitness to the agent + # Copies its position and fitness to the agent agent.position = copy.deepcopy(a.position) agent.fit = copy.deepcopy(a.fit) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. 
- function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. - - """ - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, space.best_agent, function) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history diff --git a/opytimizer/optimizers/swarm/fso.py b/opytimizer/optimizers/swarm/fso.py index 977dc1f7..3e974307 100644 --- a/opytimizer/optimizers/swarm/fso.py +++ b/opytimizer/optimizers/swarm/fso.py @@ -7,7 +7,7 @@ from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e import opytimizer.utils.history as h import opytimizer.utils.logging as l @@ -29,21 +29,20 @@ class FSO(Optimizer): """ - def __init__(self, algorithm='FSO', hyperparams=None): + def 
__init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> FSO.') - # Override its parent class with the receiving hyperparams - super(FSO, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(FSO, self).__init__() - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') diff --git a/opytimizer/optimizers/swarm/goa.py b/opytimizer/optimizers/swarm/goa.py index 27f61e3e..01f707e5 100644 --- a/opytimizer/optimizers/swarm/goa.py +++ b/opytimizer/optimizers/swarm/goa.py @@ -4,12 +4,10 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.general as g -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as log from opytimizer.core.optimizer import Optimizer @@ -28,19 +26,18 @@ class GOA(Optimizer): """ - def __init__(self, algorithm='GOA', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. 
""" logger.info('Overriding class: Optimizer -> GOA.') - # Override its parent class with the receiving hyperparams - super(GOA, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(GOA, self).__init__() # Minimum comfort zone self.c_min = 0.00001 @@ -54,8 +51,8 @@ def __init__(self, algorithm='GOA', hyperparams=None): # Attractive length scale self.l = 1.5 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -138,35 +135,34 @@ def _social_force(self, r): """ - # Calculates the social force (Eq. 2.3) + # Calculates the social force (eq. 2.3) s = self.f * np.exp(-r / self.l) - np.exp(-r) return s - def _update(self, agents, best_agent, function, iteration, n_iterations): - """Method that wraps the Grasshopper Optimization Algorithm over all agents and variables. + def update(self, space, function, iteration, n_iterations): + """Wraps Grasshopper Optimization Algorithm over all agents and variables. Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. - function (Function): A function object. + space (Space): Space containing agents and update-related information. + function (Function): A Function object that will be used as the objective function. iteration (int): Current iteration. n_iterations (int): Maximum number of iterations. """ - # Calculates the comfort coefficient (Eq. 2.8) + # Calculates the comfort coefficient (eq. 
2.8) comfort = self.c_max - iteration * ((self.c_max - self.c_min) / n_iterations) - # We copy a temporary list for iterating purposes - temp_agents = copy.deepcopy(agents) + # Copies a temporary list for iterating purposes + temp_agents = copy.deepcopy(space.agents) - # Iterating through 'i' agents - for agent in agents: + # Iterates through 'i' agents + for agent in space.agents: # Initializes the total comfort as zero total_comfort = np.zeros((agent.n_variables, agent.n_dimensions)) - # Iterating through 'j' agents + # Iterates through 'j' agents for temp in temp_agents: # Distance is calculated by an euclidean distance between 'i' and 'j' distance = g.euclidean_distance(agent.position, temp.position) @@ -184,59 +180,11 @@ def _update(self, agents, best_agent, function, iteration, n_iterations): # Sums the current comfort to the total one total_comfort += comfort * ((ub - lb) / 2) * s * unit - # Updates the agent's position (Eq. 2.7) - agent.position = comfort * total_comfort + best_agent.position + # Updates the agent's position (eq. 2.7) + agent.position = comfort * total_comfort + space.best_agent.position # Checks the agent's limits - agent.clip_limits() + agent.clip_by_bound() # Evaluates the new agent's position agent.fit = function(agent.position) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. 
- - """ - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, space.best_agent, - function, t, space.n_iterations) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history diff --git a/opytimizer/optimizers/swarm/js.py b/opytimizer/optimizers/swarm/js.py index d4c91996..a776e3c1 100644 --- a/opytimizer/optimizers/swarm/js.py +++ b/opytimizer/optimizers/swarm/js.py @@ -1,12 +1,10 @@ -"""Jellyfish Search. +"""Jellyfish Search-based algorithms. """ import numpy as np -from tqdm import tqdm import opytimizer.math.random as r import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -25,19 +23,18 @@ class JS(Optimizer): """ - def __init__(self, algorithm='JS', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. 
""" logger.info('Overriding class: Optimizer -> JS.') - # Override its parent class with the receiving hyperparams - super(JS, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(JS, self).__init__() # Chaotic map coefficient self.eta = 4.0 @@ -48,8 +45,8 @@ def __init__(self, algorithm='JS', hyperparams=None): # Motion coefficient self.gamma = 0.1 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -125,11 +122,22 @@ def _initialize_chaotic_map(self, agents): else: # Iterates through all decision variables for j in range(agent.n_variables): - # Calculates its position using logistic chaotic map (Eq. 18) + # Calculates its position using logistic chaotic map (eq. 18) agent.position[j] = self.eta * agents[i-1].position[j] * (1 - agents[i-1].position[j]) + def create_additional_attrs(self, space): + """Creates additional attributes that are used by this optimizer. + + Args: + space (Space): A Space object containing meta-information. + + """ + + # Initializes the chaotic map + self._initialize_chaotic_map(space.agents) + def _ocean_current(self, agents, best_agent): - """Calculates the ocean current (Eq. 9). + """Calculates the ocean current (eq. 9). Args: agents (Agent): List of agents. @@ -146,13 +154,13 @@ def _ocean_current(self, agents, best_agent): # Calculates the mean location of all jellyfishes u = np.mean([agent.position for agent in agents]) - # Calculates the ocean current (Eq. 9) + # Calculates the ocean current (eq. 9) trend = best_agent.position - self.beta * r1 * u return trend def _motion_a(self, lb, ub): - """Calculates type A motion (Eq. 12). + """Calculates type A motion (eq. 12). Args: lb (np.array): Array of lower bounds. @@ -172,7 +180,7 @@ def _motion_a(self, lb, ub): return motion def _motion_b(self, agent_i, agent_j): - """Calculates type B motion (Eq. 15). + """Calculates type B motion (eq. 15). 
Args: agent_i (Agent): Current agent to be updated. @@ -188,12 +196,12 @@ def _motion_b(self, agent_i, agent_j): # Checks if current fitness is bigger or equal to selected one if agent_i.fit >= agent_j.fit: - # Determines its direction (Eq. 15 - top) + # Determines its direction (eq. 15 - top) d = agent_j.position - agent_i.position # If current fitness is smaller else: - # Determines its direction (Eq. 15 - bottom) + # Determines its direction (eq. 15 - bottom) d = agent_i.position - agent_j.position # Calculates type B motion @@ -201,34 +209,33 @@ def _motion_b(self, agent_i, agent_j): return motion - def _update(self, agents, best_agent, iteration, n_iterations): - """Method that wraps the Jellyfish Search over all agents and variables. + def update(self, space, iteration, n_iterations): + """Wraps Jellyfish Search over all agents and variables. Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. + space (Space): Space containing agents and update-related information. iteration (int): Current iteration. n_iterations (int): Maximum number of iterations. """ # Iterates through all agents - for agent in agents: + for agent in space.agents: # Generates an uniform random number r1 = r.generate_uniform_random_number() - # Calculates the time control mechanism (Eq. 17) + # Calculates the time control mechanism (eq. 17) c = np.fabs((1 - iteration / n_iterations) * (2 * r1 - 1)) # If time control mechanism is bigger or equal to 0.5 if c >= 0.5: - # Calculates the ocean current (Eq. 9) - trend = self._ocean_current(agents, best_agent) + # Calculates the ocean current (eq. 9) + trend = self._ocean_current(space.agents, space.best_agent) # Generate a uniform random number r2 = r.generate_uniform_random_number() - # Updates the location of current jellyfish (Eq. 11) + # Updates the location of current jellyfish (eq. 
11) agent.position += r2 * trend # If time control mechanism is smaller than 0.5 @@ -238,69 +245,19 @@ def _update(self, agents, best_agent, iteration, n_iterations): # If random number is bigger than 1 - time control mechanism if r2 > (1 - c): - # Update jellyfish's location with type A motion (Eq. 12) + # Update jellyfish's location with type A motion (eq. 12) agent.position += self._motion_a(agent.lb, agent.ub) # If random number is smaller else: # Generates a random integer - j = r.generate_integer_random_number(0, len(agents)) + j = r.generate_integer_random_number(0, len(space.agents)) - # Updates jellyfish's location with type B motion (Eq. 16) - agent.position += self._motion_b(agent, agents[j]) + # Updates jellyfish's location with type B motion (eq. 16) + agent.position += self._motion_b(agent, space.agents[j]) # Clips the agent's limits - agent.clip_limits() - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. 
- - """ - - # Initializes current agents with a chaotic map - self._initialize_chaotic_map(space.agents) - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, space.best_agent, t, space.n_iterations) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history + agent.clip_by_bound() class NBJS(JS): @@ -314,19 +271,18 @@ class NBJS(JS): """ - def __init__(self, algorithm='NBJS', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. 
""" logger.info('Overriding class: JS -> NBJS.') - # Override its parent class with the receiving hyperparams - super(NBJS, self).__init__(algorithm, hyperparams) + # Overrides its parent class with the receiving params + super(NBJS, self).__init__(params) logger.info('Class overrided.') diff --git a/opytimizer/optimizers/swarm/kh.py b/opytimizer/optimizers/swarm/kh.py index a81d1abe..eb6430fe 100644 --- a/opytimizer/optimizers/swarm/kh.py +++ b/opytimizer/optimizers/swarm/kh.py @@ -4,13 +4,11 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.general as g import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -29,17 +27,16 @@ class KH(Optimizer): """ - def __init__(self, algorithm='KH', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ - # Override its parent class with the receiving hyperparams - super(KH, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(KH, self).__init__() # Maximum induced speed self.N_max = 0.01 @@ -68,8 +65,8 @@ def __init__(self, algorithm='KH', hyperparams=None): # Mutation probability self.Mu = 0.05 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -225,6 +222,48 @@ def Mu(self, Mu): self._Mu = Mu + @property + def motion(self): + """np.array: Array of motions. 
+ + """ + + return self._motion + + @motion.setter + def motion(self, motion): + if not isinstance(motion, np.ndarray): + raise e.TypeError('`motion` should be a numpy array') + + self._motion = motion + + @property + def foraging(self): + """np.array: Array of foragings. + + """ + + return self._foraging + + @foraging.setter + def foraging(self, foraging): + if not isinstance(foraging, np.ndarray): + raise e.TypeError('`foraging` should be a numpy array') + + self._foraging = foraging + + def create_additional_attrs(self, space): + """Creates additional attributes that are used by this optimizer. + + Args: + space (Space): A Space object containing meta-information. + + """ + + # Arrays of motions and foragings + self.motion = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) + self.foraging = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) + def _food_location(self, agents, function): """Calculates the food location. @@ -241,7 +280,8 @@ def _food_location(self, agents, function): food = copy.deepcopy(agents[0]) # Calculates the sum of inverse of agents' fitness * agents' position - sum_fitness_pos = np.sum([1 / (agent.fit + c.EPSILON) * agent.position for agent in agents], axis=0) + sum_fitness_pos = np.sum([1 / (agent.fit + c.EPSILON) * agent.position for agent in agents], + axis=0) # Calculates the sum of inverse of agents' fitness sum_fitness = np.sum([1 / (agent.fit + c.EPSILON) for agent in agents]) @@ -250,7 +290,7 @@ def _food_location(self, agents, function): food.position = sum_fitness_pos / sum_fitness # Clips the food's position - food.clip_limits() + food.clip_by_bound() # Evaluates the food food.fit = function(food.position) @@ -348,7 +388,8 @@ def _target_alpha(self, agent, worst, best, C_best): fitness = (agent.fit - best.fit) / (worst.fit - best.fit + c.EPSILON) # Calculates a list of krills' position based on neighbours - position = (best.position - agent.position) / (g.euclidean_distance(best.position, 
agent.position) + c.EPSILON) + position = (best.position - agent.position) / \ + (g.euclidean_distance(best.position, agent.position) + c.EPSILON) # Calculates the target alpha alpha = C_best * fitness * position @@ -361,7 +402,7 @@ def _neighbour_motion(self, agents, idx, iteration, n_iterations, motion): Args: agents (list): List of agents. idx (int): Selected agent. - iteration (int): Current iteration value. + iteration (int): Current iteration. n_iterations (int): Maximum number of iterations. motion (np.array): Array of motions. @@ -409,7 +450,8 @@ def _food_beta(self, agent, worst, best, food, C_food): fitness = (agent.fit - food.fit) / (worst.fit - best.fit + c.EPSILON) # Calculates the positioning - position = (food.position - agent.position) / (g.euclidean_distance(food.position, agent.position) + c.EPSILON) + position = (food.position - agent.position) / \ + (g.euclidean_distance(food.position, agent.position) + c.EPSILON) # Calculates the food attraction beta = C_food * fitness * position @@ -433,7 +475,8 @@ def _best_beta(self, agent, worst, best): fitness = (agent.fit - best.fit) / (worst.fit - best.fit + c.EPSILON) # Calculates the positioning - position = (best.position - agent.position) / (g.euclidean_distance(best.position, agent.position) + c.EPSILON) + position = (best.position - agent.position) / \ + (g.euclidean_distance(best.position, agent.position) + c.EPSILON) # Calculates the food attraction beta = fitness * position @@ -446,7 +489,7 @@ def _foraging_motion(self, agents, idx, iteration, n_iterations, food, foraging) Args: agents (list): List of agents. idx (int): Selected agent. - iteration (int): Current iteration value. + iteration (int): Current iteration. n_iterations (int): Maximum number of iterations. food (np.array): Food location. foraging (np.array): Array of foraging motions. 
@@ -476,7 +519,7 @@ def _physical_diffusion(self, n_variables, n_dimensions, iteration, n_iterations Args: n_variables (int): Number of decision variables. n_dimensions (int): Number of dimensions. - iteration (int): Current iteration value. + iteration (int): Current iteration. n_iterations (int): Maximum number of iterations. Returns: @@ -498,7 +541,7 @@ def _update_position(self, agents, idx, iteration, n_iterations, food, motion, f Args: agents (list): List of agents. idx (int): Selected agent. - iteration (int): Current iteration value. + iteration (int): Current iteration. n_iterations (int): Maximum number of iterations. food (np.array): Food location. motion (np.array): Array of motions. @@ -516,14 +559,15 @@ def _update_position(self, agents, idx, iteration, n_iterations, food, motion, f foraging_motion = self._foraging_motion(agents, idx, iteration, n_iterations, food, foraging) # Calculates the physical diffusion - physical_diffusion = self._physical_diffusion( - agents[idx].n_variables, agents[idx].n_dimensions, iteration, n_iterations) + physical_diffusion = self._physical_diffusion(agents[idx].n_variables, agents[idx].n_dimensions, + iteration, n_iterations) # Calculates the delta (eq. 19) delta_t = self.C_t * np.sum(agents[idx].ub - agents[idx].lb) # Updates the current agent's position (eq. 
18) - new_position = agents[idx].position + delta_t * (neighbour_motion + foraging_motion + physical_diffusion) + new_position = agents[idx].position + delta_t * \ + (neighbour_motion + foraging_motion + physical_diffusion) return new_position @@ -546,11 +590,12 @@ def _crossover(self, agents, idx): m = r.generate_integer_random_number(0, len(agents), exclude_value=idx) # Calculates the current crossover probability - Cr = self.Cr * ((agents[idx].fit - agents[0].fit) / (agents[-1].fit - agents[0].fit + c.EPSILON)) + Cr = self.Cr * ((agents[idx].fit - agents[0].fit) / + (agents[-1].fit - agents[0].fit + c.EPSILON)) # Iterates through all variables for j in range(a.n_variables): - # Generating a uniform random number + # Generates a uniform random number r1 = r.generate_uniform_random_number() # If sampled uniform number if smaller than crossover probability @@ -580,16 +625,17 @@ def _mutation(self, agents, idx): q = r.generate_integer_random_number(0, len(agents), exclude_value=idx) # Calculates the current mutation probability - Mu = self.Mu / ((agents[idx].fit - agents[0].fit) / (agents[-1].fit - agents[0].fit + c.EPSILON) + c.EPSILON) + Mu = self.Mu / ((agents[idx].fit - agents[0].fit) / + (agents[-1].fit - agents[0].fit + c.EPSILON) + c.EPSILON) # Iterates through all variables for j in range(a.n_variables): - # Generating a uniform random number + # Generates a uniform random number r1 = r.generate_uniform_random_number() # If sampled uniform number if smaller than mutation probability if r1 < Mu: - # Generating another uniform random number + # Generates another uniform random number r2 = r.generate_uniform_random_number() # Mutates the current position @@ -597,81 +643,29 @@ def _mutation(self, agents, idx): return a - def _update(self, agents, function, iteration, n_iterations, motion, foraging): - """Method that wraps motion and genetic updates over all agents and variables. 
+ def update(self, space, function, iteration, n_iterations): + """Wraps motion and genetic updates over all agents and variables. Args: - agents (list): List of agents. + space (Space): Space containing agents and update-related information. function (Function): A Function object that will be used as the objective function. - iteration (int): Current iteration value. + iteration (int): Current iteration. n_iterations (int): Maximum number of iterations. - motion (np.array): Array of motions. - foraging (np.array): Array of foraging motions. """ - # Sorting agents - agents.sort(key=lambda x: x.fit) + # Sorts agents + space.agents.sort(key=lambda x: x.fit) # Calculates the food location (eq. 12) - food = self._food_location(agents, function) + food = self._food_location(space.agents, function) - # Iterate through all agents - for i, _ in enumerate(agents): + # Iterates through all agents + for i, _ in enumerate(space.agents): # Updates current agent's position - agents[i].position = self._update_position(agents, i, iteration, n_iterations, food, motion[i], foraging[i]) + space.agents[i].position = self._update_position(space.agents, i, iteration, n_iterations, + food, self.motion[i], self.foraging[i]) # Performs the crossover and mutation - agents[i] = self._crossover(agents, i) - agents[i] = self._mutation(agents, i) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. 
- - """ - - # Instanciating array of motions and foraging motions - motion = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) - foraging = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, function, t, space.n_iterations, motion, foraging) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history + space.agents[i] = self._crossover(space.agents, i) + space.agents[i] = self._mutation(space.agents, i) diff --git a/opytimizer/optimizers/swarm/mfo.py b/opytimizer/optimizers/swarm/mfo.py index 6f894d46..99ccb2c8 100644 --- a/opytimizer/optimizers/swarm/mfo.py +++ b/opytimizer/optimizers/swarm/mfo.py @@ -4,11 +4,9 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.random as rnd import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -27,25 +25,24 @@ class MFO(Optimizer): """ - def __init__(self, algorithm='MFO', hyperparams=None): + def __init__(self, params=None): 
"""Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> MFO.') - # Override its parent class with the receiving hyperparams - super(MFO, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(MFO, self).__init__() # Spiral constant self.b = 1 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -66,18 +63,18 @@ def b(self, b): self._b = b - def _update(self, agents, iteration, n_iterations): - """Method that wraps global and local pollination updates over all agents and variables. + def update(self, space, iteration, n_iterations): + """Wraps Moth-Flame Optimization over all agents and variables. Args: - agents (list): List of agents. + space (Space): Space containing agents and update-related information. iteration (int): Current iteration. n_iterations (int): Maximum number of iterations. """ # Makes a deepcopy of current population - flames = copy.deepcopy(agents) + flames = copy.deepcopy(space.agents) # Sorts the flames flames.sort(key=lambda x: x.fit) @@ -89,7 +86,7 @@ def _update(self, agents, iteration, n_iterations): r = -1 + iteration * (-1 / n_iterations) # Iterates through all agents - for i, agent in enumerate(agents): + for i, agent in enumerate(space.agents): # Iterates through every decision variable for j in range(agent.n_variables): # Generates a random `t` @@ -112,50 +109,3 @@ def _update(self, agents, iteration, n_iterations): # Updates current agent's position (eq. 3.12) agent.position[j] = D * np.exp(self.b * t) * \ np.cos(2 * np.pi * t) + flames[0].position[j] - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. 
- - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. - - """ - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, t, space.n_iterations) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history diff --git a/opytimizer/optimizers/swarm/mrfo.py b/opytimizer/optimizers/swarm/mrfo.py index 74f0ccfe..7f25ddb8 100644 --- a/opytimizer/optimizers/swarm/mrfo.py +++ b/opytimizer/optimizers/swarm/mrfo.py @@ -4,11 +4,9 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.random as r import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -28,25 +26,24 @@ class MRFO(Optimizer): """ - 
def __init__(self, algorithm='MRFO', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> MRFO.') - # Override its parent class with the receiving hyperparams - super(MRFO, self).__init__(algorithm=algorithm) + # Overrides its parent class with the receiving params + super(MRFO, self).__init__() # Somersault foraging self.S = 2.0 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -74,7 +71,7 @@ def _cyclone_foraging(self, agents, best_position, i, iteration, n_iterations): agents (list): List of agents. best_position (np.array): Global best position. i (int): Index of current manta ray. - iteration (int): Number of current iteration. + iteration (int): Current iteration. n_iterations (int): Maximum number of iterations. 
Returns: @@ -103,24 +100,24 @@ def _cyclone_foraging(self, agents, best_position, i, iteration, n_iterations): # Checks if the index is equal to zero if i == 0: cyclone_foraging = r_position + r3 * (r_position - agents[i].position) + \ - beta * (r_position - agents[i].position) + beta * (r_position - agents[i].position) # If index is different than zero else: cyclone_foraging = r_position + r3 * (agents[i - 1].position - agents[i].position) + \ - beta * (r_position - agents[i].position) + beta * (r_position - agents[i].position) # If current iteration proportion is bigger than random generated number else: # Checks if the index is equal to zero if i == 0: cyclone_foraging = best_position + r3 * (best_position - agents[i].position) + \ - beta * (best_position - agents[i].position) + beta * (best_position - agents[i].position) # If index is different than zero else: cyclone_foraging = best_position + r3 * (agents[i - 1].position - agents[i].position) + \ - beta * (best_position - agents[i].position) + beta * (best_position - agents[i].position) return cyclone_foraging @@ -147,14 +144,14 @@ def _chain_foraging(self, agents, best_position, i): # Checks if the index is equal to zero if i == 0: # If yes, uses this equation - chain_foraging = agents[i].position + r2 * ( - best_position - agents[i].position) + alpha * (best_position - agents[i].position) + chain_foraging = agents[i].position + r2 * (best_position - agents[i].position) + \ + alpha * (best_position - agents[i].position) # If index is different than zero else: # Uses this equation - chain_foraging = agents[i].position + r2 * ( - agents[i - 1].position - agents[i].position) + alpha * (best_position - agents[i].position) + chain_foraging = agents[i].position + r2 * (agents[i - 1].position - agents[i].position) + \ + alpha * (best_position - agents[i].position) return chain_foraging @@ -179,93 +176,46 @@ def _somersault_foraging(self, position, best_position): return somersault_foraging - def _update(self, 
agents, best_agent, function, iteration, n_iterations): - """Method that wraps chain, cyclone and somersault foraging updates over all agents and variables. + def update(self, space, function, iteration, n_iterations): + """Wraps Manta Ray Foraging Optimization over all agents and variables. Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. + space (Space): Space containing agents and update-related information. function (Function): A Function object that will be used as the objective function. - iteration (int): Number of current iteration. + iteration (int): Current iteration. n_iterations (int): Maximum number of iterations. """ - # Iterate through all agents - for i, agent in enumerate(agents): + # Iterates through all agents + for i, agent in enumerate(space.agents): # Generates an uniform random number r1 = r.generate_uniform_random_number() # If random number is smaller than 1/2 if r1 < 0.5: # Performs the cyclone foraging - agent.position = self._cyclone_foraging(agents, best_agent.position, i, iteration, n_iterations) + agent.position = self._cyclone_foraging(space.agents, space.best_agent.position, + i, iteration, n_iterations) # If random number is bigger than 1/2 else: # Performs the chain foraging - agent.position = self._chain_foraging(agents, best_agent.position, i) + agent.position = self._chain_foraging(space.agents, space.best_agent.position, i) # Clips the agent's limits - agent.clip_limits() + agent.clip_by_bound() # Evaluates the agent agent.fit = function(agent.position) # If new agent's fitness is better than best - if agent.fit < best_agent.fit: + if agent.fit < space.best_agent.fit: # Replace the best agent's position and fitness with its copy - best_agent.position = copy.deepcopy(agent.position) - best_agent.fit = copy.deepcopy(agent.fit) + space.best_agent.position = copy.deepcopy(agent.position) + space.best_agent.fit = copy.deepcopy(agent.fit) - # Iterate through all agents - for agent in agents: + # 
Iterates through all agents + for agent in space.agents: # Performs the somersault foraging - agent.position = self._somersault_foraging(agent.position, best_agent.position) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. - - """ - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, space.best_agent, function, t, space.n_iterations) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history + agent.position = self._somersault_foraging(agent.position, space.best_agent.position) diff --git a/opytimizer/optimizers/swarm/pio.py b/opytimizer/optimizers/swarm/pio.py index 401ceac1..b24871ac 
100644 --- a/opytimizer/optimizers/swarm/pio.py +++ b/opytimizer/optimizers/swarm/pio.py @@ -2,12 +2,10 @@ """ import numpy as np -from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -27,19 +25,18 @@ class PIO(Optimizer): """ - def __init__(self, algorithm='PIO', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> PIO.') - # Override its parent class with the receiving hyperparams - super(PIO, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(PIO, self).__init__() # Number of mapping iterations self.n_c1 = 150 @@ -50,8 +47,8 @@ def __init__(self, algorithm='PIO', hyperparams=None): # Map and compass factor self.R = 0.2 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -106,44 +103,51 @@ def R(self, R): self._R = R - def _update_velocity(self, position, best_position, velocity, iteration): - """Updates a particle velocity (eq. 5). + @property + def n_p(self): + """int: Number of pigeons. - Args: - position (np.array): Agent's current position. - best_position (np.array): Global best position. - velocity (np.array): Agent's current velocity. - iteration (int): Current iteration. + """ - Returns: - A new velocity. 
+ return self._n_p + + @n_p.setter + def n_p(self, n_p): + if not isinstance(n_p, int): + raise e.TypeError('`n_p` should be an integer') + if n_p <= 0: + raise e.ValueError('`n_p` should be > 0') + + self._n_p = n_p + + @property + def velocity(self): + """np.array: Array of pulse rates. """ - # Generating random number - r1 = r.generate_uniform_random_number() + return self._velocity - # Calculates new velocity - new_velocity = velocity * np.exp(-self.R * (iteration + 1)) + r1 * (best_position - position) + @velocity.setter + def velocity(self, velocity): + if not isinstance(velocity, np.ndarray): + raise e.TypeError('`velocity` should be a numpy array') - return new_velocity + self._velocity = velocity - def _update_position(self, position, velocity): - """Updates a pigeon position (eq. 6). + def create_additional_attrs(self, space): + """Creates additional attributes that are used by this optimizer. Args: - position (np.array): Agent's current position. - velocity (np.array): Agent's current velocity. - - Returns: - A new position. + space (Space): A Space object containing meta-information. """ - # Calculates new position - new_position = position + velocity + # Number of pigeons + self.n_p = space.n_agents - return new_position + # Array of velocities + self.velocity = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) def _calculate_center(self, agents): """Calculates the center position (eq. 8). @@ -187,7 +191,7 @@ def _update_center_position(self, position, center): """ - # Generating random number + # Generates random number r1 = r.generate_uniform_random_number() # Calculates new position based on center @@ -195,13 +199,11 @@ def _update_center_position(self, position, center): return new_position - def _update(self, agents, best_agent, velocity, iteration): - """Method that wraps velocity and position updates over all agents and variables. 
+ def update(self, space, iteration): + """Wraps Pigeon-Inspired Optimization over all agents and variables. Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. - velocity (np.array): Array of current velocities. + space (Space): Space containing agents and update-related information. iteration (int): Current iteration. """ @@ -209,12 +211,14 @@ def _update(self, agents, best_agent, velocity, iteration): # Checks if current iteration is smaller than mapping operator if iteration < self.n_c1: # Iterates through all agents - for i, agent in enumerate(agents): - # Updates current agent velocity - velocity[i] = self._update_velocity(agent.position, best_agent.position, velocity[i], iteration) + for i, agent in enumerate(space.agents): + # Updates current agent velocity (eq. 5) + r1 = r.generate_uniform_random_number() + self.velocity[i] = self.velocity[i] * np.exp(-self.R * (iteration + 1)) + \ + r1 * (space.best_agent.position - agent.position) - # Updates current agent position - agent.position = self._update_position(agent.position, velocity[i]) + # Updates current agent position (eq. 6) + agent.position += self.velocity[i] # Checks if current iteration is smaller than landmark operator elif iteration < self.n_c2: @@ -222,65 +226,12 @@ def _update(self, agents, best_agent, velocity, iteration): self.n_p = int(self.n_p / 2) + 1 # Sorts agents according to their fitness - agents.sort(key=lambda x: x.fit) + space.agents.sort(key=lambda x: x.fit) # Calculates the center position - center = self._calculate_center(agents[:self.n_p]) + center = self._calculate_center(space.agents[:self.n_p]) # Iterates through all agents - for agent in agents: + for agent in space.agents: # Updates current agent position - agent.position = self._update_center_position( agent.position, center) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. 
- - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. - - """ - - # Instantiating number of total pigeons - self.n_p = space.n_agents - - # Instanciating array velocities - velocity = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, space.best_agent, velocity, t) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents, local positions and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history + agent.position = self._update_center_position(agent.position, center) diff --git a/opytimizer/optimizers/swarm/pso.py b/opytimizer/optimizers/swarm/pso.py index 671c50a0..1e3dc959 100644 --- a/opytimizer/optimizers/swarm/pso.py +++ b/opytimizer/optimizers/swarm/pso.py @@ -4,13 +4,10 @@ import copy 
import numpy as np -from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.constants as c -import opytimizer.utils.decorator as d +import opytimizer.utils.constant as c import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -29,19 +26,18 @@ class PSO(Optimizer): """ - def __init__(self, algorithm='PSO', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> PSO.') - # Override its parent class with the receiving hyperparams - super(PSO, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(PSO, self).__init__() # Inertia weight self.w = 0.7 @@ -52,8 +48,8 @@ def __init__(self, algorithm='PSO', hyperparams=None): # Social constant self.c2 = 1.7 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -108,74 +104,54 @@ def c2(self, c2): self._c2 = c2 - def _update_velocity(self, position, best_position, local_position, velocity): - """Updates a particle velocity (p. 295). - - Args: - position (np.array): Agent's current position. - best_position (np.array): Global best position. - local_position (np.array): Agent's local best position. - velocity (np.array): Agent's current velocity. - - Returns: - A new velocity. + @property + def local_position(self): + """np.array: Array of velocities. 
""" - # Generating random numbers - r1 = r.generate_uniform_random_number() - r2 = r.generate_uniform_random_number() + return self._local_position - # Calculates new velocity - new_velocity = self.w * velocity + self.c1 * r1 * (local_position - position) + self.c2 * \ - r2 * (best_position - position) + @local_position.setter + def local_position(self, local_position): + if not isinstance(local_position, np.ndarray): + raise e.TypeError('`local_position` should be a numpy array') - return new_velocity - - def _update_position(self, position, velocity): - """Updates a particle position (p. 294). - - Args: - position (np.array): Agent's current position. - velocity (np.array): Agent's current velocity. + self._local_position = local_position - Returns: - A new position. + @property + def velocity(self): + """np.array: Array of velocities. """ - # Calculates new position - new_position = position + velocity + return self._velocity + + @velocity.setter + def velocity(self, velocity): + if not isinstance(velocity, np.ndarray): + raise e.TypeError('`velocity` should be a numpy array') - return new_position + self._velocity = velocity - def _update(self, agents, best_agent, local_position, velocity): - """Method that wraps velocity and position updates over all agents and variables. + def create_additional_attrs(self, space): + """Creates additional attributes that are used by this optimizer. Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. - local_position (np.array): Array of local best posisitons. - velocity (np.array): Array of current velocities. + space (Space): A Space object containing meta-information. 
""" - # Iterates through all agents - for i, agent in enumerate(agents): - # Updates current agent velocity - velocity[i] = self._update_velocity(agent.position, best_agent.position, local_position[i], velocity[i]) - - # Updates current agent position - agent.position = self._update_position(agent.position, velocity[i]) + # Arrays of local positions and velocities + self.local_position = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) + self.velocity = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) - @d.pre_evaluate - def _evaluate(self, space, function, local_position): + def evaluate(self, space, function): """Evaluates the search space according to the objective function. Args: space (Space): A Space object that will be evaluated. function (Function): A Function object that will be used as the objective function. - local_position (np.array): Array of local best posisitons. """ @@ -190,66 +166,35 @@ def _evaluate(self, space, function, local_position): agent.fit = fit # Also updates the local best position to current's agent position - local_position[i] = copy.deepcopy(agent.position) + self.local_position[i] = copy.deepcopy(agent.position) # If agent's fitness is better than global fitness if agent.fit < space.best_agent.fit: # Makes a deep copy of agent's local best position and fitness to the best agent - space.best_agent.position = copy.deepcopy(local_position[i]) + space.best_agent.position = copy.deepcopy(self.local_position[i]) space.best_agent.fit = copy.deepcopy(agent.fit) - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. + def update(self, space): + """Wraps Particle Swarm Optimization over all agents and variables. Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. 
- pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. + space (Space): Space containing agents and update-related information. """ - # Instanciating array of local positions and velocities - local_position = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) - velocity = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) - - # Initial search space evaluation - self._evaluate(space, function, local_position, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, space.best_agent, local_position, velocity) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, local_position, hook=pre_evaluate) - - # Every iteration, we need to dump agents, local positions and best agent - history.dump(agents=space.agents, - local=local_position, - best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() + # Iterates through all agents + for i, agent in enumerate(space.agents): + # Generates random numbers + r1 = r.generate_uniform_random_number() + r2 = r.generate_uniform_random_number() - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') + # Updates agent's velocity (p. 
294) + self.velocity[i] = self.w * self.velocity[i] + \ + self.c1 * r1 * (self.local_position[i] - agent.position) + \ + self.c2 * r2 * (space.best_agent.position - agent.position) - return history + # Updates agent's position (p. 294) + agent.position += self.velocity[i] class AIWPSO(PSO): @@ -265,12 +210,11 @@ class AIWPSO(PSO): """ - def __init__(self, algorithm='AIWPSO', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ @@ -282,8 +226,8 @@ def __init__(self, algorithm='AIWPSO', hyperparams=None): # Maximum inertia weight self.w_max = 0.9 - # Override its parent class with the receiving hyperparams - super(AIWPSO, self).__init__(algorithm, hyperparams) + # Overrides its parent class with the receiving params + super(AIWPSO, self).__init__(params) logger.info('Class overrided.') @@ -323,95 +267,75 @@ def w_max(self, w_max): self._w_max = w_max - def _compute_success(self, agents, fitness): + @property + def fitness(self): + """list: List of fitnesses. + + """ + + return self._fitness + + @fitness.setter + def fitness(self, fitness): + if not isinstance(fitness, list): + raise e.TypeError('`fitness` should be a list') + + self._fitness = fitness + + def _compute_success(self, agents): """Computes the particles' success for updating inertia weight (eq. 16). Args: agents (list): List of agents. - fitness (np.array): Array of particles' best fitness. 
""" # Initial counter p = 0 - # Iterating through every agent + # Iterates through every agent for i, agent in enumerate(agents): # If current agent fitness is smaller than its best - if agent.fit < fitness[i]: + if agent.fit < self.fitness[i]: # Increments the counter p += 1 # Replaces fitness with current agent's fitness - fitness[i] = agent.fit + self.fitness[i] = agent.fit # Update inertia weight value self.w = (self.w_max - self.w_min) * (p / len(agents)) + self.w_min - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. + def update(self, space, iteration): + """Wraps Adaptive Inertia Weight Particle Swarm Optimization over all agents and variables. Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. + space (Space): Space containing agents and update-related information. + iteration (int): Current iteration. 
""" - # Instanciating array of local positions and velocities - local_position = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) - velocity = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) - - # And also an array of best particle's fitness - fitness = np.zeros(space.n_agents) - - # Initial search space evaluation - self._evaluate(space, function, local_position, hook=pre_evaluate) + # Checks if it is the first iteration + if iteration == 0: + # Creates a list of initial fitnesses + self.fitness = [agent.fit for agent in space.agents] - # Before starting the optimization process - # We need to copy fitness values to temporary array + # Iterates through all agents for i, agent in enumerate(space.agents): - # Copying fitness from agent's fitness - fitness[i] = agent.fit - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') + # Generates random numbers + r1 = r.generate_uniform_random_number() + r2 = r.generate_uniform_random_number() - # Updating agents - self._update(space.agents, space.best_agent, local_position, velocity) + # Updates agent's velocity + self.velocity[i] = self.w * self.velocity[i] + \ + self.c1 * r1 * (self.local_position[i] - agent.position) + \ + self.c2 * r2 * (space.best_agent.position - agent.position) - # Checking if agents meet the bounds limits - space.clip_limits() + # Updates agent's position + agent.position += self.velocity[i] - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, local_position, hook=pre_evaluate) - - # Computing particle's success and updating inertia weight - self._compute_success(space.agents, fitness) - - # Every iteration, we need to dump agents, local positions and best 
agent - history.dump(agents=space.agents, - local=local_position, - best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history + # Computing particle's success and updating inertia weight + self._compute_success(space.agents) class RPSO(PSO): @@ -427,128 +351,74 @@ class RPSO(PSO): """ - def __init__(self, algorithm='RPSO', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: PSO -> RPSO.') - # Override its parent class with the receiving hyperparams - super(RPSO, self).__init__(algorithm, hyperparams) + # Overrides its parent class with the receiving params + super(RPSO, self).__init__(params) logger.info('Class overrided.') - def _update_velocity(self, position, best_position, local_position, max_velocity, velocity, mass): - """Updates a single particle velocity over a single variable (eq. 11). - - Args: - position (np.array): Agent's current position. - best_position (np.array): Global best position. - local_position (np.array): Agent's local best position. - max_velocity (float): Maximum velocity of all agents. - velocity (np.array): Agent's current velocity. - mass (float): Agent's mass. - - Returns: - A new velocity based on relativistic speed proposal. + @property + def mass(self): + """np.array: Array of masses. 
""" - # Generating random numbers - r1 = r.generate_uniform_random_number() - r2 = r.generate_uniform_random_number() + return self._mass - # Calculating gamma parameter - gamma = 1 / np.sqrt(1 - (max_velocity ** 2 / c.LIGHT_SPEED ** 2)) + @mass.setter + def mass(self, mass): + if not isinstance(mass, np.ndarray): + raise e.TypeError('`mass` should be a numpy array') - # Calculates new velocity - new_velocity = mass * velocity * gamma + self.c1 * r1 * (local_position - position) + self.c2 * \ - r2 * (best_position - position) + self._mass = mass - return new_velocity - - def _update(self, agents, best_agent, local_position, velocity, mass): - """Method that wraps velocity and position updates over all agents and variables. + def create_additional_attrs(self, space): + """Creates additional attributes that are used by this optimizer. Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. - local_position (np.array): Array of local best posisitons. - velocity (np.array): Array of current velocities. - mass (np.array): Array of agents' masses. + space (Space): A Space object containing meta-information. 
""" - # Calculating the maximum velocity - max_velocity = np.max(velocity) - - # Iterate through all agents - for i, agent in enumerate(agents): - # Updates current agent velocities - velocity[i] = self._update_velocity( - agent.position, best_agent.position, local_position[i], max_velocity, velocity[i], mass[i]) - - # Updates current agent positions - agent.position = self._update_position(agent.position, velocity[i]) + # Arrays of local positions, velocities and masses + self.local_position = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) + self.velocity = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) + self.mass = r.generate_uniform_random_number(size=(space.n_agents, space.n_variables, space.n_dimensions)) - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. + def update(self, space): + """Wraps Relativistic Particle Swarm Optimization over all agents and variables. Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. + space (Space): Space containing agents and update-related information. 
"""
-        # Instanciating array of local positions, velocities and masses
-        local_position = np.zeros((space.n_agents, space.n_variables, space.n_dimensions))
-        velocity = np.ones((space.n_agents, space.n_variables, space.n_dimensions))
-        mass = r.generate_uniform_random_number(size=(space.n_agents, space.n_variables, space.n_dimensions))
-
-        # Initial search space evaluation
-        self._evaluate(space, function, local_position, hook=pre_evaluate)
-
-        # We will define a History object for further dumping
-        history = h.History(store_best_only)
-
-        # Initializing a progress bar
-        with tqdm(total=space.n_iterations) as b:
-            # These are the number of iterations to converge
-            for t in range(space.n_iterations):
-                logger.to_file(f'Iteration {t+1}/{space.n_iterations}')
-
-                # Updating agents
-                self._update(space.agents, space.best_agent, local_position, velocity, mass)
+        # Calculates the maximum velocity
+        max_velocity = np.max(self.velocity)

-                # Checking if agents meet the bounds limits
-                space.clip_limits()
-
-                # After the update, we need to re-evaluate the search space
-                self._evaluate(space, function, local_position, hook=pre_evaluate)
-
-                # Every iteration, we need to dump agents, local positions and best agent
-                history.dump(agents=space.agents,
-                             local=local_position,
-                             best_agent=space.best_agent)
-
-                # Updates the `tqdm` status
-                b.set_postfix(fitness=space.best_agent.fit)
-                b.update()
+        # Iterates through all agents
+        for i, agent in enumerate(space.agents):
+            # Generates random numbers
+            r1 = r.generate_uniform_random_number()
+            r2 = r.generate_uniform_random_number()

-                logger.to_file(f'Fitness: {space.best_agent.fit}')
-                logger.to_file(f'Position: {space.best_agent.position}')

+            # Updates current agent velocity (eq. 
11) + gamma = 1 / np.sqrt(1 - (max_velocity ** 2 / c.LIGHT_SPEED ** 2)) + self.velocity[i] = self.mass[i] * self.velocity[i] * gamma + \ + self.c1 * r1 * (self.local_position[i] - agent.position) + \ + self.c2 * r2 * (space.best_agent.position - agent.position) - return history + # Updates current agent position + agent.position += self.velocity[i] class SAVPSO(PSO): @@ -564,79 +434,53 @@ class SAVPSO(PSO): """ - def __init__(self, algorithm='SAVPSO', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: PSO -> SAVPSO.') - # Override its parent class with the receiving hyperparams - super(SAVPSO, self).__init__(algorithm, hyperparams) + # Overrides its parent class with the receiving params + super(SAVPSO, self).__init__(params) logger.info('Class overrided.') - def _update_velocity(self, position, best_position, local_position, selected_position, velocity): - """Updates a single particle velocity (eq. 8). - - Args: - position (np.array): Agent's current position. - best_position (np.array): Global best position. - local_position (np.array): Agent's local best position. - selected_position (np.array): Selected agent's position. - velocity (np.array): Agent's current velocity. - - Returns: - A new velocity based on self-adaptive proposal. - - """ - - # Generating a random number - r1 = r.generate_uniform_random_number() - - # Calculates new velocity - new_velocity = self.w * np.fabs(selected_position - local_position) * np.sign(velocity) + r1 * ( - local_position - position) + (1 - r1) * (best_position - position) - - return new_velocity - - def _update(self, agents, best_agent, local_position, velocity): - """Method that wraps velocity and position updates over all agents and variables. 
+ def update(self, space): + """Wraps Self-adaptive Velocity Particle Swarm Optimization over all agents and variables. Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. - local_position (np.array): Array of local best posisitons. - velocity (np.array): Array of current velocities. + space (Space): Space containing agents and update-related information. """ # Creates an array of positions - positions = np.zeros((agents[0].position.shape[0], agents[0].position.shape[1])) + positions = np.zeros((space.agents[0].position.shape[0], space.agents[0].position.shape[1])) # For every agent - for agent in agents: + for agent in space.agents: # Sums up its position positions += agent.position # Divides by the number of agents - positions /= len(agents) + positions /= len(space.agents) - # Iterate through all agents - for i, agent in enumerate(agents): + # Iterates through all agents + for i, agent in enumerate(space.agents): # Generates a random index for selecting an agent - idx = r.generate_integer_random_number(0, len(agents)) + idx = r.generate_integer_random_number(0, len(space.agents)) - # Updates current agent's velocity - velocity[i] = self._update_velocity( - agent.position, best_agent.position, local_position[i], local_position[idx], velocity[i]) + # Updates current agent's velocity (eq. 8) + r1 = r.generate_uniform_random_number() + self.velocity[i] = self.w * np.fabs(self.local_position[idx] - self.local_position[i]) * \ + np.sign(self.velocity[i]) + r1 * (self.local_position[i] - agent.position) + \ + (1 - r1) * (space.best_agent.position - agent.position) # Updates current agent's position - agent.position = self._update_position(agent.position, velocity[i]) + agent.position += self.velocity[i] # For every decision variable for j in range(agent.n_variables): @@ -655,7 +499,7 @@ def _update(self, agents, best_agent, local_position, velocity): class VPSO(PSO): - """An VPSO class, inherited from Optimizer. 
+ """A VPSO class, inherited from Optimizer. This is the designed class to define VPSO-related variables and methods. @@ -666,143 +510,71 @@ class VPSO(PSO): """ - def __init__(self, algorithm='VPSO', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: PSO -> VPSO.') - # Override its parent class with the receiving hyperparams - super(VPSO, self).__init__(algorithm, hyperparams) + # Overrides its parent class with the receiving params + super(VPSO, self).__init__(params) logger.info('Class overrided.') - def _update_velocity(self, position, best_position, local_position, velocity, v_velocity): - """Updates a single particle velocity (eq. 3-4). - - Args: - position (np.array): Agent's current position. - best_position (np.array): Global best position. - local_position (np.array): Agent's local best position. - velocity (np.array): Agent's current velocity. - v_velocity (np.array): Agent's current vertical velocity. - - Returns: - A new velocity based on vertical proposal. + @property + def v_velocity(self): + """np.array: Array of vertical velocities. 
""" - # Generating uniform random numbers - r1 = r.generate_uniform_random_number() - r2 = r.generate_uniform_random_number() + return self._v_velocity - # Calculates new velocity - new_velocity = self.w * velocity + self.c1 * r1 * (local_position - position) + self.c2 * \ - r2 * (best_position - position) + @v_velocity.setter + def v_velocity(self, v_velocity): + if not isinstance(v_velocity, np.ndarray): + raise e.TypeError('`v_velocity` should be a numpy array') - # Calculates new vertical velocity - new_v_velocity = v_velocity - (np.dot(new_velocity.T, v_velocity) / - np.dot(new_velocity.T, new_velocity)) * new_velocity + self._v_velocity = v_velocity - return new_velocity, new_v_velocity - - def _update_position(self, position, velocity, v_velocity): - """Updates a particle position (eq. 5). + def create_additional_attrs(self, space): + """Creates additional attributes that are used by this optimizer. Args: - position (np.array): Agent's current position. - velocity (np.array): Agent's current velocity. - v_velocity (np.array): Agent's current vertical velocity. - - Returns: - A new position based on VPSO's paper position update equation. + space (Space): A Space object containing meta-information. """ - # Generates a uniform random number - r1 = r.generate_uniform_random_number() + # Arrays of local positions, velocities and vertical velocities + self.local_position = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) + self.velocity = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) + self.v_velocity = np.ones((space.n_agents, space.n_variables, space.n_dimensions)) - # Calculates new position - new_position = position + r1 * velocity + (1 - r1) * v_velocity - - return new_position - - def _update(self, agents, best_agent, local_position, velocity, v_velocity): - """Method that wraps velocity and position updates over all agents and variables. 
+ def update(self, space): + """Wraps Vertical Particle Swarm Optimization over all agents and variables. Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. - local_position (np.array): Array of local best posisitons. - velocity (np.array): Array of current velocities. - v_velocity (np.array): Array of currentvertical velocities. + space (Space): Space containing agents and update-related information. """ - # Iterate through all agents - for i, agent in enumerate(agents): - # Updates current agent velocity - velocity[i], v_velocity[i] = self._update_velocity( - agent.position, best_agent.position, local_position[i], velocity[i], v_velocity[i]) - - # Updates current agent positions - agent.position = self._update_position(agent.position, velocity[i], v_velocity[i]) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. 
- - """ - - # Instanciating array of local positions, velocities and vertical velocities - local_position = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) - velocity = np.ones((space.n_agents, space.n_variables, space.n_dimensions)) - v_velocity = np.ones((space.n_agents, space.n_variables, space.n_dimensions)) - - # Initial search space evaluation - self._evaluate(space, function, local_position, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, space.best_agent, local_position, velocity, v_velocity) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, local_position, hook=pre_evaluate) - - # Every iteration, we need to dump agents, local positions and best agent - history.dump(agents=space.agents, - local=local_position, - best_agent=space.best_agent) + # Iterates through all agents + for i, agent in enumerate(space.agents): + # Generates uniform random numbers + r1 = r.generate_uniform_random_number() + r2 = r.generate_uniform_random_number() - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() + # Updates current agent velocity (eq. 3) + self.velocity[i] = self.w * self.velocity[i] + self.c1 * r1 * (self.local_position[i] - agent.position) + \ + self.c2 * r2 * (space.best_agent.position - agent.position) - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') + # Updates current agent vertical velocity (eq. 
4) + self.v_velocity[i] -= (np.dot(self.velocity[i].T, self.v_velocity[i]) / + (np.dot(self.velocity[i].T, self.velocity[i]) + c.EPSILON)) * self.velocity[i] - return history + # Updates current agent position (eq. 5) + r1 = r.generate_uniform_random_number() + agent.position += r1 * self.velocity[i] + (1 - r1) * self.v_velocity[i] diff --git a/opytimizer/optimizers/swarm/sbo.py b/opytimizer/optimizers/swarm/sbo.py index cb9dd84a..ce0f05e2 100644 --- a/opytimizer/optimizers/swarm/sbo.py +++ b/opytimizer/optimizers/swarm/sbo.py @@ -2,12 +2,10 @@ """ import numpy as np -from tqdm import tqdm import opytimizer.math.distribution as d import opytimizer.math.random as r import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -28,17 +26,16 @@ class SBO(Optimizer): """ - def __init__(self, algorithm='SBO', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the mp_mutation-heuristics. + params (dict): Contains key-value parameters to the mp_mutation-heuristics. """ - # Override its parent class with the receiving hyperparams - super(SBO, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(SBO, self).__init__() # Step size self.alpha = 0.9 @@ -49,8 +46,8 @@ def __init__(self, algorithm='SBO', hyperparams=None): # Percentage of width between lower and upper bounds self.z = 0.02 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -105,19 +102,44 @@ def z(self, z): self._z = z - def _update(self, agents, best_agent, function, sigma): - """Method that wraps updates over all agents and variables (eq. 1-7). + @property + def sigma(self): + """list: List of widths. 
+ + """ + + return self._sigma + + @sigma.setter + def sigma(self, sigma): + if not isinstance(sigma, list): + raise e.TypeError('`sigma` should be a list') + + self._sigma = sigma + + def create_additional_attrs(self, space): + """Creates additional attributes that are used by this optimizer. + + Args: + space (Space): A Space object containing meta-information. + + """ + + # List of widths + self.sigma = [self.z * (ub - lb) for lb, ub in zip(space.lb, space.ub)] + + def update(self, space, function): + """Wraps Satin Bowerbird Optimizer over all agents and variables (eq. 1-7). Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. + space (Space): Space containing agents and update-related information. function (Function): A Function object that will be used as the objective function. - sigma (list): Width between lower and upper bounds. """ # Calculates a list of fitness per agent - fitness = [1 / (1 + agent.fit) if agent.fit >= 0 else 1 + np.abs(agent.fit) for agent in agents] + fitness = [1 / (1 + agent.fit) if agent.fit >= 0 else 1 + + np.abs(agent.fit) for agent in space.agents] # Calculates the total fitness total_fitness = np.sum(fitness) @@ -125,18 +147,19 @@ def _update(self, agents, best_agent, function, sigma): # Calculates the probability of each agent's fitness probs = [fit / total_fitness for fit in fitness] - # Iterate through all agents - for agent in agents: + # Iterates through all agents + for agent in space.agents: # For every decision variable for j in range(agent.n_variables): # Selects a random individual based on its probability - s = d.generate_choice_distribution(len(agents), probs, 1)[0] + s = d.generate_choice_distribution(len(space.agents), probs, 1)[0] # Calculates the lambda factor lambda_k = self.alpha / (1 + probs[s]) # Updates the decision variable position - agent.position[j] += lambda_k * ((agents[s].position[j] + best_agent.position[j]) / 2 - agent.position[j]) + agent.position[j] += lambda_k * 
((space.agents[s].position[j] + space.best_agent.position[j]) / \ + 2 - agent.position[j]) # Generates an uniform random number r1 = r.generate_uniform_random_number() @@ -144,60 +167,10 @@ def _update(self, agents, best_agent, function, sigma): # If random number is smaller than probability of mutation if r1 < self.p_mutation: # Mutates the decision variable position - agent.position[j] += sigma[j] * r.generate_gaussian_random_number() + agent.position[j] += self.sigma[j] * r.generate_gaussian_random_number() - # Check agent limits - agent.clip_limits() + # Checks agent's limits + agent.clip_by_bound() # Calculates its fitness agent.fit = function(agent.position) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. 
- - """ - - # Calculates the width between lower and upper bounds - sigma = [self.z * (ub - lb) for lb, ub in zip(space.lb, space.ub)] - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, space.best_agent, function, sigma) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history diff --git a/opytimizer/optimizers/swarm/sca.py b/opytimizer/optimizers/swarm/sca.py index 135e46e1..0c3f0f16 100644 --- a/opytimizer/optimizers/swarm/sca.py +++ b/opytimizer/optimizers/swarm/sca.py @@ -2,11 +2,9 @@ """ import numpy as np -from tqdm import tqdm import opytimizer.math.random as r import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -25,19 +23,18 @@ class SCA(Optimizer): """ - def __init__(self, algorithm='SCA', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. 
""" logger.info('Overriding class: Optimizer -> SCA.') - # Override its parent class with the receiving hyperparams - super(SCA, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(SCA, self).__init__() # Minimum function range self.r_min = 0 @@ -48,8 +45,8 @@ def __init__(self, algorithm='SCA', hyperparams=None): # Constant for defining the next position's region self.a = 3 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -134,13 +131,12 @@ def _update_position(self, agent_position, best_position, r1, r2, r3, r4): return new_position - def _update(self, agents, best_agent, iteration, n_iterations): - """Method that wraps Sine Cosine Algorithm over all agents and variables. + def update(self, space, iteration, n_iterations): + """Wraps Sine Cosine Algorithm over all agents and variables. Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. - iteration (int): Current iteration value. + space (Space): Space containing agents and update-related information. + iteration (int): Current iteration. n_iterations (int): Maximum number of iterations. """ @@ -157,54 +153,8 @@ def _update(self, agents, best_agent, iteration, n_iterations): # A random number to decide whether sine or cosine should be used r4 = r.generate_uniform_random_number() - # Iterate through all agents - for agent in agents: + # Iterates through all agents + for agent in space.agents: # Updates agent's position - agent.position = self._update_position(agent.position, best_agent.position, r1, r2, r3, r4) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. 
- store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. - - """ - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, space.best_agent, t, space.n_iterations) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history + agent.position = self._update_position(agent.position, space.best_agent.position, + r1, r2, r3, r4) diff --git a/opytimizer/optimizers/swarm/sfo.py b/opytimizer/optimizers/swarm/sfo.py index d8a1f98e..9df46af7 100644 --- a/opytimizer/optimizers/swarm/sfo.py +++ b/opytimizer/optimizers/swarm/sfo.py @@ -4,11 +4,9 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.random as r import opytimizer.utils.exception as ex -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -29,19 +27,18 @@ class SFO(Optimizer): """ - def __init__(self, algorithm='SFO', 
hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> SFO.') - # Override its parent class with the receiving hyperparams - super(SFO, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(SFO, self).__init__() # Percentage of initial sailfishes self.PP = 0.1 @@ -52,8 +49,8 @@ def __init__(self, algorithm='SFO', hyperparams=None): # Attack power decrease self.e = 0.001 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -108,6 +105,36 @@ def e(self, e): self._e = e + @property + def sardines(self): + """list: List of sardines. + + """ + + return self._sardines + + @sardines.setter + def sardines(self, sardines): + if not isinstance(sardines, list): + raise ex.TypeError('`sardines` should be a list') + + self._sardines = sardines + + def create_additional_attrs(self, space): + """Creates additional attributes that are used by this optimizer. + + Args: + space (Space): A Space object containing meta-information. + + """ + + # List of sardines + self.sardines = [self._generate_random_agent(space.best_agent) + for _ in range(int(space.n_agents / self.PP))] + + # Sorts the population of sardines + self.sardines.sort(key=lambda x: x.fit) + def _generate_random_agent(self, agent): """Generates a new random-based agent. 
@@ -122,10 +149,8 @@ def _generate_random_agent(self, agent): # Makes a deepcopy of agent a = copy.deepcopy(agent) - # Iterates through all decision variables - for j, (lb, ub) in enumerate(zip(a.lb, a.ub)): - # For each decision variable, we generate uniform random numbers - a.position[j] = r.generate_uniform_random_number(lb, ub, a.n_dimensions) + # Fills agent with new random positions + a.fill_with_uniform() return a @@ -175,38 +200,36 @@ def _update_sailfish(self, agent, best_agent, best_sardine, lambda_i): return new_position - def _update(self, agents, best_agent, function, sardines, iteration): - """Method that wraps Sailfish Optimizer updates. + def update(self, space, function, iteration): + """Wraps Sailfish Optimizer over all agents and variables. Args: - agents (list): List of agents (sailfishes). - best_agent (Agent): Global best agent. + space (Space): Space containing agents and update-related information. function (Function): A Function object that will be used as the objective function. - sardines (list): List of agents (sardines). - iteration (int): Current iteration value. + iteration (int): Current iteration. 
""" # Gathers the best sardine - best_sardine = sardines[0] + best_sardine = self.sardines[0] # Calculates the number of sailfishes and sardines - n_sailfishes = len(agents) - n_sardines = len(sardines) + n_sailfishes = len(space.agents) + n_sardines = len(self.sardines) # Calculates the number of decision variables - n_variables = agents[0].n_variables + n_variables = space.agents[0].n_variables # Iterates through every agent - for agent in agents: + for agent in space.agents: # Calculates the lambda value lambda_i = self._calculate_lambda_i(n_sailfishes, n_sardines) # Updates agent's position - agent.position = self._update_sailfish(agent, best_agent, best_sardine, lambda_i) + agent.position = self._update_sailfish(agent, space.best_agent, best_sardine, lambda_i) # Clips agent's limits - agent.clip_limits() + agent.clip_by_bound() # Re-evaluates agent's fitness agent.fit = function(agent.position) @@ -217,7 +240,7 @@ def _update(self, agents, best_agent, function, sardines, iteration): # Checks if attack power is smaller than 0.5 if AP < 0.5: # Calculates the number of sardines possible replacements (eq. 11) - alpha = int(len(sardines) * AP) + alpha = int(len(self.sardines) * AP) # Calculates the number of variables possible replacements (eq. 12) beta = int(n_variables * AP) @@ -236,100 +259,42 @@ def _update(self, agents, best_agent, function, sardines, iteration): r1 = r.generate_uniform_random_number() # Updates the sardine's position (eq. 
9) - sardines[i].position[j] = r1 * (best_agent.position[j] - sardines[i].position[j] + AP) + self.sardines[i].position[j] = r1 * \ + (space.best_agent.position[j] - self.sardines[i].position[j] + AP) # Clips sardine's limits - sardines[i].clip_limits() + self.sardines[i].clip_by_bound() # Re-calculates its fitness - sardines[i].fit = function(sardines[i].position) + self.sardines[i].fit = function(self.sardines[i].position) # If attack power is bigger than 0.5 else: # Iterates through every sardine - for sardine in sardines: + for sardine in self.sardines: # Generates a uniform random number r1 = r.generate_uniform_random_number() # Updates the sardine's position (eq. 9) - sardine.position = r1 * (best_agent.position - sardine.position + AP) + sardine.position = r1 * (space.best_agent.position - sardine.position + AP) # Clips sardine's limits - sardine.clip_limits() + sardine.clip_by_bound() # Re-calculates its fitness sardine.fit = function(sardine.position) # Sorts the population of agents (sailfishes) and sardines - agents.sort(key=lambda x: x.fit) - sardines.sort(key=lambda x: x.fit) + space.agents.sort(key=lambda x: x.fit) + self.sardines.sort(key=lambda x: x.fit) # Iterates through every agent - for agent in agents: + for agent in space.agents: # Iterates through every sardine - for sardine in sardines: + for sardine in self.sardines: # If agent is worse than sardine (eq. 13) if agent.fit > sardine.fit: # Copies sardine to agent agent = copy.deepcopy(sardine) break - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. 
- - Returns: - A History object holding all agents' positions and fitness achieved during the task. - - """ - - # Initializes a population of sardines - sardines = [self._generate_random_agent(space.best_agent) - for _ in range(int(space.n_agents / self.PP))] - - # Iterates through every sardine - for sardine in sardines: - # Calculates its fitness - sardine.fit = function(sardine.position) - - # Sorts the population of sardines - sardines.sort(key=lambda x: x.fit) - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, space.best_agent, function, sardines, t) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history diff --git a/opytimizer/optimizers/swarm/sos.py b/opytimizer/optimizers/swarm/sos.py index 59d29d37..7866363a 100644 --- a/opytimizer/optimizers/swarm/sos.py +++ b/opytimizer/optimizers/swarm/sos.py @@ -4,10 +4,8 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -26,22 +24,21 @@ class SOS(Optimizer): """ - def 
__init__(self, algorithm='SOS', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> SOS.') - # Override its parent class with the receiving hyperparams - super(SOS, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(SOS, self).__init__() - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -60,7 +57,7 @@ def _mutualism(self, agent_i, agent_j, best_agent, function): a = copy.deepcopy(agent_i) b = copy.deepcopy(agent_j) - # Calculates the mutual vector (Eq. 3) + # Calculates the mutual vector (eq. 3) mutual_vector = (agent_i.position + agent_j.position) / 2 # Calculates the benefitial factors @@ -69,13 +66,13 @@ def _mutualism(self, agent_i, agent_j, best_agent, function): # Generates a uniform random number r1 = r.generate_uniform_random_number() - # Re-calculates the new positions (Eq. 1 and 2) + # Re-calculates the new positions (eq. 1 and 2) a.position += r1 * (best_agent.position - mutual_vector * BF_1) b.position += r1 * (best_agent.position - mutual_vector * BF_2) # Checks their limits - a.clip_limits() - b.clip_limits() + a.clip_by_bound() + b.clip_by_bound() # Evaluates both agents a.fit = function(a.position) @@ -110,11 +107,11 @@ def _commensalism(self, agent_i, agent_j, best_agent, function): # Generates a uniform random number r1 = r.generate_uniform_random_number(-1, 1) - # Updates the agent's position (Eq. 4) + # Updates the agent's position (eq. 
4) a.position += r1 * (best_agent.position - agent_j.position) # Checks its limits - a.clip_limits() + a.clip_by_bound() # Evaluates its new position a.fit = function(a.position) @@ -145,7 +142,7 @@ def _parasitism(self, agent_i, agent_j, function): p.position[r1] = r.generate_uniform_random_number(p.lb[r1], p.ub[r1]) # Checks its limits - p.clip_limits() + p.clip_by_bound() # Evaluates its position p.fit = function(p.position) @@ -156,73 +153,25 @@ def _parasitism(self, agent_i, agent_j, function): agent_j.position = copy.deepcopy(p.position) agent_j.fit = copy.deepcopy(p.fit) - def _update(self, agents, best_agent, function): - """Method that wraps Symbiotic Organisms Search. over all agents and variables. + def update(self, space, function): + """Wraps Symbiotic Organisms Search over all agents and variables. Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. + space (Space): Space containing agents and update-related information. function (Function): A Function object that will be used as the objective function. 
""" # Iterates through all agents - for i, agent in enumerate(agents): + for i, agent in enumerate(space.agents): # Generates a random integer for mutualism and performs it - j = r.generate_integer_random_number(0, len(agents), exclude_value=i) - self._mutualism(agent, agents[j], best_agent, function) + j = r.generate_integer_random_number( 0, len(space.agents), exclude_value=i) + self._mutualism(agent, space.agents[j], space.best_agent, function) # Re-generates a random integer for commensalism and performs it - j = r.generate_integer_random_number(0, len(agents), exclude_value=i) - self._commensalism(agent, agents[j], best_agent, function) + j = r.generate_integer_random_number(0, len(space.agents), exclude_value=i) + self._commensalism(agent, space.agents[j], space.best_agent, function) # Re-generates a random integer for parasitism and performs it - j = r.generate_integer_random_number(0, len(agents), exclude_value=i) - self._parasitism(agent, agents[j], function) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. 
- - """ - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, space.best_agent, function) - - # Checking if agents meets the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history + j = r.generate_integer_random_number(0, len(space.agents), exclude_value=i) + self._parasitism(agent, space.agents[j], function) diff --git a/opytimizer/optimizers/swarm/ssa.py b/opytimizer/optimizers/swarm/ssa.py index 9b31bef6..b27adb9e 100644 --- a/opytimizer/optimizers/swarm/ssa.py +++ b/opytimizer/optimizers/swarm/ssa.py @@ -2,10 +2,8 @@ """ import numpy as np -from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -24,107 +22,58 @@ class SSA(Optimizer): """ - def __init__(self, algorithm='SSA', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. 
""" logger.info('Overriding class: Optimizer -> SSA.') - # Override its parent class with the receiving hyperparams - super(SSA, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(SSA, self).__init__() - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') - def _update(self, agents, best_agent, iteration, n_iterations): - """Method that wraps the Salp Swarm Algorithm over all agents and variables. + def update(self, space, iteration, n_iterations): + """Wraps Salp Swarm Algorithm over all agents and variables. Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. + space (Space): Space containing agents and update-related information. iteration (int): Current iteration. n_iterations (int): Maximum number of iterations. """ - # Calculates the `c1` coefficient (Eq. 3.2) + # Calculates the `c1` coefficient (eq. 3.2) c1 = 2 * np.exp(-(4 * iteration / n_iterations) ** 2) # Iterates through every agent - for i, _ in enumerate(agents): + for i, _ in enumerate(space.agents): # Checks if it is the first agent if i == 0: # Iterates through every decision variable - for j, (lb, ub) in enumerate(zip(agents[i].lb, agents[i].ub)): + for j, (lb, ub) in enumerate(zip(space.agents[i].lb, space.agents[i].ub)): # Generates two uniform random numbers c2 = r.generate_uniform_random_number() c3 = r.generate_uniform_random_number() # Checks if random number is smaller than 0.5 if c3 < 0.5: - # Updates the leading salp position (Eq. 3.1 - part 1) - agents[i].position[j] = best_agent.position[j] + c1 * ((ub - lb) * c2 + lb) + # Updates the leading salp position (eq. 3.1 - part 1) + space.agents[i].position[j] = space.best_agent.position[j] + c1 * ((ub - lb) * c2 + lb) # If random number is bigger or equal to 0.5 else: - # Updates the leading salp position (Eq. 
3.1 - part 2) - agents[i].position[j] = best_agent.position[j] - c1 * ((ub - lb) * c2 + lb) + # Updates the leading salp position (eq. 3.1 - part 2) + space.agents[i].position[j] = space.best_agent.position[j] - c1 * ((ub - lb) * c2 + lb) # If it is not the first agent else: - # Updates the follower salp position (Eq. 3.4) - agents[i].position = 0.5 * (agents[i].position + agents[i-1].position) - - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. - - Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. - - """ - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, space.best_agent, t, space.n_iterations) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - 
return history + # Updates the follower salp position (eq. 3.4) + space.agents[i].position = 0.5 * (space.agents[i].position + space.agents[i-1].position) diff --git a/opytimizer/optimizers/swarm/sso.py b/opytimizer/optimizers/swarm/sso.py index 741fbd45..b30b7557 100644 --- a/opytimizer/optimizers/swarm/sso.py +++ b/opytimizer/optimizers/swarm/sso.py @@ -4,12 +4,9 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.decorator as d import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as l from opytimizer.core.optimizer import Optimizer @@ -28,19 +25,18 @@ class SSO(Optimizer): """ - def __init__(self, algorithm='SSO', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> SSO.') - # Override its parent class with the receiving hyperparams - super(SSO, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(SSO, self).__init__() # Weighing constant self.C_w = 0.1 @@ -51,8 +47,8 @@ def __init__(self, algorithm='SSO', hyperparams=None): # Global constant self.C_g = 0.9 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -107,57 +103,44 @@ def C_g(self, C_g): self._C_g = C_g - def _update(self, agents, best_agent, local_position): - """Method that wraps velocity and position updates over all agents and variables. - - Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. - local_position (np.array): Array of local best posisitons. + @property + def local_position(self): + """np.array: Array of local positions. 
""" - # Iterates through all agents - for i, agent in enumerate(agents): - # Iterates through every decision variable - for j in range(agent.n_variables): - # Generates a uniform random number - r1 = r.generate_uniform_random_number() + return self._local_position - # If random number is smaller than `C_w` - if r1 < self.C_w: - # Ignores the position update - pass + @local_position.setter + def local_position(self, local_position): + if not isinstance(local_position, np.ndarray): + raise e.TypeError('`local_position` should be a numpy array') - # If random number is between `C_w` and `C_p` - elif r1 < self.C_p: - # Updates agent's position with its local position - agent.position[j] = local_position[i][j] + self._local_position = local_position - # If random number is between `C_p` and `C_g` - elif r1 < self.C_g: - # Updates agent's position with best position - agent.position[j] = best_agent.position[j] + def create_additional_attrs(self, space): + """Creates additional attributes that are used by this optimizer. - # If random number is greater than `C_g` - else: - # Updates agent's position with random number - agent.position[j] = r.generate_uniform_random_number(size=agent.n_dimensions) + Args: + space (Space): A Space object containing meta-information. + + """ - @d.pre_evaluate - def _evaluate(self, space, function, local_position): + # Arrays of local positions + self.local_position = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) + + def evaluate(self, space, function): """Evaluates the search space according to the objective function. Args: space (Space): A Space object that will be evaluated. function (Function): A Function object that will be used as the objective function. - local_position (np.array): Array of local best posisitons. 
""" - # Iterate through all agents + # Iterates through all agents for i, agent in enumerate(space.agents): - # Calculate the fitness value of current agent + # Calculates the fitness value of current agent fit = function(agent.position) # If fitness is better than agent's best fit @@ -166,62 +149,45 @@ def _evaluate(self, space, function, local_position): agent.fit = fit # Also updates the local best position to current's agent position - local_position[i] = copy.deepcopy(agent.position) + self.local_position[i] = copy.deepcopy(agent.position) # If agent's fitness is better than global fitness if agent.fit < space.best_agent.fit: # Makes a deep copy of agent's local best position and fitness to the best agent - space.best_agent.position = copy.deepcopy(local_position[i]) + space.best_agent.position = copy.deepcopy(self.local_position[i]) space.best_agent.fit = copy.deepcopy(agent.fit) - def run(self, space, function, store_best_only=False, pre_evaluate=None): - """Runs the optimization pipeline. + def update(self, space): + """Wraps Simplified Swarm Optimization over all agents and variables. Args: - space (Space): A Space object that will be evaluated. - function (Function): A Function object that will be used as the objective function. - store_best_only (bool): If True, only the best agent of each iteration is stored in History. - pre_evaluate (callable): This function is executed before evaluating the function being optimized. - - Returns: - A History object holding all agents' positions and fitness achieved during the task. + space (Space): Space containing agents and update-related information. 
""" - # Instanciating array of local positions - local_position = np.zeros((space.n_agents, space.n_variables, space.n_dimensions)) - - # Initial search space evaluation - self._evaluate(space, function, local_position, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Updating agents - self._update(space.agents, space.best_agent, local_position) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, local_position, hook=pre_evaluate) + # Iterates through all agents + for i, agent in enumerate(space.agents): + # Iterates through every decision variable + for j in range(agent.n_variables): + # Generates a uniform random number + r1 = r.generate_uniform_random_number() - # Every iteration, we need to dump agents, local positions and best agent - history.dump(agents=space.agents, - local=local_position, - best_agent=space.best_agent) + # If random number is smaller than `C_w` + if r1 < self.C_w: + # Ignores the position update + pass - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() + # If random number is between `C_w` and `C_p` + elif r1 < self.C_p: + # Updates agent's position with its local position + agent.position[j] = self.local_position[i][j] - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') + # If random number is between `C_p` and `C_g` + elif r1 < self.C_g: + # Updates agent's position with best position + agent.position[j] = space.best_agent.position[j] - return history + # If random number is greater than `C_g` + else: + # Updates agent's position with 
random number + agent.position[j] = r.generate_uniform_random_number(size=agent.n_dimensions) diff --git a/opytimizer/optimizers/swarm/stoa.py b/opytimizer/optimizers/swarm/stoa.py index bc5ce8b3..b14eef9c 100644 --- a/opytimizer/optimizers/swarm/stoa.py +++ b/opytimizer/optimizers/swarm/stoa.py @@ -7,7 +7,7 @@ from tqdm import tqdm import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e import opytimizer.utils.history as h import opytimizer.utils.logging as l @@ -28,21 +28,20 @@ class STOA(Optimizer): """ - def __init__(self, algorithm='STOA', hyperparams=None): + def __init__(self, params=None): """Initialization method. Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ logger.info('Overriding class: Optimizer -> STOA.') - # Override its parent class with the receiving hyperparams - super(STOA, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(STOA, self).__init__() - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') diff --git a/opytimizer/optimizers/swarm/woa.py b/opytimizer/optimizers/swarm/woa.py index d77960ac..dd480d24 100644 --- a/opytimizer/optimizers/swarm/woa.py +++ b/opytimizer/optimizers/swarm/woa.py @@ -4,11 +4,9 @@ import copy import numpy as np -from tqdm import tqdm import opytimizer.math.random as r import opytimizer.utils.exception as e -import opytimizer.utils.history as h import opytimizer.utils.logging as log from opytimizer.core.optimizer import Optimizer @@ -27,23 +25,22 @@ class WOA(Optimizer): """ - def __init__(self, algorithm='WOA', hyperparams=None): + def __init__(self, params=None): """Initialization method. 
Args: - algorithm (str): Indicates the algorithm name. - hyperparams (dict): Contains key-value parameters to the meta-heuristics. + params (dict): Contains key-value parameters to the meta-heuristics. """ - # Override its parent class with the receiving hyperparams - super(WOA, self).__init__(algorithm) + # Overrides its parent class with the receiving params + super(WOA, self).__init__() # Logarithmic spiral self.b = 1 - # Now, we need to build this class up - self._build(hyperparams) + # Builds the class + self.build(params) logger.info('Class overrided.') @@ -76,27 +73,27 @@ def _generate_random_agent(self, agent): # Makes a deepcopy of agent a = copy.deepcopy(agent) - # Iterates through all decision variables - for j, (lb, ub) in enumerate(zip(a.lb, a.ub)): - # For each decision variable, we generate uniform random numbers - a.position[j] = r.generate_uniform_random_number( - lb, ub, a.n_dimensions) + # Fills agent with new random positions + a.fill_with_uniform() return a - def _update(self, agents, best_agent, coefficient): - """Method that wraps Whale Optimization Algorithm updates. + def update(self, space, iteration, n_iterations): + """Wraps Whale Optimization Algorithm over all agents and variables. Args: - agents (list): List of agents. - best_agent (Agent): Global best agent. - coefficient (float): A linearly decreased coefficient. + space (Space): Space containing agents and update-related information. + iteration (int): Current iteration. 
+            n_iterations (int): Maximum number of iterations.
- - """ - - # Initial search space evaluation - self._evaluate(space, function, hook=pre_evaluate) - - # We will define a History object for further dumping - history = h.History(store_best_only) - - # Initializing a progress bar - with tqdm(total=space.n_iterations) as b: - # These are the number of iterations to converge - for t in range(space.n_iterations): - logger.to_file(f'Iteration {t+1}/{space.n_iterations}') - - # Linearly decreases the coefficient - a = 2 - 2 * t / (space.n_iterations - 1) - - # Updating agents - self._update(space.agents, space.best_agent, a) - - # Checking if agents meet the bounds limits - space.clip_limits() - - # After the update, we need to re-evaluate the search space - self._evaluate(space, function, hook=pre_evaluate) - - # Every iteration, we need to dump agents and best agent - history.dump(agents=space.agents, best_agent=space.best_agent) - - # Updates the `tqdm` status - b.set_postfix(fitness=space.best_agent.fit) - b.update() - - logger.to_file(f'Fitness: {space.best_agent.fit}') - logger.to_file(f'Position: {space.best_agent.position}') - - return history + agent.position = D * np.exp(self.b * l) * np.cos(2 * np.pi * l) + space.best_agent.position diff --git a/opytimizer/opytimizer.py b/opytimizer/opytimizer.py index 3f219f0c..e57ea9a0 100644 --- a/opytimizer/opytimizer.py +++ b/opytimizer/opytimizer.py @@ -1,10 +1,16 @@ -"""Opytimizer entry point. +"""Optimization entry point. """ +import pickle import time +from inspect import signature + +from tqdm import tqdm import opytimizer.utils.exception as e import opytimizer.utils.logging as l +from opytimizer.utils.callback import CallbackVessel +from opytimizer.utils.history import History logger = l.get_logger(__name__) @@ -15,39 +21,46 @@ class Opytimizer: """ - def __init__(self, space=None, optimizer=None, function=None): + def __init__(self, space, optimizer, function, save_agents=False): """Initialization method. 
Args: - space (Space): A Space's object, - where it has to be a child (e.g., SearchSpace, HyperComplexSpace, etc). - optimizer (Optimizer): An Optimizer's object, - where it has to be a child (e.g., PSO, BA, etc). - function (Function): A Function's object, - where it can be a child (e.g., WeightedFunction). + space (Space): Space-child instance. + optimizer (Optimizer): Optimizer-child instance. + function (Function): Function or Function-child instance. + save_agents (bool): Saves all agents in the search space. """ logger.info('Creating class: Opytimizer.') - # Attaches the space to Opytimizer + # Space self.space = space - # Attaches the optimizer + # Optimizer (and its additional variables) self.optimizer = optimizer + self.optimizer.create_additional_attrs(space) - # Lastly, attaches the function + # Function self.function = function - # We will log some important information + # Optimization history + self.history = History(save_agents) + + # Current iteration + self.iteration = 0 + + # Total number of iterations + self.total_iterations = 0 + + # Logs the properties logger.debug('Space: %s | Optimizer: %s| Function: %s.', self.space, self.optimizer, self.function) - logger.info('Class created.') @property def space(self): - """Space: A Space's object, where it has to be a child (SearchSpace, HyperComplexSpace, etc). + """Space: Space-child instance (SearchSpace, HyperComplexSpace, etc). """ @@ -62,7 +75,7 @@ def space(self, space): @property def optimizer(self): - """Optimizer: An Optimizer's object, where it has to be a child (PSO, BA, etc). + """Optimizer: Optimizer-child instance (PSO, BA, etc). """ @@ -77,7 +90,7 @@ def optimizer(self, optimizer): @property def function(self): - """Function: A Function's object, where it can be a child (WeightedFunction). + """Function: Function or Function-child instance (ConstrainedFunction, WeightedFunction, etc). 
""" @@ -90,39 +103,205 @@ def function(self, function): self._function = function - def start(self, store_best_only=False, pre_evaluate=None): + @property + def history(self): + """History: Optimization history. + + """ + + return self._history + + @history.setter + def history(self, history): + if not isinstance(history, History): + raise e.TypeError('`history` should be a History') + + self._history = history + + @property + def iteration(self): + """int: Current iteration. + + """ + + return self._iteration + + @iteration.setter + def iteration(self, iteration): + if not isinstance(iteration, int): + raise e.TypeError('`iteration` should be an integer') + if iteration < 0: + raise e.ValueError('`iteration` should be >= 0') + + self._iteration = iteration + + @property + def total_iterations(self): + """int: Total number of iterations. + + """ + + return self._total_iterations + + @total_iterations.setter + def total_iterations(self, total_iterations): + if not isinstance(total_iterations, int): + raise e.TypeError('`total_iterations` should be an integer') + if total_iterations < 0: + raise e.ValueError('`total_iterations` should be >= 0') + + self._total_iterations = total_iterations + + @property + def evaluate_args(self): + """Converts the optimizer `evaluate` arguments into real variables. + + """ + + # Inspects the `evaluate` and retrieves its parameters + args = signature(self.optimizer.evaluate).parameters + + return [getattr(self, v) for v in args] + + @property + def update_args(self): + """Converts the optimizer `update` arguments into real variables. + + """ + + # Inspects the `update` and retrieves its parameters + args = signature(self.optimizer.update).parameters + + return [getattr(self, v) for v in args] + + def evaluate(self, callbacks): + """Wraps the `evaluate` pipeline with its corresponding callbacks. + + Args: + callback (list): List of callbacks. 
+ + """ + + # Invokes the `on_evaluate_before` callback + callbacks.on_evaluate_before(*self.evaluate_args) + + # Performs an evaluation over the search space + self.optimizer.evaluate(*self.evaluate_args) + + # Invokes the `on_evaluate_after` callback + callbacks.on_evaluate_after(*self.evaluate_args) + + def update(self, callbacks): + """Wraps the `update` pipeline with its corresponding callbacks. + + Args: + callback (list): List of callbacks. + + """ + + # Invokes the `on_update_before` callback + callbacks.on_update_before(*self.update_args) + + # Performs an update over the search space + self.optimizer.update(*self.update_args) + + # Invokes the `on_update_after` callback + callbacks.on_update_after(*self.update_args) + + # Regardless of callbacks or not, every update on the search space + # must meet the bounds limits + self.space.clip_by_bound() + + def start(self, n_iterations=1, callbacks=None): """Starts the optimization task. Args - store_best_only (bool): If True, only the best agent - of each iteration is stored in History. - pre_evaluate (callable): This function is executed - before evaluating the function being optimized. - - Returns: - A History object describing the agents position and best fitness values - at each iteration throughout the optimization process. + n_iterations (int): Maximum number of iterations. + callback (list): List of callbacks. 
""" logger.info('Starting optimization task.') - # Starting timer to count optimization task + # Additional properties + self.n_iterations = n_iterations + callbacks = CallbackVessel(callbacks) + + # Triggers starting time start = time.time() - # Starting optimizer - history = self.optimizer.run(self.space, self.function, store_best_only, pre_evaluate) + # Evaluates the search space + self.evaluate(callbacks) - # Ending timer - end = time.time() + # Initializes a progress bar + with tqdm(total=n_iterations) as b: + # Loops through all iterations + for t in range(n_iterations): + logger.to_file(f'Iteration {t+1}/{n_iterations}') + + # Saves the number of total iterations and current iteration + self.total_iterations += 1 + self.iteration = t + + # Invokes the `on_iteration_begin` callback + callbacks.on_iteration_begin(self.total_iterations, self) - # Calculating optimization task time + # Updates the search space + self.update(callbacks) + + # Re-evaluates the search space + self.evaluate(callbacks) + + # Updates the progress bar status + b.set_postfix(fitness=self.space.best_agent.fit) + b.update() + + # Dumps keyword arguments to model's history + self.history.dump(agents=self.space.agents, + best_agent=self.space.best_agent) + + # Invokes the `on_iteration_end` callback + callbacks.on_iteration_end(self.total_iterations, self) + + logger.to_file(f'Fitness: {self.space.best_agent.fit}') + logger.to_file(f'Position: {self.space.best_agent.position}') + + # Stops the timer and calculates the optimization time + end = time.time() opt_time = end - start - # Dumping the elapsed time to optimization history - history.dump(time=opt_time) + # Dumps the elapsed time to model's history + self.history.dump(time=opt_time) logger.info('Optimization task ended.') logger.info('It took %s seconds.', opt_time) - return history + def save(self, file_path): + """Saves the optimization model to a pickle file. + + Args: + file_path (str): Path of file to be saved. 
+ + """ + + # Opens an output file + with open(file_path, 'wb') as output_file: + # Dumps object to file + pickle.dump(self, output_file) + + @classmethod + def load(cls, file_path): + """Loads the optimization model from a pickle file without needing + to instantiate the class. + + Args: + file_path (str): Path of file to be loaded. + + """ + + # Opens an input file + with open(file_path, "rb") as input_file: + # Loads object from file + opt_model = pickle.load(input_file) + + return opt_model diff --git a/opytimizer/spaces/__init__.py b/opytimizer/spaces/__init__.py index dc0e01b2..0651ab15 100644 --- a/opytimizer/spaces/__init__.py +++ b/opytimizer/spaces/__init__.py @@ -1,3 +1,9 @@ -"""A customized space module to provide different search spaces +"""Customizable space module that provides different search spaces implementations. """ + +from opytimizer.spaces.boolean import BooleanSpace +from opytimizer.spaces.grid import GridSpace +from opytimizer.spaces.hyper_complex import HyperComplexSpace +from opytimizer.spaces.search import SearchSpace +from opytimizer.spaces.tree import TreeSpace diff --git a/opytimizer/spaces/boolean.py b/opytimizer/spaces/boolean.py index 812a0db7..e4d943fc 100644 --- a/opytimizer/spaces/boolean.py +++ b/opytimizer/spaces/boolean.py @@ -1,11 +1,12 @@ """Boolean-based search space. """ +import copy + import numpy as np -import opytimizer.math.random as r import opytimizer.utils.logging as l -from opytimizer.core.space import Space +from opytimizer.core import Space logger = l.get_logger(__name__) @@ -16,50 +17,40 @@ class BooleanSpace(Space): """ - def __init__(self, n_agents=1, n_variables=1, n_iterations=10): + def __init__(self, n_agents, n_variables): """Initialization method. Args: n_agents (int): Number of agents. n_variables (int): Number of decision variables. - n_iterations (int): Number of iterations. 
""" logger.info('Overriding class: Space -> BooleanSpace.') - # Override its parent class with the receiving arguments - super(BooleanSpace, self).__init__(n_agents, n_variables, n_iterations=n_iterations) - - # Defining the lower and upper bounds + # Defines missing override arguments + n_dimensions = 1 lower_bound = np.zeros(n_variables) upper_bound = np.ones(n_variables) - # Now, we need to build this class up - self._build(lower_bound, upper_bound) + # Overrides its parent class with the receiving arguments + super(BooleanSpace, self).__init__(n_agents, n_variables, n_dimensions, + lower_bound, upper_bound) - # Initializing agents - self._initialize_agents() + # Builds the class + self.build() - # We will log some important information logger.info('Class overrided.') def _initialize_agents(self): - """Initialize agents' position array with boolean random numbers. + """Initializes agents with their positions and defines a best agent. """ - logger.debug('Running private method: initialize_agents().') - # Iterates through all agents for agent in self.agents: - # Iterates through all decision variables - for j, (lb, ub) in enumerate(zip(self.lb, self.ub)): - # For each decision variable, we generate binary random numbers - agent.position[j] = r.generate_binary_random_number(size=agent.n_dimensions) - - # Applies the lower bound and upper bounds - agent.lb[j] = lb - agent.ub[j] = ub + # Initializes the agent + agent.fill_with_binary() - logger.debug('Agents initialized.') + # Defines a best agent + self.best_agent = copy.deepcopy(self.agents[0]) diff --git a/opytimizer/spaces/grid.py b/opytimizer/spaces/grid.py index 0dc18d4e..81df0369 100644 --- a/opytimizer/spaces/grid.py +++ b/opytimizer/spaces/grid.py @@ -1,12 +1,13 @@ """Grid-based search space. 
""" +import copy + import numpy as np -import opytimizer.math.random as r import opytimizer.utils.exception as e import opytimizer.utils.logging as l -from opytimizer.core.space import Space +from opytimizer.core import Space logger = l.get_logger(__name__) @@ -17,39 +18,42 @@ class GridSpace(Space): """ - def __init__(self, n_variables=1, step=(0.1,), lower_bound=(0,), upper_bound=(1,)): + def __init__(self, n_variables, step, lower_bound, upper_bound): """Initialization method. Args: n_variables (int): Number of decision variables. - step (tuple): Size of each variable step in the grid. - lower_bound (tuple): Lower bound tuple with the minimum possible values. - upper_bound (tuple): Upper bound tuple with the maximum possible values. + step (float, list, tuple, np.array): Variables' steps. + lower_bound (float, list, tuple, np.array): Minimum possible values. + upper_bound (float, list, tuple, np.array): Maximum possible values. """ logger.info('Overriding class: Space -> GridSpace.') - # Defining a property to hold the step size - self.step = step + # Defines missing override arguments + # `n_agents = 1` is used as a placeholder for now + n_agents = 1 + n_dimensions = 1 - # Creating the searching grid - self._create_grid(step, lower_bound, upper_bound) + # Overrides its parent class with the receiving arguments + super(GridSpace, self).__init__(n_agents, n_variables, n_dimensions, + lower_bound, upper_bound) - # Override its parent class with the receiving arguments - super(GridSpace, self).__init__(len(self.grid), n_variables, n_iterations=1) + # Step size of each variable + self.step = np.asarray(step) - # Now, we need to build this class up - self._build(lower_bound, upper_bound) + # Creates the grid + self._create_grid() - # Initializing agents - self._initialize_agents() + # Builds the class + self.build() logger.info('Class overrided.') @property def step(self): - """tuple: Size of each variable step. + """np.array: Step size of each variable. 
""" @@ -57,14 +61,18 @@ def step(self): @step.setter def step(self, step): - if not isinstance(step, tuple): - raise e.TypeError('`step` should be a tuple') + if not isinstance(step, np.ndarray): + raise e.TypeError('`step` should be a numpy array') + if not step.shape: + step = np.expand_dims(step, -1) + if step.shape[0] != self.n_variables: + raise e.SizeError('`step` should be the same size as `n_variables`') self._step = step @property def grid(self): - """list: Grid with possible searching values. + """np.array: Grid with possible search values. """ @@ -77,48 +85,29 @@ def grid(self, grid): self._grid = grid - def _create_grid(self, step, lower_bound, upper_bound): - """Creates a grid of possible searches. - - Args: - step (tuple): Size of each variable step in the grid. - lower_bound (tuple): Lower bound tuple with the minimum possible values. - upper_bound (tuple): Upper bound tuple with the maximum possible values. + def _create_grid(self): + """Creates a grid of possible search values. 
""" - logger.debug('Running private method: create_grid().') - - # Checks if number of steps equals the number of lower and upper bounds - if len(step) != len(lower_bound) or len(step) != len(upper_bound): - # If not, raises an error - raise e.SizeError('`step` should have the same size of `lower_bound` and `upper_bound`') - - # Creating a meshgrid with all possible searches + # Creates a meshgrid with all possible search values mesh = np.meshgrid(*[s * np.arange(lb / s, ub / s + s) - for s, lb, ub in zip(step, lower_bound, upper_bound)]) + for s, lb, ub in zip(self.step, self.lb, self.ub)]) - # Transforming the meshgrid into a list of possible searches + # Transforms the meshgrid into a list + # and re-defines the number of agents to the length of grid self.grid = np.array(([m.ravel() for m in mesh])).T - - logger.debug('Grid created with step size equal to %s.', step) + self.n_agents = len(self.grid) def _initialize_agents(self): - """Initialize agents' position array with grid values. + """Initializes agents with their positions and defines a best agent. """ - logger.debug('Running private method: initialize_agents().') - # Iterates through all agents and grid options for agent, grid in zip(self.agents, self.grid): - # Iterates through all decision variables - for j, (lb, ub, g) in enumerate(zip(self.lb, self.ub, grid)): - # For each decision variable, we use the grid values - agent.position[j] = r.generate_uniform_random_number(g, g, agent.n_dimensions) - - # Applies the lower and upper bounds - agent.lb[j] = lb - agent.ub[j] = ub + # Initializes the agent + agent.fill_with_static(grid) - logger.debug('Agents initialized.') + # Defines a best agent + self.best_agent = copy.deepcopy(self.agents[0]) diff --git a/opytimizer/spaces/hyper_complex.py b/opytimizer/spaces/hyper_complex.py index 88aafc79..ac85b25e 100644 --- a/opytimizer/spaces/hyper_complex.py +++ b/opytimizer/spaces/hyper_complex.py @@ -1,9 +1,12 @@ """Hypercomplex-based search space. 
""" -import opytimizer.math.random as r +import copy + +import numpy as np + import opytimizer.utils.logging as l -from opytimizer.core.space import Space +from opytimizer.core import Space logger = l.get_logger(__name__) @@ -14,49 +17,40 @@ class HyperComplexSpace(Space): """ - def __init__(self, n_agents=1, n_variables=1, n_dimensions=2, n_iterations=10, - lower_bound=(0,), upper_bound=(1,)): + def __init__(self, n_agents, n_variables, n_dimensions): """Initialization method. Args: n_agents (int): Number of agents. n_variables (int): Number of decision variables. - n_dimensions (int): Dimension of search space. - n_iterations (int): Number of iterations. - lower_bound (tuple): Lower bound tuple with the minimum possible values. - upper_bound (tuple): Upper bound tuple with the maximum possible values. + n_dimensions (int): Number of search space dimensions. """ logger.info('Overriding class: Space -> HyperComplexSpace.') - # Override its parent class with the receiving arguments - super(HyperComplexSpace, self).__init__(n_agents, n_variables, n_dimensions, n_iterations) + # Defines missing override arguments + lower_bound = np.zeros(n_variables) + upper_bound = np.ones(n_variables) - # Now, we need to build this class up - self._build(lower_bound, upper_bound) + # Overrides its parent class with the receiving arguments + super(HyperComplexSpace, self).__init__(n_agents, n_variables, n_dimensions, + lower_bound, upper_bound) - # Initializing agents - self._initialize_agents() + # Builds the class + self.build() logger.info('Class overrided.') def _initialize_agents(self): - """Initialize agents' position array with uniform random numbers. + """Initializes agents with their positions and defines a best agent. 
""" - logger.debug('Running private method: initialize_agents().') - # Iterates through all agents for agent in self.agents: - # Iterates through all decision variables - for j in range(agent.n_variables): - # For each decision variable, we generate uniform random numbers - agent.position[j] = r.generate_uniform_random_number(size=agent.n_dimensions) - - # Applies the lower and upper bounds - agent.lb[j] = 0 - agent.ub[j] = 1 + # Initializes the agent + agent.fill_with_uniform() - logger.debug('Agents initialized.') + # Defines a best agent + self.best_agent = copy.deepcopy(self.agents[0]) diff --git a/opytimizer/spaces/search.py b/opytimizer/spaces/search.py index a08ca372..8f5d3ea3 100644 --- a/opytimizer/spaces/search.py +++ b/opytimizer/spaces/search.py @@ -1,9 +1,10 @@ """Traditional-based search space. """ -import opytimizer.math.random as r +import copy + import opytimizer.utils.logging as l -from opytimizer.core.space import Space +from opytimizer.core import Space logger = l.get_logger(__name__) @@ -14,48 +15,40 @@ class SearchSpace(Space): """ - def __init__(self, n_agents=1, n_variables=1, n_iterations=10, - lower_bound=(0,), upper_bound=(1,)): + def __init__(self, n_agents, n_variables, lower_bound, upper_bound): """Initialization method. Args: n_agents (int): Number of agents. n_variables (int): Number of decision variables. - n_iterations (int): Number of iterations. - lower_bound (tuple): Lower bound tuple with the minimum possible values. - upper_bound (tuple): Upper bound tuple with the maximum possible values. + lower_bound (float, list, tuple, np.array): Minimum possible values. + upper_bound (float, list, tuple, np.array): Maximum possible values. 
""" logger.info('Overriding class: Space -> SearchSpace.') - # Override its parent class with the receiving arguments - super(SearchSpace, self).__init__(n_agents, n_variables, n_iterations=n_iterations) + # Defines missing override arguments + n_dimensions = 1 - # Now, we need to build this class up - self._build(lower_bound, upper_bound) + # Override its parent class with the receiving arguments + super(SearchSpace, self).__init__(n_agents, n_variables, n_dimensions, + lower_bound, upper_bound) - # Initializing agents - self._initialize_agents() + # Builds the class + self.build() logger.info('Class overrided.') def _initialize_agents(self): - """Initialize agents' position array with uniform random numbers. + """Initializes agents with their positions and defines a best agent. """ - logger.debug('Running private method: initialize_agents().') - # Iterates through all agents for agent in self.agents: - # Iterates through all decision variables - for j, (lb, ub) in enumerate(zip(self.lb, self.ub)): - # For each decision variable, we generate uniform random numbers - agent.position[j] = r.generate_uniform_random_number(lb, ub, agent.n_dimensions) - - # Applies the lower and upper bounds - agent.lb[j] = lb - agent.ub[j] = ub + # Initializes the agent + agent.fill_with_uniform() - logger.debug('Agents initialized.') + # Defines a best agent + self.best_agent = copy.deepcopy(self.agents[0]) diff --git a/opytimizer/spaces/tree.py b/opytimizer/spaces/tree.py index b5cae8af..3007a3c8 100644 --- a/opytimizer/spaces/tree.py +++ b/opytimizer/spaces/tree.py @@ -4,12 +4,10 @@ import copy import opytimizer.math.random as r -import opytimizer.utils.constants as c +import opytimizer.utils.constant as c import opytimizer.utils.exception as e import opytimizer.utils.logging as l -from opytimizer.core.agent import Agent -from opytimizer.core.node import Node -from opytimizer.core.space import Space +from opytimizer.core import Agent, Node, Space logger = l.get_logger(__name__) @@ 
-20,31 +18,30 @@ class TreeSpace(Space): """ - def __init__(self, n_trees=1, n_terminals=1, n_variables=1, n_iterations=10, - min_depth=1, max_depth=3, functions=None, - lower_bound=(0,), upper_bound=(1,)): + def __init__(self, n_agents, n_variables, lower_bound, upper_bound, + n_terminals=1, min_depth=1, max_depth=3, functions=None): """Initialization method. Args: - n_trees (int): Number of trees. - n_terminals (int): Number of terminal nodes. + n_agents (int): Number of agents (trees). n_variables (int): Number of decision variables. - n_iterations (int): Number of iterations. + lower_bound (float, list, tuple, np.array): Minimum possible values. + upper_bound (float, list, tuple, np.array): Maximum possible values. + n_terminals (int): Number of terminal nodes. min_depth (int): Minimum depth of the trees. max_depth (int): Maximum depth of the trees. - functions (list): Functions nodes. - lower_bound (tuple): Lower bound tuple with the minimum possible values. - upper_bound (tuple): Upper bound tuple with the maximum possible values. + functions (list): Function nodes. 
""" logger.info('Overriding class: Space -> TreeSpace.') - # Override its parent class with the receiving arguments - super(TreeSpace, self).__init__(n_trees, n_variables, n_iterations=n_iterations) + # Defines missing override arguments + n_dimensions = 1 - # Number of trees - self.n_trees = n_trees + # Override its parent class with the receiving arguments + super(TreeSpace, self).__init__(n_agents, n_variables, n_dimensions, + lower_bound, upper_bound) # Number of terminal nodes self.n_terminals = n_terminals @@ -55,44 +52,20 @@ def __init__(self, n_trees=1, n_terminals=1, n_variables=1, n_iterations=10, # Maximum depth of the trees self.max_depth = max_depth - # Checks if functions do not exist + # Function nodes if functions is None: - # Creates a list for compatibility self.functions = [] - - # If functions really exist else: - # Attach them to a property self.functions = functions - # Now, we need to build this class up - self._build(lower_bound, upper_bound) - - # Initializing the agents (structures that will hold trees' position and fitness) - self._initialize_agents() - - # Creating the terminal nodes and trees + # Creates terminals and trees self._create_terminals() self._create_trees() - logger.info('Class overrided.') - - @property - def n_trees(self): - """int: Number of trees. + # Builds the class + self.build() - """ - - return self._n_trees - - @n_trees.setter - def n_trees(self, n_trees): - if not isinstance(n_trees, int): - raise e.TypeError('`n_trees` should be an integer') - if n_trees <= 0: - raise e.ValueError('`n_trees` should be > 0') - - self._n_trees = n_trees + logger.info('Class overrided.') @property def n_terminals(self): @@ -147,7 +120,7 @@ def max_depth(self, max_depth): @property def functions(self): - """list: Functions nodes. + """list: Function nodes. """ @@ -177,7 +150,7 @@ def terminals(self, terminals): @property def trees(self): - """list: Trees instances (derived from the Node class). 
+ """list: Trees (derived from the Node class). """ @@ -192,7 +165,7 @@ def trees(self, trees): @property def best_tree(self): - """Node: A best tree object from Node class. + """Node: Best tree. """ @@ -205,87 +178,55 @@ def best_tree(self, best_tree): self._best_tree = best_tree - def _initialize_agents(self): - """Initialize agents' position array with uniform random numbers. + def _create_terminals(self): + """Creates a list of terminals. """ - logger.debug('Running private method: initialize_agents().') + # List of terminals + self.terminals = [Agent(self.n_variables, self.n_dimensions, + self.lb, self.ub) for _ in range(self.n_terminals)] - # Iterates through all agents - for agent in self.agents: - # Iterates through all decision variables - for j, (lb, ub) in enumerate(zip(self.lb, self.ub)): - # For each decision variable, we generate uniform random numbers - agent.position[j] = r.generate_uniform_random_number(lb, ub, agent.n_dimensions) - - # Applies the lower and upper bounds - agent.lb[j] = lb - agent.ub[j] = ub - - logger.debug('Agents initialized.') - - def _create_terminals(self): - """Creates a list of terminals based on the Agent class. - - Returns: - A list of terminals. + def _create_trees(self): + """Creates a list of trees based on the GROW algorithm. """ - logger.debug('Running private method: create_terminals().') - - # Creating a list of terminals, which will be Agent instances - self.terminals = [Agent(self.n_variables, self.n_dimensions) - for _ in range(self.n_terminals)] + # List of trees + self.trees = [self.grow(self.min_depth, self.max_depth) + for _ in range(self.n_agents)] - logger.debug('Terminals created.') - - def _create_trees(self, algorithm='GROW'): - """Creates a list of random trees using a specific algorithm. + # Defines a best tree + self.best_tree = copy.deepcopy(self.trees[0]) - Args: - algorithm (str): Algorithm's used to create the initial trees. 
+ logger.debug('Depth: [%d, %d] | Terminals: %d | Function: %s.', + self.min_depth, self.max_depth, self.n_terminals, self.functions) - Returns: - The created trees and their fitness values. + def _initialize_agents(self): + """Initializes agents with their positions and defines a best agent. """ - logger.debug('Running private method: create_trees().') - - # Checks if the chosen algorithm is GROW - if algorithm == 'GROW': - # Creates a list of random trees - self.trees = [self.grow(self.min_depth, self.max_depth) - for _ in range(self.n_trees)] - - # Applies the first tree as the best one - self.best_tree = copy.deepcopy(self.trees[0]) + # Iterates through all agents + for agent in self.agents: + # Initializes the agent + agent.fill_with_uniform() - logger.debug('Trees: %d | Depth: [%d, %d] | ' - 'Terminals: %d | Functions: %s | Algorithm: %s.', - self.n_trees, self.min_depth, self.max_depth, - self.n_terminals, self.functions, algorithm) + # Defines a best agent and a best tree + self.best_agent = copy.deepcopy(self.agents[0]) def _initialize_terminals(self): - """Initialize terminals' position array with uniform random numbers. + """Initializes terminals with their positions. """ # Iterates through all terminals for terminal in self.terminals: - # Iterates through all decision variables - for j, (lb, ub) in enumerate(zip(self.lb, self.ub)): - # For each decision variable, we generate uniform random numbers - terminal.position[j] = r.generate_uniform_random_number(lb, ub, terminal.n_dimensions) - - # Applies the lower and upper bounds - terminal.lb[j] = lb - terminal.ub[j] = ub + # Initializes the terminal + terminal.fill_with_uniform() def grow(self, min_depth=1, max_depth=3): - """It creates a random tree based on the GROW algorithm. + """Creates a random tree based on the GROW algorithm. References: S. Luke. Two Fast Tree-Creation Algorithms for Genetic Programming. 
@@ -296,11 +237,11 @@ def grow(self, min_depth=1, max_depth=3): max_depth (int): Maximum depth of the tree. Returns: - A random tree based on the GROW algorithm. + Random tree based on the GROW algorithm. """ - # Re-initialize the terminals to provide diversity + # Re-initializes the terminals to provide diversity self._initialize_terminals() # If minimum depth equals the maximum depth @@ -308,7 +249,6 @@ def grow(self, min_depth=1, max_depth=3): # Generates a terminal identifier terminal_id = r.generate_integer_random_number(0, self.n_terminals) - # Return the terminal node with its id and corresponding position return Node(terminal_id, 'TERMINAL', self.terminals[terminal_id].position) # Generates a node identifier @@ -320,14 +260,13 @@ def grow(self, min_depth=1, max_depth=3): # Gathers its real identifier terminal_id = node_id - len(self.functions) - # Return the terminal node with its id and corresponding position return Node(terminal_id, 'TERMINAL', self.terminals[terminal_id].position) - # Generates a new function node + # Generates a function node function_node = Node(self.functions[node_id], 'FUNCTION') # For every possible function argument - for i in range(c.N_ARGS_FUNCTION[self.functions[node_id]]): + for i in range(c.FUNCTION_N_ARGS[self.functions[node_id]]): # Calls recursively the grow function and creates a temporary node node = self.grow(min_depth + 1, max_depth) diff --git a/opytimizer/utils/__init__.py b/opytimizer/utils/__init__.py index 86df8150..0e0d4a00 100644 --- a/opytimizer/utils/__init__.py +++ b/opytimizer/utils/__init__.py @@ -1,2 +1,2 @@ -"""An utility package for all common opytimizer modules. +"""Utility package for all common opytimizer modules. """ diff --git a/opytimizer/utils/callback.py b/opytimizer/utils/callback.py new file mode 100644 index 00000000..66399de8 --- /dev/null +++ b/opytimizer/utils/callback.py @@ -0,0 +1,236 @@ +"""Callbacks. 
+""" + +import opytimizer.utils.exception as e + + +class Callback: + """A Callback class that handles additional variables and methods + manipulation that are not provided by the library. + + """ + + def __init__(self): + """Initialization method. + + """ + + pass + + def on_iteration_begin(self, iteration, opt_model): + """Performs a callback whenever an iteration begins. + + Args: + iteration (int): Current iteration. + opt_model (Opytimizer): An instance of the optimization model. + + """ + + pass + + def on_iteration_end(self, iteration, opt_model): + """Performs a callback whenever an iteration ends. + + Args: + iteration (int): Current iteration. + opt_model (Opytimizer): An instance of the optimization model. + + """ + + pass + + def on_evaluate_before(self, *evaluate_args): + """Performs a callback prior to the `evaluate` method. + + """ + + pass + + def on_evaluate_after(self, *evaluate_args): + """Performs a callback after the `evaluate` method. + + """ + + pass + + def on_update_before(self, *update_args): + """Performs a callback prior to the `update` method. + + """ + + pass + + def on_update_after(self, *update_args): + """Performs a callback after the `update` method. + + """ + + pass + + +class CallbackVessel: + """Wraps multiple callbacks in an ready-to-use class. + + """ + + def __init__(self, callbacks): + """Initialization method. + + Args: + callbacks (list): List of Callback-based childs. + + """ + + # Callbacks + self.callbacks = callbacks or [] + + @property + def callbacks(self): + """Space: List of Callback-based childs. + + """ + + return self._callbacks + + @callbacks.setter + def callbacks(self, callbacks): + if not isinstance(callbacks, list): + raise e.TypeError('`callbacks` should be a list') + + self._callbacks = callbacks + + def on_iteration_begin(self, iteration, opt_model): + """Performs a list of callbacks whenever an iteration begins. + + Args: + iteration (int): Current iteration. 
+ opt_model (Opytimizer): An instance of the optimization model. + + """ + + # Iterates through all callbacks and invokes their method + for callback in self.callbacks: + callback.on_iteration_begin(iteration, opt_model) + + def on_iteration_end(self, iteration, opt_model): + """Performs a list of callbacks whenever an iteration ends. + + Args: + iteration (int): Current iteration. + opt_model (Opytimizer): An instance of the optimization model. + + """ + + # Iterates through all callbacks and invokes their method + for callback in self.callbacks: + callback.on_iteration_end(iteration, opt_model) + + def on_evaluate_before(self, *evaluate_args): + """Performs a list of callbacks prior to the `evaluate` method. + + """ + + # Iterates through all callbacks and invokes their method + for callback in self.callbacks: + callback.on_evaluate_before(*evaluate_args) + + def on_evaluate_after(self, *evaluate_args): + """Performs a list of callbacks after the `evaluate` method. + + """ + + # Iterates through all callbacks and invokes their method + for callback in self.callbacks: + callback.on_evaluate_after(*evaluate_args) + + def on_update_before(self, *update_args): + """Performs a list of callbacks prior to the `update` method. + + """ + + # Iterates through all callbacks and invokes their method + for callback in self.callbacks: + callback.on_update_before(*update_args) + + def on_update_after(self, *update_args): + """Performs a list of callbacks after the `update` method. + + """ + + # Iterates through all callbacks and invokes their method + for callback in self.callbacks: + callback.on_update_after(*update_args) + + +class CheckpointCallback(Callback): + """A CheckpointCallback class that handles additional logging and + model's checkpointing. + + """ + + def __init__(self, file_path=None, frequency=0): + """Initialization method. + + Args: + file_path (str): Path of file to be saved. + frequency (int): Interval between checkpoints. 
+ + """ + + # Overrides its parent class with the receiving arguments + super(CheckpointCallback, self).__init__() + + # File's path + self.file_path = file_path or 'checkpoint.pkl' + + # Interval between checkpoints + self.frequency = frequency + + @property + def file_path(self): + """str: File's path. + + """ + + return self._file_path + + @file_path.setter + def file_path(self, file_path): + if not isinstance(file_path, str): + raise e.TypeError('`file_path` should be a string') + + self._file_path = file_path + + @property + def frequency(self): + """int: Interval between checkpoints. + + """ + + return self._frequency + + @frequency.setter + def frequency(self, frequency): + if not isinstance(frequency, int): + raise e.TypeError('`frequency` should be an integer') + if frequency < 0: + raise e.ValueError('`frequency` should be >= 0') + + self._frequency = frequency + + def on_iteration_end(self, iteration, opt_model): + """Performs a callback whenever an iteration ends. + + Args: + iteration (int): Current iteration. + opt_model (Opytimizer): An instance of the optimization model. 
+ + """ + + # Checks if frequency is a positive number different than zero + if self.frequency > 0: + # If `mod` equals to zero + # It means that current iteration must be checkpointed + if iteration % self.frequency == 0: + # Checkpoints the current model's state + opt_model.save(f'iter_{iteration}_{self.file_path}') diff --git a/opytimizer/utils/constants.py b/opytimizer/utils/constant.py similarity index 54% rename from opytimizer/utils/constants.py rename to opytimizer/utils/constant.py index 3eadd079..0687002c 100644 --- a/opytimizer/utils/constants.py +++ b/opytimizer/utils/constant.py @@ -3,25 +3,21 @@ import sys -# A constant value used to avoid division by zero, zero logarithms -# and any possible mathematical error +# Constant value used to avoid division by zero, zero logarithms +# and any possible mathematical errors EPSILON = 1e-32 -# When the agents are initialized, their fitness is defined as -# the maximum float value possible +# When the agents are initialized, their fitness are defined as +# the maximum float possible FLOAT_MAX = sys.float_info.max -# If necessary, one can apply custom rxules to keys' dumping -# when using the History object -HISTORY_KEYS = ['agents', 'best_agent', 'local'] - # When working with relativity theories, it is necessary # to define a constant for the speed of light LIGHT_SPEED = 3e5 # When using Genetic Programming, each function node needs an unique number of arguments, # which is defined by this dictionary -N_ARGS_FUNCTION = { +FUNCTION_N_ARGS = { 'SUM': 2, 'SUB': 2, 'MUL': 2, @@ -34,10 +30,10 @@ 'COS': 1 } -# A test passes if the best solution found by the agent in the target function +# Test passes if the best solution found by the agent in the target function # is smaller than this value TEST_EPSILON = 100 -# When using the Tournament Selection, one must provide the size of rounds, -# where individuals will compete among themselves +# When using the Tournament Selection, one must provide the size of rounds +# 
where individuals compete among themselves TOURNAMENT_SIZE = 2 diff --git a/opytimizer/utils/decorator.py b/opytimizer/utils/decorator.py deleted file mode 100644 index 562c0b6d..00000000 --- a/opytimizer/utils/decorator.py +++ /dev/null @@ -1,87 +0,0 @@ -"""Decorators. -""" - -from functools import wraps - -import opytimizer.math.hyper as h - - -def hyper_spanning(lb, ub): - """Spans a hyper-value between lower and upper bounds. - - Args: - lb (tuple, np.array): Lower bounds. - ub (tuple, np.array): Upper bounds. - - Returns: - The output of the incoming objective function with a spanned input. - - """ - - def _hyper_spanning(f): - """Actually decorates the incoming objective function. - - Args: - f (callable): Incoming objective function. - - Returns: - The wrapped objective function. - - """ - - @wraps(f) - def __hyper_spanning(x): - """Wraps the objective function for calculating its output. - - Args: - x (np.array): Array of hyper-values. - - Returns: - The objective function itself. - - """ - - # Spans `x` between lower and upper bounds - x = h.span(x, lb, ub) - - return f(x) - - return __hyper_spanning - - return _hyper_spanning - - -def pre_evaluate(f): - """Pre-evaluates an objective function. - - Args: - f (callable): Incoming objective function. - - Returns: - The incoming objective function with its pre-evaluation. - - """ - - @wraps(f) - def _pre_evaluate(*args, **kwargs): - """Wraps the objective function for calculating its pre-evaluation. - - Returns: - The objective function itself. 
- - """ - - # Check if there is a `hook` in keyword arguments - if 'hook' in kwargs: - # Applies it to a variable - hook = kwargs['hook'] - - # Check if variable is different than None - if hook: - # Calls the pre evaluation hook with the following arguments: - # optimizer, space, function - hook(args[0], args[1], args[2]) - - return f(*args) - - return _pre_evaluate diff --git a/opytimizer/utils/exception.py b/opytimizer/utils/exception.py index 3667bd3f..04a68c54 100644 --- a/opytimizer/utils/exception.py +++ b/opytimizer/utils/exception.py @@ -9,7 +9,7 @@ class Error(Exception): """A generic Error class derived from Exception. - Essentially, it gets the class and message and logs the error to the logger. + Essentially, it gets a class object and a message, and logs the error to the logger. """ @@ -22,7 +22,7 @@ def __init__(self, cls, msg): """ - # Override its parent class + # Overrides its parent class super(Error, self).__init__() # Logs the error in a formatted way @@ -42,7 +42,7 @@ def __init__(self, error): """ - # Override its parent class with class name and error message + # Overrides its parent class with class name and error message super(ArgumentError, self).__init__('ArgumentError', error) @@ -59,7 +59,7 @@ def __init__(self, error): """ - # Override its parent class with class name and error message + # Overrides its parent class with class name and error message super(BuildError, self).__init__('BuildError', error) @@ -76,7 +76,7 @@ def __init__(self, error): """ - # Override its parent class with class name and error message + # Overrides its parent class with class name and error message super(SizeError, self).__init__('SizeError', error) @@ -93,7 +93,7 @@ def __init__(self, error): """ - # Override its parent class with class name and error message + # Overrides its parent class with class name and error message super(TypeError, self).__init__('TypeError', error) @@ -110,5 +110,5 @@ def __init__(self, error): """ - # Override its parent class 
with class name and error message + # Overrides its parent class with class name and error message super(ValueError, self).__init__('ValueError', error) diff --git a/opytimizer/utils/history.py b/opytimizer/utils/history.py index f904c065..56ef4ff3 100644 --- a/opytimizer/utils/history.py +++ b/opytimizer/utils/history.py @@ -1,11 +1,8 @@ """History-based object that helps in saving the optimization history. """ -import pickle - import numpy as np -import opytimizer.utils.constants as c import opytimizer.utils.exception as e @@ -17,169 +14,124 @@ class History: """ - def __init__(self, store_best_only=False): + def __init__(self, save_agents=False): """Initialization method. Args: - store_best_only (bool): If True, only the best agent of each iteration - is stored in History. + save_agents (bool): Saves all agents in the search space. """ - # Whether only the best agent should be stored or not - self.store_best_only = store_best_only + # Stores only the best agent + self.save_agents = save_agents @property - def store_best_only(self): - """bool: Whether only the best agent should be stored in the class or not. + def save_agents(self): + """bool: Saves all agents in the search space. """ - return self._store_best_only - - @store_best_only.setter - def store_best_only(self, store_best_only): - if not isinstance(store_best_only, bool): - raise e.TypeError('`store_best_only` should be a boolean') - - self._store_best_only = store_best_only - - def __str__(self): - """Prints in a formatted way the history of best agents throughout the - optimization task. 
- - """ + return self._save_agents - # For every iteration - for i, best in enumerate(self.best_agent): - print(f'\nIteration {i+1}/{len(self.best_agent)}') - print(f'\nPosition: {best[0]} | Fitness: {best[1]}') + @save_agents.setter + def save_agents(self, save_agents): + if not isinstance(save_agents, bool): + raise e.TypeError('`save_agents` should be a boolean') - return '' + self._save_agents = save_agents def _parse(self, key, value): - """Parses a value according to the key's requirement. + """Parses incoming values with specified formats. Args: - key (str): Key's identifier. - value (any): Any possible value. + key (str): Key. + value (any): Value. Returns: - The parsed (formatted) value according to the key. + Parsed value according to the specified format. """ # Checks if the key is `agents` if key == 'agents': - # Returns a list of agents' tuples (position, fit) + # Returns a list of tuples (position, fit) return [(v.position.tolist(), v.fit) for v in value] # Checks if the key is `best_agent` if key == 'best_agent': - # Returns the best agent's tuple (position, fit) + # Returns a tuple (position, fit) return (value.position.tolist(), value.fit) - # Checks if the key is `local` - if key == 'local': + # Checks if the key is `local_position` + if key == 'local_position': # Returns a list of local positions return [v.tolist() for v in value] def dump(self, **kwargs): - """Dumps key-value pairs into lists attributes. - - Note that if an attribute already exists, it will be appended - in the list. + """Dumps keyword pairs into self-class attributes. 
""" - # For every key-value pair + # Iterates through all keyword arguments for (key, value) in kwargs.items(): - # Checks if it is supposed to only store the best agent - if self.store_best_only: - # Checks if key is different from `best_agent` or `time` - if key not in ['best_agent', 'time']: - # Breaks the current loop - continue - - # Checks if current key has a specific rule - if key in c.HISTORY_KEYS: - # Parses information using specific rules, if defined - out = self._parse(key, value) + # If current `key` is `agents` and they should not be saved, + # we skip this loop iteration + if key == 'agents' and not self.save_agents: + continue + + # If current `key` has a specific parsing rule, + # we need to parse it accordingly + if key in ['agents', 'best_agent', 'local_position']: + output = self._parse(key, value) else: - # Just applies the information - out = value + output = value - # If there is no attribute + # If class still does not have a `key` property, + # we need to set its initial value as a list if not hasattr(self, key): - # Sets its initial value as a list - setattr(self, key, [out]) - - # If there is already an attribute + setattr(self, key, [output]) else: - # Appends the new value to the attribute - getattr(self, key).append(out) + getattr(self, key).append(output) - def get(self, key, index): - """Gets the desired key based on the input index. + def get_convergence(self, key, index=0): + """Gets the convergence list of a specified key. Args: - key (str): Key's name to be retrieved. - index (tuple): A tuple indicating which indexes should be retrieved. + key (str): Key to be retrieved. + index (tuple): Index to be retrieved. Returns: - All key's values based on the input index. - Note that this method returns all records, i.e., all values from the `t` iterations. + Values based on key and index. 
""" - # Checks if index is a tuple - if not isinstance(index, tuple): - raise e.TypeError('`index` should be a tuple') - # Gathers the numpy array from the attribute attr = np.asarray(getattr(self, key), dtype=list) - # Checks if attribute's dimensions are equal to the length of input index - # We use `- 1` as the method retrieves values from all iterations - if attr.ndim - 1 != len(index): - raise e.SizeError( - f'`index` = {len(index)} should have one less dimension than `key` = {attr.ndim}') - - # Slices the array based on the input index - # Again, slice(None) will retrieve values from all iterations - attr = attr[(slice(None),) + index] - - # We use hstack to horizontally concatenate the axis, - # allowing an easier input to the visualization package - attr = np.hstack(attr) - - return attr - - def save(self, file_name): - """Saves the object to a pickle encoding. + # Checks if the key is `agents` + if key in ['agents']: + # Gathers positions and fitnesses + attr_pos = np.hstack(attr[(slice(None), index, 0)]) + attr_fit = np.hstack(attr[(slice(None), index, 1)]) - Args: - file_name (str): File's name to be saved. + return attr_pos, attr_fit - """ + # Checks if the key is `best_agent` + if key in ['best_agent']: + # Gathers positions and fitnesses + attr_pos = np.hstack(attr[(slice(None), 0)]) + attr_fit = np.hstack(attr[(slice(None), 1)]) - # Opening a destination file - with open(file_name, 'wb') as dest_file: - # Dumping History to file - pickle.dump(self, dest_file) + return attr_pos, attr_fit - def load(self, file_name): - """Loads the object from a pickle encoding. + # Checks if the key is `local_position` + if key in ['local_position']: + # Gathers positions + attr_pos = np.hstack(attr[(slice(None), index)]) - Args: - file_name (str): Pickle's file path to be loaded. 
+ return attr_pos - """ + # Gathers the attribute + attr = np.hstack(attr[(slice(None))]) - # Trying to open the file - with open(file_name, "rb") as input_file: - # Loading History from file - history = pickle.load(input_file) - - # Updating all values - self.__dict__.update(history.__dict__) + return attr diff --git a/opytimizer/utils/logging.py b/opytimizer/utils/logging.py index 00ab3f88..19c1b27a 100644 --- a/opytimizer/utils/logging.py +++ b/opytimizer/utils/logging.py @@ -12,12 +12,13 @@ class Logger(logging.Logger): - """A customized Logger file that enables the possibility of only logging to file. + """Customized Logger class that enables the possibility + of directly logging to files. """ def to_file(self, msg, *args, **kwargs): - """Logs the message only to the logging file. + """Logs the message directly to the logging file. Args: msg (str): Message to be logged. @@ -35,10 +36,10 @@ def to_file(self, msg, *args, **kwargs): def get_console_handler(): - """Gets a console handler to handle logging into console. + """Gets a console handler to handle console logging. Returns: - A handler to output information into console. + Handler to output information into console. """ @@ -50,10 +51,10 @@ def get_console_handler(): def get_timed_file_handler(): - """Gets a timed file handler to handle logging into files. + """Gets a timed file handler to handle timed-files logging. Returns: - A handler to output information into timed files. + Handler to output information into timed-files. """ @@ -65,17 +66,17 @@ def get_timed_file_handler(): def get_logger(logger_name): - """Gets a log and make it avaliable for further use. + """Gets a log and makes it avaliable for further use. Args: logger_name (str): The name of the logger. Returns: - A handler to output information into console's. + Handler to output information into console. 
""" - # Defining a customized logger in order to have the possibility + # Defines a customized logger in order to have the possibility # of only logging to file when desired logging.setLoggerClass(Logger) @@ -89,7 +90,7 @@ def get_logger(logger_name): logger.addHandler(get_console_handler()) logger.addHandler(get_timed_file_handler()) - # True or False for propagating logs + # Do not propagate any log logger.propagate = False return logger diff --git a/opytimizer/visualization/__init__.py b/opytimizer/visualization/__init__.py index fcc6b936..e8036965 100644 --- a/opytimizer/visualization/__init__.py +++ b/opytimizer/visualization/__init__.py @@ -1,2 +1,2 @@ -"""A visualization package for all common opytimizer modules. +"""Visualization package for all common opytimizer modules. """ diff --git a/opytimizer/visualization/convergence.py b/opytimizer/visualization/convergence.py index 31c395df..d13a25f3 100644 --- a/opytimizer/visualization/convergence.py +++ b/opytimizer/visualization/convergence.py @@ -1,4 +1,4 @@ -"""Customizable convergence plots. +"""Convergence plots. """ import matplotlib.pyplot as plt @@ -11,12 +11,12 @@ def plot(*args, labels=None, title='', subtitle='', xlabel='iteration', ylabel=' """Plots the convergence graph of desired variables. Essentially, each variable is a list or numpy array - with size equals to (iterations x 1). + with size equals to `n_iterations`. Args: labels (list): Labels to be applied for each plot in legend. - title (str): The title of the plot. - subtitle (str): The subtitle of the plot. + title (str): Title of the plot. + subtitle (str): Subtitle of the plot. xlabel (str): Axis `x` label. ylabel (str): Axis `y` label. grid (bool): If grid should be used or not. 
@@ -24,44 +24,41 @@ def plot(*args, labels=None, title='', subtitle='', xlabel='iteration', ylabel=' """ - # Creating figure and axis subplots + # Creates the figure and axis subplots _, ax = plt.subplots(figsize=(7, 5)) - # Defining some properties, such as axis labels + # Defines some properties, such as labels, title and subtitle ax.set(xlabel=xlabel, ylabel=ylabel) - - # Setting both title and subtitles ax.set_title(title, loc='left', fontsize=14) ax.set_title(subtitle, loc='right', fontsize=8, color='grey') - # If grid usage is true + # If grid usage is `True` if grid: # Adds the grid property to the axis ax.grid() - # Check if labels argument exists + # Checks if `labels` really exists if labels: - # Also check if it is a list + # Checks a set of pre-defined `labels` conditions if not isinstance(labels, list): raise e.TypeError('`labels` should be a list') - # And check if it has the same size of arguments if len(labels) != len(args): raise e.SizeError('`args` and `labels` should have the same size') - # If labels argument does not exists + # If `labels` do not exists else: - # Creates a list with indicators + # Creates pre-defined `labels` labels = [f'variable_{i}' for i in range(len(args))] - # Plotting the axis + # Plots every argument for (arg, label) in zip(args, labels): ax.plot(arg, label=label) - # If legend usage is true + # If legend usage is `True` if legend: # Adds the legend property to the axis ax.legend() - # Displaying the plot + # Displays the plot plt.show() diff --git a/opytimizer/visualization/surface.py b/opytimizer/visualization/surface.py index eef2612b..107b9dad 100644 --- a/opytimizer/visualization/surface.py +++ b/opytimizer/visualization/surface.py @@ -9,40 +9,33 @@ def plot(points, title='', subtitle='', style='winter', colorbar=True): Args: points (np.array): Points to be plotted with shape equal to (3, n, n). - title (str): The title of the plot. - subtitle (str): The subtitle of the plot. + title (str): Title of the plot. 
+ subtitle (str): Subtitle of the plot. style (str): Surface's style. colorbar (bool): If colorbar should be used or not. """ - # Creating figure + # Creates the figure and axis fig = plt.figure(figsize=(9, 5)) - - # Creating the axis ax = plt.axes(projection='3d') - # Defining some properties, such as axis labels + # Defines some properties, such as labels, title, subtitle and ticks ax.set(xlabel='$x$', ylabel='$y$', zlabel='$f(x, y)$') - - # Reducing the size of the ticks - ax.tick_params(labelsize=8) - - # Setting both title and subtitles ax.set_title(title, loc='left', fontsize=14) ax.set_title(subtitle, loc='right', fontsize=8, color='grey') + ax.tick_params(labelsize=8) - # PLotting the wireframe + # Plots the wireframe and the surface ax.plot_wireframe(points[0], points[1], points[2], color='grey') - - # Plotting the surface surface = ax.plot_surface(points[0], points[1], points[2], - rstride=1, cstride=1, cmap=style, edgecolor='none') + rstride=1, cstride=1, cmap=style, + edgecolor='none') - # If colorbar usage is true + # If colorbar usage is `True` if colorbar: # Adds the colorbar property to the figure fig.colorbar(surface, shrink=0.5, aspect=10) - # Displaying the plot + # Displays the plot plt.show() diff --git a/setup.py b/setup.py index df089166..31f9c677 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ long_description = f.read() setup(name='opytimizer', - version='2.1.4', + version='3.0.0', description='Nature-Inspired Python Optimizer', long_description=long_description, long_description_content_type='text/markdown', diff --git a/tests/opytimizer/core/test_agent.py b/tests/opytimizer/core/test_agent.py index fab76b40..f6d24cc2 100644 --- a/tests/opytimizer/core/test_agent.py +++ b/tests/opytimizer/core/test_agent.py @@ -6,53 +6,53 @@ def test_agent_n_variables(): - new_agent = agent.Agent(n_variables=5, n_dimensions=4) + new_agent = agent.Agent(1, 1, 0, 1) - assert new_agent.n_variables == 5 + assert new_agent.n_variables == 1 def 
test_agent_n_variables_setter(): try: - new_agent = agent.Agent(n_variables=0.0, n_dimensions=4) + new_agent = agent.Agent(0.0, 1, 0, 1) except: - new_agent = agent.Agent(n_variables=5, n_dimensions=4) + new_agent = agent.Agent(1, 1, 0, 1) try: - new_agent = agent.Agent(n_variables=0, n_dimensions=4) + new_agent = agent.Agent(0, 4, 0, 1) except: - new_agent = agent.Agent(n_variables=5, n_dimensions=4) + new_agent = agent.Agent(1, 1, 0, 1) - assert new_agent.n_variables == 5 + assert new_agent.n_variables == 1 def test_agent_n_dimensions(): - new_agent = agent.Agent(n_variables=5, n_dimensions=4) + new_agent = agent.Agent(1, 1, 0, 1) - assert new_agent.n_dimensions == 4 + assert new_agent.n_dimensions == 1 def test_agent_n_dimensions_setter(): try: - new_agent = agent.Agent(n_variables=5, n_dimensions=0.0) + new_agent = agent.Agent(1, 0.0, 0, 1) except: - new_agent = agent.Agent(n_variables=5, n_dimensions=4) + new_agent = agent.Agent(1, 1, 0, 1) try: - new_agent = agent.Agent(n_variables=5, n_dimensions=0) + new_agent = agent.Agent(1, 0, 0, 1) except: - new_agent = agent.Agent(n_variables=5, n_dimensions=4) + new_agent = agent.Agent(1, 1, 0, 1) - assert new_agent.n_dimensions == 4 + assert new_agent.n_dimensions == 1 def test_agent_position(): - new_agent = agent.Agent(n_variables=5, n_dimensions=4) + new_agent = agent.Agent(1, 1, 0, 1) - assert new_agent.position.shape == (5, 4) + assert new_agent.position.shape == (1, 1) def test_agent_position_setter(): - new_agent = agent.Agent(n_variables=1, n_dimensions=1) + new_agent = agent.Agent(1, 1, 0, 1) try: new_agent.position = 10 @@ -63,13 +63,13 @@ def test_agent_position_setter(): def test_agent_fit(): - new_agent = agent.Agent(n_variables=5, n_dimensions=4) + new_agent = agent.Agent(1, 1, 0, 1) assert new_agent.fit == sys.float_info.max def test_agent_fit_setter(): - new_agent = agent.Agent(n_variables=5, n_dimensions=4) + new_agent = agent.Agent(1, 1, 0, 1) try: new_agent.fit = np.array([0]) @@ -80,13 +80,13 @@ 
def test_agent_fit_setter(): def test_agent_lb(): - new_agent = agent.Agent(n_variables=5, n_dimensions=4) + new_agent = agent.Agent(1, 1, 0, 1) - assert len(new_agent.lb) == 5 + assert len(new_agent.lb) == 1 def test_agent_lb_setter(): - new_agent = agent.Agent(n_variables=5, n_dimensions=4) + new_agent = agent.Agent(1, 1, 0, 1) try: new_agent.lb = [1] @@ -95,15 +95,22 @@ def test_agent_lb_setter(): assert new_agent.lb[0] == 1 + try: + new_agent.lb = np.array([1, 2]) + except: + new_agent.lb = np.array([1]) + + assert new_agent.lb[0] == 1 + def test_agent_ub(): - new_agent = agent.Agent(n_variables=5, n_dimensions=4) + new_agent = agent.Agent(1, 1, 0, 1) - assert len(new_agent.ub) == 5 + assert len(new_agent.ub) == 1 def test_agent_ub_setter(): - new_agent = agent.Agent(n_variables=5, n_dimensions=4) + new_agent = agent.Agent(1, 1, 0, 1) try: new_agent.ub = [1] @@ -112,14 +119,49 @@ def test_agent_ub_setter(): assert new_agent.ub[0] == 1 + try: + new_agent.ub = np.array([1, 2]) + except: + new_agent.ub = np.array([1]) + + assert new_agent.ub[0] == 1 + -def test_agent_clip_limits(): - new_agent = agent.Agent(n_variables=1, n_dimensions=1) +def test_agent_clip_by_bound(): + new_agent = agent.Agent(1, 1, 0, 1) new_agent.lb = np.array([10]) new_agent.ub = np.array([10]) - new_agent.clip_limits() + new_agent.clip_by_bound() assert new_agent.position[0] == 10 + + +def test_agent_fill_with_binary(): + new_agent = agent.Agent(1, 1, 0, 1) + + new_agent.fill_with_binary() + + assert new_agent.position[0] in [0, 1] + + +def test_agent_fill_with_static(): + new_agent = agent.Agent(1, 1, 0, 1) + + try: + new_agent.fill_with_static([20, 20]) + except: + new_agent.fill_with_static(20) + + assert new_agent.position[0] == 20 + + +def test_agent_fill_with_uniform(): + new_agent = agent.Agent(1, 1, 0, 1) + + new_agent.fill_with_uniform() + + assert new_agent.position[0] >= 0 + assert new_agent.position[0] <= 1 diff --git a/tests/opytimizer/core/test_function.py 
b/tests/opytimizer/core/test_function.py index f63ce602..a543b8bf 100644 --- a/tests/opytimizer/core/test_function.py +++ b/tests/opytimizer/core/test_function.py @@ -1,76 +1,40 @@ import numpy as np from opytimizer.core import function -from opytimizer.utils import constants +from opytimizer.utils import constant -def test_function_name(): - new_function = function.Function() - - assert new_function.name == 'callable' - - -def test_function_name_setter(): - new_function = function.Function() - - try: - new_function.name = 1 - except: - new_function.name = 'callable' - - assert new_function.name == 'callable' - - -def test_function_constraints(): - new_function = function.Function() - - assert new_function.constraints == [] - +def pointer(x): + return x -def test_function_constraints_setter(): - def c_1(x): - return x**2 +assert pointer(1) == 1 - assert c_1(2) == 4 - try: - new_function = function.Function(constraints=c_1) - except: - new_function = function.Function(constraints=[c_1]) - - assert len(new_function.constraints) == 1 - - -def test_function_penalty(): - new_function = function.Function() - - assert new_function.penalty == 0.0 +def test_function_name(): + new_function = function.Function(pointer) + assert new_function.name == 'pointer' -def test_function_penalty_setter(): - new_function = function.Function() - try: - new_function.penalty = 'a' - except: - new_function.penalty = 1 +def test_function_name_setter(): + new_function = function.Function(pointer) try: - new_function.penalty = -1 + new_function.name = 1 except: - new_function.penalty = 1 + new_function.name = 'pointer' - assert new_function.penalty == 1 + assert new_function.name == 'pointer' def test_function_pointer(): - new_function = function.Function() + new_function = function.Function(pointer) - assert new_function.pointer.__name__ == '_constrain_pointer' + assert new_function.pointer.__name__ == 'pointer' def test_function_pointer_setter(): - new_function = function.Function() + 
new_function = function.Function(pointer) try: new_function.pointer = 'a' @@ -81,20 +45,20 @@ def test_function_pointer_setter(): def test_function_built(): - new_function = function.Function() + new_function = function.Function(pointer) assert new_function.built == True def test_function_built_setter(): - new_function = function.Function() + new_function = function.Function(pointer) new_function.built = False assert new_function.built == False -def test_function_create_pointer(): +def test_function_call(): def square(x): return np.sum(x**2) @@ -105,23 +69,16 @@ def square2(x, y): assert square2(2, 2) == 8 - def c_1(x): - return x[0] + x[1] <= 0 - - assert c_1(np.zeros(2)) == True - - new_function = function.Function(pointer=square, constraints=[c_1], penalty=100) + new_function = function.Function(square) assert new_function(np.zeros(2)) == 0 - assert new_function(np.ones(2)) == 202 - try: - new_function = function.Function(pointer=square2) + new_function = function.Function(square2) except: - new_function = function.Function() + new_function = function.Function(square) - assert new_function.name == 'callable' + assert new_function.name == 'square' def test_function(): @@ -133,6 +90,6 @@ def __call__(self, x): assert s(2) == 4 - new_function = function.Function(pointer=s) + new_function = function.Function(s) assert new_function.name == 'Square' diff --git a/tests/opytimizer/core/test_node.py b/tests/opytimizer/core/test_node.py index ecfc79ea..f5ecc2b1 100644 --- a/tests/opytimizer/core/test_node.py +++ b/tests/opytimizer/core/test_node.py @@ -4,186 +4,259 @@ def test_node(): - new_node = node.Node(name='0', node_type='FUNCTION') + new_node = node.Node(name='0', category='FUNCTION') print(repr(new_node)) print(new_node) def test_node_name(): - new_node = node.Node(name='0', node_type='FUNCTION') + new_node = node.Node(name='0', category='FUNCTION') assert new_node.name == '0' def test_node_name_setter(): try: - new_node = node.Node(name=0.0, node_type='FUNCTION') 
+ new_node = node.Node(name=0.0, category='FUNCTION') except: - new_node = node.Node(name=0, node_type='FUNCTION') + new_node = node.Node(name=0, category='FUNCTION') try: - new_node = node.Node(name=0.0, node_type='FUNCTION') + new_node = node.Node(name=0.0, category='FUNCTION') except: - new_node = node.Node(name='0', node_type='FUNCTION') + new_node = node.Node(name='0', category='FUNCTION') assert str(new_node.name) == '0' -def test_node_type(): - new_node = node.Node(name='0', node_type='FUNCTION') +def test_category(): + new_node = node.Node(name='0', category='FUNCTION') - assert new_node.node_type == 'FUNCTION' + assert new_node.category == 'FUNCTION' -def test_node_type_setter(): +def test_category_setter(): try: - new_node = node.Node(name=0, node_type='F') + new_node = node.Node(name=0, category='F') except: - new_node = node.Node(name=0, node_type='FUNCTION') + new_node = node.Node(name=0, category='FUNCTION') - assert new_node.node_type == 'FUNCTION' + assert new_node.category == 'FUNCTION' try: - new_node = node.Node(name=0, node_type='T') + new_node = node.Node(name=0, category='T') except: - new_node = node.Node(name=0, node_type='TERMINAL', value=np.array(0)) + new_node = node.Node(name=0, category='TERMINAL', value=np.array(0)) - assert new_node.node_type == 'TERMINAL' + assert new_node.category == 'TERMINAL' def test_node_value(): - new_node = node.Node(name='0', node_type='TERMINAL', value=np.array(0)) + new_node = node.Node(name='0', category='TERMINAL', value=np.array(0)) assert new_node.value == 0 def test_node_value_setter(): try: - new_node = node.Node(name=0, node_type='TERMINAL', value=0) + new_node = node.Node(name=0, category='TERMINAL', value=0) except: - new_node = node.Node(name=0, node_type='TERMINAL', value=np.array(0)) + new_node = node.Node(name=0, category='TERMINAL', value=np.array(0)) assert new_node.value == 0 def test_node_left(): - new_node = node.Node(name='0', node_type='FUNCTION') + new_node = node.Node(name='0', 
category='FUNCTION') assert new_node.left == None def test_node_left_setter(): try: - new_node = node.Node(name=0, node_type='FUNCTION', left=1) + new_node = node.Node(name=0, category='FUNCTION', left=1) except: - new_node2 = node.Node(name=0, node_type='TERMINAL', value=np.array(0)) + new_node2 = node.Node(name=0, category='TERMINAL', value=np.array(0)) - new_node = node.Node(name=0, node_type='FUNCTION', left=new_node2) + new_node = node.Node(name=0, category='FUNCTION', left=new_node2) assert isinstance(new_node.left, node.Node) def test_node_right(): - new_node = node.Node(name='0', node_type='FUNCTION') + new_node = node.Node(name='0', category='FUNCTION') assert new_node.right == None def test_node_right_setter(): try: - new_node = node.Node(name=0, node_type='FUNCTION', right=1) + new_node = node.Node(name=0, category='FUNCTION', right=1) except: - new_node2 = node.Node(name=0, node_type='TERMINAL', value=np.array(0)) + new_node2 = node.Node(name=0, category='TERMINAL', value=np.array(0)) - new_node = node.Node(name=0, node_type='FUNCTION', right=new_node2) + new_node = node.Node(name=0, category='FUNCTION', right=new_node2) assert isinstance(new_node.right, node.Node) def test_node_parent(): - new_node = node.Node(name='0', node_type='FUNCTION') + new_node = node.Node(name='0', category='FUNCTION') assert new_node.parent == None def test_node_parent_setter(): try: - new_node = node.Node(name=0, node_type='FUNCTION', parent=1) + new_node = node.Node(name=0, category='FUNCTION', parent=1) except: - new_node2 = node.Node(name=0, node_type='TERMINAL', value=np.array(0)) + new_node2 = node.Node(name=0, category='TERMINAL', value=np.array(0)) - new_node = node.Node(name=0, node_type='FUNCTION', parent=new_node2) + new_node = node.Node(name=0, category='FUNCTION', parent=new_node2) assert isinstance(new_node.parent, node.Node) def test_node_flag(): - new_node = node.Node(name='0', node_type='FUNCTION') + new_node = node.Node(name='0', category='FUNCTION') assert 
new_node.flag == True def test_node_flag_setter(): try: - new_node = node.Node(name=0, node_type='FUNCTION') + new_node = node.Node(name=0, category='FUNCTION') new_node.flag = 10 except: - new_node = node.Node(name=0, node_type='FUNCTION') + new_node = node.Node(name=0, category='FUNCTION') assert new_node.flag == True def test_node_min_depth(): - new_node = node.Node(name='0', node_type='FUNCTION') + new_node = node.Node(name='0', category='FUNCTION') assert new_node.min_depth == 0 def test_node_max_depth(): - new_node = node.Node(name='0', node_type='FUNCTION') + new_node = node.Node(name='0', category='FUNCTION') assert new_node.max_depth == 0 def test_node_n_leaves(): - new_node = node.Node(name='0', node_type='FUNCTION') + new_node = node.Node(name='0', category='FUNCTION') assert new_node.n_leaves == 1 def test_node_n_nodes(): - new_node = node.Node(name='0', node_type='FUNCTION') + new_node = node.Node(name='0', category='FUNCTION') assert new_node.n_nodes == 1 def test_node_position(): - new_node = node.Node(name='0', node_type='TERMINAL', value=np.array(0)) + new_node = node.Node(name='0', category='TERMINAL', value=np.array(0)) assert new_node.position == 0 def test_node_pre_order(): - new_node = node.Node(name='0', node_type='FUNCTION') + new_node = node.Node(name='SUM', category='FUNCTION') + new_node_1 = node.Node(name='1', category='TERMINAL', value=np.array(0)) + new_node_2 = node.Node(name='2', category='TERMINAL', value=np.array(0)) - assert len(new_node.pre_order) == 1 + new_node.left = new_node_1 + new_node.right = new_node_2 + new_node_1.parent = new_node + new_node_2.parent = new_node + + assert len(new_node.pre_order) == 3 def test_node_post_order(): - new_node = node.Node(name='0', node_type='FUNCTION') + new_node = node.Node(name='SUM', category='FUNCTION') + new_node_1 = node.Node(name='1', category='TERMINAL', value=np.array(0)) + new_node_2 = node.Node(name='2', category='TERMINAL', value=np.array(0)) + + new_node.left = new_node_1 + 
new_node.right = new_node_2 + new_node_1.parent = new_node + new_node_2.parent = new_node - assert len(new_node.post_order) == 1 + assert len(new_node.post_order) == 3 def test_node_find_node(): - new_node = node.Node(name='0', node_type='TERMINAL', value=np.array(0)) + new_node = node.Node(name='0', category='TERMINAL', value=np.array(0)) assert new_node.find_node(0) == (None, True) assert new_node.find_node(1) == (None, False) + new_node = node.Node(name='SUM', category='FUNCTION', value=np.array(0)) + + assert new_node.find_node(0) == (None, False) + assert new_node.find_node(1) == (None, False) + + +def test_node_evaluate(): + def _create_node(function_type): + new_node = node.Node(name=function_type, category='FUNCTION') + new_node_1 = node.Node( + name='1', category='TERMINAL', value=np.array(1)) + new_node_2 = node.Node( + name='2', category='TERMINAL', value=np.array(1)) + + new_node.left = new_node_1 + new_node.right = new_node_2 + new_node_1.parent = new_node + new_node_2.parent = new_node + + return new_node + + new_node = _create_node('SUM') + assert node._evaluate(new_node) == 2 + + new_node = _create_node('SUB') + assert node._evaluate(new_node) == 0 + + new_node = _create_node('MUL') + assert node._evaluate(new_node) == 1 + + new_node = _create_node('DIV') + assert node._evaluate(new_node) == 1 + + new_node = _create_node('EXP') + assert np.round(node._evaluate(new_node)) == 3 + + new_node = _create_node('SQRT') + assert node._evaluate(new_node) == 1 + + new_node = _create_node('LOG') + assert node._evaluate(new_node) == 0 + + new_node = _create_node('ABS') + assert node._evaluate(new_node) == 1 + + new_node = _create_node('SIN') + assert np.round(node._evaluate(new_node)) == 1 + + new_node = _create_node('COS') + assert np.round(node._evaluate(new_node)) == 1 + def test_node_properties(): - new_node = node.Node(name='0', node_type='FUNCTION') + new_node = node.Node(name='SUM', category='FUNCTION') + new_node_1 = node.Node(name='1', 
category='TERMINAL', value=np.array(0)) + new_node_2 = node.Node(name='2', category='TERMINAL', value=np.array(0)) + + new_node.left = new_node_1 + new_node.right = new_node_2 + new_node_1.parent = new_node + new_node_2.parent = new_node assert isinstance(node._properties(new_node), dict) + assert print(new_node) is None diff --git a/tests/opytimizer/core/test_optimizer.py b/tests/opytimizer/core/test_optimizer.py index a24367a8..4c129b81 100644 --- a/tests/opytimizer/core/test_optimizer.py +++ b/tests/opytimizer/core/test_optimizer.py @@ -1,43 +1,45 @@ import sys import numpy as np -import pytest -from opytimizer import Opytimizer from opytimizer.core import function, optimizer -from opytimizer.core.function import Function -from opytimizer.optimizers.swarm.pso import PSO from opytimizer.spaces import search -from opytimizer.spaces.search import SearchSpace -def square(x): - return np.sum(x**2) +def test_optimizer_algorithm(): + new_optimizer = optimizer.Optimizer() + assert new_optimizer.algorithm == 'Optimizer' -def test_optimizer_algorithm(): - new_optimizer = optimizer.Optimizer(algorithm='PSO') - assert new_optimizer.algorithm == 'PSO' +def test_optimizer_algorithm_setter(): + new_optimizer = optimizer.Optimizer() + + try: + new_optimizer.algorithm = 0 + except: + new_optimizer.algorithm = 'Optimizer' + + assert new_optimizer.algorithm == 'Optimizer' -def test_optimizer_hyperparams(): +def test_optimizer_params(): new_optimizer = optimizer.Optimizer() - assert new_optimizer.hyperparams == None + assert new_optimizer.params == {} -def test_optimizer_hyperparams_setter(): +def test_optimizer_params_setter(): new_optimizer = optimizer.Optimizer() try: - new_optimizer.hyperparams = 1 + new_optimizer.params = 1 except: - new_optimizer.hyperparams = { + new_optimizer.params = { 'w': 1.5 } - assert new_optimizer.hyperparams['w'] == 1.5 + assert new_optimizer.params['w'] == 1.5 def test_optimizer_built(): @@ -49,90 +51,41 @@ def test_optimizer_built(): def 
test_optimizer_built_setter(): new_optimizer = optimizer.Optimizer() - new_optimizer.built = True + try: + new_optimizer.built = 1 + except: + new_optimizer.built = True assert new_optimizer.built == True -def test_optimizer_update(): +def test_optimizer_build(): new_optimizer = optimizer.Optimizer() - with pytest.raises(NotImplementedError): - new_optimizer._update() + new_optimizer.build({'w': 1.5}) -def test_optimizer_evaluate(): - new_function = function.Function(pointer=square) - - search_space = search.SearchSpace(n_agents=20, n_iterations=10, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - +def test_optimizer_create_additional_attrs(): new_optimizer = optimizer.Optimizer() - new_optimizer._evaluate(search_space, new_function) - - assert search_space.best_agent.fit < sys.float_info.max + new_optimizer.create_additional_attrs(None) -def test_optimizer_run(): +def test_optimizer_update(): new_optimizer = optimizer.Optimizer() - with pytest.raises(NotImplementedError): - target_fn = Function(lambda x: x) - search_space = SearchSpace() - new_optimizer.run(search_space, target_fn) - - -def test_store_best_agent_only(): - pso = PSO() - n_iters = 10 - target_fn = Function(pointer=square) - space = SearchSpace(lower_bound=[-10], upper_bound=[10], n_iterations=n_iters) - - history = Opytimizer(space, pso, target_fn).start(store_best_only=True) - assert not hasattr(history, 'agents') - - assert hasattr(history, 'best_agent') - assert len(history.best_agent) == n_iters - - -def test_store_all_agents(): - pso = PSO() - n_iters = 10 - n_agents = 2 - target_fn = Function(pointer=square) - space = SearchSpace(lower_bound=[-10], upper_bound=[10], n_iterations=n_iters, n_agents=n_agents) + new_optimizer.update() - history = Opytimizer(space, pso, target_fn).start() - assert hasattr(history, 'agents') - # Ensuring that the amount of entries is the same as the amount of iterations and - # that for each iteration all agents are kept - assert len(history.agents) == 
n_iters - assert all(len(iter_agents) == n_agents for iter_agents in history.agents) - - assert hasattr(history, 'best_agent') - assert len(history.best_agent) == n_iters - - -def test_hook(): - pso = PSO() - n_iters = 10 - counter = 0 - - target_fn = Function(pointer=square) - space = SearchSpace(lower_bound=[-10], upper_bound=[10], n_iterations=n_iters, n_agents=15) - - def eval_hook(arg_opt, arg_space, arg_target_fn): - assert arg_opt is pso - assert arg_space is space - assert arg_target_fn is target_fn +def test_optimizer_evaluate(): + def square(x): + return np.sum(x**2) - nonlocal counter - counter += 1 + new_function = function.Function(square) + new_search_space = search.SearchSpace(n_agents=1, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - Opytimizer(space, pso, target_fn).start(pre_evaluate=eval_hook) + new_optimizer = optimizer.Optimizer() + new_optimizer.evaluate(new_search_space, new_function) - # The hook is evaluated for each iteration plus initialization - assert counter == n_iters + 1 + assert new_search_space.best_agent.fit < sys.float_info.max diff --git a/tests/opytimizer/core/test_space.py b/tests/opytimizer/core/test_space.py index a3e81ccf..c8be854f 100644 --- a/tests/opytimizer/core/test_space.py +++ b/tests/opytimizer/core/test_space.py @@ -25,23 +25,23 @@ def test_space_n_agents_setter(): def test_space_n_variables(): - new_space = space.Space(n_variables=2) + new_space = space.Space(n_variables=1) - assert new_space.n_variables == 2 + assert new_space.n_variables == 1 def test_space_n_variables_setter(): try: new_space = space.Space(n_variables=0.0) except: - new_space = space.Space(n_variables=2) + new_space = space.Space(n_variables=1) try: new_space = space.Space(n_variables=0) except: - new_space = space.Space(n_variables=2) + new_space = space.Space(n_variables=1) - assert new_space.n_variables == 2 + assert new_space.n_variables == 1 def test_space_n_dimensions(): @@ -64,26 +64,6 @@ def 
test_space_n_dimensions_setter(): assert new_space.n_dimensions == 1 -def test_space_n_iterations(): - new_space = space.Space(n_iterations=10) - - assert new_space.n_iterations == 10 - - -def test_space_n_iterations_setter(): - try: - new_space = space.Space(n_iterations=0.0) - except: - new_space = space.Space(n_iterations=10) - - try: - new_space = space.Space(n_iterations=0) - except: - new_space = space.Space(n_iterations=10) - - assert new_space.n_iterations == 10 - - def test_space_agents(): new_space = space.Space() @@ -113,57 +93,78 @@ def test_space_best_agent_setter(): try: new_space.best_agent = None except: - new_space.best_agent = agent.Agent() + new_space.best_agent = agent.Agent(1, 1, 0, 1) assert isinstance(new_space.best_agent, agent.Agent) def test_space_lb(): - new_space = space.Space(n_variables=10) + new_space = space.Space(n_variables=1) - assert new_space.lb.shape == (10, ) + assert new_space.lb.shape == (1, ) def test_space_lb_setter(): - new_space = space.Space(n_variables=2) + new_space = space.Space(n_variables=1) try: - new_space.lb = [0, 1] + new_space.lb = [1] except: - new_space.lb = np.array([0, 1]) + new_space.lb = np.array([1]) + + assert new_space.lb[0] == 1 try: - new_space.lb = np.array([0]) + new_space.lb = np.array([1, 2]) except: - new_space.lb = np.array([0, 1]) + new_space.lb = np.array([1]) - assert new_space.lb.shape == (2, ) + assert new_space.lb[0] == 1 def test_space_ub(): - new_space = space.Space(n_variables=10) + new_space = space.Space(n_variables=1) - assert new_space.ub.shape == (10, ) + assert new_space.ub.shape == (1, ) def test_space_ub_setter(): - new_space = space.Space(n_variables=2) + new_space = space.Space(n_variables=1) try: - new_space.ub = [0, 1] + new_space.ub = [1] except: - new_space.ub = np.array([0, 1]) + new_space.ub = np.array([1]) + + assert new_space.ub[0] == 1 try: - new_space.ub = np.array([0]) + new_space.ub = np.array([1, 2]) except: - new_space.ub = np.array([0, 1]) + new_space.ub = 
np.array([1]) - assert new_space.ub.shape == (2, ) + assert new_space.ub[0] == 1 + + +def test_space_built(): + new_space = space.Space() + + assert new_space.built == False + + +def test_space_built_setter(): + new_space = space.Space() + + try: + new_space.built = 1 + except: + new_space.built = True + + assert new_space.built == True def test_space_create_agents(): - new_space = space.Space(n_agents=2, n_variables=2, n_dimensions=1) + new_space = space.Space(n_agents=2, n_variables=1, n_dimensions=1) new_space._create_agents() @@ -171,39 +172,23 @@ def test_space_create_agents(): def test_space_initialize_agents(): - new_space = space.Space(n_agents=2, n_variables=2, n_dimensions=1) + new_space = space.Space(n_agents=2, n_variables=1, n_dimensions=1) - with pytest.raises(NotImplementedError): - new_space._initialize_agents() + new_space._initialize_agents() def test_space_build(): new_space = space.Space() - try: - lb = None - - ub = [10] - - new_space._build(lb, ub) - except: - lb = [0] - - ub = [10] - - new_space._build(lb, ub) - - try: - lb = [0] + new_space.build() - ub = None + assert new_space.built == True - new_space._build(lb, ub) - except: - lb = [0] - ub = [10] +def test_space_clip_by_bound(): + new_space = space.Space() - new_space._build(lb, ub) + new_space.build() + new_space.clip_by_bound() - assert new_space.built == True + assert new_space.agents[0].position[0] == 0 diff --git a/tests/opytimizer/functions/test_constrained.py b/tests/opytimizer/functions/test_constrained.py new file mode 100644 index 00000000..24b61acf --- /dev/null +++ b/tests/opytimizer/functions/test_constrained.py @@ -0,0 +1,124 @@ +import numpy as np + +from opytimizer.functions import constrained +from opytimizer.utils import constant + + +def pointer(x): + return x + +assert pointer(1) == 1 + + +def test_constrained_function_name(): + new_constrained_function = constrained.ConstrainedFunction(pointer, []) + + assert new_constrained_function.name == 'pointer' + + +def 
test_constrained_function_name_setter(): + new_constrained_function = constrained.ConstrainedFunction(pointer, []) + + try: + new_constrained_function.name = 1 + except: + new_constrained_function.name = 'callable' + + assert new_constrained_function.name == 'callable' + + +def test_constrained_function_constraints(): + new_constrained_function = constrained.ConstrainedFunction(pointer, []) + + assert new_constrained_function.constraints == [] + + +def test_constrained_function_constraints_setter(): + def c_1(x): + return x**2 + + assert c_1(2) == 4 + + try: + new_constrained_function = constrained.ConstrainedFunction( + pointer, constraints=c_1) + except: + new_constrained_function = constrained.ConstrainedFunction( + pointer, constraints=[c_1]) + + assert len(new_constrained_function.constraints) == 1 + + +def test_constrained_function_penalty(): + new_constrained_function = constrained.ConstrainedFunction(pointer, []) + + assert new_constrained_function.penalty == 0.0 + + +def test_constrained_function_penalty_setter(): + new_constrained_function = constrained.ConstrainedFunction(pointer, []) + + try: + new_constrained_function.penalty = 'a' + except: + new_constrained_function.penalty = 1 + + try: + new_constrained_function.penalty = -1 + except: + new_constrained_function.penalty = 1 + + assert new_constrained_function.penalty == 1 + + +def test_constrained_function_pointer(): + new_constrained_function = constrained.ConstrainedFunction(pointer, []) + + assert new_constrained_function.pointer.__name__ == 'pointer' + + +def test_constrained_function_pointer_setter(): + new_constrained_function = constrained.ConstrainedFunction(pointer, []) + + try: + new_constrained_function.pointer = 'a' + except: + new_constrained_function.pointer = callable + + assert new_constrained_function.pointer.__class__.__name__ in ('builtin_function_or_method', 'function') + + +def test_constrained_function_built(): + new_constrained_function =
constrained.ConstrainedFunction(pointer, []) + + assert new_constrained_function.built == True + + +def test_constrained_function_built_setter(): + new_constrained_function = constrained.ConstrainedFunction(pointer, []) + + new_constrained_function.built = False + + assert new_constrained_function.built == False + + +def test_constrained_call(): + def square(x): + return np.sum(x**2) + + assert square(2) == 4 + + def square2(x, y): + return x**2 + y**2 + + assert square2(2, 2) == 8 + + def c_1(x): + return x[0] + x[1] <= 0 + + assert c_1(np.zeros(2)) == True + + new_constrained_function = constrained.ConstrainedFunction(square, [c_1], 100) + + assert new_constrained_function(np.zeros(2)) == 0 + assert new_constrained_function(np.ones(2)) == 202 diff --git a/tests/opytimizer/functions/test_weighted.py b/tests/opytimizer/functions/test_weighted.py index 080b5fbe..d199d602 100644 --- a/tests/opytimizer/functions/test_weighted.py +++ b/tests/opytimizer/functions/test_weighted.py @@ -4,13 +4,13 @@ def test_weighted_functions(): - new_weighted = weighted.WeightedFunction() + new_weighted = weighted.WeightedFunction([], []) assert type(new_weighted.functions) == list def test_weighted_functions_setter(): - new_weighted = weighted.WeightedFunction() + new_weighted = weighted.WeightedFunction([], []) try: new_weighted.functions = None @@ -21,33 +21,28 @@ def test_weighted_functions_setter(): def test_weighted_weights(): - new_weighted = weighted.WeightedFunction() + new_weighted = weighted.WeightedFunction([], []) assert type(new_weighted.weights) == list def test_weighted_weights_setter(): - new_weighted = weighted.WeightedFunction() + new_weighted = weighted.WeightedFunction([], []) try: new_weighted.weights = None except: - new_weighted.weights = [0.5, 0.5] + new_weighted.weights = [] - assert len(new_weighted.weights) == 2 - - -def test_weighted_build(): - new_weighted = weighted.WeightedFunction() - - assert type(new_weighted.functions) == list - - assert 
type(new_weighted.pointer).__name__ == 'function' + try: + new_weighted.weights = [1.0] + except: + new_weighted.weights = [] - assert new_weighted.built == True + assert len(new_weighted.weights) == 0 -def test_weighted_create_multi_objective(): +def test_weighted_call(): def square(x): return x**2 @@ -61,6 +56,4 @@ def cube(x): new_weighted = weighted.WeightedFunction( functions=[square, cube], weights=[0.5, 0.5]) - new_weighted._create_multi_objective() - assert new_weighted(2) == 6 diff --git a/tests/opytimizer/math/test_general.py b/tests/opytimizer/math/test_general.py index 4b559403..bceb6891 100644 --- a/tests/opytimizer/math/test_general.py +++ b/tests/opytimizer/math/test_general.py @@ -16,7 +16,7 @@ def test_n_wise(): pairs = general.n_wise(list) - for p in pairs: + for _ in pairs: pass assert type(pairs).__name__ == 'callable_iterator' or 'generator' diff --git a/tests/opytimizer/math/test_hyper.py b/tests/opytimizer/math/test_hyper.py index d860c26f..2d9d6df5 100644 --- a/tests/opytimizer/math/test_hyper.py +++ b/tests/opytimizer/math/test_hyper.py @@ -21,3 +21,16 @@ def test_span(): span_array = hyper.span(array, lb, ub) assert span_array > 0 + + +def test_span_to_hyper_value(): + lb = np.full(1, 10) + ub = np.full(1, 20) + + @hyper.span_to_hyper_value(lb, ub) + def call(x): + return np.sum(x) + + y = call(np.array([[0.5], [0.5]])) + + assert y == 30 diff --git a/tests/opytimizer/math/test_random.py b/tests/opytimizer/math/test_random.py index 17e7ae2e..cde3f529 100644 --- a/tests/opytimizer/math/test_random.py +++ b/tests/opytimizer/math/test_random.py @@ -1,5 +1,9 @@ +import numpy as np + from opytimizer.math import random +np.random.seed(0) + def test_generate_binary_random_number(): binary_array = random.generate_binary_random_number(5) @@ -18,9 +22,9 @@ def test_generate_integer_random_number(): assert integer_array.shape == (5, ) - integer_array = random.generate_integer_random_number(0, 10, 1, 5) + integer_array = 
random.generate_integer_random_number(0, 10, 1, 9) - assert integer_array.shape == (5, ) + assert integer_array.shape == (9, ) def test_generate_uniform_random_number(): diff --git a/tests/opytimizer/optimizers/boolean/test_bmrfo.py b/tests/opytimizer/optimizers/boolean/test_bmrfo.py index 90bb09d6..29d787e6 100644 --- a/tests/opytimizer/optimizers/boolean/test_bmrfo.py +++ b/tests/opytimizer/optimizers/boolean/test_bmrfo.py @@ -5,22 +5,19 @@ from opytimizer.core import function from opytimizer.optimizers.boolean import bmrfo from opytimizer.spaces import boolean -from opytimizer.utils import constants -np.random.seed(0) - -def test_bmrfo_hyperparams(): - hyperparams = { +def test_bmrfo_params(): + params = { 'S': r.generate_binary_random_number(size=(1, 1)) } - new_bmrfo = bmrfo.BMRFO(hyperparams=hyperparams) + new_bmrfo = bmrfo.BMRFO(params=params) assert new_bmrfo.S == 0 or new_bmrfo.S == 1 -def test_bmrfo_hyperparams_setter(): +def test_bmrfo_params_setter(): new_bmrfo = bmrfo.BMRFO() try: @@ -31,20 +28,28 @@ def test_bmrfo_hyperparams_setter(): assert new_bmrfo.S == 0 or new_bmrfo.S == 1 -def test_bmrfo_build(): +def test_bmrfo_cyclone_foraging(): new_bmrfo = bmrfo.BMRFO() - assert new_bmrfo.built == True + boolean_space = boolean.BooleanSpace(n_agents=100, n_variables=2) + cyclone = new_bmrfo._cyclone_foraging( + boolean_space.agents, boolean_space.best_agent.position, 0, 1, 100) -def test_bmrfo_cyclone_foraging(): - new_bmrfo = bmrfo.BMRFO() + assert cyclone[0] == False or cyclone[0] == True + + cyclone = new_bmrfo._cyclone_foraging( + boolean_space.agents, boolean_space.best_agent.position, 1, 1, 100) - boolean_space = boolean.BooleanSpace( - n_agents=5, n_iterations=20, n_variables=2) + assert cyclone[0] == False or cyclone[0] == True + + cyclone = new_bmrfo._cyclone_foraging( + boolean_space.agents, boolean_space.best_agent.position, 0, 1, 1) + + assert cyclone[0] == False or cyclone[0] == True cyclone = new_bmrfo._cyclone_foraging( - 
boolean_space.agents, boolean_space.best_agent.position, 1, 1, 20) + boolean_space.agents, boolean_space.best_agent.position, 1, 1, 1) assert cyclone[0] == False or cyclone[0] == True @@ -52,11 +57,10 @@ def test_bmrfo_cyclone_foraging(): def test_bmrfo_chain_foraging(): new_bmrfo = bmrfo.BMRFO() - boolean_space = boolean.BooleanSpace( - n_agents=5, n_iterations=20, n_variables=2) + boolean_space = boolean.BooleanSpace(n_agents=100, n_variables=2) chain = new_bmrfo._chain_foraging( - boolean_space.agents, boolean_space.best_agent.position, 1) + boolean_space.agents, boolean_space.best_agent.position, 0) assert chain[0] == False or chain[0] == True @@ -64,8 +68,7 @@ def test_bmrfo_chain_foraging(): def test_bmrfo_somersault_foraging(): new_bmrfo = bmrfo.BMRFO() - boolean_space = boolean.BooleanSpace( - n_agents=5, n_iterations=20, n_variables=2) + boolean_space = boolean.BooleanSpace(n_agents=100, n_variables=2) somersault = new_bmrfo._somersault_foraging( boolean_space.agents[0].position, boolean_space.best_agent.position) @@ -73,22 +76,12 @@ def test_bmrfo_somersault_foraging(): assert somersault[0] == False or somersault[0] == True -def test_bmrfo_run(): - def hook(optimizer, space, function): - return - +def test_bmrfo_update(): new_function = function.Function(pointer=Knapsack( values=(55, 10, 47, 5, 4), weights=(95, 4, 60, 32, 23), max_capacity=100)) new_bmrfo = bmrfo.BMRFO() - boolean_space = boolean.BooleanSpace( - n_agents=10, n_iterations=20, n_variables=5) - - history = new_bmrfo.run(boolean_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 + boolean_space = boolean.BooleanSpace(n_agents=100, n_variables=5) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm bmrfo failed to converge.' 
+ new_bmrfo.update(boolean_space, new_function, 1, 20) diff --git a/tests/opytimizer/optimizers/boolean/test_bpso.py b/tests/opytimizer/optimizers/boolean/test_bpso.py index 1b0ca934..240d4d0d 100644 --- a/tests/opytimizer/optimizers/boolean/test_bpso.py +++ b/tests/opytimizer/optimizers/boolean/test_bpso.py @@ -1,31 +1,27 @@ import sys import numpy as np -from opytimark.markers.boolean import Knapsack import opytimizer.math.random as r from opytimizer.core import function from opytimizer.optimizers.boolean import bpso from opytimizer.spaces import boolean -from opytimizer.utils import constants -np.random.seed(0) - -def test_bpso_hyperparams(): - hyperparams = { +def test_bpso_params(): + params = { 'c1': r.generate_binary_random_number(size=(1, 1)), 'c2': r.generate_binary_random_number(size=(1, 1)) } - new_bpso = bpso.BPSO(hyperparams=hyperparams) + new_bpso = bpso.BPSO(params=params) assert new_bpso.c1 == 0 or new_bpso.c1 == 1 assert new_bpso.c2 == 0 or new_bpso.c2 == 1 -def test_bpso_hyperparams_setter(): +def test_bpso_params_setter(): new_bpso = bpso.BPSO() try: @@ -43,27 +39,25 @@ def test_bpso_hyperparams_setter(): assert new_bpso.c2 == 0 or new_bpso.c2 == 1 -def test_bpso_build(): - new_bpso = bpso.BPSO() - - assert new_bpso.built == True - - -def test_bpso_update_velocity(): +def test_bpso_create_additional_attrs(): + boolean_space = boolean.BooleanSpace(n_agents=2, n_variables=5) + new_bpso = bpso.BPSO() + new_bpso.create_additional_attrs(boolean_space) - velocity = new_bpso._update_velocity( - np.array([1]), np.array([1]), np.array([1])) - - assert velocity == 0 or velocity == 1 + try: + new_bpso.local_position = 1 + except: + new_bpso.local_position = np.array([1]) + assert new_bpso.local_position == 1 -def test_bpso_update_position(): - new_bpso = bpso.BPSO() - - position = new_bpso._update_position(1, 1) + try: + new_bpso.velocity = 1 + except: + new_bpso.velocity = np.array([1]) - assert position == 0 or position == 1 + assert new_bpso.velocity == 
1 def test_bpso_evaluate(): @@ -72,35 +66,20 @@ def square(x): new_function = function.Function(pointer=square) - boolean_space = boolean.BooleanSpace( - n_agents=2, n_iterations=10, n_variables=2) + boolean_space = boolean.BooleanSpace(n_agents=2, n_variables=5) new_bpso = bpso.BPSO() + new_bpso.create_additional_attrs(boolean_space) - local_position = np.zeros((2, 2, 1)) - - new_bpso._evaluate(boolean_space, new_function, local_position) + new_bpso.evaluate(boolean_space, new_function) assert boolean_space.best_agent.fit < sys.float_info.max -def test_bpso_run(): - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=Knapsack( - values=(55, 10, 47, 5, 4), weights=(95, 4, 60, 32, 23), max_capacity=100)) +def test_bpso_update(): + boolean_space = boolean.BooleanSpace(n_agents=2, n_variables=5) new_bpso = bpso.BPSO() + new_bpso.create_additional_attrs(boolean_space) - boolean_space = boolean.BooleanSpace( - n_agents=2, n_iterations=10, n_variables=5) - - history = new_bpso.run(boolean_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 - assert len(history.local) > 0 - - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm bpso failed to converge.' 
+ new_bpso.update(boolean_space) diff --git a/tests/opytimizer/optimizers/boolean/test_umda.py b/tests/opytimizer/optimizers/boolean/test_umda.py index 7ef69752..31661b8d 100644 --- a/tests/opytimizer/optimizers/boolean/test_umda.py +++ b/tests/opytimizer/optimizers/boolean/test_umda.py @@ -1,22 +1,17 @@ import numpy as np -from opytimark.markers.boolean import Knapsack -from opytimizer.core import function from opytimizer.optimizers.boolean import umda from opytimizer.spaces import boolean -from opytimizer.utils import constants -np.random.seed(0) - -def test_umda_hyperparams(): - hyperparams = { +def test_umda_params(): + params = { 'p_selection': 0.75, 'lower_bound': 0.05, 'upper_bound': 0.95 } - new_umda = umda.UMDA(hyperparams=hyperparams) + new_umda = umda.UMDA(params=params) assert new_umda.p_selection == 0.75 @@ -25,7 +20,7 @@ def test_umda_hyperparams(): assert new_umda.upper_bound == 0.95 -def test_umda_hyperparams_setter(): +def test_umda_params_setter(): new_umda = umda.UMDA() try: @@ -78,17 +73,10 @@ def test_umda_hyperparams_setter(): assert new_umda.upper_bound == 0.95 -def test_umda_build(): - new_umda = umda.UMDA() - - assert new_umda.built == True - - def test_umda_calculate_probability(): new_umda = umda.UMDA() - boolean_space = boolean.BooleanSpace( - n_agents=5, n_iterations=20, n_variables=2) + boolean_space = boolean.BooleanSpace(n_agents=5, n_variables=2) probs = new_umda._calculate_probability(boolean_space.agents) @@ -105,22 +93,9 @@ def test_umda_sample_position(): assert position == 1 -def test_umda_run(): - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=Knapsack( - values=(55, 10, 47, 5, 4), weights=(95, 4, 60, 32, 23), max_capacity=100)) - +def test_umda_update(): new_umda = umda.UMDA() - boolean_space = boolean.BooleanSpace( - n_agents=2, n_iterations=10, n_variables=5) - - history = new_umda.run(boolean_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert 
len(history.best_agent) > 0 + boolean_space = boolean.BooleanSpace(n_agents=2, n_variables=5) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm umda failed to converge.' + new_umda.update(boolean_space) diff --git a/tests/opytimizer/optimizers/evolutionary/test_bsa.py b/tests/opytimizer/optimizers/evolutionary/test_bsa.py index 4c6ed347..4f56bc00 100644 --- a/tests/opytimizer/optimizers/evolutionary/test_bsa.py +++ b/tests/opytimizer/optimizers/evolutionary/test_bsa.py @@ -1,27 +1,25 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.evolutionary import bsa from opytimizer.spaces import search -from opytimizer.utils import constants np.random.seed(0) -def test_bsa_hyperparams(): - hyperparams = { +def test_bsa_params(): + params = { 'F': 3.0, 'mix_rate': 1 } - new_bsa = bsa.BSA(hyperparams=hyperparams) + new_bsa = bsa.BSA(params=params) assert new_bsa.F == 3.0 assert new_bsa.mix_rate == 1 -def test_bsa_hyperparams_setter(): +def test_bsa_params_setter(): new_bsa = bsa.BSA() try: @@ -42,31 +40,62 @@ def test_bsa_hyperparams_setter(): assert new_bsa.mix_rate == 1 -def test_bsa_build(): +def test_bsa_create_additional_attrs(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + new_bsa = bsa.BSA() + new_bsa.create_additional_attrs(search_space) - assert new_bsa.built == True + try: + new_bsa.old_agents = 1 + except: + new_bsa.old_agents = [] + assert new_bsa.old_agents == [] + + +def test_bsa_permute(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + + new_bsa = bsa.BSA() + new_bsa.create_additional_attrs(search_space) + + new_bsa._permute(search_space.agents) -def test_bsa_run(): - def square(x): - return np.sum(x**2) - def hook(optimizer, space, function): - return +def test_bsa_mutate(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + 
lower_bound=[0, 0], upper_bound=[10, 10]) - new_function = function.Function(pointer=square) + new_bsa = bsa.BSA() + new_bsa.create_additional_attrs(search_space) + + trial_agents = new_bsa._mutate(search_space.agents) + + assert len(trial_agents) == 10 + + +def test_bsa_crossover(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) new_bsa = bsa.BSA() + new_bsa.create_additional_attrs(search_space) - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + trial_agents = new_bsa._mutate(search_space.agents) + new_bsa._crossover(search_space.agents, trial_agents) - history = new_bsa.run(search_space, new_function, pre_evaluate=hook) - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 +def test_bsa_run(): + def square(x): + return np.sum(x**2) + + search_space = search.SearchSpace(n_agents=75, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + + new_bsa = bsa.BSA() + new_bsa.create_additional_attrs(search_space) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm bsa failed to converge.' 
+ new_bsa.update(search_space, square) diff --git a/tests/opytimizer/optimizers/evolutionary/test_de.py b/tests/opytimizer/optimizers/evolutionary/test_de.py index abe90dcd..306b76b8 100644 --- a/tests/opytimizer/optimizers/evolutionary/test_de.py +++ b/tests/opytimizer/optimizers/evolutionary/test_de.py @@ -1,27 +1,23 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.evolutionary import de from opytimizer.spaces import search -from opytimizer.utils import constants -np.random.seed(0) - -def test_de_hyperparams(): - hyperparams = { +def test_de_params(): + params = { 'CR': 0.9, 'F': 0.7 } - new_de = de.DE(hyperparams=hyperparams) + new_de = de.DE(params=params) assert new_de.CR == 0.9 assert new_de.F == 0.7 -def test_de_hyperparams_setter(): +def test_de_params_setter(): new_de = de.DE() try: @@ -49,18 +45,11 @@ def test_de_hyperparams_setter(): assert new_de.F == 0.5 -def test_de_build(): - new_de = de.DE() - - assert new_de.built == True - - def test_de_mutate_agent(): new_de = de.DE() - search_space = search.SearchSpace(n_agents=4, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + search_space = search.SearchSpace(n_agents=4, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) agent = new_de._mutate_agent( search_space.agents[0], search_space.agents[1], search_space.agents[2], search_space.agents[3]) @@ -72,26 +61,9 @@ def test_de_run(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) - - hyperparams = { - 'CR': 0.9, - 'F': 0.7 - } - - new_de = de.DE(hyperparams=hyperparams) - - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_de.run(search_space, new_function, pre_evaluate=hook) + new_de = de.DE() - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 + search_space = 
search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm de failed to converge.' + new_de.update(search_space, square) diff --git a/tests/opytimizer/optimizers/evolutionary/test_ep.py b/tests/opytimizer/optimizers/evolutionary/test_ep.py index f13c40b7..1af229ea 100644 --- a/tests/opytimizer/optimizers/evolutionary/test_ep.py +++ b/tests/opytimizer/optimizers/evolutionary/test_ep.py @@ -1,27 +1,23 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.evolutionary import ep from opytimizer.spaces import search -from opytimizer.utils import constants -np.random.seed(0) - -def test_ep_hyperparams(): - hyperparams = { +def test_ep_params(): + params = { 'bout_size': 0.1, 'clip_ratio': 0.05 } - new_ep = ep.EP(hyperparams=hyperparams) + new_ep = ep.EP(params=params) assert new_ep.bout_size == 0.1 assert new_ep.clip_ratio == 0.05 -def test_ep_hyperparams_setter(): +def test_ep_params_setter(): new_ep = ep.EP() try: @@ -49,63 +45,56 @@ def test_ep_hyperparams_setter(): assert new_ep.clip_ratio == 0.5 -def test_ep_build(): +def test_ep_create_additional_attrs(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + new_ep = ep.EP() + new_ep.create_additional_attrs(search_space) + + try: + new_ep.strategy = 1 + except: + new_ep.strategy = np.array([1]) - assert new_ep.built == True + assert new_ep.strategy == np.array([1]) def test_ep_mutate_parent(): def square(x): return np.sum(x**2) - new_ep = ep.EP() - - search_space = search.SearchSpace(n_agents=4, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - strategy = np.zeros(4) + new_ep = ep.EP() + new_ep.create_additional_attrs(search_space) - agent = 
new_ep._mutate_parent(search_space.agents[0], square, strategy[0]) + agent = new_ep._mutate_parent(search_space.agents[0], 0, square) assert agent.position[0][0] > 0 def test_ep_update_strategy(): - new_ep = ep.EP() + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - strategy = np.zeros((4, 1)) + new_ep = ep.EP() + new_ep.create_additional_attrs(search_space) - new_strategy = new_ep._update_strategy(strategy, [1], [2]) + new_ep._update_strategy(0, [1], [2]) - assert new_strategy[0][0] > 0 + assert new_ep.strategy[0][0] > 0 -def test_ep_run(): +def test_ep_update(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - new_function = function.Function(pointer=square) - - hyperparams = { - 'bout_size': 0.1, - 'clip_ratio': 0.05 - } - - new_ep = ep.EP(hyperparams=hyperparams) - - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_ep.run(search_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 + new_ep = ep.EP() + new_ep.create_additional_attrs(search_space) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm de failed to converge.' 
+ new_ep.update(search_space, square) diff --git a/tests/opytimizer/optimizers/evolutionary/test_es.py b/tests/opytimizer/optimizers/evolutionary/test_es.py index 85e0861d..e500492b 100644 --- a/tests/opytimizer/optimizers/evolutionary/test_es.py +++ b/tests/opytimizer/optimizers/evolutionary/test_es.py @@ -1,24 +1,20 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.evolutionary import es from opytimizer.spaces import search -from opytimizer.utils import constants -np.random.seed(0) - -def test_es_hyperparams(): - hyperparams = { +def test_es_params(): + params = { 'child_ratio': 0.5 } - new_es = es.ES(hyperparams=hyperparams) + new_es = es.ES(params=params) assert new_es.child_ratio == 0.5 -def test_es_hyperparams_setter(): +def test_es_params_setter(): new_es = es.ES() try: @@ -34,62 +30,70 @@ def test_es_hyperparams_setter(): assert new_es.child_ratio == 0.5 -def test_es_build(): +def test_es_create_additional_attrs(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + new_es = es.ES() + new_es.create_additional_attrs(search_space) + + try: + new_es.n_children = 'a' + except: + new_es.n_children = 0 + + assert new_es.n_children == 0 + + try: + new_es.n_children = -1 + except: + new_es.n_children = 0 + + assert new_es.n_children == 0 + + try: + new_es.strategy = 1 + except: + new_es.strategy = np.array([1]) - assert new_es.built == True + assert new_es.strategy == np.array([1]) def test_es_mutate_parent(): def square(x): return np.sum(x**2) - new_es = es.ES() - - search_space = search.SearchSpace(n_agents=4, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - strategy = np.zeros(4) + new_es = es.ES() + new_es.create_additional_attrs(search_space) - agent = new_es._mutate_parent(search_space.agents[0], square, strategy[0]) + agent = 
new_es._mutate_parent(search_space.agents[0], 0, square) assert agent.position[0][0] > 0 def test_es_update_strategy(): - new_es = es.ES() + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - strategy = np.ones((4, 1)) + new_es = es.ES() + new_es.create_additional_attrs(search_space) - new_strategy = new_es._update_strategy(strategy) + new_es._update_strategy(0) - assert new_strategy[0][0] > 0 + assert new_es.strategy[0][0] > 0 -def test_es_run(): +def test_es_update(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - hyperparams = { - 'child_ratio': 0.5 - } - - new_es = es.ES(hyperparams=hyperparams) - - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_es.run(search_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 + new_es = es.ES() + new_es.create_additional_attrs(search_space) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm de failed to converge.' 
+ new_es.update(search_space, square) diff --git a/tests/opytimizer/optimizers/evolutionary/test_ga.py b/tests/opytimizer/optimizers/evolutionary/test_ga.py index 582c51c2..04ced993 100644 --- a/tests/opytimizer/optimizers/evolutionary/test_ga.py +++ b/tests/opytimizer/optimizers/evolutionary/test_ga.py @@ -1,21 +1,17 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.evolutionary import ga from opytimizer.spaces import search -from opytimizer.utils import constants -np.random.seed(0) - -def test_ga_hyperparams(): - hyperparams = { +def test_ga_params(): + params = { 'p_selection': 0.75, 'p_mutation': 0.25, 'p_crossover': 0.5, } - new_ga = ga.GA(hyperparams=hyperparams) + new_ga = ga.GA(params=params) assert new_ga.p_selection == 0.75 @@ -24,7 +20,7 @@ def test_ga_hyperparams(): assert new_ga.p_crossover == 0.5 -def test_ga_hyperparams_setter(): +def test_ga_params_setter(): new_ga = ga.GA() try: @@ -64,50 +60,49 @@ def test_ga_hyperparams_setter(): assert new_ga.p_crossover == 0.5 -def test_ga_build(): +def test_ga_roulette_selection(): new_ga = ga.GA() - assert new_ga.built == True + fitness = [10, 20, 30, 40, 50] + idx = new_ga._roulette_selection(len(fitness), fitness) -def test_ga_update(): - def square(x): - return np.sum(x**2) + assert len(idx) == 4 - new_function = function.Function(pointer=square) - new_ga = ga.GA() +def test_ga_crossover(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[1, 1], upper_bound=[10, 10]) - search_space = search.SearchSpace(n_agents=10, n_iterations=10, - n_variables=2, lower_bound=[1, 1], - upper_bound=[10, 10]) + new_ga = ga.GA() - new_ga._evaluate(search_space, new_function) + alpha, beta = new_ga._crossover( + search_space.agents[0], search_space.agents[1]) - new_ga._update(search_space.agents, new_function) + assert type(alpha).__name__ == 'Agent' + assert type(beta).__name__ == 'Agent' - assert search_space.agents[0].position[0] != 0 +def 
test_ga_mutation(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[1, 1], upper_bound=[10, 10]) -def test_ga_run(): - def square(x): - return np.sum(x**2) + new_ga = ga.GA() - def hook(optimizer, space, function): - return + alpha, beta = new_ga._mutation( + search_space.agents[0], search_space.agents[1]) - new_function = function.Function(pointer=square) + assert type(alpha).__name__ == 'Agent' + assert type(beta).__name__ == 'Agent' - new_ga = ga.GA() - search_space = search.SearchSpace(n_agents=10, n_iterations=30, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) +def test_ga_update(): + def square(x): + return np.sum(x**2) - history = new_ga.run(search_space, new_function, pre_evaluate=hook) + new_ga = ga.GA() - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[1, 1], upper_bound=[10, 10]) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm ga failed to converge.' 
+ new_ga.update(search_space, square) diff --git a/tests/opytimizer/optimizers/evolutionary/test_gp.py b/tests/opytimizer/optimizers/evolutionary/test_gp.py index 48b88636..a1157b8d 100644 --- a/tests/opytimizer/optimizers/evolutionary/test_gp.py +++ b/tests/opytimizer/optimizers/evolutionary/test_gp.py @@ -5,20 +5,17 @@ from opytimizer.core import function from opytimizer.optimizers.evolutionary import gp from opytimizer.spaces import tree -from opytimizer.utils import constants -np.random.seed(0) - -def test_gp_hyperparams(): - hyperparams = { +def test_gp_params(): + params = { 'p_reproduction': 1.0, 'p_mutation': 0.5, 'p_crossover': 0.5, 'prunning_ratio': 0.5 } - new_gp = gp.GP(hyperparams=hyperparams) + new_gp = gp.GP(params=params) assert new_gp.p_reproduction == 1.0 @@ -29,7 +26,7 @@ def test_gp_hyperparams(): assert new_gp.prunning_ratio == 0.5 -def test_gp_hyperparams_setter(): +def test_gp_params_setter(): new_gp = gp.GP() try: @@ -81,12 +78,6 @@ def test_gp_hyperparams_setter(): assert new_gp.prunning_ratio == 0.25 -def test_gp_build(): - new_gp = gp.GP() - - assert new_gp.built == True - - def test_gp_prune_nodes(): new_gp = gp.GP() @@ -104,22 +95,52 @@ def test_gp_prune_nodes(): def test_gp_reproduction(): new_gp = gp.GP() + tree_space = tree.TreeSpace(n_agents=10, n_terminals=2, n_variables=1, + min_depth=1, max_depth=5, + functions=['SUM'], lower_bound=[0], upper_bound=[10]) + + new_gp._reproduction(tree_space) + def test_gp_mutation(): new_gp = gp.GP() + tree_space = tree.TreeSpace(n_agents=10, n_terminals=2, n_variables=1, + min_depth=1, max_depth=5, + functions=['SUM'], lower_bound=[0], upper_bound=[10]) + + new_gp._mutation(tree_space) + def test_gp_mutate(): new_gp = gp.GP() + tree_space = tree.TreeSpace(n_agents=10, n_terminals=2, n_variables=1, + min_depth=1, max_depth=5, + functions=['SUM'], lower_bound=[0], upper_bound=[10]) + + new_gp._mutate(tree_space, tree_space.trees[0], 1) + def test_gp_crossover(): new_gp = gp.GP() + tree_space = 
tree.TreeSpace(n_agents=10, n_terminals=2, n_variables=1, + min_depth=1, max_depth=5, + functions=['SUM'], lower_bound=[0], upper_bound=[10]) + + new_gp._crossover(tree_space) + def test_gp_cross(): new_gp = gp.GP() + tree_space = tree.TreeSpace(n_agents=10, n_terminals=2, n_variables=1, + min_depth=1, max_depth=5, + functions=['SUM'], lower_bound=[0], upper_bound=[10]) + + new_gp._cross(tree_space.trees[0], tree_space.trees[1], 1, 1) + def test_gp_evaluate(): def square(x): @@ -127,51 +148,22 @@ def square(x): new_function = function.Function(pointer=square) - tree_space = tree.TreeSpace(n_trees=1000, n_terminals=2, n_variables=1, - n_iterations=10, min_depth=1, max_depth=5, + tree_space = tree.TreeSpace(n_agents=1000, n_terminals=2, n_variables=1, + min_depth=1, max_depth=5, functions=['SUM'], lower_bound=[0], upper_bound=[10]) new_gp = gp.GP() - new_gp._evaluate(tree_space, new_function) - - for t in tree_space.trees: - print(t) + new_gp.evaluate(tree_space, new_function) assert tree_space.best_agent.fit < sys.float_info.max -def test_gp_run(): - def square(x): - return np.sum(x**2) - - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) - +def test_gp_update(): new_gp = gp.GP() - tree_space = tree.TreeSpace(n_trees=10, n_terminals=2, n_variables=1, - n_iterations=500, min_depth=1, max_depth=2, + tree_space = tree.TreeSpace(n_agents=10, n_terminals=2, n_variables=1, + min_depth=1, max_depth=2, functions=['SUM', 'SUB', 'MUL', 'DIV'], lower_bound=[0], upper_bound=[10]) - history = new_gp.run(tree_space, new_function, pre_evaluate=hook) - - print(tree_space.best_tree) - print(tree_space.best_tree.post_order) - - tree_space = tree.TreeSpace(n_trees=10, n_terminals=2, n_variables=1, - n_iterations=500, min_depth=2, max_depth=3, - functions=['EXP', 'LOG', 'SQRT', 'ABS', 'COS', 'SIN'], lower_bound=[0], upper_bound=[10]) - - history = new_gp.run(tree_space, new_function, pre_evaluate=hook) - - print(tree_space.best_tree) - 
print(tree_space.best_tree.post_order) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 - - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm gp failed to converge.' + new_gp.update(tree_space) diff --git a/tests/opytimizer/optimizers/evolutionary/test_hs.py b/tests/opytimizer/optimizers/evolutionary/test_hs.py index e108e27f..2b7c8311 100644 --- a/tests/opytimizer/optimizers/evolutionary/test_hs.py +++ b/tests/opytimizer/optimizers/evolutionary/test_hs.py @@ -3,19 +3,19 @@ from opytimizer.core import function from opytimizer.optimizers.evolutionary import hs from opytimizer.spaces import search -from opytimizer.utils import constants +from opytimizer.utils import constant np.random.seed(0) -def test_hs_hyperparams(): - hyperparams = { +def test_hs_params(): + params = { 'HMCR': 0.7, 'PAR': 0.7, 'bw': 10.0 } - new_hs = hs.HS(hyperparams=hyperparams) + new_hs = hs.HS(params=params) assert new_hs.HMCR == 0.7 @@ -24,7 +24,7 @@ def test_hs_hyperparams(): assert new_hs.bw == 10.0 -def test_hs_hyperparams_setter(): +def test_hs_params_setter(): new_hs = hs.HS() try: @@ -66,63 +66,38 @@ def test_hs_hyperparams_setter(): assert new_hs.bw == 5 -def test_hs_build(): - new_hs = hs.HS() - - assert new_hs.built == True - - def test_hs_generate_new_harmony(): new_hs = hs.HS() - search_space = search.SearchSpace(n_agents=2, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + search_space = search.SearchSpace(n_agents=2, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) agent = new_hs._generate_new_harmony(search_space.agents) assert agent.fit > 0 -def test_hs_run(): +def test_hs_update(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) - - hyperparams = { - 'HMCR': 0.7, - 'PAR': 0.7, - 'bw': 10.0 - } - - new_hs = hs.HS(hyperparams=hyperparams) - - search_space = 
search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_hs.run(search_space, new_function, pre_evaluate=hook) + new_hs = hs.HS() - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm hs failed to converge.' + new_hs.update(search_space, square) -def test_ihs_hyperparams(): - hyperparams = { +def test_ihs_params(): + params = { 'PAR_min': 0.5, 'PAR_max': 1, 'bw_min': 2, 'bw_max': 5 } - new_ihs = hs.IHS(hyperparams=hyperparams) + new_ihs = hs.IHS(params=params) assert new_ihs.PAR_min == 0.5 @@ -133,7 +108,7 @@ def test_ihs_hyperparams(): assert new_ihs.bw_max == 5 -def test_ihs_hyperparams_setter(): +def test_ihs_params_setter(): new_ihs = hs.IHS() try: @@ -195,42 +170,23 @@ def test_ihs_hyperparams_setter(): assert new_ihs.bw_max == 10.0 -def test_ihs_rebuild(): - new_ihs = hs.IHS() - - assert new_ihs.built == True - - -def test_ihs_run(): +def test_ihs_update(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) - new_ihs = hs.IHS() - search_space = search.SearchSpace(n_agents=20, n_iterations=50, - n_variables=2, lower_bound=[0, 0], - upper_bound=[5, 5]) - - history = new_ihs.run(search_space, new_function, pre_evaluate=hook) + search_space = search.SearchSpace(n_agents=20, n_variables=2, + lower_bound=[0, 0], upper_bound=[5, 5]) - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 - - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm ihs failed to converge.' 
+ new_ihs.update(search_space, square, 1, 10) def test_ghs_generate_new_harmony(): new_ghs = hs.GHS() - search_space = search.SearchSpace(n_agents=2, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + search_space = search.SearchSpace(n_agents=2, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) agent = new_ghs._generate_new_harmony(search_space.agents) @@ -243,8 +199,8 @@ def test_ghs_generate_new_harmony(): assert agent.fit > 0 -def test_sghs_hyperparams(): - hyperparams = { +def test_sghs_params(): + params = { 'LP': 100, 'HMCRm': 0.98, 'PARm': 0.9, @@ -252,7 +208,7 @@ def test_sghs_hyperparams(): 'bw_max': 10 } - new_sghs = hs.SGHS(hyperparams=hyperparams) + new_sghs = hs.SGHS(params=params) assert new_sghs.LP == 100 @@ -265,7 +221,7 @@ def test_sghs_hyperparams(): assert new_sghs.bw_max == 10 -def test_sghs_hyperparams_setter(): +def test_sghs_params_setter(): new_sghs = hs.SGHS() try: @@ -348,18 +304,48 @@ def test_sghs_hyperparams_setter(): assert new_sghs.bw_max == 10.0 -def test_sghs_rebuild(): +def test_sghs_create_additional_attrs(): + search_space = search.SearchSpace(n_agents=2, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + new_sghs = hs.SGHS() + new_sghs.create_additional_attrs(search_space) - assert new_sghs.built == True + try: + new_sghs.lp = 'a' + except: + new_sghs.lp = 1 + + assert new_sghs.lp == 1 + + try: + new_sghs.lp = -1 + except: + new_sghs.lp = 1 + + assert new_sghs.lp == 1 + + try: + new_sghs.HMCR_history = 'a' + except: + new_sghs.HMCR_history = [] + + assert new_sghs.HMCR_history == [] + + try: + new_sghs.PAR_history = 'a' + except: + new_sghs.PAR_history = [] + + assert new_sghs.PAR_history == [] def test_sghs_generate_new_harmony(): - new_sghs = hs.SGHS() + search_space = search.SearchSpace(n_agents=2, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - search_space = search.SearchSpace(n_agents=2, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 
10]) + new_sghs = hs.SGHS() + new_sghs.create_additional_attrs(search_space) agent = new_sghs._generate_new_harmony(search_space.agents) @@ -376,37 +362,32 @@ def test_sghs_run(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) + search_space = search.SearchSpace(n_agents=2, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - new_sghs = hs.SGHS(hyperparams={'LP': 10}) - - search_space = search.SearchSpace(n_agents=20, n_iterations=50, - n_variables=2, lower_bound=[0, 0], - upper_bound=[5, 5]) + new_sghs = hs.SGHS() + new_sghs.create_additional_attrs(search_space) - history = new_sghs.run(search_space, new_function, pre_evaluate=hook) + new_sghs.update(search_space, square, 1, 10) + new_sghs.update(search_space, square, 6, 10) - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 + new_sghs.lp = 1 + new_sghs.LP = 1 - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm ihs failed to converge.' 
+ new_sghs.update(search_space, square, 1, 10) -def test_nghs_hyperparams(): - hyperparams = { +def test_nghs_params(): + params = { 'pm': 0.1 } - new_nghs = hs.NGHS(hyperparams=hyperparams) + new_nghs = hs.NGHS(params=params) assert new_nghs.pm == 0.1 -def test_nghs_hyperparams_setter(): +def test_nghs_params_setter(): new_nghs = hs.NGHS() try: @@ -424,16 +405,11 @@ def test_nghs_hyperparams_setter(): assert new_nghs.pm == 0.1 -def test_nghs_rebuild(): - new_nghs = hs.NGHS() - - assert new_nghs.built == True - - def test_nghs_generate_new_harmony(): new_nghs = hs.NGHS() + new_nghs.pm = 1 - search_space = search.SearchSpace(n_agents=2, n_iterations=100, + search_space = search.SearchSpace(n_agents=2, n_variables=2, lower_bound=[0, 0], upper_bound=[10, 10]) @@ -447,15 +423,13 @@ def test_nghs_update(): def square(x): return np.sum(x**2) - new_function = function.Function(pointer=square) - new_nghs = hs.NGHS() - search_space = search.SearchSpace(n_agents=2, n_iterations=100, + search_space = search.SearchSpace(n_agents=2, n_variables=2, lower_bound=[0, 0], upper_bound=[10, 10]) - new_nghs._update(search_space.agents, new_function) + new_nghs.update(search_space, square) assert search_space.agents[0].fit > 0 @@ -463,11 +437,11 @@ def square(x): def test_goghs_generate_opposition_harmony(): new_goghs = hs.GOGHS() - search_space = search.SearchSpace(n_agents=2, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + search_space = search.SearchSpace(n_agents=2, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - agent = new_goghs._generate_opposition_harmony(search_space.agents[0], search_space.agents) + agent = new_goghs._generate_opposition_harmony( + search_space.agents[0], search_space.agents) assert agent.fit > 0 @@ -476,14 +450,11 @@ def test_goghs_update(): def square(x): return np.sum(x**2) - new_function = function.Function(pointer=square) - new_goghs = hs.GOGHS() - search_space = search.SearchSpace(n_agents=2, 
n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + search_space = search.SearchSpace(n_agents=2, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - new_goghs._update(search_space.agents, new_function) + new_goghs.update(search_space, square) assert search_space.agents[0].fit > 0 diff --git a/tests/opytimizer/optimizers/evolutionary/test_iwo.py b/tests/opytimizer/optimizers/evolutionary/test_iwo.py index d59adce3..333a2d90 100644 --- a/tests/opytimizer/optimizers/evolutionary/test_iwo.py +++ b/tests/opytimizer/optimizers/evolutionary/test_iwo.py @@ -1,15 +1,13 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.evolutionary import iwo from opytimizer.spaces import search -from opytimizer.utils import constants np.random.seed(0) -def test_iwo_hyperparams(): - hyperparams = { +def test_iwo_params(): + params = { 'min_seeds': 0, 'max_seeds': 5, 'e': 2, @@ -17,7 +15,7 @@ def test_iwo_hyperparams(): 'init_sigma': 3 } - new_iwo = iwo.IWO(hyperparams=hyperparams) + new_iwo = iwo.IWO(params=params) assert new_iwo.min_seeds == 0 @@ -30,7 +28,7 @@ def test_iwo_hyperparams(): assert new_iwo.init_sigma == 3 -def test_iwo_hyperparams_setter(): +def test_iwo_params_setter(): new_iwo = iwo.IWO() try: @@ -98,51 +96,45 @@ def test_iwo_hyperparams_setter(): assert new_iwo.init_sigma == 2.0 + try: + new_iwo.sigma = 'f' + except: + new_iwo.sigma = 1 + + assert new_iwo.sigma == 1 + -def test_iwo_build(): +def test_iwo_spatial_dispersal(): new_iwo = iwo.IWO() - assert new_iwo.built == True + new_iwo._spatial_dispersal(1, 10) + assert new_iwo.sigma == 2.43019 -def test_iwo_update(): + +def test_iwo_produce_offspring(): def square(x): return np.sum(x**2) - assert square(2) == 4 - - new_function = function.Function(pointer=square) + search_space = search.SearchSpace(n_agents=2, n_variables=2, + lower_bound=[1, 1], upper_bound=[10, 10]) new_iwo = iwo.IWO() - search_space = search.SearchSpace(n_agents=2, 
n_iterations=10, - n_variables=2, lower_bound=[1, 1], - upper_bound=[10, 10]) + agent = new_iwo._produce_offspring(search_space.agents[0], square) - new_iwo._update(search_space.agents, search_space.n_agents, new_function) + assert type(agent).__name__ == 'Agent' - assert search_space.agents[0].position[0] != 0 - -def test_iwo_run(): +def test_iwo_update(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) - new_iwo = iwo.IWO() + new_iwo.min_seeds = 5 + new_iwo.max_seeds = 20 - search_space = search.SearchSpace(n_agents=10, n_iterations=30, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_iwo.run(search_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 + search_space = search.SearchSpace(n_agents=5, n_variables=2, + lower_bound=[1, 1], upper_bound=[10, 10]) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm iwo failed to converge.' 
+ new_iwo.update(search_space, square, 1, 10) diff --git a/tests/opytimizer/optimizers/misc/test_aoa.py b/tests/opytimizer/optimizers/misc/test_aoa.py index 70ad60db..66195ddc 100644 --- a/tests/opytimizer/optimizers/misc/test_aoa.py +++ b/tests/opytimizer/optimizers/misc/test_aoa.py @@ -1,22 +1,16 @@ -import numpy as np - -from opytimizer.core import function from opytimizer.optimizers.misc import aoa from opytimizer.spaces import search -from opytimizer.utils import constants - -np.random.seed(0) -def test_aoa_hyperparams(): - hyperparams = { +def test_aoa_params(): + params = { 'a_min': 0.2, 'a_max': 1.0, 'alpha': 5.0, 'mu': 0.499 } - new_aoa = aoa.AOA(hyperparams=hyperparams) + new_aoa = aoa.AOA(params=params) assert new_aoa.a_min == 0.2 @@ -27,7 +21,7 @@ def test_aoa_hyperparams(): assert new_aoa.mu == 0.499 -def test_aoa_hyperparams_setter(): +def test_aoa_params_setter(): new_aoa = aoa.AOA() try: @@ -84,38 +78,10 @@ def test_aoa_hyperparams_setter(): assert new_aoa.mu == 0.499 -def test_aoa_build(): +def test_aoa_update(): new_aoa = aoa.AOA() - assert new_aoa.built == True - - -def test_aoa_run(): - def square(x): - return np.sum(x**2) - - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) - - hyperparams = { - 'a_min': 0.2, - 'a_max': 1.0, - 'alpha': 5, - 'mu': 0.499 - } - - new_aoa = aoa.AOA(hyperparams=hyperparams) - - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_aoa.run(search_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm aoa failed to converge.' 
+ new_aoa.update(search_space, 1, 10) diff --git a/tests/opytimizer/optimizers/misc/test_cem.py b/tests/opytimizer/optimizers/misc/test_cem.py index 30a0dc9f..76a3e522 100644 --- a/tests/opytimizer/optimizers/misc/test_cem.py +++ b/tests/opytimizer/optimizers/misc/test_cem.py @@ -3,25 +3,22 @@ from opytimizer.core import function from opytimizer.optimizers.misc import cem from opytimizer.spaces import search -from opytimizer.utils import constants -np.random.seed(0) - -def test_cem_hyperparams(): - hyperparams = { +def test_cem_params(): + params = { 'n_updates': 5, 'alpha': 0.7, } - new_cem = cem.CEM(hyperparams=hyperparams) + new_cem = cem.CEM(params=params) assert new_cem.n_updates == 5 assert new_cem.alpha == 0.7 -def test_cem_hyperparams_setter(): +def test_cem_params_setter(): new_cem = cem.CEM() try: @@ -49,69 +46,75 @@ def test_cem_hyperparams_setter(): assert new_cem.alpha == 0.5 -def test_cem_build(): +def test_cem_create_additional_attrs(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + new_cem = cem.CEM() + new_cem.create_additional_attrs(search_space) - assert new_cem.built == True + try: + new_cem.mean = 1 + except: + new_cem.mean = np.array([1]) + + assert new_cem.mean == 1 + + try: + new_cem.std = 1 + except: + new_cem.std = np.array([1]) + + assert new_cem.std == 1 def test_cem_create_new_samples(): def square(x): return np.sum(x**2) - new_cem = cem.CEM() - - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - new_cem._create_new_samples( - search_space.agents, square, np.array([1, 1]), np.array([1, 1])) + new_cem = cem.CEM() + new_cem.create_additional_attrs(search_space) + new_cem._create_new_samples(search_space.agents, square) def test_cem_update_mean(): + search_space = search.SearchSpace(n_agents=10, 
n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + new_cem = cem.CEM() + new_cem.create_additional_attrs(search_space) - mean = new_cem._update_mean(np.array([1, 1]), 1) + new_cem._update_mean(np.array([1, 1])) - assert mean != 0 + assert new_cem.mean[0] != 0 def test_cem_update_std(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + new_cem = cem.CEM() + new_cem.create_additional_attrs(search_space) - std = new_cem._update_std(np.array([1, 1]), 1, 0.25) + new_cem._update_std(np.array([1, 1])) - assert std != 0 + assert new_cem.std[0] != 0 -def test_cem_run(): +def test_cem_update(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return - new_function = function.Function(pointer=square) - hyperparams = { - 'n_updates': 5, - 'alpha': 0.7 - } + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - new_cem = cem.CEM(hyperparams=hyperparams) - - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_cem.run(search_space, new_function, pre_evaluate=hook) - - history = new_cem.run(search_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 + new_cem = cem.CEM() + new_cem.create_additional_attrs(search_space) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm cem failed to converge.' 
+ new_cem.update(search_space, new_function) diff --git a/tests/opytimizer/optimizers/misc/test_doa.py b/tests/opytimizer/optimizers/misc/test_doa.py index 06568144..7118bd97 100644 --- a/tests/opytimizer/optimizers/misc/test_doa.py +++ b/tests/opytimizer/optimizers/misc/test_doa.py @@ -1,24 +1,20 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.misc import doa from opytimizer.spaces import search -from opytimizer.utils import constants -np.random.seed(0) - -def test_doa_hyperparams(): - hyperparams = { +def test_doa_params(): + params = { 'r': 1.0 } - new_doa = doa.DOA(hyperparams=hyperparams) + new_doa = doa.DOA(params=params) assert new_doa.r == 1.0 - -def test_doa_hyperparams_setter(): + +def test_doa_params_setter(): new_doa = doa.DOA() try: @@ -34,31 +30,34 @@ def test_doa_hyperparams_setter(): assert new_doa.r == 1.0 -def test_doa_build(): +def test_doa_create_additional_attrs(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + new_doa = doa.DOA() + new_doa.create_additional_attrs(search_space) - assert new_doa.built == True + try: + new_doa.chaotic_map = 1 + except: + new_doa.chaotic_map = np.array([1]) + assert new_doa.chaotic_map == 1 -def test_doa_run(): - def square(x): - return np.sum(x**2) - def hook(optimizer, space, function): - return +def test_doa_calculate_chaotic_map(): + new_doa = doa.DOA() - new_function = function.Function(pointer=square) + c_map = new_doa._calculate_chaotic_map(0, 1) - new_doa = doa.DOA() + assert c_map.shape == (1,) - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - history = new_doa.run(search_space, new_function, pre_evaluate=hook) +def test_doa_update(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 + new_doa = 
doa.DOA() + new_doa.create_additional_attrs(search_space) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm doa failed to converge.' + new_doa.update(search_space) diff --git a/tests/opytimizer/optimizers/misc/test_gs.py b/tests/opytimizer/optimizers/misc/test_gs.py index 9eb76b72..aaa9d4b3 100644 --- a/tests/opytimizer/optimizers/misc/test_gs.py +++ b/tests/opytimizer/optimizers/misc/test_gs.py @@ -1,37 +1,7 @@ -import numpy as np - -from opytimizer.core import function from opytimizer.optimizers.misc import gs -from opytimizer.spaces import grid -from opytimizer.utils import constants - -np.random.seed(0) -def test_gs_build(): +def test_gs(): new_gs = gs.GS() assert new_gs.built == True - - -def test_gs_run(): - def square(x): - return np.sum(x) - - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) - - new_gs = gs.GS() - - grid_space = grid.GridSpace(n_variables=2, step=(0.1, 0.1), lower_bound=( - 0, 0), upper_bound=(5, 5)) - - history = new_gs.run(grid_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 - - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm gs failed to converge.' 
diff --git a/tests/opytimizer/optimizers/misc/test_hc.py b/tests/opytimizer/optimizers/misc/test_hc.py index fc1127bc..6e86dfce 100644 --- a/tests/opytimizer/optimizers/misc/test_hc.py +++ b/tests/opytimizer/optimizers/misc/test_hc.py @@ -3,25 +3,25 @@ from opytimizer.core import function from opytimizer.optimizers.misc import hc from opytimizer.spaces import search -from opytimizer.utils import constants +from opytimizer.utils import constant np.random.seed(0) -def test_hc_hyperparams(): - hyperparams = { +def test_hc_params(): + params = { 'r_mean': 0, 'r_var': 0.1, } - new_hc = hc.HC(hyperparams=hyperparams) + new_hc = hc.HC(params=params) assert new_hc.r_mean == 0 assert new_hc.r_var == 0.1 -def test_hc_hyperparams_setter(): +def test_hc_params_setter(): new_hc = hc.HC() try: @@ -44,38 +44,10 @@ def test_hc_hyperparams_setter(): assert new_hc.r_var == 2 -def test_hc_build(): - new_hc = hc.HC() - - assert new_hc.built == True - - -def test_hc_run(): - def square(x): - return np.sum(x**2) - - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) - - hyperparams = { - 'r_mean': 0, - 'r_var': 0.1 - } - - new_hc = hc.HC(hyperparams=hyperparams) - - search_space = search.SearchSpace(n_agents=50, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) +def test_hc_update(): + search_space = search.SearchSpace(n_agents=50, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - history = new_hc.run(search_space, new_function, pre_evaluate=hook) - - history = new_hc.run(search_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 + new_hc = hc.HC() - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm hc failed to converge.' 
+ new_hc.update(search_space) diff --git a/tests/opytimizer/optimizers/population/test_aeo.py b/tests/opytimizer/optimizers/population/test_aeo.py index 2f422ba6..d4ce1100 100644 --- a/tests/opytimizer/optimizers/population/test_aeo.py +++ b/tests/opytimizer/optimizers/population/test_aeo.py @@ -1,38 +1,90 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.population import aeo from opytimizer.spaces import search -from opytimizer.utils import constants -np.random.seed(0) +def test_aeo_production(): + new_aeo = aeo.AEO() + + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + + a = new_aeo._production( + search_space.agents[0], search_space.best_agent, 1, 10) + + assert type(a).__name__ == 'Agent' + + +def test_aeo_herbivore_consumption(): + new_aeo = aeo.AEO() + + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) -def test_aeo_build(): + a = new_aeo._herbivore_consumption( + search_space.agents[0], search_space.agents[1], 0.5) + + assert type(a).__name__ == 'Agent' + + +def test_aeo_omnivore_consumption(): new_aeo = aeo.AEO() - assert new_aeo.built == True + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + + a = new_aeo._omnivore_consumption( + search_space.agents[0], search_space.agents[1], search_space.agents[2], 0.5) + + assert type(a).__name__ == 'Agent' + + +def test_aeo_carnivore_consumption(): + new_aeo = aeo.AEO() + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) -def test_aeo_run(): + a = new_aeo._carnivore_consumption( + search_space.agents[0], search_space.agents[1], 0.5) + + assert type(a).__name__ == 'Agent' + + +def test_aeo_update_composition(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return + new_aeo = aeo.AEO() + + search_space = 
search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + + new_aeo._update_composition( + search_space.agents, search_space.best_agent, square, 1, 10) - new_function = function.Function(pointer=square) + +def test_aeo_update_decomposition(): + def square(x): + return np.sum(x**2) new_aeo = aeo.AEO() - search_space = search.SearchSpace(n_agents=10, n_iterations=30, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + + new_aeo._update_decomposition( + search_space.agents, search_space.best_agent, square) - history = new_aeo.run(search_space, new_function, pre_evaluate=hook) - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 +def test_aeo_update(): + def square(x): + return np.sum(x**2) + + new_aeo = aeo.AEO() + + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm aeo failed to converge.' 
+ new_aeo.update(search_space, square, 1, 10) diff --git a/tests/opytimizer/optimizers/population/test_ao.py b/tests/opytimizer/optimizers/population/test_ao.py index c99f559c..87517ffe 100644 --- a/tests/opytimizer/optimizers/population/test_ao.py +++ b/tests/opytimizer/optimizers/population/test_ao.py @@ -1,15 +1,11 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.population import ao from opytimizer.spaces import search -from opytimizer.utils import constants -np.random.seed(0) - -def test_ao_hyperparams(): - hyperparams = { +def test_ao_params(): + params = { 'alpha': 0.1, 'delta': 0.1, 'n_cycles': 10, @@ -17,7 +13,7 @@ def test_ao_hyperparams(): 'w': 0.005 } - new_ao = ao.AO(hyperparams=hyperparams) + new_ao = ao.AO(params=params) assert new_ao.alpha == 0.1 @@ -30,7 +26,7 @@ def test_ao_hyperparams(): assert new_ao.w == 0.005 -def test_ao_hyperparams_setter(): +def test_ao_params_setter(): new_ao = ao.AO() try: @@ -80,51 +76,26 @@ def test_ao_hyperparams_setter(): assert new_ao.U == 0.00565 try: - new_ao.U = 'e' + new_ao.w = 'e' except: - new_ao.U = 0.005 + new_ao.w = 0.005 try: - new_ao.U = -1 + new_ao.w = -1 except: - new_ao.U = 0.005 - - assert new_ao.U == 0.005 + new_ao.w = 0.005 - -def test_ao_build(): - new_ao = ao.AO() - - assert new_ao.built == True + assert new_ao.w == 0.005 -def test_ao_run(): +def test_ao_update(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) - - hyperparams = { - 'alpha': 0.1, - 'delta': 0.1, - 'n_cycles': 5, - 'U': 0.00565, - 'w': 0.005 - } - - new_ao = ao.AO(hyperparams=hyperparams) - - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_ao.run(search_space, new_function, pre_evaluate=hook) + new_ao = ao.AO() - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 + search_space = 
search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm ao failed to converge.' + new_ao.update(search_space, square, 1, 10) + new_ao.update(search_space, square, 8, 10) diff --git a/tests/opytimizer/optimizers/population/test_coa.py b/tests/opytimizer/optimizers/population/test_coa.py index d0c8989f..f70893e4 100644 --- a/tests/opytimizer/optimizers/population/test_coa.py +++ b/tests/opytimizer/optimizers/population/test_coa.py @@ -1,24 +1,22 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.population import coa from opytimizer.spaces import search -from opytimizer.utils import constants np.random.seed(0) -def test_coa_hyperparams(): - hyperparams = { +def test_coa_params(): + params = { 'n_p': 2 } - new_coa = coa.COA(hyperparams=hyperparams) + new_coa = coa.COA(params=params) assert new_coa.n_p == 2 -def test_coa_hyperparams_setter(): +def test_coa_params_setter(): new_coa = coa.COA() try: @@ -26,32 +24,68 @@ def test_coa_hyperparams_setter(): except: new_coa.n_p = 2 + assert new_coa.n_p == 2 + + try: + new_coa.n_p = -1 + except: + new_coa.n_p = 2 + + assert new_coa.n_p == 2 + + +def test_coa_create_additional_attrs(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) -def test_coa_build(): new_coa = coa.COA() + new_coa.create_additional_attrs(search_space) - assert new_coa.built == True + try: + new_coa.n_c = 'a' + except: + new_coa.n_c = 1 + assert new_coa.n_c == 1 -def test_coa_run(): - def square(x): - return np.sum(x**2) + try: + new_coa.n_c = -1 + except: + new_coa.n_c = 1 + + assert new_coa.n_c == 1 - def hook(optimizer, space, function): - return - new_function = function.Function(pointer=square) +def test_coa_get_agents_from_pack(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], 
upper_bound=[10, 10]) new_coa = coa.COA() + new_coa.create_additional_attrs(search_space) + + agents = new_coa._get_agents_from_pack(search_space.agents, 0) + + assert len(agents) == 5 - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - history = new_coa.run(search_space, new_function, pre_evaluate=hook) +def test_coa_transition_packs(): + search_space = search.SearchSpace(n_agents=200, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + + new_coa = coa.COA() + new_coa.create_additional_attrs(search_space) + + new_coa._transition_packs(search_space.agents) - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm coa failed to converge.' +def test_coa_update(): + def square(x): + return np.sum(x**2) + + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + + new_coa = coa.COA() + new_coa.create_additional_attrs(search_space) + + new_coa.update(search_space, square) diff --git a/tests/opytimizer/optimizers/population/test_epo.py b/tests/opytimizer/optimizers/population/test_epo.py index c91110af..d12e2738 100644 --- a/tests/opytimizer/optimizers/population/test_epo.py +++ b/tests/opytimizer/optimizers/population/test_epo.py @@ -1,27 +1,21 @@ -import numpy as np - -from opytimizer.core import function from opytimizer.optimizers.population import epo from opytimizer.spaces import search -from opytimizer.utils import constants - -np.random.seed(0) -def test_epo_hyperparams(): - hyperparams = { +def test_epo_params(): + params = { 'f': 2.0, 'l': 1.5 } - new_epo = epo.EPO(hyperparams=hyperparams) + new_epo = epo.EPO(params=params) assert new_epo.f == 2.0 assert new_epo.l == 1.5 -def test_epo_hyperparams_setter(): +def test_epo_params_setter(): new_epo = epo.EPO() try: @@ -35,31 +29,10 @@ def 
test_epo_hyperparams_setter(): new_epo.l = 1.5 -def test_epo_build(): +def test_epo_update(): new_epo = epo.EPO() - assert new_epo.built == True - - -def test_epo_run(): - def square(x): - return np.sum(x**2) - - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) - - new_epo = epo.EPO() - - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_epo.run(search_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm epo failed to converge.' + new_epo.update(search_space, 1, 10) diff --git a/tests/opytimizer/optimizers/population/test_gco.py b/tests/opytimizer/optimizers/population/test_gco.py index ad6485f0..2b0da897 100644 --- a/tests/opytimizer/optimizers/population/test_gco.py +++ b/tests/opytimizer/optimizers/population/test_gco.py @@ -1,27 +1,25 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.population import gco from opytimizer.spaces import search -from opytimizer.utils import constants np.random.seed(0) -def test_gco_hyperparams(): - hyperparams = { +def test_gco_params(): + params = { 'CR': 0.7, 'F': 1.25 } - new_gco = gco.GCO(hyperparams=hyperparams) + new_gco = gco.GCO(params=params) assert new_gco.CR == 0.7 assert new_gco.F == 1.25 -def test_gco_hyperparams_setter(): +def test_gco_params_setter(): new_gco = gco.GCO() try: @@ -49,18 +47,34 @@ def test_gco_hyperparams_setter(): assert new_gco.F == 1.5 -def test_gco_build(): +def test_gco_create_additional_attrs(): + search_space = search.SearchSpace(n_agents=4, n_variables=2, + lower_bound=[1, 1], upper_bound=[10, 10]) + new_gco = gco.GCO() + 
new_gco.create_additional_attrs(search_space) + + try: + new_gco.life = 1 + except: + new_gco.life = np.array([1]) + + assert new_gco.life == np.array([1]) - assert new_gco.built == True + try: + new_gco.counter = 1 + except: + new_gco.counter = np.array([1]) + + assert new_gco.counter == np.array([1]) def test_gco_mutate_cell(): - new_gco = gco.GCO() + search_space = search.SearchSpace(n_agents=4, n_variables=2, + lower_bound=[1, 1], upper_bound=[10, 10]) - search_space = search.SearchSpace(n_agents=4, n_iterations=10, - n_variables=2, lower_bound=[1, 1], - upper_bound=[10, 10]) + new_gco = gco.GCO() + new_gco.create_additional_attrs(search_space) cell = new_gco._mutate_cell( search_space.agents[0], search_space.agents[1], search_space.agents[2], search_space.agents[3]) @@ -68,42 +82,39 @@ def test_gco_mutate_cell(): assert cell.position[0][0] != 0 -def test_gco_update(): +def test_gco_dark_zone(): def square(x): return np.sum(x**2) - new_function = function.Function(pointer=square) + search_space = search.SearchSpace(n_agents=4, n_variables=2, + lower_bound=[1, 1], upper_bound=[10, 10]) new_gco = gco.GCO() + new_gco.create_additional_attrs(search_space) - search_space = search.SearchSpace(n_agents=4, n_iterations=10, - n_variables=2, lower_bound=[1, 1], - upper_bound=[10, 10]) + new_gco._dark_zone(search_space.agents, square) - new_gco._update(search_space.agents, new_function, np.array([70, 70, 70, 70]), np.array([1, 1, 1, 1])) - assert search_space.agents[0].position[0] != 0 +def test_gco_light_zone(): + search_space = search.SearchSpace(n_agents=4, n_variables=2, + lower_bound=[1, 1], upper_bound=[10, 10]) + + new_gco = gco.GCO() + new_gco.create_additional_attrs(search_space) + + new_gco._light_zone(search_space.agents) -def test_gco_run(): +def test_gco_update(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) + search_space = search.SearchSpace(n_agents=4, 
n_variables=2, + lower_bound=[1, 1], upper_bound=[10, 10]) new_gco = gco.GCO() + new_gco.create_additional_attrs(search_space) - search_space = search.SearchSpace(n_agents=10, n_iterations=30, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_gco.run(search_space, new_function, pre_evaluate=hook) + new_gco.update(search_space, square) - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 - - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm gco failed to converge.' + assert search_space.agents[0].position[0] != 0 diff --git a/tests/opytimizer/optimizers/population/test_gwo.py b/tests/opytimizer/optimizers/population/test_gwo.py index 7ce49f3f..98f229ed 100644 --- a/tests/opytimizer/optimizers/population/test_gwo.py +++ b/tests/opytimizer/optimizers/population/test_gwo.py @@ -1,38 +1,27 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.population import gwo from opytimizer.spaces import search -from opytimizer.utils import constants np.random.seed(0) -def test_gwo_build(): +def test_gwo_calculate_coefficients(): new_gwo = gwo.GWO() - assert new_gwo.built == True + A, C = new_gwo._calculate_coefficients(1) + assert A[0] != 0 + assert C[0] != 0 -def test_gwo_run(): + +def test_gwo_update(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) - new_gwo = gwo.GWO() - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_gwo.run(search_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm gwo 
failed to converge.' + new_gwo.update(search_space, square, 1, 10) diff --git a/tests/opytimizer/optimizers/population/test_hho.py b/tests/opytimizer/optimizers/population/test_hho.py index d1768802..9d630a12 100644 --- a/tests/opytimizer/optimizers/population/test_hho.py +++ b/tests/opytimizer/optimizers/population/test_hho.py @@ -1,38 +1,52 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.population import hho from opytimizer.spaces import search -from opytimizer.utils import constants np.random.seed(0) -def test_hho_build(): +def test_hho_calculate_initial_coefficients(): new_hho = hho.HHO() - assert new_hho.built == True + E, J = new_hho._calculate_initial_coefficients(1, 10) + assert E[0] != 0 + assert J[0] != 0 -def test_hho_run(): + +def test_hho_exploration_phase(): + new_hho = hho.HHO() + + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + + new_hho._exploration_phase( + search_space.agents, search_space.agents[0], search_space.best_agent) + + +def test_hho_exploitation_phase(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) + assert square(2) == 4 new_hho = hho.HHO() - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - history = new_hho.run(search_space, new_function, pre_evaluate=hook) + new_hho._exploitation_phase( + 1, 1, search_space.agents, search_space.agents[0], search_space.best_agent, square) + + +def test_hho_update(): + def square(x): + return np.sum(x**2) + + new_hho = hho.HHO() - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - best_fitness = 
history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm hho failed to converge.' + new_hho.update(search_space, square, 1, 10) diff --git a/tests/opytimizer/optimizers/science/test_aso.py b/tests/opytimizer/optimizers/science/test_aso.py index 0cca1a51..ca32329f 100644 --- a/tests/opytimizer/optimizers/science/test_aso.py +++ b/tests/opytimizer/optimizers/science/test_aso.py @@ -1,27 +1,23 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.science import aso from opytimizer.spaces import search -from opytimizer.utils import constants -np.random.seed(0) - -def test_aso_hyperparams(): - hyperparams = { +def test_aso_params(): + params = { 'alpha': 50.0, 'beta': 0.2 } - new_aso = aso.ASO(hyperparams=hyperparams) + new_aso = aso.ASO(params=params) assert new_aso.alpha == 50.0 assert new_aso.beta == 0.2 -def test_aso_hyperparams_setter(): +def test_aso_params_setter(): new_aso = aso.ASO() try: @@ -42,31 +38,61 @@ def test_aso_hyperparams_setter(): assert new_aso.beta == 0.2 -def test_aso_build(): +def test_aso_create_additional_attrs(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + + new_aso = aso.ASO() + new_aso.create_additional_attrs(search_space) + + try: + new_aso.velocity = 1 + except: + new_aso.velocity = np.array([1]) + + assert new_aso.velocity == np.array([1]) + + +def test_aso_calculate_mass(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + new_aso = aso.ASO() + new_aso.create_additional_attrs(search_space) + + mass = new_aso._calculate_mass(search_space.agents) + + assert mass[0] == 0.1 - assert new_aso.built == True +def test_aso_calculate_potential(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) -def test_aso_run(): - def square(x): - return np.sum(x**2) + new_aso = aso.ASO() + 
new_aso.create_additional_attrs(search_space) + + new_aso._calculate_potential( + search_space.agents[0], search_space.agents[1], np.array([1]), 1, 10) - def hook(optimizer, space, function): - return - new_function = function.Function(pointer=square) +def test_aso_calculate_acceleration(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) new_aso = aso.ASO() + new_aso.create_additional_attrs(search_space) - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + mass = new_aso._calculate_mass(search_space.agents) + new_aso._calculate_acceleration( + search_space.agents, search_space.best_agent, mass, 1, 10) - history = new_aso.run(search_space, new_function, pre_evaluate=hook) - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 +def test_aso_update(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + + new_aso = aso.ASO() + new_aso.create_additional_attrs(search_space) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm aso failed to converge.' 
+ new_aso.update(search_space, 1, 10) diff --git a/tests/opytimizer/optimizers/science/test_bh.py b/tests/opytimizer/optimizers/science/test_bh.py index 26dc72db..dab556ac 100644 --- a/tests/opytimizer/optimizers/science/test_bh.py +++ b/tests/opytimizer/optimizers/science/test_bh.py @@ -3,15 +3,6 @@ from opytimizer.core import function from opytimizer.optimizers.science import bh from opytimizer.spaces import search -from opytimizer.utils import constants - -np.random.seed(0) - - -def test_bh_build(): - new_bh = bh.BH() - - assert new_bh.built == True def test_bh_update_position(): @@ -22,9 +13,8 @@ def square(x): new_bh = bh.BH() - search_space = search.SearchSpace(n_agents=20, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + search_space = search.SearchSpace(n_agents=20, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) cost = new_bh._update_position( search_space.agents, search_space.best_agent, new_function) @@ -35,34 +25,21 @@ def square(x): def test_bh_event_horizon(): new_bh = bh.BH() - search_space = search.SearchSpace(n_agents=20, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + search_space = search.SearchSpace(n_agents=20, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) new_bh._event_horizon(search_space.agents, search_space.best_agent, 10) assert search_space.best_agent.fit != 0 -def test_bh_run(): +def test_bh_update(): def square(x): - return np.sum(x) - - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) + return np.sum(x**2) new_bh = bh.BH() - search_space = search.SearchSpace(n_agents=10, n_iterations=50, - n_variables=2, lower_bound=[0, 0], - upper_bound=[5, 5]) - - history = new_bh.run(search_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 + search_space = search.SearchSpace(n_agents=20, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - 
best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm bh failed to converge.' + new_bh.update(search_space, square) diff --git a/tests/opytimizer/optimizers/science/test_efo.py b/tests/opytimizer/optimizers/science/test_efo.py index 6259458e..786d3369 100644 --- a/tests/opytimizer/optimizers/science/test_efo.py +++ b/tests/opytimizer/optimizers/science/test_efo.py @@ -1,22 +1,20 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.science import efo from opytimizer.spaces import search -from opytimizer.utils import constants np.random.seed(0) -def test_efo_hyperparams(): - hyperparams = { +def test_efo_params(): + params = { 'positive_field': 0.1, 'negative_field': 0.5, 'ps_ratio': 0.1, 'r_ratio': 0.4 } - new_efo = efo.EFO(hyperparams=hyperparams) + new_efo = efo.EFO(params=params) assert new_efo.positive_field == 0.1 @@ -26,8 +24,12 @@ def test_efo_hyperparams(): assert new_efo.r_ratio == 0.4 + assert new_efo.phi == (1 + np.sqrt(5)) / 2 -def test_efo_hyperparams_setter(): + assert new_efo.RI == 0 + + +def test_efo_params_setter(): new_efo = efo.EFO() try: @@ -83,11 +85,24 @@ def test_efo_hyperparams_setter(): assert new_efo.r_ratio == 0.25 + try: + new_efo.phi = 'e' + except: + new_efo.phi = (1 + np.sqrt(5)) / 2 -def test_efo_build(): - new_efo = efo.EFO() + assert new_efo.phi == (1 + np.sqrt(5)) / 2 - assert new_efo.built == True + try: + new_efo.RI = 'f' + except: + new_efo.RI = 0 + + try: + new_efo.RI = -1 + except: + new_efo.RI = 0 + + assert new_efo.RI == 0 def test_efo_calculate_indexes(): @@ -104,38 +119,9 @@ def test_efo_update(): def square(x): return np.sum(x**2) - new_function = function.Function(pointer=square) - new_efo = efo.EFO() - search_space = search.SearchSpace(n_agents=2, n_iterations=10, - n_variables=2, lower_bound=[1, 1], - upper_bound=[10, 10]) - - new_efo._update(search_space.agents, new_function, 1, 1) - - assert search_space.agents[0].position[0] != 0 - 
- -def test_efo_run(): - def square(x): - return np.sum(x**2) - - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) - - new_efo = efo.EFO() - - search_space = search.SearchSpace(n_agents=10, n_iterations=30, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_efo.run(search_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 + search_space = search.SearchSpace(n_agents=2, n_variables=2, + lower_bound=[1, 1], upper_bound=[10, 10]) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm efo failed to converge.' + new_efo.update(search_space, square) diff --git a/tests/opytimizer/optimizers/science/test_eo.py b/tests/opytimizer/optimizers/science/test_eo.py index e69de29b..e0696ce1 100644 --- a/tests/opytimizer/optimizers/science/test_eo.py +++ b/tests/opytimizer/optimizers/science/test_eo.py @@ -0,0 +1,128 @@ +import numpy as np + +from opytimizer.optimizers.science import eo +from opytimizer.spaces import search + + +def test_eo_params(): + params = { + 'a1': 2.0, + 'a2': 1.0, + 'GP': 0.5, + 'V': 1.0 + } + + new_eo = eo.EO(params=params) + + assert new_eo.a1 == 2.0 + + assert new_eo.a2 == 1.0 + + assert new_eo.GP == 0.5 + + assert new_eo.V == 1.0 + + +def test_eo_params_setter(): + new_eo = eo.EO() + + try: + new_eo.a1 = 'a' + except: + new_eo.a1 = 2.0 + + try: + new_eo.a1 = -1 + except: + new_eo.a1 = 2.0 + + assert new_eo.a1 == 2.0 + + try: + new_eo.a2 = 'b' + except: + new_eo.a2 = 1.0 + + try: + new_eo.a2 = -1 + except: + new_eo.a2 = 1.0 + + assert new_eo.a2 == 1.0 + + try: + new_eo.GP = 'c' + except: + new_eo.GP = 0.5 + + try: + new_eo.GP = -1 + except: + new_eo.GP = 0.5 + + assert new_eo.GP == 0.5 + + try: + new_eo.V = 'd' + except: + new_eo.V = 1.0 + + try: + new_eo.V = -1 + except: + new_eo.V = 1.0 + + assert new_eo.V == 1.0 + + +def test_eo_create_additional_attrs(): + 
search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[1, 1], upper_bound=[10, 10]) + + new_eo = eo.EO() + new_eo.create_additional_attrs(search_space) + + try: + new_eo.C = 1 + except: + new_eo.C = [] + + assert new_eo.C == [] + + +def test_eo_calculate_equilibrium(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[1, 1], upper_bound=[10, 10]) + + new_eo = eo.EO() + new_eo.create_additional_attrs(search_space) + + new_eo._calculate_equilibrium(search_space.agents) + + +def test_eo_average_concentration(): + def square(x): + return np.sum(x**2) + + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[1, 1], upper_bound=[10, 10]) + + new_eo = eo.EO() + new_eo.create_additional_attrs(search_space) + + C_avg = new_eo._average_concentration(square) + + assert type(C_avg).__name__ == 'Agent' + + +def test_eo_update(): + def square(x): + return np.sum(x**2) + + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[1, 1], upper_bound=[10, 10]) + + new_eo = eo.EO() + new_eo.create_additional_attrs(search_space) + + new_eo.update(search_space, square, 1, 10) diff --git a/tests/opytimizer/optimizers/science/test_gsa.py b/tests/opytimizer/optimizers/science/test_gsa.py index d90863bf..c274f156 100644 --- a/tests/opytimizer/optimizers/science/test_gsa.py +++ b/tests/opytimizer/optimizers/science/test_gsa.py @@ -1,24 +1,20 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.science import gsa from opytimizer.spaces import search -from opytimizer.utils import constants -np.random.seed(0) - -def test_gsa_hyperparams(): - hyperparams = { +def test_gsa_params(): + params = { 'G': 2.467, } - new_gsa = gsa.GSA(hyperparams=hyperparams) + new_gsa = gsa.GSA(params=params) assert new_gsa.G == 2.467 -def test_gsa_hyperparams_setter(): +def test_gsa_params_setter(): new_gsa = gsa.GSA() try: @@ -34,34 +30,27 @@ def test_gsa_hyperparams_setter(): assert 
new_gsa.G == 0.1 -def test_gsa_build(): - new_gsa = gsa.GSA() - - assert new_gsa.built == True +def test_gsa_create_additional_attrs(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - -def test_gsa_calculate_mass(): new_gsa = gsa.GSA() + new_gsa.create_additional_attrs(search_space) - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - search_space.agents[0].fit = 1 - - search_space.agents.sort(key=lambda x: x.fit) + try: + new_gsa.velocity = 1 + except: + new_gsa.velocity = np.array([1]) - mass = new_gsa._calculate_mass(search_space.agents) + assert new_gsa.velocity == np.array([1]) - assert len(mass) > 0 +def test_gsa_calculate_mass(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) -def test_gsa_calculate_force(): new_gsa = gsa.GSA() - - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + new_gsa.create_additional_attrs(search_space) search_space.agents[0].fit = 1 @@ -69,19 +58,15 @@ def test_gsa_calculate_force(): mass = new_gsa._calculate_mass(search_space.agents) - gravity = 1 - - force = new_gsa._calculate_force(search_space.agents, mass, gravity) + assert len(mass) > 0 - assert force.shape[0] > 0 +def test_gsa_calculate_force(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) -def test_gsa_update_velocity(): new_gsa = gsa.GSA() - - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + new_gsa.create_additional_attrs(search_space) search_space.agents[0].fit = 1 @@ -93,42 +78,14 @@ def test_gsa_update_velocity(): force = new_gsa._calculate_force(search_space.agents, mass, gravity) - velocity = new_gsa._update_velocity(force[0], mass[0], 1) + assert 
force.shape[0] > 0 - assert velocity[0] != 0 +def test_gsa_update(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) -def test_gsa_update_position(): new_gsa = gsa.GSA() + new_gsa.create_additional_attrs(search_space) - position = new_gsa._update_position(1, 1) - - assert position == 2 - - -def test_gsa_run(): - def square(x): - return np.sum(x**2) - - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) - - hyperparams = { - 'G': 100 - } - - new_gsa = gsa.GSA(hyperparams=hyperparams) - - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_gsa.run(search_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 - - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm gsa failed to converge.' 
+ new_gsa.update(search_space, 1) diff --git a/tests/opytimizer/optimizers/science/test_hgso.py b/tests/opytimizer/optimizers/science/test_hgso.py index 825b578f..1fff03b1 100644 --- a/tests/opytimizer/optimizers/science/test_hgso.py +++ b/tests/opytimizer/optimizers/science/test_hgso.py @@ -1,15 +1,13 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.science import hgso from opytimizer.spaces import search -from opytimizer.utils import constants np.random.seed(0) -def test_hgso_hyperparams(): - hyperparams = { +def test_hgso_params(): + params = { 'n_clusters': 2, 'l1': 0.0005, 'l2': 100, @@ -19,7 +17,7 @@ def test_hgso_hyperparams(): 'K': 1.0 } - new_hgso = hgso.HGSO(hyperparams=hyperparams) + new_hgso = hgso.HGSO(params=params) assert new_hgso.n_clusters == 2 @@ -36,7 +34,7 @@ def test_hgso_hyperparams(): assert new_hgso.K == 1.0 -def test_hgso_hyperparams_setter(): +def test_hgso_params_setter(): new_hgso = hgso.HGSO() try: @@ -124,31 +122,56 @@ def test_hgso_hyperparams_setter(): assert new_hgso.K == 1.0 -def test_hgso_build(): +def test_hgso_create_additional_attrs(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + new_hgso = hgso.HGSO() + new_hgso.create_additional_attrs(search_space) - assert new_hgso.built == True + try: + new_hgso.coefficient = 1 + except: + new_hgso.coefficient = np.array([1]) + assert new_hgso.coefficient == np.array([1]) -def test_hgso_run(): - def square(x): - return np.sum(x**2) + try: + new_hgso.pressure = 1 + except: + new_hgso.pressure = np.array([1]) + + assert new_hgso.pressure == np.array([1]) + + try: + new_hgso.constant = 1 + except: + new_hgso.constant = np.array([1]) + + assert new_hgso.constant == np.array([1]) - def hook(optimizer, space, function): - return - new_function = function.Function(pointer=square) +def test_hgso_update_position(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 
0], upper_bound=[10, 10]) new_hgso = hgso.HGSO() + new_hgso.create_additional_attrs(search_space) + + position = new_hgso._update_position( + search_space.agents[0], search_space.agents[1], search_space.best_agent, 0.5) + + assert position[0][0] != 0 - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - history = new_hgso.run(search_space, new_function, pre_evaluate=hook) +def test_hgso_run(): + def square(x): + return np.sum(x**2) - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + + new_hgso = hgso.HGSO() + new_hgso.create_additional_attrs(search_space) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm hgso failed to converge.' + new_hgso.update(search_space, square, 1, 10) diff --git a/tests/opytimizer/optimizers/science/test_mvo.py b/tests/opytimizer/optimizers/science/test_mvo.py index 1d256fa9..8decec0c 100644 --- a/tests/opytimizer/optimizers/science/test_mvo.py +++ b/tests/opytimizer/optimizers/science/test_mvo.py @@ -1,21 +1,17 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.science import mvo from opytimizer.spaces import search -from opytimizer.utils import constants -np.random.seed(0) - -def test_mvo_hyperparams(): - hyperparams = { +def test_mvo_params(): + params = { 'WEP_min': 0.2, 'WEP_max': 1.0, 'p': 0.5 } - new_mvo = mvo.MVO(hyperparams=hyperparams) + new_mvo = mvo.MVO(params=params) assert new_mvo.WEP_min == 0.2 @@ -24,7 +20,7 @@ def test_mvo_hyperparams(): assert new_mvo.p == 0.5 -def test_mvo_hyperparams_setter(): +def test_mvo_params_setter(): new_mvo = mvo.MVO() try: @@ -69,48 +65,14 @@ def test_mvo_hyperparams_setter(): assert new_mvo.p == 0.25 -def test_mvo_build(): - new_mvo = mvo.MVO() - - assert new_mvo.built == True - - def test_mvo_update(): 
def square(x): return np.sum(x**2) - new_function = function.Function(pointer=square) - new_mvo = mvo.MVO() - search_space = search.SearchSpace(n_agents=2, n_iterations=10, - n_variables=2, lower_bound=[1, 1], - upper_bound=[10, 10]) - - new_mvo._update(search_space.agents, search_space.best_agent, new_function, 1, 1) - - assert search_space.agents[0].position[0] != 0 - - -def test_mvo_run(): - def square(x): - return np.sum(x**2) - - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) - - new_mvo = mvo.MVO() - - search_space = search.SearchSpace(n_agents=10, n_iterations=30, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_mvo.run(search_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 + search_space = search.SearchSpace(n_agents=2, n_variables=2, + lower_bound=[1, 1], upper_bound=[10, 10]) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm mvo failed to converge.' 
+ new_mvo.update(search_space, square, 1, 10) + new_mvo.update(search_space, square, 5, 10) diff --git a/tests/opytimizer/optimizers/science/test_sa.py b/tests/opytimizer/optimizers/science/test_sa.py index 0d8eb692..4255df86 100644 --- a/tests/opytimizer/optimizers/science/test_sa.py +++ b/tests/opytimizer/optimizers/science/test_sa.py @@ -1,27 +1,23 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.science import sa from opytimizer.spaces import search -from opytimizer.utils import constants -np.random.seed(0) - -def test_sa_hyperparams(): - hyperparams = { +def test_sa_params(): + params = { 'T': 100, 'beta': 0.99, } - new_sa = sa.SA(hyperparams=hyperparams) + new_sa = sa.SA(params=params) assert new_sa.T == 100 assert new_sa.beta == 0.99 -def test_sa_hyperparams_setter(): +def test_sa_params_setter(): new_sa = sa.SA() try: @@ -49,38 +45,14 @@ def test_sa_hyperparams_setter(): assert new_sa.beta == 0.5 -def test_sa_build(): - new_sa = sa.SA() - - assert new_sa.built == True - - -def test_sa_run(): +def test_sa_update(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) - - hyperparams = { - 'T': 100, - 'beta': 0.99 - } - - new_sa = sa.SA(hyperparams=hyperparams) - - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_sa.run(search_space, new_function, pre_evaluate=hook) - - history = new_sa.run(search_space, new_function, pre_evaluate=hook) + new_sa = sa.SA() - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm sa failed to converge.' 
+ new_sa.update(search_space, square) + new_sa.update(search_space, square) diff --git a/tests/opytimizer/optimizers/science/test_two.py b/tests/opytimizer/optimizers/science/test_two.py index b5849a3f..c27f212b 100644 --- a/tests/opytimizer/optimizers/science/test_two.py +++ b/tests/opytimizer/optimizers/science/test_two.py @@ -1,15 +1,11 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.science import two from opytimizer.spaces import search -from opytimizer.utils import constants -np.random.seed(0) - -def test_two_hyperparams(): - hyperparams = { +def test_two_params(): + params = { 'mu_s': 1, 'mu_k': 1, 'delta_t': 1, @@ -17,7 +13,7 @@ def test_two_hyperparams(): 'beta': 0.05 } - new_two = two.TWO(hyperparams=hyperparams) + new_two = two.TWO(params=params) assert new_two.mu_s == 1 @@ -30,7 +26,7 @@ def test_two_hyperparams(): assert new_two.beta == 0.05 -def test_two_hyperparams_setter(): +def test_two_params_setter(): new_two = two.TWO() try: @@ -94,31 +90,27 @@ def test_two_hyperparams_setter(): assert new_two.beta == 0.05 -def test_two_build(): +def test_two_constraint_handle(): + def square(x): + return np.sum(x**2) + + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + new_two = two.TWO() - assert new_two.built == True + new_two._constraint_handle( + search_space.agents, search_space.best_agent, square, 1) def test_two_run(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) new_two = two.TWO() - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_two.run(search_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 - - 
best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm two failed to converge.' + new_two.update(search_space, square, 1, 10) + new_two.update(search_space, square, 5, 10) diff --git a/tests/opytimizer/optimizers/science/test_wca.py b/tests/opytimizer/optimizers/science/test_wca.py index 18bd6471..8cfca03e 100644 --- a/tests/opytimizer/optimizers/science/test_wca.py +++ b/tests/opytimizer/optimizers/science/test_wca.py @@ -1,27 +1,23 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.science import wca from opytimizer.spaces import search -from opytimizer.utils import constants -np.random.seed(0) - -def test_wca_hyperparams(): - hyperparams = { +def test_wca_params(): + params = { 'nsr': 5, 'd_max': 0.25 } - new_wca = wca.WCA(hyperparams=hyperparams) + new_wca = wca.WCA(params=params) assert new_wca.nsr == 5 assert new_wca.d_max == 0.25 -def test_wca_hyperparams_setter(): +def test_wca_params_setter(): new_wca = wca.WCA() try: @@ -49,97 +45,78 @@ def test_wca_hyperparams_setter(): assert new_wca.d_max == 0.1 -def test_wca_build(): - new_wca = wca.WCA() - - assert new_wca.built == True - +def test_wca_create_additional_attrs(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) -def test_wca_flow_intensity(): new_wca = wca.WCA() + new_wca.create_additional_attrs(search_space) - search_space = search.SearchSpace(n_agents=2, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + try: + new_wca.flows = 1 + except: + new_wca.flows = np.array([1]) - flows = new_wca._flow_intensity(search_space.agents) + assert new_wca.flows == np.array([1]) - assert flows.shape[0] == new_wca.nsr +def test_wca_flow_intensity(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) -def test_wca_raining_process(): new_wca = wca.WCA() + 
new_wca.create_additional_attrs(search_space) - search_space = search.SearchSpace(n_agents=20, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - flows = new_wca._flow_intensity(search_space.agents) - - new_wca._raining_process(search_space.agents, search_space.best_agent) - - assert search_space.agents[-1].position[0] != 0 +def test_wca_raining_process(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) -def test_wca_update_stream(): new_wca = wca.WCA() + new_wca.create_additional_attrs(search_space) + new_wca.flows[0] = 5 - search_space = search.SearchSpace(n_agents=20, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - flows = new_wca._flow_intensity(search_space.agents) + new_wca.d_max = 100 + new_wca._raining_process(search_space.agents, search_space.best_agent) - new_wca._update_stream(search_space.agents, flows) - assert search_space.agents[-1].position[0] != 0 +def test_wca_update_stream(): + def square(x): + return np.sum(x**2) + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) -def test_wca_update_river(): new_wca = wca.WCA() + new_wca.create_additional_attrs(search_space) + new_wca.flows[0] = 5 - search_space = search.SearchSpace(n_agents=20, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + new_wca._update_stream(search_space.agents, square) - new_wca._update_river(search_space.agents, search_space.best_agent) - assert search_space.agents[1].position[0] != 0 +def test_wca_update_river(): + def square(x): + return np.sum(x**2) + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) -def test_wca_update(): new_wca = wca.WCA() + new_wca.create_additional_attrs(search_space) - search_space = search.SearchSpace(n_agents=20, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 
10]) - - flows = new_wca._flow_intensity(search_space.agents) + new_wca._update_river(search_space.agents, search_space.best_agent, square) - new_wca._update(search_space.agents, search_space.best_agent, flows) + assert search_space.agents[1].position[0][0] != 0 - assert search_space.agents[0].position[0] != 0 - -def test_wca_run(): +def test_wca_update(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) new_wca = wca.WCA() + new_wca.create_additional_attrs(search_space) - search_space = search.SearchSpace(n_agents=20, n_iterations=10, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_wca.run(search_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 - - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm wca failed to converge.' 
+ new_wca.update(search_space, square, 1) diff --git a/tests/opytimizer/optimizers/science/test_wdo.py b/tests/opytimizer/optimizers/science/test_wdo.py index da0ba6c9..3a1caaa0 100644 --- a/tests/opytimizer/optimizers/science/test_wdo.py +++ b/tests/opytimizer/optimizers/science/test_wdo.py @@ -1,15 +1,11 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.science import wdo from opytimizer.spaces import search -from opytimizer.utils import constants -np.random.seed(0) - -def test_wdo_hyperparams(): - hyperparams = { +def test_wdo_params(): + params = { 'v_max': 0.3, 'alpha': 0.8, 'g': 0.6, @@ -17,7 +13,7 @@ def test_wdo_hyperparams(): 'RT': 1.5 } - new_wdo = wdo.WDO(hyperparams=hyperparams) + new_wdo = wdo.WDO(params=params) assert new_wdo.v_max == 0.3 @@ -30,7 +26,7 @@ def test_wdo_hyperparams(): assert new_wdo.RT == 1.5 -def test_wdo_hyperparams_setter(): +def test_wdo_params_setter(): new_wdo = wdo.WDO() try: @@ -94,55 +90,29 @@ def test_wdo_hyperparams_setter(): assert new_wdo.RT == 0.5 -def test_wdo_build(): - new_wdo = wdo.WDO() - - assert new_wdo.built == True - - -def test_wdo_update_velocity(): - new_wdo = wdo.WDO() - - velocity = new_wdo._update_velocity(1, 1, 1, 1, 1) - - assert velocity != 0 - +def test_wdo_create_additional_attrs(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) -def test_wdo_update_position(): new_wdo = wdo.WDO() + new_wdo.create_additional_attrs(search_space) - position = new_wdo._update_position(1, 1) + try: + new_wdo.velocity = 1 + except: + new_wdo.velocity = np.array([1]) - assert position == 2 + assert new_wdo.velocity == np.array([1]) -def test_wdo_run(): +def test_wdo_update(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - 
hyperparams = { - 'v_max': 0.3, - 'alpha': 0.8, - 'g': 0.6, - 'c': 1.0, - 'RT': 1.5 - } - - new_wdo = wdo.WDO(hyperparams=hyperparams) - - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_wdo.run(search_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 + new_wdo = wdo.WDO() + new_wdo.create_additional_attrs(search_space) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm wdo failed to converge.' + new_wdo.update(search_space, square) diff --git a/tests/opytimizer/optimizers/science/test_wwo.py b/tests/opytimizer/optimizers/science/test_wwo.py index 4303e869..3fb25b43 100644 --- a/tests/opytimizer/optimizers/science/test_wwo.py +++ b/tests/opytimizer/optimizers/science/test_wwo.py @@ -1,22 +1,20 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.science import wwo from opytimizer.spaces import search -from opytimizer.utils import constants np.random.seed(0) -def test_wwo_hyperparams(): - hyperparams = { +def test_wwo_params(): + params = { 'h_max': 5, 'alpha': 1.001, 'beta': 0.001, 'k_max': 1 } - new_wwo = wwo.WWO(hyperparams=hyperparams) + new_wwo = wwo.WWO(params=params) assert new_wwo.h_max == 5 @@ -27,7 +25,7 @@ def test_wwo_hyperparams(): assert new_wwo.k_max == 1 -def test_wwo_hyperparams_setter(): +def test_wwo_params_setter(): new_wwo = wwo.WWO() try: @@ -79,31 +77,94 @@ def test_wwo_hyperparams_setter(): assert new_wwo.k_max == 1 -def test_wwo_build(): +def test_wwo_create_additional_attrs(): + search_space = search.SearchSpace(n_agents=50, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + new_wwo = wwo.WWO() + new_wwo.create_additional_attrs(search_space) + + try: + new_wwo.height = 1 + except: + new_wwo.height = np.array([1]) + + assert new_wwo.height == np.array([1]) - assert new_wwo.built == True + 
try: + new_wwo.length = 1 + except: + new_wwo.length = np.array([1]) + + assert new_wwo.length == np.array([1]) -def test_wwo_run(): +def test_wwo_propagate_wave(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return + search_space = search.SearchSpace(n_agents=50, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - new_function = function.Function(pointer=square) + new_wwo = wwo.WWO() + new_wwo.create_additional_attrs(search_space) - new_wwo = wwo.WWO({'k_max': 20}) + wave = new_wwo._propagate_wave(search_space.agents[0], square, 0) - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + assert type(wave).__name__ == 'Agent' - history = new_wwo.run(search_space, new_function, pre_evaluate=hook) - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 +def test_wwo_refract_wave(): + def square(x): + return np.sum(x**2) + + search_space = search.SearchSpace(n_agents=50, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + + new_wwo = wwo.WWO() + new_wwo.create_additional_attrs(search_space) + + height, length = new_wwo._refract_wave( + search_space.agents[0], search_space.best_agent, square, 0) + + assert height == 5 + assert length != 0 + + +def test_wwo_break_wave(): + def square(x): + return np.sum(x**2) + + search_space = search.SearchSpace(n_agents=50, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + + new_wwo = wwo.WWO() + new_wwo.create_additional_attrs(search_space) + + broken_wave = new_wwo._break_wave(search_space.agents[0], square, 0) + + assert type(broken_wave).__name__ == 'Agent' + + +def test_wwo_update_wave_length(): + search_space = search.SearchSpace(n_agents=50, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + + new_wwo = wwo.WWO() + new_wwo.create_additional_attrs(search_space) + + new_wwo._update_wave_length(search_space.agents) + + +def test_wwo_update(): + def square(x): + return 
np.sum(x**2) + + search_space = search.SearchSpace(n_agents=50, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + + new_wwo = wwo.WWO() + new_wwo.create_additional_attrs(search_space) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm wwo failed to converge.' + new_wwo.update(search_space, square) + new_wwo.update(search_space, square) diff --git a/tests/opytimizer/optimizers/social/test_qsa.py b/tests/opytimizer/optimizers/social/test_qsa.py index 660cf7dd..87973bbe 100644 --- a/tests/opytimizer/optimizers/social/test_qsa.py +++ b/tests/opytimizer/optimizers/social/test_qsa.py @@ -1,38 +1,70 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.social import qsa from opytimizer.spaces import search -from opytimizer.utils import constants +from opytimizer.utils import constant -np.random.seed(0) + +def test_qsa_calculate_queue(): + new_qsa = qsa.QSA() + + q_1, q_2, q_3 = new_qsa._calculate_queue(10, 1, 1, 1) + + assert q_1 == 3 + assert q_2 == 3 + assert q_3 == 3 + + q_1, q_2, q_3 = new_qsa._calculate_queue(10, constant.EPSILON - 0.1, 1, 1) + + assert q_1 == 3 + assert q_2 == 3 + assert q_3 == 3 -def test_qsa_build(): +def test_qsa_business_one(): + def square(x): + return np.sum(x**2) + + search_space = search.SearchSpace(n_agents=100, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + new_qsa = qsa.QSA() - assert new_qsa.built == True + new_qsa._business_one(search_space.agents, square, 0.1) + new_qsa._business_one(search_space.agents, square, 100) -def test_qsa_run(): +def test_qsa_business_two(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return + search_space = search.SearchSpace(n_agents=100, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + + new_qsa = qsa.QSA() + + new_qsa._business_two(search_space.agents, square) + + +def test_qsa_business_three(): + def square(x): + return np.sum(x**2) - new_function = 
function.Function(pointer=square) + search_space = search.SearchSpace(n_agents=100, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) new_qsa = qsa.QSA() - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + new_qsa._business_three(search_space.agents, square) - history = new_qsa.run(search_space, new_function, pre_evaluate=hook) - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 +def test_qsa_update(): + def square(x): + return np.sum(x**2) + + search_space = search.SearchSpace(n_agents=100, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + + new_qsa = qsa.QSA() - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm qsa failed to converge.' + new_qsa.update(search_space, square, 1, 10) diff --git a/tests/opytimizer/optimizers/social/test_ssd.py b/tests/opytimizer/optimizers/social/test_ssd.py index ee8fb3d1..2c15c41f 100644 --- a/tests/opytimizer/optimizers/social/test_ssd.py +++ b/tests/opytimizer/optimizers/social/test_ssd.py @@ -1,27 +1,24 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.social import ssd from opytimizer.spaces import search -from opytimizer.utils import constants +from opytimizer.utils import constant -np.random.seed(0) - -def test_ssd_hyperparams(): - hyperparams = { +def test_ssd_params(): + params = { 'c': 2.0, 'decay': 0.99 } - new_ssd = ssd.SSD(hyperparams=hyperparams) + new_ssd = ssd.SSD(params=params) assert new_ssd.c == 2.0 assert new_ssd.decay == 0.99 -def test_ssd_hyperparams_setter(): +def test_ssd_params_setter(): new_ssd = ssd.SSD() try: @@ -49,10 +46,26 @@ def test_ssd_hyperparams_setter(): assert new_ssd.decay == 0.99 -def test_ssd_build(): +def test_ssd_create_additional_attrs(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + new_ssd = ssd.SSD() + 
new_ssd.create_additional_attrs(search_space) - assert new_ssd.built == True + try: + new_ssd.local_position = 1 + except: + new_ssd.local_position = np.array([1]) + + assert new_ssd.local_position == 1 + + try: + new_ssd.velocity = 1 + except: + new_ssd.velocity = np.array([1]) + + assert new_ssd.velocity == 1 def test_ssd_mean_global_solution(): @@ -63,46 +76,53 @@ def test_ssd_mean_global_solution(): assert mean != 0 -def test_ssd_update_velocity(): +def test_ssd_update_position(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + new_ssd = ssd.SSD() + new_ssd.create_additional_attrs(search_space) + + position = new_ssd._update_position(1, 1) - velocity = new_ssd._update_velocity(0.5, 10, 25) + assert position[0][0] != 0 - assert velocity[0] != 0 +def test_ssd_update_velocity(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) -def test_ssd_update_position(): new_ssd = ssd.SSD() + new_ssd.create_additional_attrs(search_space) - position = new_ssd._update_position(1, 1) + velocity = new_ssd._update_velocity(0.5, 10, 1) - assert position == 2 + assert velocity[0] != 0 -def test_ssd_run(): +def test_ssd_evaluate(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - new_function = function.Function(pointer=square) + new_ssd = ssd.SSD() + new_ssd.create_additional_attrs(search_space) - hyperparams = { - 'c': 2.0, - 'decay': 0.3 - } + new_ssd.evaluate(search_space, square) + + assert search_space.best_agent.fit != constant.FLOAT_MAX - new_ssd = ssd.SSD(hyperparams=hyperparams) - search_space = search.SearchSpace(n_agents=50, n_iterations=350, - n_variables=2, lower_bound=[-100, -100], - upper_bound=[100, 100]) +def test_ssd_update(): + def square(x): + return np.sum(x**2) - history = new_ssd.run(search_space, 
new_function, pre_evaluate=hook) + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 + new_ssd = ssd.SSD() + new_ssd.create_additional_attrs(search_space) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm ssd failed to converge.' + new_ssd.update(search_space, square) diff --git a/tests/opytimizer/optimizers/swarm/test_abc.py b/tests/opytimizer/optimizers/swarm/test_abc.py index 05a651fb..79874d43 100644 --- a/tests/opytimizer/optimizers/swarm/test_abc.py +++ b/tests/opytimizer/optimizers/swarm/test_abc.py @@ -1,24 +1,20 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.swarm import abc from opytimizer.spaces import search -from opytimizer.utils import constants -np.random.seed(0) - -def test_abc_hyperparams(): - hyperparams = { +def test_abc_params(): + params = { 'n_trials': 5 } - new_abc = abc.ABC(hyperparams=hyperparams) + new_abc = abc.ABC(params=params) assert new_abc.n_trials == 5 -def test_abc_hyperparams_setter(): +def test_abc_params_setter(): new_abc = abc.ABC() try: @@ -34,35 +30,86 @@ def test_abc_hyperparams_setter(): assert new_abc.n_trials == 10 -def test_abc_build(): +def test_abc_create_additional_attrs(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + new_abc = abc.ABC() + new_abc.create_additional_attrs(search_space) + + try: + new_abc.trial = 1 + except: + new_abc.trial = np.array([1]) - assert new_abc.built == True + assert new_abc.trial == np.array([1]) -def test_abc_run(): +def test_abc_evaluate_location(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - new_function = function.Function(pointer=square) + new_abc = 
abc.ABC() + new_abc.create_additional_attrs(search_space) - hyperparams = { - 'n_trials': 1 - } + new_abc._evaluate_location( + search_space.agents[0], search_space.agents[1], square, 0) + + +def test_abc_send_employee(): + def square(x): + return np.sum(x**2) - new_abc = abc.ABC(hyperparams=hyperparams) + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + new_abc = abc.ABC() + new_abc.create_additional_attrs(search_space) - history = new_abc.run(search_space, new_function, pre_evaluate=hook) + new_abc._send_employee(search_space.agents, square) - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm abc failed to converge.' +def test_abc_send_onlooker(): + def square(x): + return np.sum(x**2) + + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + + new_abc = abc.ABC() + new_abc.create_additional_attrs(search_space) + + new_abc._send_onlooker(search_space.agents, square) + + +def test_abc_send_scout(): + def square(x): + return np.sum(x**2) + + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + + new_abc = abc.ABC() + new_abc.create_additional_attrs(search_space) + + new_abc._send_scout(search_space.agents, square) + + new_abc.trial[0] = 5 + new_abc.n_trials = 1 + new_abc._send_scout(search_space.agents, square) + + +def test_abc_update(): + def square(x): + return np.sum(x**2) + + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + + new_abc = abc.ABC() + new_abc.create_additional_attrs(search_space) + + new_abc.update(search_space, square) diff --git 
a/tests/opytimizer/optimizers/swarm/test_abo.py b/tests/opytimizer/optimizers/swarm/test_abo.py index a2f43bc0..da1d8c44 100644 --- a/tests/opytimizer/optimizers/swarm/test_abo.py +++ b/tests/opytimizer/optimizers/swarm/test_abo.py @@ -1,27 +1,23 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.swarm import abo from opytimizer.spaces import search -from opytimizer.utils import constants -np.random.seed(0) - -def test_abo_hyperparams(): - hyperparams = { +def test_abo_params(): + params = { 'sunspot_ratio': 0.9, 'a': 2.0 } - new_abo = abo.ABO(hyperparams=hyperparams) + new_abo = abo.ABO(params=params) assert new_abo.sunspot_ratio == 0.9 - + assert new_abo.a == 2.0 -def test_abo_hyperparams_setter(): +def test_abo_params_setter(): new_abo = abo.ABO() try: @@ -49,31 +45,27 @@ def test_abo_hyperparams_setter(): assert new_abo.a == 2.0 -def test_abo_build(): +def test_abo_flight_mode(): + def square(x): + return np.sum(x**2) + + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + new_abo = abo.ABO() - assert new_abo.built == True + new_abo._flight_mode( + search_space.agents[0], search_space.agents[1], square) -def test_abo_run(): +def test_abo_update(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) new_abo = abo.ABO() - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_abo.run(search_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 - - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm abo failed to converge.' 
+ new_abo.update(search_space, square, 1, 10) + new_abo.update(search_space, square, 5, 10) diff --git a/tests/opytimizer/optimizers/swarm/test_ba.py b/tests/opytimizer/optimizers/swarm/test_ba.py index cbd308dd..2d07288f 100644 --- a/tests/opytimizer/optimizers/swarm/test_ba.py +++ b/tests/opytimizer/optimizers/swarm/test_ba.py @@ -1,22 +1,18 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.swarm import ba from opytimizer.spaces import search -from opytimizer.utils import constants -np.random.seed(0) - -def test_ba_hyperparams(): - hyperparams = { +def test_ba_params(): + params = { 'f_min': 0, 'f_max': 2, 'A': 0.5, 'r': 0.5 } - new_ba = ba.BA(hyperparams=hyperparams) + new_ba = ba.BA(params=params) assert new_ba.f_min == 0 @@ -27,7 +23,7 @@ def test_ba_hyperparams(): assert new_ba.r == 0.5 -def test_ba_hyperparams_setter(): +def test_ba_params_setter(): new_ba = ba.BA() try: @@ -84,62 +80,50 @@ def test_ba_hyperparams_setter(): assert new_ba.r == 0.5 -def test_ba_build(): - new_ba = ba.BA() - - assert new_ba.built == True - +def test_ba_create_additional_attrs(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) -def test_ba_update_frequency(): new_ba = ba.BA() + new_ba.create_additional_attrs(search_space) - frequency = new_ba._update_frequency(0, 2) - - assert frequency != 0 - + try: + new_ba.frequency = 1 + except: + new_ba.frequency = np.array([1]) -def test_ba_update_velocity(): - new_ba = ba.BA() + assert new_ba.frequency == np.array([1]) - velocity = new_ba._update_velocity(1, 1, 1, 1) + try: + new_ba.velocity = 1 + except: + new_ba.velocity = np.array([1]) - assert velocity != 0 + assert new_ba.velocity == np.array([1]) + try: + new_ba.loudness = 1 + except: + new_ba.loudness = np.array([1]) -def test_ba_update_position(): - new_ba = ba.BA() + assert new_ba.loudness == np.array([1]) - position = new_ba._update_position(1, 1) + try: + new_ba.pulse_rate = 1 + 
except: + new_ba.pulse_rate = np.array([1]) - assert position == 2 + assert new_ba.pulse_rate == np.array([1]) -def test_ba_run(): +def test_ba_update(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - hyperparams = { - 'f_min': 0, - 'f_max': 2, - 'A': 1, - 'r': 0.5 - } - - new_ba = ba.BA(hyperparams=hyperparams) - - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_ba.run(search_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 + new_ba = ba.BA() + new_ba.create_additional_attrs(search_space) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm ba failed to converge.' + new_ba.update(search_space, square, 1) diff --git a/tests/opytimizer/optimizers/swarm/test_boa.py b/tests/opytimizer/optimizers/swarm/test_boa.py index 8da95887..453992e8 100644 --- a/tests/opytimizer/optimizers/swarm/test_boa.py +++ b/tests/opytimizer/optimizers/swarm/test_boa.py @@ -1,30 +1,26 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.swarm import boa from opytimizer.spaces import search -from opytimizer.utils import constants -np.random.seed(0) - -def test_boa_hyperparams(): - hyperparams = { +def test_boa_params(): + params = { 'c': 0.01, 'a': 0.1, 'p': 0.8 } - new_boa = boa.BOA(hyperparams=hyperparams) + new_boa = boa.BOA(params=params) assert new_boa.c == 0.01 assert new_boa.a == 0.1 - + assert new_boa.p == 0.8 -def test_boa_hyperparams_setter(): +def test_boa_params_setter(): new_boa = boa.BOA() try: @@ -64,31 +60,48 @@ def test_boa_hyperparams_setter(): assert new_boa.p == 0.8 -def test_boa_build(): +def test_boa_create_additional_attrs(): + 
search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + new_boa = boa.BOA() + new_boa.create_additional_attrs(search_space) - assert new_boa.built == True + try: + new_boa.fragrance = 1 + except: + new_boa.fragrance = np.array([1]) + assert new_boa.fragrance == np.array([1]) -def test_boa_run(): - def square(x): - return np.sum(x**2) - def hook(optimizer, space, function): - return +def test_boa_best_movement(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - new_function = function.Function(pointer=square) + new_boa = boa.BOA() + new_boa.create_additional_attrs(search_space) + + new_boa._best_movement( + search_space.agents[0].position, search_space.best_agent.position, new_boa.fragrance[0], 0.5) + + +def test_boa_local_movement(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) new_boa = boa.BOA() + new_boa.create_additional_attrs(search_space) - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + new_boa._local_movement(search_space.agents[0].position, search_space.agents[1].position, + search_space.agents[2].position, new_boa.fragrance[0], 0.5) - history = new_boa.run(search_space, new_function, pre_evaluate=hook) - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 +def test_boa_update(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + + new_boa = boa.BOA() + new_boa.create_additional_attrs(search_space) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm boa failed to converge.' 
+ new_boa.update(search_space) diff --git a/tests/opytimizer/optimizers/swarm/test_bwo.py b/tests/opytimizer/optimizers/swarm/test_bwo.py index c72f008c..cd92c192 100644 --- a/tests/opytimizer/optimizers/swarm/test_bwo.py +++ b/tests/opytimizer/optimizers/swarm/test_bwo.py @@ -1,21 +1,17 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.swarm import bwo from opytimizer.spaces import search -from opytimizer.utils import constants -np.random.seed(0) - -def test_bwo_hyperparams(): - hyperparams = { +def test_bwo_params(): + params = { 'pp': 0.6, 'cr': 0.44, 'pm': 0.4, } - new_bwo = bwo.BWO(hyperparams=hyperparams) + new_bwo = bwo.BWO(params=params) assert new_bwo.pp == 0.6 @@ -24,7 +20,7 @@ def test_bwo_hyperparams(): assert new_bwo.pm == 0.4 -def test_bwo_hyperparams_setter(): +def test_bwo_params_setter(): new_bwo = bwo.BWO() try: @@ -64,51 +60,37 @@ def test_bwo_hyperparams_setter(): assert new_bwo.pm == 0.4 -def test_bwo_build(): +def test_bwo_procreating(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[1, 1], upper_bound=[10, 10]) + new_bwo = bwo.BWO() - assert new_bwo.built == True + y1, y2 = new_bwo._procreating( + search_space.agents[0], search_space.agents[1]) + assert type(y1).__name__ == 'Agent' + assert type(y2).__name__ == 'Agent' -def test_bwo_update(): - def square(x): - return np.sum(x**2) - new_function = function.Function(pointer=square) +def test_bwo_mutation(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[1, 1], upper_bound=[10, 10]) new_bwo = bwo.BWO() - search_space = search.SearchSpace(n_agents=10, n_iterations=10, - n_variables=2, lower_bound=[1, 1], - upper_bound=[10, 10]) - - new_bwo._evaluate(search_space, new_function) + alpha = new_bwo._mutation(search_space.agents[0]) - new_bwo._update(search_space.agents, - search_space.n_variables, new_function) + assert type(alpha).__name__ == 'Agent' - assert search_space.agents[0].position[0] != 0 
- -def test_bwo_run(): +def test_bwo_update(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[1, 1], upper_bound=[10, 10]) new_bwo = bwo.BWO() - search_space = search.SearchSpace(n_agents=10, n_iterations=30, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_bwo.run(search_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 - - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm bwo failed to converge.' + new_bwo.update(search_space, square) diff --git a/tests/opytimizer/optimizers/swarm/test_cs.py b/tests/opytimizer/optimizers/swarm/test_cs.py index 882631ff..9946e8de 100644 --- a/tests/opytimizer/optimizers/swarm/test_cs.py +++ b/tests/opytimizer/optimizers/swarm/test_cs.py @@ -1,28 +1,24 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.swarm import cs from opytimizer.spaces import search -from opytimizer.utils import constants -np.random.seed(0) - -def test_cs_hyperparams(): - hyperparams = { +def test_cs_params(): + params = { 'alpha': 1.0, 'beta': 1.5, 'p': 0.2 } - new_cs = cs.CS(hyperparams=hyperparams) + new_cs = cs.CS(params=params) assert new_cs.alpha == 1.0 assert new_cs.beta == 1.5 assert new_cs.p == 0.2 -def test_cs_hyperparams_setter(): +def test_cs_params_setter(): new_cs = cs.CS() try: @@ -62,48 +58,49 @@ def test_cs_hyperparams_setter(): assert new_cs.p == 0.25 -def test_cs_build(): +def test_cs_generate_new_nests(): + search_space = search.SearchSpace(n_agents=20, n_variables=2, + lower_bound=[-10, -10], upper_bound=[10, 10]) + new_cs = cs.CS() - assert new_cs.built == True + new_agents = new_cs._generate_new_nests( + search_space.agents, search_space.best_agent) + assert len(new_agents) == 20 -def 
test_cs_update(): - def square(x): - return np.sum(x**2) - new_function = function.Function(pointer=square) +def test_cs_generate_abandoned_nests(): + search_space = search.SearchSpace(n_agents=20, n_variables=2, + lower_bound=[-10, -10], upper_bound=[10, 10]) new_cs = cs.CS() - search_space = search.SearchSpace(n_agents=20, n_iterations=100, - n_variables=2, lower_bound=[-10, -10], - upper_bound=[10, 10]) - - new_cs._update(search_space.agents, search_space.best_agent, new_function) + new_agents = new_cs._generate_abandoned_nests(search_space.agents, 0.5) - assert search_space.agents[0].position[0] != 0 + assert len(new_agents) == 20 -def test_cs_run(): +def test_cs_evaluate_nests(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) + search_space = search.SearchSpace(n_agents=20, n_variables=2, + lower_bound=[-10, -10], upper_bound=[10, 10]) new_cs = cs.CS() - search_space = search.SearchSpace(n_agents=25, n_iterations=30, - n_variables=2, lower_bound=[-10, -10], - upper_bound=[10, 10]) + new_agents = new_cs._generate_abandoned_nests(search_space.agents, 0.5) + new_cs._evaluate_nests(search_space.agents, new_agents, square) - history = new_cs.run(search_space, new_function, pre_evaluate=hook) - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 +def test_cs_update(): + def square(x): + return np.sum(x**2) + + search_space = search.SearchSpace(n_agents=20, n_variables=2, + lower_bound=[-10, -10], upper_bound=[10, 10]) + + new_cs = cs.CS() - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm abc failed to converge.' 
+ new_cs.update(search_space, square) diff --git a/tests/opytimizer/optimizers/swarm/test_csa.py b/tests/opytimizer/optimizers/swarm/test_csa.py index 26593b87..bf281078 100644 --- a/tests/opytimizer/optimizers/swarm/test_csa.py +++ b/tests/opytimizer/optimizers/swarm/test_csa.py @@ -1,27 +1,23 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.swarm import csa from opytimizer.spaces import search -from opytimizer.utils import constants -np.random.seed(0) - -def test_csa_hyperparams(): - hyperparams = { +def test_csa_params(): + params = { 'fl': 2.0, 'AP': 0.1 } - new_csa = csa.CSA(hyperparams=hyperparams) + new_csa = csa.CSA(params=params) assert new_csa.fl == 2.0 assert new_csa.AP == 0.1 -def test_csa_hyperparams_setter(): +def test_csa_params_setter(): new_csa = csa.CSA() try: @@ -42,31 +38,42 @@ def test_csa_hyperparams_setter(): assert new_csa.AP == 0.1 -def test_csa_build(): +def test_csa_create_additional_attrs(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + new_csa = csa.CSA() + new_csa.create_additional_attrs(search_space) + + try: + new_csa.memory = 1 + except: + new_csa.memory = np.array([1]) - assert new_csa.built == True + assert new_csa.memory == 1 -def test_csa_run(): +def test_csa_evaluate(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) new_csa = csa.CSA() + new_csa.create_additional_attrs(search_space) + + new_csa.evaluate(search_space, square) - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - history = new_csa.run(search_space, new_function, pre_evaluate=hook) +def test_csa_update(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], 
upper_bound=[10, 10]) + + new_csa = csa.CSA() + new_csa.create_additional_attrs(search_space) - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 + new_csa.update(search_space) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm csa failed to converge.' + new_csa.AP = 1 + new_csa.update(search_space) diff --git a/tests/opytimizer/optimizers/swarm/test_eho.py b/tests/opytimizer/optimizers/swarm/test_eho.py index 53fd2202..b7df1a55 100644 --- a/tests/opytimizer/optimizers/swarm/test_eho.py +++ b/tests/opytimizer/optimizers/swarm/test_eho.py @@ -1,21 +1,17 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.swarm import eho from opytimizer.spaces import search -from opytimizer.utils import constants -np.random.seed(0) - -def test_eho_hyperparams(): - hyperparams = { +def test_eho_params(): + params = { 'alpha': 0.5, 'beta': 0.1, 'n_clans': 10 } - new_eho = eho.EHO(hyperparams=hyperparams) + new_eho = eho.EHO(params=params) assert new_eho.alpha == 0.5 @@ -24,7 +20,7 @@ def test_eho_hyperparams(): assert new_eho.n_clans == 10 -def test_eho_hyperparams_setter(): +def test_eho_params_setter(): new_eho = eho.EHO() try: @@ -64,41 +60,73 @@ def test_eho_hyperparams_setter(): assert new_eho.n_clans == 10 -def test_eho_build(): +def test_eho_create_additional_attrs(): + search_space = search.SearchSpace(n_agents=20, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + + new_eho = eho.EHO() + new_eho.create_additional_attrs(search_space) + + try: + new_eho.n_ci = 'a' + except: + new_eho.n_ci = 1 + + assert new_eho.n_ci == 1 + + try: + new_eho.n_ci = -1 + except: + new_eho.n_ci = 1 + + assert new_eho.n_ci == 1 + + +def test_eho_get_agents_from_clan(): + search_space = search.SearchSpace(n_agents=20, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + new_eho = eho.EHO() + new_eho.create_additional_attrs(search_space) + + agents = 
new_eho._get_agents_from_clan(search_space.agents, 0) - assert new_eho.built == True + assert len(agents) == 2 -def test_eho_run(): +def test_eho_updating_operator(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) + search_space = search.SearchSpace(n_agents=20, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) new_eho = eho.EHO() + new_eho.create_additional_attrs(search_space) - + centers = [np.random.normal(size=(2, 1)) for _ in range(10)] - try: - search_space = search.SearchSpace(n_agents=5, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + new_eho._updating_operator(search_space.agents, centers, square) - history = new_eho.run(search_space, new_function, pre_evaluate=hook) - except: - search_space = search.SearchSpace(n_agents=20, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) +def test_eho_separating_operator(): + search_space = search.SearchSpace(n_agents=20, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + + new_eho = eho.EHO() + new_eho.create_additional_attrs(search_space) - history = new_eho.run(search_space, new_function, pre_evaluate=hook) + new_eho._separating_operator(search_space.agents) - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm eho failed to converge.' 
+def test_eho_update(): + def square(x): + return np.sum(x**2) + + search_space = search.SearchSpace(n_agents=20, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + + new_eho = eho.EHO() + new_eho.create_additional_attrs(search_space) + + new_eho.update(search_space, square) diff --git a/tests/opytimizer/optimizers/swarm/test_fa.py b/tests/opytimizer/optimizers/swarm/test_fa.py index 1c4c4777..57a72c06 100644 --- a/tests/opytimizer/optimizers/swarm/test_fa.py +++ b/tests/opytimizer/optimizers/swarm/test_fa.py @@ -1,21 +1,17 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.swarm import fa from opytimizer.spaces import search -from opytimizer.utils import constants -np.random.seed(0) - -def test_fa_hyperparams(): - hyperparams = { +def test_fa_params(): + params = { 'alpha': 0.5, 'beta': 0.2, 'gamma': 1.0 } - new_fa = fa.FA(hyperparams=hyperparams) + new_fa = fa.FA(params=params) assert new_fa.alpha == 0.5 @@ -24,7 +20,7 @@ def test_fa_hyperparams(): assert new_fa.gamma == 1.0 -def test_fa_hyperparams_setter(): +def test_fa_params_setter(): new_fa = fa.FA() try: @@ -64,31 +60,10 @@ def test_fa_hyperparams_setter(): assert new_fa.gamma == 1.0 -def test_fa_build(): - new_fa = fa.FA() - - assert new_fa.built == True - - -def test_fa_run(): - def square(x): - return np.sum(x**2) - - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) - +def test_fa_update(): new_fa = fa.FA() - search_space = search.SearchSpace(n_agents=10, n_iterations=10, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_fa.run(search_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm fa failed to converge.' 
+ new_fa.update(search_space, 100) diff --git a/tests/opytimizer/optimizers/swarm/test_fpa.py b/tests/opytimizer/optimizers/swarm/test_fpa.py index 22bd7385..656d7a5e 100644 --- a/tests/opytimizer/optimizers/swarm/test_fpa.py +++ b/tests/opytimizer/optimizers/swarm/test_fpa.py @@ -1,21 +1,17 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.swarm import fpa from opytimizer.spaces import search -from opytimizer.utils import constants -np.random.seed(0) - -def test_fpa_hyperparams(): - hyperparams = { +def test_fpa_params(): + params = { 'beta': 1.0, 'eta': 0.5, 'p': 0.5 } - new_fpa = fpa.FPA(hyperparams=hyperparams) + new_fpa = fpa.FPA(params=params) assert new_fpa.beta == 1.0 @@ -24,7 +20,7 @@ def test_fpa_hyperparams(): assert new_fpa.p == 0.5 -def test_fpa_hyperparams_setter(): +def test_fpa_params_setter(): new_fpa = fpa.FPA() try: @@ -64,12 +60,6 @@ def test_fpa_hyperparams_setter(): assert new_fpa.p == 0.25 -def test_fpa_build(): - new_fpa = fpa.FPA() - - assert new_fpa.built == True - - def test_fpa_global_pollination(): new_fpa = fpa.FPA() @@ -90,38 +80,12 @@ def test_fpa_update(): def square(x): return np.sum(x**2) - new_function = function.Function(pointer=square) - - new_fpa = fpa.FPA() - - search_space = search.SearchSpace(n_agents=2, n_iterations=10, - n_variables=2, lower_bound=[1, 1], - upper_bound=[10, 10]) - - new_fpa._update(search_space.agents, search_space.best_agent, new_function) - - assert search_space.agents[0].position[0] != 0 - - -def test_fpa_run(): - def square(x): - return np.sum(x**2) - - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) - new_fpa = fpa.FPA() - search_space = search.SearchSpace(n_agents=10, n_iterations=30, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_fpa.run(search_space, new_function, pre_evaluate=hook) + search_space = search.SearchSpace(n_agents=2, n_variables=2, + lower_bound=[1, 1], 
upper_bound=[10, 10]) - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 + new_fpa.update(search_space, square) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm fpa failed to converge.' + new_fpa.p = 0.01 + new_fpa.update(search_space, square) diff --git a/tests/opytimizer/optimizers/swarm/test_goa.py b/tests/opytimizer/optimizers/swarm/test_goa.py index 5d328cf0..45dee65a 100644 --- a/tests/opytimizer/optimizers/swarm/test_goa.py +++ b/tests/opytimizer/optimizers/swarm/test_goa.py @@ -1,22 +1,20 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.swarm import goa from opytimizer.spaces import search -from opytimizer.utils import constants np.random.seed(0) -def test_goa_hyperparams(): - hyperparams = { +def test_goa_params(): + params = { 'c_min': 0.00001, 'c_max': 1.0, 'f': 0.5, 'l': 1.5 } - new_goa = goa.GOA(hyperparams=hyperparams) + new_goa = goa.GOA(params=params) assert new_goa.c_min == 0.00001 @@ -27,7 +25,7 @@ def test_goa_hyperparams(): assert new_goa.l == 1.5 -def test_goa_hyperparams_setter(): +def test_goa_params_setter(): new_goa = goa.GOA() try: @@ -79,31 +77,21 @@ def test_goa_hyperparams_setter(): assert new_goa.l == 1.5 -def test_goa_build(): +def test_goa_social_force(): new_goa = goa.GOA() - assert new_goa.built == True + r = new_goa._social_force(np.array([1, 1, 1])) + assert r[0] == -0.11117088165514633 -def test_goa_run(): + +def test_goa_update(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) - new_goa = goa.GOA() - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_goa.run(search_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 + search_space = search.SearchSpace(n_agents=10, 
n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm goa failed to converge.' + new_goa.update(search_space, square, 1, 10) diff --git a/tests/opytimizer/optimizers/swarm/test_js.py b/tests/opytimizer/optimizers/swarm/test_js.py index 3ae093d5..d0c12fb3 100644 --- a/tests/opytimizer/optimizers/swarm/test_js.py +++ b/tests/opytimizer/optimizers/swarm/test_js.py @@ -1,21 +1,19 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.swarm import js from opytimizer.spaces import search -from opytimizer.utils import constants np.random.seed(0) -def test_js_hyperparams(): - hyperparams = { +def test_js_params(): + params = { 'eta': 4.0, 'beta': 3.0, 'gamma': 0.1 } - new_js = js.JS(hyperparams=hyperparams) + new_js = js.JS(params=params) assert new_js.eta == 4.0 @@ -24,7 +22,7 @@ def test_js_hyperparams(): assert new_js.gamma == 0.1 -def test_js_hyperparams_setter(): +def test_js_params_setter(): new_js = js.JS() try: @@ -64,55 +62,75 @@ def test_js_hyperparams_setter(): assert new_js.gamma == 0.1 -def test_js_build(): +def test_js_initialize_chaotic_map(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + new_js = js.JS() + new_js._initialize_chaotic_map(search_space.agents) - assert new_js.built == True +def test_js_create_additional_attrs(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) -def test_js_run(): - def square(x): - return np.sum(x**2) + new_js = js.JS() + new_js.create_additional_attrs(search_space) - def hook(optimizer, space, function): - return - new_function = function.Function(pointer=square) +def test_js_ocean_current(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) new_js = js.JS() + new_js.create_additional_attrs(search_space) - 
search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + trend = new_js._ocean_current(search_space.agents, search_space.best_agent) - history = new_js.run(search_space, new_function, pre_evaluate=hook) + assert trend[0][0] != 0 - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm js failed to converge.' +def test_js_motion_a(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + new_js = js.JS() + new_js.create_additional_attrs(search_space) -def test_nbjs_run(): - def square(x): - return np.sum(x**2) + motion = new_js._motion_a(0, 1) - def hook(optimizer, space, function): - return + assert motion[0] != 0 - new_function = function.Function(pointer=square) - new_nbjs = js.NBJS() +def test_js_motion_b(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + + new_js = js.JS() + new_js.create_additional_attrs(search_space) + + motion = new_js._motion_b(search_space.agents[0], search_space.agents[1]) + + assert motion[0][0] != 0 - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - history = new_nbjs.run(search_space, new_function, pre_evaluate=hook) +def test_js_update(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + + new_js = js.JS() + new_js.create_additional_attrs(search_space) + + new_js.update(search_space, 1, 10) + + +def test_nbjs_motion_a(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + + new_nbjs = js.NBJS() + new_nbjs.create_additional_attrs(search_space) - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 + motion = 
new_nbjs._motion_a(0, 1) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm nbjs failed to converge.' + assert motion[0] != 0 diff --git a/tests/opytimizer/optimizers/swarm/test_kh.py b/tests/opytimizer/optimizers/swarm/test_kh.py index aaf5f7cd..8194def7 100644 --- a/tests/opytimizer/optimizers/swarm/test_kh.py +++ b/tests/opytimizer/optimizers/swarm/test_kh.py @@ -1,15 +1,13 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.swarm import kh from opytimizer.spaces import search -from opytimizer.utils import constants np.random.seed(0) -def test_kh_hyperparams(): - hyperparams = { +def test_kh_params(): + params = { 'N_max': 0.01, 'w_n': 0.42, 'NN': 5, @@ -21,7 +19,7 @@ def test_kh_hyperparams(): 'Mu': 0.05 } - new_kh = kh.KH(hyperparams=hyperparams) + new_kh = kh.KH(params=params) assert new_kh.N_max == 0.01 assert new_kh.w_n == 0.42 @@ -34,7 +32,7 @@ def test_kh_hyperparams(): assert new_kh.Mu == 0.05 -def test_kh_hyperparams_setter(): +def test_kh_params_setter(): new_kh = kh.KH() try: @@ -164,35 +162,49 @@ def test_kh_hyperparams_setter(): assert new_kh.Mu == 0.05 -def test_kh_build(): +def test_kh_create_additional_attrs(): + search_space = search.SearchSpace(n_agents=5, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + new_kh = kh.KH() + new_kh.create_additional_attrs(search_space) - assert new_kh.built == True + try: + new_kh.motion = 1 + except: + new_kh.motion = np.array([1]) + + assert new_kh.motion == np.array([1]) + + try: + new_kh.foraging = 1 + except: + new_kh.foraging = np.array([1]) + + assert new_kh.foraging == np.array([1]) def test_kh_food_location(): def square(x): return np.sum(x**2) - new_function = function.Function(pointer=square) + search_space = search.SearchSpace(n_agents=5, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) new_kh = kh.KH() + new_kh.create_additional_attrs(search_space) - search_space = 
search.SearchSpace(n_agents=5, n_iterations=20, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - food = new_kh._food_location(search_space.agents, new_function) + food = new_kh._food_location(search_space.agents, square) assert food.fit >= 0 def test_kh_sensing_distance(): - new_kh = kh.KH() + search_space = search.SearchSpace(n_agents=5, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - search_space = search.SearchSpace(n_agents=5, n_iterations=20, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + new_kh = kh.KH() + new_kh.create_additional_attrs(search_space) distance, eucl_distance = new_kh._sensing_distance(search_space.agents, 0) @@ -201,11 +213,11 @@ def test_kh_sensing_distance(): def test_kh_get_neighbours(): - new_kh = kh.KH() + search_space = search.SearchSpace(n_agents=5, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - search_space = search.SearchSpace(n_agents=5, n_iterations=20, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + new_kh = kh.KH() + new_kh.create_additional_attrs(search_space) distance, eucl_distance = new_kh._sensing_distance(search_space.agents, 0) @@ -216,11 +228,11 @@ def test_kh_get_neighbours(): def test_kh_local_alpha(): - new_kh = kh.KH() + search_space = search.SearchSpace(n_agents=5, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - search_space = search.SearchSpace(n_agents=5, n_iterations=20, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + new_kh = kh.KH() + new_kh.create_additional_attrs(search_space) distance, eucl_distance = new_kh._sensing_distance(search_space.agents, 0) @@ -234,11 +246,11 @@ def test_kh_local_alpha(): def test_kh_target_alpha(): - new_kh = kh.KH() + search_space = search.SearchSpace(n_agents=5, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - search_space = search.SearchSpace(n_agents=5, n_iterations=20, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + new_kh = kh.KH() + 
new_kh.create_additional_attrs(search_space) alpha = new_kh._target_alpha( search_space.agents[0], search_space.agents[-1], search_space.agents[0], 1) @@ -247,11 +259,11 @@ def test_kh_target_alpha(): def test_kh_neighbour_motion(): - new_kh = kh.KH() + search_space = search.SearchSpace(n_agents=5, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - search_space = search.SearchSpace(n_agents=5, n_iterations=20, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + new_kh = kh.KH() + new_kh.create_additional_attrs(search_space) motion = np.zeros((5, 2, 1)) @@ -262,11 +274,11 @@ def test_kh_neighbour_motion(): def test_kh_food_beta(): - new_kh = kh.KH() + search_space = search.SearchSpace(n_agents=5, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - search_space = search.SearchSpace(n_agents=5, n_iterations=20, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + new_kh = kh.KH() + new_kh.create_additional_attrs(search_space) beta = new_kh._food_beta( search_space.agents[0], search_space.agents[-1], search_space.agents[0], search_space.agents[0], 1) @@ -275,11 +287,11 @@ def test_kh_food_beta(): def test_kh_best_beta(): - new_kh = kh.KH() + search_space = search.SearchSpace(n_agents=5, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - search_space = search.SearchSpace(n_agents=5, n_iterations=20, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + new_kh = kh.KH() + new_kh.create_additional_attrs(search_space) beta = new_kh._best_beta( search_space.agents[0], search_space.agents[-1], search_space.agents[0]) @@ -288,11 +300,11 @@ def test_kh_best_beta(): def test_kh_foraging_motion(): - new_kh = kh.KH() + search_space = search.SearchSpace(n_agents=5, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - search_space = search.SearchSpace(n_agents=5, n_iterations=20, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + new_kh = kh.KH() + new_kh.create_additional_attrs(search_space) foraging = 
np.zeros((5, 2, 1)) @@ -303,7 +315,11 @@ def test_kh_foraging_motion(): def test_kh_physical_diffusion(): + search_space = search.SearchSpace(n_agents=5, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + new_kh = kh.KH() + new_kh.create_additional_attrs(search_space) new_physical = new_kh._physical_diffusion(1, 1, 1, 20) @@ -311,11 +327,11 @@ def test_kh_physical_diffusion(): def test_kh_update_position(): - new_kh = kh.KH() + search_space = search.SearchSpace(n_agents=5, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - search_space = search.SearchSpace(n_agents=5, n_iterations=20, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + new_kh = kh.KH() + new_kh.create_additional_attrs(search_space) motion = np.zeros((2, 1)) @@ -328,11 +344,11 @@ def test_kh_update_position(): def test_kh_crossover(): - new_kh = kh.KH() + search_space = search.SearchSpace(n_agents=5, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - search_space = search.SearchSpace(n_agents=5, n_iterations=20, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + new_kh = kh.KH() + new_kh.create_additional_attrs(search_space) crossover = new_kh._crossover(search_space.agents, 0) @@ -340,36 +356,25 @@ def test_kh_crossover(): def test_kh_mutation(): - new_kh = kh.KH() + search_space = search.SearchSpace(n_agents=5, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - search_space = search.SearchSpace(n_agents=5, n_iterations=20, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + new_kh = kh.KH() + new_kh.create_additional_attrs(search_space) mutation = new_kh._mutation(search_space.agents, 0) assert mutation.position.shape == (2, 1) -def test_kh_run(): +def test_kh_update(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) + search_space = search.SearchSpace(n_agents=5, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) new_kh = 
kh.KH() + new_kh.create_additional_attrs(search_space) - search_space = search.SearchSpace(n_agents=5, n_iterations=20, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_kh.run(search_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 - - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm kh failed to converge.' + new_kh.update(search_space, square, 1, 10) diff --git a/tests/opytimizer/optimizers/swarm/test_mfo.py b/tests/opytimizer/optimizers/swarm/test_mfo.py index 13ccbadd..aa262e36 100644 --- a/tests/opytimizer/optimizers/swarm/test_mfo.py +++ b/tests/opytimizer/optimizers/swarm/test_mfo.py @@ -1,24 +1,20 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.swarm import mfo from opytimizer.spaces import search -from opytimizer.utils import constants -np.random.seed(0) - -def test_mfo_hyperparams(): - hyperparams = { +def test_mfo_params(): + params = { 'b': 1 } - new_mfo = mfo.MFO(hyperparams=hyperparams) + new_mfo = mfo.MFO(params=params) assert new_mfo.b == 1 -def test_mfo_hyperparams_setter(): +def test_mfo_params_setter(): new_mfo = mfo.MFO() try: @@ -34,31 +30,9 @@ def test_mfo_hyperparams_setter(): assert new_mfo.b == 1 -def test_mfo_build(): +def test_mfo_update(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) new_mfo = mfo.MFO() - assert new_mfo.built == True - - -def test_mfo_run(): - def square(x): - return np.sum(x**2) - - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) - - new_mfo = mfo.MFO() - - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_mfo.run(search_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 
- - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm mfo failed to converge.' + new_mfo.update(search_space, 1, 10) diff --git a/tests/opytimizer/optimizers/swarm/test_mrfo.py b/tests/opytimizer/optimizers/swarm/test_mrfo.py index 950f0d61..2f95a2f8 100644 --- a/tests/opytimizer/optimizers/swarm/test_mrfo.py +++ b/tests/opytimizer/optimizers/swarm/test_mrfo.py @@ -1,24 +1,22 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.swarm import mrfo from opytimizer.spaces import search -from opytimizer.utils import constants np.random.seed(0) -def test_mrfo_hyperparams(): - hyperparams = { +def test_mrfo_params(): + params = { 'S': 2.0 } - new_mrfo = mrfo.MRFO(hyperparams=hyperparams) + new_mrfo = mrfo.MRFO(params=params) assert new_mrfo.S == 2.0 -def test_mrfo_hyperparams_setter(): +def test_mrfo_params_setter(): new_mrfo = mrfo.MRFO() try: @@ -34,20 +32,14 @@ def test_mrfo_hyperparams_setter(): assert new_mrfo.S == 2.0 -def test_mrfo_build(): - new_mrfo = mrfo.MRFO() - - assert new_mrfo.built == True - - def test_mrfo_cyclone_foraging(): new_mrfo = mrfo.MRFO() - search_space = search.SearchSpace(n_agents=5, n_iterations=20, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + search_space = search.SearchSpace(n_agents=5, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - cyclone = new_mrfo._cyclone_foraging(search_space.agents, search_space.best_agent.position, 1, 1, 20) + cyclone = new_mrfo._cyclone_foraging( + search_space.agents, search_space.best_agent.position, 1, 1, 20) assert cyclone[0] != 0 @@ -55,16 +47,16 @@ def test_mrfo_cyclone_foraging(): def test_mrfo_chain_foraging(): new_mrfo = mrfo.MRFO() - search_space = search.SearchSpace(n_agents=5, n_iterations=20, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + search_space = search.SearchSpace(n_agents=5, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - chain = 
new_mrfo._chain_foraging(search_space.agents, search_space.best_agent.position, 1) + chain = new_mrfo._chain_foraging( + search_space.agents, search_space.best_agent.position, 1) assert chain[0] != 0 -def test_mrfo__somersault_foraging(): +def test_mrfo_somersault_foraging(): new_mrfo = mrfo.MRFO() somersault = new_mrfo._somersault_foraging(1, 1) @@ -72,25 +64,13 @@ def test_mrfo__somersault_foraging(): assert somersault != 0 -def test_mrfo_run(): +def test_mrfo_update(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) + search_space = search.SearchSpace(n_agents=5, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) new_mrfo = mrfo.MRFO() - search_space = search.SearchSpace(n_agents=10, n_iterations=20, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_mrfo.run(search_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 - - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm mrfo failed to converge.' 
+ new_mrfo.update(search_space, square, 1, 10) diff --git a/tests/opytimizer/optimizers/swarm/test_pio.py b/tests/opytimizer/optimizers/swarm/test_pio.py index bc486cdf..06c47c19 100644 --- a/tests/opytimizer/optimizers/swarm/test_pio.py +++ b/tests/opytimizer/optimizers/swarm/test_pio.py @@ -1,21 +1,17 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.swarm import pio from opytimizer.spaces import search -from opytimizer.utils import constants -np.random.seed(0) - -def test_pio_hyperparams(): - hyperparams = { +def test_pio_params(): + params = { 'n_c1': 150, 'n_c2': 200, 'R': 0.2 } - new_pio = pio.PIO(hyperparams=hyperparams) + new_pio = pio.PIO(params=params) assert new_pio.n_c1 == 150 @@ -24,7 +20,7 @@ def test_pio_hyperparams(): assert new_pio.R == 0.2 -def test_pio_hyperparams_setter(): +def test_pio_params_setter(): new_pio = pio.PIO() try: @@ -64,31 +60,63 @@ def test_pio_hyperparams_setter(): assert new_pio.R == 0.2 -def test_pio_build(): +def test_pio_create_additional_attrs(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + new_pio = pio.PIO() + new_pio.create_additional_attrs(search_space) + + try: + new_pio.n_p = 'a' + except: + new_pio.n_p = 1 + + assert new_pio.n_p == 1 + + try: + new_pio.n_p = -1 + except: + new_pio.n_p = 1 - assert new_pio.built == True + assert new_pio.n_p == 1 + try: + new_pio.velocity = 1 + except: + new_pio.velocity = np.array([1]) -def test_pio_run(): - def square(x): - return np.sum(x**2) + assert new_pio.velocity == np.array([1]) - def hook(optimizer, space, function): - return - new_function = function.Function(pointer=square) +def test_pio_calculate_center(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) new_pio = pio.PIO() + new_pio.create_additional_attrs(search_space) + + center = new_pio._calculate_center(search_space.agents) - search_space = 
search.SearchSpace(n_agents=10, n_iterations=175, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - history = new_pio.run(search_space, new_function, pre_evaluate=hook) +def test_pio_update_center_position(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + + new_pio = pio.PIO() + new_pio.create_additional_attrs(search_space) - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 + center = new_pio._calculate_center(search_space.agents) + position = new_pio._update_center_position( + search_space.agents[0].position, center) + + +def test_pio_update(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + + new_pio = pio.PIO() + new_pio.create_additional_attrs(search_space) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm pio failed to converge.' + new_pio.update(search_space, 1) + new_pio.update(search_space, 175) diff --git a/tests/opytimizer/optimizers/swarm/test_pso.py b/tests/opytimizer/optimizers/swarm/test_pso.py index bcf02be9..270da3cd 100644 --- a/tests/opytimizer/optimizers/swarm/test_pso.py +++ b/tests/opytimizer/optimizers/swarm/test_pso.py @@ -1,23 +1,17 @@ -import sys - import numpy as np -from opytimizer.core import function from opytimizer.optimizers.swarm import pso from opytimizer.spaces import search -from opytimizer.utils import constants - -np.random.seed(0) -def test_pso_hyperparams(): - hyperparams = { +def test_pso_params(): + params = { 'w': 2, 'c1': 1.7, 'c2': 1.7 } - new_pso = pso.PSO(hyperparams=hyperparams) + new_pso = pso.PSO(params=params) assert new_pso.w == 2 @@ -26,7 +20,7 @@ def test_pso_hyperparams(): assert new_pso.c2 == 1.7 -def test_pso_hyperparams_setter(): +def test_pso_params_setter(): new_pso = pso.PSO() try: @@ -66,474 +60,155 @@ def test_pso_hyperparams_setter(): assert new_pso.c2 == 1.5 -def test_pso_build(): - new_pso = 
pso.PSO() - - assert new_pso.built == True +def test_pso_create_additional_attrs(): + search_space = search.SearchSpace(n_agents=2, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - -def test_pso_update_velocity(): new_pso = pso.PSO() + new_pso.create_additional_attrs(search_space) - velocity = new_pso._update_velocity(1, 1, 1, 1) - - assert velocity != 0 - + try: + new_pso.local_position = 1 + except: + new_pso.local_position = np.array([1]) -def test_pso_update_position(): - new_pso = pso.PSO() + assert new_pso.local_position == np.array([1]) - position = new_pso._update_position(1, 1) + try: + new_pso.velocity = 1 + except: + new_pso.velocity = np.array([1]) - assert position == 2 + assert new_pso.velocity == np.array([1]) def test_pso_evaluate(): def square(x): return np.sum(x**2) - new_function = function.Function(pointer=square) - - search_space = search.SearchSpace(n_agents=2, n_iterations=10, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + search_space = search.SearchSpace(n_agents=2, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) new_pso = pso.PSO() + new_pso.create_additional_attrs(search_space) - local_position = np.zeros((2, 2, 1)) + new_pso.evaluate(search_space, square) - new_pso._evaluate(search_space, new_function, local_position) - assert search_space.best_agent.fit < sys.float_info.max - - -def test_pso_run(): - def square(x): - return np.sum(x**2) - - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) +def test_pso_update(): + search_space = search.SearchSpace(n_agents=2, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) new_pso = pso.PSO() + new_pso.create_additional_attrs(search_space) - search_space = search.SearchSpace(n_agents=5, n_iterations=20, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_pso.run(search_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 
0 - assert len(history.local) > 0 - - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm pso failed to converge.' - - -def test_aiwpso_hyperparams(): - hyperparams = { - 'w_min': 1, - 'w_max': 3, - } - - new_aiwpso = pso.AIWPSO(hyperparams=hyperparams) - - assert new_aiwpso.w_min == 1 - - assert new_aiwpso.w_max == 3 - - -def test_aiwpso_hyperparams_setter(): - new_aiwpso = pso.AIWPSO() - - try: - new_aiwpso.w_min = 'a' - except: - new_aiwpso.w_min = 0.5 - - try: - new_aiwpso.w_min = -1 - except: - new_aiwpso.w_min = 0.5 - - assert new_aiwpso.w_min == 0.5 - - try: - new_aiwpso.w_max = 'b' - except: - new_aiwpso.w_max = 1.0 - - try: - new_aiwpso.w_max = -1 - except: - new_aiwpso.w_max = 1.0 - - try: - new_aiwpso.w_max = 0 - except: - new_aiwpso.w_max = 1.0 - - assert new_aiwpso.w_max == 1.0 - - -def test_aiwpso_rebuild(): - new_aiwpso = pso.AIWPSO() - - assert new_aiwpso.built == True + new_pso.update(search_space) def test_aiwpso_compute_success(): - n_agents = 2 - - search_space = search.SearchSpace(n_agents=n_agents, n_iterations=10, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + search_space = search.SearchSpace(n_agents=2, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) new_aiwpso = pso.AIWPSO() + new_aiwpso.create_additional_attrs(search_space) - new_fitness = np.zeros(n_agents) + new_aiwpso.fitness = [1, 1] + new_aiwpso._compute_success(search_space.agents) - new_aiwpso._compute_success(search_space.agents, new_fitness) - assert new_aiwpso.w != 0 - - -def test_aiwpso_evaluate(): - def square(x): - return np.sum(x**2) - - new_function = function.Function(pointer=square) - - search_space = search.SearchSpace(n_agents=2, n_iterations=10, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) +def test_aiwpso_update(): + search_space = search.SearchSpace(n_agents=2, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) new_aiwpso = pso.AIWPSO() + 
new_aiwpso.create_additional_attrs(search_space) - local_position = np.zeros((2, 2, 1)) - - new_aiwpso._evaluate(search_space, new_function, local_position) - - assert search_space.best_agent.fit < sys.float_info.max - - -def test_aiwpso_run(): - def square(x): - return np.sum(x**2) - - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) - - new_aiwpso = pso.AIWPSO() - - search_space = search.SearchSpace(n_agents=10, n_iterations=10, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_aiwpso.run(search_space, new_function, - pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 - assert len(history.local) > 0 - - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm aiwpso failed to converge.' - - -def test_rpso_hyperparams(): - hyperparams = { - 'c1': 1.7, - 'c2': 1.7 - } - - new_rpso = pso.RPSO(hyperparams=hyperparams) - - assert new_rpso.c1 == 1.7 - assert new_rpso.c2 == 1.7 - - -def test_rpso_hyperparams_setter(): - new_rpso = pso.RPSO() - - try: - new_rpso.c1 = 'a' - except: - new_rpso.c1 = 1.5 - - try: - new_rpso.c1 = -1 - except: - new_rpso.c1 = 1.5 - - assert new_rpso.c1 == 1.5 - - try: - new_rpso.c2 = 'b' - except: - new_rpso.c2 = 1.5 - - try: - new_rpso.c2 = -1 - except: - new_rpso.c2 = 1.5 - - assert new_rpso.c2 == 1.5 - + new_aiwpso.update(search_space, 0) -def test_rpso_build(): - new_rpso = pso.RPSO() - - assert new_rpso.built == True - - -def test_rpso_update_velocity(): - new_rpso = pso.RPSO() - - velocity = new_rpso._update_velocity(1, 1, 1, 10, 1, 1) - - assert velocity != 0 +def test_rpso_create_additional_attrs(): + search_space = search.SearchSpace(n_agents=2, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) -def test_rpso_update_position(): new_rpso = pso.RPSO() - - position = new_rpso._update_position(1, 1) - - assert position == 2 - - -def test_rpso_evaluate(): - def 
square(x): - return np.sum(x**2) - - new_function = function.Function(pointer=square) - - search_space = search.SearchSpace(n_agents=2, n_iterations=10, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - new_rpso = pso.RPSO() - - local_position = np.zeros((2, 2, 1)) - - new_rpso._evaluate(search_space, new_function, local_position) - - assert search_space.best_agent.fit < sys.float_info.max - - -def test_rpso_run(): - def square(x): - return np.sum(x**2) - - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) - - new_rpso = pso.RPSO() - - search_space = search.SearchSpace(n_agents=5, n_iterations=20, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_rpso.run(search_space, new_function, - pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 - assert len(history.local) > 0 - - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm rpso failed to converge.' 
- - -def test_savpso_hyperparams(): - hyperparams = { - 'w': 2, - 'c1': 1.7, - 'c2': 1.7 - } - - new_savpso = pso.SAVPSO(hyperparams=hyperparams) - - assert new_savpso.w == 2 - - assert new_savpso.c1 == 1.7 - - assert new_savpso.c2 == 1.7 - - -def test_savpso_hyperparams_setter(): - new_savpso = pso.SAVPSO() - - try: - new_savpso.w = 'a' - except: - new_savpso.w = 1 + new_rpso.create_additional_attrs(search_space) try: - new_savpso.w = -1 + new_rpso.local_position = 1 except: - new_savpso.w = 1 - - assert new_savpso.w == 1 + new_rpso.local_position = np.array([1]) - try: - new_savpso.c1 = 'b' - except: - new_savpso.c1 = 1.5 + assert new_rpso.local_position == np.array([1]) try: - new_savpso.c1 = -1 + new_rpso.velocity = 1 except: - new_savpso.c1 = 1.5 + new_rpso.velocity = np.array([1]) - assert new_savpso.c1 == 1.5 + assert new_rpso.velocity == np.array([1]) try: - new_savpso.c2 = 'c' + new_rpso.mass = 1 except: - new_savpso.c2 = 1.5 + new_rpso.mass = np.array([1]) - try: - new_savpso.c2 = -1 - except: - new_savpso.c2 = 1.5 + assert new_rpso.mass == np.array([1]) - assert new_savpso.c2 == 1.5 - - -def test_savpso_update_velocity(): - new_savpso = pso.SAVPSO() - velocity = new_savpso._update_velocity(1, 1, 1, 1, 1) - - assert velocity == 0 +def test_rpso_update(): + search_space = search.SearchSpace(n_agents=2, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + new_rpso = pso.RPSO() + new_rpso.create_additional_attrs(search_space) -def test_savpso_run(): - def square(x): - return np.sum(x**2) + new_rpso.update(search_space) - def hook(optimizer, space, function): - return - new_function = function.Function(pointer=square) +def test_savpso_update(): + search_space = search.SearchSpace(n_agents=2, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) new_savpso = pso.SAVPSO() + new_savpso.create_additional_attrs(search_space) - search_space = search.SearchSpace(n_agents=5, n_iterations=10, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - 
- history = new_savpso.run(search_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 - assert len(history.local) > 0 + new_savpso.update(search_space) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm pso failed to converge.' +def test_vpso_create_additional_attrs(): + search_space = search.SearchSpace(n_agents=2, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) -def test_vpso_hyperparams(): - hyperparams = { - 'w': 2, - 'c1': 1.7, - 'c2': 1.7 - } - - new_vpso = pso.VPSO(hyperparams=hyperparams) - - assert new_vpso.w == 2 - - assert new_vpso.c1 == 1.7 - - assert new_vpso.c2 == 1.7 - - -def test_vpso_hyperparams_setter(): new_vpso = pso.VPSO() + new_vpso.create_additional_attrs(search_space) try: - new_vpso.w = 'a' + new_vpso.local_position = 1 except: - new_vpso.w = 1 + new_vpso.local_position = np.array([1]) - try: - new_vpso.w = -1 - except: - new_vpso.w = 1 - - assert new_vpso.w == 1 - - try: - new_vpso.c1 = 'b' - except: - new_vpso.c1 = 1.5 + assert new_vpso.local_position == np.array([1]) try: - new_vpso.c1 = -1 + new_vpso.velocity = 1 except: - new_vpso.c1 = 1.5 - - assert new_vpso.c1 == 1.5 + new_vpso.velocity = np.array([1]) - try: - new_vpso.c2 = 'c' - except: - new_vpso.c2 = 1.5 + assert new_vpso.velocity == np.array([1]) try: - new_vpso.c2 = -1 + new_vpso.v_velocity = 1 except: - new_vpso.c2 = 1.5 - - assert new_vpso.c2 == 1.5 - - -def test_vpso_update_velocity(): - new_vpso = pso.VPSO() - - velocity, v_velocity = new_vpso._update_velocity(1, 1, 1, 1, 1) + new_vpso.v_velocity = np.array([1]) - assert velocity == 0.7 - assert v_velocity == 0 + assert new_vpso.v_velocity == np.array([1]) -def test_vpso_update_position(): - new_vpso = pso.VPSO() - - position = new_vpso._update_position(1, 1, 1) - - assert position == 2 - - -def test_vpso_run(): - def square(x): - return np.sum(x**2) - - def hook(optimizer, space, function): - 
return - - new_function = function.Function(pointer=square) +def test_vpso_update(): + search_space = search.SearchSpace(n_agents=2, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) new_vpso = pso.VPSO() + new_vpso.create_additional_attrs(search_space) - search_space = search.SearchSpace(n_agents=5, n_iterations=10, - n_variables=2, lower_bound=[0, 0], - upper_bound=[1, 1]) - - history = new_vpso.run(search_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 - assert len(history.local) > 0 - - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm pso failed to converge.' + new_vpso.update(search_space) diff --git a/tests/opytimizer/optimizers/swarm/test_sbo.py b/tests/opytimizer/optimizers/swarm/test_sbo.py index 3f715647..d1bc0e15 100644 --- a/tests/opytimizer/optimizers/swarm/test_sbo.py +++ b/tests/opytimizer/optimizers/swarm/test_sbo.py @@ -1,21 +1,17 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.swarm import sbo from opytimizer.spaces import search -from opytimizer.utils import constants -np.random.seed(0) - -def test_sbo_hyperparams(): - hyperparams = { +def test_sbo_params(): + params = { 'alpha': 0.9, 'p_mutation': 0.05, 'z': 0.02 } - new_sbo = sbo.SBO(hyperparams=hyperparams) + new_sbo = sbo.SBO(params=params) assert new_sbo.alpha == 0.9 @@ -24,7 +20,7 @@ def test_sbo_hyperparams(): assert new_sbo.z == 0.02 -def test_sbo_hyperparams_setter(): +def test_sbo_params_setter(): new_sbo = sbo.SBO() try: @@ -64,49 +60,30 @@ def test_sbo_hyperparams_setter(): assert new_sbo.z == 0.02 -def test_sbo_build(): - new_sbo = sbo.SBO() - - assert new_sbo.built == True - - -def test_sbo_update(): - def square(x): - return np.sum(x**2) - - new_function = function.Function(pointer=square) +def test_sbo_create_additional_attrs(): + search_space = search.SearchSpace(n_agents=2, n_variables=2, + lower_bound=[1, 1], 
upper_bound=[10, 10]) new_sbo = sbo.SBO() + new_sbo.create_additional_attrs(search_space) - search_space = search.SearchSpace(n_agents=2, n_iterations=10, - n_variables=2, lower_bound=[1, 1], - upper_bound=[10, 10]) - - new_sbo._update(search_space.agents, - search_space.best_agent, new_function, np.array([0.5, 0.5])) + try: + new_sbo.sigma = 1 + except: + new_sbo.sigma = [] - assert search_space.agents[0].position[0] != 0 + assert new_sbo.sigma == [] -def test_sbo_run(): +def test_sbo_update(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) + search_space = search.SearchSpace(n_agents=2, n_variables=2, + lower_bound=[1, 1], upper_bound=[10, 10]) new_sbo = sbo.SBO() + new_sbo.create_additional_attrs(search_space) + new_sbo.p_mutation = 1 - search_space = search.SearchSpace(n_agents=10, n_iterations=30, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_sbo.run(search_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 - - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm sbo failed to converge.' 
+ new_sbo.update(search_space, square) diff --git a/tests/opytimizer/optimizers/swarm/test_sca.py b/tests/opytimizer/optimizers/swarm/test_sca.py index 5606672b..b27c392e 100644 --- a/tests/opytimizer/optimizers/swarm/test_sca.py +++ b/tests/opytimizer/optimizers/swarm/test_sca.py @@ -1,21 +1,17 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.swarm import sca from opytimizer.spaces import search -from opytimizer.utils import constants -np.random.seed(0) - -def test_sca_hyperparams(): - hyperparams = { +def test_sca_params(): + params = { 'r_min': 0, 'r_max': 2, 'a': 3, } - new_sca = sca.SCA(hyperparams=hyperparams) + new_sca = sca.SCA(params=params) assert new_sca.r_min == 0 @@ -24,7 +20,7 @@ def test_sca_hyperparams(): assert new_sca.a == 3 -def test_sca_hyperparams_setter(): +def test_sca_params_setter(): new_sca = sca.SCA() try: @@ -69,12 +65,6 @@ def test_sca_hyperparams_setter(): assert new_sca.a == 0.5 -def test_sca_build(): - new_sca = sca.SCA() - - assert new_sca.built == True - - def test_sca_update_position(): new_sca = sca.SCA() @@ -83,31 +73,10 @@ def test_sca_update_position(): assert position > 0 -def test_sca_run(): - def square(x): - return np.sum(x**2) - - def hook(optimizer, space, function): - return +def test_sca_update(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) - new_function = function.Function(pointer=square) - - hyperparams = { - 'r_min': 0, - 'r_max': 2, - 'a': 3 - } - - new_sca = sca.SCA(hyperparams=hyperparams) - - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_sca.run(search_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 + new_sca = sca.SCA() - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm sca failed to converge.' 
+ new_sca.update(search_space, 1, 10) diff --git a/tests/opytimizer/optimizers/swarm/test_sfo.py b/tests/opytimizer/optimizers/swarm/test_sfo.py index ccdeca7c..67469ba2 100644 --- a/tests/opytimizer/optimizers/swarm/test_sfo.py +++ b/tests/opytimizer/optimizers/swarm/test_sfo.py @@ -1,21 +1,19 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.swarm import sfo from opytimizer.spaces import search -from opytimizer.utils import constants np.random.seed(0) -def test_sfo_hyperparams(): - hyperparams = { +def test_sfo_params(): + params = { 'PP': 0.1, 'A': 4, 'e': 0.001 } - new_sfo = sfo.SFO(hyperparams=hyperparams) + new_sfo = sfo.SFO(params=params) assert new_sfo.PP == 0.1 @@ -24,7 +22,7 @@ def test_sfo_hyperparams(): assert new_sfo.e == 0.001 -def test_sfo_hyperparams_setter(): +def test_sfo_params_setter(): new_sfo = sfo.SFO() try: @@ -64,31 +62,69 @@ def test_sfo_hyperparams_setter(): assert new_sfo.e == 0.001 -def test_sfo_build(): +def test_sfo_create_additional_attrs(): + search_space = search.SearchSpace(n_agents=10, n_variables=3, + lower_bound=[0, 0, 0], upper_bound=[10, 10, 10]) + new_sfo = sfo.SFO() + new_sfo.create_additional_attrs(search_space) + + try: + new_sfo.sardines = 1 + except: + new_sfo.sardines = [] - assert new_sfo.built == True + assert new_sfo.sardines == [] -def test_sfo_run(): - def square(x): - return np.sum(x**2) +def test_sfo_generate_random_agent(): + search_space = search.SearchSpace(n_agents=10, n_variables=3, + lower_bound=[0, 0, 0], upper_bound=[10, 10, 10]) - def hook(optimizer, space, function): - return + new_sfo = sfo.SFO() + new_sfo.create_additional_attrs(search_space) + + agent = new_sfo._generate_random_agent(search_space.agents[0]) + + assert type(agent).__name__ == 'Agent' - new_function = function.Function(pointer=square) + +def test_sfo_calculate_lambda_i(): + search_space = search.SearchSpace(n_agents=10, n_variables=3, + lower_bound=[0, 0, 0], upper_bound=[10, 10, 10]) new_sfo = 
sfo.SFO() + new_sfo.create_additional_attrs(search_space) + + lambda_i = new_sfo._calculate_lambda_i(10, 10) + + assert lambda_i[0] != 0 - search_space = search.SearchSpace(n_agents=10, n_iterations=500, - n_variables=3, lower_bound=[0, 0, 0], - upper_bound=[10, 10, 10]) - history = new_sfo.run(search_space, new_function, pre_evaluate=hook) +def test_sfo_update_sailfish(): + search_space = search.SearchSpace(n_agents=10, n_variables=3, + lower_bound=[0, 0, 0], upper_bound=[10, 10, 10]) + + new_sfo = sfo.SFO() + new_sfo.create_additional_attrs(search_space) + + position = new_sfo._update_sailfish( + search_space.agents[0], search_space.best_agent, search_space.agents[0], 0.5) + + assert position[0][0] != 0 + + +def test_sfo_update(): + def square(x): + return np.sum(x**2) + + search_space = search.SearchSpace(n_agents=10, n_variables=5, + lower_bound=[0, 0, 0, 0, 0], upper_bound=[10, 10, 10, 10, 10]) + + new_sfo = sfo.SFO() + new_sfo.create_additional_attrs(search_space) - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 + new_sfo.update(search_space, square, 1) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm sfo failed to converge.' 
+ new_sfo.A = 1 + new_sfo.update(search_space, square, 350) diff --git a/tests/opytimizer/optimizers/swarm/test_sos.py b/tests/opytimizer/optimizers/swarm/test_sos.py index 00fa8856..f5a4d772 100644 --- a/tests/opytimizer/optimizers/swarm/test_sos.py +++ b/tests/opytimizer/optimizers/swarm/test_sos.py @@ -1,38 +1,54 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.swarm import sos from opytimizer.spaces import search -from opytimizer.utils import constants -np.random.seed(0) +def test_sos_mutualism(): + def square(x): + return np.sum(x**2) + + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) -def test_sos_build(): new_sos = sos.SOS() - assert new_sos.built == True + new_sos._mutualism( + search_space.agents[0], search_space.agents[1], search_space.best_agent, square) -def test_sos_run(): +def test_sos_commensalism(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + + new_sos = sos.SOS() + + new_sos._commensalism( + search_space.agents[0], search_space.agents[1], search_space.best_agent, square) - new_function = function.Function(pointer=square) + +def test_sos_parasitism(): + def square(x): + return np.sum(x**2) + + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) new_sos = sos.SOS() - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + new_sos._parasitism(search_space.agents[0], search_space.agents[1], square) - history = new_sos.run(search_space, new_function, pre_evaluate=hook) - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 +def test_sos_update(): + def square(x): + return np.sum(x**2) + + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], 
upper_bound=[10, 10]) + + new_sos = sos.SOS() - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm sos failed to converge.' + new_sos.update(search_space, square) diff --git a/tests/opytimizer/optimizers/swarm/test_ssa.py b/tests/opytimizer/optimizers/swarm/test_ssa.py index 9b5957de..bcdfffdd 100644 --- a/tests/opytimizer/optimizers/swarm/test_ssa.py +++ b/tests/opytimizer/optimizers/swarm/test_ssa.py @@ -1,38 +1,17 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.swarm import ssa from opytimizer.spaces import search -from opytimizer.utils import constants np.random.seed(0) -def test_ssa_build(): - new_ssa = ssa.SSA() - - assert new_ssa.built == True - - -def test_ssa_run(): - def square(x): - return np.sum(x**2) - - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) +def test_ssa_update(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) new_ssa = ssa.SSA() - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_ssa.run(search_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 - - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm ssa failed to converge.' 
+ new_ssa.update(search_space, 1, 10) + new_ssa.update(search_space, 5, 10) + new_ssa.update(search_space, 10, 10) diff --git a/tests/opytimizer/optimizers/swarm/test_sso.py b/tests/opytimizer/optimizers/swarm/test_sso.py index 1ccab89b..2bb352a9 100644 --- a/tests/opytimizer/optimizers/swarm/test_sso.py +++ b/tests/opytimizer/optimizers/swarm/test_sso.py @@ -1,21 +1,17 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.swarm import sso from opytimizer.spaces import search -from opytimizer.utils import constants -np.random.seed(0) - -def test_sso_hyperparams(): - hyperparams = { +def test_sso_params(): + params = { 'C_w': 0.1, 'C_p': 0.4, 'C_g': 0.9 } - new_sso = sso.SSO(hyperparams=hyperparams) + new_sso = sso.SSO(params=params) assert new_sso.C_w == 0.1 @@ -24,7 +20,7 @@ def test_sso_hyperparams(): assert new_sso.C_g == 0.9 -def test_sso_hyperparams_setter(): +def test_sso_params_setter(): new_sso = sso.SSO() try: @@ -64,31 +60,39 @@ def test_sso_hyperparams_setter(): assert new_sso.C_g == 0.9 -def test_sso_build(): +def test_sso_create_additional_attrs(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + new_sso = sso.SSO() + new_sso.create_additional_attrs(search_space) + + try: + new_sso.local_position = 1 + except: + new_sso.local_position = np.array([1]) - assert new_sso.built == True + assert new_sso.local_position == np.array([1]) -def test_sso_run(): +def test_sso_evaluate(): def square(x): return np.sum(x**2) - def hook(optimizer, space, function): - return - - new_function = function.Function(pointer=square) + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) new_sso = sso.SSO() + new_sso.create_additional_attrs(search_space) - search_space = search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) + new_sso.evaluate(search_space, square) - history = 
new_sso.run(search_space, new_function, pre_evaluate=hook) - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 +def test_sso_update(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + + new_sso = sso.SSO() + new_sso.create_additional_attrs(search_space) - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm sso failed to converge.' + new_sso.update(search_space) diff --git a/tests/opytimizer/optimizers/swarm/test_woa.py b/tests/opytimizer/optimizers/swarm/test_woa.py index 8b14619b..9ccae04f 100644 --- a/tests/opytimizer/optimizers/swarm/test_woa.py +++ b/tests/opytimizer/optimizers/swarm/test_woa.py @@ -1,24 +1,20 @@ import numpy as np -from opytimizer.core import function from opytimizer.optimizers.swarm import woa from opytimizer.spaces import search -from opytimizer.utils import constants -np.random.seed(0) - -def test_woa_hyperparams(): - hyperparams = { +def test_woa_params(): + params = { 'b': 1 } - new_woa = woa.WOA(hyperparams=hyperparams) + new_woa = woa.WOA(params=params) assert new_woa.b == 1 -def test_woa_hyperparams_setter(): +def test_woa_params_setter(): new_woa = woa.WOA() try: @@ -27,31 +23,20 @@ def test_woa_hyperparams_setter(): new_woa.b = 1 -def test_woa_build(): - new_woa = woa.WOA() - - assert new_woa.built == True +def test_woa_generate_random_agent(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) + new_woa = woa.WOA() + agent = new_woa._generate_random_agent(search_space.agents[0]) -def test_woa_run(): - def square(x): - return np.sum(x**2) + assert type(agent).__name__ == 'Agent' - def hook(optimizer, space, function): - return - new_function = function.Function(pointer=square) +def test_woa_update(): + search_space = search.SearchSpace(n_agents=10, n_variables=2, + lower_bound=[0, 0], upper_bound=[10, 10]) new_woa = woa.WOA() - search_space = 
search.SearchSpace(n_agents=10, n_iterations=100, - n_variables=2, lower_bound=[0, 0], - upper_bound=[10, 10]) - - history = new_woa.run(search_space, new_function, pre_evaluate=hook) - - assert len(history.agents) > 0 - assert len(history.best_agent) > 0 - - best_fitness = history.best_agent[-1][1] - assert best_fitness <= constants.TEST_EPSILON, 'The algorithm woa failed to converge.' + new_woa.update(search_space, 1, 10) diff --git a/tests/opytimizer/spaces/test_boolean.py b/tests/opytimizer/spaces/test_boolean.py index 3ab5da75..f4544280 100644 --- a/tests/opytimizer/spaces/test_boolean.py +++ b/tests/opytimizer/spaces/test_boolean.py @@ -2,16 +2,16 @@ def test_boolean_initialize_agents(): - new_boolean_space = boolean.BooleanSpace() + new_boolean_space = boolean.BooleanSpace(1, 1) assert new_boolean_space.agents[0].position[0][0] == 0 or new_boolean_space.agents[0].position[0][0] == 1 -def test_boolean_clip_limits(): - new_boolean_space = boolean.BooleanSpace() +def test_boolean_clip_by_bound(): + new_boolean_space = boolean.BooleanSpace(1, 1) new_boolean_space.agents[0].position[0][0] = 20 - new_boolean_space.clip_limits() + new_boolean_space.clip_by_bound() assert new_boolean_space.agents[0].position[0][0] == 1 diff --git a/tests/opytimizer/spaces/test_grid.py b/tests/opytimizer/spaces/test_grid.py index c8593bc1..9fb5ded4 100644 --- a/tests/opytimizer/spaces/test_grid.py +++ b/tests/opytimizer/spaces/test_grid.py @@ -4,48 +4,55 @@ def test_grid_space_step(): - new_grid_space = grid.GridSpace() + new_grid_space = grid.GridSpace(1, 0.1, 0, 1) - assert new_grid_space.step == (0.1,) + assert new_grid_space.step == 0.1 def test_grid_space_step_setter(): - new_grid_space = grid.GridSpace() + new_grid_space = grid.GridSpace(1, 0.1, 0, 1) try: new_grid_space.step = 'a' except: - new_grid_space.step = (0.1,) + new_grid_space.step = np.array([0.1]) - assert new_grid_space.step == (0.1,) + assert new_grid_space.step == 0.1 + + try: + new_grid_space.step = 
np.array([0.1, 0.1]) + except: + new_grid_space.step = np.array([0.1]) + + assert new_grid_space.step == 0.1 def test_grid_space_grid(): - new_grid_space = grid.GridSpace() + new_grid_space = grid.GridSpace(1, 0.1, 0, 1) assert len(new_grid_space.grid) == 11 def test_grid_space_terminals_setter(): try: - new_grid_space = grid.GridSpace() + new_grid_space = grid.GridSpace(1, 0.1, 0, 1) new_grid_space.grid = 'a' except: - new_grid_space = grid.GridSpace() - new_grid_space.grid = np.array((1, 1)) + new_grid_space = grid.GridSpace(1, 0.1, 0, 1) + new_grid_space.grid = np.array([1, 1]) assert len(new_grid_space.grid) == 2 def test_grid_create_grid(): - new_grid_space = grid.GridSpace() + new_grid_space = grid.GridSpace(1, 0.1, 0, 1) - new_grid_space._create_grid((0.1, 0.1), (1, 1), (2, 2)) + new_grid_space._create_grid() - assert len(new_grid_space.grid) == 121 + assert len(new_grid_space.grid) == 11 def test_grid_initialize_agents(): - new_grid_space = grid.GridSpace() + new_grid_space = grid.GridSpace(1, 0.1, 0, 1) assert new_grid_space.agents[0].position[0] != 1 diff --git a/tests/opytimizer/spaces/test_hyper_complex.py b/tests/opytimizer/spaces/test_hyper_complex.py index 6effbc0f..21f2a179 100644 --- a/tests/opytimizer/spaces/test_hyper_complex.py +++ b/tests/opytimizer/spaces/test_hyper_complex.py @@ -2,16 +2,16 @@ def test_hyper_complex_initialize_agents(): - new_hyper_complex_space = hyper_complex.HyperComplexSpace() + new_hyper_complex_space = hyper_complex.HyperComplexSpace(1, 1, 1) assert new_hyper_complex_space.agents[0].position[0][0] > 0 -def test_hyper_complex_clip_limits(): - new_hyper_complex_space = hyper_complex.HyperComplexSpace() +def test_hyper_complex_clip_by_bound(): + new_hyper_complex_space = hyper_complex.HyperComplexSpace(1, 1, 1) new_hyper_complex_space.agents[0].position[0][0] = 20 - new_hyper_complex_space.clip_limits() + new_hyper_complex_space.clip_by_bound() assert new_hyper_complex_space.agents[0].position[0][0] != 20 diff --git 
a/tests/opytimizer/spaces/test_search.py b/tests/opytimizer/spaces/test_search.py index be427b98..8c603030 100644 --- a/tests/opytimizer/spaces/test_search.py +++ b/tests/opytimizer/spaces/test_search.py @@ -2,16 +2,16 @@ def test_search_initialize_agents(): - new_search_space = search.SearchSpace() + new_search_space = search.SearchSpace(1, 1, 0, 1) assert new_search_space.agents[0].position[0] != 0 -def test_search_clip_limits(): - new_search_space = search.SearchSpace() +def test_search_clip_by_bound(): + new_search_space = search.SearchSpace(1, 1, 0, 1) new_search_space.agents[0].position[0] = 20 - new_search_space.clip_limits() + new_search_space.clip_by_bound() assert new_search_space.agents[0].position[0] != 20 diff --git a/tests/opytimizer/spaces/test_tree.py b/tests/opytimizer/spaces/test_tree.py index c91084a9..e49eb18d 100644 --- a/tests/opytimizer/spaces/test_tree.py +++ b/tests/opytimizer/spaces/test_tree.py @@ -2,166 +2,134 @@ from opytimizer.spaces import tree -def test_tree_space_n_trees(): - new_tree_space = tree.TreeSpace(n_trees=1) - - assert new_tree_space.n_trees == 1 - - -def test_tree_space_n_trees_setter(): - try: - new_tree_space = tree.TreeSpace(n_trees=0.0) - except: - new_tree_space = tree.TreeSpace(n_trees=1) - - try: - new_tree_space = tree.TreeSpace(n_trees=0) - except: - new_tree_space = tree.TreeSpace(n_trees=1) - - assert new_tree_space.n_trees == 1 - - def test_tree_space_n_terminals(): - new_tree_space = tree.TreeSpace(n_terminals=1) + new_tree_space = tree.TreeSpace(1, 1, 0, 1) assert new_tree_space.n_terminals == 1 def test_tree_space_n_terminals_setter(): try: - new_tree_space = tree.TreeSpace(n_terminals=0.0) + new_tree_space = tree.TreeSpace(1, 1, 0, 1, n_terminals=0.0) except: - new_tree_space = tree.TreeSpace(n_terminals=1) + new_tree_space = tree.TreeSpace(1, 1, 0, 1, n_terminals=1) try: - new_tree_space = tree.TreeSpace(n_terminals=0) + new_tree_space = tree.TreeSpace(1, 1, 0, 1, n_terminals=0) except: - new_tree_space = 
tree.TreeSpace(n_terminals=1) + new_tree_space = tree.TreeSpace(1, 1, 0, 1, n_terminals=1) assert new_tree_space.n_terminals == 1 def test_tree_space_min_depth(): - new_tree_space = tree.TreeSpace(min_depth=1) + new_tree_space = tree.TreeSpace(1, 1, 0, 1) assert new_tree_space.min_depth == 1 def test_tree_space_min_depth_setter(): try: - new_tree_space = tree.TreeSpace(min_depth=0.0) + new_tree_space = tree.TreeSpace(1, 1, 0, 1, min_depth=0.0) except: - new_tree_space = tree.TreeSpace(min_depth=1) + new_tree_space = tree.TreeSpace(1, 1, 0, 1, min_depth=1) try: - new_tree_space = tree.TreeSpace(min_depth=0) + new_tree_space = tree.TreeSpace(1, 1, 0, 1, min_depth=0) except: - new_tree_space = tree.TreeSpace(min_depth=1) + new_tree_space = tree.TreeSpace(1, 1, 0, 1, min_depth=1) assert new_tree_space.min_depth == 1 def test_tree_space_max_depth(): - new_tree_space = tree.TreeSpace(max_depth=1) + new_tree_space = tree.TreeSpace(1, 1, 0, 1, max_depth=1) assert new_tree_space.max_depth == 1 def test_tree_space_max_depth_setter(): try: - new_tree_space = tree.TreeSpace(max_depth=0.0) + new_tree_space = tree.TreeSpace(1, 1, 0, 1, max_depth=0.0) except: - new_tree_space = tree.TreeSpace(max_depth=1) + new_tree_space = tree.TreeSpace(1, 1, 0, 1, max_depth=1) try: - new_tree_space = tree.TreeSpace(max_depth=0) + new_tree_space = tree.TreeSpace(1, 1, 0, 1, max_depth=0) except: - new_tree_space = tree.TreeSpace(max_depth=1) + new_tree_space = tree.TreeSpace(1, 1, 0, 1, max_depth=1) assert new_tree_space.max_depth == 1 def test_tree_space_functions(): - new_tree_space = tree.TreeSpace(functions=['SUM']) + new_tree_space = tree.TreeSpace(1, 1, 0, 1, functions=['SUM']) assert len(new_tree_space.functions) == 1 def test_tree_space_functions_setter(): try: - new_tree_space = tree.TreeSpace(functions='a') + new_tree_space = tree.TreeSpace(1, 1, 0, 1, functions='a') except: - new_tree_space = tree.TreeSpace(functions=['SUM']) + new_tree_space = tree.TreeSpace(1, 1, 0, 1, 
functions=['SUM']) assert len(new_tree_space.functions) == 1 def test_tree_space_terminals(): - new_tree_space = tree.TreeSpace() + new_tree_space = tree.TreeSpace(1, 1, 0, 1) assert len(new_tree_space.terminals) == 1 def test_tree_space_terminals_setter(): try: - new_tree_space = tree.TreeSpace() + new_tree_space = tree.TreeSpace(1, 1, 0, 1) new_tree_space.terminals = 'a' except: - new_tree_space = tree.TreeSpace() + new_tree_space = tree.TreeSpace(1, 1, 0, 1) new_tree_space.terminals = [] assert len(new_tree_space.terminals) == 0 def test_tree_space_trees(): - new_tree_space = tree.TreeSpace() + new_tree_space = tree.TreeSpace(1, 1, 0, 1) assert len(new_tree_space.trees) == 1 def test_tree_space_trees_setter(): try: - new_tree_space = tree.TreeSpace() + new_tree_space = tree.TreeSpace(1, 1, 0, 1) new_tree_space.trees = 'a' except: - new_tree_space = tree.TreeSpace() + new_tree_space = tree.TreeSpace(1, 1, 0, 1) new_tree_space.trees = [] assert len(new_tree_space.trees) == 0 def test_tree_space_best_tree(): - new_tree_space = tree.TreeSpace() + new_tree_space = tree.TreeSpace(1, 1, 0, 1) assert isinstance(new_tree_space.best_tree, node.Node) def test_tree_space_best_tree_setter(): try: - new_tree_space = tree.TreeSpace() + new_tree_space = tree.TreeSpace(1, 1, 0, 1) new_tree_space.best_tree = 'a' except: - new_tree_space = tree.TreeSpace() - new_tree_space.best_tree = node.Node(name='0', node_type='FUNCTION') + new_tree_space = tree.TreeSpace(1, 1, 0, 1) + new_tree_space.best_tree = node.Node(name='0', category='FUNCTION') assert isinstance(new_tree_space.best_tree, node.Node) -def test_tree_space_initialize_agents(): - new_tree_space = tree.TreeSpace() - - assert new_tree_space.agents[0].position[0] != 0 - - -def test_tree_space_initialize_terminals(): - new_tree_space = tree.TreeSpace() - - assert new_tree_space.terminals[0].position[0] != 0 - - def test_tree_space_create_terminals(): - new_tree_space = tree.TreeSpace(n_terminals=2) + new_tree_space = 
tree.TreeSpace(1, 1, 0, 1, n_terminals=2) new_tree_space._create_terminals() @@ -169,15 +137,28 @@ def test_tree_space_create_terminals(): def test_tree_space_create_trees(): - new_tree_space = tree.TreeSpace(n_trees=2) + new_tree_space = tree.TreeSpace(2, 1, 0, 1) new_tree_space._create_trees() assert len(new_tree_space.trees) == 2 +def test_tree_space_initialize_agents(): + new_tree_space = tree.TreeSpace(1, 1, 0, 1) + + assert new_tree_space.agents[0].position[0] != 0 + + +def test_tree_space_initialize_terminals(): + new_tree_space = tree.TreeSpace(1, 1, 0, 1) + + assert new_tree_space.terminals[0].position[0] != 0 + + def test_tree_space_grow(): - new_tree_space = tree.TreeSpace(min_depth=1, max_depth=5) + new_tree_space = tree.TreeSpace( + 1, 1, 0, 1, min_depth=1, max_depth=5, functions=['SUM', 'SUB', 'MUL', 'DIV']) new_tree = new_tree_space.grow( new_tree_space.min_depth, new_tree_space.max_depth) diff --git a/tests/opytimizer/test_opytimizer.py b/tests/opytimizer/test_opytimizer.py index 9f891623..7e93d8e2 100644 --- a/tests/opytimizer/test_opytimizer.py +++ b/tests/opytimizer/test_opytimizer.py @@ -1,71 +1,244 @@ -import numpy as np - import opytimizer from opytimizer.core import function from opytimizer.optimizers.swarm import pso from opytimizer.spaces import search +from opytimizer.utils import callback, history + + +def test_opytimizer_space(): + space = search.SearchSpace(1, 1, 0, 1) + func = function.Function(callable) + optimizer = pso.PSO() + + new_opytimizer = opytimizer.Opytimizer(space, optimizer, func) + + assert type(new_opytimizer.space).__name__ == 'SearchSpace' + + +def test_opytimizer_space_setter(): + space = search.SearchSpace(1, 1, 0, 1) + func = function.Function(callable) + optimizer = pso.PSO() + + new_opytimizer = opytimizer.Opytimizer(space, optimizer, func) + + try: + space.built = False + new_opytimizer.space = space + except: + space.built = True + new_opytimizer.space = space + + assert type(new_opytimizer.space).__name__ == 
'SearchSpace' + + +def test_opytimizer_optimizer(): + space = search.SearchSpace(1, 1, 0, 1) + func = function.Function(callable) + optimizer = pso.PSO() + + new_opytimizer = opytimizer.Opytimizer(space, optimizer, func) + + assert type(new_opytimizer.optimizer).__name__ == 'PSO' + + +def test_opytimizer_optimizer_setter(): + space = search.SearchSpace(1, 1, 0, 1) + func = function.Function(callable) + optimizer = pso.PSO() + + new_opytimizer = opytimizer.Opytimizer(space, optimizer, func) + + try: + optimizer.built = False + new_opytimizer.optimizer = optimizer + except: + optimizer.built = True + new_opytimizer.optimizer = optimizer + + assert type(new_opytimizer.optimizer).__name__ == 'PSO' + + +def test_opytimizer_function(): + space = search.SearchSpace(1, 1, 0, 1) + func = function.Function(callable) + optimizer = pso.PSO() + + new_opytimizer = opytimizer.Opytimizer(space, optimizer, func) + + assert type(new_opytimizer.function).__name__ == 'Function' + + +def test_opytimizer_function_setter(): + space = search.SearchSpace(1, 1, 0, 1) + func = function.Function(callable) + optimizer = pso.PSO() + + new_opytimizer = opytimizer.Opytimizer(space, optimizer, func) + + try: + func.built = False + new_opytimizer.function = func + except: + func.built = True + new_opytimizer.function = func + + assert type(new_opytimizer.function).__name__ == 'Function' + + +def test_opytimizer_history(): + space = search.SearchSpace(1, 1, 0, 1) + func = function.Function(callable) + optimizer = pso.PSO() + + new_opytimizer = opytimizer.Opytimizer(space, optimizer, func) + + assert type(new_opytimizer.history).__name__ == 'History' -def test_opytimizer_build(): - def square(x): - return np.sum(x**2) +def test_opytimizer_history_setter(): + space = search.SearchSpace(1, 1, 0, 1) + func = function.Function(callable) + optimizer = pso.PSO() + hist = history.History() - assert square(2) == 4 + new_opytimizer = opytimizer.Opytimizer(space, optimizer, func) - new_function = 
function.Function(pointer=square) + try: + new_opytimizer.history = 1 + except: + new_opytimizer.history = hist + + assert type(new_opytimizer.history).__name__ == 'History' + + +def test_opytimizer_iteration(): + space = search.SearchSpace(1, 1, 0, 1) + func = function.Function(callable) + optimizer = pso.PSO() - lb = [0] + new_opytimizer = opytimizer.Opytimizer(space, optimizer, func) - ub = [10] + assert new_opytimizer.iteration == 0 - new_space = search.SearchSpace(lower_bound=lb, upper_bound=ub) - new_pso = pso.PSO() +def test_opytimizer_iterations_setter(): + space = search.SearchSpace(1, 1, 0, 1) + func = function.Function(callable) + optimizer = pso.PSO() + + new_opytimizer = opytimizer.Opytimizer(space, optimizer, func) try: - new_space.built = False - new_opytimizer = opytimizer.Opytimizer( - space=new_space, optimizer=new_pso, function=new_function) + new_opytimizer.iteration = 'a' except: - new_space.built = True - new_opytimizer = opytimizer.Opytimizer( - space=new_space, optimizer=new_pso, function=new_function) + new_opytimizer.iteration = 0 + + assert new_opytimizer.iteration == 0 try: - new_pso.built = False - new_opytimizer = opytimizer.Opytimizer( - space=new_space, optimizer=new_pso, function=new_function) + new_opytimizer.iteration = -1 except: - new_pso.built = True - new_opytimizer = opytimizer.Opytimizer( - space=new_space, optimizer=new_pso, function=new_function) + new_opytimizer.iteration = 0 + + assert new_opytimizer.iteration == 0 + + +def test_opytimizer_total_iterations(): + space = search.SearchSpace(1, 1, 0, 1) + func = function.Function(callable) + optimizer = pso.PSO() + + new_opytimizer = opytimizer.Opytimizer(space, optimizer, func) + + assert new_opytimizer.total_iterations == 0 + + +def test_opytimizer_total_iterations_setter(): + space = search.SearchSpace(1, 1, 0, 1) + func = function.Function(callable) + optimizer = pso.PSO() + + new_opytimizer = opytimizer.Opytimizer(space, optimizer, func) + + try: + 
new_opytimizer.total_iterations = 'a' + except: + new_opytimizer.total_iterations = 0 + + assert new_opytimizer.total_iterations == 0 try: - new_function.built = False - new_opytimizer = opytimizer.Opytimizer( - space=new_space, optimizer=new_pso, function=new_function) + new_opytimizer.total_iterations = -1 except: - new_function.built = True - new_opytimizer = opytimizer.Opytimizer( - space=new_space, optimizer=new_pso, function=new_function) + new_opytimizer.total_iterations = 0 + + assert new_opytimizer.total_iterations == 0 + + +def test_opytimizer_evaluate_args(): + space = search.SearchSpace(1, 1, 0, 1) + func = function.Function(callable) + optimizer = pso.PSO() + + new_opytimizer = opytimizer.Opytimizer(space, optimizer, func) + + assert len(new_opytimizer.evaluate_args) == 2 + + +def test_opytimizer_update_args(): + space = search.SearchSpace(1, 1, 0, 1) + func = function.Function(callable) + optimizer = pso.PSO() + + new_opytimizer = opytimizer.Opytimizer(space, optimizer, func) + + assert len(new_opytimizer.update_args) == 1 + + +def test_opytimizer_evaluate(): + space = search.SearchSpace(1, 1, 0, 1) + func = function.Function(callable) + optimizer = pso.PSO() + callbacks = callback.CallbackVessel([]) + + new_opytimizer = opytimizer.Opytimizer(space, optimizer, func) + + new_opytimizer.evaluate(callbacks) + + +def test_opytimizer_update(): + space = search.SearchSpace(1, 1, 0, 1) + func = function.Function(callable) + optimizer = pso.PSO() + callbacks = callback.CallbackVessel([]) + + new_opytimizer = opytimizer.Opytimizer(space, optimizer, func) + + new_opytimizer.update(callbacks) def test_opytimizer_start(): - def square(x): - return np.sum(x**2) + space = search.SearchSpace(1, 1, 0, 1) + func = function.Function(callable) + optimizer = pso.PSO() + + new_opytimizer = opytimizer.Opytimizer(space, optimizer, func) + + new_opytimizer.start(n_iterations=1) - new_function = function.Function(pointer=square) - lb = [0] +def test_opytimizer_save(): + space 
= search.SearchSpace(1, 1, 0, 1) + func = function.Function(callable) + optimizer = pso.PSO() - ub = [10] + new_opytimizer = opytimizer.Opytimizer(space, optimizer, func) - new_space = search.SearchSpace(lower_bound=lb, upper_bound=ub) + new_opytimizer.save('out.pkl') - new_pso = pso.PSO() - new_opytimizer = opytimizer.Opytimizer( - space=new_space, optimizer=new_pso, function=new_function) +def test_opytimizer_load(): + new_opytimizer = opytimizer.Opytimizer.load('out.pkl') - history = new_opytimizer.start() - assert isinstance(history, opytimizer.utils.history.History) + assert type(new_opytimizer).__name__ == 'Opytimizer' diff --git a/tests/opytimizer/utils/test_callback.py b/tests/opytimizer/utils/test_callback.py new file mode 100644 index 00000000..6baea3e6 --- /dev/null +++ b/tests/opytimizer/utils/test_callback.py @@ -0,0 +1,92 @@ +from opytimizer.utils import callback + + +def test_callback(): + new_callback = callback.Callback() + + new_callback.on_iteration_begin(None, None) + new_callback.on_iteration_end(None, None) + new_callback.on_evaluate_before() + new_callback.on_evaluate_after() + new_callback.on_update_before() + new_callback.on_update_after() + + +def test_callback_vessel(): + new_callback_1 = callback.Callback() + new_callback_2 = callback.Callback() + + new_callback_vessel = callback.CallbackVessel( + [new_callback_1, new_callback_2]) + + new_callback_vessel.on_iteration_begin(None, None) + new_callback_vessel.on_iteration_end(None, None) + new_callback_vessel.on_evaluate_before() + new_callback_vessel.on_evaluate_after() + new_callback_vessel.on_update_before() + new_callback_vessel.on_update_after() + + +def test_callback_vessel_callbacks(): + new_callback_vessel = callback.CallbackVessel([]) + + assert new_callback_vessel.callbacks == [] + + +def test_callback_vessel_callbacks_setter(): + new_callback_vessel = callback.CallbackVessel([]) + + try: + new_callback_vessel.callbacks = 1 + except: + new_callback_vessel.callbacks = [] + + assert 
new_callback_vessel.callbacks == [] + + +def test_checkpoint_callback(): + new_checkpoint_callback = callback.CheckpointCallback() + + assert new_checkpoint_callback.file_path == 'checkpoint.pkl' + assert new_checkpoint_callback.frequency == 0 + + +def test_checkpoint_callback_file_path_setter(): + new_checkpoint_callback = callback.CheckpointCallback() + + try: + new_checkpoint_callback.file_path = 1 + except: + new_checkpoint_callback.file_path = 'out' + + assert new_checkpoint_callback.file_path == 'out' + + +def test_checkpoint_callback_frequency_setter(): + new_checkpoint_callback = callback.CheckpointCallback() + + try: + new_checkpoint_callback.frequency = 'a' + except: + new_checkpoint_callback.frequency = 1 + + assert new_checkpoint_callback.frequency == 1 + + try: + new_checkpoint_callback.frequency = -1 + except: + new_checkpoint_callback.frequency = 1 + + assert new_checkpoint_callback.frequency == 1 + + +def test_checkpoint_callback_on_iteration_end(): + new_checkpoint_callback = callback.CheckpointCallback(frequency=1) + + class Model: + def save(self, file_path): + pass + + model = Model() + + new_checkpoint_callback.on_iteration_end(1, model) diff --git a/tests/opytimizer/utils/test_constant.py b/tests/opytimizer/utils/test_constant.py new file mode 100644 index 00000000..e207c13e --- /dev/null +++ b/tests/opytimizer/utils/test_constant.py @@ -0,0 +1,28 @@ +import sys + +from opytimizer.utils import constant + + +def test_constant_constants(): + assert constant.EPSILON == 1e-32 + + assert constant.FLOAT_MAX == sys.float_info.max + + assert constant.LIGHT_SPEED == 3e5 + + assert constant.FUNCTION_N_ARGS == { + 'SUM': 2, + 'SUB': 2, + 'MUL': 2, + 'DIV': 2, + 'EXP': 1, + 'SQRT': 1, + 'LOG': 1, + 'ABS': 1, + 'SIN': 1, + 'COS': 1 + } + + assert constant.TEST_EPSILON == 100 + + assert constant.TOURNAMENT_SIZE == 2 diff --git a/tests/opytimizer/utils/test_constants.py b/tests/opytimizer/utils/test_constants.py deleted file mode 100644 index 
69a78afd..00000000 --- a/tests/opytimizer/utils/test_constants.py +++ /dev/null @@ -1,30 +0,0 @@ -import sys - -from opytimizer.utils import constants - - -def test_constants(): - assert constants.EPSILON == 1e-32 - - assert constants.FLOAT_MAX == sys.float_info.max - - assert constants.HISTORY_KEYS == ['agents', 'best_agent', 'local'] - - assert constants.LIGHT_SPEED == 3e5 - - assert constants.N_ARGS_FUNCTION == { - 'SUM': 2, - 'SUB': 2, - 'MUL': 2, - 'DIV': 2, - 'EXP': 1, - 'SQRT': 1, - 'LOG': 1, - 'ABS': 1, - 'SIN': 1, - 'COS': 1 - } - - assert constants.TEST_EPSILON == 100 - - assert constants.TOURNAMENT_SIZE == 2 diff --git a/tests/opytimizer/utils/test_decorator.py b/tests/opytimizer/utils/test_decorator.py deleted file mode 100644 index fdcd2c33..00000000 --- a/tests/opytimizer/utils/test_decorator.py +++ /dev/null @@ -1,29 +0,0 @@ -import numpy as np - -from opytimizer.utils import decorator - - -def test_hyper_spanning(): - lb = np.full(1, 10) - ub = np.full(1, 20) - - @decorator.hyper_spanning(lb, ub) - def call(x): - return np.sum(x) - - y = call(np.array([[0.5], [0.5]])) - - assert y == 30 - - -def test_pre_evaluate(): - @decorator.pre_evaluate - def call(obj, x): - return x - - def f(): - return True - - assert f() == True - - call(f, 1, hook=None) diff --git a/tests/opytimizer/utils/test_exception.py b/tests/opytimizer/utils/test_exception.py index 190853c4..de28b824 100644 --- a/tests/opytimizer/utils/test_exception.py +++ b/tests/opytimizer/utils/test_exception.py @@ -1,7 +1,7 @@ from opytimizer.utils import exception -def test_error(): +def test_exception_error(): new_exception = exception.Error('Error', 'error') try: @@ -10,7 +10,7 @@ def test_error(): pass -def test_argument_error(): +def test_exception_argument_error(): new_exception = exception.ArgumentError('error') try: @@ -19,7 +19,7 @@ def test_argument_error(): pass -def test_build_error(): +def test_exception_build_error(): new_exception = exception.BuildError('error') try: @@ -28,7 
+28,7 @@ def test_build_error(): pass -def test_size_error(): +def test_exception_size_error(): new_exception = exception.SizeError('error') try: @@ -37,7 +37,7 @@ def test_size_error(): pass -def test_type_error(): +def test_exception_type_error(): new_exception = exception.TypeError('error') try: @@ -46,7 +46,7 @@ def test_type_error(): pass -def test_value_error(): +def test_exception_value_error(): new_exception = exception.ValueError('error') try: diff --git a/tests/opytimizer/utils/test_history.py b/tests/opytimizer/utils/test_history.py index 59ec8330..9627771f 100644 --- a/tests/opytimizer/utils/test_history.py +++ b/tests/opytimizer/utils/test_history.py @@ -1,30 +1,29 @@ -import os - from opytimizer.core import agent from opytimizer.utils import history -def test_history_store_best_only(): +def test_history_save_agents(): new_history = history.History() - assert new_history.store_best_only is False + assert new_history.save_agents is False -def test_history_store_best_only_setter(): +def test_history_save_agents_setter(): new_history = history.History() try: - new_history.store_best_only = 'a' + new_history.save_agents = 'a' except: - new_history.store_best_only = True + new_history.save_agents = True - assert new_history.store_best_only is True + assert new_history.save_agents is True def test_history_dump(): - new_history = history.History() + new_history = history.History(save_agents=True) - agents = [agent.Agent(n_variables=2, n_dimensions=1) for _ in range(5)] + agents = [agent.Agent(n_variables=2, n_dimensions=1, lower_bound=[ + 0, 0], upper_bound=[1, 1]) for _ in range(5)] new_history.dump(agents=agents, best_agent=agents[4], value=0) @@ -32,44 +31,48 @@ def test_history_dump(): assert len(new_history.best_agent) > 0 assert new_history.value[0] == 0 + new_history = history.History(save_agents=False) -def test_history_get(): - new_history = history.History() - - agents = [agent.Agent(n_variables=2, n_dimensions=1) for _ in range(5)] - - 
new_history.dump(agents=agents, best_agent=agents[4], value=0) - - try: - agents = new_history.get(key='agents', index=0) - except: - agents = new_history.get(key='agents', index=(0, 0)) + new_history.dump(agents=agents) - try: - agents = new_history.get(key='agents', index=(0, 0, 0)) - except: - agents = new_history.get(key='agents', index=(0, 0)) + assert hasattr(new_history, 'agents') is False - assert agents.shape == (2, 1) +def test_history_get_convergence(): + new_history = history.History(save_agents=True) -def test_history_save(): - new_history = history.History() + agents = [agent.Agent(n_variables=2, n_dimensions=1, lower_bound=[ + 0, 0], upper_bound=[1, 1]) for _ in range(5)] - agents = [agent.Agent(n_variables=2, n_dimensions=1) for _ in range(5)] + new_history.dump( + agents=agents, best_agent=agents[4], local_position=agents[0].position, value=0) + new_history.dump( + agents=agents, best_agent=agents[4], local_position=agents[0].position, value=0) - new_history.dump(agents=agents, best_agent=agents[0]) + try: + agents_pos, agents_fit = new_history.get_convergence( + key='agents', index=5) + except: + agents_pos, agents_fit = new_history.get_convergence( + key='agents', index=0) - new_history.save('models/test.pkl') + assert agents_pos.shape == (2, 2) + assert agents_fit.shape == (2,) - assert os.path.isfile('./models/test.pkl') + best_agent_pos, best_agent_fit = new_history.get_convergence( + key='best_agent') + assert best_agent_pos.shape == (2, 2) + assert best_agent_fit.shape == (2,) -def test_history_load(): - new_history = history.History() + try: + local_position = new_history.get_convergence( + key='local_position', index=5) + except: + local_position = new_history.get_convergence(key='local_position') - new_history.load('models/test.pkl') + assert local_position.shape == (2,) - assert len(new_history.agents) > 0 + value = new_history.get_convergence(key='value') - print(new_history) + assert value.shape == (2,) diff --git 
a/tests/opytimizer/utils/test_logging.py b/tests/opytimizer/utils/test_logging.py index c94e938f..b00c06f3 100644 --- a/tests/opytimizer/utils/test_logging.py +++ b/tests/opytimizer/utils/test_logging.py @@ -1,19 +1,25 @@ from opytimizer.utils import logging -def test_get_console_handler(): +def test_logging_to_file(): + logger = logging.get_logger(__name__) + + assert logger.to_file('msg') == None + + +def test_logging_get_console_handler(): c = logging.get_console_handler() assert c != None -def test_get_timed_file_handler(): +def test_logging_get_timed_file_handler(): f = logging.get_timed_file_handler() assert f != None -def test_get_logger(): +def test_logging_get_logger(): logger = logging.get_logger(__name__) assert logger.name == 'test_logging' diff --git a/tests/opytimizer/visualization/test_convergence.py b/tests/opytimizer/visualization/test_convergence.py index 8b3276ab..d14a5cf8 100644 --- a/tests/opytimizer/visualization/test_convergence.py +++ b/tests/opytimizer/visualization/test_convergence.py @@ -1,20 +1,16 @@ -from opytimizer.utils import history from opytimizer.visualization import convergence def test_convergence_plot(): - new_history = history.History() - - new_history.load('models/test.pkl') - - agents = new_history.get(key='agents', index=(0, 0)) + agent_pos = [[0.5, 0.4, 0.3], [0.5, 0.4, 0.3]] try: - convergence.plot(agents[0], agents[1], labels=1) + convergence.plot(agent_pos[0], agent_pos[1], labels=1) except: - convergence.plot(agents[0], agents[1], labels=['agent[0]', 'agent[1]']) + convergence.plot(agent_pos[0], agent_pos[1], + labels=['agent[0]', 'agent[1]']) try: - convergence.plot(agents[0], agents[1], labels=['agent[0]']) + convergence.plot(agent_pos[0], agent_pos[1], labels=['agent[0]']) except: - convergence.plot(agents[0], agents[1]) + convergence.plot(agent_pos[0], agent_pos[1])