diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py
index 38627b7adb..5f4b24dfc2 100644
--- a/psyneulink/core/compositions/composition.py
+++ b/psyneulink/core/compositions/composition.py
@@ -333,12 +333,13 @@
 ^^^^^^^^^^^^
 
 `BIAS` `NodeRole` can be used to implement BIAS Nodes, which add a bias (constant value) to the input of another
-Node, that can also be modified by `learning `. A bias Node is implemented by adding a
-`ProcessingMechanism` to the Composition and requiring it to have the `BIAS` `NodeRole`. The ProcessingMechanims
-cannot have any afferent Projections, and should project to the `InputPort` containing the values to be biased. If
-the bias is to be learned, the `learnable ` attribute of the MappingProjeciton
-should be set to True. The value of the bias, and how it is applied to the values being biased are specified as
-described below:
+Node, which can also be modified by `learning `. A BIAS Node is implemented by adding a
+`ProcessingMechanism` to the Composition and requiring it to have the `BIAS` `NodeRole`. This can be done using
+any of the methods described `above ` for assigning `NodeRoles ` to a Node. The
+ProcessingMechanism cannot have any afferent Projections, and should project to the `InputPort` of the Node with
+the values to be biased. If the bias is to be learned, the `learnable ` attribute of
+the MappingProjection should be set to True. The value of the bias, and how it is applied to the values being biased,
+are specified as described below:
 
 *Single bias value*. To apply a single scalar bias value to all elements of the array being biased, the
 `default_variable ` of the BIAS Node should be specified as a scalar value, and the `matrix
diff --git a/psyneulink/library/compositions/pytorchwrappers.py b/psyneulink/library/compositions/pytorchwrappers.py
index ab8ce37e7c..9e95a04032 100644
--- a/psyneulink/library/compositions/pytorchwrappers.py
+++ b/psyneulink/library/compositions/pytorchwrappers.py
@@ -232,8 +232,10 @@ def __init__(self,
                                                    self._composition._get_node_index(node),
                                                    device,
                                                    context=context)
-            pytorch_node._is_bias = any(input_port.default_input == DEFAULT_VARIABLE
-                                        for input_port in node.input_ports)
+            # pytorch_node._is_bias = all(input_port.default_input == DEFAULT_VARIABLE
+            #                             for input_port in node.input_ports)
+            pytorch_node._is_bias = node in self._composition.get_nodes_by_role(NodeRole.BIAS)
+
             self.nodes_map[node] = pytorch_node
             self.wrapped_nodes.append(pytorch_node)
 
diff --git a/tests/composition/test_composition.py b/tests/composition/test_composition.py
index 3f898f11fe..ce0c93492f 100644
--- a/tests/composition/test_composition.py
+++ b/tests/composition/test_composition.py
@@ -8139,11 +8139,11 @@ def test_danglingControlledMech(self):
 
     @pytest.mark.parametrize(
         'removed_nodes, expected_dependencies', [
-            # (['A'], {'B': set(), 'C': set('B'), 'D': set('C'), 'E': set('C')}),
-            # (['C'], {'A': set(), 'B': set(), 'D': set(), 'E': set()}),
-            # (['E'], {'A': set(), 'B': set(), 'C': {'A', 'B'}, 'D': set('C')}),
-            # (['A', 'B'], {'C': set(), 'D': set('C'), 'E': set('C')}),
-            # (['D', 'E'], {'A': set(), 'B': set(), 'C': {'A', 'B'}}),
+            (['A'], {'B': set(), 'C': set('B'), 'D': set('C'), 'E': set('C')}),
+            (['C'], {'A': set(), 'B': set(), 'D': set(), 'E': set()}),
+            (['E'], {'A': set(), 'B': set(), 'C': {'A', 'B'}, 'D': set('C')}),
+            (['A', 'B'], {'C': set(), 'D': set('C'), 'E': set('C')}),
+            (['D', 'E'], {'A': set(), 'B': set(), 'C': {'A', 'B'}}),
             (['A', 'B', 'C', 'D', 'E'], {}),
         ]
     )
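For context, a minimal sketch of the BIAS-Node pattern documented in the composition.py hunk above. It is not part of the patch; it assumes the public `psyneulink` API (`Composition.add_node(..., required_roles=...)`, `NodeRole.BIAS`, and the `learnable` argument of `MappingProjection`), and the names `target`, `bias`, and `bias_demo` are illustrative only.

```python
# Sketch only: one way the documented BIAS-Node setup might look.
import psyneulink as pnl

# Node whose input will be biased (a 3-element input array).
target = pnl.ProcessingMechanism(name='target', default_variable=[[0., 0., 0.]])

# BIAS Node: a scalar default_variable applies a single bias value to all
# elements of the biased array (the "Single bias value" case in the docstring).
bias = pnl.ProcessingMechanism(name='bias', default_variable=1.0)

comp = pnl.Composition(name='bias_demo')
comp.add_node(target)
# Require the BIAS NodeRole when the Node is added (one of the methods for
# assigning NodeRoles that the docstring refers to).
comp.add_node(bias, required_roles=pnl.NodeRole.BIAS)

# The BIAS Node has no afferent Projections; it projects to the InputPort that
# holds the values to be biased.  Per the docstring, learnable=True lets
# learning modify the bias.
comp.add_projection(pnl.MappingProjection(sender=bias,
                                          receiver=target.input_port,
                                          learnable=True))
```

With the pytorchwrappers.py change above, such a Node would be flagged as a bias in the PyTorch wrapper because it carries the `BIAS` `NodeRole`, rather than by inspecting its InputPorts' `default_input` settings.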