
Source Code for Module Bio.NeuralNetwork.BackPropagation.Layer

  1  """Model a single layer in a nueral network. 
  2   
  3  These classes deal with a layers in the neural network (ie. the input layer, 
  4  hidden layers and the output layer). 
  5  """ 
  6  # standard library 
  7  import math 
  8  import random 
  9   
10 -def logistic_function(value):
11 """Transform the value with the logistic function. 12 13 XXX This is in the wrong place -- I need to find a place to put it 14 that makes sense. 15 """ 16 return 1.0 / (1.0 + math.exp(-value))

class AbstractLayer:
    """Abstract base class for all layers."""

    def __init__(self, num_nodes, has_bias_node):
        """Initialize the layer.

        Arguments:

        o num_nodes -- The number of nodes that are contained in this layer.

        o has_bias_node -- Specify whether or not this layer has a bias
        node. The bias node is not included in the number of nodes in the
        layer, but is used in constructing and dealing with the network.
        """
        # specify all of the nodes in the layer; the bias node, when
        # present, is node 0
        if has_bias_node:
            lower_range = 0
        else:
            lower_range = 1

        self.nodes = list(range(lower_range, num_nodes + 1))

        self.weights = {}

    def __str__(self):
        """Debugging output."""
        return "weights: %s" % self.weights

    def set_weight(self, this_node, next_node, value):
        """Set a weight value from one node to the next.

        If weights are not explicitly set, they will be initialized to
        random values to start with.
        """
        if (this_node, next_node) not in self.weights:
            raise ValueError("Invalid node values passed.")

        self.weights[(this_node, next_node)] = value

class InputLayer(AbstractLayer):
    def __init__(self, num_nodes, next_layer):
        """Initialize the input layer.

        Arguments:

        o num_nodes -- The number of nodes in the input layer.

        o next_layer -- The next layer in the neural network this is
        connected to.
        """
        AbstractLayer.__init__(self, num_nodes, 1)

        self._next_layer = next_layer

        # set up the weights, randomly initialized in [-2.0, 2.0)
        self.weights = {}
        for own_node in self.nodes:
            for other_node in self._next_layer.nodes:
                self.weights[(own_node, other_node)] = \
                    random.uniform(-2.0, 2.0)

        # set up the weight changes
        self.weight_changes = {}
        for own_node in self.nodes:
            for other_node in self._next_layer.nodes:
                self.weight_changes[(own_node, other_node)] = 0.0

        # set up the calculated values for each node -- these will
        # actually just be set from inputs into the network.
        self.values = {}
        for node in self.nodes:
            # set the bias node -- always has a value of 1
            if node == 0:
                self.values[0] = 1
            else:
                self.values[node] = 0

    def update(self, inputs):
        """Update the values of the nodes using given inputs.

        Arguments:

        o inputs -- A list of inputs into the network -- this must be
        equal in length to the number of non-bias nodes in the layer.
        """
        if len(inputs) != len(self.values) - 1:
            raise ValueError("Inputs do not match input layer nodes.")

        # set the node values from the inputs
        for input_num in range(len(inputs)):
            self.values[input_num + 1] = inputs[input_num]

        # propagate the update to the next layer
        self._next_layer.update(self)

    def backpropagate(self, outputs, learning_rate, momentum):
        """Recalculate all weights based on the last round of prediction.

        Arguments:

        o outputs -- The output info we are using to calculate error.

        o learning_rate -- The learning rate of the network.

        o momentum -- The amount of weight to place on the previous weight
        change.
        """
        # first backpropagate to the next layers
        next_errors = self._next_layer.backpropagate(outputs, learning_rate,
                                                     momentum)

        for this_node in self.nodes:
            for next_node in self._next_layer.nodes:
                error_deriv = (next_errors[next_node] *
                               self.values[this_node])

                delta = (learning_rate * error_deriv +
                         momentum * self.weight_changes[(this_node, next_node)])

                # apply the change to the weight
                self.weights[(this_node, next_node)] += delta

                # remember the weight change for next time
                self.weight_changes[(this_node, next_node)] = delta
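
# In symbols, the weight update applied above is the classic delta rule
# with a momentum term (a restatement of the loop, added for reference):
#
#     delta_w(i, j) = learning_rate * error(j) * value(i)
#                     + momentum * previous_delta_w(i, j)
#
# where error(j) is the error term backpropagated from node j of the next
# layer and value(i) is the current activation of node i in this layer.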

class HiddenLayer(AbstractLayer):
    def __init__(self, num_nodes, next_layer, activation=logistic_function):
        """Initialize a hidden layer.

        Arguments:

        o num_nodes -- The number of nodes in this hidden layer.

        o next_layer -- The next layer in the neural network that this
        is connected to.

        o activation -- The transformation function used to transform
        predicted values.
        """
        AbstractLayer.__init__(self, num_nodes, 1)

        self._next_layer = next_layer
        self._activation = activation

        # set up the weights, randomly initialized in [-2.0, 2.0)
        self.weights = {}
        for own_node in self.nodes:
            for other_node in self._next_layer.nodes:
                self.weights[(own_node, other_node)] = \
                    random.uniform(-2.0, 2.0)

        # set up the weight changes
        self.weight_changes = {}
        for own_node in self.nodes:
            for other_node in self._next_layer.nodes:
                self.weight_changes[(own_node, other_node)] = 0.0

        # set up the calculated values for each node
        self.values = {}
        for node in self.nodes:
            # the bias node always has a value of 1
            if node == 0:
                self.values[node] = 1
            else:
                self.values[node] = 0

    def update(self, previous_layer):
        """Update the values of nodes from the previous layer info.

        Arguments:

        o previous_layer -- The previous layer in the network.
        """
        # update each node in this layer, skipping the bias node
        for update_node in self.nodes[1:]:
            # sum up the weighted inputs from the previous layer
            total = 0.0
            for node in previous_layer.nodes:
                total += (previous_layer.values[node] *
                          previous_layer.weights[(node, update_node)])

            self.values[update_node] = self._activation(total)

        # propagate the update to the next layer
        self._next_layer.update(self)
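
    # Written out, the forward pass above computes, for each non-bias
    # node j in this layer (again just restating the code):
    #
    #     value(j) = activation(sum over i of value(i) * weight(i, j))
    #
    # where i ranges over the previous layer's nodes, including its bias
    # node 0, whose constant value of 1 makes weight(0, j) act as the
    # bias term for node j.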

    def backpropagate(self, outputs, learning_rate, momentum):
        """Recalculate all weights based on the last round of prediction.

        Arguments:

        o outputs -- The output values we are using to see how good our
        network is at predicting things.

        o learning_rate -- The learning rate of the network.

        o momentum -- The amount of weight to place on the previous weight
        change.
        """
        # first backpropagate to the next layers
        next_errors = self._next_layer.backpropagate(outputs, learning_rate,
                                                     momentum)

        # --- update the weights
        for this_node in self.nodes:
            for next_node in self._next_layer.nodes:
                error_deriv = (next_errors[next_node] *
                               self.values[this_node])

                delta = (learning_rate * error_deriv +
                         momentum * self.weight_changes[(this_node, next_node)])

                # apply the change to the weight
                self.weights[(this_node, next_node)] += delta

                # remember the weight change for next time
                self.weight_changes[(this_node, next_node)] = delta

        # --- calculate error terms
        errors = {}
        for error_node in self.nodes:
            # get the error info propagated from the next layer
            previous_error = 0.0
            for next_node in self._next_layer.nodes:
                previous_error += (next_errors[next_node] *
                                   self.weights[(error_node, next_node)])

            # get the correction factor -- the derivative of the logistic
            # function at this node's current value
            corr_factor = (self.values[error_node] *
                           (1 - self.values[error_node]))

            # calculate the error
            errors[error_node] = previous_error * corr_factor

        return errors
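
# For reference, the error term returned for each node i above is the
# standard backpropagation recurrence (a restatement of the second loop):
#
#     error(i) = value(i) * (1 - value(i))
#                * sum over j of weight(i, j) * error(j)
#
# where j ranges over the next layer's nodes. One quirk worth noting: the
# weights are updated before the error terms are computed, so the
# recurrence here uses the post-update weights rather than the pre-update
# ones that textbook backpropagation calls for.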

class OutputLayer(AbstractLayer):
    def __init__(self, num_nodes, activation=logistic_function):
        """Initialize the Output Layer.

        Arguments:

        o num_nodes -- The number of nodes in this layer. This corresponds
        to the number of outputs in the neural network.

        o activation -- The transformation function used to transform
        predicted values.
        """
        AbstractLayer.__init__(self, num_nodes, 0)

        self._activation = activation

        self.values = {}
        for node in self.nodes:
            self.values[node] = 0

    def update(self, previous_layer):
        """Update the value of output nodes from the previous layers.

        Arguments:

        o previous_layer -- The hidden layer preceding this one.
        """
        # update all of the nodes in this layer
        for update_node in self.nodes:
            # sum up the contribution from all of the previous inputs
            total = 0.0
            for node in previous_layer.nodes:
                total += (previous_layer.values[node] *
                          previous_layer.weights[(node, update_node)])

            self.values[update_node] = self._activation(total)

    def backpropagate(self, outputs, learning_rate, momentum):
        """Calculate the backpropagation error at a given node.

        This calculates the error term using the formula:

        p = (t - z) z (1 - z)

        where z is the calculated value for the node, and t is the
        real (target) value.

        Arguments:

        o outputs -- The list of output values we use to calculate the
        errors in our predictions.
        """
        errors = {}
        for node in self.nodes:
            calculated_value = self.values[node]
            real_value = outputs[node - 1]

            errors[node] = ((real_value - calculated_value) *
                            calculated_value *
                            (1 - calculated_value))

        return errors

    def get_error(self, real_value, node_number):
        """Return the error (half the squared difference) at a node."""
        predicted_value = self.values[node_number]
        return 0.5 * math.pow((real_value - predicted_value), 2)
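
    # Note (added for reference): get_error() and backpropagate() are
    # consistent. get_error() reports the loss E = 0.5 * (t - z)**2, and
    # backpropagate() returns its negative gradient through the logistic
    # activation, using the identity sigma' = sigma * (1 - sigma):
    #
    #     -dE/dnet = -(dE/dz) * (dz/dnet) = (t - z) * z * (1 - z)
    #
    # which is exactly the p = (t - z) z (1 - z) term computed above.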

    def set_weight(self, this_node, next_node, value):
        """Setting weights is not supported; the output layer has none."""
        raise NotImplementedError("Can't set weights for the output layer")
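
To see how the layers fit together, here is a minimal end-to-end sketch (an
illustration, not part of the module; in Biopython these layers are normally
wired up by the package's Network module). Layers are constructed back to
front, since each layer needs a reference to its successor, and both the
forward pass and the weight update are driven from the input layer:

    # build a 2-input, 3-hidden, 1-output network, back to front
    out_layer = OutputLayer(1)
    hidden_layer = HiddenLayer(3, out_layer)
    in_layer = InputLayer(2, hidden_layer)

    # one training step on a single example: a forward pass through all
    # three layers, then a backpropagation weight update
    in_layer.update([0.5, 0.9])
    in_layer.backpropagate([1.0], learning_rate=0.5, momentum=0.1)

    # repeated steps like this should shrink the squared error at node 1
    print(out_layer.get_error(1.0, 1))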