@@ -416,24 +416,152 @@ def predict(example):
 # ______________________________________________________________________________
 
 
-def NeuralNetLearner(dataset, sizes):
-    """Layered feed-forward network."""
+def NeuralNetLearner(dataset, hidden_layer_sizes=[3],
+                     learning_rate=0.01, epochs=100):
+    """
+    Layered feed-forward network.
+    hidden_layer_sizes: list of the number of hidden units in each hidden layer
+    learning_rate: learning rate of gradient descent
+    epochs: number of passes over the dataset
+    """
 
-    activations = [[0.0 for i in range(n)] for n in sizes]  # noqa
-    weights = []  # noqa
+    i_units = len(dataset.inputs)
+    o_units = 1  # As of now, dataset.target gives only one index.
+
+    # Construct a network and train it with back-propagation
+    raw_net = network(i_units, hidden_layer_sizes, o_units)
+    learned_net = BackPropagationLearner(dataset, raw_net,
+                                         learning_rate, epochs)
 
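+    # The learner is the closure below: calling it with an example's
+    # input values runs a single forward pass through learned_net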
     def predict(example):
-        unimplemented()
+
+        # Input nodes
+        i_nodes = learned_net[0]
+
+        # Activate input layer
+        for v, n in zip(example, i_nodes):
+            n.value = v
+
+        # Forward pass
+        for layer in learned_net[1:]:
+            for node in layer:
+                inc = [n.value for n in node.inputs]
+                in_val = dotproduct(inc, node.weights)
+                node.value = node.activation(in_val)
+
+        # Hypothesis
+        o_nodes = learned_net[-1]
+        pred = [o_nodes[i].value for i in range(o_units)]
+        return pred[0]
 
     return predict
 
 
 class NNUnit:
+    """
+    Single unit of a multilayer neural network.
+    inputs: incoming connections
+    weights: weights on the incoming connections
+    """
 
-    """Unit of a neural net."""
+    def __init__(self, weights=None, inputs=None):
+        self.weights = weights or []
+        self.inputs = inputs or []
+        self.value = None
+        self.activation = sigmoid
 
-    def __init__(self):
-        unimplemented()
+
+
+def network(input_units, hidden_layer_sizes, output_units):
+    """
+    Create a directed acyclic network with the given number of layers.
+    hidden_layer_sizes: list of the number of units in each hidden layer,
+    excluding the input and output layers.
+    """
+    layers_sizes = [input_units] + hidden_layer_sizes + [output_units]
+    net = [[NNUnit() for n in range(size)]
+           for size in layers_sizes]
+    n_layers = len(net)
+
+    # Make connections: every unit takes input from every unit
+    # in the previous layer, with weights initialised to 0
+    for i in range(1, n_layers):
+        for n in net[i]:
+            for k in net[i - 1]:
+                n.inputs.append(k)
+                n.weights.append(0)
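+    # For example, network(2, [3], 1) builds layers of sizes [2, 3, 1],
+    # where each hidden unit has 2 inputs and the output unit has 3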
+    return net
+
+
+def BackPropagationLearner(dataset, network, learning_rate, epochs):
+    """[Fig. 18.23] The back-propagation algorithm for multilayer networks."""
+    # Initialise weights to small random values
+    for layer in network:
+        for node in layer:
+            node.weights = [random.uniform(-0.5, 0.5)
+                            for i in range(len(node.weights))]
+
+    examples = dataset.examples
+    # As of now dataset.target gives an int instead of a list;
+    # changing the dataset class would affect all the learners,
+    # so it will be taken care of later.
+    idx_t = [dataset.target]
+    idx_i = dataset.inputs
+    n_layers = len(network)
+    o_nodes = network[-1]
+    i_nodes = network[0]
+
+    for epoch in range(epochs):
+        # Iterate over each example
+        for e in examples:
+            i_val = [e[i] for i in idx_i]
+            t_val = [e[i] for i in idx_t]
+
+            # Activate input layer
+            for v, n in zip(i_val, i_nodes):
+                n.value = v
+
+            # Forward pass
+            for layer in network[1:]:
+                for node in layer:
+                    inc = [n.value for n in node.inputs]
+                    in_val = dotproduct(inc, node.weights)
+                    node.value = node.activation(in_val)
+
+            # Initialize delta, one list per layer
+            delta = [[] for i in range(n_layers)]
+
+            # Compute output layer delta
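+            # (err * g'(in), where the sigmoid derivative is
+            # g'(in) = g(in) * (1 - g(in)) = value * (1 - value))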
+            o_units = len(o_nodes)
+            err = [t_val[i] - o_nodes[i].value
+                   for i in range(o_units)]
+            delta[-1] = [o_nodes[i].value * (1 - o_nodes[i].value) *
+                         err[i] for i in range(o_units)]
+
+            # Backward pass
+            h_layers = n_layers - 2
+            for i in range(h_layers, 0, -1):
+                layer = network[i]
+                h_units = len(layer)
+                nx_layer = network[i + 1]
+
+                # Weights from each ith-layer node to each (i + 1)th-layer node
+                w = [[node.weights[k] for node in nx_layer]
+                     for k in range(h_units)]
+
+                delta[i] = [layer[j].value * (1 - layer[j].value) *
+                            dotproduct(w[j], delta[i + 1])
+                            for j in range(h_units)]
+
+            # Update weights
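+            # (each weight from node k to node j changes by
+            #  learning_rate * delta[i][j] * a_k, per AIMA's update rule)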
+            for i in range(1, n_layers):
+                layer = network[i]
+                inc = [node.value for node in network[i - 1]]
+                units = len(layer)
+                for j in range(units):
+                    layer[j].weights = vector_add(
+                        layer[j].weights,
+                        scalar_vector_product(learning_rate * delta[i][j], inc))
+
+    return network
 
 
 def PerceptronLearner(dataset, sizes):
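
For reference, a minimal driver sketch for the new learner. This is illustrative only: the toy dataset and the DataSet keyword arguments (examples, inputs, target) are assumptions about learning.py's DataSet class, not part of this commit.

    from learning import DataSet, NeuralNetLearner

    # Tiny numeric toy data; the last column is the target.
    # Targets must be numeric, since backprop computes t - output.
    examples = [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]]
    xor = DataSet(examples=examples, inputs=[0, 1], target=2)

    nn = NeuralNetLearner(xor, hidden_layer_sizes=[4],
                          learning_rate=0.5, epochs=1000)
    print(nn([1, 0]))  # a sigmoid output in (0, 1)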