@@ -469,7 +469,7 @@ def NeuralNetLearner(dataset, hidden_layer_sizes=[3],
469
469
"""
470
470
471
471
i_units = len (dataset .inputs )
472
- o_units = 1 # As of now, dataset.target gives only one index.
472
+ o_units = len ( dataset .values [ dataset . target ])
473
473
474
474
# construct a network
475
475
raw_net = network (i_units , hidden_layer_sizes , o_units )
@@ -494,49 +494,12 @@ def predict(example):
494
494
495
495
# Hypothesis
496
496
o_nodes = learned_net [- 1 ]
497
- pred = [ o_nodes [ i ]. value for i in range ( o_units )]
498
- return 1 if pred [ 0 ] >= 0.5 else 0
497
+ prediction = find_max_node ( o_nodes )
498
+ return prediction
499
499
500
500
return predict
501
501
502
502
503
- class NNUnit :
504
- """Single Unit of Multiple Layer Neural Network
505
- inputs: Incoming connections
506
- weights: Weights to incoming connections
507
- """
508
-
509
- def __init__ (self , weights = None , inputs = None ):
510
- self .weights = []
511
- self .inputs = []
512
- self .value = None
513
- self .activation = sigmoid
514
-
515
-
516
- def network (input_units , hidden_layer_sizes , output_units ):
517
- """Create Directed Acyclic Network of given number layers.
518
- hidden_layers_sizes : List number of neuron units in each hidden layer
519
- excluding input and output layers
520
- """
521
- # Check for PerceptronLearner
522
- if hidden_layer_sizes :
523
- layers_sizes = [input_units ] + hidden_layer_sizes + [output_units ]
524
- else :
525
- layers_sizes = [input_units ] + [output_units ]
526
-
527
- net = [[NNUnit () for n in range (size )]
528
- for size in layers_sizes ]
529
- n_layers = len (net )
530
-
531
- # Make Connection
532
- for i in range (1 , n_layers ):
533
- for n in net [i ]:
534
- for k in net [i - 1 ]:
535
- n .inputs .append (k )
536
- n .weights .append (0 )
537
- return net
538
-
539
-
540
503
def BackPropagationLearner (dataset , net , learning_rate , epochs ):
541
504
"""[Figure 18.23] The back-propagation algorithm for multilayer network"""
542
505
# Initialise weights
@@ -551,17 +514,21 @@ def BackPropagationLearner(dataset, net, learning_rate, epochs):
551
514
Changing dataset class will have effect on all the learners.
552
515
Will be taken care of later
553
516
'''
554
- idx_t = [dataset .target ]
555
- idx_i = dataset .inputs
556
- n_layers = len (net )
557
517
o_nodes = net [- 1 ]
558
518
i_nodes = net [0 ]
519
+ o_units = len (o_nodes )
520
+ idx_t = dataset .target
521
+ idx_i = dataset .inputs
522
+ n_layers = len (net )
523
+
524
+ inputs , targets = init_examples (examples , idx_i , idx_t , o_units )
559
525
560
526
for epoch in range (epochs ):
561
527
# Iterate over each example
562
- for e in examples :
563
- i_val = [e [i ] for i in idx_i ]
564
- t_val = [e [i ] for i in idx_t ]
528
+ for e in range (len (examples )):
529
+ i_val = inputs [e ]
530
+ t_val = targets [e ]
531
+
565
532
# Activate input layer
566
533
for v , n in zip (i_val , i_nodes ):
567
534
n .value = v
@@ -577,7 +544,6 @@ def BackPropagationLearner(dataset, net, learning_rate, epochs):
577
544
delta = [[] for i in range (n_layers )]
578
545
579
546
# Compute outer layer delta
580
- o_units = len (o_nodes )
581
547
err = [t_val [i ] - o_nodes [i ].value
582
548
for i in range (o_units )]
583
549
delta [- 1 ] = [(o_nodes [i ].value ) * (1 - o_nodes [i ].value ) *
@@ -613,7 +579,7 @@ def BackPropagationLearner(dataset, net, learning_rate, epochs):
613
579
def PerceptronLearner (dataset , learning_rate = 0.01 , epochs = 100 ):
614
580
"""Logistic Regression, NO hidden layer"""
615
581
i_units = len (dataset .inputs )
616
- o_units = 1 # As of now, dataset.target gives only one index.
582
+ o_units = len ( dataset .values [ dataset . target ])
617
583
hidden_layer_sizes = []
618
584
raw_net = network (i_units , hidden_layer_sizes , o_units )
619
585
learned_net = BackPropagationLearner (dataset , raw_net , learning_rate , epochs )
@@ -635,10 +601,73 @@ def predict(example):
635
601
636
602
# Hypothesis
637
603
o_nodes = learned_net [- 1 ]
638
- pred = [ o_nodes [ i ]. value for i in range ( o_units )]
639
- return 1 if pred [ 0 ] >= 0.5 else 0
604
+ prediction = find_max_node ( o_nodes )
605
+ return prediction
640
606
641
607
return predict
608
+
609
+
610
class NNUnit:
    """Single unit (neuron) of a multilayer neural network.

    Attributes:
        weights: weights on incoming connections, parallel to ``inputs``.
        inputs: the units feeding into this one.
        value: most recently computed activation value (None before use).
        activation: activation function applied to the weighted input sum.
    """

    def __init__(self, weights=None, inputs=None):
        # Honor the constructor arguments; the previous version silently
        # discarded them and always started from empty lists.
        self.weights = weights if weights is not None else []
        self.inputs = inputs if inputs is not None else []
        self.value = None
        # sigmoid is defined elsewhere in this file.
        self.activation = sigmoid
621
+
622
+
623
def network(input_units, hidden_layer_sizes, output_units):
    """Create a directed acyclic network with the given layer sizes.

    input_units: number of units in the input layer.
    hidden_layer_sizes: list with the number of units in each hidden layer
        (excluding input and output layers); empty or None for none, which
        is the PerceptronLearner case (input connects directly to output).
    output_units: number of units in the output layer.

    Returns a list of layers, each a list of NNUnit, with every unit fully
    connected to the previous layer and all weights initialised to 0.
    """
    # `hidden_layer_sizes or []` collapses both None and [] to the
    # no-hidden-layer topology, removing the old redundant if/else.
    layers_sizes = [input_units] + list(hidden_layer_sizes or []) + [output_units]

    net = [[NNUnit() for _ in range(size)] for size in layers_sizes]

    # Fully connect each layer to its predecessor with zero weights.
    for prev_layer, layer in zip(net, net[1:]):
        for node in layer:
            for source in prev_layer:
                node.inputs.append(source)
                node.weights.append(0)
    return net
645
+
646
+
647
def init_examples(examples, idx_i, idx_t, o_units):
    """Preprocess dataset examples into parallel input/target mappings.

    examples: list of example rows, indexable by attribute position.
    idx_i: indices of the input attributes within each example.
    idx_t: index of the target attribute within each example.
    o_units: number of output units; when > 1, targets are one-hot encoded
        (the target value is assumed to be a valid class index).

    Returns (inputs, targets): dicts keyed by example position, where
    inputs[n] is the list of input values of example n and targets[n] is
    its (possibly one-hot) target vector.
    """
    inputs = {}
    targets = {}

    # NOTE: the original code reused `i` both as the example index and as
    # the comprehension variable, which shadows/leaks under Python 2
    # semantics; distinct names avoid that trap.
    for n, example in enumerate(examples):
        # Input values of this example.
        inputs[n] = [example[j] for j in idx_i]

        if o_units > 1:
            # One-hot representation of the example's target class.
            t = [0] * o_units
            t[example[idx_t]] = 1
            targets[n] = t
        else:
            # Single output unit: raw target value in a one-element list.
            targets[n] = [example[idx_t]]

    return inputs, targets
666
+
667
+
668
def find_max_node(nodes):
    """Return the index of the node with the largest ``value`` attribute.

    Single pass over the indices (ties resolve to the first maximal node),
    replacing the old two-scan argmax-then-index approach and removing the
    dependency on the project-level argmax helper.
    """
    return max(range(len(nodes)), key=lambda i: nodes[i].value)
670
+
642
671
# ______________________________________________________________________________
643
672
644
673
0 commit comments