@@ -526,6 +526,7 @@ def markov_blanket_sample(X, e, bn):
526
526
527
527
# Umbrella Example [Fig. 15.2]
528
528
529
+
529
530
class HiddenMarkovModel :
530
531
531
532
""" A Hidden markov model which takes Transition model and Sensor model as inputs"""
@@ -546,17 +547,18 @@ def sensor_dist(self, ev):
546
547
547
548
def forward(HMM, fv, ev):
    """One step of HMM filtering.

    Propagates the prior state message fv through the transition model,
    weights the result by the likelihood of the evidence ev, and returns
    the normalized posterior distribution over the two states.
    """
    # Prediction: sum over previous states of fv[x] * P(X' | x).
    predicted = vector_add(
        scalar_vector_product(fv[0], HMM.transition_model[0]),
        scalar_vector_product(fv[1], HMM.transition_model[1]))

    # Update: multiply elementwise by P(ev | X') and renormalize.
    likelihood = HMM.sensor_dist(ev)
    return normalize(element_wise_product(likelihood, predicted))
553
554
555
+
554
556
def backward(HMM, b, ev):
    """One step of the HMM backward message.

    Folds the likelihood of evidence ev into the backward message b,
    pushes the result back through the transition model, and returns
    the normalized backward message for the previous time step.
    """
    likelihood = HMM.sensor_dist(ev)
    weighted = element_wise_product(likelihood, b)

    combined = vector_add(
        scalar_vector_product(weighted[0], HMM.transition_model[0]),
        scalar_vector_product(weighted[1], HMM.transition_model[1]))
    return normalize(combined)
560
562
561
563
562
564
def forward_backward (HMM , ev , prior ):
@@ -571,7 +573,8 @@ def forward_backward(HMM, ev, prior):
571
573
umbrellaHMM = HiddenMarkovModel(umbrella_transition, umbrella_sensor)
572
574
573
575
>>> forward_backward(umbrellaHMM, umbrella_evidence, umbrella_prior)
574
- [[0.6469, 0.3531], [0.8673, 0.1327], [0.8204, 0.1796], [0.3075, 0.6925], [0.8204, 0.1796], [0.8673, 0.1327]]
576
+ [[0.6469, 0.3531], [0.8673, 0.1327], [0.8204, 0.1796],
577
+ [0.3075, 0.6925], [0.8204, 0.1796], [0.8673, 0.1327]]
575
578
"""
576
579
t = len (ev )
577
580
ev .insert (0 , None ) # to make the code look similar to pseudo code
@@ -583,10 +586,10 @@ def forward_backward(HMM, ev, prior):
583
586
584
587
fv [0 ] = prior
585
588
586
- for i in range (1 , t + 1 ):
587
- fv [i ] = forward (HMM , fv [i - 1 ], ev [i ])
589
+ for i in range (1 , t + 1 ):
590
+ fv [i ] = forward (HMM , fv [i - 1 ], ev [i ])
588
591
for i in range (t , - 1 , - 1 ):
589
- sv [i - 1 ] = normalize (element_wise_product (fv [i ], b ))
592
+ sv [i - 1 ] = normalize (element_wise_product (fv [i ], b ))
590
593
b = backward (HMM , b , ev [i ])
591
594
bv .append (b )
592
595
@@ -600,14 +603,78 @@ def forward_backward(HMM, ev, prior):
600
603
601
604
# _________________________________________________________________________
602
605
606
+
603
607
def fixed_lag_smoothing(e_t, hmm, d):
    """Fixed-lag smoothing for an HMM with lag d. [Fig. 15.6]

    Not yet implemented; raises via the module's unimplemented() helper.
    """
    unimplemented()
606
610
607
611
608
def particle_filtering(e, N, HMM):
    """One step of particle filtering over a two-state HMM. [Fig. 15.17]

    e   -- a single evidence value (e.g. an umbrella observation)
    N   -- number of particles
    HMM -- HiddenMarkovModel supplying transition_model and sensor_dist

    Returns a list of N particle states ('A' or 'B') resampled in
    proportion to their importance weights given evidence e.

    Example (output is probabilistic and therefore can vary):
    umbrella_prior = [0.5, 0.5]
    umbrella_transition = [[0.7, 0.3], [0.3, 0.7]]
    umbrella_sensor = [[0.9, 0.2], [0.1, 0.8]]
    umbrellaHMM = HiddenMarkovModel(umbrella_transition, umbrella_sensor)
    >>> particle_filtering(T, 10, umbrellaHMM)  # doctest: +SKIP
    ['A', 'A', 'A', 'B', 'A', 'A', 'B', 'A', 'A', 'A']
    """
    # Prior distribution over the two states.
    dist = [0.5, 0.5]

    # STEP 1: propagate one step using the transition model.
    dist = vector_add(scalar_vector_product(dist[0], HMM.transition_model[0]),
                      scalar_vector_product(dist[1], HMM.transition_model[1]))

    # Sample each particle's state from the propagated distribution.
    # (The original also sampled before propagation, but that sample was
    # overwritten before use — dead code, removed.)
    s = ['A' if probability(dist[0]) else 'B' for _ in range(N)]

    # Importance weight of each particle given the evidence e.
    w = [0] * N
    w_tot = 0
    for i in range(N):
        if s[i] == 'A':
            w_i = HMM.sensor_dist(e)[0] * dist[0]  # P(e|A) * P(A)
        else:  # was a second bare `if`, leaving w_i unbound on any other value
            w_i = HMM.sensor_dist(e)[1] * dist[1]  # P(e|B) * P(B)
        w[i] = w_i
        w_tot += w_i

    # Normalize the weights and limit them to 4 digits, as before.
    w = [float("{0:.4f}".format(w_i / w_tot)) for w_i in w]

    # STEP 2: resample particles in proportion to their weights.
    return weighted_sample_with_replacement(N, s, w)
662
+
663
+
664
def weighted_sample_with_replacement(N, s, w):
    """Resample exactly N particles from s, weighted by w.

    Performs weighted sampling with replacement over the particles:
    repeatedly picks a uniformly random particle and accepts it with
    probability equal to its weight, until N particles are accepted.

    Fix: the original loop condition was `cnt <= N`, which accepted
    N + 1 particles instead of N (visible as 11 outputs for N = 10
    in the caller's doctest).
    """
    s_wtd = []
    cnt = 0
    while cnt < N:
        # Pick a random particle index from 0 to N-1.
        i = random.randint(0, N - 1)
        # Accept it with probability equal to its weight.
        if probability(w[i]):
            s_wtd.append(s[i])
            cnt += 1
    return s_wtd
611
678
612
679
# _________________________________________________________________________
613
680
__doc__ += """
0 commit comments