85 changes: 76 additions & 9 deletions probability.py
@@ -526,6 +526,7 @@ def markov_blanket_sample(X, e, bn):

# Umbrella Example [Fig. 15.2]


class HiddenMarkovModel:

""" A Hidden markov model which takes Transition model and Sensor model as inputs"""
@@ -546,17 +547,18 @@ def sensor_dist(self, ev):

def forward(HMM, fv, ev):
prediction = vector_add(scalar_vector_product(fv[0], HMM.transition_model[0]),
scalar_vector_product(fv[1], HMM.transition_model[1]))
scalar_vector_product(fv[1], HMM.transition_model[1]))
sensor_dist = HMM.sensor_dist(ev)

return(normalize(element_wise_product(sensor_dist, prediction)))


def backward(HMM, b, ev):
sensor_dist = HMM.sensor_dist(ev)
prediction = element_wise_product(sensor_dist, b)

return(normalize(vector_add(scalar_vector_product(prediction[0], HMM.transition_model[0]),
scalar_vector_product(prediction[1], HMM.transition_model[1]))))
scalar_vector_product(prediction[1], HMM.transition_model[1]))))

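For orientation, a minimal usage sketch of one forward step and one backward step, assuming the umbrella model parameters given in the forward_backward docstring below (True stands for "umbrella observed", the T of the docstrings); the forward value corresponds to the filtering result P(R1 | u1) of the umbrella example.

from probability import HiddenMarkovModel, forward, backward

umbrella_transition = [[0.7, 0.3], [0.3, 0.7]]
umbrella_sensor = [[0.9, 0.2], [0.1, 0.8]]
umbrellaHMM = HiddenMarkovModel(umbrella_transition, umbrella_sensor)

# One filtering step from the uniform prior, with the umbrella observed:
print(forward(umbrellaHMM, [0.5, 0.5], True))   # approximately [0.818, 0.182]
# One step of the backward (smoothing) message, starting from [1, 1]:
print(backward(umbrellaHMM, [1, 1], True))      # approximately [0.627, 0.373]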

def forward_backward(HMM, ev, prior):
@@ -571,7 +573,8 @@ def forward_backward(HMM, ev, prior):
umbrellaHMM = HiddenMarkovModel(umbrella_transition, umbrella_sensor)

>>> forward_backward(umbrellaHMM, umbrella_evidence, umbrella_prior)
[[0.6469, 0.3531], [0.8673, 0.1327], [0.8204, 0.1796], [0.3075, 0.6925], [0.8204, 0.1796], [0.8673, 0.1327]]
[[0.6469, 0.3531], [0.8673, 0.1327], [0.8204, 0.1796],
[0.3075, 0.6925], [0.8204, 0.1796], [0.8673, 0.1327]]
"""
t = len(ev)
ev.insert(0, None) # to make the code look similar to pseudo code
@@ -583,10 +586,10 @@ def forward_backward(HMM, ev, prior):

fv[0] = prior

for i in range(1, t+ 1):
fv[i] = forward(HMM, fv[i- 1], ev[i])
for i in range(1, t + 1):
fv[i] = forward(HMM, fv[i - 1], ev[i])
for i in range(t, -1, -1):
sv[i- 1] = normalize(element_wise_product(fv[i], b))
sv[i - 1] = normalize(element_wise_product(fv[i], b))
b = backward(HMM, b, ev[i])
bv.append(b)

@@ -600,14 +603,78 @@ def forward_backward(HMM, ev, prior):

# _________________________________________________________________________


def fixed_lag_smoothing(e_t, hmm, d):
"""[Fig. 15.6]"""
unimplemented()


def particle_filtering(e, N, dbn):
"""[Fig. 15.17]"""
unimplemented()
def particle_filtering(e, N, HMM):
"""
Particle filtering over two state variables.
N = 10
umbrella_evidence = T
umbrella_prior = [0.5, 0.5]
umbrella_transition = [[0.7, 0.3], [0.3, 0.7]]
umbrella_sensor = [[0.9, 0.2], [0.1, 0.8]]
umbrellaHMM = HiddenMarkovModel(umbrella_transition, umbrella_sensor)

>>> particle_filtering(umbrella_evidence, N, umbrellaHMM)
['A', 'A', 'A', 'B', 'A', 'A', 'B', 'A', 'A', 'A']

NOTE: The output is probabilistic and can therefore vary.
"""
dist = [0.5, 0.5]
# State initialization: draw each particle's state from the prior distribution
s = ['A' if probability(dist[0]) else 'B' for i in range(N)]
# Weight initialization
w = [0 for i in range(N)]
# STEP 1
# Propagate one step using transition model given prior state
dist = vector_add(scalar_vector_product(dist[0], HMM.transition_model[0]),
scalar_vector_product(dist[1], HMM.transition_model[1]))
# Assign state according to probability
s = ['A' if probability(dist[0]) else 'B' for i in range(N)]
w_tot = 0
# Calculate the importance weight of each particle given evidence e
for i in range(N):
if s[i] == 'A':
# P(U|A) * P(A)
w_i = HMM.sensor_dist(e)[0] * dist[0]
elif s[i] == 'B':
# P(U|B) * P(B)
w_i = HMM.sensor_dist(e)[1] * dist[1]
w[i] = w_i
w_tot += w_i

# Normalize all the weights
for i in range(N):
w[i] = w[i] / w_tot

# Round the weights to 4 decimal places
for i in range(N):
w[i] = round(w[i], 4)

# STEP 2
s = weighted_sample_with_replacement(N, s, w)
return s

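The returned particle list can be read off as a point estimate of the filtered posterior by counting particles. A minimal sketch, assuming the umbrella model from the docstring above (the choice of N = 100 here is illustrative):

from probability import HiddenMarkovModel, particle_filtering

umbrella_transition = [[0.7, 0.3], [0.3, 0.7]]
umbrella_sensor = [[0.9, 0.2], [0.1, 0.8]]
umbrellaHMM = HiddenMarkovModel(umbrella_transition, umbrella_sensor)

N = 100
particles = particle_filtering(True, N, umbrellaHMM)
# The fraction of 'A' (rain) particles approximates P(Rain | umbrella observed);
# for large N it should be close to the exact filtering value of about 0.82.
print(particles.count('A') / N)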

def weighted_sample_with_replacement(N, s, w):
"""
Performs weighted sampling with replacement over the particles, given the weight of each particle.
We keep picking particles at random until the new distribution contains N states.
"""
s_wtd = []
cnt = 0
while cnt < N:
# Pick a random particle index between 0 and N-1
i = random.randint(0, N - 1)
# Accept it with probability equal to its normalized weight
if probability(w[i]):
s_wtd.append(s[i])
cnt += 1
return s_wtd

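As a design note, the rejection loop above is one straightforward way to resample; on Python 3.6+ the standard library's random.choices performs the same weighted sampling with replacement in a single call. A sketch of that alternative, under an illustrative name (not what the function above uses):

import random


def weighted_sample_with_replacement_choices(N, s, w):
    """Alternative resampling using random.choices (Python 3.6+)."""
    # Draw N particles from s with probability proportional to their weights.
    return random.choices(s, weights=w, k=N)
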
# _________________________________________________________________________
__doc__ += """