Thanks for visiting codestin.com
Credit goes to github.com

Skip to content

Commit d7fbe0d

Browse files
committed
Fill out gibbs_ask().
1 parent 21f0549 commit d7fbe0d

File tree

1 file changed

+28
-14
lines changed

1 file changed

+28
-14
lines changed

probability.py

Lines changed: 28 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33

44
from utils import *
55
from logic import extend
6-
from random import random, seed
6+
from random import choice, seed
77

88
#______________________________________________________________________________
99

@@ -160,6 +160,8 @@ def add(self, node):
160160
assert every(lambda parent: parent in self.variables(), node.parents)
161161
self.nodes.append(node)
162162
self.vars.append(node.variable)
163+
for parent in node.parents:
164+
self.variable_node(parent).children.append(node)
163165

164166
def variable_node(self, var):
165167
"""Return the node for the variable named var.
@@ -224,7 +226,7 @@ def __init__(self, X, parents, cpt):
224226
assert every(lambda v: isinstance(v, bool), vs)
225227
assert 0 <= p <= 1
226228

227-
update(self, variable=X, parents=parents, cpt=cpt)
229+
update(self, variable=X, parents=parents, cpt=cpt, children=[])
228230

229231
def p(self, value, event):
230232
"""Return the conditional probability
@@ -243,7 +245,7 @@ def sample(self, event):
243245
on event's values for parent_vars. That is, return True/False
244246
at random according with the conditional probability given the
245247
parents."""
246-
return random() <= self.p(True, event)
248+
return probability(self.p(True, event))
247249

248250
node = BayesNode
249251

@@ -391,24 +393,36 @@ def weighted_sample(bn, e):
391393
#_______________________________________________________________________________
392394

393395
def gibbs_ask(X, e, bn, N):
    """[Fig. 14.16]
    >>> seed(1017)
    >>> gibbs_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary, 1000
    ... ).show_approx()
    'False: 0.738, True: 0.262'
    """
    # Tally of samples for each value of X (bold N in Fig. 14.16).
    counts = dict.fromkeys(bn.variable_values(X), 0)
    # The nonevidence variables; each gets resampled on every sweep.
    Z = [var for var in bn.variables() if var not in e]
    # Current state of the network (boldface x in Fig. 14.16): start
    # from the evidence and initialize the rest at random.
    state = dict(e)
    for var in Z:
        state[var] = choice(bn.variable_values(var))
    for _ in xrange(N):
        for var in Z:
            # Resample var conditioned on its Markov blanket, then
            # record the value X takes in the resulting state.
            state[var] = markov_blanket_sample(var, state, bn)
            counts[state[X]] += 1
    return ProbDist(X, counts)
405412

406-
def P_markov_blanket(X, e, bn):
407-
"""Return P(X | mb) where mb denotes that the variables in the
408-
Markov blanket of X take their values from event e (which must
409-
assign a value to each). The Markov blanket of X is X's parents,
410-
children, and children's parents."""
411-
unimplemented()
413+
def markov_blanket_sample(X, e, bn):
    """Return a sample drawn from P(X | mb), where mb says that the
    variables in X's Markov blanket take their values from the event e
    (which must assign a value to each of them). The Markov blanket of
    X consists of X's parents, children, and children's parents."""
    node = bn.variable_node(X)
    Q = ProbDist(X)
    for value in bn.variable_values(X):
        event = extend(e, X, value)
        # Equation 14.12: P(value | parents(X)) times the product of
        # each child's probability given its parents under 'event'.
        prob = node.p(value, e)
        for child in node.children:
            prob *= child.p(event[child.variable], event)
        Q[value] = prob
    return probability(Q.normalize()[True]) # (assuming a Boolean variable here)
412426

413427
#_______________________________________________________________________________
414428

0 commit comments

Comments
 (0)