from Agenda
+ self.agenda.remove((G, act1))
+
+ # For actions with variable number of arguments, use least commitment principle
+ # act0_temp, bindings = self.find_action_for_precondition(G)
+ # act0 = self.generate_action_object(act0_temp, bindings)
+
+ # Actions = Actions U {act0}
+ self.actions.add(act0)
+
+ # Constraints = add_const(start < act0, Constraints)
+ self.constraints = self.add_const((self.start, act0), self.constraints)
+
+ # for each CL E CausalLinks do
+ # Constraints = protect(CL, act0, Constraints)
+ for causal_link in self.causal_links:
+ self.constraints = self.protect(causal_link, act0, self.constraints)
+
+ # Agenda = Agenda U {<P, act0>: P is a precondition of act0}
+ for precondition in act0.precond:
+ self.agenda.add((precondition, act0))
+
+ # Constraints = add_const(act0 < act1, Constraints)
+ self.constraints = self.add_const((act0, act1), self.constraints)
+
+ # CausalLinks U {<act0, G, act1>}
+ if (act0, G, act1) not in self.causal_links:
+ self.causal_links.append((act0, G, act1))
+
+ # for each A E Actions do
+ # Constraints = protect(<act0, G, act1>, A, Constraints)
+ for action in self.actions:
+ self.constraints = self.protect((act0, G, act1), action, self.constraints)
+
+ if step > 200:
+ print("Couldn't find a solution")
+ return None, None
+
+ if display:
+ self.display_plan()
+ else:
+ return self.constraints, self.causal_links
+
+
+def spare_tire_graphPlan():
+ """Solves the spare tire problem using GraphPlan"""
+ return GraphPlan(spare_tire()).execute()
+
+
+def three_block_tower_graphPlan():
+ """Solves the Sussman Anomaly problem using GraphPlan"""
+ return GraphPlan(three_block_tower()).execute()
+
+
+def air_cargo_graphPlan():
+ """Solves the air cargo problem using GraphPlan"""
+ return GraphPlan(air_cargo()).execute()
+
+
+def have_cake_and_eat_cake_too_graphPlan():
+ """Solves the cake problem using GraphPlan"""
+ return [GraphPlan(have_cake_and_eat_cake_too()).execute()[1]]
+
+
+def shopping_graphPlan():
+ """Solves the shopping problem using GraphPlan"""
+ return GraphPlan(shopping_problem()).execute()
+
+
+def socks_and_shoes_graphPlan():
+ """Solves the socks and shoes problem using GraphPlan"""
+ return GraphPlan(socks_and_shoes()).execute()
+
+
+def simple_blocks_world_graphPlan():
+ """Solves the simple blocks world problem"""
+ return GraphPlan(simple_blocks_world()).execute()
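+
+
+# Usage sketch: each helper above returns a layered solution - a list of levels,
+# where each level is a list of actions (including 'P'-prefixed persistence
+# actions) that can be executed in parallel; see the GraphPlan notebook for sample output.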
+
+
+class HLA(Action):
+ """
+ Define Actions for the real-world (that may be refined further), and satisfy resource
+ constraints.
+ """
+ unique_group = 1
+
+ def __init__(self, action, precond=None, effect=None, duration=0, consume=None, use=None):
+ """
+ Unlike plain Actions, HLAs are subject to additional constraints:
+ duration holds the amount of time required to execute the task,
+ consumes holds a dictionary representing the resources the task consumes,
+ uses holds a dictionary representing the resources the task uses
+ """
+ precond = precond or [None]
+ effect = effect or [None]
+ super().__init__(action, precond, effect)
+ self.duration = duration
+ self.consumes = consume or {}
+ self.uses = use or {}
+ self.completed = False
+ # self.priority = -1 # must be assigned in relation to other HLAs
+ # self.job_group = -1 # must be assigned in relation to other HLAs
+
+ def do_action(self, job_order, available_resources, kb, args):
+ """
+ An HLA-based version of act: along with updating the knowledge base, it
+ handles resource checks and ensures that actions are executed in the correct order.
+ """
+ if not self.has_usable_resource(available_resources):
+ raise Exception('Not enough usable resources to execute {}'.format(self.name))
+ if not self.has_consumable_resource(available_resources):
+ raise Exception('Not enough consumable resources to execute {}'.format(self.name))
+ if not self.inorder(job_order):
+ raise Exception("Can't execute {} - execute prerequisite actions first".
+ format(self.name))
+ kb = super().act(kb, args) # update knowledge base
+ for resource in self.consumes: # remove consumed resources
+ available_resources[resource] -= self.consumes[resource]
+ self.completed = True # set the task status to complete
+ return kb
+
+ def has_consumable_resource(self, available_resources):
+ """
+ Ensure there are enough consumable resources for this action to execute.
+ """
+ for resource in self.consumes:
+ if available_resources.get(resource) is None:
+ return False
+ if available_resources[resource] < self.consumes[resource]:
+ return False
+ return True
+
+ def has_usable_resource(self, available_resources):
+ """
+ Ensure there are enough usable resources for this action to execute.
+ """
+ for resource in self.uses:
+ if available_resources.get(resource) is None:
+ return False
+ if available_resources[resource] < self.uses[resource]:
+ return False
+ return True
+
+ def inorder(self, job_order):
+ """
+ Ensure that all the jobs that had to be executed before the current one have been
+ successfully executed.
+ """
+ for jobs in job_order:
+ if self in jobs:
+ for job in jobs:
+ if job is self:
+ return True
+ if not job.completed:
+ return False
+ return True
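+
+# Illustrative sketch of the resource checks above (hypothetical numbers):
+# hla = HLA('AddEngine1', duration=30, use={'EngineHoists': 1}, consume={'LugNuts': 20})
+# hla.has_usable_resource({'EngineHoists': 1}) -> True
+# hla.has_consumable_resource({'LugNuts': 5}) -> False, since 5 < 20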
+
+
+class RealWorldPlanningProblem(PlanningProblem):
+ """
+ Define real-world problems by aggregating resources as numerical quantities instead of
+ named entities.
+
+ This class is identical to PlanningProblem, except that it overloads the act function
+ to handle the resource and ordering conditions imposed by HLA, as opposed to Action.
+ """
+
+ def __init__(self, initial, goals, actions, jobs=None, resources=None):
+ super().__init__(initial, goals, actions)
+ self.jobs = jobs
+ self.resources = resources or {}
+
+ def act(self, action):
+ """
+ Performs the HLA given as argument.
+
+ Note that this is different from the superclass act method, where the parameter was an
+ Expression. For real-world problems, an Expr object isn't enough to capture all the
+ detail required for executing the action: resources, preconditions, etc. need to be
+ checked too.
+ """
+ args = action.args
+ list_action = first(a for a in self.actions if a.name == action.name)
+ if list_action is None:
+ raise Exception("Action '{}' not found".format(action.name))
+ self.initial = list_action.do_action(self.jobs, self.resources, self.initial, args).clauses
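+ # Usage sketch: p = job_shop_problem(); p.act(p.jobs[1][0]) executes AddEngine2,
+ # as in the doctest of job_shop_problem below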
+
+ def refinements(self, library): # refinements may be (multiple) HLA themselves ...
+ """
+ state is a Problem, containing the current state kb.
+ library is a dictionary containing details for every possible refinement. e.g.:
+ {
+ 'HLA': [
+ 'Go(Home, SFO)',
+ 'Go(Home, SFO)',
+ 'Drive(Home, SFOLongTermParking)',
+ 'Shuttle(SFOLongTermParking, SFO)',
+ 'Taxi(Home, SFO)'
+ ],
+ 'steps': [
+ ['Drive(Home, SFOLongTermParking)', 'Shuttle(SFOLongTermParking, SFO)'],
+ ['Taxi(Home, SFO)'],
+ [],
+ [],
+ []
+ ],
+ # empty refinements indicate a primitive action
+ 'precond': [
+ ['At(Home) & Have(Car)'],
+ ['At(Home)'],
+ ['At(Home) & Have(Car)'],
+ ['At(SFOLongTermParking)'],
+ ['At(Home)']
+ ],
+ 'effect': [
+ ['At(SFO) & ~At(Home)'],
+ ['At(SFO) & ~At(Home)'],
+ ['At(SFOLongTermParking) & ~At(Home)'],
+ ['At(SFO) & ~At(SFOLongTermParking)'],
+ ['At(SFO) & ~At(Home)']
+ ]}
+ """
+ indices = [i for i, x in enumerate(library['HLA']) if expr(x).op == self.name]
+ for i in indices:
+ actions = []
+ for j in range(len(library['steps'][i])):
+ # find the index of the step [j] of the HLA
+ index_step = [k for k, x in enumerate(library['HLA']) if x == library['steps'][i][j]][0]
+ precond = library['precond'][index_step][0] # preconditions of step [j]
+ effect = library['effect'][index_step][0] # effect of step [j]
+ actions.append(HLA(library['steps'][i][j], precond, effect))
+ yield actions
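+ # e.g. with the library shown above and self.name == 'Go', this yields two
+ # refinement sequences: [Drive(Home, SFOLongTermParking), Shuttle(SFOLongTermParking, SFO)]
+ # and [Taxi(Home, SFO)]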
+
+ def hierarchical_search(self, hierarchy):
+ """
+ [Figure 11.5]
+ 'Hierarchical Search, a Breadth First Search implementation of Hierarchical
+ Forward Planning Search'
+ The problem is a real-world problem defined by the problem class, and the hierarchy is
+ a dictionary of HLA - refinements (see refinements generator for details)
+ """
+ act = Node(self.initial, None, [self.actions[0]])
+ frontier = deque()
+ frontier.append(act)
+ while True:
+ if not frontier:
+ return None
+ plan = frontier.popleft()
+ # finds the first non primitive hla in plan actions
+ (hla, index) = RealWorldPlanningProblem.find_hla(plan, hierarchy)
+ prefix = plan.action[:index]
+ outcome = RealWorldPlanningProblem(
+ RealWorldPlanningProblem.result(self.initial, prefix), self.goals, self.actions)
+ suffix = plan.action[index + 1:]
+ if not hla: # hla is None and plan is primitive
+ if outcome.goal_test():
+ return plan.action
+ else:
+ for sequence in RealWorldPlanningProblem.refinements(hla, hierarchy): # find refinements
+ frontier.append(Node(outcome.initial, plan, prefix + sequence + suffix))
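+ # Usage sketch: prob, library = go_to_sfo(); prob.hierarchical_search(library)
+ # returns a list of primitive HLAs that reaches the goal, searching breadth-first over plans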
+
+ def result(state, actions):
+ """The outcome of applying an action to the current problem"""
+ for a in actions:
+ if a.check_precond(state, a.args):
+ state = a(state, a.args).clauses
+ return state
+
+ def angelic_search(self, hierarchy, initial_plan):
+ """
+ [Figure 11.8]
+ A hierarchical planning algorithm that uses angelic semantics to identify and
+ commit to high-level plans that work while avoiding high-level plans that don’t.
+ The predicate MAKING-PROGRESS checks to make sure that we aren’t stuck in an infinite regression
+ of refinements.
+ At top level, call ANGELIC-SEARCH with [Act] as the initialPlan.
+
+ InitialPlan contains a sequence of HLA's with angelic semantics
+
+ The possible effects of an angelic HLA in initialPlan are:
+ ~ : effect remove
+ $+: effect possibly add
+ $-: effect possibly remove
+ $$: possibly add or remove
+ """
+ frontier = deque(initial_plan)
+ while True:
+ if not frontier:
+ return None
+ plan = frontier.popleft() # sequence of HLA/Angelic HLA's
+ opt_reachable_set = RealWorldPlanningProblem.reach_opt(self.initial, plan)
+ pes_reachable_set = RealWorldPlanningProblem.reach_pes(self.initial, plan)
+ if self.intersects_goal(opt_reachable_set):
+ if RealWorldPlanningProblem.is_primitive(plan, hierarchy):
+ return [x for x in plan.action]
+ guaranteed = self.intersects_goal(pes_reachable_set)
+ if guaranteed and RealWorldPlanningProblem.making_progress(plan, initial_plan):
+ final_state = guaranteed[0] # any element of guaranteed
+ return RealWorldPlanningProblem.decompose(hierarchy, plan, final_state, pes_reachable_set)
+ # there should be at least one HLA/AngelicHLA, otherwise plan would be primitive
+ hla, index = RealWorldPlanningProblem.find_hla(plan, hierarchy)
+ prefix = plan.action[:index]
+ suffix = plan.action[index + 1:]
+ outcome = RealWorldPlanningProblem(
+ RealWorldPlanningProblem.result(self.initial, prefix), self.goals, self.actions)
+ for sequence in RealWorldPlanningProblem.refinements(hla, hierarchy): # find refinements
+ frontier.append(
+ AngelicNode(outcome.initial, plan, prefix + sequence + suffix, prefix + sequence + suffix))
+
+ def intersects_goal(self, reachable_set):
+ """
+ Find the intersection of the reachable states and the goal
+ """
+ return [y for x in list(reachable_set.keys())
+ for y in reachable_set[x]
+ if all(goal in y for goal in self.goals)]
+
+ def is_primitive(plan, library):
+ """
+ Checks whether every HLA in the plan is a primitive action
+ """
+ for hla in plan.action:
+ indices = [i for i, x in enumerate(library['HLA']) if expr(x).op == hla.name]
+ for i in indices:
+ if library["steps"][i]:
+ return False
+ return True
+
+ def reach_opt(init, plan):
+ """
+ Finds the optimistic reachable set of the sequence of actions in plan
+ """
+ reachable_set = {0: [init]}
+ optimistic_description = plan.action # list of angelic actions with optimistic description
+ return RealWorldPlanningProblem.find_reachable_set(reachable_set, optimistic_description)
+
+ def reach_pes(init, plan):
+ """
+ Finds the pessimistic reachable set of the sequence of actions in plan
+ """
+ reachable_set = {0: [init]}
+ pessimistic_description = plan.action_pes # list of angelic actions with pessimistic description
+ return RealWorldPlanningProblem.find_reachable_set(reachable_set, pessimistic_description)
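+ # Both reach_opt and reach_pes return a dict mapping step index i to the list
+ # of states reachable after applying the first i actions of the plan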
+
+ def find_reachable_set(reachable_set, action_description):
+ """
+ Finds the reachable states of the action_description when applied in each state of reachable set.
+ """
+ for i in range(len(action_description)):
+ reachable_set[i + 1] = []
+ if type(action_description[i]) is AngelicHLA:
+ possible_actions = action_description[i].angelic_action()
+ else:
+ possible_actions = action_description
+ for action in possible_actions:
+ for state in reachable_set[i]:
+ if action.check_precond(state, action.args):
+ if action.effect[0]:
+ new_state = action(state, action.args).clauses
+ reachable_set[i + 1].append(new_state)
+ else:
+ reachable_set[i + 1].append(state)
+ return reachable_set
+
+ def find_hla(plan, hierarchy):
+ """
+ Finds the first non-primitive HLA in plan.action
+ and its corresponding index in plan.action
+ """
+ hla = None
+ index = len(plan.action)
+ for i in range(len(plan.action)): # find the first HLA in plan, that is not primitive
+ if not RealWorldPlanningProblem.is_primitive(Node(plan.state, plan.parent, [plan.action[i]]), hierarchy):
+ hla = plan.action[i]
+ index = i
+ break
+ return hla, index
+
+ def making_progress(plan, initial_plan):
+ """
+ Prevents infinite regression of refinements.
+
+ (Infinite regression of refinements happens when the algorithm finds a plan whose
+ pessimistic reachable set intersects the goal inside a call to decompose on
+ the same plan, in the same circumstances.)
+ """
+ for i in range(len(initial_plan)):
+ if plan == initial_plan[i]:
+ return False
+ return True
+
+ def decompose(hierarchy, plan, s_f, reachable_set):
+ solution = []
+ i = max(reachable_set.keys())
+ while plan.action_pes:
+ action = plan.action_pes.pop()
+ if i == 0:
+ return solution
+ s_i = RealWorldPlanningProblem.find_previous_state(s_f, reachable_set, i, action)
+ problem = RealWorldPlanningProblem(s_i, s_f, plan.action)
+ angelic_call = RealWorldPlanningProblem.angelic_search(problem, hierarchy,
+ [AngelicNode(s_i, Node(None), [action], [action])])
+ if angelic_call:
+ for x in angelic_call:
+ solution.insert(0, x)
+ else:
+ return None
+ s_f = s_i
+ i -= 1
+ return solution
+
+ def find_previous_state(s_f, reachable_set, i, action):
+ """
+ Given a final state s_f and an action, finds a state s_i in reachable_set
+ such that applying the action to state s_i returns s_f.
+ """
+ s_i = reachable_set[i - 1][0]
+ for state in reachable_set[i - 1]:
+ if s_f in [x for x in RealWorldPlanningProblem.reach_pes(
+ state, AngelicNode(state, None, [action], [action]))[1]]:
+ s_i = state
+ break
+ return s_i
+
+
+def job_shop_problem():
+ """
+ [Figure 11.1] JOB-SHOP-PROBLEM
+
+ A job-shop scheduling problem for assembling two cars,
+ with resource and ordering constraints.
+
+ Example:
+ >>> from planning import *
+ >>> p = job_shop_problem()
+ >>> p.goal_test()
+ False
+ >>> p.act(p.jobs[1][0])
+ >>> p.act(p.jobs[1][1])
+ >>> p.act(p.jobs[1][2])
+ >>> p.act(p.jobs[0][0])
+ >>> p.act(p.jobs[0][1])
+ >>> p.goal_test()
+ False
+ >>> p.act(p.jobs[0][2])
+ >>> p.goal_test()
+ True
+ >>>
+ """
+ resources = {'EngineHoists': 1, 'WheelStations': 2, 'Inspectors': 2, 'LugNuts': 500}
+
+ add_engine1 = HLA('AddEngine1', precond='~Has(C1, E1)', effect='Has(C1, E1)', duration=30, use={'EngineHoists': 1})
+ add_engine2 = HLA('AddEngine2', precond='~Has(C2, E2)', effect='Has(C2, E2)', duration=60, use={'EngineHoists': 1})
+ add_wheels1 = HLA('AddWheels1', precond='~Has(C1, W1)', effect='Has(C1, W1)', duration=30, use={'WheelStations': 1},
+ consume={'LugNuts': 20})
+ add_wheels2 = HLA('AddWheels2', precond='~Has(C2, W2)', effect='Has(C2, W2)', duration=15, use={'WheelStations': 1},
+ consume={'LugNuts': 20})
+ inspect1 = HLA('Inspect1', precond='~Inspected(C1)', effect='Inspected(C1)', duration=10, use={'Inspectors': 1})
+ inspect2 = HLA('Inspect2', precond='~Inspected(C2)', effect='Inspected(C2)', duration=10, use={'Inspectors': 1})
+
+ actions = [add_engine1, add_engine2, add_wheels1, add_wheels2, inspect1, inspect2]
+
+ job_group1 = [add_engine1, add_wheels1, inspect1]
+ job_group2 = [add_engine2, add_wheels2, inspect2]
+
+ return RealWorldPlanningProblem(
+ initial='Car(C1) & Car(C2) & Wheels(W1) & Wheels(W2) & Engine(E1) & Engine(E2) & ~Has(C1, E1) & ~Has(C2, '
+ 'E2) & ~Has(C1, W1) & ~Has(C2, W2) & ~Inspected(C1) & ~Inspected(C2)',
+ goals='Has(C1, W1) & Has(C1, E1) & Inspected(C1) & Has(C2, W2) & Has(C2, E2) & Inspected(C2)',
+ actions=actions,
+ jobs=[job_group1, job_group2],
+ resources=resources)
+
+
+def go_to_sfo():
+ """Go to SFO Problem"""
+
+ go_home_sfo1 = HLA('Go(Home, SFO)', precond='At(Home) & Have(Car)', effect='At(SFO) & ~At(Home)')
+ go_home_sfo2 = HLA('Go(Home, SFO)', precond='At(Home)', effect='At(SFO) & ~At(Home)')
+ drive_home_sfoltp = HLA('Drive(Home, SFOLongTermParking)', precond='At(Home) & Have(Car)',
+ effect='At(SFOLongTermParking) & ~At(Home)')
+ shuttle_sfoltp_sfo = HLA('Shuttle(SFOLongTermParking, SFO)', precond='At(SFOLongTermParking)',
+ effect='At(SFO) & ~At(SFOLongTermParking)')
+ taxi_home_sfo = HLA('Taxi(Home, SFO)', precond='At(Home)', effect='At(SFO) & ~At(Home)')
+
+ actions = [go_home_sfo1, go_home_sfo2, drive_home_sfoltp, shuttle_sfoltp_sfo, taxi_home_sfo]
+
+ library = {
+ 'HLA': [
+ 'Go(Home, SFO)',
+ 'Go(Home, SFO)',
+ 'Drive(Home, SFOLongTermParking)',
+ 'Shuttle(SFOLongTermParking, SFO)',
+ 'Taxi(Home, SFO)'
+ ],
+ 'steps': [
+ ['Drive(Home, SFOLongTermParking)', 'Shuttle(SFOLongTermParking, SFO)'],
+ ['Taxi(Home, SFO)'],
+ [],
+ [],
+ []
+ ],
+ 'precond': [
+ ['At(Home) & Have(Car)'],
+ ['At(Home)'],
+ ['At(Home) & Have(Car)'],
+ ['At(SFOLongTermParking)'],
+ ['At(Home)']
+ ],
+ 'effect': [
+ ['At(SFO) & ~At(Home)'],
+ ['At(SFO) & ~At(Home)'],
+ ['At(SFOLongTermParking) & ~At(Home)'],
+ ['At(SFO) & ~At(SFOLongTermParking)'],
+ ['At(SFO) & ~At(Home)']]}
+
+ return RealWorldPlanningProblem(initial='At(Home)', goals='At(SFO)', actions=actions), library
+
+
+class AngelicHLA(HLA):
+ """
+ Define Actions for the real-world (that may be refined further), under angelic semantics
+ """
+
+ def __init__(self, action, precond, effect, duration=0, consume=None, use=None):
+ super().__init__(action, precond, effect, duration, consume, use)
+
+ def convert(self, clauses):
+ """
+ Converts strings into Exprs
+ An HLA with angelic semantics can achieve the effects of simple HLA's (add / remove a variable)
+ and furthermore can have following effects on the variables:
+ Possibly add variable ( $+ )
+ Possibly remove variable ( $- )
+ Possibly add or remove a variable ( $$ )
+
+ Overrides HLA.convert function
+ """
+ lib = {'~': 'Not',
+ '$+': 'PosYes',
+ '$-': 'PosNot',
+ '$$': 'PosYesNot'}
+
+ if isinstance(clauses, Expr):
+ clauses = conjuncts(clauses)
+ for i in range(len(clauses)):
+ for ch in lib.keys():
+ if clauses[i].op == ch:
+ clauses[i] = expr(lib[ch] + str(clauses[i].args[0]))
+
+ elif isinstance(clauses, str):
+ for ch in lib.keys():
+ clauses = clauses.replace(ch, lib[ch])
+ if len(clauses) > 0:
+ clauses = expr(clauses)
+
+ try:
+ clauses = conjuncts(clauses)
+ except AttributeError:
+ pass
+
+ return clauses
+
+ def angelic_action(self):
+ """
+ Converts a high level action (HLA) with angelic semantics into all of its corresponding high level actions (HLA).
+ An HLA with angelic semantics can achieve the effects of simple HLA's (add / remove a variable)
+ and furthermore can have following effects for each variable:
+
+ Possibly add variable ( $+: 'PosYes' ) --> corresponds to two HLAs:
+ HLA_1: add variable
+ HLA_2: leave variable unchanged
+
+ Possibly remove variable ( $-: 'PosNot' ) --> corresponds to two HLAs:
+ HLA_1: remove variable
+ HLA_2: leave variable unchanged
+
+ Possibly add / remove a variable ( $$: 'PosYesNot' ) --> corresponds to three HLAs:
+ HLA_1: add variable
+ HLA_2: remove variable
+ HLA_3: leave variable unchanged
+
+
+ example: the angelic action with effects possibly add A and possibly add or remove B corresponds to the
+ following 6 effects of HLAs:
+
+
+ '$+A & $$B': HLA_1: 'A & B' (add A and add B)
+ HLA_2: 'A & ~B' (add A and remove B)
+ HLA_3: 'A' (add A)
+ HLA_4: 'B' (add B)
+ HLA_5: '~B' (remove B)
+ HLA_6: ' ' (no effect)
+
+ """
+
+ effects = [[]]
+ for clause in self.effect:
+ (n, w) = AngelicHLA.compute_parameters(clause)
+ effects = effects * n # create n copies of effects
+ it = range(1)
+ if len(effects) != 0:
+ # split effects into n sublists (to separate the n copies created above by effects * n)
+ it = range(len(effects) // n)
+ for i in it:
+ if effects[i]:
+ if clause.args:
+ effects[i] = expr(str(effects[i]) + '&' + str(
+ Expr(clause.op[w:], clause.args[0]))) # make changes in the ith part of effects
+ if n == 3:
+ effects[i + len(effects) // 3] = expr(
+ str(effects[i + len(effects) // 3]) + '&' + str(Expr(clause.op[6:], clause.args[0])))
+ else:
+ effects[i] = expr(
+ str(effects[i]) + '&' + str(expr(clause.op[w:]))) # make changes in the ith part of effects
+ if n == 3:
+ effects[i + len(effects) // 3] = expr(
+ str(effects[i + len(effects) // 3]) + '&' + str(expr(clause.op[6:])))
+
+ else:
+ if clause.args:
+ effects[i] = Expr(clause.op[w:], clause.args[0]) # make changes in the ith part of effects
+ if n == 3:
+ effects[i + len(effects) // 3] = Expr(clause.op[6:], clause.args[0])
+
+ else:
+ effects[i] = expr(clause.op[w:]) # make changes in the ith part of effects
+ if n == 3:
+ effects[i + len(effects) // 3] = expr(clause.op[6:])
+
+ return [HLA(Expr(self.name, self.args), self.precond, effects[i]) for i in range(len(effects))]
+
+ def compute_parameters(clause):
+ """
+ computes n,w
+
+ n = number of HLA effects that the angelic HLA corresponds to
+ w = length of representation of angelic HLA effect
+
+ n = 1, if effect is add
+ n = 1, if effect is remove
+ n = 2, if effect is possibly add
+ n = 2, if effect is possibly remove
+ n = 3, if effect is possibly add or remove
+
+ """
+ if clause.op[:9] == 'PosYesNot':
+ # possibly add/remove variable: three possible effects for the variable
+ n = 3
+ w = 9
+ elif clause.op[:6] == 'PosYes': # possibly add variable: two possible effects for the variable
+ n = 2
+ w = 6
+ elif clause.op[:6] == 'PosNot': # possibly remove variable: two possible effects for the variable
+ n = 2
+ w = 3 # We want to keep 'Not' from 'PosNot' when adding action
+ else: # variable or ~variable
+ n = 1
+ w = 0
+ return n, w
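+ # e.g. clause PosYesNotAt(Home) -> (n, w) = (3, 9); PosNotAt(Home) -> (2, 3),
+ # keeping the 'Not' of 'PosNot'; At(Home) or NotAt(Home) -> (1, 0)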
+
+
+class AngelicNode(Node):
+ """
+ Extends the class Node.
+ self.action: contains the optimistic description of an angelic HLA
+ self.action_pes: contains the pessimistic description of an angelic HLA
+ """
+
+ def __init__(self, state, parent=None, action_opt=None, action_pes=None, path_cost=0):
+ super().__init__(state, parent, action_opt, path_cost)
+ self.action_pes = action_pes
diff --git a/planning_angelic_search.ipynb b/planning_angelic_search.ipynb
new file mode 100644
index 000000000..71408e1d9
--- /dev/null
+++ b/planning_angelic_search.ipynb
@@ -0,0 +1,638 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Angelic Search \n",
+ "\n",
+ "Search using angelic semantics (is a hierarchical search), where the agent chooses the implementation of the HLA's. \n",
+ "The algorithms input is: problem, hierarchy and initialPlan\n",
+ "- problem is of type Problem \n",
+ "- hierarchy is a dictionary consisting of all the actions. \n",
+ "- initialPlan is an approximate description(optimistic and pessimistic) of the agents choices for the implementation. \n",
+ " initialPlan contains a sequence of HLA's with angelic semantics"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from planning import * \n",
+ "from notebook import psource"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The Angelic search algorithm consists of three parts. \n",
+ "- Search using angelic semantics\n",
+ "- Decompose\n",
+ "- a search in the space of refinements, in a similar way with hierarchical search\n",
+ "\n",
+ "### Searching using angelic semantics\n",
+ "- Find the reachable set (optimistic and pessimistic) of the sequence of angelic HLA in initialPlan\n",
+ " - If the optimistic reachable set doesn't intersect the goal, then there is no solution\n",
+ " - If the pessimistic reachable set intersects the goal, then we call decompose, in order to find the sequence of actions that lead us to the goal. \n",
+ " - If the optimistic reachable set intersects the goal, but the pessimistic doesn't we do some further refinements, in order to see if there is a sequence of actions that achieves the goal. \n",
+ " \n",
+ "### Search in space of refinements\n",
+ "- Create a search tree, that has root the action and children it's refinements\n",
+ "- Extend frontier by adding each refinement, so that we keep looping till we find all primitive actions\n",
+ "- If we achieve that we return the path of the solution (search tree), else there is no solution and we return None.\n",
+ "\n",
+ " \n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ " def angelic_search ( problem , hierarchy , initialPlan ): \n",
+ " """ \n",
+ "\t[Figure 11.8] A hierarchical planning algorithm that uses angelic semantics to identify and \n",
+ "\tcommit to high-level plans that work while avoiding high-level plans that don’t. \n",
+ "\tThe predicate MAKING-PROGRESS checks to make sure that we aren’t stuck in an infinite regression \n",
+ "\tof refinements. \n",
+ "\tAt top level, call ANGELIC -SEARCH with [Act ] as the initialPlan . \n",
+ "\n",
+ " initialPlan contains a sequence of HLA's with angelic semantics \n",
+ "\n",
+ " The possible effects of an angelic HLA in initialPlan are : \n",
+ " ~ : effect remove \n",
+ " $+: effect possibly add \n",
+ " $-: effect possibly remove \n",
+ " $$: possibly add or remove \n",
+ "\t""" \n",
+ " frontier = deque ( initialPlan ) \n",
+ " while True : \n",
+ " if not frontier : \n",
+ " return None \n",
+ " plan = frontier . popleft () # sequence of HLA/Angelic HLA's \n",
+ " opt_reachable_set = Problem . reach_opt ( problem . init , plan ) \n",
+ " pes_reachable_set = Problem . reach_pes ( problem . init , plan ) \n",
+ " if problem . intersects_goal ( opt_reachable_set ): \n",
+ " if Problem . is_primitive ( plan , hierarchy ): \n",
+ " return ([ x for x in plan . action ]) \n",
+ " guaranteed = problem . intersects_goal ( pes_reachable_set ) \n",
+ " if guaranteed and Problem . making_progress ( plan , initialPlan ): \n",
+ " final_state = guaranteed [ 0 ] # any element of guaranteed \n",
+ " #print('decompose') \n",
+ " return Problem . decompose ( hierarchy , problem , plan , final_state , pes_reachable_set ) \n",
+ " ( hla , index ) = Problem . find_hla ( plan , hierarchy ) # there should be at least one HLA/Angelic_HLA, otherwise plan would be primitive. \n",
+ " prefix = plan . action [: index ] \n",
+ " suffix = plan . action [ index + 1 :] \n",
+ " outcome = Problem ( Problem . result ( problem . init , prefix ), problem . goals , problem . actions ) \n",
+ " for sequence in Problem . refinements ( hla , outcome , hierarchy ): # find refinements \n",
+ " frontier . append ( Angelic_Node ( outcome . init , plan , prefix + sequence + suffix , prefix + sequence + suffix )) \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(Problem.angelic_search)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "\n",
+ "### Decompose \n",
+ "- Finds recursively the sequence of states and actions that lead us from initial state to goal.\n",
+ "- For each of the above actions we find their refinements,if they are not primitive, by calling the angelic_search function. \n",
+ " If there are not refinements return None\n",
+ " \n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ " def decompose ( hierarchy , s_0 , plan , s_f , reachable_set ): \n",
+ " solution = [] \n",
+ " i = max ( reachable_set . keys ()) \n",
+ " while plan . action_pes : \n",
+ " action = plan . action_pes . pop () \n",
+ " if ( i == 0 ): \n",
+ " return solution \n",
+ " s_i = Problem . find_previous_state ( s_f , reachable_set , i , action ) \n",
+ " problem = Problem ( s_i , s_f , plan . action ) \n",
+ " angelic_call = Problem . angelic_search ( problem , hierarchy , [ Angelic_Node ( s_i , Node ( None ), [ action ],[ action ])]) \n",
+ " if angelic_call : \n",
+ " for x in angelic_call : \n",
+ " solution . insert ( 0 , x ) \n",
+ " else : \n",
+ " return None \n",
+ " s_f = s_i \n",
+ " i -= 1 \n",
+ " return solution \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(Problem.decompose)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Example\n",
+ "\n",
+ "Suppose that somebody wants to get to the airport. \n",
+ "The possible ways to do so is either get a taxi, or drive to the airport. \n",
+ "Those two actions have some preconditions and some effects. \n",
+ "If you get the taxi, you need to have cash, whereas if you drive you need to have a car. \n",
+ "Thus we define the following hierarchy of possible actions.\n",
+ "\n",
+ "##### hierarchy"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "library = {\n",
+ " 'HLA': ['Go(Home,SFO)', 'Go(Home,SFO)', 'Drive(Home, SFOLongTermParking)', 'Shuttle(SFOLongTermParking, SFO)', 'Taxi(Home, SFO)'],\n",
+ " 'steps': [['Drive(Home, SFOLongTermParking)', 'Shuttle(SFOLongTermParking, SFO)'], ['Taxi(Home, SFO)'], [], [], []],\n",
+ " 'precond': [['At(Home) & Have(Car)'], ['At(Home)'], ['At(Home) & Have(Car)'], ['At(SFOLongTermParking)'], ['At(Home)']],\n",
+ " 'effect': [['At(SFO) & ~At(Home)'], ['At(SFO) & ~At(Home) & ~Have(Cash)'], ['At(SFOLongTermParking) & ~At(Home)'], ['At(SFO) & ~At(LongTermParking)'], ['At(SFO) & ~At(Home) & ~Have(Cash)']] }\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "\n",
+ "the possible actions are the following:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "go_SFO = HLA('Go(Home,SFO)', precond='At(Home)', effect='At(SFO) & ~At(Home)')\n",
+ "taxi_SFO = HLA('Taxi(Home,SFO)', precond='At(Home)', effect='At(SFO) & ~At(Home) & ~Have(Cash)')\n",
+ "drive_SFOLongTermParking = HLA('Drive(Home, SFOLongTermParking)', 'At(Home) & Have(Car)','At(SFOLongTermParking) & ~At(Home)' )\n",
+ "shuttle_SFO = HLA('Shuttle(SFOLongTermParking, SFO)', 'At(SFOLongTermParking)', 'At(SFO) & ~At(LongTermParking)')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Suppose that (our preconditionds are that) we are Home and we have cash and car and our goal is to get to SFO and maintain our cash, and our possible actions are the above. \n",
+ "##### Then our problem is: "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "prob = Problem('At(Home) & Have(Cash) & Have(Car)', 'At(SFO) & Have(Cash)', [go_SFO, taxi_SFO, drive_SFOLongTermParking,shuttle_SFO])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "An agent gives us some approximate information about the plan we will follow: \n",
+ "(initialPlan is an Angelic Node, where: \n",
+ "- state is the initial state of the problem, \n",
+ "- parent is None \n",
+ "- action: is a list of actions (Angelic HLA's) with the optimistic estimators of effects and \n",
+ "- action_pes: is a list of actions (Angelic HLA's) with the pessimistic approximations of the effects\n",
+ "##### InitialPlan"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "angelic_opt_description = Angelic_HLA('Go(Home, SFO)', precond = 'At(Home)', effect ='$+At(SFO) & $-At(Home)' ) \n",
+ "angelic_pes_description = Angelic_HLA('Go(Home, SFO)', precond = 'At(Home)', effect ='$+At(SFO) & ~At(Home)' )\n",
+ "\n",
+ "initialPlan = [Angelic_Node(prob.init, None, [angelic_opt_description], [angelic_pes_description])] \n"
+ ]
+ },
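+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Before searching, it helps to see how a single angelic description expands. \n",
+ "`angelic_action` converts one Angelic_HLA into all the plain HLAs it may stand for; this is a small sketch (left unexecuted here), and the printed form of the effects depends on the module's internal Expr representation:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# $+At(SFO) allows 2 outcomes and $-At(Home) allows 2,\n",
+ "# so the optimistic description expands into 2 * 2 = 4 HLAs\n",
+ "for hla in angelic_opt_description.angelic_action():\n",
+ "    print(hla.effect)"
+ ]
+ },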
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We want to find the optimistic and pessimistic reachable set of initialPlan when applied to the problem:\n",
+ "##### Optimistic/Pessimistic reachable set"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[[At(Home), Have(Cash), Have(Car)], [Have(Cash), Have(Car), At(SFO), NotAt(Home)], [Have(Cash), Have(Car), NotAt(Home)], [At(Home), Have(Cash), Have(Car), At(SFO)], [At(Home), Have(Cash), Have(Car)]] \n",
+ "\n",
+ "[[At(Home), Have(Cash), Have(Car)], [Have(Cash), Have(Car), At(SFO), NotAt(Home)], [Have(Cash), Have(Car), NotAt(Home)]]\n"
+ ]
+ }
+ ],
+ "source": [
+ "opt_reachable_set = Problem.reach_opt(prob.init, initialPlan[0])\n",
+ "pes_reachable_set = Problem.reach_pes(prob.init, initialPlan[0])\n",
+ "print([x for y in opt_reachable_set.keys() for x in opt_reachable_set[y]], '\\n')\n",
+ "print([x for y in pes_reachable_set.keys() for x in pes_reachable_set[y]])\n"
+ ]
+ },
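+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As expected, the pessimistic reachable set is contained in the optimistic one: every state the plan is guaranteed to reach is also a state it may reach."
+ ]
+ },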
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "##### Refinements"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[HLA(Drive(Home, SFOLongTermParking)), HLA(Shuttle(SFOLongTermParking, SFO))]\n",
+ "[{'duration': 0, 'effect': [At(SFOLongTermParking), NotAt(Home)], 'args': (Home, SFOLongTermParking), 'uses': {}, 'consumes': {}, 'name': 'Drive', 'completed': False, 'precond': [At(Home), Have(Car)]}, {'duration': 0, 'effect': [At(SFO), NotAt(LongTermParking)], 'args': (SFOLongTermParking, SFO), 'uses': {}, 'consumes': {}, 'name': 'Shuttle', 'completed': False, 'precond': [At(SFOLongTermParking)]}] \n",
+ "\n",
+ "[HLA(Taxi(Home, SFO))]\n",
+ "[{'duration': 0, 'effect': [At(SFO), NotAt(Home), NotHave(Cash)], 'args': (Home, SFO), 'uses': {}, 'consumes': {}, 'name': 'Taxi', 'completed': False, 'precond': [At(Home)]}] \n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "for sequence in Problem.refinements(go_SFO, prob, library):\n",
+ " print (sequence)\n",
+ " print([x.__dict__ for x in sequence ], '\\n')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Run the angelic search\n",
+ "##### Top level call"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[HLA(Drive(Home, SFOLongTermParking)), HLA(Shuttle(SFOLongTermParking, SFO))] \n",
+ "\n",
+ "[{'duration': 0, 'effect': [At(SFOLongTermParking), NotAt(Home)], 'args': (Home, SFOLongTermParking), 'uses': {}, 'consumes': {}, 'name': 'Drive', 'completed': False, 'precond': [At(Home), Have(Car)]}, {'duration': 0, 'effect': [At(SFO), NotAt(LongTermParking)], 'args': (SFOLongTermParking, SFO), 'uses': {}, 'consumes': {}, 'name': 'Shuttle', 'completed': False, 'precond': [At(SFOLongTermParking)]}]\n"
+ ]
+ }
+ ],
+ "source": [
+ "plan= Problem.angelic_search(prob, library, initialPlan)\n",
+ "print (plan, '\\n')\n",
+ "print ([x.__dict__ for x in plan])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Example 2"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "library_2 = {\n",
+ " 'HLA': ['Go(Home,SFO)', 'Go(Home,SFO)', 'Bus(Home, MetroStop)', 'Metro(MetroStop, SFO)' , 'Metro(MetroStop, SFO)', 'Metro1(MetroStop, SFO)', 'Metro2(MetroStop, SFO)' ,'Taxi(Home, SFO)'],\n",
+ " 'steps': [['Bus(Home, MetroStop)', 'Metro(MetroStop, SFO)'], ['Taxi(Home, SFO)'], [], ['Metro1(MetroStop, SFO)'], ['Metro2(MetroStop, SFO)'],[],[],[]],\n",
+ " 'precond': [['At(Home)'], ['At(Home)'], ['At(Home)'], ['At(MetroStop)'], ['At(MetroStop)'],['At(MetroStop)'], ['At(MetroStop)'] ,['At(Home) & Have(Cash)']],\n",
+ " 'effect': [['At(SFO) & ~At(Home)'], ['At(SFO) & ~At(Home) & ~Have(Cash)'], ['At(MetroStop) & ~At(Home)'], ['At(SFO) & ~At(MetroStop)'], ['At(SFO) & ~At(MetroStop)'], ['At(SFO) & ~At(MetroStop)'] , ['At(SFO) & ~At(MetroStop)'] ,['At(SFO) & ~At(Home) & ~Have(Cash)']] \n",
+ " }"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[HLA(Bus(Home, MetroStop)), HLA(Metro1(MetroStop, SFO))] \n",
+ "\n",
+ "[{'duration': 0, 'effect': [At(MetroStop), NotAt(Home)], 'args': (Home, MetroStop), 'uses': {}, 'consumes': {}, 'name': 'Bus', 'completed': False, 'precond': [At(Home)]}, {'duration': 0, 'effect': [At(SFO), NotAt(MetroStop)], 'args': (MetroStop, SFO), 'uses': {}, 'consumes': {}, 'name': 'Metro1', 'completed': False, 'precond': [At(MetroStop)]}]\n"
+ ]
+ }
+ ],
+ "source": [
+ "plan_2 = Problem.angelic_search(prob, library_2, initialPlan)\n",
+ "print(plan_2, '\\n')\n",
+ "print([x.__dict__ for x in plan_2])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Example 3 \n",
+ "\n",
+ "Sometimes there is no plan that achieves the goal!"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "library_3 = {\n",
+ " 'HLA': ['Shuttle(SFOLongTermParking, SFO)', 'Go(Home, SFOLongTermParking)', 'Taxi(Home, SFOLongTermParking)', 'Drive(Home, SFOLongTermParking)', 'Drive(SFOLongTermParking, Home)', 'Get(Cash)', 'Go(Home, ATM)'],\n",
+ " 'steps': [['Get(Cash)', 'Go(Home, SFOLongTermParking)'], ['Taxi(Home, SFOLongTermParking)'], [], [], [], ['Drive(SFOLongTermParking, Home)', 'Go(Home, ATM)'], []],\n",
+ " 'precond': [['At(SFOLongTermParking)'], ['At(Home)'], ['At(Home) & Have(Cash)'], ['At(Home)'], ['At(SFOLongTermParking)'], ['At(SFOLongTermParking)'], ['At(Home)']],\n",
+ " 'effect': [['At(SFO)'], ['At(SFO)'], ['At(SFOLongTermParking) & ~Have(Cash)'], ['At(SFOLongTermParking)'] ,['At(Home) & ~At(SFOLongTermParking)'], ['At(Home) & Have(Cash)'], ['Have(Cash)'] ]\n",
+ " }\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "shuttle_SFO = HLA('Shuttle(SFOLongTermParking, SFO)', 'Have(Cash) & At(SFOLongTermParking)', 'At(SFO)')\n",
+ "prob_3 = Problem('At(SFOLongTermParking) & Have(Cash)', 'At(SFO) & Have(Cash)', [shuttle_SFO])\n",
+ "# optimistic/pessimistic descriptions\n",
+ "angelic_opt_description = Angelic_HLA('Shuttle(SFOLongTermParking, SFO)', precond = 'At(SFOLongTermParking)', effect ='$+At(SFO) & $-At(SFOLongTermParking)' ) \n",
+ "angelic_pes_description = Angelic_HLA('Shuttle(SFOLongTermParking, SFO)', precond = 'At(SFOLongTermParking)', effect ='$+At(SFO) & ~At(SFOLongTermParking)' ) \n",
+ "# initial Plan\n",
+ "initialPlan_3 = [Angelic_Node(prob.init, None, [angelic_opt_description], [angelic_pes_description])] "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "None\n"
+ ]
+ }
+ ],
+ "source": [
+ "plan_3 = prob_3.angelic_search(library_3, initialPlan_3)\n",
+ "print(plan_3)"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.5.3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}
diff --git a/planning_graphPlan.ipynb b/planning_graphPlan.ipynb
new file mode 100644
index 000000000..bffecb937
--- /dev/null
+++ b/planning_graphPlan.ipynb
@@ -0,0 +1,1066 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## SOLVING PLANNING PROBLEMS\n",
+ "----\n",
+ "### GRAPHPLAN\n",
+ " \n",
+ "The GraphPlan algorithm is a popular method of solving classical planning problems.\n",
+ "Before we get into the details of the algorithm, let's look at a special data structure called **planning graph**, used to give better heuristic estimates and plays a key role in the GraphPlan algorithm."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Planning Graph\n",
+ "A planning graph is a directed graph organized into levels. \n",
+ "Each level contains information about the current state of the knowledge base and the possible state-action links to and from that level.\n",
+ "The first level contains the initial state with nodes representing each fluent that holds in that level.\n",
+ "This level has state-action links linking each state to valid actions in that state.\n",
+ "Each action is linked to all its preconditions and its effect states.\n",
+ "Based on these effects, the next level is constructed.\n",
+ "The next level contains similarly structured information about the next state.\n",
+ "In this way, the graph is expanded using state-action links till we reach a state where all the required goals hold true simultaneously.\n",
+ "We can say that we have reached our goal if none of the goal states in the current level are mutually exclusive.\n",
+ "This will be explained in detail later.\n",
+ " \n",
+ "Planning graphs only work for propositional planning problems, hence we need to eliminate all variables by generating all possible substitutions.\n",
+ " \n",
+ "For example, the planning graph of the `have_cake_and_eat_cake_too` problem might look like this\n",
+ "\n",
+ " \n",
+ "The black lines indicate links between states and actions.\n",
+ " \n",
+ "In every planning problem, we are allowed to carry out the `no-op` action, ie, we can choose no action for a particular state.\n",
+ "These are called 'Persistence' actions and are represented in the graph by the small square boxes.\n",
+ "In technical terms, a persistence action has effects same as its preconditions.\n",
+ "This enables us to carry a state to the next level.\n",
+ " \n",
+ " \n",
+ "The gray lines indicate mutual exclusivity.\n",
+ "This means that the actions connected bya gray line cannot be taken together.\n",
+ "Mutual exclusivity (mutex) occurs in the following cases:\n",
+ "1. **Inconsistent effects**: One action negates the effect of the other. For example, _Eat(Cake)_ and the persistence of _Have(Cake)_ have inconsistent effects because they disagree on the effect _Have(Cake)_\n",
+ "2. **Interference**: One of the effects of an action is the negation of a precondition of the other. For example, _Eat(Cake)_ interferes with the persistence of _Have(Cake)_ by negating its precondition.\n",
+ "3. **Competing needs**: One of the preconditions of one action is mutually exclusive with a precondition of the other. For example, _Bake(Cake)_ and _Eat(Cake)_ are mutex because they compete on the value of the _Have(Cake)_ precondition."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In the module, planning graphs have been implemented using two classes, `Level` which stores data for a particular level and `Graph` which connects multiple levels together.\n",
+ "Let's look at the `Level` class."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from planning import *\n",
+ "from notebook import psource"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "class Level : \n",
+ " """ \n",
+ " Contains the state of the planning problem \n",
+ " and exhaustive list of actions which use the \n",
+ " states as pre-condition. \n",
+ " """ \n",
+ "\n",
+ " def __init__ ( self , kb ): \n",
+ " """Initializes variables to hold state and action details of a level""" \n",
+ "\n",
+ " self . kb = kb \n",
+ " # current state \n",
+ " self . current_state = kb . clauses \n",
+ " # current action to state link \n",
+ " self . current_action_links = {} \n",
+ " # current state to action link \n",
+ " self . current_state_links = {} \n",
+ " # current action to next state link \n",
+ " self . next_action_links = {} \n",
+ " # next state to current action link \n",
+ " self . next_state_links = {} \n",
+ " # mutually exclusive actions \n",
+ " self . mutex = [] \n",
+ "\n",
+ " def __call__ ( self , actions , objects ): \n",
+ " self . build ( actions , objects ) \n",
+ " self . find_mutex () \n",
+ "\n",
+ " def separate ( self , e ): \n",
+ " """Separates an iterable of elements into positive and negative parts""" \n",
+ "\n",
+ " positive = [] \n",
+ " negative = [] \n",
+ " for clause in e : \n",
+ " if clause . op [: 3 ] == 'Not' : \n",
+ " negative . append ( clause ) \n",
+ " else : \n",
+ " positive . append ( clause ) \n",
+ " return positive , negative \n",
+ "\n",
+ " def find_mutex ( self ): \n",
+ " """Finds mutually exclusive actions""" \n",
+ "\n",
+ " # Inconsistent effects \n",
+ " pos_nsl , neg_nsl = self . separate ( self . next_state_links ) \n",
+ "\n",
+ " for negeff in neg_nsl : \n",
+ " new_negeff = Expr ( negeff . op [ 3 :], * negeff . args ) \n",
+ " for poseff in pos_nsl : \n",
+ " if new_negeff == poseff : \n",
+ " for a in self . next_state_links [ poseff ]: \n",
+ " for b in self . next_state_links [ negeff ]: \n",
+ " if { a , b } not in self . mutex : \n",
+ " self . mutex . append ({ a , b }) \n",
+ "\n",
+ " # Interference will be calculated with the last step \n",
+ " pos_csl , neg_csl = self . separate ( self . current_state_links ) \n",
+ "\n",
+ " # Competing needs \n",
+ " for posprecond in pos_csl : \n",
+ " for negprecond in neg_csl : \n",
+ " new_negprecond = Expr ( negprecond . op [ 3 :], * negprecond . args ) \n",
+ " if new_negprecond == posprecond : \n",
+ " for a in self . current_state_links [ posprecond ]: \n",
+ " for b in self . current_state_links [ negprecond ]: \n",
+ " if { a , b } not in self . mutex : \n",
+ " self . mutex . append ({ a , b }) \n",
+ "\n",
+ " # Inconsistent support \n",
+ " state_mutex = [] \n",
+ " for pair in self . mutex : \n",
+ " next_state_0 = self . next_action_links [ list ( pair )[ 0 ]] \n",
+ " if len ( pair ) == 2 : \n",
+ " next_state_1 = self . next_action_links [ list ( pair )[ 1 ]] \n",
+ " else : \n",
+ " next_state_1 = self . next_action_links [ list ( pair )[ 0 ]] \n",
+ " if ( len ( next_state_0 ) == 1 ) and ( len ( next_state_1 ) == 1 ): \n",
+ " state_mutex . append ({ next_state_0 [ 0 ], next_state_1 [ 0 ]}) \n",
+ " \n",
+ " self . mutex = self . mutex + state_mutex \n",
+ "\n",
+ " def build ( self , actions , objects ): \n",
+ " """Populates the lists and dictionaries containing the state action dependencies""" \n",
+ "\n",
+ " for clause in self . current_state : \n",
+ " p_expr = Expr ( 'P' + clause . op , * clause . args ) \n",
+ " self . current_action_links [ p_expr ] = [ clause ] \n",
+ " self . next_action_links [ p_expr ] = [ clause ] \n",
+ " self . current_state_links [ clause ] = [ p_expr ] \n",
+ " self . next_state_links [ clause ] = [ p_expr ] \n",
+ "\n",
+ " for a in actions : \n",
+ " num_args = len ( a . args ) \n",
+ " possible_args = tuple ( itertools . permutations ( objects , num_args )) \n",
+ "\n",
+ " for arg in possible_args : \n",
+ " if a . check_precond ( self . kb , arg ): \n",
+ " for num , symbol in enumerate ( a . args ): \n",
+ " if not symbol . op . islower (): \n",
+ " arg = list ( arg ) \n",
+ " arg [ num ] = symbol \n",
+ " arg = tuple ( arg ) \n",
+ "\n",
+ " new_action = a . substitute ( Expr ( a . name , * a . args ), arg ) \n",
+ " self . current_action_links [ new_action ] = [] \n",
+ "\n",
+ " for clause in a . precond : \n",
+ " new_clause = a . substitute ( clause , arg ) \n",
+ " self . current_action_links [ new_action ] . append ( new_clause ) \n",
+ " if new_clause in self . current_state_links : \n",
+ " self . current_state_links [ new_clause ] . append ( new_action ) \n",
+ " else : \n",
+ " self . current_state_links [ new_clause ] = [ new_action ] \n",
+ " \n",
+ " self . next_action_links [ new_action ] = [] \n",
+ " for clause in a . effect : \n",
+ " new_clause = a . substitute ( clause , arg ) \n",
+ "\n",
+ " self . next_action_links [ new_action ] . append ( new_clause ) \n",
+ " if new_clause in self . next_state_links : \n",
+ " self . next_state_links [ new_clause ] . append ( new_action ) \n",
+ " else : \n",
+ " self . next_state_links [ new_clause ] = [ new_action ] \n",
+ "\n",
+ " def perform_actions ( self ): \n",
+ " """Performs the necessary actions and returns a new Level""" \n",
+ "\n",
+ " new_kb = FolKB ( list ( set ( self . next_state_links . keys ()))) \n",
+ " return Level ( new_kb ) \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(Level)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Each level stores the following data\n",
+ "1. The current state of the level in `current_state`\n",
+ "2. Links from an action to its preconditions in `current_action_links`\n",
+ "3. Links from a state to the possible actions in that state in `current_state_links`\n",
+ "4. Links from each action to its effects in `next_action_links`\n",
+ "5. Links from each possible next state from each action in `next_state_links`. This stores the same information as the `current_action_links` of the next level.\n",
+ "6. Mutex links in `mutex`.\n",
+ " \n",
+ " \n",
+ "The `find_mutex` method finds the mutex links according to the points given above.\n",
+ " \n",
+ "The `build` method populates the data structures storing the state and action information.\n",
+ "Persistence actions for each clause in the current state are also defined here. \n",
+ "The newly created persistence action has the same name as its state, prefixed with a 'P'."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let's now look at the `Graph` class."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "class Graph : \n",
+ " """ \n",
+ " Contains levels of state and actions \n",
+ " Used in graph planning algorithm to extract a solution \n",
+ " """ \n",
+ "\n",
+ " def __init__ ( self , planningproblem ): \n",
+ " self . planningproblem = planningproblem \n",
+ " self . kb = FolKB ( planningproblem . init ) \n",
+ " self . levels = [ Level ( self . kb )] \n",
+ " self . objects = set ( arg for clause in self . kb . clauses for arg in clause . args ) \n",
+ "\n",
+ " def __call__ ( self ): \n",
+ " self . expand_graph () \n",
+ "\n",
+ " def expand_graph ( self ): \n",
+ " """Expands the graph by a level""" \n",
+ "\n",
+ " last_level = self . levels [ - 1 ] \n",
+ " last_level ( self . planningproblem . actions , self . objects ) \n",
+ " self . levels . append ( last_level . perform_actions ()) \n",
+ "\n",
+ " def non_mutex_goals ( self , goals , index ): \n",
+ " """Checks whether the goals are mutually exclusive""" \n",
+ "\n",
+ " goal_perm = itertools . combinations ( goals , 2 ) \n",
+ " for g in goal_perm : \n",
+ " if set ( g ) in self . levels [ index ] . mutex : \n",
+ " return False \n",
+ " return True \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(Graph)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The class stores a problem definition in `pddl`, \n",
+ "a knowledge base in `kb`, \n",
+ "a list of `Level` objects in `levels` and \n",
+ "all the possible arguments found in the initial state of the problem in `objects`.\n",
+ " \n",
+ "The `expand_graph` method generates a new level of the graph.\n",
+ "This method is invoked when the goal conditions haven't been met in the current level or the actions that lead to it are mutually exclusive.\n",
+ "The `non_mutex_goals` method checks whether the goals in the current state are mutually exclusive.\n",
+ " \n",
+ " \n",
+ "Using these two classes, we can define a planning graph which can either be used to provide reliable heuristics for planning problems or used in the `GraphPlan` algorithm.\n",
+ " \n",
+ "Let's have a look at the `GraphPlan` class."
+ ]
+ },
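+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Before that, here is a quick sketch (left unexecuted, assuming the `have_cake_and_eat_cake_too` problem from the module) of expanding a planning graph by hand; `expand_graph` appends a new `Level` built from the effects of the current one:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "graph = Graph(have_cake_and_eat_cake_too())\n",
+ "graph.expand_graph()\n",
+ "# fluents in the initial level and in the level reached after one expansion\n",
+ "print(graph.levels[0].current_state)\n",
+ "print(graph.levels[1].current_state)"
+ ]
+ },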
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "class GraphPlan : \n",
+ " """ \n",
+ " Class for formulation GraphPlan algorithm \n",
+ " Constructs a graph of state and action space \n",
+ " Returns solution for the planning problem \n",
+ " """ \n",
+ "\n",
+ " def __init__ ( self , planningproblem ): \n",
+ " self . graph = Graph ( planningproblem ) \n",
+ " self . nogoods = [] \n",
+ " self . solution = [] \n",
+ "\n",
+ " def check_leveloff ( self ): \n",
+ " """Checks if the graph has levelled off""" \n",
+ "\n",
+ " check = ( set ( self . graph . levels [ - 1 ] . current_state ) == set ( self . graph . levels [ - 2 ] . current_state )) \n",
+ "\n",
+ " if check : \n",
+ " return True \n",
+ "\n",
+ " def extract_solution ( self , goals , index ): \n",
+ " """Extracts the solution""" \n",
+ "\n",
+ " level = self . graph . levels [ index ] \n",
+ " if not self . graph . non_mutex_goals ( goals , index ): \n",
+ " self . nogoods . append (( level , goals )) \n",
+ " return \n",
+ "\n",
+ " level = self . graph . levels [ index - 1 ] \n",
+ "\n",
+ " # Create all combinations of actions that satisfy the goal \n",
+ " actions = [] \n",
+ " for goal in goals : \n",
+ " actions . append ( level . next_state_links [ goal ]) \n",
+ "\n",
+ " all_actions = list ( itertools . product ( * actions )) \n",
+ "\n",
+ " # Filter out non-mutex actions \n",
+ " non_mutex_actions = [] \n",
+ " for action_tuple in all_actions : \n",
+ " action_pairs = itertools . combinations ( list ( set ( action_tuple )), 2 ) \n",
+ " non_mutex_actions . append ( list ( set ( action_tuple ))) \n",
+ " for pair in action_pairs : \n",
+ " if set ( pair ) in level . mutex : \n",
+ " non_mutex_actions . pop ( - 1 ) \n",
+ " break \n",
+ " \n",
+ "\n",
+ " # Recursion \n",
+ " for action_list in non_mutex_actions : \n",
+ " if [ action_list , index ] not in self . solution : \n",
+ " self . solution . append ([ action_list , index ]) \n",
+ "\n",
+ " new_goals = [] \n",
+ " for act in set ( action_list ): \n",
+ " if act in level . current_action_links : \n",
+ " new_goals = new_goals + level . current_action_links [ act ] \n",
+ "\n",
+ " if abs ( index ) + 1 == len ( self . graph . levels ): \n",
+ " return \n",
+ " elif ( level , new_goals ) in self . nogoods : \n",
+ " return \n",
+ " else : \n",
+ " self . extract_solution ( new_goals , index - 1 ) \n",
+ "\n",
+ " # Level-Order multiple solutions \n",
+ " solution = [] \n",
+ " for item in self . solution : \n",
+ " if item [ 1 ] == - 1 : \n",
+ " solution . append ([]) \n",
+ " solution [ - 1 ] . append ( item [ 0 ]) \n",
+ " else : \n",
+ " solution [ - 1 ] . append ( item [ 0 ]) \n",
+ "\n",
+ " for num , item in enumerate ( solution ): \n",
+ " item . reverse () \n",
+ " solution [ num ] = item \n",
+ "\n",
+ " return solution \n",
+ "\n",
+ " def goal_test ( self , kb ): \n",
+ " return all ( kb . ask ( q ) is not False for q in self . graph . planningproblem . goals ) \n",
+ "\n",
+ " def execute ( self ): \n",
+ " """Executes the GraphPlan algorithm for the given problem""" \n",
+ "\n",
+ " while True : \n",
+ " self . graph . expand_graph () \n",
+ " if ( self . goal_test ( self . graph . levels [ - 1 ] . kb ) and self . graph . non_mutex_goals ( self . graph . planningproblem . goals , - 1 )): \n",
+ " solution = self . extract_solution ( self . graph . planningproblem . goals , - 1 ) \n",
+ " if solution : \n",
+ " return solution \n",
+ " \n",
+ " if len ( self . graph . levels ) >= 2 and self . check_leveloff (): \n",
+ " return None \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(GraphPlan)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Given a planning problem defined as a PlanningProblem, `GraphPlan` creates a planning graph stored in `graph` and expands it till it reaches a state where all its required goals are present simultaneously without mutual exclusivity.\n",
+ " \n",
+ "Once a goal is found, `extract_solution` is called.\n",
+ "This method recursively finds the path to a solution given a planning graph.\n",
+ "In the case where `extract_solution` fails to find a solution for a set of goals as a given level, we record the `(level, goals)` pair as a **no-good**.\n",
+ "Whenever `extract_solution` is called again with the same level and goals, we can find the recorded no-good and immediately return failure rather than searching again. \n",
+ "No-goods are also used in the termination test.\n",
+ " \n",
+ "The `check_leveloff` method checks if the planning graph for the problem has **levelled-off**, ie, it has the same states, actions and mutex pairs as the previous level.\n",
+ "If the graph has already levelled off and we haven't found a solution, there is no point expanding the graph, as it won't lead to anything new.\n",
+ "In such a case, we can declare that the planning problem is unsolvable with the given constraints.\n",
+ " \n",
+ " \n",
+ "To summarize, the `GraphPlan` algorithm calls `expand_graph` and tests whether it has reached the goal and if the goals are non-mutex.\n",
+ " \n",
+ "If so, `extract_solution` is invoked which recursively reconstructs the solution from the planning graph.\n",
+ " \n",
+ "If not, then we check if our graph has levelled off and continue if it hasn't."
+ ]
+ },
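+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The no-good bookkeeping amounts to memoizing failures. Below is a minimal sketch of that idea, not the module's implementation; `solvable_at` is a hypothetical stand-in for the recursive extraction step.\n",
+ "```python\n",
+ "# Sketch: memoize failed (level, goals) pairs so that repeated extraction\n",
+ "# attempts with the same arguments fail immediately instead of re-searching.\n",
+ "nogoods = set()\n",
+ "\n",
+ "def extract(goals, index, solvable_at):\n",
+ "    key = (index, frozenset(goals))\n",
+ "    if key in nogoods:  # failure already recorded for this (level, goals)\n",
+ "        return False\n",
+ "    if solvable_at(goals, index):\n",
+ "        return True\n",
+ "    nogoods.add(key)  # record the failure before giving up\n",
+ "    return False\n",
+ "```"
+ ]
+ },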
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let's solve a few planning problems that we had defined earlier."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Air cargo problem\n",
+ "In accordance with the summary above, we have defined a helper function to carry out `GraphPlan` on the `air_cargo` problem.\n",
+ "The function is pretty straightforward.\n",
+ "Let's have a look."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "def air_cargo_graphplan (): \n",
+ " """Solves the air cargo problem using GraphPlan""" \n",
+ " return GraphPlan ( air_cargo ()) . execute () \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(air_cargo_graphplan)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let's instantiate the problem and find a solution using this helper function."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[[[Load(C2, P2, JFK),\n",
+ " PAirport(SFO),\n",
+ " PAirport(JFK),\n",
+ " PPlane(P2),\n",
+ " PPlane(P1),\n",
+ " Fly(P2, JFK, SFO),\n",
+ " PCargo(C2),\n",
+ " Load(C1, P1, SFO),\n",
+ " Fly(P1, SFO, JFK),\n",
+ " PCargo(C1)],\n",
+ " [Unload(C2, P2, SFO), Unload(C1, P1, JFK)]]]"
+ ]
+ },
+ "execution_count": 19,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "airCargoG = air_cargo_graphplan()\n",
+ "airCargoG"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Each element in the solution is a valid action.\n",
+ "The solution is separated into lists for each level.\n",
+ "The actions prefixed with a 'P' are persistence actions and can be ignored.\n",
+ "They simply carry certain states forward.\n",
+ "We have another helper function `linearize` that presents the solution in a more readable format, much like a total-order planner, but it is _not_ a total-order planner."
+ ]
+ },
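+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For intuition, stripping the persistence actions is a simple filter. The sketch below mirrors the test used elsewhere in the module (an action name starting with 'P' followed by an uppercase letter), assuming `solution[0]` holds the list of levels:\n",
+ "```python\n",
+ "# Sketch: drop persistence actions (e.g. PAirport) from each level.\n",
+ "def strip_persistence(solution):\n",
+ "    return [[a for a in level\n",
+ "             if not (a.op[0] == 'P' and a.op[1].isupper())]\n",
+ "            for level in solution[0]]\n",
+ "```"
+ ]
+ },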
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[Load(C2, P2, JFK),\n",
+ " Fly(P2, JFK, SFO),\n",
+ " Load(C1, P1, SFO),\n",
+ " Fly(P1, SFO, JFK),\n",
+ " Unload(C2, P2, SFO),\n",
+ " Unload(C1, P1, JFK)]"
+ ]
+ },
+ "execution_count": 20,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "linearize(airCargoG)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Indeed, this is a correct solution.\n",
+ " \n",
+ "There are similar helper functions for some other planning problems.\n",
+ " \n",
+ "Lets' try solving the spare tire problem."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[Remove(Spare, Trunk), Remove(Flat, Axle), PutOn(Spare, Axle)]"
+ ]
+ },
+ "execution_count": 21,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "spareTireG = spare_tire_graphplan()\n",
+ "linearize(spareTireG)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Solution for the cake problem"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[Eat(Cake), Bake(Cake)]"
+ ]
+ },
+ "execution_count": 22,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "cakeProblemG = have_cake_and_eat_cake_too_graphplan()\n",
+ "linearize(cakeProblemG)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Solution for the Sussman's Anomaly configuration of three blocks."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[MoveToTable(C, A), Move(B, Table, C), Move(A, Table, B)]"
+ ]
+ },
+ "execution_count": 23,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "sussmanAnomalyG = three_block_tower_graphplan()\n",
+ "linearize(sussmanAnomalyG)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Solution of the socks and shoes problem"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 24,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[RightSock, LeftSock, RightShoe, LeftShoe]"
+ ]
+ },
+ "execution_count": 24,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "socksShoesG = socks_and_shoes_graphplan()\n",
+ "linearize(socksShoesG)"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.5.3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}
diff --git a/planning_hierarchical_search.ipynb b/planning_hierarchical_search.ipynb
new file mode 100644
index 000000000..18e57b23b
--- /dev/null
+++ b/planning_hierarchical_search.ipynb
@@ -0,0 +1,546 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Hierarchical Search \n",
+ "\n",
+ "Hierarchical search is a a planning algorithm in high level of abstraction. \n",
+ "Instead of actions as in classical planning (chapter 10) (primitive actions) we now use high level actions (HLAs) (see planning.ipynb) \n",
+ "\n",
+ "## Refinements\n",
+ "\n",
+ "Each __HLA__ has one or more refinements into a sequence of actions, each of which may be an HLA or a primitive action (which has no refinements by definition). \n",
+ "For example:\n",
+ "- (a) the high level action \"Go to San Fransisco airport\" (Go(Home, SFO)), might have two possible refinements, \"Drive to San Fransisco airport\" and \"Taxi to San Fransisco airport\". \n",
+ " \n",
+ "- (b) A recursive refinement for navigation in the vacuum world would be: to get to a\n",
+ "destination, take a step, and then go to the destination.\n",
+ " \n",
+ "\n",
+ " \n",
+ "- __implementation__: An HLA refinement that contains only primitive actions is called an implementation of the HLA\n",
+ "- An implementation of a high-level plan (a sequence of HLAs) is the concatenation of implementations of each HLA in the sequence\n",
+ "- A high-level plan __achieves the goal__ from a given state if at least one of its implementations achieves the goal from that state\n",
+ " \n",
+ "\n",
+ "The refinements function input is: \n",
+ "- __hla__: the HLA of which we want to compute its refinements\n",
+ "- __state__: the knoweledge base of the current problem (Problem.init)\n",
+ "- __library__: the hierarchy of the actions in the planning problem\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from planning import * \n",
+ "from notebook import psource"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ " def refinements ( hla , state , library ): # refinements may be (multiple) HLA themselves ... \n",
+ " """ \n",
+ " state is a Problem, containing the current state kb \n",
+ " library is a dictionary containing details for every possible refinement. eg: \n",
+ " { \n",
+ " 'HLA': [ \n",
+ " 'Go(Home, SFO)', \n",
+ " 'Go(Home, SFO)', \n",
+ " 'Drive(Home, SFOLongTermParking)', \n",
+ " 'Shuttle(SFOLongTermParking, SFO)', \n",
+ " 'Taxi(Home, SFO)' \n",
+ " ], \n",
+ " 'steps': [ \n",
+ " ['Drive(Home, SFOLongTermParking)', 'Shuttle(SFOLongTermParking, SFO)'], \n",
+ " ['Taxi(Home, SFO)'], \n",
+ " [], \n",
+ " [], \n",
+ " [] \n",
+ " ], \n",
+ " # empty refinements indicate a primitive action \n",
+ " 'precond': [ \n",
+ " ['At(Home) & Have(Car)'], \n",
+ " ['At(Home)'], \n",
+ " ['At(Home) & Have(Car)'], \n",
+ " ['At(SFOLongTermParking)'], \n",
+ " ['At(Home)'] \n",
+ " ], \n",
+ " 'effect': [ \n",
+ " ['At(SFO) & ~At(Home)'], \n",
+ " ['At(SFO) & ~At(Home)'], \n",
+ " ['At(SFOLongTermParking) & ~At(Home)'], \n",
+ " ['At(SFO) & ~At(SFOLongTermParking)'], \n",
+ " ['At(SFO) & ~At(Home)'] \n",
+ " ] \n",
+ " } \n",
+ " """ \n",
+ " e = Expr ( hla . name , hla . args ) \n",
+ " indices = [ i for i , x in enumerate ( library [ 'HLA' ]) if expr ( x ) . op == hla . name ] \n",
+ " for i in indices : \n",
+ " actions = [] \n",
+ " for j in range ( len ( library [ 'steps' ][ i ])): \n",
+ " # find the index of the step [j] of the HLA \n",
+ " index_step = [ k for k , x in enumerate ( library [ 'HLA' ]) if x == library [ 'steps' ][ i ][ j ]][ 0 ] \n",
+ " precond = library [ 'precond' ][ index_step ][ 0 ] # preconditions of step [j] \n",
+ " effect = library [ 'effect' ][ index_step ][ 0 ] # effect of step [j] \n",
+ " actions . append ( HLA ( library [ 'steps' ][ i ][ j ], precond , effect )) \n",
+ " yield actions \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(Problem.refinements)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Hierarchical search \n",
+ "\n",
+ "Hierarchical search is a breadth-first implementation of hierarchical forward planning search in the space of refinements. (i.e. repeatedly choose an HLA in the current plan and replace it with one of its refinements, until the plan achieves the goal.) \n",
+ "\n",
+ " \n",
+ "The algorithms input is: problem and hierarchy\n",
+ "- __problem__: is of type Problem \n",
+ "- __hierarchy__: is a dictionary consisting of all the actions and the order in which they are performed. \n",
+ " \n",
+ "\n",
+ "In top level call, initialPlan contains [act] (i.e. is the action to be performed) "
+ ]
+ },
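+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As a minimal, self-contained sketch of this search in refinement space (the helpers `is_primitive`, `refinements_of` and `achieves_goal` are hypothetical stand-ins for the module's logic, which is shown below), the loop looks like:\n",
+ "```python\n",
+ "from collections import deque\n",
+ "\n",
+ "# Sketch of BFS over plans: replace the first non-primitive action\n",
+ "# with each of its refinements until a primitive plan reaches the goal.\n",
+ "def bfs_refine(initial_plan, is_primitive, refinements_of, achieves_goal):\n",
+ "    frontier = deque([initial_plan])\n",
+ "    while frontier:\n",
+ "        plan = frontier.popleft()\n",
+ "        hla = next((a for a in plan if not is_primitive(a)), None)\n",
+ "        if hla is None:  # fully primitive plan\n",
+ "            if achieves_goal(plan):\n",
+ "                return plan\n",
+ "        else:\n",
+ "            i = plan.index(hla)\n",
+ "            for seq in refinements_of(hla):  # splice the refinement in place\n",
+ "                frontier.append(plan[:i] + seq + plan[i + 1:])\n",
+ "    return None\n",
+ "```"
+ ]
+ },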
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ " def hierarchical_search ( problem , hierarchy ): \n",
+ " """ \n",
+ " [Figure 11.5] 'Hierarchical Search, a Breadth First Search implementation of Hierarchical \n",
+ " Forward Planning Search' \n",
+ " The problem is a real-world problem defined by the problem class, and the hierarchy is \n",
+ " a dictionary of HLA - refinements (see refinements generator for details) \n",
+ " """ \n",
+ " act = Node ( problem . init , None , [ problem . actions [ 0 ]]) \n",
+ " frontier = deque () \n",
+ " frontier . append ( act ) \n",
+ " while True : \n",
+ " if not frontier : \n",
+ " return None \n",
+ " plan = frontier . popleft () \n",
+ " ( hla , index ) = Problem . find_hla ( plan , hierarchy ) # finds the first non primitive hla in plan actions \n",
+ " prefix = plan . action [: index ] \n",
+ " outcome = Problem ( Problem . result ( problem . init , prefix ), problem . goals , problem . actions ) \n",
+ " suffix = plan . action [ index + 1 :] \n",
+ " if not hla : # hla is None and plan is primitive \n",
+ " if outcome . goal_test (): \n",
+ " return plan . action \n",
+ " else : \n",
+ " for sequence in Problem . refinements ( hla , outcome , hierarchy ): # find refinements \n",
+ " frontier . append ( Node ( outcome . init , plan , prefix + sequence + suffix )) \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(Problem.hierarchical_search)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Example\n",
+ "\n",
+ "Suppose that somebody wants to get to the airport. \n",
+ "The possible ways to do so is either get a taxi, or drive to the airport. \n",
+ "Those two actions have some preconditions and some effects. \n",
+ "If you get the taxi, you need to have cash, whereas if you drive you need to have a car. \n",
+ "Thus we define the following hierarchy of possible actions.\n",
+ "\n",
+ "##### hierarchy"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "library = {\n",
+ " 'HLA': ['Go(Home,SFO)', 'Go(Home,SFO)', 'Drive(Home, SFOLongTermParking)', 'Shuttle(SFOLongTermParking, SFO)', 'Taxi(Home, SFO)'],\n",
+ " 'steps': [['Drive(Home, SFOLongTermParking)', 'Shuttle(SFOLongTermParking, SFO)'], ['Taxi(Home, SFO)'], [], [], []],\n",
+ " 'precond': [['At(Home) & Have(Car)'], ['At(Home)'], ['At(Home) & Have(Car)'], ['At(SFOLongTermParking)'], ['At(Home)']],\n",
+ " 'effect': [['At(SFO) & ~At(Home)'], ['At(SFO) & ~At(Home) & ~Have(Cash)'], ['At(SFOLongTermParking) & ~At(Home)'], ['At(SFO) & ~At(LongTermParking)'], ['At(SFO) & ~At(Home) & ~Have(Cash)']] }\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "\n",
+ "the possible actions are the following:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "go_SFO = HLA('Go(Home,SFO)', precond='At(Home)', effect='At(SFO) & ~At(Home)')\n",
+ "taxi_SFO = HLA('Taxi(Home,SFO)', precond='At(Home)', effect='At(SFO) & ~At(Home) & ~Have(Cash)')\n",
+ "drive_SFOLongTermParking = HLA('Drive(Home, SFOLongTermParking)', 'At(Home) & Have(Car)','At(SFOLongTermParking) & ~At(Home)' )\n",
+ "shuttle_SFO = HLA('Shuttle(SFOLongTermParking, SFO)', 'At(SFOLongTermParking)', 'At(SFO) & ~At(LongTermParking)')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Suppose that (our preconditionds are that) we are Home and we have cash and car and our goal is to get to SFO and maintain our cash, and our possible actions are the above. \n",
+ "##### Then our problem is: "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "prob = Problem('At(Home) & Have(Cash) & Have(Car)', 'At(SFO) & Have(Cash)', [go_SFO])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "##### Refinements\n",
+ "\n",
+ "The refinements of the action Go(Home, SFO), are defined as: \n",
+ "['Drive(Home,SFOLongTermParking)', 'Shuttle(SFOLongTermParking, SFO)'], ['Taxi(Home, SFO)']"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[HLA(Drive(Home, SFOLongTermParking)), HLA(Shuttle(SFOLongTermParking, SFO))]\n",
+ "[{'completed': False, 'args': (Home, SFOLongTermParking), 'name': 'Drive', 'uses': {}, 'duration': 0, 'effect': [At(SFOLongTermParking), NotAt(Home)], 'consumes': {}, 'precond': [At(Home), Have(Car)]}, {'completed': False, 'args': (SFOLongTermParking, SFO), 'name': 'Shuttle', 'uses': {}, 'duration': 0, 'effect': [At(SFO), NotAt(LongTermParking)], 'consumes': {}, 'precond': [At(SFOLongTermParking)]}] \n",
+ "\n",
+ "[HLA(Taxi(Home, SFO))]\n",
+ "[{'completed': False, 'args': (Home, SFO), 'name': 'Taxi', 'uses': {}, 'duration': 0, 'effect': [At(SFO), NotAt(Home), NotHave(Cash)], 'consumes': {}, 'precond': [At(Home)]}] \n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "for sequence in Problem.refinements(go_SFO, prob, library):\n",
+ " print (sequence)\n",
+ " print([x.__dict__ for x in sequence ], '\\n')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Run the hierarchical search\n",
+ "##### Top level call"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[HLA(Drive(Home, SFOLongTermParking)), HLA(Shuttle(SFOLongTermParking, SFO))] \n",
+ "\n",
+ "[{'completed': False, 'args': (Home, SFOLongTermParking), 'name': 'Drive', 'uses': {}, 'duration': 0, 'effect': [At(SFOLongTermParking), NotAt(Home)], 'consumes': {}, 'precond': [At(Home), Have(Car)]}, {'completed': False, 'args': (SFOLongTermParking, SFO), 'name': 'Shuttle', 'uses': {}, 'duration': 0, 'effect': [At(SFO), NotAt(LongTermParking)], 'consumes': {}, 'precond': [At(SFOLongTermParking)]}]\n"
+ ]
+ }
+ ],
+ "source": [
+ "plan= Problem.hierarchical_search(prob, library)\n",
+ "print (plan, '\\n')\n",
+ "print ([x.__dict__ for x in plan])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Example 2"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "library_2 = {\n",
+ " 'HLA': ['Go(Home,SFO)', 'Go(Home,SFO)', 'Bus(Home, MetroStop)', 'Metro(MetroStop, SFO)' , 'Metro(MetroStop, SFO)', 'Metro1(MetroStop, SFO)', 'Metro2(MetroStop, SFO)' ,'Taxi(Home, SFO)'],\n",
+ " 'steps': [['Bus(Home, MetroStop)', 'Metro(MetroStop, SFO)'], ['Taxi(Home, SFO)'], [], ['Metro1(MetroStop, SFO)'], ['Metro2(MetroStop, SFO)'],[],[],[]],\n",
+ " 'precond': [['At(Home)'], ['At(Home)'], ['At(Home)'], ['At(MetroStop)'], ['At(MetroStop)'],['At(MetroStop)'], ['At(MetroStop)'] ,['At(Home) & Have(Cash)']],\n",
+ " 'effect': [['At(SFO) & ~At(Home)'], ['At(SFO) & ~At(Home) & ~Have(Cash)'], ['At(MetroStop) & ~At(Home)'], ['At(SFO) & ~At(MetroStop)'], ['At(SFO) & ~At(MetroStop)'], ['At(SFO) & ~At(MetroStop)'] , ['At(SFO) & ~At(MetroStop)'] ,['At(SFO) & ~At(Home) & ~Have(Cash)']] \n",
+ " }"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 25,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[HLA(Bus(Home, MetroStop)), HLA(Metro1(MetroStop, SFO))] \n",
+ "\n",
+ "[{'completed': False, 'args': (Home, MetroStop), 'name': 'Bus', 'uses': {}, 'duration': 0, 'effect': [At(MetroStop), NotAt(Home)], 'consumes': {}, 'precond': [At(Home)]}, {'completed': False, 'args': (MetroStop, SFO), 'name': 'Metro1', 'uses': {}, 'duration': 0, 'effect': [At(SFO), NotAt(MetroStop)], 'consumes': {}, 'precond': [At(MetroStop)]}]\n"
+ ]
+ }
+ ],
+ "source": [
+ "plan_2 = Problem.hierarchical_search(prob, library_2)\n",
+ "print(plan_2, '\\n')\n",
+ "print([x.__dict__ for x in plan_2])"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.5.3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}
diff --git a/planning_partial_order_planner.ipynb b/planning_partial_order_planner.ipynb
new file mode 100644
index 000000000..4b1a98bb3
--- /dev/null
+++ b/planning_partial_order_planner.ipynb
@@ -0,0 +1,850 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### PARTIAL ORDER PLANNER\n",
+ "A partial-order planning algorithm is significantly different from a total-order planner.\n",
+ "The way a partial-order plan works enables it to take advantage of _problem decomposition_ and work on each subproblem separately.\n",
+ "It works on several subgoals independently, solves them with several subplans, and then combines the plan.\n",
+ " \n",
+ "A partial-order planner also follows the **least commitment** strategy, where it delays making choices for as long as possible.\n",
+ "Variables are not bound unless it is absolutely necessary and new actions are chosen only if the existing actions cannot fulfil the required precondition.\n",
+ " \n",
+ "Any planning algorithm that can place two actions into a plan without specifying which comes first is called a **partial-order planner**.\n",
+ "A partial-order planner searches through the space of plans rather than the space of states, which makes it perform better for certain problems.\n",
+ " \n",
+ " \n",
+ "Let's have a look at the `PartialOrderPlanner` class."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from planning import *\n",
+ "from notebook import psource"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "class PartialOrderPlanner : \n",
+ "\n",
+ " def __init__ ( self , planningproblem ): \n",
+ " self . planningproblem = planningproblem \n",
+ " self . initialize () \n",
+ "\n",
+ " def initialize ( self ): \n",
+ " """Initialize all variables""" \n",
+ " self . causal_links = [] \n",
+ " self . start = Action ( 'Start' , [], self . planningproblem . init ) \n",
+ " self . finish = Action ( 'Finish' , self . planningproblem . goals , []) \n",
+ " self . actions = set () \n",
+ " self . actions . add ( self . start ) \n",
+ " self . actions . add ( self . finish ) \n",
+ " self . constraints = set () \n",
+ " self . constraints . add (( self . start , self . finish )) \n",
+ " self . agenda = set () \n",
+ " for precond in self . finish . precond : \n",
+ " self . agenda . add (( precond , self . finish )) \n",
+ " self . expanded_actions = self . expand_actions () \n",
+ "\n",
+ " def expand_actions ( self , name = None ): \n",
+ " """Generate all possible actions with variable bindings for precondition selection heuristic""" \n",
+ "\n",
+ " objects = set ( arg for clause in self . planningproblem . init for arg in clause . args ) \n",
+ " expansions = [] \n",
+ " action_list = [] \n",
+ " if name is not None : \n",
+ " for action in self . planningproblem . actions : \n",
+ " if str ( action . name ) == name : \n",
+ " action_list . append ( action ) \n",
+ " else : \n",
+ " action_list = self . planningproblem . actions \n",
+ "\n",
+ " for action in action_list : \n",
+ " for permutation in itertools . permutations ( objects , len ( action . args )): \n",
+ " bindings = unify ( Expr ( action . name , * action . args ), Expr ( action . name , * permutation )) \n",
+ " if bindings is not None : \n",
+ " new_args = [] \n",
+ " for arg in action . args : \n",
+ " if arg in bindings : \n",
+ " new_args . append ( bindings [ arg ]) \n",
+ " else : \n",
+ " new_args . append ( arg ) \n",
+ " new_expr = Expr ( str ( action . name ), * new_args ) \n",
+ " new_preconds = [] \n",
+ " for precond in action . precond : \n",
+ " new_precond_args = [] \n",
+ " for arg in precond . args : \n",
+ " if arg in bindings : \n",
+ " new_precond_args . append ( bindings [ arg ]) \n",
+ " else : \n",
+ " new_precond_args . append ( arg ) \n",
+ " new_precond = Expr ( str ( precond . op ), * new_precond_args ) \n",
+ " new_preconds . append ( new_precond ) \n",
+ " new_effects = [] \n",
+ " for effect in action . effect : \n",
+ " new_effect_args = [] \n",
+ " for arg in effect . args : \n",
+ " if arg in bindings : \n",
+ " new_effect_args . append ( bindings [ arg ]) \n",
+ " else : \n",
+ " new_effect_args . append ( arg ) \n",
+ " new_effect = Expr ( str ( effect . op ), * new_effect_args ) \n",
+ " new_effects . append ( new_effect ) \n",
+ " expansions . append ( Action ( new_expr , new_preconds , new_effects )) \n",
+ "\n",
+ " return expansions \n",
+ "\n",
+ " def find_open_precondition ( self ): \n",
+ " """Find open precondition with the least number of possible actions""" \n",
+ "\n",
+ " number_of_ways = dict () \n",
+ " actions_for_precondition = dict () \n",
+ " for element in self . agenda : \n",
+ " open_precondition = element [ 0 ] \n",
+ " possible_actions = list ( self . actions ) + self . expanded_actions \n",
+ " for action in possible_actions : \n",
+ " for effect in action . effect : \n",
+ " if effect == open_precondition : \n",
+ " if open_precondition in number_of_ways : \n",
+ " number_of_ways [ open_precondition ] += 1 \n",
+ " actions_for_precondition [ open_precondition ] . append ( action ) \n",
+ " else : \n",
+ " number_of_ways [ open_precondition ] = 1 \n",
+ " actions_for_precondition [ open_precondition ] = [ action ] \n",
+ "\n",
+ " number = sorted ( number_of_ways , key = number_of_ways . __getitem__ ) \n",
+ " \n",
+ " for k , v in number_of_ways . items (): \n",
+ " if v == 0 : \n",
+ " return None , None , None \n",
+ "\n",
+ " act1 = None \n",
+ " for element in self . agenda : \n",
+ " if element [ 0 ] == number [ 0 ]: \n",
+ " act1 = element [ 1 ] \n",
+ " break \n",
+ "\n",
+ " if number [ 0 ] in self . expanded_actions : \n",
+ " self . expanded_actions . remove ( number [ 0 ]) \n",
+ "\n",
+ " return number [ 0 ], act1 , actions_for_precondition [ number [ 0 ]] \n",
+ "\n",
+ " def find_action_for_precondition ( self , oprec ): \n",
+ " """Find action for a given precondition""" \n",
+ "\n",
+ " # either \n",
+ " # choose act0 E Actions such that act0 achieves G \n",
+ " for action in self . actions : \n",
+ " for effect in action . effect : \n",
+ " if effect == oprec : \n",
+ " return action , 0 \n",
+ "\n",
+ " # or \n",
+ " # choose act0 E Actions such that act0 achieves G \n",
+ " for action in self . planningproblem . actions : \n",
+ " for effect in action . effect : \n",
+ " if effect . op == oprec . op : \n",
+ " bindings = unify ( effect , oprec ) \n",
+ " if bindings is None : \n",
+ " break \n",
+ " return action , bindings \n",
+ "\n",
+ " def generate_expr ( self , clause , bindings ): \n",
+ " """Generate atomic expression from generic expression given variable bindings""" \n",
+ "\n",
+ " new_args = [] \n",
+ " for arg in clause . args : \n",
+ " if arg in bindings : \n",
+ " new_args . append ( bindings [ arg ]) \n",
+ " else : \n",
+ " new_args . append ( arg ) \n",
+ "\n",
+ " try : \n",
+ " return Expr ( str ( clause . name ), * new_args ) \n",
+ " except : \n",
+ " return Expr ( str ( clause . op ), * new_args ) \n",
+ " \n",
+ " def generate_action_object ( self , action , bindings ): \n",
+ " """Generate action object given a generic action andvariable bindings""" \n",
+ "\n",
+ " # if bindings is 0, it means the action already exists in self.actions \n",
+ " if bindings == 0 : \n",
+ " return action \n",
+ "\n",
+ " # bindings cannot be None \n",
+ " else : \n",
+ " new_expr = self . generate_expr ( action , bindings ) \n",
+ " new_preconds = [] \n",
+ " for precond in action . precond : \n",
+ " new_precond = self . generate_expr ( precond , bindings ) \n",
+ " new_preconds . append ( new_precond ) \n",
+ " new_effects = [] \n",
+ " for effect in action . effect : \n",
+ " new_effect = self . generate_expr ( effect , bindings ) \n",
+ " new_effects . append ( new_effect ) \n",
+ " return Action ( new_expr , new_preconds , new_effects ) \n",
+ "\n",
+ " def cyclic ( self , graph ): \n",
+ " """Check cyclicity of a directed graph""" \n",
+ "\n",
+ " new_graph = dict () \n",
+ " for element in graph : \n",
+ " if element [ 0 ] in new_graph : \n",
+ " new_graph [ element [ 0 ]] . append ( element [ 1 ]) \n",
+ " else : \n",
+ " new_graph [ element [ 0 ]] = [ element [ 1 ]] \n",
+ "\n",
+ " path = set () \n",
+ "\n",
+ " def visit ( vertex ): \n",
+ " path . add ( vertex ) \n",
+ " for neighbor in new_graph . get ( vertex , ()): \n",
+ " if neighbor in path or visit ( neighbor ): \n",
+ " return True \n",
+ " path . remove ( vertex ) \n",
+ " return False \n",
+ "\n",
+ " value = any ( visit ( v ) for v in new_graph ) \n",
+ " return value \n",
+ "\n",
+ " def add_const ( self , constraint , constraints ): \n",
+ " """Add the constraint to constraints if the resulting graph is acyclic""" \n",
+ "\n",
+ " if constraint [ 0 ] == self . finish or constraint [ 1 ] == self . start : \n",
+ " return constraints \n",
+ "\n",
+ " new_constraints = set ( constraints ) \n",
+ " new_constraints . add ( constraint ) \n",
+ "\n",
+ " if self . cyclic ( new_constraints ): \n",
+ " return constraints \n",
+ " return new_constraints \n",
+ "\n",
+ " def is_a_threat ( self , precondition , effect ): \n",
+ " """Check if effect is a threat to precondition""" \n",
+ "\n",
+ " if ( str ( effect . op ) == 'Not' + str ( precondition . op )) or ( 'Not' + str ( effect . op ) == str ( precondition . op )): \n",
+ " if effect . args == precondition . args : \n",
+ " return True \n",
+ " return False \n",
+ "\n",
+ " def protect ( self , causal_link , action , constraints ): \n",
+ " """Check and resolve threats by promotion or demotion""" \n",
+ "\n",
+ " threat = False \n",
+ " for effect in action . effect : \n",
+ " if self . is_a_threat ( causal_link [ 1 ], effect ): \n",
+ " threat = True \n",
+ " break \n",
+ "\n",
+ " if action != causal_link [ 0 ] and action != causal_link [ 2 ] and threat : \n",
+ " # try promotion \n",
+ " new_constraints = set ( constraints ) \n",
+ " new_constraints . add (( action , causal_link [ 0 ])) \n",
+ " if not self . cyclic ( new_constraints ): \n",
+ " constraints = self . add_const (( action , causal_link [ 0 ]), constraints ) \n",
+ " else : \n",
+ " # try demotion \n",
+ " new_constraints = set ( constraints ) \n",
+ " new_constraints . add (( causal_link [ 2 ], action )) \n",
+ " if not self . cyclic ( new_constraints ): \n",
+ " constraints = self . add_const (( causal_link [ 2 ], action ), constraints ) \n",
+ " else : \n",
+ " # both promotion and demotion fail \n",
+ " print ( 'Unable to resolve a threat caused by' , action , 'onto' , causal_link ) \n",
+ " return \n",
+ " return constraints \n",
+ "\n",
+ " def convert ( self , constraints ): \n",
+ " """Convert constraints into a dict of Action to set orderings""" \n",
+ "\n",
+ " graph = dict () \n",
+ " for constraint in constraints : \n",
+ " if constraint [ 0 ] in graph : \n",
+ " graph [ constraint [ 0 ]] . add ( constraint [ 1 ]) \n",
+ " else : \n",
+ " graph [ constraint [ 0 ]] = set () \n",
+ " graph [ constraint [ 0 ]] . add ( constraint [ 1 ]) \n",
+ " return graph \n",
+ "\n",
+ " def toposort ( self , graph ): \n",
+ " """Generate topological ordering of constraints""" \n",
+ "\n",
+ " if len ( graph ) == 0 : \n",
+ " return \n",
+ "\n",
+ " graph = graph . copy () \n",
+ "\n",
+ " for k , v in graph . items (): \n",
+ " v . discard ( k ) \n",
+ "\n",
+ " extra_elements_in_dependencies = _reduce ( set . union , graph . values ()) - set ( graph . keys ()) \n",
+ "\n",
+ " graph . update ({ element : set () for element in extra_elements_in_dependencies }) \n",
+ " while True : \n",
+ " ordered = set ( element for element , dependency in graph . items () if len ( dependency ) == 0 ) \n",
+ " if not ordered : \n",
+ " break \n",
+ " yield ordered \n",
+ " graph = { element : ( dependency - ordered ) for element , dependency in graph . items () if element not in ordered } \n",
+ " if len ( graph ) != 0 : \n",
+ " raise ValueError ( 'The graph is not acyclic and cannot be linearly ordered' ) \n",
+ "\n",
+ " def display_plan ( self ): \n",
+ " """Display causal links, constraints and the plan""" \n",
+ "\n",
+ " print ( 'Causal Links' ) \n",
+ " for causal_link in self . causal_links : \n",
+ " print ( causal_link ) \n",
+ "\n",
+ " print ( ' \\n Constraints' ) \n",
+ " for constraint in self . constraints : \n",
+ " print ( constraint [ 0 ], '<' , constraint [ 1 ]) \n",
+ "\n",
+ " print ( ' \\n Partial Order Plan' ) \n",
+ " print ( list ( reversed ( list ( self . toposort ( self . convert ( self . constraints )))))) \n",
+ "\n",
+ " def execute ( self , display = True ): \n",
+ " """Execute the algorithm""" \n",
+ "\n",
+ " step = 1 \n",
+ " self . tries = 1 \n",
+ " while len ( self . agenda ) > 0 : \n",
+ " step += 1 \n",
+ " # select <G, act1> from Agenda \n",
+ " try : \n",
+ " G , act1 , possible_actions = self . find_open_precondition () \n",
+ " except IndexError : \n",
+ " print ( 'Probably Wrong' ) \n",
+ " break \n",
+ "\n",
+ " act0 = possible_actions [ 0 ] \n",
+ " # remove <G, act1> from Agenda \n",
+ " self . agenda . remove (( G , act1 )) \n",
+ "\n",
+ " # For actions with variable number of arguments, use least commitment principle \n",
+ " # act0_temp, bindings = self.find_action_for_precondition(G) \n",
+ " # act0 = self.generate_action_object(act0_temp, bindings) \n",
+ "\n",
+ " # Actions = Actions U {act0} \n",
+ " self . actions . add ( act0 ) \n",
+ "\n",
+ " # Constraints = add_const(start < act0, Constraints) \n",
+ " self . constraints = self . add_const (( self . start , act0 ), self . constraints ) \n",
+ "\n",
+ " # for each CL E CausalLinks do \n",
+ " # Constraints = protect(CL, act0, Constraints) \n",
+ " for causal_link in self . causal_links : \n",
+ " self . constraints = self . protect ( causal_link , act0 , self . constraints ) \n",
+ "\n",
+ " # Agenda = Agenda U {<P, act0>: P is a precondition of act0} \n",
+ " for precondition in act0 . precond : \n",
+ " self . agenda . add (( precondition , act0 )) \n",
+ "\n",
+ " # Constraints = add_const(act0 < act1, Constraints) \n",
+ " self . constraints = self . add_const (( act0 , act1 ), self . constraints ) \n",
+ "\n",
+ " # CausalLinks U {<act0, G, act1>} \n",
+ " if ( act0 , G , act1 ) not in self . causal_links : \n",
+ " self . causal_links . append (( act0 , G , act1 )) \n",
+ "\n",
+ " # for each A E Actions do \n",
+ " # Constraints = protect(<act0, G, act1>, A, Constraints) \n",
+ " for action in self . actions : \n",
+ " self . constraints = self . protect (( act0 , G , act1 ), action , self . constraints ) \n",
+ "\n",
+ " if step > 200 : \n",
+ " print ( 'Couldn \\' t find a solution' ) \n",
+ " return None , None \n",
+ "\n",
+ " if display : \n",
+ " self . display_plan () \n",
+ " else : \n",
+ " return self . constraints , self . causal_links \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(PartialOrderPlanner)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We will first describe the data-structures and helper methods used, followed by the algorithm used to find a partial-order plan."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Each plan has the following four components:\n",
+ "\n",
+ "1. **`actions`**: a set of actions that make up the steps of the plan.\n",
+ "`actions` is always a subset of `pddl.actions` the set of possible actions for the given planning problem. \n",
+ "The `start` and `finish` actions are dummy actions defined to bring uniformity to the problem. The `start` action has no preconditions and its effects constitute the initial state of the planning problem. \n",
+ "The `finish` action has no effects and its preconditions constitute the goal state of the planning problem.\n",
+ "The empty plan consists of just these two dummy actions.\n",
+ "2. **`constraints`**: a set of temporal constraints that define the order of performing the actions relative to each other.\n",
+ "`constraints` does not define a linear ordering, rather it usually represents a directed graph which is also acyclic if the plan is consistent.\n",
+ "Each ordering is of the form A < B, which reads as \"A before B\" and means that action A _must_ be executed sometime before action B, but not necessarily immediately before.\n",
+ "`constraints` stores these as a set of tuples `(Action(A), Action(B))` which is interpreted as given above.\n",
+ "A constraint cannot be added to `constraints` if it breaks the acyclicity of the existing graph.\n",
+ "3. **`causal_links`**: a set of causal-links. \n",
+ "A causal link between two actions _A_ and _B_ in the plan is written as _A_ --_p_--> _B_ and is read as \"A achieves p for B\".\n",
+ "This imples that _p_ is an effect of _A_ and a precondition of _B_.\n",
+ "It also asserts that _p_ must remain true from the time of action _A_ to the time of action _B_.\n",
+ "Any violation of this rule is called a threat and must be resolved immediately by adding suitable ordering constraints.\n",
+ "`causal_links` stores this information as tuples `(Action(A), precondition(p), Action(B))` which is interpreted as given above.\n",
+ "Causal-links can also be called **protection-intervals**, because the link _A_ --_p_--> _B_ protects _p_ from being negated over the interval from _A_ to _B_.\n",
+ "4. **`agenda`**: a set of open-preconditions.\n",
+ "A precondition is open if it is not achieved by some action in the plan.\n",
+ "Planners will work to reduce the set of open preconditions to the empty set, without introducing a contradiction.\n",
+ "`agenda` stored this information as tuples `(precondition(p), Action(A))` where p is a precondition of the action A.\n",
+ "\n",
+ "A **consistent plan** is a plan in which there are no cycles in the ordering constraints and no conflicts with the causal-links.\n",
+ "A consistent plan with no open preconditions is a **solution**.\n",
+ " \n",
+ " \n",
+ "Let's briefly glance over the helper functions before going into the actual algorithm.\n",
+ " \n",
+ "**`expand_actions`**: generates all possible actions with variable bindings for use as a heuristic of selection of an open precondition.\n",
+ " \n",
+ "**`find_open_precondition`**: finds a precondition from the agenda with the least number of actions that fulfil that precondition.\n",
+ "This heuristic helps form mandatory ordering constraints and causal-links to further simplify the problem and reduce the probability of encountering a threat.\n",
+ " \n",
+ "**`find_action_for_precondition`**: finds an action that fulfils the given precondition along with the absolutely necessary variable bindings in accordance with the principle of _least commitment_.\n",
+ "In case of multiple possible actions, the action with the least number of effects is chosen to minimize the chances of encountering a threat.\n",
+ " \n",
+ "**`cyclic`**: checks if a directed graph is cyclic.\n",
+ " \n",
+ "**`add_const`**: adds `constraint` to `constraints` if the newly formed graph is acyclic and returns `constraints` otherwise.\n",
+ " \n",
+ "**`is_a_threat`**: checks if the given `effect` negates the given `precondition`.\n",
+ " \n",
+ "**`protect`**: checks if the given `action` poses a threat to the given `causal_link`.\n",
+ "If so, the threat is resolved by either promotion or demotion, whichever generates acyclic temporal constraints.\n",
+ "If neither promotion or demotion work, the chosen action is not the correct fit or the planning problem cannot be solved altogether.\n",
+ " \n",
+ "**`convert`**: converts a graph from a list of edges to an `Action` : `set` mapping, for use in topological sorting.\n",
+ " \n",
+ "**`toposort`**: a generator function that generates a topological ordering of a given graph as a list of sets.\n",
+ "Each set contains an action or several actions.\n",
+ "If a set has more that one action in it, it means that permutations between those actions also produce a valid plan.\n",
+ " \n",
+ "**`display_plan`**: displays the `causal_links`, `constraints` and the partial order plan generated from `toposort`.\n",
+ " "
+ ]
+ },
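+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As a small sketch (mirroring `initialize` above, and assuming a problem object with `init` and `goals`), the empty plan for any problem consists of just these pieces:\n",
+ "```python\n",
+ "# Sketch: the four components of the empty plan.\n",
+ "start = Action('Start', [], problem.init)       # effects = initial state\n",
+ "finish = Action('Finish', problem.goals, [])    # preconds = goal state\n",
+ "actions = {start, finish}\n",
+ "constraints = {(start, finish)}                 # Start < Finish\n",
+ "causal_links = []                               # no links yet\n",
+ "agenda = {(p, finish) for p in finish.precond}  # every goal is open\n",
+ "```"
+ ]
+ },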
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The **`execute`** method executes the algorithm, which is summarized below:\n",
+ " \n",
+ "1. An open precondition is selected (a sub-goal that we want to achieve).\n",
+ "2. An action that fulfils the open precondition is chosen.\n",
+ "3. Temporal constraints are updated.\n",
+ "4. Existing causal links are protected. Protection is a method that checks if the causal links conflict\n",
+ " and if they do, temporal constraints are added to fix the threats.\n",
+ "5. The set of open preconditions is updated.\n",
+ "6. Temporal constraints of the selected action and the next action are established.\n",
+ "7. A new causal link is added between the selected action and the owner of the open precondition.\n",
+ "8. The set of new causal links is checked for threats and if found, the threat is removed by either promotion or demotion.\n",
+ " If promotion or demotion is unable to solve the problem, the planning problem cannot be solved with the current sequence of actions\n",
+ " or it may not be solvable at all.\n",
+ "9. These steps are repeated until the set of open preconditions is empty."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A partial-order plan can be used to generate different valid total-order plans.\n",
+ "This step is called **linearization** of the partial-order plan.\n",
+ "All possible linearizations of a partial-order plan for `socks_and_shoes` looks like this.\n",
+ " \n",
+ "\n",
+ " \n",
+ "Linearization can be carried out in many ways, but the most efficient way is to represent the set of temporal constraints as a directed graph.\n",
+ "We can easily realize that the graph should also be acyclic as cycles in constraints means that the constraints are inconsistent.\n",
+ "This acyclicity is enforced by the `add_const` method, which adds a new constraint only if the acyclicity of the existing graph is not violated.\n",
+ "The `protect` method also checks for acyclicity of the newly-added temporal constraints to make a decision between promotion and demotion in case of a threat.\n",
+ "This property of a graph created from the temporal constraints of a valid partial-order plan allows us to use topological sort to order the constraints linearly.\n",
+ "A topological sort may produce several different valid solutions for a given directed acyclic graph."
+ ]
+ },
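+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Topological sorting itself is standard. Below is a minimal Kahn-style sketch over the `{node: successors}` dictionary format produced by `convert`; it is an illustration, not the module's generator-based `toposort`.\n",
+ "```python\n",
+ "# Sketch: Kahn's algorithm, yielding one valid total order of the plan.\n",
+ "def topo_order(graph):\n",
+ "    # collect every node, including pure sinks that only appear as successors\n",
+ "    nodes = set(graph) | {v for vs in graph.values() for v in vs}\n",
+ "    indeg = {n: 0 for n in nodes}\n",
+ "    for vs in graph.values():\n",
+ "        for v in vs:\n",
+ "            indeg[v] += 1\n",
+ "    ready = [n for n in nodes if indeg[n] == 0]\n",
+ "    order = []\n",
+ "    while ready:\n",
+ "        n = ready.pop()\n",
+ "        order.append(n)\n",
+ "        for v in graph.get(n, ()):\n",
+ "            indeg[v] -= 1\n",
+ "            if indeg[v] == 0:\n",
+ "                ready.append(v)\n",
+ "    if len(order) != len(nodes):\n",
+ "        raise ValueError('cycle detected')\n",
+ "    return order\n",
+ "```"
+ ]
+ },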
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now that we know how `PartialOrderPlanner` works, let's solve a few problems using it."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Causal Links\n",
+ "(Action(PutOn(Spare, Axle)), At(Spare, Axle), Action(Finish))\n",
+ "(Action(Start), Tire(Spare), Action(PutOn(Spare, Axle)))\n",
+ "(Action(Remove(Flat, Axle)), NotAt(Flat, Axle), Action(PutOn(Spare, Axle)))\n",
+ "(Action(Start), At(Flat, Axle), Action(Remove(Flat, Axle)))\n",
+ "(Action(Remove(Spare, Trunk)), At(Spare, Ground), Action(PutOn(Spare, Axle)))\n",
+ "(Action(Start), At(Spare, Trunk), Action(Remove(Spare, Trunk)))\n",
+ "(Action(Remove(Flat, Axle)), At(Flat, Ground), Action(Finish))\n",
+ "\n",
+ "Constraints\n",
+ "Action(Remove(Flat, Axle)) < Action(PutOn(Spare, Axle))\n",
+ "Action(Start) < Action(Finish)\n",
+ "Action(Remove(Spare, Trunk)) < Action(PutOn(Spare, Axle))\n",
+ "Action(Start) < Action(Remove(Spare, Trunk))\n",
+ "Action(Start) < Action(Remove(Flat, Axle))\n",
+ "Action(Remove(Flat, Axle)) < Action(Finish)\n",
+ "Action(PutOn(Spare, Axle)) < Action(Finish)\n",
+ "Action(Start) < Action(PutOn(Spare, Axle))\n",
+ "\n",
+ "Partial Order Plan\n",
+ "[{Action(Start)}, {Action(Remove(Flat, Axle)), Action(Remove(Spare, Trunk))}, {Action(PutOn(Spare, Axle))}, {Action(Finish)}]\n"
+ ]
+ }
+ ],
+ "source": [
+ "st = spare_tire()\n",
+ "pop = PartialOrderPlanner(st)\n",
+ "pop.execute()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We observe that in the given partial order plan, Remove(Flat, Axle) and Remove(Spare, Trunk) are in the same set.\n",
+ "This means that the order of performing these actions does not affect the final outcome.\n",
+ "That aside, we also see that the PutOn(Spare, Axle) action has to be performed after both the Remove actions are complete, which seems logically consistent."
+ ]
+ },
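+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Since actions inside one set can be interleaved freely in this layered representation, the number of total-order plans it encodes is the product of the factorials of the set sizes. A quick check for the plan above:\n",
+ "```python\n",
+ "from math import factorial\n",
+ "\n",
+ "# Layer sizes: {Start}, {Remove, Remove}, {PutOn}, {Finish}\n",
+ "count = 1\n",
+ "for size in [1, 2, 1, 1]:\n",
+ "    count *= factorial(size)\n",
+ "print(count)  # 2 total-order plans\n",
+ "```"
+ ]
+ },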
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Causal Links\n",
+ "(Action(FromTable(C, B)), On(C, B), Action(Finish))\n",
+ "(Action(FromTable(B, A)), On(B, A), Action(Finish))\n",
+ "(Action(Start), OnTable(B), Action(FromTable(B, A)))\n",
+ "(Action(Start), OnTable(C), Action(FromTable(C, B)))\n",
+ "(Action(Start), Clear(C), Action(FromTable(C, B)))\n",
+ "(Action(Start), Clear(A), Action(FromTable(B, A)))\n",
+ "(Action(ToTable(A, B)), Clear(B), Action(FromTable(C, B)))\n",
+ "(Action(Start), On(A, B), Action(ToTable(A, B)))\n",
+ "(Action(ToTable(A, B)), Clear(B), Action(FromTable(B, A)))\n",
+ "(Action(Start), Clear(A), Action(ToTable(A, B)))\n",
+ "\n",
+ "Constraints\n",
+ "Action(Start) < Action(FromTable(C, B))\n",
+ "Action(FromTable(B, A)) < Action(FromTable(C, B))\n",
+ "Action(Start) < Action(FromTable(B, A))\n",
+ "Action(Start) < Action(ToTable(A, B))\n",
+ "Action(Start) < Action(Finish)\n",
+ "Action(FromTable(B, A)) < Action(Finish)\n",
+ "Action(FromTable(C, B)) < Action(Finish)\n",
+ "Action(ToTable(A, B)) < Action(FromTable(B, A))\n",
+ "Action(ToTable(A, B)) < Action(FromTable(C, B))\n",
+ "\n",
+ "Partial Order Plan\n",
+ "[{Action(Start)}, {Action(ToTable(A, B))}, {Action(FromTable(B, A))}, {Action(FromTable(C, B))}, {Action(Finish)}]\n"
+ ]
+ }
+ ],
+ "source": [
+ "sbw = simple_blocks_world()\n",
+ "pop = PartialOrderPlanner(sbw)\n",
+ "pop.execute()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "collapsed": true
+ },
+ "source": [
+ "We see that this plan does not have flexibility in selecting actions, ie, actions should be performed in this order and this order only, to successfully reach the goal state."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Causal Links\n",
+ "(Action(RightShoe), RightShoeOn, Action(Finish))\n",
+ "(Action(LeftShoe), LeftShoeOn, Action(Finish))\n",
+ "(Action(LeftSock), LeftSockOn, Action(LeftShoe))\n",
+ "(Action(RightSock), RightSockOn, Action(RightShoe))\n",
+ "\n",
+ "Constraints\n",
+ "Action(LeftSock) < Action(LeftShoe)\n",
+ "Action(RightSock) < Action(RightShoe)\n",
+ "Action(Start) < Action(RightShoe)\n",
+ "Action(Start) < Action(Finish)\n",
+ "Action(LeftShoe) < Action(Finish)\n",
+ "Action(Start) < Action(RightSock)\n",
+ "Action(Start) < Action(LeftShoe)\n",
+ "Action(Start) < Action(LeftSock)\n",
+ "Action(RightShoe) < Action(Finish)\n",
+ "\n",
+ "Partial Order Plan\n",
+ "[{Action(Start)}, {Action(LeftSock), Action(RightSock)}, {Action(LeftShoe), Action(RightShoe)}, {Action(Finish)}]\n"
+ ]
+ }
+ ],
+ "source": [
+ "ss = socks_and_shoes()\n",
+ "pop = PartialOrderPlanner(ss)\n",
+ "pop.execute()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "collapsed": true
+ },
+ "source": [
+ "This plan again doesn't have constraints in selecting socks or shoes.\n",
+ "As long as both socks are worn before both shoes, we are fine.\n",
+ "Notice however, there is one valid solution,\n",
+ " \n",
+ "LeftSock -> LeftShoe -> RightSock -> RightShoe\n",
+ " \n",
+ "that the algorithm could not find as it cannot be represented as a general partially-ordered plan but is a specific total-order solution."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Runtime differences\n",
+ "Let's briefly take a look at the running time of all the three algorithms on the `socks_and_shoes` problem."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ss = socks_and_shoes()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "198 µs ± 3.53 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%timeit\n",
+ "GraphPlan(ss).execute()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "844 µs ± 23.8 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%timeit\n",
+ "Linearize(ss).execute()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "258 µs ± 4.03 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%timeit\n",
+ "PartialOrderPlanner(ss).execute(display=False)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We observe that `GraphPlan` is about 4 times faster than `Linearize` because `Linearize` essentially runs a `GraphPlan` subroutine under the hood and then carries out some transformations on the solved planning-graph.\n",
+ " \n",
+ "We also find that `GraphPlan` is slightly faster than `PartialOrderPlanner`, but this is mainly due to the `expand_actions` method in `PartialOrderPlanner` that slows it down as it generates all possible permutations of actions and variable bindings.\n",
+ " \n",
+ "Without heuristic functions, `PartialOrderPlanner` will be atleast as fast as `GraphPlan`, if not faster, but will have a higher tendency to encounter threats and conflicts which might take additional time to resolve.\n",
+ " \n",
+ "Different planning algorithms work differently for different problems."
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.5.3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}
diff --git a/planning_total_order_planner.ipynb b/planning_total_order_planner.ipynb
new file mode 100644
index 000000000..b94941ece
--- /dev/null
+++ b/planning_total_order_planner.ipynb
@@ -0,0 +1,341 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### TOTAL ORDER PLANNER\n",
+ "\n",
+ "In mathematical terminology, **total order**, **linear order** or **simple order** refers to a set *X* which is said to be totally ordered under ≤ if the following statements hold for all *a*, *b* and *c* in *X*:\n",
+ " \n",
+ "If *a* ≤ *b* and *b* ≤ *a*, then *a* = *b* (antisymmetry).\n",
+ " \n",
+ "If *a* ≤ *b* and *b* ≤ *c*, then *a* ≤ *c* (transitivity).\n",
+ " \n",
+ "*a* ≤ *b* or *b* ≤ *a* (connex relation).\n",
+ "\n",
+ " \n",
+ "In simpler terms, a total order plan is a linear ordering of actions to be taken to reach the goal state.\n",
+ "There may be several different total-order plans for a particular goal depending on the problem.\n",
+ " \n",
+ " \n",
+ "In the module, the `Linearize` class solves problems using this paradigm.\n",
+ "At its core, the `Linearize` uses a solved planning graph from `GraphPlan` and finds a valid total-order solution for it.\n",
+ "Let's have a look at the class."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from planning import *\n",
+ "from notebook import psource"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "class Linearize : \n",
+ "\n",
+ " def __init__ ( self , planningproblem ): \n",
+ " self . planningproblem = planningproblem \n",
+ "\n",
+ " def filter ( self , solution ): \n",
+ " """Filter out persistence actions from a solution""" \n",
+ "\n",
+ " new_solution = [] \n",
+ " for section in solution [ 0 ]: \n",
+ " new_section = [] \n",
+ " for operation in section : \n",
+ " if not ( operation . op [ 0 ] == 'P' and operation . op [ 1 ] . isupper ()): \n",
+ " new_section . append ( operation ) \n",
+ " new_solution . append ( new_section ) \n",
+ " return new_solution \n",
+ "\n",
+ " def orderlevel ( self , level , planningproblem ): \n",
+ " """Return valid linear order of actions for a given level""" \n",
+ "\n",
+ " for permutation in itertools . permutations ( level ): \n",
+ " temp = copy . deepcopy ( planningproblem ) \n",
+ " count = 0 \n",
+ " for action in permutation : \n",
+ " try : \n",
+ " temp . act ( action ) \n",
+ " count += 1 \n",
+ " except : \n",
+ " count = 0 \n",
+ " temp = copy . deepcopy ( planningproblem ) \n",
+ " break \n",
+ " if count == len ( permutation ): \n",
+ " return list ( permutation ), temp \n",
+ " return None \n",
+ "\n",
+ " def execute ( self ): \n",
+ " """Finds total-order solution for a planning graph""" \n",
+ "\n",
+ " graphplan_solution = GraphPlan ( self . planningproblem ) . execute () \n",
+ " filtered_solution = self . filter ( graphplan_solution ) \n",
+ " ordered_solution = [] \n",
+ " planningproblem = self . planningproblem \n",
+ " for level in filtered_solution : \n",
+ " level_solution , planningproblem = self . orderlevel ( level , planningproblem ) \n",
+ " for element in level_solution : \n",
+ " ordered_solution . append ( element ) \n",
+ "\n",
+ " return ordered_solution \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(Linearize)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The `filter` method removes the persistence actions (if any) from the planning graph representation.\n",
+ " \n",
+ "The `orderlevel` method finds a valid total-ordering of a specified level of the planning-graph, given the state of the graph after the previous level.\n",
+ " \n",
+ "The `execute` method sequentially calls `orderlevel` for all the levels in the planning-graph and returns the final total-order solution.\n",
+ " \n",
+ " \n",
+ "Let's look at some examples."
+ ]
+ },
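+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Before running the examples, note that the core of `orderlevel` is brute force: try permutations of a level's actions until one executes cleanly. A stripped-down sketch, with hypothetical `applicable` and `apply_action` helpers standing in for the module's `act`-and-exception machinery:\n",
+ "```python\n",
+ "import itertools\n",
+ "\n",
+ "# Sketch: find a linear order of one level by trying permutations.\n",
+ "def order_level(level, state, applicable, apply_action):\n",
+ "    for perm in itertools.permutations(level):\n",
+ "        s = state\n",
+ "        for action in perm:\n",
+ "            if not applicable(s, action):\n",
+ "                break\n",
+ "            s = apply_action(s, action)\n",
+ "        else:  # every action in perm applied successfully\n",
+ "            return list(perm), s\n",
+ "    return None\n",
+ "```"
+ ]
+ },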
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[Load(C1, P1, SFO),\n",
+ " Fly(P1, SFO, JFK),\n",
+ " Load(C2, P2, JFK),\n",
+ " Fly(P2, JFK, SFO),\n",
+ " Unload(C2, P2, SFO),\n",
+ " Unload(C1, P1, JFK)]"
+ ]
+ },
+ "execution_count": 3,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# total-order solution for air_cargo problem\n",
+ "Linearize(air_cargo()).execute()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[Remove(Spare, Trunk), Remove(Flat, Axle), PutOn(Spare, Axle)]"
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# total-order solution for spare_tire problem\n",
+ "Linearize(spare_tire()).execute()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[MoveToTable(C, A), Move(B, Table, C), Move(A, Table, B)]"
+ ]
+ },
+ "execution_count": 5,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# total-order solution for three_block_tower problem\n",
+ "Linearize(three_block_tower()).execute()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[ToTable(A, B), FromTable(B, A), FromTable(C, B)]"
+ ]
+ },
+ "execution_count": 6,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# total-order solution for simple_blocks_world problem\n",
+ "Linearize(simple_blocks_world()).execute()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[RightSock, LeftSock, RightShoe, LeftShoe]"
+ ]
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# total-order solution for socks_and_shoes problem\n",
+ "Linearize(socks_and_shoes()).execute()"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.5.3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}
diff --git a/probabilistic_learning.py b/probabilistic_learning.py
new file mode 100644
index 000000000..1138e702d
--- /dev/null
+++ b/probabilistic_learning.py
@@ -0,0 +1,154 @@
+"""Learning probabilistic models. (Chapters 20)"""
+
+import heapq
+
+from utils import weighted_sampler, product, gaussian
+
+
+class CountingProbDist:
+ """
+ A probability distribution formed by observing and counting examples.
+ If p is an instance of this class and o is an observed value, then
+ there are 3 main operations:
+ p.add(o) increments the count for observation o by 1.
+ p.sample() returns a random element from the distribution.
+ p[o] returns the probability for o (as in a regular ProbDist).
+ """
+
+ def __init__(self, observations=None, default=0):
+ """
+ Create a distribution, and optionally add in some observations.
+ By default this is an unsmoothed distribution, but saying default=1,
+ for example, gives you add-one smoothing.
+ """
+ if observations is None:
+ observations = []
+ self.dictionary = {}
+ self.n_obs = 0
+ self.default = default
+ self.sampler = None
+
+ for o in observations:
+ self.add(o)
+
+ def add(self, o):
+ """Add an observation o to the distribution."""
+ self.smooth_for(o)
+ self.dictionary[o] += 1
+ self.n_obs += 1
+ self.sampler = None
+
+ def smooth_for(self, o):
+ """
+ Include o among the possible observations, whether or not
+ it's been observed yet.
+ """
+ if o not in self.dictionary:
+ self.dictionary[o] = self.default
+ self.n_obs += self.default
+ self.sampler = None
+
+ def __getitem__(self, item):
+ """Return an estimate of the probability of item."""
+ self.smooth_for(item)
+ return self.dictionary[item] / self.n_obs
+
+ # (top() and sample() are not used in this module, but elsewhere.)
+
+ def top(self, n):
+ """Return (count, obs) tuples for the n most frequent observations."""
+ return heapq.nlargest(n, [(v, k) for (k, v) in self.dictionary.items()])
+
+ def sample(self):
+ """Return a random sample from the distribution."""
+ if self.sampler is None:
+ self.sampler = weighted_sampler(list(self.dictionary.keys()), list(self.dictionary.values()))
+ return self.sampler()
+
+
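+# A quick usage sketch (illustrative comment added here, not part of the class API):
+#   d = CountingProbDist('aab')   # counts: {'a': 2, 'b': 1}
+#   d['a']                        # -> 2/3, about 0.667
+#   d.add('b'); d['b']            # -> 2/4 = 0.5
+
+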
+def NaiveBayesLearner(dataset, continuous=True, simple=False):
+ """Dispatch to a naive Bayes variant: simple (a dict of precomputed
+ distributions), continuous (Gaussian-distributed attributes) or
+ discrete (attribute-value counts)."""
+ if simple:
+ return NaiveBayesSimple(dataset)
+ if continuous:
+ return NaiveBayesContinuous(dataset)
+ else:
+ return NaiveBayesDiscrete(dataset)
+
+
+def NaiveBayesSimple(distribution):
+ """
+ A simple naive bayes classifier that takes as input a dictionary of
+ CountingProbDist objects and classifies items according to these distributions.
+ The input dictionary is in the following form:
+ (ClassName, ClassProb): CountingProbDist
+ """
+ target_dist = {c_name: prob for c_name, prob in distribution.keys()}
+ attr_dists = {c_name: count_prob for (c_name, _), count_prob in distribution.items()}
+
+ def predict(example):
+ """Predict the target value for example. Calculate probabilities for each
+ class and pick the max."""
+
+ def class_probability(target_val):
+ attr_dist = attr_dists[target_val]
+ return target_dist[target_val] * product(attr_dist[a] for a in example)
+
+ return max(target_dist.keys(), key=class_probability)
+
+ return predict
+
+
+def NaiveBayesDiscrete(dataset):
+ """
+ Just count how many times each value of each input attribute
+ occurs, conditional on the target value. Count the different
+ target values too.
+ """
+
+ target_vals = dataset.values[dataset.target]
+ target_dist = CountingProbDist(target_vals)
+ attr_dists = {(gv, attr): CountingProbDist(dataset.values[attr]) for gv in target_vals for attr in dataset.inputs}
+ for example in dataset.examples:
+ target_val = example[dataset.target]
+ target_dist.add(target_val)
+ for attr in dataset.inputs:
+ attr_dists[target_val, attr].add(example[attr])
+
+ def predict(example):
+ """
+ Predict the target value for example. Consider each possible value,
+ and pick the most likely by looking at each attribute independently.
+ """
+
+ def class_probability(target_val):
+ return (target_dist[target_val] * product(attr_dists[target_val, attr][example[attr]]
+ for attr in dataset.inputs))
+
+ return max(target_vals, key=class_probability)
+
+ return predict
+
+
+def NaiveBayesContinuous(dataset):
+ """
+ Count how many times each target value occurs.
+ Also, find the means and deviations of input attribute values for each target value.
+ """
+ means, deviations = dataset.find_means_and_deviations()
+
+ target_vals = dataset.values[dataset.target]
+ target_dist = CountingProbDist(target_vals)
+
+ def predict(example):
+ """Predict the target value for example. Consider each possible value,
+ and pick the most likely by looking at each attribute independently."""
+
+ def class_probability(target_val):
+ prob = target_dist[target_val]
+ for attr in dataset.inputs:
+ prob *= gaussian(means[target_val][attr], deviations[target_val][attr], example[attr])
+ return prob
+
+ return max(target_vals, key=class_probability)
+
+ return predict
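+
+
+# Example usage (a minimal sketch; assumes the DataSet class and the bundled
+# iris data from learning.py in this repository):
+#
+#     from learning import DataSet
+#     iris = DataSet(name='iris')
+#     nb = NaiveBayesLearner(iris, continuous=False)
+#     nb(['5.1', '3.0', '1.1', '0.1'])  # -> a class label such as 'setosa'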
diff --git a/probability.ipynb b/probability.ipynb
index 446fc11fb..fe9643a83 100644
--- a/probability.ipynb
+++ b/probability.ipynb
@@ -1,24 +1,6515 @@
{
"cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Probability \n",
+ "\n",
+ "This IPy notebook acts as supporting material for topics covered in **Chapter 13 Quantifying Uncertainty**, **Chapter 14 Probabilistic Reasoning**, **Chapter 15 Probabilistic Reasoning over Time**, **Chapter 16 Making Simple Decisions** and parts of **Chapter 25 Robotics** of the book* Artificial Intelligence: A Modern Approach*. This notebook makes use of the implementations in probability.py module. Let us import everything from the probability module. It might be helpful to view the source of some of our implementations. Please refer to the Introductory IPy file for more details on how to do so."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from probability import *\n",
+ "from utils import print_table\n",
+ "from notebook import psource, pseudocode, heatmap"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## CONTENTS\n",
+ "- Probability Distribution\n",
+ " - Joint probability distribution\n",
+ " - Inference using full joint distributions\n",
+ " \n",
+ "- Bayesian Networks\n",
+ " - BayesNode\n",
+ " - BayesNet\n",
+ " - Exact Inference in Bayesian Networks\n",
+ " - Enumeration\n",
+ " - Variable elimination\n",
+ " - Approximate Inference in Bayesian Networks\n",
+ " - Prior sample\n",
+ " - Rejection sampling\n",
+ " - Likelihood weighting\n",
+ " - Gibbs sampling\n",
+ " \n",
+ "- Hidden Markov Models\n",
+ " - Inference in Hidden Markov Models\n",
+ " - Forward-backward\n",
+ " - Fixed lag smoothing\n",
+ " - Particle filtering\n",
+ " \n",
+ " \n",
+ "- Monte Carlo Localization\n",
+ "- Decision Theoretic Agent\n",
+ "- Information Gathering Agent"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## PROBABILITY DISTRIBUTION\n",
+ "\n",
+ "Let us begin by specifying discrete probability distributions. The class **ProbDist** defines a discrete probability distribution. We name our random variable and then assign probabilities to the different values of the random variable. Assigning probabilities to the values works similar to that of using a dictionary with keys being the Value and we assign to it the probability. This is possible because of the magic methods **_ _getitem_ _** and **_ _setitem_ _** which store the probabilities in the prob dict of the object. You can keep the source window open alongside while playing with the rest of the code to get a better understanding."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "class ProbDist : \n",
+ " """A discrete probability distribution. You name the random variable \n",
+ " in the constructor, then assign and query probability of values. \n",
+ " >>> P = ProbDist('Flip'); P['H'], P['T'] = 0.25, 0.75; P['H'] \n",
+ " 0.25 \n",
+ " >>> P = ProbDist('X', {'lo': 125, 'med': 375, 'hi': 500}) \n",
+ " >>> P['lo'], P['med'], P['hi'] \n",
+ " (0.125, 0.375, 0.5) \n",
+ " """ \n",
+ "\n",
+ " def __init__ ( self , varname = '?' , freqs = None ): \n",
+ " """If freqs is given, it is a dictionary of values - frequency pairs, \n",
+ " then ProbDist is normalized.""" \n",
+ " self . prob = {} \n",
+ " self . varname = varname \n",
+ " self . values = [] \n",
+ " if freqs : \n",
+ " for ( v , p ) in freqs . items (): \n",
+ " self [ v ] = p \n",
+ " self . normalize () \n",
+ "\n",
+ " def __getitem__ ( self , val ): \n",
+ " """Given a value, return P(value).""" \n",
+ " try : \n",
+ " return self . prob [ val ] \n",
+ " except KeyError : \n",
+ " return 0 \n",
+ "\n",
+ " def __setitem__ ( self , val , p ): \n",
+ " """Set P(val) = p.""" \n",
+ " if val not in self . values : \n",
+ " self . values . append ( val ) \n",
+ " self . prob [ val ] = p \n",
+ "\n",
+ " def normalize ( self ): \n",
+ " """Make sure the probabilities of all values sum to 1. \n",
+ " Returns the normalized distribution. \n",
+ " Raises a ZeroDivisionError if the sum of the values is 0.""" \n",
+ " total = sum ( self . prob . values ()) \n",
+ " if not isclose ( total , 1.0 ): \n",
+ " for val in self . prob : \n",
+ " self . prob [ val ] /= total \n",
+ " return self \n",
+ "\n",
+ " def show_approx ( self , numfmt = '{:.3g}' ): \n",
+ " """Show the probabilities rounded and sorted by key, for the \n",
+ " sake of portable doctests.""" \n",
+ " return ', ' . join ([( '{}: ' + numfmt ) . format ( v , p ) \n",
+ " for ( v , p ) in sorted ( self . prob . items ())]) \n",
+ "\n",
+ " def __repr__ ( self ): \n",
+ " return "P({})" . format ( self . varname ) \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(ProbDist)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0.75"
+ ]
+ },
+ "execution_count": 3,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "p = ProbDist('Flip')\n",
+ "p['H'], p['T'] = 0.25, 0.75\n",
+ "p['T']"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The first parameter of the constructor **varname** has a default value of '?'. So if the name is not passed it defaults to ?. The keyword argument **freqs** can be a dictionary of values of random variable: probability. These are then normalized such that the probability values sum upto 1 using the **normalize** method."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "'?'"
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "p = ProbDist(freqs={'low': 125, 'medium': 375, 'high': 500})\n",
+ "p.varname"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "(0.125, 0.375, 0.5)"
+ ]
+ },
+ "execution_count": 5,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "(p['low'], p['medium'], p['high'])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Besides the **prob** and **varname** the object also separately keeps track of all the values of the distribution in a list called **values**. Every time a new value is assigned a probability it is appended to this list, This is done inside the **_ _setitem_ _** method."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "['low', 'medium', 'high']"
+ ]
+ },
+ "execution_count": 6,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "p.values"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The distribution by default is not normalized if values are added incrementally. We can still force normalization by invoking the **normalize** method."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "(50, 114, 64)"
+ ]
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "p = ProbDist('Y')\n",
+ "p['Cat'] = 50\n",
+ "p['Dog'] = 114\n",
+ "p['Mice'] = 64\n",
+ "(p['Cat'], p['Dog'], p['Mice'])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "(0.21929824561403508, 0.5, 0.2807017543859649)"
+ ]
+ },
+ "execution_count": 8,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "p.normalize()\n",
+ "(p['Cat'], p['Dog'], p['Mice'])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "It is also possible to display the approximate values upto decimals using the **show_approx** method."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "'Cat: 0.219, Dog: 0.5, Mice: 0.281'"
+ ]
+ },
+ "execution_count": 9,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "p.show_approx()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Joint Probability Distribution\n",
+ "\n",
+ "The helper function **event_values** returns a tuple of the values of variables in event. An event is specified by a dict where the keys are the names of variables and the corresponding values are the value of the variable. Variables are specified with a list. The ordering of the returned tuple is same as those of the variables.\n",
+ "\n",
+ "\n",
+ "Alternatively if the event is specified by a list or tuple of equal length of the variables. Then the events tuple is returned as it is."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "(8, 10)"
+ ]
+ },
+ "execution_count": 10,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "event = {'A': 10, 'B': 9, 'C': 8}\n",
+ "variables = ['C', 'A']\n",
+ "event_values(event, variables)"
+ ]
+ },
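+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As a quick check of the second case (a small cell added here for illustration): when the event is already a tuple of the same length as the variables, **event_values** returns it unchanged."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "event_values((8, 10), ['C', 'A'])  # returned as is"
+ ]
+ },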
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "_A probability model is completely determined by the joint distribution for all of the random variables._ (**Section 13.3**) The probability module implements these as the class **JointProbDist** which inherits from the **ProbDist** class. This class specifies a discrete probability distribute over a set of variables. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "class JointProbDist ( ProbDist ): \n",
+ " """A discrete probability distribute over a set of variables. \n",
+ " >>> P = JointProbDist(['X', 'Y']); P[1, 1] = 0.25 \n",
+ " >>> P[1, 1] \n",
+ " 0.25 \n",
+ " >>> P[dict(X=0, Y=1)] = 0.5 \n",
+ " >>> P[dict(X=0, Y=1)] \n",
+ " 0.5""" \n",
+ "\n",
+ " def __init__ ( self , variables ): \n",
+ " self . prob = {} \n",
+ " self . variables = variables \n",
+ " self . vals = defaultdict ( list ) \n",
+ "\n",
+ " def __getitem__ ( self , values ): \n",
+ " """Given a tuple or dict of values, return P(values).""" \n",
+ " values = event_values ( values , self . variables ) \n",
+ " return ProbDist . __getitem__ ( self , values ) \n",
+ "\n",
+ " def __setitem__ ( self , values , p ): \n",
+ " """Set P(values) = p. Values can be a tuple or a dict; it must \n",
+ " have a value for each of the variables in the joint. Also keep track \n",
+ " of the values we have seen so far for each variable.""" \n",
+ " values = event_values ( values , self . variables ) \n",
+ " self . prob [ values ] = p \n",
+ " for var , val in zip ( self . variables , values ): \n",
+ " if val not in self . vals [ var ]: \n",
+ " self . vals [ var ] . append ( val ) \n",
+ "\n",
+ " def values ( self , var ): \n",
+ " """Return the set of possible values for a variable.""" \n",
+ " return self . vals [ var ] \n",
+ "\n",
+ " def __repr__ ( self ): \n",
+ " return "P({})" . format ( self . variables ) \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(JointProbDist)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Values for a Joint Distribution is a an ordered tuple in which each item corresponds to the value associate with a particular variable. For Joint Distribution of X, Y where X, Y take integer values this can be something like (18, 19).\n",
+ "\n",
+ "To specify a Joint distribution we first need an ordered list of variables."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "P(['X', 'Y'])"
+ ]
+ },
+ "execution_count": 12,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "variables = ['X', 'Y']\n",
+ "j = JointProbDist(variables)\n",
+ "j"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Like the **ProbDist** class **JointProbDist** also employes magic methods to assign probability to different values.\n",
+ "The probability can be assigned in either of the two formats for all possible values of the distribution. The **event_values** call inside **_ _getitem_ _** and **_ _setitem_ _** does the required processing to make this work."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "(0.2, 0.5)"
+ ]
+ },
+ "execution_count": 13,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "j[1,1] = 0.2\n",
+ "j[dict(X=0, Y=1)] = 0.5\n",
+ "\n",
+ "(j[1,1], j[0,1])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "It is also possible to list all the values for a particular variable using the **values** method."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[1, 0]"
+ ]
+ },
+ "execution_count": 14,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "j.values('X')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Inference Using Full Joint Distributions\n",
+ "\n",
+ "In this section we use Full Joint Distributions to calculate the posterior distribution given some evidence. We represent evidence by using a python dictionary with variables as dict keys and dict values representing the values.\n",
+ "\n",
+ "This is illustrated in **Section 13.3** of the book. The functions **enumerate_joint** and **enumerate_joint_ask** implement this functionality. Under the hood they implement **Equation 13.9** from the book.\n",
+ "\n",
+ "$$\\textbf{P}(X | \\textbf{e}) = \\alpha \\textbf{P}(X, \\textbf{e}) = \\alpha \\sum_{y} \\textbf{P}(X, \\textbf{e}, \\textbf{y})$$\n",
+ "\n",
+ "Here **α** is the normalizing factor. **X** is our query variable and **e** is the evidence. According to the equation we enumerate on the remaining variables **y** (not in evidence or query variable) i.e. all possible combinations of **y**\n",
+ "\n",
+ "We will be using the same example as the book. Let us create the full joint distribution from **Figure 13.3**. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "full_joint = JointProbDist(['Cavity', 'Toothache', 'Catch'])\n",
+ "full_joint[dict(Cavity=True, Toothache=True, Catch=True)] = 0.108\n",
+ "full_joint[dict(Cavity=True, Toothache=True, Catch=False)] = 0.012\n",
+ "full_joint[dict(Cavity=True, Toothache=False, Catch=True)] = 0.016\n",
+ "full_joint[dict(Cavity=True, Toothache=False, Catch=False)] = 0.064\n",
+ "full_joint[dict(Cavity=False, Toothache=True, Catch=True)] = 0.072\n",
+ "full_joint[dict(Cavity=False, Toothache=False, Catch=True)] = 0.144\n",
+ "full_joint[dict(Cavity=False, Toothache=True, Catch=False)] = 0.008\n",
+ "full_joint[dict(Cavity=False, Toothache=False, Catch=False)] = 0.576"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let us now look at the **enumerate_joint** function returns the sum of those entries in P consistent with e,provided variables is P's remaining variables (the ones not in e). Here, P refers to the full joint distribution. The function uses a recursive call in its implementation. The first parameter **variables** refers to remaining variables. The function in each recursive call keeps on variable constant while varying others."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "def enumerate_joint ( variables , e , P ): \n",
+ " """Return the sum of those entries in P consistent with e, \n",
+ " provided variables is P's remaining variables (the ones not in e).""" \n",
+ " if not variables : \n",
+ " return P [ e ] \n",
+ " Y , rest = variables [ 0 ], variables [ 1 :] \n",
+ " return sum ([ enumerate_joint ( rest , extend ( e , Y , y ), P ) \n",
+ " for y in P . values ( Y )]) \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(enumerate_joint)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let us assume we want to find **P(Toothache=True)**. This can be obtained by marginalization (**Equation 13.6**). We can use **enumerate_joint** to solve for this by taking Toothache=True as our evidence. **enumerate_joint** will return the sum of probabilities consistent with evidence i.e. Marginal Probability."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0.19999999999999998"
+ ]
+ },
+ "execution_count": 17,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "evidence = dict(Toothache=True)\n",
+ "variables = ['Cavity', 'Catch'] # variables not part of evidence\n",
+ "ans1 = enumerate_joint(variables, evidence, full_joint)\n",
+ "ans1"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "You can verify the result from our definition of the full joint distribution. We can use the same function to find more complex probabilities like **P(Cavity=True and Toothache=True)** "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0.12"
+ ]
+ },
+ "execution_count": 18,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "evidence = dict(Cavity=True, Toothache=True)\n",
+ "variables = ['Catch'] # variables not part of evidence\n",
+ "ans2 = enumerate_joint(variables, evidence, full_joint)\n",
+ "ans2"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Being able to find sum of probabilities satisfying given evidence allows us to compute conditional probabilities like **P(Cavity=True | Toothache=True)** as we can rewrite this as $$P(Cavity=True | Toothache = True) = \\frac{P(Cavity=True \\ and \\ Toothache=True)}{P(Toothache=True)}$$\n",
+ "\n",
+ "We have already calculated both the numerator and denominator."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0.6"
+ ]
+ },
+ "execution_count": 19,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "ans2/ans1"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We might be interested in the probability distribution of a particular variable conditioned on some evidence. This can involve doing calculations like above for each possible value of the variable. This has been implemented slightly differently using normalization in the function **enumerate_joint_ask** which returns a probability distribution over the values of the variable **X**, given the {var:val} observations **e**, in the **JointProbDist P**. The implementation of this function calls **enumerate_joint** for each value of the query variable and passes **extended evidence** with the new evidence having **X = xi **. This is followed by normalization of the obtained distribution."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "def enumerate_joint_ask ( X , e , P ): \n",
+ " """Return a probability distribution over the values of the variable X, \n",
+ " given the {var:val} observations e, in the JointProbDist P. [Section 13.3] \n",
+ " >>> P = JointProbDist(['X', 'Y']) \n",
+ " >>> P[0,0] = 0.25; P[0,1] = 0.5; P[1,1] = P[2,1] = 0.125 \n",
+ " >>> enumerate_joint_ask('X', dict(Y=1), P).show_approx() \n",
+ " '0: 0.667, 1: 0.167, 2: 0.167' \n",
+ " """ \n",
+ " assert X not in e , "Query variable must be distinct from evidence" \n",
+ " Q = ProbDist ( X ) # probability distribution for X, initially empty \n",
+ " Y = [ v for v in P . variables if v != X and v not in e ] # hidden variables. \n",
+ " for xi in P . values ( X ): \n",
+ " Q [ xi ] = enumerate_joint ( Y , extend ( e , X , xi ), P ) \n",
+ " return Q . normalize () \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(enumerate_joint_ask)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let us find **P(Cavity | Toothache=True)** using **enumerate_joint_ask**."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "(0.6, 0.39999999999999997)"
+ ]
+ },
+ "execution_count": 21,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "query_variable = 'Cavity'\n",
+ "evidence = dict(Toothache=True)\n",
+ "ans = enumerate_joint_ask(query_variable, evidence, full_joint)\n",
+ "(ans[True], ans[False])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "You can verify that the first value is the same as we obtained earlier by manual calculation."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## BAYESIAN NETWORKS\n",
+ "\n",
+ "A Bayesian network is a representation of the joint probability distribution encoding a collection of conditional independence statements.\n",
+ "\n",
+ "A Bayes Network is implemented as the class **BayesNet**. It consisits of a collection of nodes implemented by the class **BayesNode**. The implementation in the above mentioned classes focuses only on boolean variables. Each node is associated with a variable and it contains a **conditional probabilty table (cpt)**. The **cpt** represents the probability distribution of the variable conditioned on its parents **P(X | parents)**.\n",
+ "\n",
+ "Let us dive into the **BayesNode** implementation."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "class BayesNode : \n",
+ " """A conditional probability distribution for a boolean variable, \n",
+ " P(X | parents). Part of a BayesNet.""" \n",
+ "\n",
+ " def __init__ ( self , X , parents , cpt ): \n",
+ " """X is a variable name, and parents a sequence of variable \n",
+ " names or a space-separated string. cpt, the conditional \n",
+ " probability table, takes one of these forms: \n",
+ "\n",
+ " * A number, the unconditional probability P(X=true). You can \n",
+ " use this form when there are no parents. \n",
+ "\n",
+ " * A dict {v: p, ...}, the conditional probability distribution \n",
+ " P(X=true | parent=v) = p. When there's just one parent. \n",
+ "\n",
+ " * A dict {(v1, v2, ...): p, ...}, the distribution P(X=true | \n",
+ " parent1=v1, parent2=v2, ...) = p. Each key must have as many \n",
+ " values as there are parents. You can use this form always; \n",
+ " the first two are just conveniences. \n",
+ "\n",
+ " In all cases the probability of X being false is left implicit, \n",
+ " since it follows from P(X=true). \n",
+ "\n",
+ " >>> X = BayesNode('X', '', 0.2) \n",
+ " >>> Y = BayesNode('Y', 'P', {T: 0.2, F: 0.7}) \n",
+ " >>> Z = BayesNode('Z', 'P Q', \n",
+ " ... {(T, T): 0.2, (T, F): 0.3, (F, T): 0.5, (F, F): 0.7}) \n",
+ " """ \n",
+ " if isinstance ( parents , str ): \n",
+ " parents = parents . split () \n",
+ "\n",
+ " # We store the table always in the third form above. \n",
+ " if isinstance ( cpt , ( float , int )): # no parents, 0-tuple \n",
+ " cpt = {(): cpt } \n",
+ " elif isinstance ( cpt , dict ): \n",
+ " # one parent, 1-tuple \n",
+ " if cpt and isinstance ( list ( cpt . keys ())[ 0 ], bool ): \n",
+ " cpt = {( v ,): p for v , p in cpt . items ()} \n",
+ "\n",
+ " assert isinstance ( cpt , dict ) \n",
+ " for vs , p in cpt . items (): \n",
+ " assert isinstance ( vs , tuple ) and len ( vs ) == len ( parents ) \n",
+ " assert all ( isinstance ( v , bool ) for v in vs ) \n",
+ " assert 0 <= p <= 1 \n",
+ "\n",
+ " self . variable = X \n",
+ " self . parents = parents \n",
+ " self . cpt = cpt \n",
+ " self . children = [] \n",
+ "\n",
+ " def p ( self , value , event ): \n",
+ " """Return the conditional probability \n",
+ " P(X=value | parents=parent_values), where parent_values \n",
+ " are the values of parents in event. (event must assign each \n",
+ " parent a value.) \n",
+ " >>> bn = BayesNode('X', 'Burglary', {T: 0.2, F: 0.625}) \n",
+ " >>> bn.p(False, {'Burglary': False, 'Earthquake': True}) \n",
+ " 0.375""" \n",
+ " assert isinstance ( value , bool ) \n",
+ " ptrue = self . cpt [ event_values ( event , self . parents )] \n",
+ " return ptrue if value else 1 - ptrue \n",
+ "\n",
+ " def sample ( self , event ): \n",
+ " """Sample from the distribution for this variable conditioned \n",
+ " on event's values for parent_variables. That is, return True/False \n",
+ " at random according with the conditional probability given the \n",
+ " parents.""" \n",
+ " return probability ( self . p ( True , event )) \n",
+ "\n",
+ " def __repr__ ( self ): \n",
+ " return repr (( self . variable , ' ' . join ( self . parents ))) \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(BayesNode)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The constructor takes in the name of **variable**, **parents** and **cpt**. Here **variable** is a the name of the variable like 'Earthquake'. **parents** should a list or space separate string with variable names of parents. The conditional probability table is a dict {(v1, v2, ...): p, ...}, the distribution P(X=true | parent1=v1, parent2=v2, ...) = p. Here the keys are combination of boolean values that the parents take. The length and order of the values in keys should be same as the supplied **parent** list/string. In all cases the probability of X being false is left implicit, since it follows from P(X=true).\n",
+ "\n",
+ "The example below where we implement the network shown in **Figure 14.3** of the book will make this more clear.\n",
+ "\n",
+ " \n",
+ "\n",
+ "The alarm node can be made as follows: "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "alarm_node = BayesNode('Alarm', ['Burglary', 'Earthquake'], \n",
+ " {(True, True): 0.95,(True, False): 0.94, (False, True): 0.29, (False, False): 0.001})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "It is possible to avoid using a tuple when there is only a single parent. So an alternative format for the **cpt** is"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 24,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "john_node = BayesNode('JohnCalls', ['Alarm'], {True: 0.90, False: 0.05})\n",
+ "mary_node = BayesNode('MaryCalls', 'Alarm', {(True, ): 0.70, (False, ): 0.01}) # Using string for parents.\n",
+ "# Equivalant to john_node definition."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The general format used for the alarm node always holds. For nodes with no parents we can also use. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 25,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "burglary_node = BayesNode('Burglary', '', 0.001)\n",
+ "earthquake_node = BayesNode('Earthquake', '', 0.002)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "It is possible to use the node for lookup function using the **p** method. The method takes in two arguments **value** and **event**. Event must be a dict of the type {variable:values, ..} The value corresponds to the value of the variable we are interested in (False or True).The method returns the conditional probability **P(X=value | parents=parent_values)**, where parent_values are the values of parents in event. (event must assign each parent a value.)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 26,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0.09999999999999998"
+ ]
+ },
+ "execution_count": 26,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "john_node.p(False, {'Alarm': True, 'Burglary': True}) # P(JohnCalls=False | Alarm=True)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "With all the information about nodes present it is possible to construct a Bayes Network using **BayesNet**. The **BayesNet** class does not take in nodes as input but instead takes a list of **node_specs**. An entry in **node_specs** is a tuple of the parameters we use to construct a **BayesNode** namely **(X, parents, cpt)**. **node_specs** must be ordered with parents before children."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 27,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "class BayesNet : \n",
+ " """Bayesian network containing only boolean-variable nodes.""" \n",
+ "\n",
+ " def __init__ ( self , node_specs = None ): \n",
+ " """Nodes must be ordered with parents before children.""" \n",
+ " self . nodes = [] \n",
+ " self . variables = [] \n",
+ " node_specs = node_specs or [] \n",
+ " for node_spec in node_specs : \n",
+ " self . add ( node_spec ) \n",
+ "\n",
+ " def add ( self , node_spec ): \n",
+ " """Add a node to the net. Its parents must already be in the \n",
+ " net, and its variable must not.""" \n",
+ " node = BayesNode ( * node_spec ) \n",
+ " assert node . variable not in self . variables \n",
+ " assert all (( parent in self . variables ) for parent in node . parents ) \n",
+ " self . nodes . append ( node ) \n",
+ " self . variables . append ( node . variable ) \n",
+ " for parent in node . parents : \n",
+ " self . variable_node ( parent ) . children . append ( node ) \n",
+ "\n",
+ " def variable_node ( self , var ): \n",
+ " """Return the node for the variable named var. \n",
+ " >>> burglary.variable_node('Burglary').variable \n",
+ " 'Burglary'""" \n",
+ " for n in self . nodes : \n",
+ " if n . variable == var : \n",
+ " return n \n",
+ " raise Exception ( "No such variable: {}" . format ( var )) \n",
+ "\n",
+ " def variable_values ( self , var ): \n",
+ " """Return the domain of var.""" \n",
+ " return [ True , False ] \n",
+ "\n",
+ " def __repr__ ( self ): \n",
+ " return 'BayesNet({0!r})' . format ( self . nodes ) \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(BayesNet)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The constructor of **BayesNet** takes each item in **node_specs** and adds a **BayesNode** to its **nodes** object variable by calling the **add** method. **add** in turn adds node to the net. Its parents must already be in the net, and its variable must not. Thus add allows us to grow a **BayesNet** given its parents are already present.\n",
+ "\n",
+ "**burglary** global is an instance of **BayesNet** corresponding to the above example.\n",
+ "\n",
+ " T, F = True, False\n",
+ "\n",
+ " burglary = BayesNet([\n",
+ " ('Burglary', '', 0.001),\n",
+ " ('Earthquake', '', 0.002),\n",
+ " ('Alarm', 'Burglary Earthquake',\n",
+ " {(T, T): 0.95, (T, F): 0.94, (F, T): 0.29, (F, F): 0.001}),\n",
+ " ('JohnCalls', 'Alarm', {T: 0.90, F: 0.05}),\n",
+ " ('MaryCalls', 'Alarm', {T: 0.70, F: 0.01})\n",
+ " ])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 28,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "BayesNet([('Burglary', ''), ('Earthquake', ''), ('Alarm', 'Burglary Earthquake'), ('JohnCalls', 'Alarm'), ('MaryCalls', 'Alarm')])"
+ ]
+ },
+ "execution_count": 28,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "burglary"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**BayesNet** method **variable_node** allows to reach **BayesNode** instances inside a Bayes Net. It is possible to modify the **cpt** of the nodes directly using this method."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 29,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "probability.BayesNode"
+ ]
+ },
+ "execution_count": 29,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "type(burglary.variable_node('Alarm'))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 30,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{(True, True): 0.95,\n",
+ " (True, False): 0.94,\n",
+ " (False, True): 0.29,\n",
+ " (False, False): 0.001}"
+ ]
+ },
+ "execution_count": 30,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "burglary.variable_node('Alarm').cpt"
+ ]
+ },
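+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For instance (a small cell added here for illustration), we can overwrite an entry of the **cpt** in place and then restore it:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "node = burglary.variable_node('Alarm')\n",
+ "old = node.cpt[(True, True)]\n",
+ "node.cpt[(True, True)] = 0.96  # modify the entry in place\n",
+ "node.cpt[(True, True)] = old   # restore the original value\n",
+ "node.cpt[(True, True)]"
+ ]
+ },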
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Exact Inference in Bayesian Networks\n",
+ "\n",
+ "A Bayes Network is a more compact representation of the full joint distribution and like full joint distributions allows us to do inference i.e. answer questions about probability distributions of random variables given some evidence.\n",
+ "\n",
+ "Exact algorithms don't scale well for larger networks. Approximate algorithms are explained in the next section.\n",
+ "\n",
+ "### Inference by Enumeration\n",
+ "\n",
+ "We apply techniques similar to those used for **enumerate_joint_ask** and **enumerate_joint** to draw inference from Bayesian Networks. **enumeration_ask** and **enumerate_all** implement the algorithm described in **Figure 14.9** of the book."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 31,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "def enumerate_all ( variables , e , bn ): \n",
+ " """Return the sum of those entries in P(variables | e{others}) \n",
+ " consistent with e, where P is the joint distribution represented \n",
+ " by bn, and e{others} means e restricted to bn's other variables \n",
+ " (the ones other than variables). Parents must precede children in variables.""" \n",
+ " if not variables : \n",
+ " return 1.0 \n",
+ " Y , rest = variables [ 0 ], variables [ 1 :] \n",
+ " Ynode = bn . variable_node ( Y ) \n",
+ " if Y in e : \n",
+ " return Ynode . p ( e [ Y ], e ) * enumerate_all ( rest , e , bn ) \n",
+ " else : \n",
+ " return sum ( Ynode . p ( y , e ) * enumerate_all ( rest , extend ( e , Y , y ), bn ) \n",
+ " for y in bn . variable_values ( Y )) \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(enumerate_all)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**enumerate_all** recursively evaluates a general form of the **Equation 14.4** in the book.\n",
+ "\n",
+ "$$\\textbf{P}(X | \\textbf{e}) = α \\textbf{P}(X, \\textbf{e}) = α \\sum_{y} \\textbf{P}(X, \\textbf{e}, \\textbf{y})$$ \n",
+ "\n",
+ "such that **P(X, e, y)** is written in the form of product of conditional probabilities **P(variable | parents(variable))** from the Bayesian Network.\n",
+ "\n",
+ "**enumeration_ask** calls **enumerate_all** on each value of query variable **X** and finally normalizes them. \n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 32,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "def enumeration_ask ( X , e , bn ): \n",
+ " """Return the conditional probability distribution of variable X \n",
+ " given evidence e, from BayesNet bn. [Figure 14.9] \n",
+ " >>> enumeration_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary \n",
+ " ... ).show_approx() \n",
+ " 'False: 0.716, True: 0.284'""" \n",
+ " assert X not in e , "Query variable must be distinct from evidence" \n",
+ " Q = ProbDist ( X ) \n",
+ " for xi in bn . variable_values ( X ): \n",
+ " Q [ xi ] = enumerate_all ( bn . variables , extend ( e , X , xi ), bn ) \n",
+ " return Q . normalize () \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(enumeration_ask)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let us solve the problem of finding out **P(Burglary=True | JohnCalls=True, MaryCalls=True)** using the **burglary** network. **enumeration_ask** takes three arguments **X** = variable name, **e** = Evidence (in form a dict like previously explained), **bn** = The Bayes Net to do inference on."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 33,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0.2841718353643929"
+ ]
+ },
+ "execution_count": 33,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "ans_dist = enumeration_ask('Burglary', {'JohnCalls': True, 'MaryCalls': True}, burglary)\n",
+ "ans_dist[True]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Variable Elimination\n",
+ "\n",
+ "The enumeration algorithm can be improved substantially by eliminating repeated calculations. In enumeration we join the joint of all hidden variables. This is of exponential size for the number of hidden variables. Variable elimination employes interleaving join and marginalization.\n",
+ "\n",
+ "Before we look into the implementation of Variable Elimination we must first familiarize ourselves with Factors. \n",
+ "\n",
+ "In general we call a multidimensional array of type P(Y1 ... Yn | X1 ... Xm) a factor where some of Xs and Ys maybe assigned values. Factors are implemented in the probability module as the class **Factor**. They take as input **variables** and **cpt**. \n",
+ "\n",
+ "\n",
+ "#### Helper Functions\n",
+ "\n",
+ "There are certain helper functions that help creating the **cpt** for the Factor given the evidence. Let us explore them one by one."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 34,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "def make_factor ( var , e , bn ): \n",
+ " """Return the factor for var in bn's joint distribution given e. \n",
+ " That is, bn's full joint distribution, projected to accord with e, \n",
+ " is the pointwise product of these factors for bn's variables.""" \n",
+ " node = bn . variable_node ( var ) \n",
+ " variables = [ X for X in [ var ] + node . parents if X not in e ] \n",
+ " cpt = { event_values ( e1 , variables ): node . p ( e1 [ var ], e1 ) \n",
+ " for e1 in all_events ( variables , bn , e )} \n",
+ " return Factor ( variables , cpt ) \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(make_factor)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**make_factor** is used to create the **cpt** and **variables** that will be passed to the constructor of **Factor**. We use **make_factor** for each variable. It takes in the arguments **var** the particular variable, **e** the evidence we want to do inference on, **bn** the bayes network.\n",
+ "\n",
+ "Here **variables** for each node refers to a list consisting of the variable itself and the parents minus any variables that are part of the evidence. This is created by finding the **node.parents** and filtering out those that are not part of the evidence.\n",
+ "\n",
+ "The **cpt** created is the one similar to the original **cpt** of the node with only rows that agree with the evidence."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 35,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "def all_events ( variables , bn , e ): \n",
+ " """Yield every way of extending e with values for all variables.""" \n",
+ " if not variables : \n",
+ " yield e \n",
+ " else : \n",
+ " X , rest = variables [ 0 ], variables [ 1 :] \n",
+ " for e1 in all_events ( rest , bn , e ): \n",
+ " for x in bn . variable_values ( X ): \n",
+ " yield extend ( e1 , X , x ) \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(all_events)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The **all_events** function is a recursive generator function which yields a key for the orignal **cpt** which is part of the node. This works by extending evidence related to the node, thus all the output from **all_events** only includes events that support the evidence. Given **all_events** is a generator function one such event is returned on every call. \n",
+ "\n",
+ "We can try this out using the example on **Page 524** of the book. We will make **f**5 (A) = P(m | A)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 36,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "f5 = make_factor('MaryCalls', {'JohnCalls': True, 'MaryCalls': True}, burglary)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 37,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 37,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "f5"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 38,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{(True,): 0.7, (False,): 0.01}"
+ ]
+ },
+ "execution_count": 38,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "f5.cpt"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 39,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "['Alarm']"
+ ]
+ },
+ "execution_count": 39,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "f5.variables"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Here **f5.cpt** False key gives probability for **P(MaryCalls=True | Alarm = False)**. Due to our representation where we only store probabilities for only in cases where the node variable is True this is the same as the **cpt** of the BayesNode. Let us try a somewhat different example from the book where evidence is that the Alarm = True"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 40,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "new_factor = make_factor('MaryCalls', {'Alarm': True}, burglary)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 41,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{(True,): 0.7, (False,): 0.30000000000000004}"
+ ]
+ },
+ "execution_count": 41,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "new_factor.cpt"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Here the **cpt** is for **P(MaryCalls | Alarm = True)**. Therefore the probabilities for True and False sum up to one. Note the difference between both the cases. Again the only rows included are those consistent with the evidence.\n",
+ "\n",
+ "#### Operations on Factors\n",
+ "\n",
+ "We are interested in two kinds of operations on factors. **Pointwise Product** which is used to created joint distributions and **Summing Out** which is used for marginalization."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 42,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ " def pointwise_product ( self , other , bn ): \n",
+ " """Multiply two factors, combining their variables.""" \n",
+ " variables = list ( set ( self . variables ) | set ( other . variables )) \n",
+ " cpt = { event_values ( e , variables ): self . p ( e ) * other . p ( e ) \n",
+ " for e in all_events ( variables , bn , {})} \n",
+ " return Factor ( variables , cpt ) \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(Factor.pointwise_product)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**Factor.pointwise_product** implements a method of creating a joint via combining two factors. We take the union of **variables** of both the factors and then generate the **cpt** for the new factor using **all_events** function. Note that the given we have eliminated rows that are not consistent with the evidence. Pointwise product assigns new probabilities by multiplying rows similar to that in a database join."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 43,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "def pointwise_product ( factors , bn ): \n",
+ " return reduce ( lambda f , g : f . pointwise_product ( g , bn ), factors ) \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(pointwise_product)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**pointwise_product** extends this operation to more than two operands where it is done sequentially in pairs of two."
+ ]
+ },
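+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As a quick illustration (a cell added here, not part of the original run), we can build evidence-free factors for *Alarm* and *JohnCalls* from the **burglary** network and take their pointwise product; the resulting factor ranges over the union of their variables."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# sketch: join two factors of the burglary network (no evidence)\n",
+ "f_alarm = make_factor('Alarm', {}, burglary)\n",
+ "f_john = make_factor('JohnCalls', {}, burglary)\n",
+ "joint = pointwise_product([f_alarm, f_john], burglary)\n",
+ "joint.variables"
+ ]
+ },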
+ {
+ "cell_type": "code",
+ "execution_count": 44,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ " def sum_out ( self , var , bn ): \n",
+ " """Make a factor eliminating var by summing over its values.""" \n",
+ " variables = [ X for X in self . variables if X != var ] \n",
+ " cpt = { event_values ( e , variables ): sum ( self . p ( extend ( e , var , val )) \n",
+ " for val in bn . variable_values ( var )) \n",
+ " for e in all_events ( variables , bn , {})} \n",
+ " return Factor ( variables , cpt ) \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(Factor.sum_out)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**Factor.sum_out** makes a factor eliminating a variable by summing over its values. Again **events_all** is used to generate combinations for the rest of the variables."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 45,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "def sum_out ( var , factors , bn ): \n",
+ " """Eliminate var from all factors by summing over its values.""" \n",
+ " result , var_factors = [], [] \n",
+ " for f in factors : \n",
+ " ( var_factors if var in f . variables else result ) . append ( f ) \n",
+ " result . append ( pointwise_product ( var_factors , bn ) . sum_out ( var , bn )) \n",
+ " return result \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(sum_out)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**sum_out** uses both **Factor.sum_out** and **pointwise_product** to finally eliminate a particular variable from all factors by summing over its values."
+ ]
+ },
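+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Continuing the sketch from above (again an added cell, not from the original run), we eliminate *Alarm* from the two factors; **sum_out** joins the factors containing it and sums over its values, leaving a single factor without it."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# sketch: eliminate Alarm from the factors built above\n",
+ "remaining = sum_out('Alarm', [f_alarm, f_john], burglary)\n",
+ "[f.variables for f in remaining]"
+ ]
+ },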
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Elimination Ask\n",
+ "\n",
+ "The algorithm described in **Figure 14.11** of the book is implemented by the function **elimination_ask**. We use this for inference. The key idea is that we eliminate the hidden variables by interleaving joining and marginalization. It takes in 3 arguments **X** the query variable, **e** the evidence variable and **bn** the Bayes network. \n",
+ "\n",
+ "The algorithm creates factors out of Bayes Nodes in reverse order and eliminates hidden variables using **sum_out**. Finally it takes a point wise product of all factors and normalizes. Let us finally solve the problem of inferring \n",
+ "\n",
+ "**P(Burglary=True | JohnCalls=True, MaryCalls=True)** using variable elimination."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 46,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "def elimination_ask ( X , e , bn ): \n",
+ " """Compute bn's P(X|e) by variable elimination. [Figure 14.11] \n",
+ " >>> elimination_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary \n",
+ " ... ).show_approx() \n",
+ " 'False: 0.716, True: 0.284'""" \n",
+ " assert X not in e , "Query variable must be distinct from evidence" \n",
+ " factors = [] \n",
+ " for var in reversed ( bn . variables ): \n",
+ " factors . append ( make_factor ( var , e , bn )) \n",
+ " if is_hidden ( var , X , e ): \n",
+ " factors = sum_out ( var , factors , bn ) \n",
+ " return pointwise_product ( factors , bn ) . normalize () \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(elimination_ask)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 47,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "'False: 0.716, True: 0.284'"
+ ]
+ },
+ "execution_count": 47,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "elimination_ask('Burglary', dict(JohnCalls=True, MaryCalls=True), burglary).show_approx()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Elimination Ask Optimizations\n",
+ "\n",
+ "`elimination_ask` has some critical point to consider and some optimizations could be performed:\n",
+ "\n",
+ "- **Operation on factors**:\n",
+ "\n",
+ " `sum_out` and `pointwise_product` function used in `elimination_ask` is where space and time complexity arise in the variable elimination algorithm (AIMA3e pg. 526).\n",
+ "\n",
+ ">The only trick is to notice that any factor that does not depend on the variable to be summed out can be moved outside the summation.\n",
+ "\n",
+ "- **Variable ordering**:\n",
+ "\n",
+ " Elimination ordering is important, every choice of ordering yields a valid algorithm, but different orderings cause different intermediate factors to be generated during the calculation (AIMA3e pg. 527). In this case the algorithm applies a reversed order.\n",
+ "\n",
+ "> In general, the time and space requirements of variable elimination are dominated by the size of the largest factor constructed during the operation of the algorithm. This in turn is determined by the order of elimination of variables and by the structure of the network. It turns out to be intractable to determine the optimal ordering, but several good heuristics are available. One fairly effective method is a greedy one: eliminate whichever variable minimizes the size of the next factor to be constructed. \n",
+ "\n",
+ "- **Variable relevance**\n",
+ " \n",
+ " Some variables could be irrelevant to resolve a query (i.e. sums to 1). A variable elimination algorithm can therefore remove all these variables before evaluating the query (AIMA3e pg. 528).\n",
+ "\n",
+ "> An optimization is to remove 'every variable that is not an ancestor of a query variable or evidence variable is irrelevant to the query'."
+ ]
+ },
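+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To make the greedy ordering heuristic mentioned above concrete, here is a minimal sketch. It is not part of `probability.py`; representing factor scopes as sets of variable names is an assumption made purely for illustration.\n",
+ "\n",
+ "```python\n",
+ "def greedy_elimination_order(variables, factor_scopes):\n",
+ "    # Repeatedly pick the variable whose elimination would create the\n",
+ "    # smallest new factor. factor_scopes is a list of sets of names,\n",
+ "    # one set per initial factor.\n",
+ "    scopes = [set(s) for s in factor_scopes]\n",
+ "    order, remaining = [], set(variables)\n",
+ "    while remaining:\n",
+ "        def new_factor_size(v):\n",
+ "            touched = [s for s in scopes if v in s]\n",
+ "            return len(set().union(*touched) - {v}) if touched else 0\n",
+ "        v = min(remaining, key=new_factor_size)  # greedy choice\n",
+ "        order.append(v)\n",
+ "        remaining.remove(v)\n",
+ "        touched = [s for s in scopes if v in s]\n",
+ "        scopes = [s for s in scopes if v not in s]\n",
+ "        if touched:\n",
+ "            scopes.append(set().union(*touched) - {v})  # scope of the new factor\n",
+ "    return order\n",
+ "\n",
+ "# e.g. greedy_elimination_order(['A', 'B', 'C'], [{'A', 'B'}, {'B', 'C'}])\n",
+ "```"
+ ]
+ },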
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Runtime comparison\n",
+ "Let's see how the runtimes of these two algorithms compare.\n",
+ "We expect variable elimination to outperform enumeration by a large margin as we reduce the number of repetitive calculations significantly."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 48,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "105 µs ± 11.9 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%timeit\n",
+ "enumeration_ask('Burglary', dict(JohnCalls=True, MaryCalls=True), burglary).show_approx()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 49,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "262 µs ± 54.7 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%timeit\n",
+ "elimination_ask('Burglary', dict(JohnCalls=True, MaryCalls=True), burglary).show_approx()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In this test case we observe that variable elimination is slower than what we expected. It has something to do with number of threads, how Python tries to optimize things and this happens because the network is very small, with just 5 nodes. The `elimination_ask` has some critical point and some optimizations must be perfomed as seen above.\n",
+ " \n",
+ "Of course, for more complicated networks, variable elimination will be significantly faster and runtime will drop not just by a constant factor, but by a polynomial factor proportional to the number of nodes, due to the reduction in repeated calculations."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Approximate Inference in Bayesian Networks\n",
+ "\n",
+ "Exact inference fails to scale for very large and complex Bayesian Networks. This section covers implementation of randomized sampling algorithms, also called Monte Carlo algorithms."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 50,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ " def sample ( self , event ): \n",
+ " """Sample from the distribution for this variable conditioned \n",
+ " on event's values for parent_variables. That is, return True/False \n",
+ " at random according with the conditional probability given the \n",
+ " parents.""" \n",
+ " return probability ( self . p ( True , event )) \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(BayesNode.sample)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Before we consider the different algorithms in this section let us look at the **BayesNode.sample** method. It samples from the distribution for this variable conditioned on event's values for parent_variables. That is, return True/False at random according to with the conditional probability given the parents. The **probability** function is a simple helper from **utils** module which returns True with the probability passed to it.\n",
+ "\n",
+ "### Prior Sampling\n",
+ "\n",
+ "The idea of Prior Sampling is to sample from the Bayesian Network in a topological order. We start at the top of the network and sample as per **P(Xi | parents(Xi )** i.e. the probability distribution from which the value is sampled is conditioned on the values already assigned to the variable's parents. This can be thought of as a simulation."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 51,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "def prior_sample ( bn ): \n",
+ " """Randomly sample from bn's full joint distribution. The result \n",
+ " is a {variable: value} dict. [Figure 14.13]""" \n",
+ " event = {} \n",
+ " for node in bn . nodes : \n",
+ " event [ node . variable ] = node . sample ( event ) \n",
+ " return event \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(prior_sample)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The function **prior_sample** implements the algorithm described in **Figure 14.13** of the book. Nodes are sampled in the topological order. The old value of the event is passed as evidence for parent values. We will use the Bayesian Network in **Figure 14.12** to try out the **prior_sample**\n",
+ "\n",
+ " \n",
+ "\n",
+ "Traversing the graph in topological order is important.\n",
+ "There are two possible topological orderings for this particular directed acyclic graph.\n",
+ " \n",
+ "1. `Cloudy -> Sprinkler -> Rain -> Wet Grass`\n",
+ "2. `Cloudy -> Rain -> Sprinkler -> Wet Grass`\n",
+ " \n",
+ " \n",
+ "We can follow any of the two orderings to sample from the network.\n",
+ "Any ordering other than these two, however, cannot be used.\n",
+ " \n",
+ "One way to think about this is that `Cloudy` can be seen as a precondition of both `Rain` and `Sprinkler` and just like we have seen in planning, preconditions need to be satisfied before a certain action can be executed.\n",
+ " \n",
+ "We store the samples on the observations. Let us find **P(Rain=True)** by taking 1000 random samples from the network."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 52,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "N = 1000\n",
+ "all_observations = [prior_sample(sprinkler) for x in range(N)]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now we filter to get the observations where Rain = True"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 53,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "rain_true = [observation for observation in all_observations if observation['Rain'] == True]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Finally, we can find **P(Rain=True)**"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 54,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "0.503\n"
+ ]
+ }
+ ],
+ "source": [
+ "answer = len(rain_true) / N\n",
+ "print(answer)"
+ ]
+ },
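+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As a sanity check (assuming the standard CPTs of Figure 14.12, i.e. P(Cloudy) = 0.5, P(Rain | Cloudy) = 0.8 and P(Rain | ¬Cloudy) = 0.2), the true value is\n",
+ "\n",
+ "$$P(Rain{=}True) = 0.5 \\cdot 0.8 + 0.5 \\cdot 0.2 = 0.5$$\n",
+ "\n",
+ "so an estimate near 0.5 is exactly what we should expect."
+ ]
+ },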
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Sampling this another time might give different results as we have no control over the distribution of the random samples"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 55,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "0.519\n"
+ ]
+ }
+ ],
+ "source": [
+ "N = 1000\n",
+ "all_observations = [prior_sample(sprinkler) for x in range(N)]\n",
+ "rain_true = [observation for observation in all_observations if observation['Rain'] == True]\n",
+ "answer = len(rain_true) / N\n",
+ "print(answer)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To evaluate a conditional distribution. We can use a two-step filtering process. We first separate out the variables that are consistent with the evidence. Then for each value of query variable, we can find probabilities. For example to find **P(Cloudy=True | Rain=True)**. We have already filtered out the values consistent with our evidence in **rain_true**. Now we apply a second filtering step on **rain_true** to find **P(Rain=True and Cloudy=True)**"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 56,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "0.8265895953757225\n"
+ ]
+ }
+ ],
+ "source": [
+ "rain_and_cloudy = [observation for observation in rain_true if observation['Cloudy'] == True]\n",
+ "answer = len(rain_and_cloudy) / len(rain_true)\n",
+ "print(answer)"
+ ]
+ },
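+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Again as a check, under the same CPT assumptions Bayes' rule gives\n",
+ "\n",
+ "$$P(Cloudy{=}True \\mid Rain{=}True) = \\frac{P(Rain \\mid Cloudy)\\,P(Cloudy)}{P(Rain)} = \\frac{0.8 \\cdot 0.5}{0.5} = 0.8$$\n",
+ "\n",
+ "so the sampled estimate above is close to the exact answer."
+ ]
+ },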
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Rejection Sampling\n",
+ "\n",
+ "Rejection Sampling is based on an idea similar to what we did just now. \n",
+ "First, it generates samples from the prior distribution specified by the network. \n",
+ "Then, it rejects all those that do not match the evidence. \n",
+ " \n",
+ "Rejection sampling is advantageous only when we know the query beforehand.\n",
+ "While prior sampling generally works for any query, it might fail in some scenarios.\n",
+ " \n",
+ "Let's say we have a generic Bayesian network and we have evidence `e`, and we want to know how many times a state `A` is true, given evidence `e` is true.\n",
+ "Normally, prior sampling can answer this question, but let's assume that the probability of evidence `e` being true in our actual probability distribution is very small.\n",
+ "In this situation, it might be possible that sampling never encounters a data-point where `e` is true.\n",
+ "If our sampled data has no instance of `e` being true, `P(e) = 0`, and therefore `P(A | e) / P(e) = 0/0`, which is undefined.\n",
+ "We cannot find the required value using this sample.\n",
+ " \n",
+ "We can definitely increase the number of sample points, but we can never guarantee that we will encounter the case where `e` is non-zero (assuming our actual probability distribution has atleast one case where `e` is true).\n",
+ "To guarantee this, we would have to consider every single data point, which means we lose the speed advantage that approximation provides us and we essentially have to calculate the exact inference model of the Bayesian network.\n",
+ " \n",
+ " \n",
+ "Rejection sampling will be useful in this situation, as we already know the query.\n",
+ " \n",
+ "While sampling from the network, we will reject any sample which is inconsistent with the evidence variables of the given query (in this example, the only evidence variable is `e`).\n",
+ "We will only consider samples that do not violate **any** of the evidence variables.\n",
+ "In this way, we will have enough data with the required evidence to infer queries involving a subset of that evidence.\n",
+ " \n",
+ " \n",
+ "The function **rejection_sampling** implements the algorithm described by **Figure 14.14**"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 57,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "def rejection_sampling ( X , e , bn , N = 10000 ): \n",
+ " """Estimate the probability distribution of variable X given \n",
+ " evidence e in BayesNet bn, using N samples. [Figure 14.14] \n",
+ " Raises a ZeroDivisionError if all the N samples are rejected, \n",
+ " i.e., inconsistent with e. \n",
+ " >>> random.seed(47) \n",
+ " >>> rejection_sampling('Burglary', dict(JohnCalls=T, MaryCalls=T), \n",
+ " ... burglary, 10000).show_approx() \n",
+ " 'False: 0.7, True: 0.3' \n",
+ " """ \n",
+ " counts = { x : 0 for x in bn . variable_values ( X )} # bold N in [Figure 14.14] \n",
+ " for j in range ( N ): \n",
+ " sample = prior_sample ( bn ) # boldface x in [Figure 14.14] \n",
+ " if consistent_with ( sample , e ): \n",
+ " counts [ sample [ X ]] += 1 \n",
+ " return ProbDist ( X , counts ) \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(rejection_sampling)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The function keeps counts of each of the possible values of the Query variable and increases the count when we see an observation consistent with the evidence. It takes in input parameters **X** - The Query Variable, **e** - evidence, **bn** - Bayes net and **N** - number of prior samples to generate.\n",
+ "\n",
+ "**consistent_with** is used to check consistency."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 58,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "def consistent_with ( event , evidence ): \n",
+ " """Is event consistent with the given evidence?""" \n",
+ " return all ( evidence . get ( k , v ) == v \n",
+ " for k , v in event . items ()) \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(consistent_with)"
+ ]
+ },
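+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For instance, under this definition `consistent_with({'Rain': True, 'Cloudy': False}, {'Rain': True})` is `True`, since the event agrees with the evidence on every shared variable, whereas `consistent_with({'Rain': False, 'Cloudy': True}, {'Rain': True})` is `False`."
+ ]
+ },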
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To answer **P(Cloudy=True | Rain=True)**"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 59,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0.8035019455252919"
+ ]
+ },
+ "execution_count": 59,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "p = rejection_sampling('Cloudy', dict(Rain=True), sprinkler, 1000)\n",
+ "p[True]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Likelihood Weighting\n",
+ "\n",
+ "Rejection sampling takes a long time to run when the probability of finding consistent evidence is low. It is also slow for larger networks and more evidence variables.\n",
+ "Rejection sampling tends to reject a lot of samples if our evidence consists of a large number of variables. Likelihood Weighting solves this by fixing the evidence (i.e. not sampling it) and then using weights to make sure that our overall sampling is still consistent.\n",
+ "\n",
+ "The pseudocode in **Figure 14.15** is implemented as **likelihood_weighting** and **weighted_sample**."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 60,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "def weighted_sample ( bn , e ): \n",
+ " """Sample an event from bn that's consistent with the evidence e; \n",
+ " return the event and its weight, the likelihood that the event \n",
+ " accords to the evidence.""" \n",
+ " w = 1 \n",
+ " event = dict ( e ) # boldface x in [Figure 14.15] \n",
+ " for node in bn . nodes : \n",
+ " Xi = node . variable \n",
+ " if Xi in e : \n",
+ " w *= node . p ( e [ Xi ], event ) \n",
+ " else : \n",
+ " event [ Xi ] = node . sample ( event ) \n",
+ " return event , w \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(weighted_sample)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "\n",
+ "**weighted_sample** samples an event from Bayesian Network that's consistent with the evidence **e** and returns the event and its weight, the likelihood that the event accords to the evidence. It takes in two parameters **bn** the Bayesian Network and **e** the evidence.\n",
+ "\n",
+ "The weight is obtained by multiplying **P(xi | parents(xi ))** for each node in evidence. We set the values of **event = evidence** at the start of the function."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 61,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "({'Rain': True, 'Cloudy': False, 'Sprinkler': True, 'WetGrass': True}, 0.2)"
+ ]
+ },
+ "execution_count": 61,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "weighted_sample(sprinkler, dict(Rain=True))"
+ ]
+ },
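+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We can check this weight by hand. The only evidence variable here is `Rain`, so the weight is just **P(Rain=True | Cloudy=c)** for whatever value `c` was sampled for `Cloudy`. In the run above `Cloudy` came out `False`, and P(Rain=True | Cloudy=False) = 0.2 in the sprinkler network, which matches the weight returned. (The sampled event, and hence the weight, will vary from run to run.)"
+ ]
+ },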
+ {
+ "cell_type": "code",
+ "execution_count": 62,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "def likelihood_weighting ( X , e , bn , N = 10000 ): \n",
+ " """Estimate the probability distribution of variable X given \n",
+ " evidence e in BayesNet bn. [Figure 14.15] \n",
+ " >>> random.seed(1017) \n",
+ " >>> likelihood_weighting('Burglary', dict(JohnCalls=T, MaryCalls=T), \n",
+ " ... burglary, 10000).show_approx() \n",
+ " 'False: 0.702, True: 0.298' \n",
+ " """ \n",
+ " W = { x : 0 for x in bn . variable_values ( X )} \n",
+ " for j in range ( N ): \n",
+ " sample , weight = weighted_sample ( bn , e ) # boldface x, w in [Figure 14.15] \n",
+ " W [ sample [ X ]] += weight \n",
+ " return ProbDist ( X , W ) \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(likelihood_weighting)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**likelihood_weighting** implements the algorithm to solve our inference problem. The code is similar to **rejection_sampling** but instead of adding one for each sample we add the weight obtained from **weighted_sampling**."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 63,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "'False: 0.2, True: 0.8'"
+ ]
+ },
+ "execution_count": 63,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "likelihood_weighting('Cloudy', dict(Rain=True), sprinkler, 200).show_approx()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Gibbs Sampling\n",
+ "\n",
+ "In likelihood sampling, it is possible to obtain low weights in cases where the evidence variables reside at the bottom of the Bayesian Network. This can happen because influence only propagates downwards in likelihood sampling.\n",
+ "\n",
+ "Gibbs Sampling solves this. The implementation of **Figure 14.16** is provided in the function **gibbs_ask** "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 64,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "def gibbs_ask ( X , e , bn , N = 1000 ): \n",
+ " """[Figure 14.16]""" \n",
+ " assert X not in e , "Query variable must be distinct from evidence" \n",
+ " counts = { x : 0 for x in bn . variable_values ( X )} # bold N in [Figure 14.16] \n",
+ " Z = [ var for var in bn . variables if var not in e ] \n",
+ " state = dict ( e ) # boldface x in [Figure 14.16] \n",
+ " for Zi in Z : \n",
+ " state [ Zi ] = random . choice ( bn . variable_values ( Zi )) \n",
+ " for j in range ( N ): \n",
+ " for Zi in Z : \n",
+ " state [ Zi ] = markov_blanket_sample ( Zi , state , bn ) \n",
+ " counts [ state [ X ]] += 1 \n",
+ " return ProbDist ( X , counts ) \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(gibbs_ask)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In **gibbs_ask** we initialize the non-evidence variables to random values. And then select non-evidence variables and sample it from **P(Variable | value in the current state of all remaining vars) ** repeatedly sample. In practice, we speed this up by using **markov_blanket_sample** instead. This works because terms not involving the variable get canceled in the calculation. The arguments for **gibbs_ask** are similar to **likelihood_weighting**"
+ ]
+ },
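+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For intuition, here is a rough re-derivation of what **markov_blanket_sample** has to compute. This is only a sketch, not the code from `probability.py`; it assumes the `BayesNode` interface used elsewhere in this notebook (`bn.variable_node`, `node.p(value, event)`, `node.children`) and boolean variables.\n",
+ "\n",
+ "```python\n",
+ "def markov_blanket_sample_sketch(X, e, bn):\n",
+ "    # Resample X given the current values e of all other variables,\n",
+ "    # using only X's Markov blanket: its own CPT row and those of its children.\n",
+ "    Xnode = bn.variable_node(X)\n",
+ "    Q = {}\n",
+ "    for xi in [True, False]:\n",
+ "        ei = dict(e)\n",
+ "        ei[X] = xi\n",
+ "        q = Xnode.p(xi, e)  # P(xi | parents(X))\n",
+ "        for Yj in Xnode.children:\n",
+ "            q *= Yj.p(ei[Yj.variable], ei)  # P(yj | parents(Yj))\n",
+ "        Q[xi] = q\n",
+ "    # Normalize the two unnormalized values and flip a biased coin\n",
+ "    return probability(Q[True] / (Q[True] + Q[False]))\n",
+ "```"
+ ]
+ },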
+ {
+ "cell_type": "code",
+ "execution_count": 65,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "'False: 0.215, True: 0.785'"
+ ]
+ },
+ "execution_count": 65,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "gibbs_ask('Cloudy', dict(Rain=True), sprinkler, 200).show_approx()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Runtime analysis\n",
+ "Let's take a look at how much time each algorithm takes."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 66,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "13.2 ms ± 3.45 ms per loop (mean ± std. dev. of 7 runs, 100 loops each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%timeit\n",
+ "all_observations = [prior_sample(sprinkler) for x in range(1000)]\n",
+ "rain_true = [observation for observation in all_observations if observation['Rain'] == True]\n",
+ "len([observation for observation in rain_true if observation['Cloudy'] == True]) / len(rain_true)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 67,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "11 ms ± 687 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%timeit\n",
+ "rejection_sampling('Cloudy', dict(Rain=True), sprinkler, 1000)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 68,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "2.12 ms ± 554 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%timeit\n",
+ "likelihood_weighting('Cloudy', dict(Rain=True), sprinkler, 200)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 69,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "14.4 ms ± 2.16 ms per loop (mean ± std. dev. of 7 runs, 100 loops each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%timeit\n",
+ "gibbs_ask('Cloudy', dict(Rain=True), sprinkler, 200)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As expected, all algorithms have a very similar runtime.\n",
+ "However, rejection sampling would be a lot faster and more accurate when the probabiliy of finding data-points consistent with the required evidence is small.\n",
+ " \n",
+ "Likelihood weighting is the fastest out of all as it doesn't involve rejecting samples, but also has a quite high variance."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## HIDDEN MARKOV MODELS"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Often, we need to carry out probabilistic inference on temporal data or a sequence of observations where the order of observations matter.\n",
+ "We require a model similar to a Bayesian Network, but one that grows over time to keep up with the latest evidences.\n",
+ "If you are familiar with the `mdp` module or Markov models in general, you can probably guess that a Markov model might come close to representing our problem accurately.\n",
+ " \n",
+ "A Markov model is basically a chain-structured Bayesian Network in which there is one state for each time step and each node has an identical probability distribution.\n",
+ "The first node, however, has a different distribution, called the prior distribution which models the initial state of the process.\n",
+ "A state in a Markov model depends only on the previous state and the latest evidence and not on the states before it.\n",
+ " \n",
+ "A **Hidden Markov Model** or **HMM** is a special case of a Markov model in which the state of the process is described by a single discrete random variable.\n",
+ "The possible values of the variable are the possible states of the world.\n",
+ " \n",
+ "But what if we want to model a process with two or more state variables?\n",
+ "In that case, we can still fit the process into the HMM framework by redefining our state variables as a single \"megavariable\".\n",
+ "We do this because carrying out inference on HMMs have standard optimized algorithms.\n",
+ "A HMM is very similar to an MDP, but we don't have the option of taking actions like in MDPs, instead, the process carries on as new evidence appears.\n",
+ " \n",
+ "If a HMM is truncated at a fixed length, it becomes a Bayesian network and general BN inference can be used on it to answer queries.\n",
+ "\n",
+ "Before we start, it will be helpful to understand the structure of a temporal model. We will use the example of the book with the guard and the umbrella. In this example, the state $\\textbf{X}$ is whether it is a rainy day (`X = True`) or not (`X = False`) at Day $\\textbf{t}$. In the sensor or observation model, the observation or evidence $\\textbf{U}$ is whether the professor holds an umbrella (`U = True`) or not (`U = False`) on **Day** $\\textbf{t}$. Based on that, the transition model is \n",
+ "\n",
+ "| $X_{t-1}$ | $X_{t}$ | **P**$(X_{t}| X_{t-1})$| \n",
+ "| ------------- |------------- | ----------------------------------|\n",
+ "| ***${False}$*** | ***${False}$*** | 0.7 |\n",
+ "| ***${False}$*** | ***${True}$*** | 0.3 |\n",
+ "| ***${True}$*** | ***${False}$*** | 0.3 |\n",
+ "| ***${True}$*** | ***${True}$*** | 0.7 |\n",
+ "\n",
+ "And the the sensor model will be,\n",
+ "\n",
+ "| $X_{t}$ | $U_{t}$ | **P**$(U_{t}|X_{t})$| \n",
+ "| :-------------: |:-------------: | :------------------------:|\n",
+ "| ***${False}$*** | ***${True}$*** | 0.2 |\n",
+ "| ***${False}$*** | ***${False}$*** | 0.8 |\n",
+ "| ***${True}$*** | ***${True}$*** | 0.9 |\n",
+ "| ***${True}$*** | ***${False}$*** | 0.1 |\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "HMMs are implemented in the **`HiddenMarkovModel`** class.\n",
+ "Let's have a look."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 70,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "class HiddenMarkovModel : \n",
+ " """A Hidden markov model which takes Transition model and Sensor model as inputs""" \n",
+ "\n",
+ " def __init__ ( self , transition_model , sensor_model , prior = None ): \n",
+ " self . transition_model = transition_model \n",
+ " self . sensor_model = sensor_model \n",
+ " self . prior = prior or [ 0.5 , 0.5 ] \n",
+ "\n",
+ " def sensor_dist ( self , ev ): \n",
+ " if ev is True : \n",
+ " return self . sensor_model [ 0 ] \n",
+ " else : \n",
+ " return self . sensor_model [ 1 ] \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(HiddenMarkovModel)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We instantiate the object **`hmm`** of the class using a list of lists for both the transition and the sensor model."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 71,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "umbrella_transition_model = [[0.7, 0.3], [0.3, 0.7]]\n",
+ "umbrella_sensor_model = [[0.9, 0.2], [0.1, 0.8]]\n",
+ "hmm = HiddenMarkovModel(umbrella_transition_model, umbrella_sensor_model)"
+ ]
+ },
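+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A note on this encoding (our reading of the tables above, worth double-checking against `forward` below): index 0 stands for `True` (rain) and index 1 for `False` throughout, so row *i* of the transition model is the distribution of $X_{t}$ given $X_{t-1} = i$, and row 0 of the sensor model is the distribution used when the umbrella is observed.\n",
+ "\n",
+ "```python\n",
+ "# Row 0 of the transition model: from a rainy day, P(rain) = 0.7, P(no rain) = 0.3\n",
+ "assert umbrella_transition_model[0] == [0.7, 0.3]\n",
+ "# Row 0 of the sensor model: P(U=true | rain) = 0.9, P(U=true | no rain) = 0.2\n",
+ "assert umbrella_sensor_model[0] == [0.9, 0.2]\n",
+ "```"
+ ]
+ },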
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The **`sensor_dist()`** method returns a list with the conditional probabilities of the sensor model."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 72,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[0.9, 0.2]"
+ ]
+ },
+ "execution_count": 72,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "hmm.sensor_dist(ev=True)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now that we have defined an HMM object, our task here is to compute the belief $B_{t}(x)= P(X_{t}|U_{1:t})$ given evidence **U** at each time step **t**.\n",
+ " \n",
+ "The basic inference tasks that must be solved are:\n",
+ "1. **Filtering**: Computing the posterior probability distribution over the most recent state, given all the evidence up to the current time step.\n",
+ "2. **Prediction**: Computing the posterior probability distribution over the future state.\n",
+ "3. **Smoothing**: Computing the posterior probability distribution over a past state. Smoothing provides a better estimation as it incorporates more evidence.\n",
+ "4. **Most likely explanation**: Finding the most likely sequence of states for a given observation\n",
+ "5. **Learning**: The transition and sensor models can be learnt, if not yet known, just like in an information gathering agent\n",
+ " \n",
+ " \n",
+ "\n",
+ "There are three primary methods to carry out inference in Hidden Markov Models:\n",
+ "1. The Forward-Backward algorithm\n",
+ "2. Fixed lag smoothing\n",
+ "3. Particle filtering\n",
+ "\n",
+ "Let's have a look at how we can carry out inference and answer queries based on our umbrella HMM using these algorithms."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### FORWARD-BACKWARD\n",
+ "This is a general algorithm that works for all Markov models, not just HMMs.\n",
+ "In the filtering task (inference) we are given evidence **U** in each time **t** and we want to compute the belief $B_{t}(x)= P(X_{t}|U_{1:t})$. \n",
+ "We can think of it as a three step process:\n",
+ "1. In every step we start with the current belief $P(X_{t}|e_{1:t})$\n",
+ "2. We update it for time\n",
+ "3. We update it for evidence\n",
+ "\n",
+ "The forward algorithm performs the step 2 and 3 at once. It updates, or better say reweights, the initial belief using the transition and the sensor model. Let's see the umbrella example. On **Day 0** no observation is available, and for that reason we will assume that we have equal possibilities to rain or not. In the **`HiddenMarkovModel`** class, the prior probabilities for **Day 0** are by default [0.5, 0.5]. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The observation update is calculated with the **`forward()`** function. Basically, we update our belief using the observation model. The function returns a list with the probabilities of **raining or not** on **Day 1**."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 73,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "def forward ( HMM , fv , ev ): \n",
+ " prediction = vector_add ( scalar_vector_product ( fv [ 0 ], HMM . transition_model [ 0 ]), \n",
+ " scalar_vector_product ( fv [ 1 ], HMM . transition_model [ 1 ])) \n",
+ " sensor_dist = HMM . sensor_dist ( ev ) \n",
+ "\n",
+ " return normalize ( element_wise_product ( sensor_dist , prediction )) \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(forward)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 74,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "The probability of raining on day 1 is 0.82\n"
+ ]
+ }
+ ],
+ "source": [
+ "umbrella_prior = [0.5, 0.5]\n",
+ "belief_day_1 = forward(hmm, umbrella_prior, ev=True)\n",
+ "print ('The probability of raining on day 1 is {:.2f}'.format(belief_day_1[0]))"
+ ]
+ },
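+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We can verify this number by hand. The time update gives $0.5 \\cdot [0.7, 0.3] + 0.5 \\cdot [0.3, 0.7] = [0.5, 0.5]$; multiplying element-wise by the sensor distribution for `ev=True`, $[0.9, 0.2]$, gives $[0.45, 0.1]$; and normalizing yields $[0.818, 0.182]$, i.e. the 0.82 printed above.\n",
+ "\n",
+ "```python\n",
+ "prediction = [0.5 * 0.7 + 0.5 * 0.3, 0.5 * 0.3 + 0.5 * 0.7]  # time update -> [0.5, 0.5]\n",
+ "unnormalized = [0.9 * prediction[0], 0.2 * prediction[1]]    # evidence update -> [0.45, 0.1]\n",
+ "total = sum(unnormalized)\n",
+ "print([p / total for p in unnormalized])                     # [0.8181..., 0.1818...]\n",
+ "```"
+ ]
+ },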
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In **Day 2** our initial belief is the updated belief of **Day 1**.\n",
+ "Again using the **`forward()`** function we can compute the probability of raining in **Day 2**"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 75,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "The probability of raining in day 2 is 0.88\n"
+ ]
+ }
+ ],
+ "source": [
+ "belief_day_2 = forward(hmm, belief_day_1, ev=True)\n",
+ "print ('The probability of raining in day 2 is {:.2f}'.format(belief_day_2[0]))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In the smoothing part we are interested in computing the distribution over past states given evidence up to the present. Assume that we want to compute the distribution for the time **k**, for $0\\leq k\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "def backward ( HMM , b , ev ): \n",
+ " sensor_dist = HMM . sensor_dist ( ev ) \n",
+ " prediction = element_wise_product ( sensor_dist , b ) \n",
+ "\n",
+ " return normalize ( vector_add ( scalar_vector_product ( prediction [ 0 ], HMM . transition_model [ 0 ]), \n",
+ " scalar_vector_product ( prediction [ 1 ], HMM . transition_model [ 1 ]))) \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(backward)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 77,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[0.6272727272727272, 0.37272727272727274]"
+ ]
+ },
+ "execution_count": 77,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "b = [1, 1]\n",
+ "backward(hmm, b, ev=True)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Some may notice that the result is not the same as in the book. The main reason is that in the book the normalization step is not used. If we want to normalize the result, one can use the **`normalize()`** helper function.\n",
+ "\n",
+ "In order to find the smoothed estimate for raining in **Day k**, we will use the **`forward_backward()`** function. As in the example in the book, the umbrella is observed in both days and the prior distribution is [0.5, 0.5]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 78,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/markdown": [
+ "### AIMA3e\n",
+ "__function__ FORWARD-BACKWARD(__ev__, _prior_) __returns__ a vector of probability distributions \n",
+ " __inputs__: __ev__, a vector of evidence values for steps 1,…,_t_ \n",
+ " _prior_, the prior distribution on the initial state, __P__(__X__0 ) \n",
+ " __local variables__: __fv__, a vector of forward messages for steps 0,…,_t_ \n",
+ " __b__, a representation of the backward message, initially all 1s \n",
+ " __sv__, a vector of smoothed estimates for steps 1,…,_t_ \n",
+ "\n",
+ " __fv__\\[0\\] ← _prior_ \n",
+ " __for__ _i_ = 1 __to__ _t_ __do__ \n",
+ " __fv__\\[_i_\\] ← FORWARD(__fv__\\[_i_ − 1\\], __ev__\\[_i_\\]) \n",
+ " __for__ _i_ = _t_ __downto__ 1 __do__ \n",
+ " __sv__\\[_i_\\] ← NORMALIZE(__fv__\\[_i_\\] × __b__) \n",
+ " __b__ ← BACKWARD(__b__, __ev__\\[_i_\\]) \n",
+ " __return__ __sv__\n",
+ "\n",
+ "---\n",
+ "__Figure ??__ The forward\\-backward algorithm for smoothing: computing posterior probabilities of a sequence of states given a sequence of observations. The FORWARD and BACKWARD operators are defined by Equations (__??__) and (__??__), respectively."
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 78,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "pseudocode('Forward-Backward')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 79,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "The probability of raining in Day 0 is 0.65 and in Day 1 is 0.88\n"
+ ]
+ }
+ ],
+ "source": [
+ "umbrella_prior = [0.5, 0.5]\n",
+ "prob = forward_backward(hmm, ev=[T, T], prior=umbrella_prior)\n",
+ "print ('The probability of raining in Day 0 is {:.2f} and in Day 1 is {:.2f}'.format(prob[0][0], prob[1][0]))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "\n",
+ "Since HMMs are represented as single variable systems, we can represent the transition model and sensor model as matrices.\n",
+ "The `forward_backward` algorithm can be easily carried out on this representation (as we have done here) with a time complexity of $O({S}^{2} t)$ where t is the length of the sequence and each step multiplies a vector of size $S$ with a matrix of dimensions $SxS$.\n",
+ " \n",
+ "Additionally, the forward pass stores $t$ vectors of size $S$ which makes the auxiliary space requirement equivalent to $O(St)$.\n",
+ " \n",
+ " \n",
+ "Is there any way we can improve the time or space complexity?\n",
+ " \n",
+ "Fortunately, the matrix representation of HMM properties allows us to do so.\n",
+ " \n",
+ "If $f$ and $b$ represent the forward and backward messages respectively, we can modify the smoothing algorithm by first\n",
+ "running the standard forward pass to compute $f_{t:t}$ (forgetting all the intermediate results) and then running\n",
+ "backward pass for both $b$ and $f$ together, using them to compute the smoothed estimate at each step.\n",
+ "This optimization reduces auxlilary space requirement to constant (irrespective of the length of the sequence) provided\n",
+ "the transition matrix is invertible and the sensor model has no zeros (which is sometimes hard to accomplish)\n",
+ " \n",
+ " \n",
+ "Let's look at another algorithm, that carries out smoothing in a more optimized way."
+ ]
+ },
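+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Here is a rough sketch of that constant-space scheme for our two-state umbrella HMM. It is an illustration under the stated assumptions (invertible transition matrix, no zeros in the sensor model), not code from `probability.py`; it reuses `forward`, `backward` and the matrix helpers that `fixed_lag_smoothing` below also uses.\n",
+ "\n",
+ "```python\n",
+ "def smoothing_constant_space(HMM, ev):\n",
+ "    T = HMM.transition_model\n",
+ "    f = HMM.prior\n",
+ "    for e in ev:  # forward pass, keeping only the final message f_{1:t}\n",
+ "        f = forward(HMM, f, e)\n",
+ "    b = [1, 1]\n",
+ "    sv = [None] * len(ev)\n",
+ "    for i in range(len(ev) - 1, -1, -1):\n",
+ "        sv[i] = normalize([f[0] * b[0], f[1] * b[1]])\n",
+ "        b = backward(HMM, b, ev[i])\n",
+ "        # Run f backwards by undoing one forward step:\n",
+ "        # f_{1:i} is proportional to f_{1:i+1} O_i^{-1} T^{-1} in row form.\n",
+ "        O = vector_to_diagonal(HMM.sensor_dist(ev[i]))\n",
+ "        f = normalize(matrix_multiplication([f], inverse_matrix(O), inverse_matrix(T))[0])\n",
+ "    return sv\n",
+ "\n",
+ "# smoothing_constant_space(hmm, [T, T]) should agree with forward_backward above.\n",
+ "```"
+ ]
+ },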
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### FIXED LAG SMOOTHING\n",
+ "The matrix formulation allows to optimize online smoothing with a fixed lag.\n",
+ " \n",
+ "Since smoothing can be done in constant, there should exist an algorithm whose time complexity is independent of the length of the lag.\n",
+ "For smoothing a time slice $t - d$ where $d$ is the lag, we need to compute $\\alpha f_{1:t-d}$ x $b_{t-d+1:t}$ incrementally.\n",
+ " \n",
+ "As we already know, the forward equation is\n",
+ " \n",
+ "$$f_{1:t+1} = \\alpha O_{t+1}{T}^{T}f_{1:t}$$\n",
+ " \n",
+ "and the backward equation is\n",
+ " \n",
+ "$$b_{k+1:t} = TO_{k+1}b_{k+2:t}$$\n",
+ " \n",
+ "where $T$ and $O$ are the transition and sensor models respectively.\n",
+ " \n",
+ "For smoothing, the forward message is easy to compute but there exists no simple relation between the backward message of this time step and the one at the previous time step, hence we apply the backward equation $d$ times to get\n",
+ " \n",
+ "$$b_{t-d+1:t} = \\left ( \\prod_{i=t-d+1}^{t}{TO_i} \\right )b_{t+1:t} = B_{t-d+1:t}1$$\n",
+ " \n",
+ "where $B_{t-d+1:t}$ is the product of the sequence of $T$ and $O$ matrices.\n",
+ " \n",
+ "Here's how the `probability` module implements `fixed_lag_smoothing`.\n",
+ " "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 80,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "def fixed_lag_smoothing ( e_t , HMM , d , ev , t ): \n",
+ " """[Figure 15.6] \n",
+ " Smoothing algorithm with a fixed time lag of 'd' steps. \n",
+ " Online algorithm that outputs the new smoothed estimate if observation \n",
+ " for new time step is given.""" \n",
+ " ev . insert ( 0 , None ) \n",
+ "\n",
+ " T_model = HMM . transition_model \n",
+ " f = HMM . prior \n",
+ " B = [[ 1 , 0 ], [ 0 , 1 ]] \n",
+ " evidence = [] \n",
+ "\n",
+ " evidence . append ( e_t ) \n",
+ " O_t = vector_to_diagonal ( HMM . sensor_dist ( e_t )) \n",
+ " if t > d : \n",
+ " f = forward ( HMM , f , e_t ) \n",
+ " O_tmd = vector_to_diagonal ( HMM . sensor_dist ( ev [ t - d ])) \n",
+ " B = matrix_multiplication ( inverse_matrix ( O_tmd ), inverse_matrix ( T_model ), B , T_model , O_t ) \n",
+ " else : \n",
+ " B = matrix_multiplication ( B , T_model , O_t ) \n",
+ " t += 1 \n",
+ "\n",
+ " if t > d : \n",
+ " # always returns a 1x2 matrix \n",
+ " return [ normalize ( i ) for i in matrix_multiplication ([ f ], B )][ 0 ] \n",
+ " else : \n",
+ " return None \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(fixed_lag_smoothing)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "This algorithm applies `forward` as usual and optimizes the smoothing step by using the equations above.\n",
+ "This optimization could be achieved only because HMM properties can be represented as matrices.\n",
+ " \n",
+ "`vector_to_diagonal`, `matrix_multiplication` and `inverse_matrix` are matrix manipulation functions to simplify the implementation.\n",
+ " \n",
+ "`normalize` is used to normalize the output before returning it."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Here's how we can use `fixed_lag_smoothing` for inference on our umbrella HMM."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 81,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "umbrella_transition_model = [[0.7, 0.3], [0.3, 0.7]]\n",
+ "umbrella_sensor_model = [[0.9, 0.2], [0.1, 0.8]]\n",
+ "hmm = HiddenMarkovModel(umbrella_transition_model, umbrella_sensor_model)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Given evidence T, F, T, F and T, we want to calculate the probability distribution for the fourth day with a fixed lag of 2 days.\n",
+ " \n",
+ "Let `e_t = False`"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 82,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[0.1111111111111111, 0.8888888888888888]"
+ ]
+ },
+ "execution_count": 82,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "e_t = F\n",
+ "evidence = [T, F, T, F, T]\n",
+ "fixed_lag_smoothing(e_t, hmm, d=2, ev=evidence, t=4)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 83,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[0.9938650306748466, 0.006134969325153394]"
+ ]
+ },
+ "execution_count": 83,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "e_t = T\n",
+ "evidence = [T, T, F, T, T]\n",
+ "fixed_lag_smoothing(e_t, hmm, d=1, ev=evidence, t=4)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We cannot calculate probability distributions when $t$ is less than $d$"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 84,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fixed_lag_smoothing(e_t, hmm, d=5, ev=evidence, t=4)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As expected, the output is `None`"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### PARTICLE FILTERING\n",
+ "The filtering problem is too expensive to solve using the previous methods for problems with large or continuous state spaces.\n",
+ "Particle filtering is a method that can solve the same problem but when the state space is a lot larger, where we wouldn't be able to do these computations in a reasonable amount of time as fast, as time goes by, and we want to keep track of things as they happen.\n",
+ " \n",
+ "The downside is that it is a sampling method and hence isn't accurate, but the more samples we're willing to take, the more accurate we'd get.\n",
+ " \n",
+ "In this method, instead of keping track of the probability distribution, we will drop particles in a similar proportion at the required regions.\n",
+ "The internal representation of this distribution is usually a list of particles with coordinates in the state-space.\n",
+ "A particle is just a new name for a sample.\n",
+ "\n",
+ "Particle filtering can be divided into four steps:\n",
+ "1. __Initialization__: \n",
+ "If we have some idea about the prior probability distribution, we drop the initial particles accordingly, or else we just drop them uniformly over the state space.\n",
+ "\n",
+ "2. __Forward pass__: \n",
+ "As time goes by and measurements come in, we are going to move the selected particles into the grid squares that makes the most sense in terms of representing the distribution that we are trying to track.\n",
+ "When time goes by, we just loop through all our particles and try to simulate what could happen to each one of them by sampling its next position from the transition model.\n",
+ "This is like prior sampling - samples' frequencies reflect the transition probabilities.\n",
+ "If we have enough samples we are pretty close to exact values.\n",
+ "We work through the list of particles, one particle at a time, all we do is stochastically simulate what the outcome might be.\n",
+ "If we had no dimension of time, and we had no new measurements come in, this would be exactly the same as what we did in prior sampling.\n",
+ "\n",
+ "3. __Reweight__:\n",
+ "As observations come in, don't sample the observations, fix them and downweight the samples based on the evidence just like in likelihood weighting.\n",
+ "$$w(x) = P(e/x)$$\n",
+ "$$B(X) \\propto P(e/X)B'(X)$$\n",
+ " \n",
+ "As before, the probabilities don't sum to one, since most have been downweighted.\n",
+ "They sum to an approximation of $P(e)$.\n",
+ "To normalize the resulting distribution, we can divide by $P(e)$\n",
+ " \n",
+ "Likelihood weighting wasn't the best thing for Bayesian networks, because we were not accounting for the incoming evidence so we were getting samples from the prior distribution, in some sense not the right distribution, so we might end up with a lot of particles with low weights. \n",
+ "These samples were very uninformative and the way we fixed it then was by using __Gibbs sampling__.\n",
+ "Theoretically, Gibbs sampling can be run on a HMM, but as we iterated over the process infinitely many times in a Bayesian network, we cannot do that here as we have new incoming evidence and we also need computational cycles to propagate through time.\n",
+ " \n",
+ "A lot of samples with very low weight and they are not representative of the _actual probability distribution_.\n",
+ "So if we keep running likelihood weighting, we keep propagating the samples with smaller weights and carry out computations for that even though these samples have no significant contribution to the actual probability distribution.\n",
+ "Which is why we require this last step.\n",
+ "\n",
+ "4. __Resample__:\n",
+ "Rather than tracking weighted samples, we _resample_.\n",
+ "We choose from our weighted sample distribution as many times as the number of particles we initially had and we replace these particles too, so that we have a constant number of particles.\n",
+ "This is equivalent to renormalizing the distribution.\n",
+ "The samples with low weight are rarely chosen in the new distribution after resampling.\n",
+ "This newer set of particles after resampling is in some sense more representative of the actual distribution and so we are better allocating our computational cycles.\n",
+ "Now the update is complete for this time step, continue with the next one.\n",
+ "\n",
+ " \n",
+ "Let's see how this is implemented in the module."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 85,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "def particle_filtering ( e , N , HMM ): \n",
+ " """Particle filtering considering two states variables.""" \n",
+ " dist = [ 0.5 , 0.5 ] \n",
+ " # Weight Initialization \n",
+ " w = [ 0 for _ in range ( N )] \n",
+ " # STEP 1 \n",
+ " # Propagate one step using transition model given prior state \n",
+ " dist = vector_add ( scalar_vector_product ( dist [ 0 ], HMM . transition_model [ 0 ]), \n",
+ " scalar_vector_product ( dist [ 1 ], HMM . transition_model [ 1 ])) \n",
+ " # Assign state according to probability \n",
+ " s = [ 'A' if probability ( dist [ 0 ]) else 'B' for _ in range ( N )] \n",
+ " w_tot = 0 \n",
+ " # Calculate importance weight given evidence e \n",
+ " for i in range ( N ): \n",
+ " if s [ i ] == 'A' : \n",
+ " # P(U|A)*P(A) \n",
+ " w_i = HMM . sensor_dist ( e )[ 0 ] * dist [ 0 ] \n",
+ " if s [ i ] == 'B' : \n",
+ " # P(U|B)*P(B) \n",
+ " w_i = HMM . sensor_dist ( e )[ 1 ] * dist [ 1 ] \n",
+ " w [ i ] = w_i \n",
+ " w_tot += w_i \n",
+ "\n",
+ " # Normalize all the weights \n",
+ " for i in range ( N ): \n",
+ " w [ i ] = w [ i ] / w_tot \n",
+ "\n",
+ " # Limit weights to 4 digits \n",
+ " for i in range ( N ): \n",
+ " w [ i ] = float ( "{0:.4f}" . format ( w [ i ])) \n",
+ "\n",
+ " # STEP 2 \n",
+ "\n",
+ " s = weighted_sample_with_replacement ( N , s , w ) \n",
+ "\n",
+ " return s \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(particle_filtering)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Here, `scalar_vector_product` and `vector_add` are helper functions to help with vector math and `weighted_sample_with_replacement` resamples from a weighted sample and replaces the original sample, as is obvious from the name.\n",
+ " \n",
+ "This implementation considers two state variables with generic names 'A' and 'B'.\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Here's how we can use `particle_filtering` on our umbrella HMM, though it doesn't make much sense using particle filtering on a problem with such a small state space.\n",
+ "It is just to get familiar with the syntax."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 86,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "umbrella_transition_model = [[0.7, 0.3], [0.3, 0.7]]\n",
+ "umbrella_sensor_model = [[0.9, 0.2], [0.1, 0.8]]\n",
+ "hmm = HiddenMarkovModel(umbrella_transition_model, umbrella_sensor_model)"
+ ]
+ },
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 87,
"metadata": {
- "collapsed": false
+ "scrolled": false
},
- "outputs": [],
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "['A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A']"
+ ]
+ },
+ "execution_count": 87,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "particle_filtering(T, 10, hmm)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We got 5 samples from state `A` and 5 samples from state `B`"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 88,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "['A', 'B', 'A', 'B', 'B', 'B', 'B', 'B', 'B', 'B']"
+ ]
+ },
+ "execution_count": 88,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "particle_filtering([F, T, F, F, T], 10, hmm)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "This time we got 2 samples from state `A` and 8 samples from state `B`"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Comparing runtimes for these algorithms will not be useful, as each solves the filtering task efficiently for a different scenario.\n",
+ " \n",
+ "`forward_backward` calculates the exact probability distribution.\n",
+ " \n",
+ "`fixed_lag_smoothing` calculates an approximate distribution and its runtime will depend on the value of the lag chosen.\n",
+ " \n",
+ "`particle_filtering` is an efficient method for approximating distributions for a very large or continuous state space."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## MONTE CARLO LOCALIZATION\n",
+ "In the domain of robotics, particle filtering is used for _robot localization_.\n",
+ "__Localization__ is the problem of finding out where things are, in this case, we want to find the position of a robot in a continuous state space.\n",
+ " \n",
+ "__Monte Carlo Localization__ is an algorithm for robots to _localize_ using a _particle filter_.\n",
+ "Given a map of the environment, the algorithm estimates the position and orientation of a robot as it moves and senses the environment.\n",
+ " \n",
+ "Initially, particles are distributed uniformly over the state space, ie the robot has no information of where it is and assumes it is equally likely to be at any point in space.\n",
+ " \n",
+ "When the robot moves, it analyses the incoming evidence to shift and change the probability to better approximate the probability distribution of its position.\n",
+ "The particles are then resampled based on their weights.\n",
+ " \n",
+ "Gradually, as more evidence comes in, the robot gets better at approximating its location and the particles converge towards the actual position of the robot.\n",
+ " \n",
+ "The pose of a robot is defined by its two Cartesian coordinates with values $x$ and $y$ and its direction with value $\\theta$.\n",
+ "We use the kinematic equations of motion to model a deterministic state prediction.\n",
+ "This is our motion model (or transition model).\n",
+ " \n",
+ "Next, we need a sensor model.\n",
+ "There can be two kinds of sensor models, the first assumes that the sensors detect _stable_, _recognizable_ features of the environment called __landmarks__.\n",
+ "The robot senses the location and bearing of each landmark and updates its belief according to that.\n",
+ "We can also assume the noise in measurements to be Gaussian, to simplify things.\n",
+ " \n",
+ "Another kind of sensor model is used for an array of range sensors, each of which has a fixed bearing relative to the robot.\n",
+ "These sensors provide a set of range values in each direction.\n",
+ "This will also be corrupted by Gaussian noise, but we can assume that the errors for different beam directions are independent and identically distributed.\n",
+ " \n",
+ "After evidence comes in, the robot updates its belief state and reweights the particle distribution to better aproximate the actual distribution.\n",
+ " \n",
+ " \n",
+ "Let's have a look at how this algorithm is implemented in the module"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 89,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "def monte_carlo_localization ( a , z , N , P_motion_sample , P_sensor , m , S = None ): \n",
+ " """Monte Carlo localization algorithm from Fig 25.9""" \n",
+ "\n",
+ " def ray_cast ( sensor_num , kin_state , m ): \n",
+ " return m . ray_cast ( sensor_num , kin_state ) \n",
+ "\n",
+ " M = len ( z ) \n",
+ " W = [ 0 ] * N \n",
+ " S_ = [ 0 ] * N \n",
+ " W_ = [ 0 ] * N \n",
+ " v = a [ 'v' ] \n",
+ " w = a [ 'w' ] \n",
+ "\n",
+ " if S is None : \n",
+ " S = [ m . sample () for _ in range ( N )] \n",
+ "\n",
+ " for i in range ( N ): \n",
+ " S_ [ i ] = P_motion_sample ( S [ i ], v , w ) \n",
+ " W_ [ i ] = 1 \n",
+ " for j in range ( M ): \n",
+ " z_ = ray_cast ( j , S_ [ i ], m ) \n",
+ " W_ [ i ] = W_ [ i ] * P_sensor ( z [ j ], z_ ) \n",
+ "\n",
+ " S = weighted_sample_with_replacement ( N , S_ , W_ ) \n",
+ " return S \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(monte_carlo_localization)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Our implementation of Monte Carlo Localization uses the range scan method.\n",
+ "The `ray_cast` helper function casts rays in different directions and stores the range values.\n",
+ " \n",
+ "`a` stores the `v` and `w` components of the robot's velocity.\n",
+ " \n",
+ "`z` is a range scan.\n",
+ " \n",
+ "`P_motion_sample` is the motion or transition model.\n",
+ " \n",
+ "`P_sensor` is the range sensor noise model.\n",
+ " \n",
+ "`m` is the 2D map of the environment\n",
+ " \n",
+ "`S` is a vector of samples of size N"
+ ]
+ },
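+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For intuition, a range-sensor noise model of the kind `P_sensor` stands for could be a Gaussian centred on the ray-cast distance. This is only an illustrative assumption, not the definition used with this function later:\n",
+ "\n",
+ "```python\n",
+ "import math\n",
+ "\n",
+ "def gaussian_range_sensor(z, z_, sigma=1.0):\n",
+ "    # Likelihood of measuring z when the true ray-cast distance is z_,\n",
+ "    # under Gaussian noise with standard deviation sigma.\n",
+ "    return math.exp(-((z - z_) ** 2) / (2 * sigma ** 2)) / (sigma * math.sqrt(2 * math.pi))\n",
+ "```"
+ ]
+ },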
+ {
+ "cell_type": "markdown",
+ "metadata": {},
"source": [
- "import probability"
+ "We'll now define a simple 2D map to run Monte Carlo Localization on.\n",
+ " \n",
+ "Let's say this is the map we want\n",
+ " "
]
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 90,
"metadata": {
- "collapsed": true
+ "scrolled": true
},
+ "outputs": [
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAfAAAAFaCAYAAADhKw9uAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAASOUlEQVR4nO3df4ztd13n8dd779hAKSwlvaj9oaVaUJao0JGARFYpxIJIMbvZBcUUf6SJP6AQFIsmaGI0ZDWoiQZTC7aJDailArqKdPEHmrDVuQWEclEaiu2FSoclCLrGWnz7x5yScXrnzvSc750zn9PHI7mZ8+M75/v+3Dszz/s958w51d0BAMbyn5Y9AADw4Ak4AAxIwAFgQAIOAAMScAAYkIADwIAEHAAGJOBwiFXVx6vq2Tsue2lV/cUEt91V9dWL3g6wHAIOAAMScBhYVZ1bVW+tqs2quqOqXr7tuqdW1Xur6rNVdXdV/UpVnTG77j2zzT5QVf9YVf+zqr6lqk5U1aur6p7Z57ywqp5XVX9bVZ+pqp/Yz+3Pru+qenlVfayqPl1VP19VfubARHwzwaBmMfy9JB9Icl6SS5O8oqq+bbbJF5K8Msk5SZ4+u/6HkqS7nznb5uu7+6zu/q3Z+S9L8rDZ7b02ya8neUmSS5J8c5LXVtVFe93+Nt+ZZD3JU5JcnuT7plg7kJTXQofDq6o+nq1A3rft4jOS3JrkVUl+p7u/Ytv2r0ny+O7+3pPc1iuS/Nfu/s7Z+U5ycXffPjv/LUn+MMlZ3f2Fqnpkks8leVp33zLb5liSn+nut+3z9p/b3e+cnf+hJP+tuy9d4K8EmFlb9gDAnl7Y3f/n/jNV9dIkP5DkK5OcW1Wf3bbtkSR/Ptvu8Ulen60j4DOz9f1+bI99/b/u/sLs9D/PPn5q2/X/nOSsB3H7d207/XdJzt1j/8A+uQsdxnVXkju6+9Hb/jyyu583u/4NST6SraPsRyX5iSQ14f73c/sXbDv9FUk+OeH+4SFNwGFcf5nkc1X141X18Ko6UlVPqqpvnF1//13g/1hVX5PkB3d8/qeSXJT57XX7SfJjVXV2VV2Q5Kokv3WSbYA5CDgManZX93ck+YYkdyT5dJJrk/zn2SY/muS7knw+W09G2xnPn05y/exZ5P9jjhH2uv0keXu27lZ/f5L/neSNc+wHOAlPYgNOi51PkgOm5QgcAAYk4AAwIHehA8CAHIEDwIAO9IVczjnnnL7wwgsPcpfAijh2bK/XoGE/LrnkkmWPcFoc9NfHQf49Hjt27NPdfXTn5Qd6F/r6+npvbGwc2P6A1VE15WvQPHSt6sOmB/31cZB/j1V1rLvXd17uLnQAGJCAA8CABBwABiTgADAgAQeAAQk4AAxIwAFgQAIOAAMScAAY0EIBr6rLqupvqur2qrp6qqEAgFObO+BVdSTJryZ5bpInJnlxVT1xqsEAgN0tcgT+1CS3d/fHuvveJG9Jcvk0YwEAp7JIwM9Lcte28ydml/0HVXVlVW1U1cbm5uYCuwMA7rdIwE/21i8PeHuW7r6mu9e7e/3o0Qe8GxoAMIdFAn4iyQXbzp+f5JOLjQMA7MciAf+rJBdX1eOq6owkL0ryjmnGAgBOZW3eT+zu+6rqR5L8UZIjSd7U3bdNNhkAsKu5A54k3f0HSf5golkAgH3ySmwAMCABB4ABCTgADEjAAWBAAg4AAxJwABiQgAPAgBb6PXAA2E3Vyd4yg6k4AgeAAQk4AAxIwAFgQAIOAAMScAAYkIADwIAEHAAGJOAAMCABB4ABCTgADEjAAWBAAg4AAxJwABiQgAPAgAQcAAYk4AAwIAEHgAEJOAAMSMABYEACDgADEnAAGJCAA8CABBwABiTgADAgAQeAAQk4AAyouvvgdlZ1cDuDh6iD/J4+SFW17BFWwgH/zD+wfR20A/57PNbd6zsvdwQOAAMScAAYkIADwIAEHAAGJOAAMCABB4ABCTgADEjAAWBAAg4AAxJwABjQ3AGvqguq6k+q6nhV3VZVV005GACwu7UFPve+JK/q7lur6pFJjlXVzd394YlmAwB2MfcReHff3d23zk5/PsnxJOdNNRgAsLtFjsC/qKouTPLkJLec5Lork1w5xX4AgC0Lv51oVZ2V5M+S/Gx337THtqv5PodwiHg7UU7F24lOY/i3E62qL0ny1iQ37BVvAGA6izwLvZK8Mcnx7n79dCMBAHtZ5Aj8GUm+J8mzqur9sz/Pm2guAOAU5n4SW3f/RZLVfYADAA4xr8QGAAMScAAYkIADwIAEHAAGJOAAMCABB4ABCTgADEjAAWBAk7wb2X5dcskl2djYOMhdAsBKcgQOAAMScAAYkIADwIAEHAAGJOAAMCABB4ABCTgADEjAAWBAAg4AAxJwABiQgAPAgAQcAAYk4AAwIAEHgAEJOAAMSMABYEACDgADEnAAGJCAA8CABBwABiTgADAgAQeAAQk4AAxIwAFgQAIOAAMScAAY0NqyBzhdqmrZIwDAaeMIHAAGJOAAMCABB4ABCTgADEjAAWBAAg4AAxJwABiQgAPAgAQcAAYk4AAwoIUDXlVHqup9VfX7UwwEAOxtiiPwq5Icn+B2AIB9WijgVXV+km9Pcu004wAA+7HoEfgvJXl1kn/bbYOqurKqNqpqY3Nzc8HdAQDJAgGvqucnuae7j51qu+6+prvXu3v96NGj8+4OANhmkSPwZyR5QVV9PMlbkjyrqn5zkqkAgFOaO+Dd/ZruPr+7L0zyoiR/3N0vmWwyAGBXfg8cAAa0NsWNdPefJvnTKW4LANibI3AAGJCAA8CABBwABiTgADAgAQeAAQk4AAxIwAFgQJP8Hvhh1N3LHgGYUFUtewQ4VByBA8CABBwABiTgADAgAQeAAQk4AAxIwAFgQAIOAAMScAAYkIADwIAEHAAGJOAAMCABB4ABCTgADEjAAWBAAg4AAxJwABiQgAPAgAQcAAYk4AAwIAEHgAEJOAAMSMABYEACDgADEnAAGJCAA8CABBwABiTgADAgAQeAAQk4AAxIwAFgQAIOAAMScAAYkIADwIAEHAAGJOAAMCABB4ABCTgADGihgFfVo6vqxqr6SFUdr6qnTzUYALC7tQU//5eTvLO7/3tVnZHkzAlmAgD2MHfAq+pRSZ6Z5KVJ0t33Jrl3mrEAgFNZ5C70i5JsJvmNqnpfVV1bVY/YuVFVXVlVG1W1sbm5ucDuAID7LRLwtSRPSfKG7n5ykn9KcvXOjbr7mu5e7+71o0ePLrA7AOB+iwT8RJIT3X3L7PyN2Qo6AHCazR3w7v77JHdV1RNmF12a5MOTTAUAnNKiz0J/WZIbZs9A/1iS7118JABgLwsFvLvfn2R9olkAgH3ySmwAMCABB4ABCTgADEjAAWBAAg4AAxJwABiQgAPAgAQcAAa06CuxkaSqlj0Ch1x3L3sEYMU4AgeAAQk4AAxIwAFgQAIOAAMScAAYkIADwIAEHAAGJOAAMCABB4ABCTgADEjAAWBAAg4AAxJwABiQgAPAgAQ
cAAYk4AAwIAEHgAEJOAAMSMABYEACDgADEnAAGJCAA8CABBwABiTgADAgAQeAAQk4AAxobdkDAOxHdy97BB6kg/w3q6oD29dh4QgcAAYk4AAwIAEHgAEJOAAMSMABYEACDgADEnAAGJCAA8CABBwABiTgADCghQJeVa+sqtuq6kNV9eaqethUgwEAu5s74FV1XpKXJ1nv7iclOZLkRVMNBgDsbtG70NeSPLyq1pKcmeSTi48EAOxl7oB39yeS/EKSO5PcneQfuvtdO7erqiuraqOqNjY3N+efFAD4okXuQj87yeVJHpfk3CSPqKqX7Nyuu6/p7vXuXj969Oj8kwIAX7TIXejPTnJHd292978muSnJN00zFgBwKosE/M4kT6uqM2vrndQvTXJ8mrEAgFNZ5DHwW5LcmOTWJB+c3dY1E80FAJzC2iKf3N0/leSnJpoFANgnr8QGAAMScAAYkIADwIAEHAAGJOAAMCABB4ABCTgADGih3wNnS3cvewSAQ2frRTo5XRyBA8CABBwABiTgADAgAQeAAQk4AAxIwAFgQAIOAAMScAAYkIADwIAEHAAGJOAAMCABB4ABCTgADEjAAWBAAg4AAxJwABiQgAPAgAQcAAYk4AAwIAEHgAEJOAAMSMABYEACDgADEnAAGJCAA8CABBwABiTgADCgtWUPsAqqatkjcMh197JHGJ7vs2kc5NfiQe7rofj14QgcAAYk4AAwIAEHgAEJOAAMSMABYEACDgADEnAAGJCAA8CABBwABrRnwKvqTVV1T1V9aNtlj6mqm6vqo7OPZ5/eMQGA7fZzBH5dkst2XHZ1knd398VJ3j07DwAckD0D3t3vSfKZHRdfnuT62enrk7xw4rkAgFOY9zHwL+3uu5Nk9vGxu21YVVdW1UZVbWxubs65OwBgu9P+JLbuvqa717t7/ejRo6d7dwDwkDBvwD9VVV+eJLOP90w3EgCwl3kD/o4kV8xOX5Hk7dOMAwDsx35+jezNSd6b5AlVdaKqvj/J65I8p6o+muQ5s/MAwAFZ22uD7n7xLlddOvEsAMA+eSU2ABiQgAPAgAQcAAYk4AAwIAEHgAEJOAAMSMABYEACDgADqu4+uJ1VbSb5uwf5aeck+fRpGOcwWNW1req6ktVd26quK1ndta3qupLVXdu86/rK7n7Au4EdaMDnUVUb3b2+7DlOh1Vd26quK1ndta3qupLVXduqritZ3bVNvS53oQPAgAQcAAY0QsCvWfYAp9Gqrm1V15Ws7tpWdV3J6q5tVdeVrO7aJl3XoX8MHAB4oBGOwAGAHQQcAAZ0qANeVZdV1d9U1e1VdfWy55lCVV1QVX9SVcer6raqumrZM02tqo5U1fuq6veXPctUqurRVXVjVX1k9m/39GXPNJWqeuXsa/FDVfXmqnrYsmeaR1W9qaruqaoPbbvsMVV1c1V9dPbx7GXOOK9d1vbzs6/Hv66q362qRy9zxnmcbF3brvvRquqqOmcZsy1qt7VV1ctmXbutqv7XIvs4tAGvqiNJfjXJc5M8McmLq+qJy51qEvcleVV3f22SpyX54RVZ13ZXJTm+7CEm9stJ3tndX5Pk67Mi66uq85K8PMl6dz8pyZEkL1ruVHO7LsllOy67Osm7u/viJO+enR/RdXng2m5O8qTu/rokf5vkNQc91ASuywPXlaq6IMlzktx50ANN6LrsWFtVfWuSy5N8XXf/lyS/sMgODm3Akzw1ye3d/bHuvjfJW7K18KF1993dfevs9OezFYLzljvVdKrq/CTfnuTaZc8ylap6VJJnJnljknT3vd392eVONam1JA+vqrUkZyb55JLnmUt3vyfJZ3ZcfHmS62enr0/ywgMdaiInW1t3v6u775ud/b9Jzj/wwRa0y79ZkvxiklcnGfZZ1rus7QeTvK67/2W2zT2L7OMwB/y8JHdtO38iKxS6JKmqC5M8Ockty51kUr+UrW+8f1v2IBO6KMlmkt+YPTRwbVU9YtlDTaG7P5Gto4A7k9yd5B+6+13LnWpSX9rddydb/3lO8tglz3O6fF+SP1z2EFOoqhck+UR3f2DZs5wGj0/yzVV1S1X9WVV94yI3dpgDXie5bNj/je1UVWcleWuSV3T355Y9zxSq6vlJ7unuY8ueZWJrSZ6S5A3d/eQk/5Rx74r9D2aPCV+e5HFJzk3yiKp6yXKn4sGoqp/M1kNzNyx7lkVV1ZlJfjLJa5c9y2myluTsbD18+mNJfruqTta6fTnMAT+R5IJt58/PoHft7VRVX5KteN/Q3Tcte54JPSPJC6rq49l6yONZVfWbyx1pEieSnOju++8puTFbQV8Fz05yR3dvdve/JrkpyTcteaYpfaqqvjxJZh8XusvysKmqK5I8P8l392q8qMdXZes/kx+Y/Rw5P8mtVfVlS51qOieS3NRb/jJb91TO/SS9wxzwv0pycVU9rqrOyNYTa96x5JkWNvvf1huTHO/u1y97nil192u6+/zuvjBb/15/3N3DH811998nuauqnjC76NIkH17iSFO6M8nTqurM2dfmpVmRJ+jNvCPJFbPTVyR5+xJnmVRVXZbkx5O8oLv//7LnmUJ3f7C7H9vdF85+jpxI8pTZ9+AqeFuSZyVJVT0+yRlZ4F3XDm3AZ0/O+JEkf5StHyi/3d23LXeqSTwjyfdk6+j0/bM/z1v2UOzpZUluqKq/TvINSX5uyfNMYnavwo1Jbk3ywWz9TBjyZSyr6s1J3pvkCVV1oqq+P8nrkjynqj6arWc1v26ZM85rl7X9SpJHJrl59nPk15Y65Bx2WddK2GVtb0py0exXy96S5IpF7jnxUqoAMKBDewQOAOxOwAFgQAIOAAMScAAYkIADwIAEHAAGJOAAMKB/B24h+wUcnnY9AAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "m = MCLmap([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0],\n",
+ " [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],\n",
+ " [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0],\n",
+ " [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],\n",
+ " [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0],\n",
+ " [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],\n",
+ " [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],\n",
+ " [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],\n",
+ " [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],\n",
+ " [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],\n",
+ " [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0]])\n",
+ "\n",
+ "heatmap(m.m, cmap='binary')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let's define the motion model as a function `P_motion_sample`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 91,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def P_motion_sample(kin_state, v, w):\n",
+ " \"\"\"Sample from possible kinematic states.\n",
+    "    Returns a sample from a single-element distribution (no uncertainty in motion)\"\"\"\n",
+ " pos = kin_state[:2]\n",
+ " orient = kin_state[2]\n",
+ "\n",
+ " # for simplicity the robot first rotates and then moves\n",
+ " orient = (orient + w)%4\n",
+ " for _ in range(orient):\n",
+ " v = (v[1], -v[0])\n",
+ " pos = vector_add(pos, v)\n",
+ " return pos + (orient,)"
+ ]
+ },
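+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As a quick sanity check (the state and velocity below are made-up values, not part of the module), a single rotation (`w = 1`) turns the velocity `(0, 1)` into `(1, 0)` before the move:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# hypothetical state: position (2, 3), orientation 0; rotate once, then move\n",
+    "P_motion_sample((2, 3, 0), (0, 1), 1)  # -> (3, 3, 1)"
+   ]
+  },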
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Define the sensor model as a function `P_sensor`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 92,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def P_sensor(x, y):\n",
+ " \"\"\"Conditional probability for sensor reading\"\"\"\n",
+ " # Need not be exact probability. Can use a scaled value.\n",
+ " if x == y:\n",
+ " return 0.8\n",
+ " elif abs(x - y) <= 2:\n",
+ " return 0.05\n",
+ " else:\n",
+ " return 0"
+ ]
+ },
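+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "For instance, with some hypothetical readings: an exact match scores 0.8, a near miss 0.05, and anything further off 0."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "P_sensor(3, 3), P_sensor(3, 5), P_sensor(3, 8)  # -> (0.8, 0.05, 0)"
+   ]
+  },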
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Initializing variables."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 93,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "a = {'v': (0, 0), 'w': 0}\n",
+ "z = (2, 4, 1, 6)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let's run `monte_carlo_localization` with these parameters to find a sample distribution S."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 94,
+ "metadata": {},
"outputs": [],
- "source": []
+ "source": [
+ "S = monte_carlo_localization(a, z, 1000, P_motion_sample, P_sensor, m)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let's plot the values in the sample distribution `S`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 95,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "GRID:\n",
+ " 0 0 12 0 143 14 0 0 0 0 0 0 0 0 0 0 0\n",
+ " 0 0 0 0 17 52 201 6 0 0 0 0 0 0 0 0 0\n",
+ " 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
+ " 0 0 0 3 5 19 9 3 0 0 0 0 0 0 0 0 0\n",
+ " 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
+ " 0 0 6 166 0 21 0 0 0 0 0 0 0 0 0 0 0\n",
+ " 0 0 0 1 11 75 0 0 0 0 0 0 0 0 0 0 0\n",
+ " 73 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0\n",
+ "124 0 0 0 0 0 0 1 0 3 0 0 0 0 0 0 0\n",
+ " 0 0 0 14 4 15 1 0 0 0 0 0 0 0 0 0 0\n",
+ " 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n"
+ ]
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAfAAAAFaCAYAAADhKw9uAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAATEElEQVR4nO3df6zldX3n8debuSK/F2Swlt+yi7pq2upOjdbU7Qqs+KNis5td7dJg2w1Ju1U0thZtIt1s0pi2cdukjV0WLSQl2i7S6nZbFW271qyLHVBUxFYiCKMIA4aCXSsF3vvHPSS317lzh3u+c858Lo9HMrn3nPO95/P+zNy5z/mee+6Z6u4AAGM5bNkDAACPn4ADwIAEHAAGJOAAMCABB4ABCTgADEjAAWBAAg6HsKq6varOXXfd66vqkxPcd1fVP5v3foDlEHAAGJCAw8Cq6uSq+kBV7a2q26rqjWtue0FVfaqq7q+qu6rqt6rq8Nltn5gddlNVfauq/n1V/UhV7amqt1bVPbOPeU1VvaKq/qaqvllVbz+Q+5/d3lX1xqr6SlXdW1W/VlW+5sBE/GWCQc1i+D+T3JTklCTnJHlTVb1sdsgjSd6cZGeSF81u/9kk6e6XzI75/u4+prt/f3b5aUmOmN3fO5L89yQXJvkXSX44yTuq6qzN7n+NH0uyK8nzk1yQ5Kem2DuQlNdCh0NXVd2e1UA+vObqw5PcmOQtSf5Hd5++5vi3JXlGd//kPu7rTUn+ZXf/2OxyJzm7u2+dXf6RJH+a5JjufqSqjk3yQJIXdvf1s2NuSPJfuvuPDvD+X97dH55d/tkk/6a7z5njtwSYWVn2AMCmXtPdH3vsQlW9Psl/THJGkpOr6v41x+5I8pez456R5F1ZPQM+Kqt/32/YZK37uvuR2fvfnr29e83t305yzOO4/zvXvP/VJCdvsj5wgDyEDuO6M8lt3X38ml/HdvcrZre/O8mXsnqWfVyStyepCdc/kPs/bc37pyf5+oTrwxOagMO4Pp3kgar6xao6sqp2VNVzq+oHZ7c/9hD4t6rqWUl+Zt3H353krGzdZvefJL9QVSdU1WlJLkny+/s4BtgCAYdBzR7q/tEkP5DktiT3JrkiyT+ZHfLzSX48yYNZfTLa+nj+cpKrZs8i/3dbGGGz+0+SD2b1YfXPJvlfSd6zhXWAffAkNuCgWP8kOWBazsABYEACDgAD8hA6AAzIGTgADGihL+Syc+eJfebpp21+4GgefWTzY6Z02I6FLfXQbZ9b2FqHn/Gcha21yN9DgHnc8Jmb7u3uk9Zfv9CAn3n6adn9yY9tfuBg+u8fWOh6dcRxC1vr9gtPWdhaZ1zxhwtbq444fmFrAcyjjj7pq/u63kPoADAgAQeAAQk4AAxIwAFgQAIOAAMScAAYkIADwIAEHAAGJOAAMKC5Al5V51fVX1fVrVV16VRDAQD7t+WAV9WOJL+d5OVJnp3kdVX17KkGAwA2Ns8Z+AuS3NrdX+nuh5K8P8kF04wFAOzPPAE/Jcmday7vmV33j1TVxVW1u6p27733vjmWAwAeM0/Aax/X9Xdd0X15d+/q7l0n7TxxjuUAgMfME/A9Sdb+596nJvn6fOMAAAdinoD/VZKzq+rpVXV4ktcm+dA0YwEA+7Oy1Q/s7oer6ueSfCTJjiTv7e6bJ5sMANjQlgOeJN39J0n+ZKJZAIAD5JXYAGBAAg4AAxJwABiQgAPAgAQcAAYk4AAwIAEHgAHN9XPgrKojjlv2CAfNGZfftLC1+va/XNha//nHL17YWkly2advX9hatfLkha0FLI8zcAAYkIADwIAEHAAGJOAAMCABB4ABCTgADEjAAWBAAg4AAxJwABiQgAPAgAQcAAYk4AAwIAEHgAEJOAAMSMABYEACDgADEnAAGJCAA8CABBwABiTgADAgAQeAAQk4AAxIwAFgQAIOAAMScAAYkIADwIBWlj0Ah7Y6aufi1nrWjy5srV++8a6FrQVwMDgDB4ABCTgADEjAAWBAAg4AAxJwABiQgAPAgAQcAAYk4AAwIAEHgAEJOAAMaMsBr6rTqurPq+qWqrq5qi6ZcjAAYGPzvBb6w0ne0t03VtWxSW6oquu6+4sTzQYAbGDLZ+DdfVd33zh7/8EktyQ5ZarBAICNTfI98Ko6M8nzkly/j9surqrdVbV77733TbEcADzhzR3wqjomyQeSvKm7H1h/e3df3t27unvXSTtPnHc5ACBzBryqnpTVeF/d3ddOMxIAsJl5noVeSd6T5Jbuftd0IwEAm5nnDPzFSX4iyUur6rOzX6+YaC4AYD+2/GNk3f3JJDXhLADAAfJKbAAwIAEHgAEJOAAMSMABYEACDgADEnAAGJCAA8CABBwABjTP/wfOkvSjjyxwsQWu9e37F7fW4Ucvbq0kWTliYUvVYTsWthawPM7AAWBAAg4AAxJwABiQgAPAgAQcAAYk4AAwIAEHgAEJOAAMSMABYEACDgADEnAAGJCAA8CABBwABiTgADAgAQeAAQk4AAxIwAFgQAIOAAMScAAYkIADwIAEHAAGJOAAMCABB4ABCTgADEjAAWBAAg4AA1pZ9gA8fnXYjgWutsC1jnnq4tYCGJwzcAAYkIADwIAEHAAGJOAAMCABB4ABCTgADEjAAWBAAg4AAxJwABiQgAPAgOYOeFXtqKrPVNUfTzEQALC5Kc7AL0lyywT3AwAcoLkCXlWnJnllkiumGQcAOBDznoH/RpK3Jnl0owOq6uKq2l1Vu/fee9+cywEAyRwBr6pXJbmnu2/Y33HdfXl37+ruXSftPHGrywEAa8xzBv7iJK+uqtuTvD/JS6vq9yaZCgDYry0HvLvf1t2ndveZSV6b5M+6+8LJJgMANuTnwAFgQCtT3El3/0WSv5jivgCAzTkDB4ABCTgADEjAAWBAAg4AAxJwABiQgAPAgAQcAAY0yc+BP9H1w99Z6HrXvfL0ha31rz9y98LW6ge/sbC16tinLWwtgIPBGTgADEjAAWBAAg4AAxJwABiQgAPAgAQcAAYk4AAwIAEHgAEJOAAMSMABYEACDgADEnAAGJCAA8CABBwABiTgADAgAQeAAQk4AAxIwAFgQAIOAAMScAAYkIADwIAEHAAGJOAAMCABB4ABCTgADEjAAWBAAg4AA1pZ9gDbQa08eaHrnffhbyxsrf7Og4tb6//8t4WtVS+7bGFrARwMzsABYEACDgADEnAAGJCAA8CABBwABiTgADAgAQeAAQk4AAxIwAFgQHMFvKqOr6prqupLVXVLVb1oqsEAgI3N+1Kqv5nkw939b6vq8CRHTTATALCJLQe8qo5L8pIkr0+S7n4oyUPTjAUA7M88D6GflWRvkt+tqs9U1RVVdfT6g6rq4qraXVW799573xzLAQCPmSfgK0men+Td3f28JH+X5NL1B3X35d29q7t3nbTzxDmWAwAeM0/A9yTZ093Xzy5fk9WgAwAH2ZYD3t3fSHJnVT1zdtU5Sb44yVQAwH7N+yz0NyS5evYM9K8k+cn5RwI
ANjNXwLv7s0l2TTQLAHCAvBIbAAxIwAFgQAIOAAMScAAYkIADwIAEHAAGJOAAMCABB4ABzftKbI/PA3fl0Y/9ykKWOuzcty9knWWoqsUt9uRjF7ZUveyyha3FNLp7YWst9PMeBuAMHAAGJOAAMCABB4ABCTgADEjAAWBAAg4AAxJwABiQgAPAgAQcAAYk4AAwIAEHgAEJOAAMSMABYEACDgADEnAAGJCAA8CABBwABiTgADAgAQeAAQk4AAxIwAFgQAIOAAMScAAYkIADwIAEHAAGJOAAMKCVRS72yAPfzLc+evVC1jru3LcvZB04EN29sLWqamFrpR9d3Fq1Y3FrwQCcgQPAgAQcAAYk4AAwIAEHgAEJOAAMSMABYEACDgADEnAAGJCAA8CABBwABjRXwKvqzVV1c1V9oareV1VHTDUYALCxLQe8qk5J8sYku7r7uUl2JHntVIMBABub9yH0lSRHVtVKkqOSfH3+kQCAzWw54N39tSS/nuSOJHcl+dvu/uj646rq4qraXVW77/v2Av/nIgDYxuZ5CP2EJBckeXqSk5McXVUXrj+uuy/v7l3dvevEIz1nDgCmME9Rz01yW3fv7e5/SHJtkh+aZiwAYH/mCfgdSV5YVUdVVSU5J8kt04wFAOzPPN8Dvz7JNUluTPL52X1dPtFcAMB+rMzzwd19WZLLJpoFADhAnlUGAAMScAAYkIADwIAEHAAGJOAAMCABB4ABCTgADGiunwN/vHac+pwc96sfW+SS21L//f2LW+xJRy9urYceXNxaR5ywuLWSrL5Y4fZTh+1Y9gjwhOUMHAAGJOAAMCABB4ABCTgADEjAAWBAAg4AAxJwABiQgAPAgAQcAAYk4AAwIAEHgAEJOAAMSMABYEACDgADEnAAGJCAA8CABBwABiTgADAgAQeAAQk4AAxIwAFgQAIOAAMScAAYkIADwIAEHAAGJOAAMCABB4ABrSx7AB6/OuL4ZY9wcBz5lGVPADAMZ+AAMCABB4ABCTgADEjAAWBAAg4AAxJwABiQgAPAgAQcAAYk4AAwoE0DXlXvrap7quoLa657SlVdV1Vfnr094eCOCQCsdSBn4FcmOX/ddZcm+Xh3n53k47PLAMCCbBrw7v5Ekm+uu/qCJFfN3r8qyWsmngsA2I+tfg/8e7r7riSZvX3qRgdW1cVVtbuqdu+9974tLgcArHXQn8TW3Zd3967u3nXSzhMP9nIA8ISw1YDfXVXfmySzt/dMNxIAsJmtBvxDSS6avX9Rkg9OMw4AcCAO5MfI3pfkU0meWVV7quqnk7wzyXlV9eUk580uAwALsrLZAd39ug1uOmfiWQCAA+SV2ABgQAIOAAMScAAYkIADwIAEHAAGJOAAMCABB4ABCTgADKi6e3GLVe1N8tXH+WE7k9x7EMY5FGzXvW3XfSXbd2/bdV/J9t3bdt1Xsn33ttV9ndHdJ62/cqEB34qq2t3du5Y9x8GwXfe2XfeVbN+9bdd9Jdt3b9t1X8n23dvU+/IQOgAMSMABYEAjBPzyZQ9wEG3XvW3XfSXbd2/bdV/J9t3bdt1Xsn33Num+DvnvgQMA322EM3AAYB0BB4ABHdIBr6rzq+qvq+rWqrp02fNMoapOq6o/r6pbqurmqrpk2TNNrap2VNVnquqPlz3LVKrq+Kq6pqq+NPuze9GyZ5pKVb159rn4hap6X1UdseyZtqKq3ltV91TVF9Zc95Squq6qvjx7e8IyZ9yqDfb2a7PPx89V1R9W1fHLnHEr9rWvNbf9fFV1Ve1cxmzz2mhvVfWGWddurqpfnWeNQzbgVbUjyW8neXmSZyd5XVU9e7lTTeLhJG/p7n+e5IVJ/tM22ddalyS5ZdlDTOw3k3y4u5+V5PuzTfZXVackeWOSXd393CQ7krx2uVNt2ZVJzl933aVJPt7dZyf5+OzyiK7Md+/tuiTP7e7vS/I3Sd626KEmcGW+e1+pqtOSnJfkjkUPNKErs25vVfWvklyQ5Pu6+zlJfn2eBQ7ZgCd5QZJbu/sr3f1QkvdndeND6+67uvvG2fsPZjUEpyx3qulU1alJXpnkimXPMpWqOi7JS5K8J0m6+6Huvn+5U01qJcmRVbWS5KgkX1/yPFvS3Z9I8s11V1+Q5KrZ+1clec1Ch5rIvvbW3R/t7odnF/9vklMXPticNvgzS5L/muStSYZ9lvUGe/uZJO/s7u/MjrlnnjUO5YCfkuTONZf3ZBuFLkmq6swkz0ty/XInmdRvZPUv3qPLHmRCZyXZm+R3Z98auKKqjl72UFPo7q9l9SzgjiR3Jfnb7v7ocqea1Pd0913J6j+ekzx1yfMcLD+V5E+XPcQUqurVSb7W3Tcte5aD4BlJfriqrq+q/11VPzjPnR3KAa99XDfsv8bWq6pjknwgyZu6+4FlzzOFqnpVknu6+4ZlzzKxlSTPT/Lu7n5ekr/LuA/F/iOz7wlfkOTpSU5OcnRVXbjcqXg8quqXsvqtuauXPcu8quqoJL+U5B3LnuUgWUlyQla/ffoLSf6gqvbVugNyKAd8T5LT1lw+NYM+tLdeVT0pq/G+uruvXfY8E3pxkldX1e1Z/ZbHS6vq95Y70iT2JNnT3Y89UnJNVoO+HZyb5Lbu3tvd/5Dk2iQ/tOSZpnR3VX1vkszezvWQ5aGmqi5K8qok/6G3x4t6/NOs/mPyptnXkVOT3FhVT1vqVNPZk+TaXvXprD5SueUn6R3KAf+rJGdX1dOr6vCsPrHmQ0ueaW6zf229J8kt3f2uZc8zpe5+W3ef2t1nZvXP68+6e/izue7+RpI7q+qZs6vOSfLFJY40pTuSvLCqjpp9bp6TbfIEvZkPJblo9v5FST64xFkmVVXnJ/nFJK/u7v+37Hmm0N2f7+6ndveZs68je5I8f/Z3cDv4oyQvTZKqekaSwzPH/7p2yAZ89uSMn0vykax+QfmD7r55uVNN4sVJfiKrZ6efnf16xbKHYlNvSHJ1VX0uyQ8k+ZUlzzOJ2aMK1yS5Mcnns/o1YciXsayq9yX5VJJnVtWeqvrpJO9Mcl5VfTmrz2p+5zJn3KoN9vZbSY5Nct3s68jvLHXILdhgX9vCBnt7b5KzZj9a9v4kF83zyImXUgWAAR2yZ+AAwMYEHAAGJOAAMCABB4ABCTgADEjAAWBAAg4AA/r/85kBLqIO9qEAAAAASUVORK5CYII=\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "grid = [[0]*17 for _ in range(11)]\n",
+ "for x, y, _ in S:\n",
+ " if 0 <= x < 11 and 0 <= y < 17:\n",
+ " grid[x][y] += 1\n",
+ "print(\"GRID:\")\n",
+ "print_table(grid)\n",
+ "heatmap(grid, cmap='Oranges')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "The samples cluster around a few cells, with the largest counts near `(1, 6)` and `(5, 3)`, so the robot is not yet confident about its position: several other cells also carry substantial probability mass."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let's look at another scenario."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 96,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "GRID:\n",
+ "0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
+ "0 0 0 0 0 0 0 0 1000 0 0 0 0 0 0 0 0\n",
+ "0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
+ "0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
+ "0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
+ "0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
+ "0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
+ "0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
+ "0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
+ "0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
+ "0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n"
+ ]
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAfAAAAFaCAYAAADhKw9uAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAARl0lEQVR4nO3df6zld13n8dd7OzbQFpbaKUp/YOluwWWJSnckIJF1KWQLshSzmxV2MUXdNNEVCkGxaIIkm2zIalhNNJhuwTaxAd1SBV1FKv5gSdjqtFChFKWh0A5UOlOCoGu2Ft/7xz01l8vcucM9Z+bM+/J4JJN7fnzv+b4/nbn3eb/fc+5pdXcAgFn+0boHAAC+dgIOAAMJOAAMJOAAMJCAA8BAAg4AAwk4AAwk4HAKq6pPVdXzttz2iqr6wAoeu6vqny77OMB6CDgADCTgMFhVnVdV76yqw1V1T1W9atN9z6iqD1bVF6rq/qr6xao6fXHf+xeb3VFVf11V319V31NVh6rqdVX1wOJzXlJVL6yqv6iqz1fVTx3P4y/u76p6VVV9sqqOVNXPVpXvObAivphgqEUMfyvJHUnOT3JZkldX1b9ebPLlJK9Jsj/Jsxb3/2iSdPdzFtt8e3ef1d2/trj+zUketXi8NyT5H0lenuRfJPnuJG+oqot3evxNvi/JgSSXJrkiyQ+tYu1AUt4LHU5dVfWpbATy4U03n57k9iSvTfI/u/uJm7Z/fZInd/cPHuWxXp3kX3b39y2ud5JLuvvuxfXvSfK7Sc7q7i9X1WOSfDHJM7v71sU2tyX5L939m8f5+C/o7vcsrv9okn/b3Zct8Z8EWNi37gGAHb2ku3//kStV9Yok/ynJtyQ5r6q+sGnb05L878V2T07y5mwcAZ+Rja/323bY14Pd/eXF5b9dfPzcpvv/NslZX8Pj37fp8qeTnLfD/oHj5BQ6zHVfknu6+3Gb/jymu1+4uP8tST6ejaPsxyb5qSS1wv0fz+NfuOnyE5N8doX7h69rAg5z/UmSL1bVT1bVo6vqtKp6WlV95+L+R06B/3VVfWuSH9ny+Z9LcnF2b6fHT5KfqKqzq+rCJFcn+bWjbAPsgoDDUItT3f8myXckuSfJkSTXJfnHi01+PMl/SPKlbLwYbWs835jkhsWryP/9LkbY6fGT5F3ZOK3+4ST/K8lbd7Ef4Ci8iA04Iba+SA5YLUfgADCQgAPAQE6hA8BAjsABYKCT+kYu+/ef0xc98cKdNwQAkiS3feiOI9197tbbT2rAL3rihTn4gd/feUMAIElSZ5776aPd7hQ6AAwk4AAwkIADwEACDgADCTgADCTgADCQgAPAQAIOAAMJOAAMtFTAq+ryqvrzqrq7qq5Z1VAAwLHtOuBVdVqSX0rygiRPTfKyqnrqqgYDALa3zBH4M5Lc3d2f7O6HkrwjyRWrGQsAOJZlAn5+kvs2XT+0uO0rVNVVVXWwqg4ePvLgErsDAB6xTMDrKLf1V93QfW13H+juA+fuP2eJ3QEAj1gm4IeSbP6fe1+Q5LPLjQMAHI9lAv6nSS6pqidV1elJXprk3asZCwA4ln27/cTufriqfizJ7yU5LcnbuvvOlU0GAGxr1wFPku7+nSS/s6JZAIDj5J3YAGAgAQeAgQQcAAYScAAYSMABYCABB4CBBBwABlrq98CBU88bL33CydvX7feftH0BX8kROAAMJOAAMJCAA8BAAg4AAwk4AAwk4AAwkIADwEACDgADCTgADCTgADCQgAPAQAIOAAMJOAAMJOAAMJCAA8BAAg4AAwk4AAwk4AAwkIADwEACDgADCTgADCTgADCQgAPAQAIOAAMJOAAMJOAAMNC+dQ8ArNYbb79/3SMAJ4EjcAAYSMABYCABB4CBBBwABhJwABhIwAFgIAEHgIEEHAAGEnAAGEjAAWCgXQe8qi6sqj+sqruq6s6qunqVgwEA21vmvdAfTvLa7r69qh6T5LaquqW7P7ai2QCAbez6CLy77+/u2xeXv5TkriTnr2owAGB7K3kOvKouSvL0JLce5b6rqupgVR08fOTBVewOAL7uLR3wqjoryTuTvLq7v7j1/u6+trsPdPeBc/efs+zuAIAsGfCq+oZsxPvG7r55NSMBADtZ5lXoleStSe7q7jevbiQAYCfLHIE/O8kPJHluVX148eeFK5oLADiGXf8aWXd/IEmtcBYA4Dh5JzYAGEjAAWAgAQeAgQQcAAYScAAYSMABYCABB4CBBBwABhJwABhIwAFgIAEHgIEEHAAGEnAAGEjAAWAgAQeAgQQcAAYScAAYSMABYCABB4CBBBwABhJwABhIwAFgIAEHgIEEHAAGEnAAGEjAAWAgAQeAgQQcAAYScAAYSMABYCABB4CBBBwABhJwABhIwAFgIAEHgIEEHAAGEnAAGEjAAWAgAQeAgQQcAAYScAAYSMABYCABB4CBBBwABhJwABhIwAFgoKUDXlWnVdWHquq3VzEQALCzVRyBX53krhU8DgBwnJYKeFVdkOR7k1y3mnEAgOOx7BH4zyd5XZK/326Dqrqqqg5W1cHDRx5ccncAQLJEwKvqRUke6O7bjrVdd1/b3Qe6+8C5+8/Z7e4AgE2WOQJ/dpIXV9WnkrwjyXOr6ldXMhUAcEy7Dnh3v767L+jui5K8NMkfdPfLVzYZALAtvwcOAAPtW8WDdPcfJfmjVTwWALAzR+AAMJCAA8BAAg4AAwk4AAwk4AAwkIADwEACDgADCTgADCTgADCQgAPAQAIOAAMJOAAMJOAAMJCAA8BAAg4AAwk4AAwk4AAwkIADwEACDgADCTgADCTgADCQgAPAQAIOAAMJOAAMJOAAMJCAA8BAAg4AAwk4AAwk4AAwkIADwEACDgADCTgADCTgADCQgAPAQAIOAAMJOAAMJOAAMJCAA8BAAg4AAwk4AAwk4AAwkIADwEACDgADCTgADCTgADDQUgGvqsdV1U1V9fGququqnrWqwQCA7e1b8vN/Icl7uvvfVdXpSc5YwUwAwA52HfCqemyS5yR5RZJ090NJHlrNWADAsSxzCv3iJIeT/EpVfaiqrquqM7duVFVXVdXBqjp4+MiDS+wOAHjEMgHfl+TSJG/p7qcn+Zsk12zdqLuv7e4D3X3g3P3nLLE7AOARywT8UJJD3X3r4vpN2Qg6AHCC7Trg3f2XSe6rqqcsbrosycdWMhUAcEzLvgr9lUluXLwC/ZNJfnD5kQCAnSwV8O7+cJIDK5oFADhO3okNAAYScAAYSMABYCABB4CBBBwABhJwABhIwAFgIAEHgIEEHAAGEnAAGEjAAWAgAQeAgQQcAAYScAAYSMABYCABB4CBBBwABhJwABhIwAFgIAEHgIEEHAAGEnAAGEjAAWAgAQeAgQQcAAYScAAYSMABYCABB4CBBBwABhJwABhIwAFgIAEHgIEEHAAGEnAAGEjAAWAgAQeAgQQcAAYScAAYSMABYCABB4CBBBwABhJwABhIwAFgIAEHgIEEHAAGEnAAGGipgFfVa6rqzqr6aFW
9vaoetarBAIDt7TrgVXV+klclOdDdT0tyWpKXrmowAGB7y55C35fk0VW1L8kZST67/EgAwE52HfDu/kySn0tyb5L7k/xVd79363ZVdVVVHayqg4ePPLj7SQGAf7DMKfSzk1yR5ElJzktyZlW9fOt23X1tdx/o7gPn7j9n95MCAP9gmVPoz0tyT3cf7u6/S3Jzku9azVgAwLEsE/B7kzyzqs6oqkpyWZK7VjMWAHAsyzwHfmuSm5LcnuQji8e6dkVzAQDHsG+ZT+7un0nyMyuaBQA4Tt6JDQAGEnAAGEjAAWAgAQeAgQQcAAYScAAYSMABYCABB4CBBBwABhJwABhIwAFgIAEHgIEEHAAGEnAAGEjAAWAgAQeAgQQcAAYScAAYSMABYCABB4CBBBwABhJwABhIwAFgIAEHgIEEHAAGEnAAGEjAAWAgAQeAgQQcAAYScAAYSMABYCABB4CBBBwABhJwABhIwAFgIAEHgIEEHAAGEnAAGEjAAWAgAQeAgQQcAAYScAAYSMABYCABB4CBBBwABtox4FX1tqp6oKo+uum2b6yqW6rqE4uPZ5/YMQGAzY7nCPz6JJdvue2aJO/r7kuSvG9xHQA4SXYMeHe/P8nnt9x8RZIbFpdvSPKSFc8FABzDbp8D/6buvj9JFh8fv92GVXVVVR2sqoOHjzy4y90BAJud8Bexdfe13X2guw+cu/+cE707APi6sNuAf66qnpAki48PrG4kAGAnuw34u5Ncubh8ZZJ3rWYcAOB4HM+vkb09yQeTPKWqDlXVDyd5U5LnV9Unkjx/cR0AOEn27bRBd79sm7suW/EsAMBx8k5sADCQgAPAQAIOAAMJOAAMJOAAMJCAA8BAAg4AAwk4AAxU3X3ydlZ1OMmnv8ZP25/kyAkY51SwV9e2V9eV7N217dV1JXt3bXt1XcneXdtu1/Ut3X3u1htPasB3o6oOdveBdc9xIuzVte3VdSV7d217dV3J3l3bXl1XsnfXtup1OYUOAAMJOAAMNCHg1657gBNor65tr64r2btr26vrSvbu2vbqupK9u7aVruuUfw4cAPhqE47AAYAtBBwABjqlA15Vl1fVn1fV3VV1zbrnWYWqurCq/rCq7qqqO6vq6nXPtGpVdVpVfaiqfnvds6xKVT2uqm6qqo8v/u6ete6ZVqWqXrP4t/jRqnp7VT1q3TPtRlW9raoeqKqPbrrtG6vqlqr6xOLj2euccbe2WdvPLv49/llV/UZVPW6dM+7G0da16b4fr6quqv3rmG1Z262tql656NqdVfXfltnHKRvwqjotyS8leUGSpyZ5WVU9db1TrcTDSV7b3f8syTOT/Oc9sq7Nrk5y17qHWLFfSPKe7v7WJN+ePbK+qjo/yauSHOjupyU5LclL1zvVrl2f5PItt12T5H3dfUmS9y2uT3R9vnpttyR5Wnd/W5K/SPL6kz3UClyfr15XqurCJM9Pcu/JHmiFrs+WtVXVv0pyRZJv6+5/nuTnltnBKRvwJM9Icnd3f7K7H0ryjmwsfLTuvr+7b19c/lI2QnD+eqdanaq6IMn3Jrlu3bOsSlU9Nslzkrw1Sbr7oe7+wnqnWql9SR5dVfuSnJHks2ueZ1e6+/1JPr/l5iuS3LC4fEOSl5zUoVbkaGvr7vd298OLq/8nyQUnfbAlbfN3liT/Pcnrkox9lfU2a/uRJG/q7v+32OaBZfZxKgf8/CT3bbp+KHsodElSVRcleXqSW9c7yUr9fDa+8P5+3YOs0MVJDif5lcVTA9dV1ZnrHmoVuvsz2TgKuDfJ/Un+qrvfu96pVuqbuvv+ZOOH5ySPX/M8J8oPJfnddQ+xClX14iSf6e471j3LCfDkJN9dVbdW1R9X1Xcu82CncsDrKLeN/Wlsq6o6K8k7k7y6u7+47nlWoapelOSB7r5t3bOs2L4klyZ5S3c/PcnfZO6p2K+weE74iiRPSnJekjOr6uXrnYqvRVX9dDaemrtx3bMsq6rOSPLTSd6w7llOkH1Jzs7G06c/keTXq+porTsup3LADyW5cNP1CzL01N5WVfUN2Yj3jd1987rnWaFnJ3lxVX0qG095PLeqfnW9I63EoSSHuvuRMyU3ZSPoe8HzktzT3Ye7+++S3Jzku9Y80yp9rqqekCSLj0udsjzVVNWVSV6U5D/23nhTj3+SjR8m71h8H7kgye1V9c1rnWp1DiW5uTf8STbOVO76RXqncsD/NMklVfWkqjo9Gy+sefeaZ1ra4qettya5q7vfvO55Vqm7X9/dF3T3Rdn4+/qD7h5/NNfdf5nkvqp6yuKmy5J8bI0jrdK9SZ5ZVWcs/m1elj3yAr2Fdye5cnH5yiTvWuMsK1VVlyf5ySQv7u7/u+55VqG7P9Ldj+/uixbfRw4luXTxNbgX/GaS5yZJVT05yelZ4v+6dsoGfPHijB9L8nvZ+Iby691953qnWolnJ/mBbBydfnjx54XrHoodvTLJjVX1Z0m+I8l/XfM8K7E4q3BTktuTfCQb3xNGvo1lVb09yQeTPKWqDlXVDyd5U5LnV9UnsvGq5jetc8bd2mZtv5jkMUluWXwf+eW1DrkL26xrT9hmbW9LcvHiV8vekeTKZc6ceCtVABjolD0CBwC2J+AAMJCAA8BAAg4AAwk4AAwk4AAwkIADwED/H3ZBvi8oWJldAAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "a = {'v': (0, 1), 'w': 0}\n",
+ "z = (2, 3, 5, 7)\n",
+ "S = monte_carlo_localization(a, z, 1000, P_motion_sample, P_sensor, m, S)\n",
+ "grid = [[0]*17 for _ in range(11)]\n",
+ "for x, y, _ in S:\n",
+ " if 0 <= x < 11 and 0 <= y < 17:\n",
+ " grid[x][y] += 1\n",
+ "print(\"GRID:\")\n",
+ "print_table(grid)\n",
+ "heatmap(grid, cmap='Oranges')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "In this case, every one of the 1000 samples lands in a single cell, `(1, 8)`, so the robot is now essentially certain of its position."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## DECISION THEORETIC AGENT\n",
+ "We now move into the domain of probabilistic decision making.\n",
+ " \n",
+ "To make choices between different possible plans in a certain situation in a given environment, an agent must have _preference_ between the possible outcomes of the various plans.\n",
+ " \n",
+ "__Utility theory__ is used to represent and reason with preferences.\n",
+ "The agent prefers states with a higher _utility_.\n",
+ "While constructing multi-agent systems, one major element in the design is the mechanism the agents use for making decisions about which actions to adopt in order to achieve their goals.\n",
+    "What is usually required is a mechanism that ensures the adopted actions benefit both the individual agents and the community of which they are part.\n",
+ "The utility of a state is _relative_ to an agent.\n",
+ " \n",
+ "Preferences, as expressed by utilities, are combined with probabilities in the general theory of rational decisions called __decision theory__.\n",
+ " \n",
+ "An agent is said to be _rational_ if and only if it chooses the action that yields the highest expected utility, averaged over all the possible outcomes of the action."
+ ]
+ },
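+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As a tiny worked example with invented numbers: suppose action $a$ leads to outcome $s_1$ with probability 0.7 and to $s_2$ with probability 0.3, with utilities $U(s_1) = 10$ and $U(s_2) = 0$; then $EU(a) = 0.7 \\times 10 + 0.3 \\times 0 = 7$, and a rational agent prefers $a$ over any action with a lower expected utility."
+   ]
+  },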
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Here we'll see how a decision-theoretic agent is implemented in the module."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 97,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "def DTAgentProgram ( belief_state ): \n",
+ " """A decision-theoretic agent. [Figure 13.1]""" \n",
+ " def program ( percept ): \n",
+ " belief_state . observe ( program . action , percept ) \n",
+ " program . action = argmax ( belief_state . actions (), \n",
+ " key = belief_state . expected_outcome_utility ) \n",
+ " return program . action \n",
+ " program . action = None \n",
+ " return program \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(DTAgentProgram)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The `DTAgentProgram` function is pretty self-explanatory.\n",
+ " \n",
+ "It encapsulates a function `program` that takes in an observation or a `percept`, updates its `belief_state` and returns the action that maximizes the `expected_outcome_utility`."
+ ]
+ },
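+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To see the interface in action, here is a hypothetical stub belief state (the names and utilities below are invented for illustration); `DTAgentProgram` simply picks the action with the highest expected outcome utility."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "class DummyBeliefState:\n",
+    "    \"\"\"A made-up belief state exposing the methods DTAgentProgram expects\"\"\"\n",
+    "    def observe(self, action, percept):\n",
+    "        pass  # a real belief state would update its state estimate here\n",
+    "\n",
+    "    def actions(self):\n",
+    "        return ['left', 'right']\n",
+    "\n",
+    "    def expected_outcome_utility(self, action):\n",
+    "        return {'left': 0.3, 'right': 0.7}[action]\n",
+    "\n",
+    "program = DTAgentProgram(DummyBeliefState())\n",
+    "program('some percept')  # -> 'right'"
+   ]
+  },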
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## INFORMATION GATHERING AGENT\n",
+ "Before we discuss what an information gathering agent is, we'll need to know what decision networks are.\n",
+ "For an agent in an environment, a decision network represents information about the agent's current state, its possible actions, the state that will result from the agent's action, and the utility of that state.\n",
+    "Decision networks have three primary kinds of nodes:\n",
+ "1. __Chance nodes__: These represent random variables, just like in Bayesian networks.\n",
+    "2. __Decision nodes__: These represent points where the decision-maker has a choice between different actions and tries to find the optimal decision with regard to cost, safety and resulting utility.\n",
+ "3. __Utility nodes__: These represent the agent's utility function.\n",
+ "A description of the agent's utility as a function is associated with a utility node.\n",
+ " \n",
+ " \n",
+ "To evaluate a decision network, we do the following:\n",
+ "1. Initialize the evidence variables according to the current state.\n",
+    "2. For each possible value of the decision node, calculate the posterior probabilities and the utility resulting from that action.\n",
+ "3. Return the action with the highest utility.\n",
+ " \n",
+ "Let's have a look at the implementation of the `DecisionNetwork` class."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 98,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "class DecisionNetwork ( BayesNet ): \n",
+ " """An abstract class for a decision network as a wrapper for a BayesNet. \n",
+ " Represents an agent's current state, its possible actions, reachable states \n",
+ " and utilities of those states.""" \n",
+ "\n",
+ " def __init__ ( self , action , infer ): \n",
+ " """action: a single action node \n",
+ " infer: the preferred method to carry out inference on the given BayesNet""" \n",
+ " super ( DecisionNetwork , self ) . __init__ () \n",
+ " self . action = action \n",
+ " self . infer = infer \n",
+ "\n",
+ " def best_action ( self ): \n",
+ " """Return the best action in the network""" \n",
+ " return self . action \n",
+ "\n",
+ " def get_utility ( self , action , state ): \n",
+ " """Return the utility for a particular action and state in the network""" \n",
+ " raise NotImplementedError \n",
+ "\n",
+ " def get_expected_utility ( self , action , evidence ): \n",
+ " """Compute the expected utility given an action and evidence""" \n",
+ " u = 0.0 \n",
+ " prob_dist = self . infer ( action , evidence , self ) . prob \n",
+ " for item , _ in prob_dist . items (): \n",
+ " u += prob_dist [ item ] * self . get_utility ( action , item ) \n",
+ "\n",
+ " return u \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(DecisionNetwork)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The `DecisionNetwork` class inherits from `BayesNet` and has a few extra helper methods.\n",
+ " \n",
+ "`best_action` returns the best action in the network.\n",
+ " \n",
+ "`get_utility` is an abstract method which is supposed to return the utility of a particular action and state in the network.\n",
+ " \n",
+ "`get_expected_utility` computes the expected utility, given an action and evidence.\n",
+ " "
+ ]
+ },
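+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Since `get_utility` is abstract, a concrete network has to supply it. Here is a minimal hypothetical sketch (the action names and utility table are invented):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "class UmbrellaNetwork(DecisionNetwork):\n",
+    "    \"\"\"A toy decision network with an invented utility table\"\"\"\n",
+    "    def get_utility(self, action, state):\n",
+    "        return {('take', True): 7, ('take', False): 5,\n",
+    "                ('leave', True): 0, ('leave', False): 10}[(action, state)]"
+   ]
+  },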
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Before we proceed, we need to know a few more terms.\n",
+ " \n",
+ "Having __perfect information__ refers to a state of being fully aware of the current state, the cost functions and the outcomes of actions.\n",
+ "This in turn allows an agent to find the exact utility value of each state.\n",
+ "If an agent has perfect information about the environment, maximum expected utility calculations are exact and can be computed with absolute certainty.\n",
+ " \n",
+ "In decision theory, the __value of perfect information__ (VPI) is the price that an agent would be willing to pay in order to gain access to _perfect information_.\n",
+ "VPI calculations are extensively used to calculate expected utilities for nodes in a decision network.\n",
+ " \n",
+    "For a random variable $E_j$ whose value is currently unknown, the value of discovering $E_j$, given current information $e$, must average over all possible values $e_{jk}$ that we might discover for $E_j$, using our _current_ beliefs about its value.\n",
+ "The VPI of $E_j$ is then given by:\n",
+ " \n",
+ " \n",
+ "$$VPI_e(E_j) = \\left(\\sum_{k}P(E_j=e_{jk}\\ |\\ e) EU(\\alpha_{e_{jk}}\\ |\\ e, E_j=e_{jk})\\right) - EU(\\alpha\\ |\\ e)$$\n",
+ " \n",
+    "VPI is _non-negative_, _non-additive_ and _order-independent_."
+ ]
+ },
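+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To make the formula concrete, here is a hand-computed sketch with invented numbers for a single boolean evidence variable:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# invented numbers, purely to illustrate the VPI formula above\n",
+    "p_e = {True: 0.6, False: 0.4}             # P(E_j = e_jk | e)\n",
+    "eu_best_given = {True: 10.0, False: 4.0}  # EU(alpha_{e_jk} | e, E_j = e_jk)\n",
+    "eu_best_now = 6.5                         # EU(alpha | e)\n",
+    "\n",
+    "sum(p_e[v] * eu_best_given[v] for v in p_e) - eu_best_now  # -> 1.1, non-negative as expected"
+   ]
+  },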
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "An information gathering agent explores decision networks as and when required, guided by heuristics based on the VPI of the nodes.\n",
+    "A sensible agent should ask questions in a reasonable order, avoid irrelevant questions, weigh the importance of each piece of information against its cost, and stop asking questions when that is appropriate.\n",
+    "_VPI_ is the heuristic that captures all of these points, since the agent ultimately wants to maximize utility while balancing the cost and extent of gathering the required information.\n",
+ " \n",
+ "As an overview, an information gathering agent works by repeatedly selecting the observations with the highest information value, until the cost of the next observation is greater than its expected benefit.\n",
+ " \n",
+ "The `InformationGatheringAgent` class is an abstract class that inherits from `Agent` and works on the principles discussed above.\n",
+ "Let's have a look.\n",
+ " "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 99,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "class InformationGatheringAgent ( Agent ): \n",
+ " """A simple information gathering agent. The agent works by repeatedly selecting \n",
+ " the observation with the highest information value, until the cost of the next \n",
+ " observation is greater than its expected benefit. [Figure 16.9]""" \n",
+ "\n",
+ " def __init__ ( self , decnet , infer , initial_evidence = None ): \n",
+ " """decnet: a decision network \n",
+ " infer: the preferred method to carry out inference on the given decision network \n",
+ " initial_evidence: initial evidence""" \n",
+ " self . decnet = decnet \n",
+ " self . infer = infer \n",
+ " self . observation = initial_evidence or [] \n",
+ " self . variables = self . decnet . nodes \n",
+ "\n",
+ " def integrate_percept ( self , percept ): \n",
+ " """Integrate the given percept into the decision network""" \n",
+ " raise NotImplementedError \n",
+ "\n",
+ " def execute ( self , percept ): \n",
+ " """Execute the information gathering algorithm""" \n",
+ " self . observation = self . integrate_percept ( percept ) \n",
+ " vpis = self . vpi_cost_ratio ( self . variables ) \n",
+    "        j = vpis . index ( max ( vpis )) \n",
+ " variable = self . variables [ j ] \n",
+ "\n",
+ " if self . vpi ( variable ) > self . cost ( variable ): \n",
+ " return self . request ( variable ) \n",
+ "\n",
+ " return self . decnet . best_action () \n",
+ "\n",
+ " def request ( self , variable ): \n",
+ " """Return the value of the given random variable as the next percept""" \n",
+ " raise NotImplementedError \n",
+ "\n",
+ " def cost ( self , var ): \n",
+ " """Return the cost of obtaining evidence through tests, consultants or questions""" \n",
+ " raise NotImplementedError \n",
+ "\n",
+ " def vpi_cost_ratio ( self , variables ): \n",
+ " """Return the VPI to cost ratio for the given variables""" \n",
+ " v_by_c = [] \n",
+ " for var in variables : \n",
+ " v_by_c . append ( self . vpi ( var ) / self . cost ( var )) \n",
+ " return v_by_c \n",
+ "\n",
+ " def vpi ( self , variable ): \n",
+ " """Return VPI for a given variable""" \n",
+ " vpi = 0.0 \n",
+ " prob_dist = self . infer ( variable , self . observation , self . decnet ) . prob \n",
+ " for item , _ in prob_dist . items (): \n",
+ " post_prob = prob_dist [ item ] \n",
+ " new_observation = list ( self . observation ) \n",
+ " new_observation . append ( item ) \n",
+ " expected_utility = self . decnet . get_expected_utility ( variable , new_observation ) \n",
+ " vpi += post_prob * expected_utility \n",
+ "\n",
+ " vpi -= self . decnet . get_expected_utility ( variable , self . observation ) \n",
+ " return vpi \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(InformationGatheringAgent)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The `cost` method is an abstract method that returns the cost of obtaining the evidence through tests, consultants, questions or any other means.\n",
+ " \n",
+ "The `request` method returns the value of the given random variable as the next percept.\n",
+ " \n",
+ "The `vpi_cost_ratio` method returns a list of VPI divided by cost for each variable in the `variables` list provided to it.\n",
+ " \n",
+    "The `vpi` method calculates the VPI for a given variable.\n",
+ " \n",
+ "And finally, the `execute` method executes the general information gathering algorithm, as described in __figure 16.9__ in the book.\n",
+ " \n",
+    "Our agent implements a form of information gathering called __myopic__, as the VPI formula is used shortsightedly here.\n",
+ "It calculates the value of information as if only a single evidence variable will be acquired.\n",
+ "This is similar to greedy search, where we do not look at the bigger picture and aim for local optimizations to hopefully reach the global optimum.\n",
+ "This often works well in practice but a myopic agent might hastily take an action when it would have been better to request more variables before taking an action.\n",
+    "A _conditional plan_, on the other hand, might work better in some scenarios.\n",
+ " \n"
+ ]
+ },
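+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As with the decision network, the abstract methods have to be supplied by a subclass. Here is a minimal hypothetical sketch (every name and value below is invented):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "class FixedCostGatherer(InformationGatheringAgent):\n",
+    "    \"\"\"A toy gatherer: constant observation cost, percepts appended as-is\"\"\"\n",
+    "    def integrate_percept(self, percept):\n",
+    "        return self.observation + [percept]\n",
+    "\n",
+    "    def request(self, variable):\n",
+    "        return 'observed_{}'.format(variable)  # a real agent would query a sensor here\n",
+    "\n",
+    "    def cost(self, var):\n",
+    "        return 1.0"
+   ]
+  },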
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "With this we conclude this notebook."
+ ]
}
],
"metadata": {
@@ -37,9 +6528,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.5.1"
+ "version": "3.6.9"
}
},
"nbformat": 4,
- "nbformat_minor": 0
+ "nbformat_minor": 2
}
diff --git a/probability.py b/probability.py
index b73ddfb09..e1e77d224 100644
--- a/probability.py
+++ b/probability.py
@@ -1,32 +1,32 @@
-"""Probability models. (Chapter 13-15)
-"""
+"""Probability models (Chapter 13-15)"""
-from utils import * # noqa
-from logic import extend
-
-import random
from collections import defaultdict
from functools import reduce
-# ______________________________________________________________________________
+from agents import Agent
+from utils import *
def DTAgentProgram(belief_state):
- "A decision-theoretic agent. [Fig. 13.1]"
+ """
+ [Figure 13.1]
+ A decision-theoretic agent.
+ """
+
def program(percept):
belief_state.observe(program.action, percept)
- program.action = argmax(belief_state.actions(),
- belief_state.expected_outcome_utility)
+ program.action = max(belief_state.actions(), key=belief_state.expected_outcome_utility)
return program.action
+
program.action = None
return program
+
# ______________________________________________________________________________
class ProbDist:
-
- """A discrete probability distribution. You name the random variable
+ """A discrete probability distribution. You name the random variable
in the constructor, then assign and query probability of values.
>>> P = ProbDist('Flip'); P['H'], P['T'] = 0.25, 0.75; P['H']
0.25
@@ -35,26 +35,26 @@ class ProbDist:
(0.125, 0.375, 0.5)
"""
- def __init__(self, varname='?', freqs=None):
- """If freqs is given, it is a dictionary of value: frequency pairs,
- and the ProbDist then is normalized."""
+ def __init__(self, var_name='?', freq=None):
+ """If freq is given, it is a dictionary of values - frequency pairs,
+ then ProbDist is normalized."""
self.prob = {}
- self.varname = varname
+ self.var_name = var_name
self.values = []
- if freqs:
- for (v, p) in list(freqs.items()):
+ if freq:
+ for (v, p) in freq.items():
self[v] = p
self.normalize()
def __getitem__(self, val):
- "Given a value, return P(value)."
+ """Given a value, return P(value)."""
try:
return self.prob[val]
except KeyError:
return 0
def __setitem__(self, val, p):
- "Set P(val) = p."
+ """Set P(val) = p."""
if val not in self.values:
self.values.append(val)
self.prob[val] = p
@@ -62,29 +62,23 @@ def __setitem__(self, val, p):
def normalize(self):
"""Make sure the probabilities of all values sum to 1.
Returns the normalized distribution.
- Raises a ZeroDivisionError if the sum of the values is 0.
- >>> P = ProbDist('Flip'); P['H'], P['T'] = 35, 65
- >>> P = P.normalize()
- >>> print '%5.3f %5.3f' % (P.prob['H'], P.prob['T'])
- 0.350 0.650
- """
- total = float(sum(self.prob.values()))
- if not (1.0-epsilon < total < 1.0+epsilon):
+ Raises a ZeroDivisionError if the sum of the values is 0."""
+ total = sum(self.prob.values())
+ if not np.isclose(total, 1.0):
for val in self.prob:
self.prob[val] /= total
return self
- def show_approx(self, numfmt='%.3g'):
+ def show_approx(self, numfmt='{:.3g}'):
"""Show the probabilities rounded and sorted by key, for the
sake of portable doctests."""
- return ', '.join([('%s: ' + numfmt) % (v, p)
- for (v, p) in sorted(self.prob.items())])
+ return ', '.join([('{}: ' + numfmt).format(v, p) for (v, p) in sorted(self.prob.items())])
-epsilon = 0.001
+ def __repr__(self):
+ return "P({})".format(self.var_name)
class JointProbDist(ProbDist):
-
"""A discrete probability distribute over a set of variables.
>>> P = JointProbDist(['X', 'Y']); P[1, 1] = 0.25
>>> P[1, 1]
@@ -99,12 +93,12 @@ def __init__(self, variables):
self.vals = defaultdict(list)
def __getitem__(self, values):
- "Given a tuple or dict of values, return P(values)."
+ """Given a tuple or dict of values, return P(values)."""
values = event_values(values, self.variables)
return ProbDist.__getitem__(self, values)
def __setitem__(self, values, p):
- """Set P(values) = p. Values can be a tuple or a dict; it must
+ """Set P(values) = p. Values can be a tuple or a dict; it must
have a value for each of the variables in the joint. Also keep track
of the values we have seen so far for each variable."""
values = event_values(values, self.variables)
@@ -114,15 +108,15 @@ def __setitem__(self, values, p):
self.vals[var].append(val)
def values(self, var):
- "Return the set of possible values for a variable."
+ """Return the set of possible values for a variable."""
return self.vals[var]
def __repr__(self):
- return "P(%s)" % self.variables
+ return "P({})".format(self.variables)
def event_values(event, variables):
- """Return a tuple of the values of variables variables in event.
+ """Return a tuple of the values of variables in event.
>>> event_values ({'A': 10, 'B': 9, 'C': 8}, ['C', 'A'])
(8, 10)
>>> event_values ((1, 2), ['C', 'A'])
@@ -133,12 +127,15 @@ def event_values(event, variables):
else:
return tuple([event[var] for var in variables])
+
# ______________________________________________________________________________
def enumerate_joint_ask(X, e, P):
- """Return a probability distribution over the values of the variable X,
- given the {var:val} observations e, in the JointProbDist P. [Section 13.3]
+ """
+ [Section 13.3]
+ Return a probability distribution over the values of the variable X,
+ given the {var:val} observations e, in the JointProbDist P.
>>> P = JointProbDist(['X', 'Y'])
>>> P[0,0] = 0.25; P[0,1] = 0.5; P[1,1] = P[2,1] = 0.125
>>> enumerate_joint_ask('X', dict(Y=1), P).show_approx()
@@ -158,20 +155,20 @@ def enumerate_joint(variables, e, P):
if not variables:
return P[e]
Y, rest = variables[0], variables[1:]
- return sum([enumerate_joint(rest, extend(e, Y, y), P)
- for y in P.values(Y)])
+ return sum([enumerate_joint(rest, extend(e, Y, y), P) for y in P.values(Y)])
+
# ______________________________________________________________________________
class BayesNet:
+ """Bayesian network containing only boolean-variable nodes."""
- "Bayesian network containing only boolean-variable nodes."
-
- def __init__(self, node_specs=[]):
- "nodes must be ordered with parents before children."
+ def __init__(self, node_specs=None):
+ """Nodes must be ordered with parents before children."""
self.nodes = []
self.variables = []
+ node_specs = node_specs or []
for node_spec in node_specs:
self.add(node_spec)
@@ -180,7 +177,7 @@ def add(self, node_spec):
net, and its variable must not."""
node = BayesNode(*node_spec)
assert node.variable not in self.variables
- assert every(lambda parent: parent in self.variables, node.parents)
+ assert all((parent in self.variables) for parent in node.parents)
self.nodes.append(node)
self.variables.append(node.variable)
for parent in node.parents:
@@ -193,24 +190,115 @@ def variable_node(self, var):
for n in self.nodes:
if n.variable == var:
return n
- raise Exception("No such variable: %s" % var)
+ raise Exception("No such variable: {}".format(var))
def variable_values(self, var):
- "Return the domain of var."
+ """Return the domain of var."""
return [True, False]
def __repr__(self):
- return 'BayesNet(%r)' % self.nodes
+ return 'BayesNet({0!r})'.format(self.nodes)
-class BayesNode:
+class DecisionNetwork(BayesNet):
+ """An abstract class for a decision network as a wrapper for a BayesNet.
+ Represents an agent's current state, its possible actions, reachable states
+ and utilities of those states."""
+
+ def __init__(self, action, infer):
+ """action: a single action node
+ infer: the preferred method to carry out inference on the given BayesNet"""
+ super(DecisionNetwork, self).__init__()
+ self.action = action
+ self.infer = infer
+
+ def best_action(self):
+ """Return the best action in the network"""
+ return self.action
+
+ def get_utility(self, action, state):
+ """Return the utility for a particular action and state in the network"""
+ raise NotImplementedError
+
+ def get_expected_utility(self, action, evidence):
+ """Compute the expected utility given an action and evidence"""
+ u = 0.0
+ prob_dist = self.infer(action, evidence, self).prob
+ for item, _ in prob_dist.items():
+ u += prob_dist[item] * self.get_utility(action, item)
+ return u
+
+
+class InformationGatheringAgent(Agent):
+ """
+ [Figure 16.9]
+ A simple information gathering agent. The agent works by repeatedly selecting
+ the observation with the highest information value, until the cost of the next
+ observation is greater than its expected benefit."""
+
+ def __init__(self, decnet, infer, initial_evidence=None):
+ """decnet: a decision network
+ infer: the preferred method to carry out inference on the given decision network
+ initial_evidence: initial evidence"""
+ self.decnet = decnet
+ self.infer = infer
+ self.observation = initial_evidence or []
+ self.variables = self.decnet.nodes
+
+ def integrate_percept(self, percept):
+ """Integrate the given percept into the decision network"""
+ raise NotImplementedError
+
+ def execute(self, percept):
+ """Execute the information gathering algorithm"""
+ self.observation = self.integrate_percept(percept)
+ vpis = self.vpi_cost_ratio(self.variables)
+        j = vpis.index(max(vpis))  # index of the variable with the best VPI-to-cost ratio
+ variable = self.variables[j]
+
+ if self.vpi(variable) > self.cost(variable):
+ return self.request(variable)
+
+ return self.decnet.best_action()
+
+ def request(self, variable):
+ """Return the value of the given random variable as the next percept"""
+ raise NotImplementedError
+
+ def cost(self, var):
+ """Return the cost of obtaining evidence through tests, consultants or questions"""
+ raise NotImplementedError
+
+ def vpi_cost_ratio(self, variables):
+ """Return the VPI to cost ratio for the given variables"""
+ v_by_c = []
+ for var in variables:
+ v_by_c.append(self.vpi(var) / self.cost(var))
+ return v_by_c
+
+ def vpi(self, variable):
+ """Return VPI for a given variable"""
+ vpi = 0.0
+ prob_dist = self.infer(variable, self.observation, self.decnet).prob
+ for item, _ in prob_dist.items():
+ post_prob = prob_dist[item]
+ new_observation = list(self.observation)
+ new_observation.append(item)
+ expected_utility = self.decnet.get_expected_utility(variable, new_observation)
+ vpi += post_prob * expected_utility
+
+ vpi -= self.decnet.get_expected_utility(variable, self.observation)
+ return vpi
+
+
+class BayesNode:
"""A conditional probability distribution for a boolean variable,
P(X | parents). Part of a BayesNet."""
def __init__(self, X, parents, cpt):
"""X is a variable name, and parents a sequence of variable
- names or a space-separated string. cpt, the conditional
+ names or a space-separated string. cpt, the conditional
probability table, takes one of these forms:
* A number, the unconditional probability P(X=true). You can
@@ -241,12 +329,12 @@ def __init__(self, X, parents, cpt):
elif isinstance(cpt, dict):
# one parent, 1-tuple
if cpt and isinstance(list(cpt.keys())[0], bool):
- cpt = dict(((v,), p) for v, p in list(cpt.items()))
+ cpt = {(v,): p for v, p in cpt.items()}
assert isinstance(cpt, dict)
- for vs, p in list(cpt.items()):
+ for vs, p in cpt.items():
assert isinstance(vs, tuple) and len(vs) == len(parents)
- assert every(lambda v: isinstance(v, bool), vs)
+ assert all(isinstance(v, bool) for v in vs)
assert 0 <= p <= 1
self.variable = X
@@ -264,7 +352,7 @@ def p(self, value, event):
0.375"""
assert isinstance(value, bool)
ptrue = self.cpt[event_values(event, self.parents)]
- return (ptrue if value else 1 - ptrue)
+ return ptrue if value else 1 - ptrue
def sample(self, event):
"""Sample from the distribution for this variable conditioned
@@ -276,25 +364,27 @@ def sample(self, event):
def __repr__(self):
return repr((self.variable, ' '.join(self.parents)))
-# Burglary example [Fig. 14.2]
+
+# Burglary example [Figure 14.2]
T, F = True, False
-burglary = BayesNet([
- ('Burglary', '', 0.001),
- ('Earthquake', '', 0.002),
- ('Alarm', 'Burglary Earthquake',
- {(T, T): 0.95, (T, F): 0.94, (F, T): 0.29, (F, F): 0.001}),
- ('JohnCalls', 'Alarm', {T: 0.90, F: 0.05}),
- ('MaryCalls', 'Alarm', {T: 0.70, F: 0.01})
-])
+burglary = BayesNet([('Burglary', '', 0.001),
+ ('Earthquake', '', 0.002),
+ ('Alarm', 'Burglary Earthquake',
+ {(T, T): 0.95, (T, F): 0.94, (F, T): 0.29, (F, F): 0.001}),
+ ('JohnCalls', 'Alarm', {T: 0.90, F: 0.05}),
+ ('MaryCalls', 'Alarm', {T: 0.70, F: 0.01})])
+
# ______________________________________________________________________________
def enumeration_ask(X, e, bn):
- """Return the conditional probability distribution of variable X
- given evidence e, from BayesNet bn. [Fig. 14.9]
+ """
+ [Figure 14.9]
+ Return the conditional probability distribution of variable X
+ given evidence e, from BayesNet bn.
>>> enumeration_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary
... ).show_approx()
'False: 0.716, True: 0.284'"""
@@ -320,11 +410,14 @@ def enumerate_all(variables, e, bn):
return sum(Ynode.p(y, e) * enumerate_all(rest, extend(e, Y, y), bn)
for y in bn.variable_values(Y))
+
# ______________________________________________________________________________
def elimination_ask(X, e, bn):
- """Compute bn's P(X|e) by variable elimination. [Fig. 14.11]
+ """
+ [Figure 14.11]
+ Compute bn's P(X|e) by variable elimination.
>>> elimination_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary
... ).show_approx()
'False: 0.716, True: 0.284'"""
@@ -338,7 +431,7 @@ def elimination_ask(X, e, bn):
def is_hidden(var, X, e):
- "Is var a hidden variable when querying P(X|e)?"
+ """Is var a hidden variable when querying P(X|e)?"""
return var != X and var not in e
@@ -348,8 +441,8 @@ def make_factor(var, e, bn):
is the pointwise product of these factors for bn's variables."""
node = bn.variable_node(var)
variables = [X for X in [var] + node.parents if X not in e]
- cpt = dict((event_values(e1, variables), node.p(e1[var], e1))
- for e1 in all_events(variables, bn, e))
+ cpt = {event_values(e1, variables): node.p(e1[var], e1)
+ for e1 in all_events(variables, bn, e)}
return Factor(variables, cpt)
@@ -358,7 +451,7 @@ def pointwise_product(factors, bn):
def sum_out(var, factors, bn):
- "Eliminate var from all factors by summing over its values."
+ """Eliminate var from all factors by summing over its values."""
result, var_factors = [], []
for f in factors:
(var_factors if var in f.variables else result).append(f)
@@ -367,42 +460,37 @@ def sum_out(var, factors, bn):
class Factor:
-
- "A factor in a joint distribution."
+ """A factor in a joint distribution."""
def __init__(self, variables, cpt):
self.variables = variables
self.cpt = cpt
def pointwise_product(self, other, bn):
- "Multiply two factors, combining their variables."
+ """Multiply two factors, combining their variables."""
variables = list(set(self.variables) | set(other.variables))
- cpt = dict((event_values(e, variables), self.p(e) * other.p(e))
- for e in all_events(variables, bn, {}))
+ cpt = {event_values(e, variables): self.p(e) * other.p(e) for e in all_events(variables, bn, {})}
return Factor(variables, cpt)
def sum_out(self, var, bn):
- "Make a factor eliminating var by summing over its values."
+ """Make a factor eliminating var by summing over its values."""
variables = [X for X in self.variables if X != var]
- cpt = dict((event_values(e, variables),
- sum(self.p(extend(e, var, val))
- for val in bn.variable_values(var)))
- for e in all_events(variables, bn, {}))
+ cpt = {event_values(e, variables): sum(self.p(extend(e, var, val)) for val in bn.variable_values(var))
+ for e in all_events(variables, bn, {})}
return Factor(variables, cpt)
def normalize(self):
- "Return my probabilities; must be down to one variable."
+ """Return my probabilities; must be down to one variable."""
assert len(self.variables) == 1
- return ProbDist(self.variables[0],
- dict((k, v) for ((k,), v) in list(self.cpt.items())))
+ return ProbDist(self.variables[0], {k: v for ((k,), v) in self.cpt.items()})
def p(self, e):
- "Look up my value tabulated for e."
+ """Look up my value tabulated for e."""
return self.cpt[event_values(e, self.variables)]
def all_events(variables, bn, e):
- "Yield every way of extending e with values for all variables."
+ """Yield every way of extending e with values for all variables."""
if not variables:
yield e
else:
@@ -411,34 +499,42 @@ def all_events(variables, bn, e):
for x in bn.variable_values(X):
yield extend(e1, X, x)
+
# ______________________________________________________________________________
-# Fig. 14.12a: sprinkler network
+# [Figure 14.12a]: sprinkler network
+
+
+sprinkler = BayesNet([('Cloudy', '', 0.5),
+ ('Sprinkler', 'Cloudy', {T: 0.10, F: 0.50}),
+ ('Rain', 'Cloudy', {T: 0.80, F: 0.20}),
+ ('WetGrass', 'Sprinkler Rain',
+ {(T, T): 0.99, (T, F): 0.90, (F, T): 0.90, (F, F): 0.00})])
-sprinkler = BayesNet([
- ('Cloudy', '', 0.5),
- ('Sprinkler', 'Cloudy', {T: 0.10, F: 0.50}),
- ('Rain', 'Cloudy', {T: 0.80, F: 0.20}),
- ('WetGrass', 'Sprinkler Rain',
- {(T, T): 0.99, (T, F): 0.90, (F, T): 0.90, (F, F): 0.00})])
# ______________________________________________________________________________
def prior_sample(bn):
- """Randomly sample from bn's full joint distribution. The result
- is a {variable: value} dict. [Fig. 14.13]"""
+ """
+ [Figure 14.13]
+ Randomly sample from bn's full joint distribution.
+ The result is a {variable: value} dict.
+ """
event = {}
for node in bn.nodes:
event[node.variable] = node.sample(event)
return event
+
# _________________________________________________________________________
-def rejection_sampling(X, e, bn, N):
- """Estimate the probability distribution of variable X given
- evidence e in BayesNet bn, using N samples. [Fig. 14.14]
+def rejection_sampling(X, e, bn, N=10000):
+ """
+ [Figure 14.14]
+ Estimate the probability distribution of variable X given
+ evidence e in BayesNet bn, using N samples.
Raises a ZeroDivisionError if all the N samples are rejected,
i.e., inconsistent with e.
>>> random.seed(47)
@@ -446,44 +542,47 @@ def rejection_sampling(X, e, bn, N):
... burglary, 10000).show_approx()
'False: 0.7, True: 0.3'
"""
- counts = dict((x, 0)
- for x in bn.variable_values(X)) # bold N in Fig. 14.14
+ counts = {x: 0 for x in bn.variable_values(X)} # bold N in [Figure 14.14]
for j in range(N):
- sample = prior_sample(bn) # boldface x in Fig. 14.14
+ sample = prior_sample(bn) # boldface x in [Figure 14.14]
if consistent_with(sample, e):
counts[sample[X]] += 1
return ProbDist(X, counts)
def consistent_with(event, evidence):
- "Is event consistent with the given evidence?"
- return all(evidence.get(k, v) == v
- for k, v in list(event.items()))
+ """Is event consistent with the given evidence?"""
+ return all(evidence.get(k, v) == v for k, v in event.items())
+
# _________________________________________________________________________
-def likelihood_weighting(X, e, bn, N):
- """Estimate the probability distribution of variable X given
- evidence e in BayesNet bn. [Fig. 14.15]
+def likelihood_weighting(X, e, bn, N=10000):
+ """
+ [Figure 14.15]
+ Estimate the probability distribution of variable X given
+ evidence e in BayesNet bn.
>>> random.seed(1017)
>>> likelihood_weighting('Burglary', dict(JohnCalls=T, MaryCalls=T),
... burglary, 10000).show_approx()
'False: 0.702, True: 0.298'
"""
- W = dict((x, 0) for x in bn.variable_values(X))
+ W = {x: 0 for x in bn.variable_values(X)}
for j in range(N):
- sample, weight = weighted_sample(bn, e) # boldface x, w in Fig. 14.15
+ sample, weight = weighted_sample(bn, e) # boldface x, w in [Figure 14.15]
W[sample[X]] += weight
return ProbDist(X, W)
def weighted_sample(bn, e):
- """Sample an event from bn that's consistent with the evidence e;
+ """
+ Sample an event from bn that's consistent with the evidence e;
return the event and its weight, the likelihood that the event
- accords to the evidence."""
+ accords to the evidence.
+ """
w = 1
- event = dict(e) # boldface x in Fig. 14.15
+ event = dict(e) # boldface x in [Figure 14.15]
for node in bn.nodes:
Xi = node.variable
if Xi in e:
@@ -492,21 +591,16 @@ def weighted_sample(bn, e):
event[Xi] = node.sample(event)
return event, w
+
# _________________________________________________________________________
-def gibbs_ask(X, e, bn, N):
- """[Fig. 14.16]
- >>> random.seed(1017)
- >>> gibbs_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary, 1000
- ... ).show_approx()
- 'False: 0.738, True: 0.262'
- """
+def gibbs_ask(X, e, bn, N=1000):
+ """[Figure 14.16]"""
assert X not in e, "Query variable must be distinct from evidence"
- counts = dict((x, 0)
- for x in bn.variable_values(X)) # bold N in Fig. 14.16
+ counts = {x: 0 for x in bn.variable_values(X)} # bold N in [Figure 14.16]
Z = [var for var in bn.variables if var not in e]
- state = dict(e) # boldface x in Fig. 14.16
+ state = dict(e) # boldface x in [Figure 14.16]
for Zi in Z:
state[Zi] = random.choice(bn.variable_values(Zi))
for j in range(N):
@@ -525,26 +619,22 @@ def markov_blanket_sample(X, e, bn):
Q = ProbDist(X)
for xi in bn.variable_values(X):
ei = extend(e, X, xi)
- # [Equation 14.12:]
- Q[xi] = Xnode.p(xi, e) * product(Yj.p(ei[Yj.variable], ei)
- for Yj in Xnode.children)
+ # [Equation 14.12]
+ Q[xi] = Xnode.p(xi, e) * product(Yj.p(ei[Yj.variable], ei) for Yj in Xnode.children)
# (assuming a Boolean variable here)
return probability(Q.normalize()[True])
+
# _________________________________________________________________________
class HiddenMarkovModel:
+ """A Hidden markov model which takes Transition model and Sensor model as inputs"""
- """ A Hidden markov model which takes Transition model and Sensor model as inputs"""
-
- def __init__(self, transition_model, sensor_model, prior= [0.5, 0.5]):
+ def __init__(self, transition_model, sensor_model, prior=None):
self.transition_model = transition_model
self.sensor_model = sensor_model
- self.prior = prior
-
- def transition_model(self):
- return self.transition_model
+ self.prior = prior or [0.5, 0.5]
def sensor_dist(self, ev):
if ev is True:
@@ -558,140 +648,146 @@ def forward(HMM, fv, ev):
scalar_vector_product(fv[1], HMM.transition_model[1]))
sensor_dist = HMM.sensor_dist(ev)
- return(normalize(element_wise_product(sensor_dist, prediction)))
+ return normalize(element_wise_product(sensor_dist, prediction))
def backward(HMM, b, ev):
sensor_dist = HMM.sensor_dist(ev)
prediction = element_wise_product(sensor_dist, b)
- return(normalize(vector_add(scalar_vector_product(prediction[0], HMM.transition_model[0]),
- scalar_vector_product(prediction[1], HMM.transition_model[1]))))
+ return normalize(vector_add(scalar_vector_product(prediction[0], HMM.transition_model[0]),
+ scalar_vector_product(prediction[1], HMM.transition_model[1])))
-def forward_backward(HMM, ev, prior):
- """[Fig. 15.4]
+def forward_backward(HMM, ev):
+ """
+ [Figure 15.4]
Forward-Backward algorithm for smoothing. Computes posterior probabilities
of a sequence of states given a sequence of observations.
-
- umbrella_evidence = [T, T, F, T, T]
- umbrella_prior = [0.5, 0.5]
- umbrella_transition = [[0.7, 0.3], [0.3, 0.7]]
- umbrella_sensor = [[0.9, 0.2], [0.1, 0.8]]
- umbrellaHMM = HiddenMarkovModel(umbrella_transition, umbrella_sensor)
-
- >>> forward_backward(umbrellaHMM, umbrella_evidence, umbrella_prior)
- [[0.6469, 0.3531], [0.8673, 0.1327], [0.8204, 0.1796],
- [0.3075, 0.6925], [0.8204, 0.1796], [0.8673, 0.1327]]
"""
t = len(ev)
ev.insert(0, None) # to make the code look similar to pseudo code
- fv = [[0.0, 0.0] for i in range(len(ev))]
+ fv = [[0.0, 0.0] for _ in range(len(ev))]
b = [1.0, 1.0]
- bv = [b] # we don't need bv; but we will have a list of all backward messages here
- sv = [[0, 0] for i in range(len(ev))]
+ sv = [[0, 0] for _ in range(len(ev))]
- fv[0] = prior
+ fv[0] = HMM.prior
for i in range(1, t + 1):
fv[i] = forward(HMM, fv[i - 1], ev[i])
for i in range(t, -1, -1):
sv[i - 1] = normalize(element_wise_product(fv[i], b))
b = backward(HMM, b, ev[i])
- bv.append(b)
sv = sv[::-1]
- return(sv)
+ return sv
+
+
+def viterbi(HMM, ev):
+ """
+ [Equation 15.11]
+ Viterbi algorithm to find the most likely sequence. Computes the best path and the
+ corresponding probabilities, given an HMM model and a sequence of observations.
+ """
+ t = len(ev)
+ ev = ev.copy()
+ ev.insert(0, None)
+
+ m = [[0.0, 0.0] for _ in range(len(ev) - 1)]
+
+ # the recursion is initialized with m1 = forward(P(X0), e1)
+ m[0] = forward(HMM, HMM.prior, ev[1])
+ # keep track of maximizing predecessors
+ backtracking_graph = []
+
+ for i in range(1, t):
+ m[i] = element_wise_product(HMM.sensor_dist(ev[i + 1]),
+ [max(element_wise_product(HMM.transition_model[0], m[i - 1])),
+ max(element_wise_product(HMM.transition_model[1], m[i - 1]))])
+ backtracking_graph.append([np.argmax(element_wise_product(HMM.transition_model[0], m[i - 1])),
+ np.argmax(element_wise_product(HMM.transition_model[1], m[i - 1]))])
+
+ # computed probabilities
+ ml_probabilities = [0.0] * (len(ev) - 1)
+ # most likely sequence
+ ml_path = [True] * (len(ev) - 1)
+
+ # the construction of the most likely sequence starts in the final state with the largest probability, and
+ # runs backwards; the algorithm needs to store for each xt its predecessor xt-1 maximizing its probability
+ i_max = np.argmax(m[-1])
+
+ for i in range(t - 1, -1, -1):
+ ml_probabilities[i] = m[i][i_max]
+ ml_path[i] = True if i_max == 0 else False
+ if i > 0:
+ i_max = backtracking_graph[i - 1][i_max]
+
+ return ml_path, ml_probabilities
+
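+# Example usage (a sketch, reusing the umbrella model from the docstrings removed
+# above; the boolean path marks the 'rain' state at each step):
+#     umbrella_transition = [[0.7, 0.3], [0.3, 0.7]]
+#     umbrella_sensor = [[0.9, 0.2], [0.1, 0.8]]
+#     umbrellaHMM = HiddenMarkovModel(umbrella_transition, umbrella_sensor)
+#     viterbi(umbrellaHMM, [T, T, F, T, T])
+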
# _________________________________________________________________________
def fixed_lag_smoothing(e_t, HMM, d, ev, t):
- """[Fig. 15.6]
+ """
+ [Figure 15.6]
Smoothing algorithm with a fixed time lag of 'd' steps.
Online algorithm that outputs the new smoothed estimate if observation
for new time step is given.
-
- umbrella_evidence = [T, T, F, T, T]
- e_t = T
- t = 4
- d = 3
- umbrella_transition = [[0.7, 0.3], [0.3, 0.7]]
- umbrella_sensor = [[0.9, 0.2], [0.1, 0.8]]
- umbrellaHMM = HiddenMarkovModel(umbrella_transition, umbrella_sensor)
-
- >>> fixed_lag_smoothing(T, umbrellaHMM, d)
"""
ev.insert(0, None)
T_model = HMM.transition_model
f = HMM.prior
B = [[1, 0], [0, 1]]
- evidence = []
- evidence.append(e_t)
- O_t = vector_to_diagonal(HMM.sensor_dist(e_t))
+ O_t = np.diag(HMM.sensor_dist(e_t))
if t > d:
f = forward(HMM, f, e_t)
- O_tmd = vector_to_diagonal(HMM.sensor_dist(ev[t- d]))
- B = matrix_multiplication(inverse_matrix(O_tmd), inverse_matrix(T_model), B, T_model, O_t)
+ O_tmd = np.diag(HMM.sensor_dist(ev[t - d]))
+ B = matrix_multiplication(np.linalg.inv(O_tmd), np.linalg.inv(T_model), B, T_model, O_t)
else:
B = matrix_multiplication(B, T_model, O_t)
- t = t + 1
+ t += 1
if t > d:
# always returns a 1x2 matrix
- return([normalize(i) for i in matrix_multiplication([f], B)][0])
+ return [normalize(i) for i in matrix_multiplication([f], B)][0]
else:
return None
+
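+# Example usage (a sketch based on the docstring removed above; returns a
+# smoothed 1x2 estimate once t exceeds the lag d, and None before that):
+#     umbrella_evidence = [T, T, F, T, T]
+#     umbrellaHMM = HiddenMarkovModel([[0.7, 0.3], [0.3, 0.7]], [[0.9, 0.2], [0.1, 0.8]])
+#     fixed_lag_smoothing(T, umbrellaHMM, d=3, ev=umbrella_evidence, t=4)
+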
# _________________________________________________________________________
def particle_filtering(e, N, HMM):
- """
- Particle filtering considering two states variables
- N = 10
- umbrella_evidence = T
- umbrella_prior = [0.5, 0.5]
- umbrella_transition = [[0.7, 0.3], [0.3, 0.7]]
- umbrella_sensor = [[0.9, 0.2], [0.1, 0.8]]
- umbrellaHMM = HiddenMarkovModel(umbrella_transition, umbrella_sensor)
-
- >>> particle_filtering(umbrella_evidence, N, umbrellaHMM)
- ['A', 'A', 'A', 'B', 'A', 'A', 'B', 'A', 'A', 'A', 'B']
-
- NOTE: Output is an probabilistic answer, therfore can vary
- """
- s = []
+ """Particle filtering considering two states variables."""
dist = [0.5, 0.5]
- # State Initialization
- s = ['A' if probability(dist[0]) else 'B' for i in range(N)]
# Weight Initialization
- w = [0 for i in range(N)]
+ w = [0 for _ in range(N)]
# STEP 1
# Propagate one step using transition model given prior state
dist = vector_add(scalar_vector_product(dist[0], HMM.transition_model[0]),
scalar_vector_product(dist[1], HMM.transition_model[1]))
# Assign state according to probability
- s = ['A' if probability(dist[0]) else 'B' for i in range(N)]
+ s = ['A' if probability(dist[0]) else 'B' for _ in range(N)]
w_tot = 0
# Calculate importance weight given evidence e
for i in range(N):
if s[i] == 'A':
# P(U|A)*P(A)
- w_i = HMM.sensor_dist(e)[0]*dist[0]
+ w_i = HMM.sensor_dist(e)[0] * dist[0]
if s[i] == 'B':
# P(U|B)*P(B)
- w_i = HMM.sensor_dist(e)[1]*dist[1]
+ w_i = HMM.sensor_dist(e)[1] * dist[1]
w[i] = w_i
w_tot += w_i
# Normalize all the weights
for i in range(N):
- w[i] = w[i]/w_tot
+ w[i] = w[i] / w_tot
# Limit weights to 4 digits
for i in range(N):
@@ -699,55 +795,76 @@ def particle_filtering(e, N, HMM):
# STEP 2
s = weighted_sample_with_replacement(N, s, w)
+
return s
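+
+# Example usage (a sketch from the removed docstring; the output is a random
+# sample of N states such as ['A', 'A', 'B', ...], so exact values vary):
+#     umbrella_transition = [[0.7, 0.3], [0.3, 0.7]]
+#     umbrella_sensor = [[0.9, 0.2], [0.1, 0.8]]
+#     umbrellaHMM = HiddenMarkovModel(umbrella_transition, umbrella_sensor)
+#     particle_filtering(T, 10, umbrellaHMM)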
-def weighted_sample_with_replacement(N, s, w):
+# _________________________________________________________________________
+# TODO: Implement continuous map for MonteCarlo similar to [Figure 25.10] in the book
+
+
+class MCLmap:
+ """Map which provides probability distributions and sensor readings.
+ Consists of discrete cells which are either an obstacle or empty"""
+
+ def __init__(self, m):
+ self.m = m
+ self.nrows = len(m)
+ self.ncols = len(m[0])
+ # list of empty spaces in the map
+ self.empty = [(i, j) for i in range(self.nrows) for j in range(self.ncols) if not m[i][j]]
+
+ def sample(self):
+ """Returns a random kinematic state possible in the map"""
+ pos = random.choice(self.empty)
+ # 0N 1E 2S 3W
+ orient = random.choice(range(4))
+ kin_state = pos + (orient,)
+ return kin_state
+
+ def ray_cast(self, sensor_num, kin_state):
+ """Returns distance to nearest obstacle or map boundary in the direction of sensor"""
+ pos = kin_state[:2]
+ orient = kin_state[2]
+ # sensor layout when orientation is 0 (towards North)
+ # 0
+ # 3R1
+ # 2
+ delta = ((sensor_num % 2 == 0) * (sensor_num - 1), (sensor_num % 2 == 1) * (2 - sensor_num))
+ # sensor direction changes based on orientation
+ for _ in range(orient):
+ delta = (delta[1], -delta[0])
+ range_count = 0
+        while 0 <= pos[0] < self.nrows and 0 <= pos[1] < self.ncols and not self.m[pos[0]][pos[1]]:
+ pos = vector_add(pos, delta)
+ range_count += 1
+ return range_count
+
+
+def monte_carlo_localization(a, z, N, P_motion_sample, P_sensor, m, S=None):
"""
- Performs Weighted sampling over the paricles given weights of each particle.
- We keep on picking random states unitll we fill N number states in new distribution
+ [Figure 25.9]
+ Monte Carlo localization algorithm
"""
- s_wtd = []
- cnt = 0
- while (cnt <= N):
- # Generate a random number from 0 to N-1
- i = random.randint(0, N-1)
- if (probability(w[i])):
- s_wtd.append(s[i])
- cnt += 1
- return s_wtd
-# _________________________________________________________________________
-__doc__ += """
-# We can build up a probability distribution like this (p. 469):
->>> P = ProbDist()
->>> P['sunny'] = 0.7
->>> P['rain'] = 0.2
->>> P['cloudy'] = 0.08
->>> P['snow'] = 0.02
-
-# and query it like this: (Never mind this ELLIPSIS option
-# added to make the doctest portable.)
->>> P['rain'] #doctest:+ELLIPSIS
-0.2...
-
-# A Joint Probability Distribution is dealt with like this (Fig. 13.3): # noqa
->>> P = JointProbDist(['Toothache', 'Cavity', 'Catch'])
->>> T, F = True, False
->>> P[T, T, T] = 0.108; P[T, T, F] = 0.012; P[F, T, T] = 0.072; P[F, T, F] = 0.008
->>> P[T, F, T] = 0.016; P[T, F, F] = 0.064; P[F, F, T] = 0.144; P[F, F, F] = 0.576
-
->>> P[T, T, T]
-0.108
-
-# Ask for P(Cavity|Toothache=T)
->>> PC = enumerate_joint_ask('Cavity', {'Toothache': T}, P)
->>> PC.show_approx()
-'False: 0.4, True: 0.6'
-
->>> 0.6-epsilon < PC[T] < 0.6+epsilon
-True
-
->>> 0.4-epsilon < PC[F] < 0.4+epsilon
-True
-"""
+ def ray_cast(sensor_num, kin_state, m):
+ return m.ray_cast(sensor_num, kin_state)
+
+ M = len(z)
+ S_ = [0] * N
+ W_ = [0] * N
+ v = a['v']
+ w = a['w']
+
+ if S is None:
+ S = [m.sample() for _ in range(N)]
+
+ for i in range(N):
+ S_[i] = P_motion_sample(S[i], v, w)
+ W_[i] = 1
+ for j in range(M):
+ z_ = ray_cast(j, S_[i], m)
+ W_[i] = W_[i] * P_sensor(z[j], z_)
+
+ S = weighted_sample_with_replacement(N, S_, W_)
+ return S
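+
+
+# Example usage (a hypothetical sketch; P_motion_sample and P_sensor below are
+# stand-in models for illustration, not part of this module):
+#     m = MCLmap([[0, 1, 0],
+#                 [0, 0, 0],
+#                 [0, 1, 0]])
+#     def P_motion_sample(kin_state, v, w):
+#         return kin_state  # trivially keep the pose fixed
+#     def P_sensor(z, z_):
+#         return 1.0 if z == z_ else 0.1  # crude likelihood of a range reading
+#     a, z = {'v': 0, 'w': 0}, [1, 1, 1, 1]
+#     S = monte_carlo_localization(a, z, 50, P_motion_sample, P_sensor, m)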
diff --git a/probability4e.ipynb b/probability4e.ipynb
new file mode 100644
index 000000000..e148e929e
--- /dev/null
+++ b/probability4e.ipynb
@@ -0,0 +1,1381 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "button": false,
+ "deletable": true,
+ "new_sheet": false,
+ "run_control": {
+ "read_only": false
+ }
+ },
+ "source": [
+ "# Probability and Bayesian Networks\n",
+ "\n",
+ "Probability theory allows us to compute the likelihood of certain events, given assumptioons about the components of the event. A Bayesian network, or Bayes net for short, is a data structure to represent a joint probability distribution over several random variables, and do inference on it. \n",
+ "\n",
+ "As an example, here is a network with five random variables, each with its conditional probability table, and with arrows from parent to child variables. The story, from Judea Pearl, is that there is a house burglar alarm, which can be triggered by either a burglary or an earthquake. If the alarm sounds, one or both of the neighbors, John and Mary, might call the owwner to say the alarm is sounding.\n",
+ "\n",
+ " \n",
+ "\n",
+ "We implement this with the help of seven Python classes:\n",
+ "\n",
+ "\n",
+ "## `BayesNet()`\n",
+ "\n",
+ "A `BayesNet` is a graph (as in the diagram above) where each node represents a random variable, and the edges are parent→child links. You can construct an empty graph with `BayesNet()`, then add variables one at a time with the method call `.add(`*variable_name, parent_names, cpt*`)`, where the names are strings, and each of the `parent_names` must already have been `.add`ed.\n",
+ "\n",
+ "## `Variable(`*name, cpt, parents*`)`\n",
+ "\n",
+ "A random variable; the ovals in the diagram above. The value of a variable depends on the value of the parents, in a probabilistic way specified by the variable's conditional probability table (CPT). Given the parents, the variable is independent of all the other variables. For example, if I know whether *Alarm* is true or false, then I know the probability of *JohnCalls*, and evidence about the other variables won't give me any more information about *JohnCalls*. Each row of the CPT uses the same order of variables as the list of parents.\n",
+ "We will only allow variables with a finite discrete domain; not continuous values. \n",
+ "\n",
+ "## `ProbDist(`*mapping*`)` `Factor(`*mapping*`)`\n",
+ "\n",
+ "A probability distribution is a mapping of `{outcome: probability}` for every outcome of a random variable. \n",
+ "You can give `ProbDist` the same arguments that you would give to the `dict` initializer, for example\n",
+ "`ProbDist(sun=0.6, rain=0.1, cloudy=0.3)`.\n",
+ "As a shortcut for Boolean Variables, you can say `ProbDist(0.95)` instead of `ProbDist({T: 0.95, F: 0.05})`. \n",
+ "In a probability distribution, every value is between 0 and 1, and the values sum to 1.\n",
+ "A `Factor` is similar to a probability distribution, except that the values need not sum to 1. Factors\n",
+ "are used in the variable elimination inference method.\n",
+ "\n",
+ "## `Evidence(`*mapping*`)`\n",
+ "\n",
+ "A mapping of `{Variable: value, ...}` pairs, describing the exact values for a set of variables—the things we know for sure.\n",
+ "\n",
+ "## `CPTable(`*rows, parents*`)`\n",
+ "\n",
+ "A conditional probability table (or *CPT*) describes the probability of each possible outcome value of a random variable, given the values of the parent variables. A `CPTable` is a a mapping, `{tuple: probdist, ...}`, where each tuple lists the values of each of the parent variables, in order, and each probability distribution says what the possible outcomes are, given those values of the parents. The `CPTable` for *Alarm* in the diagram above would be represented as follows:\n",
+ "\n",
+ " CPTable({(T, T): .95,\n",
+ " (T, F): .94,\n",
+ " (F, T): .29,\n",
+ " (F, F): .001},\n",
+ " [Burglary, Earthquake])\n",
+ " \n",
+ "How do you read this? Take the second row, \"`(T, F): .94`\". This means that when the first parent (`Burglary`) is true, and the second parent (`Earthquake`) is fale, then the probability of `Alarm` being true is .94. Note that the .94 is an abbreviation for `ProbDist({T: .94, F: .06})`.\n",
+ " \n",
+ "## `T = Bool(True); F = Bool(False)`\n",
+ "\n",
+ "When I used `bool` values (`True` and `False`), it became hard to read rows in CPTables, because the columns didn't line up:\n",
+ "\n",
+ " (True, True, False, False, False)\n",
+ " (False, False, False, False, True)\n",
+ " (True, False, False, True, True)\n",
+ " \n",
+ "Therefore, I created the `Bool` class, with constants `T` and `F` such that `T == True` and `F == False`, and now rows are easier to read:\n",
+ "\n",
+ " (T, T, F, F, F)\n",
+ " (F, F, F, F, T)\n",
+ " (T, F, F, T, T)\n",
+ " \n",
+ "Here is the code for these classes:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {
+ "button": false,
+ "collapsed": true,
+ "deletable": true,
+ "new_sheet": false,
+ "run_control": {
+ "read_only": false
+ }
+ },
+ "outputs": [],
+ "source": [
+ "from collections import defaultdict, Counter\n",
+ "import itertools\n",
+ "import math\n",
+ "import random\n",
+ "\n",
+ "class BayesNet(object):\n",
+ " \"Bayesian network: a graph of variables connected by parent links.\"\n",
+ " \n",
+ " def __init__(self): \n",
+ " self.variables = [] # List of variables, in parent-first topological sort order\n",
+ " self.lookup = {} # Mapping of {variable_name: variable} pairs\n",
+ " \n",
+ " def add(self, name, parentnames, cpt):\n",
+ " \"Add a new Variable to the BayesNet. Parentnames must have been added previously.\"\n",
+ " parents = [self.lookup[name] for name in parentnames]\n",
+ " var = Variable(name, cpt, parents)\n",
+ " self.variables.append(var)\n",
+ " self.lookup[name] = var\n",
+ " return self\n",
+ " \n",
+ "class Variable(object):\n",
+ " \"A discrete random variable; conditional on zero or more parent Variables.\"\n",
+ " \n",
+ " def __init__(self, name, cpt, parents=()):\n",
+ " \"A variable has a name, list of parent variables, and a Conditional Probability Table.\"\n",
+ " self.__name__ = name\n",
+ " self.parents = parents\n",
+ " self.cpt = CPTable(cpt, parents)\n",
+ " self.domain = set(itertools.chain(*self.cpt.values())) # All the outcomes in the CPT\n",
+ " \n",
+ " def __repr__(self): return self.__name__\n",
+ " \n",
+ "class Factor(dict): \"An {outcome: frequency} mapping.\"\n",
+ "\n",
+ "class ProbDist(Factor):\n",
+ " \"\"\"A Probability Distribution is an {outcome: probability} mapping. \n",
+ " The values are normalized to sum to 1.\n",
+ " ProbDist(0.75) is an abbreviation for ProbDist({T: 0.75, F: 0.25}).\"\"\"\n",
+ " def __init__(self, mapping=(), **kwargs):\n",
+ " if isinstance(mapping, float):\n",
+ " mapping = {T: mapping, F: 1 - mapping}\n",
+ " self.update(mapping, **kwargs)\n",
+ " normalize(self)\n",
+ " \n",
+ "class Evidence(dict): \n",
+ " \"A {variable: value} mapping, describing what we know for sure.\"\n",
+ " \n",
+ "class CPTable(dict):\n",
+ " \"A mapping of {row: ProbDist, ...} where each row is a tuple of values of the parent variables.\"\n",
+ " \n",
+ " def __init__(self, mapping, parents=()):\n",
+ " \"\"\"Provides two shortcuts for writing a Conditional Probability Table. \n",
+ " With no parents, CPTable(dist) means CPTable({(): dist}).\n",
+ " With one parent, CPTable({val: dist,...}) means CPTable({(val,): dist,...}).\"\"\"\n",
+ " if len(parents) == 0 and not (isinstance(mapping, dict) and set(mapping.keys()) == {()}):\n",
+ " mapping = {(): mapping}\n",
+ " for (row, dist) in mapping.items():\n",
+ " if len(parents) == 1 and not isinstance(row, tuple): \n",
+ " row = (row,)\n",
+ " self[row] = ProbDist(dist)\n",
+ "\n",
+ "class Bool(int):\n",
+ " \"Just like `bool`, except values display as 'T' and 'F' instead of 'True' and 'False'\"\n",
+ " __str__ = __repr__ = lambda self: 'T' if self else 'F'\n",
+ " \n",
+ "T = Bool(True)\n",
+ "F = Bool(False)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "And here are some associated functions:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "def P(var, evidence={}):\n",
+ " \"The probability distribution for P(variable | evidence), when all parent variables are known (in evidence).\"\n",
+ " row = tuple(evidence[parent] for parent in var.parents)\n",
+ " return var.cpt[row]\n",
+ "\n",
+ "def normalize(dist):\n",
+ " \"Normalize a {key: value} distribution so values sum to 1.0. Mutates dist and returns it.\"\n",
+ " total = sum(dist.values())\n",
+ " for key in dist:\n",
+ " dist[key] = dist[key] / total\n",
+ " assert 0 <= dist[key] <= 1, \"Probabilities must be between 0 and 1.\"\n",
+ " return dist\n",
+ "\n",
+ "def sample(probdist):\n",
+ " \"Randomly sample an outcome from a probability distribution.\"\n",
+ " r = random.random() # r is a random point in the probability distribution\n",
+ " c = 0.0 # c is the cumulative probability of outcomes seen so far\n",
+ " for outcome in probdist:\n",
+ " c += probdist[outcome]\n",
+ " if r <= c:\n",
+ " return outcome\n",
+ " \n",
+ "def globalize(mapping):\n",
+ " \"Given a {name: value} mapping, export all the names to the `globals()` namespace.\"\n",
+ " globals().update(mapping)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Sample Usage\n",
+ "\n",
+ "Here are some examples of using the classes:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "# Example random variable: Earthquake:\n",
+ "# An earthquake occurs on 0.002 of days, independent of any other variables.\n",
+ "Earthquake = Variable('Earthquake', 0.002)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{F: 0.998, T: 0.002}"
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# The probability distribution for Earthquake\n",
+ "P(Earthquake)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0.002"
+ ]
+ },
+ "execution_count": 5,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Get the probability of a specific outcome by subscripting the probability distribution\n",
+ "P(Earthquake)[T]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "F"
+ ]
+ },
+ "execution_count": 6,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Randomly sample from the distribution:\n",
+ "sample(P(Earthquake))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "Counter({F: 99793, T: 207})"
+ ]
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Randomly sample 100,000 times, and count up the results:\n",
+ "Counter(sample(P(Earthquake)) for i in range(100000))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "# Two equivalent ways of specifying the same Boolean probability distribution:\n",
+ "assert ProbDist(0.75) == ProbDist({T: 0.75, F: 0.25})"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{'lose': 0.15, 'tie': 0.1, 'win': 0.75}"
+ ]
+ },
+ "execution_count": 9,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Two equivalent ways of specifying the same non-Boolean probability distribution:\n",
+ "assert ProbDist(win=15, lose=3, tie=2) == ProbDist({'win': 15, 'lose': 3, 'tie': 2})\n",
+ "ProbDist(win=15, lose=3, tie=2)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{'a': 1, 'b': 2, 'c': 3, 'd': 4}"
+ ]
+ },
+ "execution_count": 10,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# The difference between a Factor and a ProbDist--the ProbDist is normalized:\n",
+ "Factor(a=1, b=2, c=3, d=4)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{'a': 0.1, 'b': 0.2, 'c': 0.3, 'd': 0.4}"
+ ]
+ },
+ "execution_count": 11,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "ProbDist(a=1, b=2, c=3, d=4)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Example: Alarm Bayes Net\n",
+ "\n",
+ "Here is how we define the Bayes net from the diagram above:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "alarm_net = (BayesNet()\n",
+ " .add('Burglary', [], 0.001)\n",
+ " .add('Earthquake', [], 0.002)\n",
+ " .add('Alarm', ['Burglary', 'Earthquake'], {(T, T): 0.95, (T, F): 0.94, (F, T): 0.29, (F, F): 0.001})\n",
+ " .add('JohnCalls', ['Alarm'], {T: 0.90, F: 0.05})\n",
+ " .add('MaryCalls', ['Alarm'], {T: 0.70, F: 0.01})) "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[Burglary, Earthquake, Alarm, JohnCalls, MaryCalls]"
+ ]
+ },
+ "execution_count": 13,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Make Burglary, Earthquake, etc. be global variables\n",
+ "globalize(alarm_net.lookup) \n",
+ "alarm_net.variables"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{F: 0.999, T: 0.001}"
+ ]
+ },
+ "execution_count": 14,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Probability distribution of a Burglary\n",
+ "P(Burglary)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{F: 0.06000000000000005, T: 0.94}"
+ ]
+ },
+ "execution_count": 15,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Probability of Alarm going off, given a Burglary and not an Earthquake:\n",
+ "P(Alarm, {Burglary: T, Earthquake: F})"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{(F, F): {F: 0.999, T: 0.001},\n",
+ " (F, T): {F: 0.71, T: 0.29},\n",
+ " (T, F): {F: 0.06000000000000005, T: 0.94},\n",
+ " (T, T): {F: 0.050000000000000044, T: 0.95}}"
+ ]
+ },
+ "execution_count": 16,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Where that came from: the (T, F) row of Alarm's CPT:\n",
+ "Alarm.cpt"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "button": false,
+ "deletable": true,
+ "new_sheet": false,
+ "run_control": {
+ "read_only": false
+ }
+ },
+ "source": [
+ "# Bayes Nets as Joint Probability Distributions\n",
+ "\n",
+ "A Bayes net is a compact way of specifying a full joint distribution over all the variables in the network. Given a set of variables {*X*1 , ..., *X**n* }, the full joint distribution is:\n",
+ "\n",
+ "P(*X*1 =*x*1 , ..., *X**n* =*x**n* ) = Π *i* P(*X**i* = *x**i* | parents(*X**i* ))\n",
+ "\n",
+ "For a network with *n* variables, each of which has *b* values, there are *bn * rows in the joint distribution (for example, a billion rows for 30 Boolean variables), making it impractical to explicitly create the joint distribution for large networks. But for small networks, the function `joint_distribution` creates the distribution, which can be instructive to look at, and can be used to do inference. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "def joint_distribution(net):\n",
+ " \"Given a Bayes net, create the joint distribution over all variables.\"\n",
+ " return ProbDist({row: prod(P_xi_given_parents(var, row, net)\n",
+ " for var in net.variables)\n",
+ " for row in all_rows(net)})\n",
+ "\n",
+ "def all_rows(net): return itertools.product(*[var.domain for var in net.variables])\n",
+ "\n",
+ "def P_xi_given_parents(var, row, net):\n",
+ " \"The probability that var = xi, given the values in this row.\"\n",
+ " dist = P(var, Evidence(zip(net.variables, row)))\n",
+ " xi = row[net.variables.index(var)]\n",
+ " return dist[xi]\n",
+ "\n",
+ "def prod(numbers):\n",
+ " \"The product of numbers: prod([2, 3, 5]) == 30. Analogous to `sum([2, 3, 5]) == 10`.\"\n",
+ " result = 1\n",
+ " for x in numbers:\n",
+ " result *= x\n",
+ " return result"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{(F, F, F, F, F),\n",
+ " (F, F, F, F, T),\n",
+ " (F, F, F, T, F),\n",
+ " (F, F, F, T, T),\n",
+ " (F, F, T, F, F),\n",
+ " (F, F, T, F, T),\n",
+ " (F, F, T, T, F),\n",
+ " (F, F, T, T, T),\n",
+ " (F, T, F, F, F),\n",
+ " (F, T, F, F, T),\n",
+ " (F, T, F, T, F),\n",
+ " (F, T, F, T, T),\n",
+ " (F, T, T, F, F),\n",
+ " (F, T, T, F, T),\n",
+ " (F, T, T, T, F),\n",
+ " (F, T, T, T, T),\n",
+ " (T, F, F, F, F),\n",
+ " (T, F, F, F, T),\n",
+ " (T, F, F, T, F),\n",
+ " (T, F, F, T, T),\n",
+ " (T, F, T, F, F),\n",
+ " (T, F, T, F, T),\n",
+ " (T, F, T, T, F),\n",
+ " (T, F, T, T, T),\n",
+ " (T, T, F, F, F),\n",
+ " (T, T, F, F, T),\n",
+ " (T, T, F, T, F),\n",
+ " (T, T, F, T, T),\n",
+ " (T, T, T, F, F),\n",
+ " (T, T, T, F, T),\n",
+ " (T, T, T, T, F),\n",
+ " (T, T, T, T, T)}"
+ ]
+ },
+ "execution_count": 18,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# All rows in the joint distribution (2**5 == 32 rows)\n",
+ "set(all_rows(alarm_net))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "# Let's work through just one row of the table:\n",
+ "row = (F, F, F, F, F)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{F: 0.999, T: 0.001}"
+ ]
+ },
+ "execution_count": 20,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# This is the probability distribution for Alarm\n",
+ "P(Alarm, {Burglary: F, Earthquake: F})"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0.999"
+ ]
+ },
+ "execution_count": 21,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Here's the probability that Alarm is false, given the parent values in this row:\n",
+ "P_xi_given_parents(Alarm, row, alarm_net)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{(F, F, F, F, F): 0.9367427006190001,\n",
+ " (F, F, F, F, T): 0.009462047481000001,\n",
+ " (F, F, F, T, F): 0.04930224740100002,\n",
+ " (F, F, F, T, T): 0.0004980024990000002,\n",
+ " (F, F, T, F, F): 2.9910060000000004e-05,\n",
+ " (F, F, T, F, T): 6.979013999999999e-05,\n",
+ " (F, F, T, T, F): 0.00026919054000000005,\n",
+ " (F, F, T, T, T): 0.00062811126,\n",
+ " (F, T, F, F, F): 0.0013341744900000002,\n",
+ " (F, T, F, F, T): 1.3476510000000005e-05,\n",
+ " (F, T, F, T, F): 7.021971000000001e-05,\n",
+ " (F, T, F, T, T): 7.092900000000001e-07,\n",
+ " (F, T, T, F, F): 1.7382600000000002e-05,\n",
+ " (F, T, T, F, T): 4.0559399999999997e-05,\n",
+ " (F, T, T, T, F): 0.00015644340000000006,\n",
+ " (F, T, T, T, T): 0.00036503460000000007,\n",
+ " (T, F, F, F, F): 5.631714000000006e-05,\n",
+ " (T, F, F, F, T): 5.688600000000006e-07,\n",
+ " (T, F, F, T, F): 2.9640600000000033e-06,\n",
+ " (T, F, F, T, T): 2.9940000000000035e-08,\n",
+ " (T, F, T, F, F): 2.8143600000000003e-05,\n",
+ " (T, F, T, F, T): 6.56684e-05,\n",
+ " (T, F, T, T, F): 0.0002532924000000001,\n",
+ " (T, F, T, T, T): 0.0005910156000000001,\n",
+ " (T, T, F, F, F): 9.40500000000001e-08,\n",
+ " (T, T, F, F, T): 9.50000000000001e-10,\n",
+ " (T, T, F, T, F): 4.9500000000000054e-09,\n",
+ " (T, T, F, T, T): 5.0000000000000066e-11,\n",
+ " (T, T, T, F, F): 5.7e-08,\n",
+ " (T, T, T, F, T): 1.3299999999999996e-07,\n",
+ " (T, T, T, T, F): 5.130000000000002e-07,\n",
+ " (T, T, T, T, T): 1.1970000000000001e-06}"
+ ]
+ },
+ "execution_count": 22,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# The full joint distribution:\n",
+ "joint_distribution(alarm_net)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[Burglary, Earthquake, Alarm, JohnCalls, MaryCalls]\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "0.00062811126"
+ ]
+ },
+ "execution_count": 23,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Probability that \"the alarm has sounded, but neither a burglary nor an earthquake has occurred, \n",
+ "# and both John and Mary call\" (page 514 says it should be 0.000628)\n",
+ "\n",
+ "print(alarm_net.variables)\n",
+ "joint_distribution(alarm_net)[F, F, T, T, T]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Inference by Querying the Joint Distribution\n",
+ "\n",
+ "We can use `P(variable, evidence)` to get the probability of aa variable, if we know the vaues of all the parent variables. But what if we don't know? Bayes nets allow us to calculate the probability, but the calculation is not just a lookup in the CPT; it is a global calculation across the whole net. One inefficient but straightforward way of doing the calculation is to create the joint probability distribution, then pick out just the rows that\n",
+ "match the evidence variables, and for each row check what the value of the query variable is, and increment the probability for that value accordningly:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 24,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "def enumeration_ask(X, evidence, net):\n",
+ " \"The probability distribution for query variable X in a belief net, given evidence.\"\n",
+ " i = net.variables.index(X) # The index of the query variable X in the row\n",
+ " dist = defaultdict(float) # The resulting probability distribution over X\n",
+ " for (row, p) in joint_distribution(net).items():\n",
+ " if matches_evidence(row, evidence, net):\n",
+ " dist[row[i]] += p\n",
+ " return ProbDist(dist)\n",
+ "\n",
+ "def matches_evidence(row, evidence, net):\n",
+ " \"Does the tuple of values for this row agree with the evidence?\"\n",
+ " return all(evidence[v] == row[net.variables.index(v)]\n",
+ " for v in evidence)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 25,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{F: 0.9931237539265789, T: 0.006876246073421024}"
+ ]
+ },
+ "execution_count": 25,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# The probability of a Burgalry, given that John calls but Mary does not: \n",
+ "enumeration_ask(Burglary, {JohnCalls: F, MaryCalls: T}, alarm_net)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 26,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{F: 0.03368899586522123, T: 0.9663110041347788}"
+ ]
+ },
+ "execution_count": 26,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# The probability of an Alarm, given that there is an Earthquake and Mary calls:\n",
+ "enumeration_ask(Alarm, {MaryCalls: T, Earthquake: T}, alarm_net)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Variable Elimination\n",
+ "\n",
+ "The `enumeration_ask` algorithm takes time and space that is exponential in the number of variables. That is, first it creates the joint distribution, of size *bn *, and then it sums out the values for the rows that match the evidence. We can do better than that if we interleave the joining of variables with the summing out of values.\n",
+ "This approach is called *variable elimination*. The key insight is that\n",
+ "when we compute\n",
+ "\n",
+ "P(*X*1 =*x*1 , ..., *X**n* =*x**n* ) = Π *i* P(*X**i* = *x**i* | parents(*X**i* ))\n",
+ "\n",
+ "we are repeating the calculation of, say, P(*X**3* = *x**4* | parents(*X**3* ))\n",
+ "multiple times, across multiple rows of the joint distribution.\n",
+ "\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 27,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "# TODO: Copy over and update Variable Elimination algorithm. Also, sampling algorithms."
+ ]
+ },
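+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Until the algorithms are copied over, here is a minimal sketch of one of the sampling algorithms the TODO mentions, *rejection sampling*, built only from the `sample`, `P`, and `ProbDist` definitions above. The helper names `prior_sample` and `rejection_sampling` are introduced just for this sketch:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "def prior_sample(net):\n",
+    "    # Sample a complete {Variable: value} event; the topological order of\n",
+    "    # net.variables guarantees parents are assigned before their children.\n",
+    "    event = {}\n",
+    "    for var in net.variables:\n",
+    "        event[var] = sample(P(var, event))\n",
+    "    return event\n",
+    "\n",
+    "def rejection_sampling(X, evidence, net, N=10000):\n",
+    "    # Estimate P(X | evidence) by tallying X over the prior samples\n",
+    "    # that happen to agree with the evidence.\n",
+    "    counts = Counter()\n",
+    "    for _ in range(N):\n",
+    "        event = prior_sample(net)\n",
+    "        if all(event[var] == val for (var, val) in evidence.items()):\n",
+    "            counts[event[X]] += 1\n",
+    "    return ProbDist(counts)\n",
+    "\n",
+    "# Should roughly agree with enumeration_ask(Burglary, {JohnCalls: F, MaryCalls: T}, alarm_net)\n",
+    "rejection_sampling(Burglary, {JohnCalls: F, MaryCalls: T}, alarm_net, 100000)"
+   ]
+  },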
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "button": false,
+ "deletable": true,
+ "new_sheet": false,
+ "run_control": {
+ "read_only": false
+ }
+ },
+ "source": [
+ "# Example: Flu Net\n",
+ "\n",
+ "In this net, whether a patient gets the flu is dependent on whether they were vaccinated, and having the flu influences whether they get a fever or headache. Here `Fever` is a non-Boolean variable, with three values, `no`, `mild`, and `high`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 28,
+ "metadata": {
+ "button": false,
+ "collapsed": false,
+ "deletable": true,
+ "new_sheet": false,
+ "run_control": {
+ "read_only": false
+ }
+ },
+ "outputs": [],
+ "source": [
+ "flu_net = (BayesNet()\n",
+ " .add('Vaccinated', [], 0.60)\n",
+ " .add('Flu', ['Vaccinated'], {T: 0.002, F: 0.02})\n",
+ " .add('Fever', ['Flu'], {T: ProbDist(no=25, mild=25, high=50),\n",
+ " F: ProbDist(no=97, mild=2, high=1)})\n",
+ " .add('Headache', ['Flu'], {T: 0.5, F: 0.03}))\n",
+ "\n",
+ "globalize(flu_net.lookup)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 29,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{F: 0.9616440110625343, T: 0.03835598893746573}"
+ ]
+ },
+ "execution_count": 29,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# If you just have a headache, you probably don't have the Flu.\n",
+ "enumeration_ask(Flu, {Headache: T, Fever: 'no'}, flu_net)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 30,
+ "metadata": {
+ "button": false,
+ "collapsed": false,
+ "deletable": true,
+ "new_sheet": false,
+ "run_control": {
+ "read_only": false
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{F: 0.9914651882096696, T: 0.008534811790330398}"
+ ]
+ },
+ "execution_count": 30,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Even more so if you were vaccinated.\n",
+ "enumeration_ask(Flu, {Headache: T, Fever: 'no', Vaccinated: T}, flu_net)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 31,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{F: 0.9194016377587207, T: 0.08059836224127925}"
+ ]
+ },
+ "execution_count": 31,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# But if you were not vaccinated, there is a higher chance you have the flu.\n",
+ "enumeration_ask(Flu, {Headache: T, Fever: 'no', Vaccinated: F}, flu_net)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 32,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{F: 0.1904145077720207, T: 0.8095854922279793}"
+ ]
+ },
+ "execution_count": 32,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# And if you have both headache and fever, and were not vaccinated, \n",
+ "# then the flu is very likely, especially if it is a high fever.\n",
+ "enumeration_ask(Flu, {Headache: T, Fever: 'mild', Vaccinated: F}, flu_net)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 33,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{F: 0.055534567434831886, T: 0.9444654325651682}"
+ ]
+ },
+ "execution_count": 33,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "enumeration_ask(Flu, {Headache: T, Fever: 'high', Vaccinated: F}, flu_net)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Entropy\n",
+ "\n",
+ "We can compute the entropy of a probability distribution:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 34,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "def entropy(probdist):\n",
+ " \"The entropy of a probability distribution.\"\n",
+ " return - sum(p * math.log(p, 2)\n",
+ " for p in probdist.values())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 35,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "1.0"
+ ]
+ },
+ "execution_count": 35,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "entropy(ProbDist(heads=0.5, tails=0.5))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 36,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0.011397802630112312"
+ ]
+ },
+ "execution_count": 36,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "entropy(ProbDist(yes=1000, no=1))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 37,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0.8687212463394045"
+ ]
+ },
+ "execution_count": 37,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "entropy(P(Alarm, {Earthquake: T, Burglary: F}))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 38,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0.011407757737461138"
+ ]
+ },
+ "execution_count": 38,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "entropy(P(Alarm, {Earthquake: F, Burglary: F}))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For non-Boolean variables, the entropy can be greater than 1 bit:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 39,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "1.5"
+ ]
+ },
+ "execution_count": 39,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "entropy(P(Fever, {Flu: T}))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "collapsed": false
+ },
+ "source": [
+ "# Unknown Outcomes: Smoothing\n",
+ "\n",
+ "So far we have dealt with discrete distributions where we know all the possible outcomes in advance. For Boolean variables, the only outcomes are `T` and `F`. For `Fever`, we modeled exactly three outcomes. However, in some applications we will encounter new, previously unknown outcomes over time. For example, we could train a model on the distribution of words in English, and then somebody could coin a brand new word. To deal with this, we introduce\n",
+ "the `DefaultProbDist` distribution, which uses the key `None` to stand as a placeholder for any unknown outcome(s)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 40,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "class DefaultProbDist(ProbDist):\n",
+ " \"\"\"A Probability Distribution that supports smoothing for unknown outcomes (keys).\n",
+ " The default_value represents the probability of an unknown (previously unseen) key. \n",
+ " The key `None` stands for unknown outcomes.\"\"\"\n",
+ " def __init__(self, default_value, mapping=(), **kwargs):\n",
+ " self[None] = default_value\n",
+ " self.update(mapping, **kwargs)\n",
+ " normalize(self)\n",
+ " \n",
+ " def __missing__(self, key): return self[None] "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 41,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "import re\n",
+ "\n",
+ "def words(text): return re.findall(r'\\w+', text.lower())\n",
+ "\n",
+ "english = words('''This is a sample corpus of English prose. To get a better model, we would train on much\n",
+ "more text. But this should give you an idea of the process. So far we have dealt with discrete \n",
+ "distributions where we know all the possible outcomes in advance. For Boolean variables, the only \n",
+ "outcomes are T and F. For Fever, we modeled exactly three outcomes. However, in some applications we \n",
+ "will encounter new, previously unknown outcomes over time. For example, when we could train a model on the \n",
+ "words in this text, we get a distribution, but somebody could coin a brand new word. To deal with this, \n",
+ "we introduce the DefaultProbDist distribution, which uses the key `None` to stand as a placeholder for any \n",
+ "unknown outcomes. Probability theory allows us to compute the likelihood of certain events, given \n",
+ "assumptions about the components of the event. A Bayesian network, or Bayes net for short, is a data \n",
+ "structure to represent a joint probability distribution over several random variables, and do inference on it.''')\n",
+ "\n",
+ "E = DefaultProbDist(0.1, Counter(english))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 42,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0.052295177222545036"
+ ]
+ },
+ "execution_count": 42,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# 'the' is a common word:\n",
+ "E['the']"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 43,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0.005810575246949448"
+ ]
+ },
+ "execution_count": 43,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# 'possible' is a less-common word:\n",
+ "E['possible']"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 44,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0.0005810575246949449"
+ ]
+ },
+ "execution_count": 44,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# 'impossible' was not seen in the training data, but still gets a non-zero probability ...\n",
+ "E['impossible']"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 45,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0.0005810575246949449"
+ ]
+ },
+ "execution_count": 45,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# ... as do other rare, previously unseen words:\n",
+ "E['llanfairpwllgwyngyll']"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Note that this does not mean that 'impossible' and 'llanfairpwllgwyngyll' and all the other unknown words\n",
+ "*each* have probability 0.004.\n",
+ "Rather, it means that together, all the unknown words total probability 0.004. With that\n",
+ "interpretation, the sum of all the probabilities is still 1, as it should be. In the `DefaultProbDist`, the\n",
+ "unknown words are all represented by the key `None`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 46,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0.0005810575246949449"
+ ]
+ },
+ "execution_count": 46,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "E[None]"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.5.1"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
diff --git a/probability4e.py b/probability4e.py
new file mode 100644
index 000000000..d413a55ae
--- /dev/null
+++ b/probability4e.py
@@ -0,0 +1,776 @@
+"""Probability models (Chapter 12-13)"""
+
+import copy
+import random
+from collections import defaultdict
+from functools import reduce
+
+import numpy as np
+
+from utils4e import product, probability, extend
+
+
+# ______________________________________________________________________________
+# Chapter 12 Quantifying Uncertainty
+# 12.1 Acting Under Uncertainty
+
+
+def DTAgentProgram(belief_state):
+ """A decision-theoretic agent. [Figure 12.1]"""
+
+ def program(percept):
+ belief_state.observe(program.action, percept)
+ program.action = max(belief_state.actions(), key=belief_state.expected_outcome_utility)
+ return program.action
+
+ program.action = None
+ return program
+
+
+# ______________________________________________________________________________
+# 12.2 Basic Probability Notation
+
+
+class ProbDist:
+ """A discrete probability distribution. You name the random variable
+ in the constructor, then assign and query probability of values.
+ >>> P = ProbDist('Flip'); P['H'], P['T'] = 0.25, 0.75; P['H']
+ 0.25
+ >>> P = ProbDist('X', {'lo': 125, 'med': 375, 'hi': 500})
+ >>> P['lo'], P['med'], P['hi']
+ (0.125, 0.375, 0.5)
+ """
+
+ def __init__(self, varname='?', freqs=None):
+ """If freqs is given, it is a dictionary of values - frequency pairs,
+ then ProbDist is normalized."""
+ self.prob = {}
+ self.varname = varname
+ self.values = []
+ if freqs:
+ for (v, p) in freqs.items():
+ self[v] = p
+ self.normalize()
+
+ def __getitem__(self, val):
+ """Given a value, return P(value)."""
+ try:
+ return self.prob[val]
+ except KeyError:
+ return 0
+
+ def __setitem__(self, val, p):
+ """Set P(val) = p."""
+ if val not in self.values:
+ self.values.append(val)
+ self.prob[val] = p
+
+ def normalize(self):
+ """Make sure the probabilities of all values sum to 1.
+ Returns the normalized distribution.
+ Raises a ZeroDivisionError if the sum of the values is 0."""
+ total = sum(self.prob.values())
+ if not np.isclose(total, 1.0):
+ for val in self.prob:
+ self.prob[val] /= total
+ return self
+
+ def show_approx(self, numfmt='{:.3g}'):
+ """Show the probabilities rounded and sorted by key, for the
+ sake of portable doctests."""
+ return ', '.join([('{}: ' + numfmt).format(v, p)
+ for (v, p) in sorted(self.prob.items())])
+
+ def __repr__(self):
+ return "P({})".format(self.varname)
+
+
+# ______________________________________________________________________________
+# 12.3 Inference Using Full Joint Distributions
+
+
+class JointProbDist(ProbDist):
+ """A discrete probability distribute over a set of variables.
+ >>> P = JointProbDist(['X', 'Y']); P[1, 1] = 0.25
+ >>> P[1, 1]
+ 0.25
+ >>> P[dict(X=0, Y=1)] = 0.5
+ >>> P[dict(X=0, Y=1)]
+ 0.5"""
+
+ def __init__(self, variables):
+ self.prob = {}
+ self.variables = variables
+ self.vals = defaultdict(list)
+
+ def __getitem__(self, values):
+ """Given a tuple or dict of values, return P(values)."""
+ values = event_values(values, self.variables)
+ return ProbDist.__getitem__(self, values)
+
+ def __setitem__(self, values, p):
+ """Set P(values) = p. Values can be a tuple or a dict; it must
+ have a value for each of the variables in the joint. Also keep track
+ of the values we have seen so far for each variable."""
+ values = event_values(values, self.variables)
+ self.prob[values] = p
+ for var, val in zip(self.variables, values):
+ if val not in self.vals[var]:
+ self.vals[var].append(val)
+
+ def values(self, var):
+ """Return the set of possible values for a variable."""
+ return self.vals[var]
+
+ def __repr__(self):
+ return "P({})".format(self.variables)
+
+
+def event_values(event, variables):
+ """Return a tuple of the values of variables in event.
+ >>> event_values ({'A': 10, 'B': 9, 'C': 8}, ['C', 'A'])
+ (8, 10)
+ >>> event_values ((1, 2), ['C', 'A'])
+ (1, 2)
+ """
+ if isinstance(event, tuple) and len(event) == len(variables):
+ return event
+ else:
+ return tuple([event[var] for var in variables])
+
+
+def enumerate_joint_ask(X, e, P):
+ """Return a probability distribution over the values of the variable X,
+ given the {var:val} observations e, in the JointProbDist P. [Section 12.3]
+ >>> P = JointProbDist(['X', 'Y'])
+ >>> P[0,0] = 0.25; P[0,1] = 0.5; P[1,1] = P[2,1] = 0.125
+ >>> enumerate_joint_ask('X', dict(Y=1), P).show_approx()
+ '0: 0.667, 1: 0.167, 2: 0.167'
+ """
+ assert X not in e, "Query variable must be distinct from evidence"
+ Q = ProbDist(X) # probability distribution for X, initially empty
+ Y = [v for v in P.variables if v != X and v not in e] # hidden variables.
+ for xi in P.values(X):
+ Q[xi] = enumerate_joint(Y, extend(e, X, xi), P)
+ return Q.normalize()
+
+
+def enumerate_joint(variables, e, P):
+ """Return the sum of those entries in P consistent with e,
+ provided variables is P's remaining variables (the ones not in e)."""
+ if not variables:
+ return P[e]
+ Y, rest = variables[0], variables[1:]
+ return sum([enumerate_joint(rest, extend(e, Y, y), P)
+ for y in P.values(Y)])
+
+
+# ______________________________________________________________________________
+# 12.4 Independence
+
+
+def is_independent(variables, P):
+ """
+    Return whether a list of variables is independent given their joint distribution P
+    P is an instance of JointProbDist
+ >>> P = JointProbDist(['X', 'Y'])
+ >>> P[0,0] = 0.25; P[0,1] = 0.5; P[1,1] = P[1,0] = 0.125
+ >>> is_independent(['X', 'Y'], P)
+ False
+ """
+ for var in variables:
+ event_vars = variables[:]
+ event_vars.remove(var)
+ event = {}
+ distribution = enumerate_joint_ask(var, event, P)
+ events = gen_possible_events(event_vars, P)
+ for e in events:
+ conditional_distr = enumerate_joint_ask(var, e, P)
+ if conditional_distr.prob != distribution.prob:
+ return False
+ return True
+
+
+def gen_possible_events(vars, P):
+ """Generate all possible events of a collection of vars according to distribution of P"""
+ events = []
+
+ def backtrack(vars, P, temp):
+ if not vars:
+ events.append(temp)
+ return
+ var = vars[0]
+ for val in P.values(var):
+ temp[var] = val
+ backtrack([v for v in vars if v != var], P, copy.copy(temp))
+
+ backtrack(vars, P, {})
+ return events
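+
+# Example (a sketch): with P the JointProbDist from the is_independent doctest,
+# gen_possible_events(['Y'], P) yields [{'Y': 0}, {'Y': 1}].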
+
+
+# ______________________________________________________________________________
+# Chapter 13 Probabilistic Reasoning
+# 13.1 Representing Knowledge in an Uncertain Domain
+
+
+class BayesNet:
+ """Bayesian network containing only boolean-variable nodes."""
+
+ def __init__(self, node_specs=None):
+ """
+ Nodes must be ordered with parents before children.
+        :param node_specs: a nested iterable object; each element contains (variable name, parent names, cpt)
+        for each node
+ """
+
+ self.nodes = []
+ self.variables = []
+ node_specs = node_specs or []
+ for node_spec in node_specs:
+ self.add(node_spec)
+
+ def add(self, node_spec):
+ """
+ Add a node to the net. Its parents must already be in the
+ net, and its variable must not.
+        The node type (discrete or continuous) is detected from the length of the node spec
+ """
+ if len(node_spec) >= 5:
+ node = ContinuousBayesNode(*node_spec)
+ else:
+ node = BayesNode(*node_spec)
+ assert node.variable not in self.variables
+ assert all((parent in self.variables) for parent in node.parents)
+ self.nodes.append(node)
+ self.variables.append(node.variable)
+ for parent in node.parents:
+ self.variable_node(parent).children.append(node)
+
+ def variable_node(self, var):
+ """
+ Return the node for the variable named var.
+ >>> burglary.variable_node('Burglary').variable
+ 'Burglary'
+ """
+ for n in self.nodes:
+ if n.variable == var:
+ return n
+ raise Exception("No such variable: {}".format(var))
+
+ def variable_values(self, var):
+ """Return the domain of var."""
+ return [True, False]
+
+ def __repr__(self):
+ return 'BayesNet({0!r})'.format(self.nodes)
+
+
+class BayesNode:
+ """
+ A conditional probability distribution for a boolean variable,
+ P(X | parents). Part of a BayesNet.
+ """
+
+ def __init__(self, X, parents, cpt):
+ """
+ :param X: variable name,
+        :param parents: a sequence of variable names, or a space-separated string, giving the names of the parent nodes
+ :param cpt: the conditional probability table, takes one of these forms:
+
+ * A number, the unconditional probability P(X=true). You can
+ use this form when there are no parents.
+
+ * A dict {v: p, ...}, the conditional probability distribution
+ P(X=true | parent=v) = p. When there's just one parent.
+
+ * A dict {(v1, v2, ...): p, ...}, the distribution P(X=true |
+ parent1=v1, parent2=v2, ...) = p. Each key must have as many
+ values as there are parents. You can use this form always;
+ the first two are just conveniences.
+
+ In all cases the probability of X being false is left implicit,
+ since it follows from P(X=true).
+
+ >>> X = BayesNode('X', '', 0.2)
+ >>> Y = BayesNode('Y', 'P', {T: 0.2, F: 0.7})
+ >>> Z = BayesNode('Z', 'P Q',
+ ... {(T, T): 0.2, (T, F): 0.3, (F, T): 0.5, (F, F): 0.7})
+ """
+ if isinstance(parents, str):
+ parents = parents.split()
+
+ # We store the table always in the third form above.
+ if isinstance(cpt, (float, int)): # no parents, 0-tuple
+ cpt = {(): cpt}
+ elif isinstance(cpt, dict):
+ # one parent, 1-tuple
+ if cpt and isinstance(list(cpt.keys())[0], bool):
+ cpt = {(v,): p for v, p in cpt.items()}
+
+ assert isinstance(cpt, dict)
+ for vs, p in cpt.items():
+ assert isinstance(vs, tuple) and len(vs) == len(parents)
+ assert all(isinstance(v, bool) for v in vs)
+ assert 0 <= p <= 1
+
+ self.variable = X
+ self.parents = parents
+ self.cpt = cpt
+ self.children = []
+
+ def p(self, value, event):
+ """
+ Return the conditional probability
+ P(X=value | parents=parent_values), where parent_values
+ are the values of parents in event. (event must assign each
+ parent a value.)
+ >>> bn = BayesNode('X', 'Burglary', {T: 0.2, F: 0.625})
+ >>> bn.p(False, {'Burglary': False, 'Earthquake': True})
+ 0.375
+ """
+ assert isinstance(value, bool)
+ ptrue = self.cpt[event_values(event, self.parents)]
+ return ptrue if value else 1 - ptrue
+
+ def sample(self, event):
+ """
+ Sample from the distribution for this variable conditioned
+ on event's values for parent_variables. That is, return True/False
+ at random according with the conditional probability given the
+ parents.
+ """
+ return probability(self.p(True, event))
+
+ def __repr__(self):
+ return repr((self.variable, ' '.join(self.parents)))
+
+
+# Burglary example [Figure 13.2]
+
+
+T, F = True, False
+
+burglary = BayesNet([
+ ('Burglary', '', 0.001),
+ ('Earthquake', '', 0.002),
+ ('Alarm', 'Burglary Earthquake',
+ {(T, T): 0.95, (T, F): 0.94, (F, T): 0.29, (F, F): 0.001}),
+ ('JohnCalls', 'Alarm', {T: 0.90, F: 0.05}),
+ ('MaryCalls', 'Alarm', {T: 0.70, F: 0.01})
+])
+
+
+# ______________________________________________________________________________
+# Section 13.2. The Semantics of Bayesian Networks
+# Bayesian nets with continuous variables
+
+
+def gaussian_probability(param, event, value):
+ """
+    Gaussian probability density of a continuous Bayesian network node,
+    conditioned on a given event and on the parameters that event selects
+    :param param: parameters determined by the discrete parent events of the current node
+    :param event: a dict, the continuous event of the current node; its values are used
+    as parameters in calculating the distribution
+    :param value: float, the value of the current continuous node
+    :return: float, the calculated probability density
+ >>> param = {'sigma':0.5, 'b':1, 'a':{'h1':0.5, 'h2': 1.5}}
+ >>> event = {'h1':0.6, 'h2': 0.3}
+ >>> gaussian_probability(param, event, 1)
+ 0.2590351913317835
+ """
+
+ assert isinstance(event, dict)
+ assert isinstance(param, dict)
+    buff = 0
+    for k, v in event.items():
+        # buffer variable accumulating h1*a_h1 + h2*a_h2 + ... over the parents
+        buff += param['a'][k] * v
+    res = 1 / (param['sigma'] * np.sqrt(2 * np.pi)) * \
+        np.exp(-0.5 * ((value - buff - param['b']) / param['sigma']) ** 2)
+ return res
+
+
+def logistic_probability(param, event, value):
+ """
+    Logistic probability of a discrete node in a Bayesian network with continuous parents.
+    :param param: a dict, parameters determined by the discrete parents of the current node
+    :param event: a dict, names and values of the continuous parent variables of the current node
+    :param value: boolean, True or False
+    :return: float, the calculated probability
+ """
+
+    buff = 1
+    for _, v in event.items():
+        # buffer variable accumulating the product of (v - mu) / sigma over the parents
+        buff *= (v - param['mu']) / param['sigma']
+    p = 1 - 1 / (1 + np.exp(-4 / np.sqrt(2 * np.pi) * buff))
+ return p if value else 1 - p
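+
+# Usage sketch (hypothetical parameter values, not taken from the book): the
+# call below would return P(Buys=true | Cost=1.0) under a logit whose mu and
+# sigma were selected by the node's discrete parents.
+# >>> logistic_probability({'mu': 0.5, 'sigma': 0.5}, {'Cost': 1.0}, True)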
+
+
+class ContinuousBayesNode:
+ """ A Bayesian network node with continuous distribution or with continuous distributed parents """
+
+ def __init__(self, name, d_parents, c_parents, parameters, type):
+ """
+ A continuous Bayesian node has two types of parents: discrete and continuous.
+        :param d_parents: str, space-separated names of the discrete parents, whose values determine the distribution parameters
+        :param c_parents: str, space-separated names of the continuous parents, whose values are used to calculate the distribution
+        :param parameters: a dict of distribution parameters for the current node, keyed by the values of the discrete parents
+        :param type: str, type of the current node's value, either 'd' (discrete) or 'c' (continuous)
+ """
+
+ self.parameters = parameters
+ self.type = type
+ self.d_parents = d_parents.split()
+ self.c_parents = c_parents.split()
+ self.parents = self.d_parents + self.c_parents
+ self.variable = name
+ self.children = []
+
+ def continuous_p(self, value, c_event, d_event):
+ """
+ Probability given the value of current node and its parents
+ :param c_event: event of continuous nodes
+ :param d_event: event of discrete nodes
+ """
+ assert isinstance(c_event, dict)
+ assert isinstance(d_event, dict)
+
+ d_event_vals = event_values(d_event, self.d_parents)
+ if len(d_event_vals) == 1:
+ d_event_vals = d_event_vals[0]
+ param = self.parameters[d_event_vals]
+ if self.type == "c":
+ p = gaussian_probability(param, c_event, value)
+ if self.type == "d":
+ p = logistic_probability(param, c_event, value)
+ return p
+
+
+# harvest_buy example [Figure 13.5]
+
+
+harvest_buy = BayesNet([
+ ('Subsidy', '', 0.001),
+ ('Harvest', '', 0.002),
+ ('Cost', 'Subsidy', 'Harvest',
+ {True: {'sigma': 0.5, 'b': 1, 'a': {'Harvest': 0.5}},
+ False: {'sigma': 0.6, 'b': 1, 'a': {'Harvest': 0.5}}}, 'c'),
+ ('Buys', '', 'Cost', {T: {'mu': 0.5, 'sigma': 0.5}, F: {'mu': 0.6, 'sigma': 0.6}}, 'd')])
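+
+# A sketch of querying a continuous node directly, with the Cost parameters
+# from the harvest_buy example above: the probability density of Cost = 1
+# given Harvest = 0.5 and Subsidy = true.
+# >>> cost_node = ContinuousBayesNode(
+# ...     'Cost', 'Subsidy', 'Harvest',
+# ...     {True: {'sigma': 0.5, 'b': 1, 'a': {'Harvest': 0.5}},
+# ...      False: {'sigma': 0.6, 'b': 1, 'a': {'Harvest': 0.5}}}, 'c')
+# >>> cost_node.continuous_p(1, {'Harvest': 0.5}, {'Subsidy': True})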
+
+
+# ______________________________________________________________________________
+# 13.3 Exact Inference in Bayesian Networks
+# 13.3.1 Inference by enumeration
+
+
+def enumeration_ask(X, e, bn):
+ """
+ Return the conditional probability distribution of variable X
+ given evidence e, from BayesNet bn. [Figure 13.10]
+ >>> enumeration_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary
+ ... ).show_approx()
+ 'False: 0.716, True: 0.284'
+ """
+
+ assert X not in e, "Query variable must be distinct from evidence"
+ Q = ProbDist(X)
+ for xi in bn.variable_values(X):
+ Q[xi] = enumerate_all(bn.variables, extend(e, X, xi), bn)
+ return Q.normalize()
+
+
+def enumerate_all(variables, e, bn):
+ """
+ Return the sum of those entries in P(variables | e{others})
+ consistent with e, where P is the joint distribution represented
+ by bn, and e{others} means e restricted to bn's other variables
+ (the ones other than variables). Parents must precede children in variables.
+ """
+
+ if not variables:
+ return 1.0
+ Y, rest = variables[0], variables[1:]
+ Ynode = bn.variable_node(Y)
+ if Y in e:
+ return Ynode.p(e[Y], e) * enumerate_all(rest, e, bn)
+ else:
+ return sum(Ynode.p(y, e) * enumerate_all(rest, extend(e, Y, y), bn)
+ for y in bn.variable_values(Y))
+
+
+# ______________________________________________________________________________
+# 13.3.2 The variable elimination algorithm
+
+
+def elimination_ask(X, e, bn):
+ """
+ Compute bn's P(X|e) by variable elimination. [Figure 13.12]
+ >>> elimination_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary
+ ... ).show_approx()
+ 'False: 0.716, True: 0.284'
+ """
+ assert X not in e, "Query variable must be distinct from evidence"
+ factors = []
+ for var in reversed(bn.variables):
+ factors.append(make_factor(var, e, bn))
+ if is_hidden(var, X, e):
+ factors = sum_out(var, factors, bn)
+ return pointwise_product(factors, bn).normalize()
+
+
+def is_hidden(var, X, e):
+ """Is var a hidden variable when querying P(X|e)?"""
+ return var != X and var not in e
+
+
+def make_factor(var, e, bn):
+ """
+ Return the factor for var in bn's joint distribution given e.
+ That is, bn's full joint distribution, projected to accord with e,
+ is the pointwise product of these factors for bn's variables.
+ """
+ node = bn.variable_node(var)
+ variables = [X for X in [var] + node.parents if X not in e]
+ cpt = {event_values(e1, variables): node.p(e1[var], e1)
+ for e1 in all_events(variables, bn, e)}
+ return Factor(variables, cpt)
+
+
+def pointwise_product(factors, bn):
+ return reduce(lambda f, g: f.pointwise_product(g, bn), factors)
+
+
+def sum_out(var, factors, bn):
+ """Eliminate var from all factors by summing over its values."""
+ result, var_factors = [], []
+ for f in factors:
+ (var_factors if var in f.variables else result).append(f)
+ result.append(pointwise_product(var_factors, bn).sum_out(var, bn))
+ return result
+
+
+class Factor:
+ """A factor in a joint distribution."""
+
+ def __init__(self, variables, cpt):
+ self.variables = variables
+ self.cpt = cpt
+
+ def pointwise_product(self, other, bn):
+ """Multiply two factors, combining their variables."""
+ variables = list(set(self.variables) | set(other.variables))
+ cpt = {event_values(e, variables): self.p(e) * other.p(e)
+ for e in all_events(variables, bn, {})}
+ return Factor(variables, cpt)
+
+ def sum_out(self, var, bn):
+ """Make a factor eliminating var by summing over its values."""
+ variables = [X for X in self.variables if X != var]
+ cpt = {event_values(e, variables): sum(self.p(extend(e, var, val))
+ for val in bn.variable_values(var))
+ for e in all_events(variables, bn, {})}
+ return Factor(variables, cpt)
+
+ def normalize(self):
+ """Return my probabilities; must be down to one variable."""
+ assert len(self.variables) == 1
+ return ProbDist(self.variables[0],
+ {k: v for ((k,), v) in self.cpt.items()})
+
+ def p(self, e):
+ """Look up my value tabulated for e."""
+ return self.cpt[event_values(e, self.variables)]
+
+
+def all_events(variables, bn, e):
+ """Yield every way of extending e with values for all variables."""
+ if not variables:
+ yield e
+ else:
+ X, rest = variables[0], variables[1:]
+ for e1 in all_events(rest, bn, e):
+ for x in bn.variable_values(X):
+ yield extend(e1, X, x)
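+
+# A sketch of how the factor machinery composes: a factor built for an
+# evidence variable keeps only its hidden neighbours as dimensions.
+# >>> f = make_factor('MaryCalls', {'MaryCalls': T}, burglary)
+# >>> f.variables
+# ['Alarm']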
+
+
+# ______________________________________________________________________________
+# 13.3.4 Clustering algorithms
+# [Figure 13.14a]: sprinkler network
+
+
+sprinkler = BayesNet([
+ ('Cloudy', '', 0.5),
+ ('Sprinkler', 'Cloudy', {T: 0.10, F: 0.50}),
+ ('Rain', 'Cloudy', {T: 0.80, F: 0.20}),
+ ('WetGrass', 'Sprinkler Rain',
+ {(T, T): 0.99, (T, F): 0.90, (F, T): 0.90, (F, F): 0.00})])
+
+
+# ______________________________________________________________________________
+# 13.4 Approximate Inference for Bayesian Networks
+# 13.4.1 Direct sampling methods
+
+
+def prior_sample(bn):
+ """
+ Randomly sample from bn's full joint distribution. The result
+ is a {variable: value} dict. [Figure 13.15]
+ """
+ event = {}
+ for node in bn.nodes:
+ event[node.variable] = node.sample(event)
+ return event
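+
+# For example, P(WetGrass=true) in the sprinkler network can be estimated by
+# counting prior samples (a sketch; the estimate varies with the random seed
+# but should land near 0.65):
+# >>> samples = [prior_sample(sprinkler) for _ in range(10000)]
+# >>> sum(s['WetGrass'] for s in samples) / 10000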
+
+
+# _________________________________________________________________________
+
+
+def rejection_sampling(X, e, bn, N=10000):
+ """
+ [Figure 13.16]
+ Estimate the probability distribution of variable X given
+ evidence e in BayesNet bn, using N samples.
+ Raises a ZeroDivisionError if all the N samples are rejected,
+ i.e., inconsistent with e.
+ >>> random.seed(47)
+ >>> rejection_sampling('Burglary', dict(JohnCalls=T, MaryCalls=T),
+ ... burglary, 10000).show_approx()
+ 'False: 0.7, True: 0.3'
+ """
+ counts = {x: 0 for x in bn.variable_values(X)} # bold N in [Figure 13.16]
+ for j in range(N):
+ sample = prior_sample(bn) # boldface x in [Figure 13.16]
+ if consistent_with(sample, e):
+ counts[sample[X]] += 1
+ return ProbDist(X, counts)
+
+
+def consistent_with(event, evidence):
+ """Is event consistent with the given evidence?"""
+ return all(evidence.get(k, v) == v
+ for k, v in event.items())
+
+
+# _________________________________________________________________________
+
+
+def likelihood_weighting(X, e, bn, N=10000):
+ """
+ [Figure 13.17]
+ Estimate the probability distribution of variable X given
+ evidence e in BayesNet bn.
+ >>> random.seed(1017)
+ >>> likelihood_weighting('Burglary', dict(JohnCalls=T, MaryCalls=T),
+ ... burglary, 10000).show_approx()
+ 'False: 0.702, True: 0.298'
+ """
+
+ W = {x: 0 for x in bn.variable_values(X)}
+ for j in range(N):
+        sample, weight = weighted_sample(bn, e)  # boldface x, w in [Figure 13.17]
+ W[sample[X]] += weight
+ return ProbDist(X, W)
+
+
+def weighted_sample(bn, e):
+ """
+ Sample an event from bn that's consistent with the evidence e;
+ return the event and its weight, the likelihood that the event
+ accords to the evidence.
+ """
+
+ w = 1
+ event = dict(e) # boldface x in [Figure 13.17]
+ for node in bn.nodes:
+ Xi = node.variable
+ if Xi in e:
+ w *= node.p(e[Xi], event)
+ else:
+ event[Xi] = node.sample(event)
+ return event, w
+
+
+# _________________________________________________________________________
+# 13.4.2 Inference by Markov chain simulation
+
+
+def gibbs_ask(X, e, bn, N=1000):
+ """[Figure 13.19]"""
+ assert X not in e, "Query variable must be distinct from evidence"
+    counts = {x: 0 for x in bn.variable_values(X)}  # bold N in [Figure 13.19]
+    Z = [var for var in bn.variables if var not in e]
+    state = dict(e)  # boldface x in [Figure 13.19]
+ for Zi in Z:
+ state[Zi] = random.choice(bn.variable_values(Zi))
+ for j in range(N):
+ for Zi in Z:
+ state[Zi] = markov_blanket_sample(Zi, state, bn)
+ counts[state[X]] += 1
+ return ProbDist(X, counts)
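+
+# Usage sketch (the estimate varies with the random seed and sample count):
+# >>> gibbs_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary, 1000).show_approx()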
+
+
+def markov_blanket_sample(X, e, bn):
+ """
+ Return a sample from P(X | mb) where mb denotes that the
+ variables in the Markov blanket of X take their values from event
+ e (which must assign a value to each). The Markov blanket of X is
+ X's parents, children, and children's parents.
+ """
+ Xnode = bn.variable_node(X)
+ Q = ProbDist(X)
+ for xi in bn.variable_values(X):
+ ei = extend(e, X, xi)
+ # [Equation 13.12:]
+ Q[xi] = Xnode.p(xi, e) * product(Yj.p(ei[Yj.variable], ei)
+ for Yj in Xnode.children)
+ # (assuming a Boolean variable here)
+ return probability(Q.normalize()[True])
+
+
+# _________________________________________________________________________
+# 13.4.3 Compiling approximate inference
+
+
+class compiled_burglary:
+    """Compiled version of the burglary network."""
+
+ def Burglary(self, sample):
+ if sample['Alarm']:
+ if sample['Earthquake']:
+ return probability(0.00327)
+ else:
+ return probability(0.485)
+ else:
+ if sample['Earthquake']:
+ return probability(7.05e-05)
+ else:
+ return probability(6.01e-05)
+
+ def Earthquake(self, sample):
+ if sample['Alarm']:
+ if sample['Burglary']:
+ return probability(0.0020212)
+ else:
+ return probability(0.36755)
+ else:
+ if sample['Burglary']:
+ return probability(0.0016672)
+ else:
+ return probability(0.0014222)
+
+ def MaryCalls(self, sample):
+ if sample['Alarm']:
+ return probability(0.7)
+ else:
+ return probability(0.01)
+
+    def JohnCalls(self, sample):
+ if sample['Alarm']:
+ return probability(0.9)
+ else:
+ return probability(0.05)
+
+ def Alarm(self, sample):
+ raise NotImplementedError
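+
+# Usage sketch: each method plays the role of markov_blanket_sample with the
+# conditional probabilities precomputed, e.g. resampling MaryCalls given the
+# current state of the chain:
+# >>> cb = compiled_burglary()
+# >>> cb.MaryCalls({'Alarm': True})  # True with probability 0.7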
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 000000000..1561b6fe6
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,5 @@
+[pytest]
+filterwarnings =
+ ignore::DeprecationWarning
+ ignore::UserWarning
+ ignore::RuntimeWarning
diff --git a/reinforcement_learning.ipynb b/reinforcement_learning.ipynb
new file mode 100644
index 000000000..ee3b6a5eb
--- /dev/null
+++ b/reinforcement_learning.ipynb
@@ -0,0 +1,644 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Reinforcement Learning\n",
+ "\n",
+ "This Jupyter notebook acts as supporting material for **Chapter 21 Reinforcement Learning** of the book* Artificial Intelligence: A Modern Approach*. This notebook makes use of the implementations in `rl.py` module. We also make use of implementation of MDPs in the `mdp.py` module to test our agents. It might be helpful if you have already gone through the Jupyter notebook dealing with Markov decision process. Let us import everything from the `rl` module. It might be helpful to view the source of some of our implementations. Please refer to the Introductory Jupyter notebook for more details."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "from reinforcement_learning import *"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## CONTENTS\n",
+ "\n",
+ "* Overview\n",
+ "* Passive Reinforcement Learning\n",
+ " - Direct Utility Estimation\n",
+ " - Adaptive Dynamic Programming\n",
+ " - Temporal-Difference Agent\n",
+ "* Active Reinforcement Learning\n",
+ " - Q learning"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "collapsed": true
+ },
+ "source": [
+ "## OVERVIEW\n",
+ "\n",
+ "Before we start playing with the actual implementations let us review a couple of things about RL.\n",
+ "\n",
+ "1. Reinforcement Learning is concerned with how software agents ought to take actions in an environment so as to maximize some notion of cumulative reward. \n",
+ "\n",
+ "2. Reinforcement learning differs from standard supervised learning in that correct input/output pairs are never presented, nor sub-optimal actions explicitly corrected. Further, there is a focus on on-line performance, which involves finding a balance between exploration (of uncharted territory) and exploitation (of current knowledge).\n",
+ "\n",
+ "-- Source: [Wikipedia](https://en.wikipedia.org/wiki/Reinforcement_learning)\n",
+ "\n",
+ "In summary we have a sequence of state action transitions with rewards associated with some states. Our goal is to find the optimal policy $\\pi$ which tells us what action to take in each state."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## PASSIVE REINFORCEMENT LEARNING\n",
+ "\n",
+ "In passive Reinforcement Learning the agent follows a fixed policy $\\pi$. Passive learning attempts to evaluate the given policy $pi$ - without any knowledge of the Reward function $R(s)$ and the Transition model $P(s'\\ |\\ s, a)$.\n",
+ "\n",
+ "This is usually done by some method of **utility estimation**. The agent attempts to directly learn the utility of each state that would result from following the policy. Note that at each step, it has to *perceive* the reward and the state - it has no global knowledge of these. Thus, if a certain the entire set of actions offers a very low probability of attaining some state $s_+$ - the agent may never perceive the reward $R(s_+)$.\n",
+ "\n",
+ "Consider a situation where an agent is given a policy to follow. Thus, at any point it knows only its current state and current reward, and the action it must take next. This action may lead it to more than one state, with different probabilities.\n",
+ "\n",
+ "For a series of actions given by $\\pi$, the estimated utility $U$:\n",
+ "$$U^{\\pi}(s) = E(\\sum_{t=0}^\\inf \\gamma^t R^t(s')$$)\n",
+ "Or the expected value of summed discounted rewards until termination.\n",
+ "\n",
+ "Based on this concept, we discuss three methods of estimating utility:\n",
+ "\n",
+ "1. **Direct Utility Estimation (DUE)**\n",
+ " \n",
+ " The first, most naive method of estimating utility comes from the simplest interpretation of the above definition. We construct an agent that follows the policy until it reaches the terminal state. At each step, it logs its current state, reward. Once it reaches the terminal state, it can estimate the utility for each state for *that* iteration, by simply summing the discounted rewards from that state to the terminal one.\n",
+ "\n",
+ " It can now run this 'simulation' $n$ times, and calculate the average utility of each state. If a state occurs more than once in a simulation, both its utility values are counted separately.\n",
+ " \n",
+ " Note that this method may be prohibitively slow for very large statespaces. Besides, **it pays no attention to the transition probability $P(s'\\ |\\ s, a)$.** It misses out on information that it is capable of collecting (say, by recording the number of times an action from one state led to another state). The next method addresses this issue.\n",
+ " \n",
+ "2. **Adaptive Dynamic Programming (ADP)**\n",
+ " \n",
+ " This method makes use of knowledge of the past state $s$, the action $a$, and the new perceived state $s'$ to estimate the transition probability $P(s'\\ |\\ s,a)$. It does this by the simple counting of new states resulting from previous states and actions. \n",
+ " The program runs through the policy a number of times, keeping track of:\n",
+ " - each occurrence of state $s$ and the policy-recommended action $a$ in $N_{sa}$\n",
+ " - each occurrence of $s'$ resulting from $a$ on $s$ in $N_{s'|sa}$.\n",
+ " \n",
+ " It can thus estimate $P(s'\\ |\\ s,a)$ as $N_{s'|sa}/N_{sa}$, which in the limit of infinite trials, will converge to the true value. \n",
+ " Using the transition probabilities thus estimated, it can apply `POLICY-EVALUATION` to estimate the utilities $U(s)$ using properties of convergence of the Bellman functions.\n",
+ "\n",
+ "3. **Temporal-difference learning (TD)**\n",
+ " \n",
+ " Instead of explicitly building the transition model $P$, the temporal-difference model makes use of the expected closeness between the utilities of two consecutive states $s$ and $s'$.\n",
+ " For the transition $s$ to $s'$, the update is written as:\n",
+ "$$U^{\\pi}(s) \\leftarrow U^{\\pi}(s) + \\alpha \\left( R(s) + \\gamma U^{\\pi}(s') - U^{\\pi}(s) \\right)$$\n",
+ " This model implicitly incorporates the transition probabilities by being weighed for each state by the number of times it is achieved from the current state. Thus, over a number of iterations, it converges similarly to the Bellman equations.\n",
+ " The advantage of the TD learning model is its relatively simple computation at each step, rather than having to keep track of various counts.\n",
+ " For $n_s$ states and $n_a$ actions the ADP model would have $n_s \\times n_a$ numbers $N_{sa}$ and $n_s^2 \\times n_a$ numbers $N_{s'|sa}$ to keep track of. The TD model must only keep track of a utility $U(s)$ for each state."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Demonstrating Passive agents\n",
+ "\n",
+ "Passive agents are implemented in `rl.py` as various `Agent-Class`es.\n",
+ "\n",
+ "To demonstrate these agents, we make use of the `GridMDP` object from the `MDP` module. `sequential_decision_environment` is similar to that used for the `MDP` notebook but has discounting with $\\gamma = 0.9$.\n",
+ "\n",
+ "The `Agent-Program` can be obtained by creating an instance of the relevant `Agent-Class`. The `__call__` method allows the `Agent-Class` to be called as a function. The class needs to be instantiated with a policy ($\\pi$) and an `MDP` whose utility of states will be estimated."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "from mdp import sequential_decision_environment"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The `sequential_decision_environment` is a GridMDP object as shown below. The rewards are **+1** and **-1** in the terminal states, and **-0.04** in the rest. Now we define actions and a policy similar to **Fig 21.1** in the book."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "# Action Directions\n",
+ "north = (0, 1)\n",
+ "south = (0,-1)\n",
+ "west = (-1, 0)\n",
+ "east = (1, 0)\n",
+ "\n",
+ "policy = {\n",
+ " (0, 2): east, (1, 2): east, (2, 2): east, (3, 2): None,\n",
+ " (0, 1): north, (2, 1): north, (3, 1): None,\n",
+ " (0, 0): north, (1, 0): west, (2, 0): west, (3, 0): west, \n",
+ "}\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Direction Utility Estimation Agent\n",
+ "\n",
+ "The `PassiveDEUAgent` class in the `rl` module implements the Agent Program described in **Fig 21.2** of the AIMA Book. `PassiveDEUAgent` sums over rewards to find the estimated utility for each state. It thus requires the running of a number of iterations."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "%psource PassiveDUEAgent"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "DUEagent = PassiveDUEAgent(policy, sequential_decision_environment)\n",
+ "for i in range(200):\n",
+ " run_single_trial(DUEagent, sequential_decision_environment)\n",
+ " DUEagent.estimate_U()\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The calculated utilities are:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print('\\n'.join([str(k)+':'+str(v) for k, v in DUEagent.U.items()]))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Adaptive Dynamic Programming Agent\n",
+ "\n",
+ "The `PassiveADPAgent` class in the `rl` module implements the Agent Program described in **Fig 21.2** of the AIMA Book. `PassiveADPAgent` uses state transition and occurrence counts to estimate $P$, and then $U$. Go through the source below to understand the agent."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "%psource PassiveADPAgent"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We instantiate a `PassiveADPAgent` below with the `GridMDP` shown and train it over 200 iterations. The `rl` module has a simple implementation to simulate iterations. The function is called **run_single_trial**."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [],
+ "source": [
+ "ADPagent = PassiveADPAgent(policy, sequential_decision_environment)\n",
+ "for i in range(200):\n",
+ " run_single_trial(ADPagent, sequential_decision_environment)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The calculated utilities are:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [],
+ "source": [
+ "print('\\n'.join([str(k)+':'+str(v) for k, v in ADPagent.U.items()]))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Passive Temporal Difference Agent\n",
+ "\n",
+ "`PassiveTDAgent` uses temporal differences to learn utility estimates. We learn the difference between the states and backup the values to previous states. Let us look into the source before we see some usage examples."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "%psource PassiveTDAgent"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In creating the `TDAgent`, we use the **same learning rate** $\\alpha$ as given in the footnote of the book on **page 837**."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "TDagent = PassiveTDAgent(policy, sequential_decision_environment, alpha = lambda n: 60./(59+n))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now we run **200 trials** for the agent to estimate Utilities."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "for i in range(200):\n",
+ " run_single_trial(TDagent,sequential_decision_environment)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The calculated utilities are:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print('\\n'.join([str(k)+':'+str(v) for k, v in TDagent.U.items()]))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Comparison with value iteration method\n",
+ "\n",
+ "We can also compare the utility estimates learned by our agent to those obtained via **value iteration**.\n",
+ "\n",
+ "**Note that value iteration has a priori knowledge of the transition table $P$, the rewards $R$, and all the states $s$.**"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "from mdp import value_iteration"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The values calculated by value iteration:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "U_values = value_iteration(sequential_decision_environment)\n",
+ "print('\\n'.join([str(k)+':'+str(v) for k, v in U_values.items()]))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Evolution of utility estimates over iterations\n",
+ "\n",
+ "We can explore how these estimates vary with time by using plots similar to **Fig 21.5a**. We will first enable matplotlib using the inline backend. We also define a function to collect the values of utilities at each iteration."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "%matplotlib inline\n",
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "def graph_utility_estimates(agent_program, mdp, no_of_iterations, states_to_graph):\n",
+ " graphs = {state:[] for state in states_to_graph}\n",
+ " for iteration in range(1,no_of_iterations+1):\n",
+ " run_single_trial(agent_program, mdp)\n",
+ " for state in states_to_graph:\n",
+ " graphs[state].append((iteration, agent_program.U[state]))\n",
+ " for state, value in graphs.items():\n",
+ " state_x, state_y = zip(*value)\n",
+ " plt.plot(state_x, state_y, label=str(state))\n",
+ " plt.ylim([0,1.2])\n",
+ " plt.legend(loc='lower right')\n",
+ " plt.xlabel('Iterations')\n",
+ " plt.ylabel('U')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Here is a plot of state $(2,2)$."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "agent = PassiveTDAgent(policy, sequential_decision_environment, alpha=lambda n: 60./(59+n))\n",
+ "graph_utility_estimates(agent, sequential_decision_environment, 500, [(2,2)])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "It is also possible to plot multiple states on the same plot. As expected, the utility of the finite state $(3,2)$ stays constant and is equal to $R((3,2)) = 1$."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "graph_utility_estimates(agent, sequential_decision_environment, 500, [(2,2), (3,2)])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "collapsed": true
+ },
+ "source": [
+ "## ACTIVE REINFORCEMENT LEARNING\n",
+ "\n",
+ "Unlike Passive Reinforcement Learning in Active Reinforcement Learning we are not bound by a policy pi and we need to select our actions. In other words the agent needs to learn an optimal policy. The fundamental tradeoff the agent needs to face is that of exploration vs. exploitation. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### QLearning Agent\n",
+ "\n",
+ "The QLearningAgent class in the rl module implements the Agent Program described in **Fig 21.8** of the AIMA Book. In Q-Learning the agent learns an action-value function Q which gives the utility of taking a given action in a particular state. Q-Learning does not required a transition model and hence is a model free method. Let us look into the source before we see some usage examples."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "%psource QLearningAgent"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The Agent Program can be obtained by creating the instance of the class by passing the appropriate parameters. Because of the __ call __ method the object that is created behaves like a callable and returns an appropriate action as most Agent Programs do. To instantiate the object we need a mdp similar to the PassiveTDAgent.\n",
+ "\n",
+ " Let us use the same GridMDP object we used above. **Figure 17.1 (sequential_decision_environment)** is similar to **Figure 21.1** but has some discounting as **gamma = 0.9**. The class also implements an exploration function **f** which returns fixed **Rplus** until agent has visited state, action **Ne** number of times. This is the same as the one defined on page **842** of the book. The method **actions_in_state** returns actions possible in given state. It is useful when applying max and argmax operations."
+ ]
+ },
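+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# A minimal sketch of the exploration function f described above; Ne and\n",
+    "# Rplus here are the values used later in this notebook (Ne=5, Rplus=2),\n",
+    "# shown as assumed defaults. It returns the optimistic estimate Rplus\n",
+    "# until a (state, action) pair has been tried Ne times, then the learnt u.\n",
+    "def f(u, n, Ne=5, Rplus=2):\n",
+    "    return Rplus if n < Ne else u"
+   ]
+  },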
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let us create our object now. We also use the **same alpha** as given in the footnote of the book on **page 837**. We use **Rplus = 2** and **Ne = 5** as defined on page 843. **Fig 21.7** "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "q_agent = QLearningAgent(sequential_decision_environment, Ne=5, Rplus=2, \n",
+ " alpha=lambda n: 60./(59+n))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now to try out the q_agent we make use of the **run_single_trial** function in rl.py (which was also used above). Let us use **200** iterations."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "for i in range(200):\n",
+ " run_single_trial(q_agent,sequential_decision_environment)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now let us see the Q Values. The keys are state-action pairs. Where different actions correspond according to:\n",
+ "\n",
+ "north = (0, 1)\n",
+ "south = (0,-1)\n",
+ "west = (-1, 0)\n",
+ "east = (1, 0)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "q_agent.Q"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The Utility **U** of each state is related to **Q** by the following equation.\n",
+ "\n",
+ "**U (s) = max a Q(s, a)**\n",
+ "\n",
+ "Let us convert the Q Values above into U estimates.\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "U = defaultdict(lambda: -1000.) # Very Large Negative Value for Comparison see below.\n",
+ "for state_action, value in q_agent.Q.items():\n",
+ " state, action = state_action\n",
+ " if U[state] < value:\n",
+ " U[state] = value"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "U"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let us finally compare these estimates to value_iteration results."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(value_iteration(sequential_decision_environment))"
+ ]
+  }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.3"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}
\ No newline at end of file
diff --git a/reinforcement_learning.py b/reinforcement_learning.py
new file mode 100644
index 000000000..4cb91af0f
--- /dev/null
+++ b/reinforcement_learning.py
@@ -0,0 +1,337 @@
+"""Reinforcement Learning (Chapter 21)"""
+
+import random
+from collections import defaultdict
+
+from mdp import MDP, policy_evaluation
+
+
+class PassiveDUEAgent:
+ """
+ Passive (non-learning) agent that uses direct utility estimation
+ on a given MDP and policy.
+
+ import sys
+ from mdp import sequential_decision_environment
+ north = (0, 1)
+ south = (0,-1)
+ west = (-1, 0)
+ east = (1, 0)
+ policy = {(0, 2): east, (1, 2): east, (2, 2): east, (3, 2): None, (0, 1): north, (2, 1): north,
+ (3, 1): None, (0, 0): north, (1, 0): west, (2, 0): west, (3, 0): west,}
+ agent = PassiveDUEAgent(policy, sequential_decision_environment)
+ for i in range(200):
+ run_single_trial(agent,sequential_decision_environment)
+ agent.estimate_U()
+ agent.U[(0, 0)] > 0.2
+ True
+ """
+
+ def __init__(self, pi, mdp):
+ self.pi = pi
+ self.mdp = mdp
+ self.U = {}
+ self.s = None
+ self.a = None
+ self.s_history = []
+ self.r_history = []
+ self.init = mdp.init
+
+ def __call__(self, percept):
+ s1, r1 = percept
+ self.s_history.append(s1)
+ self.r_history.append(r1)
+ if s1 in self.mdp.terminals:
+ self.s = self.a = None
+ else:
+ self.s, self.a = s1, self.pi[s1]
+ return self.a
+
+ def estimate_U(self):
+ # this function can be called only if the MDP has reached a terminal state
+ # it will also reset the mdp history
+ assert self.a is None, 'MDP is not in terminal state'
+ assert len(self.s_history) == len(self.r_history)
+ # calculating the utilities based on the current iteration
+ U2 = {s: [] for s in set(self.s_history)}
+ for i in range(len(self.s_history)):
+ s = self.s_history[i]
+ U2[s] += [sum(self.r_history[i:])]
+ U2 = {k: sum(v) / max(len(v), 1) for k, v in U2.items()}
+ # resetting history
+ self.s_history, self.r_history = [], []
+ # setting the new utilities to the average of the previous
+ # iteration and this one
+ for k in U2.keys():
+ if k in self.U.keys():
+ self.U[k] = (self.U[k] + U2[k]) / 2
+ else:
+ self.U[k] = U2[k]
+ return self.U
+
+ def update_state(self, percept):
+ """To be overridden in most cases. The default case
+ assumes the percept to be of type (state, reward)"""
+ return percept
+
+
+class PassiveADPAgent:
+ """
+ [Figure 21.2]
+ Passive (non-learning) agent that uses adaptive dynamic programming
+ on a given MDP and policy.
+
+ import sys
+ from mdp import sequential_decision_environment
+ north = (0, 1)
+ south = (0,-1)
+ west = (-1, 0)
+ east = (1, 0)
+ policy = {(0, 2): east, (1, 2): east, (2, 2): east, (3, 2): None, (0, 1): north, (2, 1): north,
+ (3, 1): None, (0, 0): north, (1, 0): west, (2, 0): west, (3, 0): west,}
+ agent = PassiveADPAgent(policy, sequential_decision_environment)
+ for i in range(100):
+ run_single_trial(agent,sequential_decision_environment)
+
+ agent.U[(0, 0)] > 0.2
+ True
+ agent.U[(0, 1)] > 0.2
+ True
+ """
+
+ class ModelMDP(MDP):
+ """Class for implementing modified Version of input MDP with
+ an editable transition model P and a custom function T."""
+
+ def __init__(self, init, actlist, terminals, gamma, states):
+ super().__init__(init, actlist, terminals, states=states, gamma=gamma)
+ nested_dict = lambda: defaultdict(nested_dict)
+ # StackOverflow:whats-the-best-way-to-initialize-a-dict-of-dicts-in-python
+ self.P = nested_dict()
+
+ def T(self, s, a):
+ """Return a list of tuples with probabilities for states
+ based on the learnt model P."""
+ return [(prob, res) for (res, prob) in self.P[(s, a)].items()]
+
+ def __init__(self, pi, mdp):
+ self.pi = pi
+ self.mdp = PassiveADPAgent.ModelMDP(mdp.init, mdp.actlist,
+ mdp.terminals, mdp.gamma, mdp.states)
+ self.U = {}
+ self.Nsa = defaultdict(int)
+ self.Ns1_sa = defaultdict(int)
+ self.s = None
+ self.a = None
+ self.visited = set() # keeping track of visited states
+
+ def __call__(self, percept):
+ s1, r1 = percept
+ mdp = self.mdp
+ R, P, terminals, pi = mdp.reward, mdp.P, mdp.terminals, self.pi
+ s, a, Nsa, Ns1_sa, U = self.s, self.a, self.Nsa, self.Ns1_sa, self.U
+
+ if s1 not in self.visited: # Reward is only known for visited state.
+ U[s1] = R[s1] = r1
+ self.visited.add(s1)
+ if s is not None:
+ Nsa[(s, a)] += 1
+ Ns1_sa[(s1, s, a)] += 1
+ # for each t such that Ns′|sa [t, s, a] is nonzero
+ for t in [res for (res, state, act), freq in Ns1_sa.items()
+ if (state, act) == (s, a) and freq != 0]:
+ P[(s, a)][t] = Ns1_sa[(t, s, a)] / Nsa[(s, a)]
+
+ self.U = policy_evaluation(pi, U, mdp)
+ self.Nsa, self.Ns1_sa = Nsa, Ns1_sa
+ if s1 in terminals:
+ self.s = self.a = None
+ else:
+ self.s, self.a = s1, self.pi[s1]
+ return self.a
+
+ def update_state(self, percept):
+ """To be overridden in most cases. The default case
+ assumes the percept to be of type (state, reward)."""
+ return percept
+
+
+class PassiveTDAgent:
+ """
+ [Figure 21.4]
+ The abstract class for a Passive (non-learning) agent that uses
+ temporal differences to learn utility estimates. Override update_state
+ method to convert percept to state and reward. The mdp being provided
+ should be an instance of a subclass of the MDP Class.
+
+ import sys
+ from mdp import sequential_decision_environment
+ north = (0, 1)
+ south = (0,-1)
+ west = (-1, 0)
+ east = (1, 0)
+ policy = {(0, 2): east, (1, 2): east, (2, 2): east, (3, 2): None, (0, 1): north, (2, 1): north,
+ (3, 1): None, (0, 0): north, (1, 0): west, (2, 0): west, (3, 0): west,}
+ agent = PassiveTDAgent(policy, sequential_decision_environment, alpha=lambda n: 60./(59+n))
+ for i in range(200):
+ run_single_trial(agent,sequential_decision_environment)
+
+ agent.U[(0, 0)] > 0.2
+ True
+ agent.U[(0, 1)] > 0.2
+ True
+ """
+
+ def __init__(self, pi, mdp, alpha=None):
+
+ self.pi = pi
+ self.U = {s: 0. for s in mdp.states}
+ self.Ns = {s: 0 for s in mdp.states}
+ self.s = None
+ self.a = None
+ self.r = None
+ self.gamma = mdp.gamma
+ self.terminals = mdp.terminals
+
+ if alpha:
+ self.alpha = alpha
+ else:
+ self.alpha = lambda n: 1 / (1 + n) # udacity video
+
+ def __call__(self, percept):
+ s1, r1 = self.update_state(percept)
+ pi, U, Ns, s, r = self.pi, self.U, self.Ns, self.s, self.r
+ alpha, gamma, terminals = self.alpha, self.gamma, self.terminals
+ if not Ns[s1]:
+ U[s1] = r1
+ if s is not None:
+ Ns[s] += 1
+ U[s] += alpha(Ns[s]) * (r + gamma * U[s1] - U[s])
+ if s1 in terminals:
+ self.s = self.a = self.r = None
+ else:
+ self.s, self.a, self.r = s1, pi[s1], r1
+ return self.a
+
+ def update_state(self, percept):
+ """To be overridden in most cases. The default case
+ assumes the percept to be of type (state, reward)."""
+ return percept
+
+
+class QLearningAgent:
+ """
+ [Figure 21.8]
+ An exploratory Q-learning agent. It avoids having to learn the transition
+ model because the Q-value of a state can be related directly to those of
+ its neighbors.
+
+ import sys
+ from mdp import sequential_decision_environment
+ north = (0, 1)
+ south = (0,-1)
+ west = (-1, 0)
+ east = (1, 0)
+ policy = {(0, 2): east, (1, 2): east, (2, 2): east, (3, 2): None, (0, 1): north, (2, 1): north,
+ (3, 1): None, (0, 0): north, (1, 0): west, (2, 0): west, (3, 0): west,}
+ q_agent = QLearningAgent(sequential_decision_environment, Ne=5, Rplus=2, alpha=lambda n: 60./(59+n))
+ for i in range(200):
+ run_single_trial(q_agent,sequential_decision_environment)
+
+ q_agent.Q[((0, 1), (0, 1))] >= -0.5
+ True
+ q_agent.Q[((1, 0), (0, -1))] <= 0.5
+ True
+ """
+
+ def __init__(self, mdp, Ne, Rplus, alpha=None):
+
+ self.gamma = mdp.gamma
+ self.terminals = mdp.terminals
+ self.all_act = mdp.actlist
+ self.Ne = Ne # iteration limit in exploration function
+ self.Rplus = Rplus # large value to assign before iteration limit
+ self.Q = defaultdict(float)
+ self.Nsa = defaultdict(float)
+ self.s = None
+ self.a = None
+ self.r = None
+
+ if alpha:
+ self.alpha = alpha
+ else:
+ self.alpha = lambda n: 1. / (1 + n) # udacity video
+
+ def f(self, u, n):
+ """Exploration function. Returns fixed Rplus until
+ agent has visited state, action a Ne number of times.
+ Same as ADP agent in book."""
+ if n < self.Ne:
+ return self.Rplus
+ else:
+ return u
+
+ def actions_in_state(self, state):
+ """Return actions possible in given state.
+ Useful for max and argmax."""
+ if state in self.terminals:
+ return [None]
+ else:
+ return self.all_act
+
+ def __call__(self, percept):
+ s1, r1 = self.update_state(percept)
+ Q, Nsa, s, a, r = self.Q, self.Nsa, self.s, self.a, self.r
+        alpha, gamma, terminals = self.alpha, self.gamma, self.terminals
+ actions_in_state = self.actions_in_state
+
+ if s in terminals:
+ Q[s, None] = r1
+ if s is not None:
+ Nsa[s, a] += 1
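+            # Q-learning update: move Q(s, a) toward r + gamma * max_a' Q(s1, a')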
+ Q[s, a] += alpha(Nsa[s, a]) * (r + gamma * max(Q[s1, a1]
+ for a1 in actions_in_state(s1)) - Q[s, a])
+ if s in terminals:
+ self.s = self.a = self.r = None
+ else:
+ self.s, self.r = s1, r1
+ self.a = max(actions_in_state(s1), key=lambda a1: self.f(Q[s1, a1], Nsa[s1, a1]))
+ return self.a
+
+ def update_state(self, percept):
+ """To be overridden in most cases. The default case
+ assumes the percept to be of type (state, reward)."""
+ return percept
+
+
+def run_single_trial(agent_program, mdp):
+ """Execute trial for given agent_program
+ and mdp. mdp should be an instance of subclass
+ of mdp.MDP """
+
+ def take_single_action(mdp, s, a):
+ """
+ Select outcome of taking action a
+ in state s. Weighted Sampling.
+ """
+ x = random.uniform(0, 1)
+ cumulative_probability = 0.0
+ for probability_state in mdp.T(s, a):
+ probability, state = probability_state
+ cumulative_probability += probability
+ if x < cumulative_probability:
+ break
+ return state
+
+ current_state = mdp.init
+ while True:
+ current_reward = mdp.R(current_state)
+ percept = (current_state, current_reward)
+ next_action = agent_program(percept)
+ if next_action is None:
+ break
+ current_state = take_single_action(mdp, current_state, next_action)
diff --git a/reinforcement_learning4e.py b/reinforcement_learning4e.py
new file mode 100644
index 000000000..eaaba3e5a
--- /dev/null
+++ b/reinforcement_learning4e.py
@@ -0,0 +1,353 @@
+"""Reinforcement Learning (Chapter 21)"""
+
+import random
+from collections import defaultdict
+
+from mdp4e import MDP, policy_evaluation
+
+
+# _________________________________________
+# 21.2 Passive Reinforcement Learning
+# 21.2.1 Direct utility estimation
+
+
+class PassiveDUEAgent:
+ """
+ Passive (non-learning) agent that uses direct utility estimation
+ on a given MDP and policy.
+
+ import sys
+ from mdp import sequential_decision_environment
+ north = (0, 1)
+ south = (0,-1)
+ west = (-1, 0)
+ east = (1, 0)
+ policy = {(0, 2): east, (1, 2): east, (2, 2): east, (3, 2): None, (0, 1): north, (2, 1): north,
+ (3, 1): None, (0, 0): north, (1, 0): west, (2, 0): west, (3, 0): west,}
+ agent = PassiveDUEAgent(policy, sequential_decision_environment)
+ for i in range(200):
+ run_single_trial(agent,sequential_decision_environment)
+ agent.estimate_U()
+ agent.U[(0, 0)] > 0.2
+ True
+ """
+
+ def __init__(self, pi, mdp):
+ self.pi = pi
+ self.mdp = mdp
+ self.U = {}
+ self.s = None
+ self.a = None
+ self.s_history = []
+ self.r_history = []
+ self.init = mdp.init
+
+ def __call__(self, percept):
+ s1, r1 = percept
+ self.s_history.append(s1)
+ self.r_history.append(r1)
+ if s1 in self.mdp.terminals:
+ self.s = self.a = None
+ else:
+ self.s, self.a = s1, self.pi[s1]
+ return self.a
+
+ def estimate_U(self):
+ # this function can be called only if the MDP has reached a terminal state
+ # it will also reset the mdp history
+ assert self.a is None, 'MDP is not in terminal state'
+ assert len(self.s_history) == len(self.r_history)
+ # calculating the utilities based on the current iteration
+ U2 = {s: [] for s in set(self.s_history)}
+ for i in range(len(self.s_history)):
+ s = self.s_history[i]
+ U2[s] += [sum(self.r_history[i:])]
+ U2 = {k: sum(v) / max(len(v), 1) for k, v in U2.items()}
+ # resetting history
+ self.s_history, self.r_history = [], []
+ # setting the new utilities to the average of the previous
+ # iteration and this one
+ for k in U2.keys():
+ if k in self.U.keys():
+ self.U[k] = (self.U[k] + U2[k]) / 2
+ else:
+ self.U[k] = U2[k]
+ return self.U
+
+ def update_state(self, percept):
+ """To be overridden in most cases. The default case
+ assumes the percept to be of type (state, reward)"""
+ return percept
+
+
+# 21.2.2 Adaptive dynamic programming
+
+
+class PassiveADPAgent:
+ """
+ [Figure 21.2]
+ Passive (non-learning) agent that uses adaptive dynamic programming
+ on a given MDP and policy.
+
+ import sys
+ from mdp import sequential_decision_environment
+ north = (0, 1)
+ south = (0,-1)
+ west = (-1, 0)
+ east = (1, 0)
+ policy = {(0, 2): east, (1, 2): east, (2, 2): east, (3, 2): None, (0, 1): north, (2, 1): north,
+ (3, 1): None, (0, 0): north, (1, 0): west, (2, 0): west, (3, 0): west,}
+ agent = PassiveADPAgent(policy, sequential_decision_environment)
+ for i in range(100):
+ run_single_trial(agent,sequential_decision_environment)
+
+ agent.U[(0, 0)] > 0.2
+ True
+ agent.U[(0, 1)] > 0.2
+ True
+ """
+
+ class ModelMDP(MDP):
+ """Class for implementing modified Version of input MDP with
+ an editable transition model P and a custom function T."""
+
+ def __init__(self, init, actlist, terminals, gamma, states):
+ super().__init__(init, actlist, terminals, states=states, gamma=gamma)
+ nested_dict = lambda: defaultdict(nested_dict)
+ # StackOverflow:whats-the-best-way-to-initialize-a-dict-of-dicts-in-python
+ self.P = nested_dict()
+
+ def T(self, s, a):
+ """Return a list of tuples with probabilities for states
+ based on the learnt model P."""
+ return [(prob, res) for (res, prob) in self.P[(s, a)].items()]
+
+ def __init__(self, pi, mdp):
+ self.pi = pi
+ self.mdp = PassiveADPAgent.ModelMDP(mdp.init, mdp.actlist,
+ mdp.terminals, mdp.gamma, mdp.states)
+ self.U = {}
+ self.Nsa = defaultdict(int)
+ self.Ns1_sa = defaultdict(int)
+ self.s = None
+ self.a = None
+ self.visited = set() # keeping track of visited states
+
+ def __call__(self, percept):
+ s1, r1 = percept
+ mdp = self.mdp
+ R, P, terminals, pi = mdp.reward, mdp.P, mdp.terminals, self.pi
+ s, a, Nsa, Ns1_sa, U = self.s, self.a, self.Nsa, self.Ns1_sa, self.U
+
+ if s1 not in self.visited: # Reward is only known for visited state.
+ U[s1] = R[s1] = r1
+ self.visited.add(s1)
+ if s is not None:
+ Nsa[(s, a)] += 1
+ Ns1_sa[(s1, s, a)] += 1
+ # for each t such that Ns′|sa [t, s, a] is nonzero
+ for t in [res for (res, state, act), freq in Ns1_sa.items()
+ if (state, act) == (s, a) and freq != 0]:
+ P[(s, a)][t] = Ns1_sa[(t, s, a)] / Nsa[(s, a)]
+
+ self.U = policy_evaluation(pi, U, mdp)
+ self.Nsa, self.Ns1_sa = Nsa, Ns1_sa
+ if s1 in terminals:
+ self.s = self.a = None
+ else:
+ self.s, self.a = s1, self.pi[s1]
+ return self.a
+
+ def update_state(self, percept):
+ """To be overridden in most cases. The default case
+ assumes the percept to be of type (state, reward)."""
+ return percept
+
+
+# 21.2.3 Temporal-difference learning
+
+
+class PassiveTDAgent:
+ """
+ [Figure 21.4]
+ The abstract class for a Passive (non-learning) agent that uses
+ temporal differences to learn utility estimates. Override update_state
+ method to convert percept to state and reward. The mdp being provided
+ should be an instance of a subclass of the MDP Class.
+
+ import sys
+ from mdp import sequential_decision_environment
+ north = (0, 1)
+ south = (0,-1)
+ west = (-1, 0)
+ east = (1, 0)
+ policy = {(0, 2): east, (1, 2): east, (2, 2): east, (3, 2): None, (0, 1): north, (2, 1): north,
+ (3, 1): None, (0, 0): north, (1, 0): west, (2, 0): west, (3, 0): west,}
+ agent = PassiveTDAgent(policy, sequential_decision_environment, alpha=lambda n: 60./(59+n))
+ for i in range(200):
+ run_single_trial(agent,sequential_decision_environment)
+
+ agent.U[(0, 0)] > 0.2
+ True
+ agent.U[(0, 1)] > 0.2
+ True
+ """
+
+ def __init__(self, pi, mdp, alpha=None):
+
+ self.pi = pi
+ self.U = {s: 0. for s in mdp.states}
+ self.Ns = {s: 0 for s in mdp.states}
+ self.s = None
+ self.a = None
+ self.r = None
+ self.gamma = mdp.gamma
+ self.terminals = mdp.terminals
+
+ if alpha:
+ self.alpha = alpha
+ else:
+ self.alpha = lambda n: 1 / (1 + n) # udacity video
+
+ def __call__(self, percept):
+ s1, r1 = self.update_state(percept)
+ pi, U, Ns, s, r = self.pi, self.U, self.Ns, self.s, self.r
+ alpha, gamma, terminals = self.alpha, self.gamma, self.terminals
+ if not Ns[s1]:
+ U[s1] = r1
+ if s is not None:
+ Ns[s] += 1
+ U[s] += alpha(Ns[s]) * (r + gamma * U[s1] - U[s])
+ if s1 in terminals:
+ self.s = self.a = self.r = None
+ else:
+ self.s, self.a, self.r = s1, pi[s1], r1
+ return self.a
+
+ def update_state(self, percept):
+ """To be overridden in most cases. The default case
+ assumes the percept to be of type (state, reward)."""
+ return percept
+
+
+# __________________________________________
+# 21.3. Active Reinforcement Learning
+# 21.3.2 Learning an action-utility function
+
+
+class QLearningAgent:
+ """
+ [Figure 21.8]
+ An exploratory Q-learning agent. It avoids having to learn the transition
+ model because the Q-value of a state can be related directly to those of
+ its neighbors.
+
+ import sys
+ from mdp import sequential_decision_environment
+ north = (0, 1)
+ south = (0,-1)
+ west = (-1, 0)
+ east = (1, 0)
+ policy = {(0, 2): east, (1, 2): east, (2, 2): east, (3, 2): None, (0, 1): north, (2, 1): north,
+ (3, 1): None, (0, 0): north, (1, 0): west, (2, 0): west, (3, 0): west,}
+ q_agent = QLearningAgent(sequential_decision_environment, Ne=5, Rplus=2, alpha=lambda n: 60./(59+n))
+ for i in range(200):
+ run_single_trial(q_agent,sequential_decision_environment)
+
+ q_agent.Q[((0, 1), (0, 1))] >= -0.5
+ True
+ q_agent.Q[((1, 0), (0, -1))] <= 0.5
+ True
+ """
+
+ def __init__(self, mdp, Ne, Rplus, alpha=None):
+
+ self.gamma = mdp.gamma
+ self.terminals = mdp.terminals
+ self.all_act = mdp.actlist
+ self.Ne = Ne # iteration limit in exploration function
+ self.Rplus = Rplus # large value to assign before iteration limit
+ self.Q = defaultdict(float)
+ self.Nsa = defaultdict(float)
+ self.s = None
+ self.a = None
+ self.r = None
+
+ if alpha:
+ self.alpha = alpha
+ else:
+ self.alpha = lambda n: 1. / (1 + n) # udacity video
+
+ def f(self, u, n):
+ """Exploration function. Returns fixed Rplus until
+ agent has visited state, action a Ne number of times.
+ Same as ADP agent in book."""
+ if n < self.Ne:
+ return self.Rplus
+ else:
+ return u
+
+ def actions_in_state(self, state):
+ """Return actions possible in given state.
+ Useful for max and argmax."""
+ if state in self.terminals:
+ return [None]
+ else:
+ return self.all_act
+
+ def __call__(self, percept):
+ s1, r1 = self.update_state(percept)
+ Q, Nsa, s, a, r = self.Q, self.Nsa, self.s, self.a, self.r
+        alpha, gamma, terminals = self.alpha, self.gamma, self.terminals
+ actions_in_state = self.actions_in_state
+
+ if s in terminals:
+ Q[s, None] = r1
+ if s is not None:
+ Nsa[s, a] += 1
+ Q[s, a] += alpha(Nsa[s, a]) * (r + gamma * max(Q[s1, a1]
+ for a1 in actions_in_state(s1)) - Q[s, a])
+ if s in terminals:
+ self.s = self.a = self.r = None
+ else:
+ self.s, self.r = s1, r1
+ self.a = max(actions_in_state(s1), key=lambda a1: self.f(Q[s1, a1], Nsa[s1, a1]))
+ return self.a
+
+ def update_state(self, percept):
+ """To be overridden in most cases. The default case
+ assumes the percept to be of type (state, reward)."""
+ return percept
+
+
+def run_single_trial(agent_program, mdp):
+ """Execute trial for given agent_program
+ and mdp. mdp should be an instance of subclass
+ of mdp.MDP """
+
+ def take_single_action(mdp, s, a):
+ """
+ Select outcome of taking action a
+ in state s. Weighted Sampling.
+ """
+ x = random.uniform(0, 1)
+ cumulative_probability = 0.0
+ for probability_state in mdp.T(s, a):
+ probability, state = probability_state
+ cumulative_probability += probability
+ if x < cumulative_probability:
+ break
+ return state
+
+ current_state = mdp.init
+ while True:
+ current_reward = mdp.R(current_state)
+ percept = (current_state, current_reward)
+ next_action = agent_program(percept)
+ if next_action is None:
+ break
+ current_state = take_single_action(mdp, current_state, next_action)
diff --git a/requirements.txt b/requirements.txt
index e69de29bb..dd6b1be8a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -0,0 +1,18 @@
+cvxopt
+image
+ipython
+ipythonblocks
+ipywidgets
+jupyter
+keras
+matplotlib
+networkx
+numpy
+opencv-python
+pandas
+pillow
+pytest-cov
+qpsolvers
+scipy
+sortedcontainers
+tensorflow
\ No newline at end of file
diff --git a/rl.ipynb b/rl.ipynb
deleted file mode 100644
index cc0c0b59e..000000000
--- a/rl.ipynb
+++ /dev/null
@@ -1,354 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {
- "collapsed": false
- },
- "source": [
- "# Reinforcement Learning\n",
- "\n",
- "This IPy notebook acts as supporting material for **Chapter 21 Reinforcement Learning** of the book* Artificial Intelligence: A Modern Approach*. This notebook makes use of the implementations in rl.py module. We also make use of implementation of MDPs in the mdp.py module to test our agents. It might be helpful if you have already gone through the IPy notebook dealing with Markov decision process. Let us import everything from the rl module. It might be helpful to view the source of some of our implementations. Please refer to the Introductory IPy file for more details."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 1,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
- "source": [
- "from rl import *"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "collapsed": true
- },
- "source": [
- "## Review\n",
- "Before we start playing with the actual implementations let us review a couple of things about RL.\n",
- "\n",
- "1. Reinforcement Learning is concerned with how software agents ought to take actions in an environment so as to maximize some notion of cumulative reward. \n",
- "\n",
- "2. Reinforcement learning differs from standard supervised learning in that correct input/output pairs are never presented, nor sub-optimal actions explicitly corrected. Further, there is a focus on on-line performance, which involves finding a balance between exploration (of uncharted territory) and exploitation (of current knowledge).\n",
- "\n",
- "-- Source: [Wikipedia](https://en.wikipedia.org/wiki/Reinforcement_learning)\n",
- "\n",
- "In summary we have a sequence of state action transitions with rewards associated with some states. Our goal is to find the optimal policy (pi) which tells us what action to take in each state."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Passive Reinforcement Learning\n",
- "\n",
- "In passive Reinforcement Learning the agent follows a fixed policy and tries to learn the Reward function and the Transition model (if it is not aware of that).\n",
- "\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Passive Temporal Difference Agent\n",
- "\n",
- "The PassiveTDAgent class in the rl module implements the Agent Program (notice the usage of word Program) described in **Fig 21.4** of the AIMA Book. PassiveTDAgent uses temporal differences to learn utility estimates. In simple terms we learn the difference between the states and backup the values to previous states while following a fixed policy. Let us look into the source before we see some usage examples."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
- "source": [
- "%psource PassiveTDAgent"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "The Agent Program can be obtained by creating the instance of the class by passing the appropriate parameters. Because of the __ call __ method the object that is created behaves like a callable and returns an appropriate action as most Agent Programs do. To instantiate the object we need a policy(pi) and a mdp whose utility of states will be estimated. Let us import a GridMDP object from the mdp module. **Fig[17, 1]** is similar to **Fig[21, 1]** but has some discounting as **gamma = 0.9**."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
- "source": [
- "from mdp import Fig"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 4,
- "metadata": {
- "collapsed": false
- },
- "outputs": [
- {
- "data": {
- "text/plain": [
- ""
- ]
- },
- "execution_count": 4,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "Fig[17,1]"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "**Fig[17,1]** is a GridMDP object and is similar to the grid shown in **Fig 21.1**. The rewards in the terminal states are **+1** and **-1** and **-0.04** in rest of the states. Now we define a policy similar to **Fig 21.1** in the book."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 5,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
- "source": [
- "# Action Directions\n",
- "north = (0, 1)\n",
- "south = (0,-1)\n",
- "west = (-1, 0)\n",
- "east = (1, 0)\n",
- "\n",
- "policy = {\n",
- " (0, 2): east, (1, 2): east, (2, 2): east, (3, 2): None,\n",
- " (0, 1): north, (2, 1): north, (3, 1): None,\n",
- " (0, 0): north, (1, 0): west, (2, 0): west, (3, 0): west, \n",
- "}\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Let us create our object now. We also use the **same alpha** as given in the footnote of the book on **page 837**."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 6,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
- "source": [
- "our_agent = PassiveTDAgent(policy, Fig[17,1], alpha=lambda n: 60./(59+n))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "The rl module also has a simple implementation to simulate iterations. The function is called **run_single_trial**. Now we can try our implementation. We can also compare the utility estimates learned by our agent to those obtained via **value iteration**.\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 7,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
- "source": [
- "from mdp import value_iteration"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "The values calculated by value iteration:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 8,
- "metadata": {
- "collapsed": false
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "{(0, 1): 0.3984432178350045, (1, 2): 0.649585681261095, (3, 2): 1.0, (0, 0): 0.2962883154554812, (3, 0): 0.12987274656746342, (3, 1): -1.0, (2, 1): 0.48644001739269643, (2, 0): 0.3447542300124158, (2, 2): 0.7953620878466678, (1, 0): 0.25386699846479516, (0, 2): 0.5093943765842497}\n"
- ]
- }
- ],
- "source": [
- "print(value_iteration(Fig[17,1]))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Now the values estimated by our agent after **200 trials**."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 9,
- "metadata": {
- "collapsed": false
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "{(0, 1): 0.43655093803808254, (1, 2): 0.7111433090760988, (3, 2): 1, (0, 0): 0.3220542204171776, (2, 0): 0.0, (3, 0): 0.0, (1, 0): 0.20098994088292488, (3, 1): 0.0, (2, 2): 0.8560074788087413, (2, 1): 0.6639270026362584, (0, 2): 0.5629080090683166}\n"
- ]
- }
- ],
- "source": [
- "for i in range(200):\n",
- " run_single_trial(our_agent,Fig[17,1])\n",
- "print(our_agent.U)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "We can also explore how these estimates vary with time by using plots similar to **Fig 21.5a**. To do so we define a function to help us with the same. We will first enable matplotlib using the inline backend."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 10,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "%matplotlib inline\n",
- "import matplotlib.pyplot as plt\n",
- "\n",
- "def graph_utility_estimates(agent_program, mdp, no_of_iterations, states_to_graph):\n",
- " graphs = {state:[] for state in states_to_graph}\n",
- " for iteration in range(1,no_of_iterations+1):\n",
- " run_single_trial(agent_program, mdp)\n",
- " for state in states_to_graph:\n",
- " graphs[state].append((iteration, agent_program.U[state]))\n",
- " for state, value in graphs.items():\n",
- " state_x, state_y = zip(*value)\n",
- " plt.plot(state_x, state_y, label=str(state))\n",
- " plt.ylim([0,1.2])\n",
- " plt.legend(loc='lower right')\n",
- " plt.xlabel('Iterations')\n",
- " plt.ylabel('U')"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Here is a plot of state (2,2)."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 11,
- "metadata": {
- "collapsed": false
- },
- "outputs": [
- {
- "data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYcAAAEPCAYAAACp/QjLAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJztnXmYFNW5h99hFhZngAFZIrs77pq4RFGHEFFxTWJUXJMY\nlyQmepMYNLkxaG5y45WYiMZd401cuCpxQVFj1EEjAjGyyaYoIJswCMgOM1D3j2+OVd1VvU73LN2/\n93n66e6q6urTp6vO73zLOQeEEEIIIYQQQgghhBBCCCGEEEIIIYQQQgghhChIHgJWAbMT7L8QmAnM\nAt4CDmmmcgkhhGhBjgcOJ7E4fBno0vj6FGBKcxRKCCFEyzOQxOIQpBpYlt+iCCGESId2LV2AAJcB\nE1u6EEIIIZqHgaS2HIYCczHrQQghRAtT1tIFwILQ92Mxh3VRBxx66KHezJkzm7VQQghRAMwEDsvm\ngy3tVuoP/A24CFiY6KCZM2fieZ4ensevfvWrFi9Da3moLlQXqovkD+DQbBvnfFsOjwMnArsDS4Ff\nAeWN++4FbsRcSXc3bqsHjspzmYQQQqQg3+IwMsX+7zY+hBBCtCJa2q0kMqSmpqali9BqUF34qC58\nVBe5oaSlC5AmXqP/TAghRJqUlJRAlu28LAchhBAhJA5CCCFCSByEEEKEkDgIIYQIIXEQQggRQuIg\nhBAihMRBCCFECImDEEKIEBIHIYQQISQOQgghQkgchBBChJA4CCGECCFxEEIIEULiIIQQIoTEQQgh\nRAiJgxBCiBASByGEECEkDkIIIUJIHIQQQoSQOAghhAghcRBCCBFC4iCEECKExEEIIUQIiYMQQogQ\nEgchhBAhJA5CCCFC5FscHgJWAbOTHDMW+ACYCRye5/IIIYRIg3yLw5+BU5LsHwHsDewDXAHcnefy\nCCGESIN8i8ObwLok+88E/rfx9VSgK9Arz2USQgiRgpaOOfQBlgbeLwP6tlBZhBBCNNLS4gBQEvfe\na5FSCCGE+JyyFv7+5UC/wPu+jdtCjB49+vPXNTU11NTU5LNcQgjR5qitraW2tjYn54rvteeDgcAE\n4OCIfSOAqxufjwH+2Pgcj+d5MiiEECITSkpKIMt2Pt+Ww+PAicDuWGzhV0B54757gYmYMCwENgPf\nznN5hBBCpEFzWA65QJaDEEJkSFMsh9YQkBZCCNHKkDgIIYQIIXEQQggRQuIghBAihMRBCCFECImD\nEEKIEBIHIYQQISQOQgghQkgchBBChJA4CCGECCFxEEIIEULiIIQQIoTEQQghRAiJgxBCiBASByGE\nECEkDkIIIUJIHIQQQoSQOAghhAghcRBCCBFC4iCEECKExEEIIUSIspYuQLqMGQPf/z785jeweDHs\n2gVlZXDXXVBV5R/nebavtDR8jsmT4aOP4KKLwvveew9eeMH2b98ODQ328Dz/mHPPhW98I+c/rSjZ\nsQMWLoT582H1ali/3h6bNsHOnf6jocF/HfwvvvpV+O53W678QhQ6JS1dgDTx+vb1uPlmeOAB+N73\nrPG/7jp45RUYPNg/8MgjYf/94b77oGNHf/vjj1tjsmULrFoFt99uQuP2XXstXHAB7LOPfa683L6j\nXaNt9frrUF8PDz4YXcAVK2CPPZL/iH/+08o+fboJWxTbtsHatanPBdZggpWzvh7+9Cd7TJ0K3bql\n/nxz43lWj3fcAa++Cr1723+1xx7Qtas9dtvNfk9pqdWRex38L2bNgn//G15+uWV/jxCtnZKSEmg7\n7XxWeNXVnjdihOc99pj3OYce6nkzZvjv16zxPGuC7OF5nrdyped9/LHn7bWX5735pud16uR555zj\n79+yxfO6d489TxR/+YvnXXRR9L6XXvLPl4gPPvC8Hj3suMWLo4/Ztcvz+vTxvM6dY7d//eue98IL\nsdvWr7dz/fa3ntfQ4HlnneV5Q4d6Xs+envf++8nL0hJs2GD1vu++nnf//Z5XV5f9ud54w/OGDMld\n2URx0NDgeRMnet4vf2n3Wkuya5fnTZ7seT/9qecdeaTnrViRn+8BvJStawLaTMxhwwZ46y34ylf8\nbRUV5gJyzJoFlZX++02b4Pe/h29+044dMgQGDICnnvKPGT/erI1DD03+/RUV5gqJ4sYbU5f/xhvN\nOhkyBBYtij7m0Udh+XI48EB/24QJ8Le/wbvvxh7705/a86JFcOutVj8vvww9e8LWranL05xs3Agn\nnmiWwcyZZsHtvnv25+vUySxAUTzs3Anvv594X6rPPvKI3Vc33gi33QZr1uS+jOlQX29ejUMOgW99\ny7wUDQ3WdrU22ow4dO9uLpeePf1t8Q32kiXmi3ZUVcEf/2hulsMPt239+9tz5872/NZbcMopqb8/\nkTgsXw7z5iX/bF0dTJwIV18NgwaFxeG66+DFF+GWW8zVVV9v2z0PbroJDjgg9viFC+GZZ0zYpkwx\ncXjgAXOFdezY+sTh+9+3+r/vPujQoenna43isH07fO1rFj8pVBYssPtr7drm/d5Fi6wDt99+sXGn\nbdvgJz+Bfv0Sf3buXPjyl+Huu83lOm2auaE//DB8bEODxTb79MnsHtq+HX79a4tpJuP55+Ggg+DJ\nJ82tPX8+3Hyz/TZXnoYG//5vadqMOPToYQ17ScB71r59rOWwZIn1DoL++oYGe3a98QEDzEpwvY1Z\ns1JbDZBYHF58EU47LbZc8bz0EgwdaoIULw47dljD/sc/wubNdq5t22zf3LkWHxk50vY57r4bLrsM\nvvhFmD0bTj0V9tzT9nXo4H++NTBpksVaxo5NXkeZkK44PPUUnHNO8mOWL4dPP216ma691gR78eKm\nn6s1EN/h+fe/4YQTLF62YEHzleONN+Dooy2JpHNnWLfOtq9ebffUokX2OqpBHT/eLNbLLrNrcNgw\nuwb33NMST4KsXWv30QsvWELLsmXplW/hQvjSl+DOOxPHwLZsMWv5P/7DROHvfzcPiLsf9trLxGHG\nDBOPMWPS++5802bEoWfPcA8hynIYMMACzAMG2LayMnNnOHHYd1+7yOvr7SKYPdtMvFQkEofp0+Go\no/xIRxQvvwwjRtjrgQNjxeHNN8399fe/w1lnWcPnei3jx1vjVlnpi4PnwdNPm2C4+jj3XP98ySyH\ns86CUaNS/9amsHkz/OpX/vvRo8362W233H1Hx46pxaG+3tyJ48cnPsbzLPvs/vubVp6XXzbLcP/9\n7ZpqCzzyiFl0UYwbZ9aqE7qPPoIzzoB777UGdskS+5+nTMlvGV991f6fxx6DH//Y7p0lS8wSP/54\nK8v48dCli2W6BXn4YbjmGuuYXXllbMfENcaOTz6B446zduCVV6yNWL7c9r33nlkSmzaFy/f22/Y5\nl0X58cfhY+rqTAi2bLG24pRTwp2kvfaCZ5+F4cOtA1xXl01t5Z42Iw7OcgjSvr012B9/DKef7ovD\nCSdYJkxZGZx/vjVOxx9vn7n2WlPm+
nq7ACor08vsSSQOM2ea5dGuXeKGYfp0Mx3Bvuuzz/x9U6b4\nwnHiibGN++uvw8knW8PqxGHePLOGDjnEvvO118zacCQTh+eey79vc9w4M5U9D+bMMdN55Mjcfkc6\nlsOjj9pN3ru3vf/Tn6wXGmTCBHM5ut5oNtTXWwbaAw9AdXVq/3drYNUquPhiuOee8L5Fi+CHP7T7\nYskS+33nnAPXXw9nn2331+LF1uCedFJ0o5kL3HXz5JO+q7h/f9t+6qkm/P/1X9bQdusW+x8+8wz8\n/OcmLl/8Yvjce+7pi8P69SYyI0dafLKsDPr2Ncth8WK7/1asCN83//qXdbb+/Gf7//v3h6VLY49Z\nu9aEYehQux6D8dAgBx9s9TxxotV1vuo0U/ItDqcA84EPgKg+6+7AS8AM4D3gW4lO1LNnWBxcQHrV\nKgtWBdNJu3eHI46Av/4VfvQjXwBKS+1zACtX+o1HKqLEwfPM8nDi4BqGJ580cxOsfB995Kfbtm8f\n6/Z55x047zwrx/HH+26hnTvNlD/qKF8cHnzQGt6hQ/3ex9ChsWM6ErmVnLWSjpXUFJ580p63brXe\n6SWXWCwklzgBTGSpeZ656X7/ews8bttm8Z4//Sn2mF/+0oS5KeIwbpw1mCedZP9Dc4nD5MnRjXs6\n/OQn1ugddFDsds+DSy816/LMM63T9dvf2rX5wx/aMQMHWq986lQ49tjY5I5kZFIv27ZZ/Oa//xtq\navztAwZY527ffc3H76iu9v/D99+Hyy+3nvh++0Wf/4ADzGW7cydceKE14MGkkr59fREaNcpcQs89\nZ4kVV1xhscGzzzaL03Xs+vePtRx27LA6HD7c6jCZS3XffU2IvvQlE5CNG9Ovq3yST3EoBe7EBOIA\nYCQwOO6Yq4HpwGFADfB7EgzMu+46U+ggrsHets0eW7f67otu3ZI3/OXlJiY9eqT3Y6LEYfVqO0/3\n7tYwOMvh3HMtEwHsIhs0yEQBwnGSd96BY44xK6Z7d7/hmzMHvvAF+x1OHB57DP7v/+wiSkQiy8EF\ny1wMJh9s2mQB/qoq6zU980x+Bg06gd+2zXpks2fH7n/nHbvBRoywuvvLX2x79+7+MW+/bZ+/+OKw\nSyIVs2b5LqTf/Q5uuMEvV3OJw6hRFu8K8r3vQW2tP3gwKjj+5pv2GDPGMtyCPP201duPf2yN3Ztv\nWqzo/vv9xm3AAIs53HOP1e+0aSbAyepwwwbYe2/4xz/S+2033WTCddllsdsHDbLG8557Yhvb6mq7\n3hoazAJwQd5EHHyw3V+uDm67LXZ/376W5PHlL1vH8oADTBCOOMKskeuvhx/8wCwHR79+8MEHfpl/\n9jO73saMSS/W5o6pqioOy+EoYCGwGKgHxgFnxR2zEmjMG6Iz8CkQ2Xz17x9u7F1D68Rh2zY/G6Z7\n99TisHJl+imV8WmzYJ/v08dex7uVpk+357lzY1NTO3Twz7NxowVDBw3yB3h17Gi/Y/ZsOOww2+bE\nwTWC6YjD88/HNlRTp9oFnCgdNxe89poFD/v3N6tn/frkZW0KLjZz0UXWGAR54gkb0NiunYn/7bdb\nL/CTT/xjHn4YvvOdsEsiHe67zxrISZNMEE46ybZnKw7PPJNZauWbb5rYBxuRhQstJjBpkjVaFRXQ\nq1fs5xoarBf7n/9pHY+gODQ0wC9+YfvbtbP/8IEH4Kqr/GsczMd+zz3mihk82Nyc551nIpmI//kf\nu9/coNNkzJljFvKdd4b3XXml/W6XaehwlsNdd9m+q65K/h1VVdY2/OY3dh3EW7b77msW0tix9v68\n8+x1v34myP/6l98hcLhO6UMPWVD72Wft3JkmYbQmyyGf02f0AYJeuGXA0XHH3A+8BqwAqoBzyQDX\nm9++3R7t2vni8KUvJU8Jy0Yc4hvWFSvsJgPfrbRzp712PvFly2ID6UHLYfFiuwiDF1C7dla2OXNs\ntDbYhffRR3bu0aP9tNwoOnSw7z7jDOvhHnywbZ82zdxWuRSHW24xP6u7kSdNMhP9pZeswaup8UUv\n13TqZNkdYI3D5s1WT55n40JcILqqyoT6llus4QOrx2eftXhPXV1m4uB59lkw19X55/v/XzbisGWL\nuVD+8hezYlatsg5DfPpykDFjzDINWky332698+eeM3+65/mN3osv2m995BET1Keftv9lwwY7rqTE\n6qx7dz+te8AAa2jdeBpHdbU10mDiMHmyCYuzjONZu9ay655/Hr7+9cS/adcu60iNHm297nhhA2s4\no/z23bpZr/2OO0w402mQTzjBOl977RXeN3y41a1zP++xh7nVnGstETt2WEfw4ostKaa6OnU54qms\nbD2WQz7FIZ2ReT/H4g01wF7AK8ChQEg7R48e/fnrmpoaampqQm6loDhEzZ8UxImDy2pKRSJxcDEO\n51ZaudIu7E8+sYZi+fLE4rBokVkN8XToYBenc8nstpsFB084ITYTKIqOHf14h6sLz7Osi/POswB6\nrrj+ent24vD229YbmzrVxME1xvmgUydrpHv1skZv6VJrBD/80BpAl548d649Dx7sWw5vv22iPmiQ\nNWyZuJVcozFkiDXEt97q78tGHJzQuLjRrbdag/rQQ9HHf/qpuY7+/nffdblpk9XBI49YYsaoUdY5\nGDnSft/ll/vX4U9/6l8XZWV233TsaFbHD3/oN6zDh1vDn6yB69vX6uJHPzJ3ZxR33GH++cMPt9/l\nxCieCRNMPHr18t2A6VJdbUJ97rmxU+kk46GHEotISYkvDJlQXm6W1ZYtFtPJhqa6lWpra6mtrc3+\nBAHyKQ7LgWDyaT/MeghyLOCMzQ+BRcB+wDvxJwuKgyPoVtqxw/7URD2YeFzMISqbIYoocVi5Mmw5\nLF1qN+Gnn5rlsmKFxRTiywwmDgMHhr+rY0drhFzaqTNZo4Qk6rPOpeUaqhUr7By9ejXNcoi6sZ3L\nYccOE54jj7Se3Pr15rPNF506mRh861tmFbjRs6++atktQR/54MG+YHuepSu6mzcYzEyHiRPN1755\ns/W8993X35etOJSXWwabS1MOXi/xjB9vvfsvfMF3P0ycaJ8ZOtQa/iuuMLfJ6tX2W/fYw0R75MhY\nF0rnzvYbli2zzsPXvubvKytLbr2A1fFzz/mDzNavt7Rxx44dlgTwz39aucrKrN6iev9/+IMlDZx8\nsv23mVBdbXXhOivpkKsxN/Hcfnv6bVAU6bqVEoms6zg7brrppqzLks+YwzvAPsBAoAI4D3gu7pj5\ngBvT3AsThrjhKYkJupXALvx03RiZupVc2iyYn3Xr1mjLwYlDWZmfLhv02ca7laIa/I4dzVLYe297\n78QhPlsrig4dfHFwwef337fMjWRTgKTDBRdYui34QW/32+bNs4a4stLvmaZqXJpCx47mSrjkEnu/\ndKk1zJMmWSPpmDHDMqgqK+0/2rjRBGTYMNvfpYs1zOmOT3jxRROHYcOsMQuSqTjU15sFcMEFVo
ZZ\ns8x9mOw/euwxa+SDPUw32K9TJ7ve9tzTXpeXW6rl+edbFlJFRWyD4sThwQf9/ZniBOnQQ8NTvDz/\nvF0DTkC7d48ecDh9ulm7Y8b42T+ZcOCBNsAsnc5TvuncuenikI7lcPPN5oKLx43PgPSm9UlGPsWh\nActGehmYC/wfMA+4svEB8FvgS8BM4B/Az4C0B+cHLQfIbGqGbGMOnmcX/OOPhy2HXbsssNijh50/\nkTi48johicfdwG6qECcO6bjA3Fwt4DdUCxbYDdpUcRg3zh8r4EbJutjKnDl+aqS7QLNpbNLFTeEw\neLC5Yqqr7f+YOjW25+1m2AXrTS9ZYo3RccfZNjflSDo35Pr19tmaGkuBvPzy2P2ZisPkyebz3m8/\nO/fTT9v7+MQHR12did2pp/o9zC1bbBCey5wJjtnp1cvce2efHX0+Jw7jxzd9LMoxx4QHxf35z/Dt\nb/vvE4nDAw9YHCPblOdTT41177VlnDgkStMG60jcdpu5TD3P2oVZsywhpF8/c5ktWGDWXFPI93oO\nLzY+gtwbeL0GOCPbk1dUWEVmKw6ffhrOfEj2XTt2+D7s3Xc3d4RLj3RupXXrzLQuL7fjV66Mnc4j\naDnU1cXOFeVwWSTOCnJmdt++qcsZrAPXUC1caMHtpooD+EK3YIGNwVi0yJ/IzGVl7bVX+uNHssW5\nkUpKzI8+frylsK5Zkzi/vXdvC5bvu2/siG1nPaS6FiZPNrdZcCr4IJmKw/PP2wDGLl0sR/6118wC\ncIH2eGprLanA9UzLy80tdeSR0Z2cXr3sd7qpVeKpqrLMm23b/My4bDnmGIt5OFatMnfSuHH+tm7d\nwvMyNTSY5ZNqXqJioazM7tOtW+2+/8EPrN6C4zruuss6pR9/bANlP/7YsgMff9xcg088YXG1q6+O\nti7Spc2MkI4i3q2UiThUVFjPPtGNHo9r7N96y943NFgj7hoU51Zav956seXlJhQVFbHlCqayrlkT\nfVMHR1C7c0O0kMQT/D3Ogli2zA8ebt9uvvdVq6I/P3ducn+sE6gFCyxtta7O0vree88Xh9//Pjxa\nNB8EM00GDrRG5otfTOxa7N3bGtOj43Lm0hXNKVOSx1EyFYd//MN87F26WCdizhxLOkhkObz2WqzL\nrKrKfP4ulTaenj0TWw1g1+4jj1jwuqk++CFDLFPIZQg+/7w/ut/RvbsFnZcs8be9/rr1fKOyhoqV\nykqLXXztayYEwZH99fUW5L/tNrvH7rjD7skJE8x6uPVWs56feio8LixT2rQ4NNWtBOmLg1tsxgUv\n6+tjxSFoOThxWLMmNkAHvijt2mUNa9QgvKjUwCuvTC8Tw/2evn39hmr5cnvv4ib77Wc3cxRO/OJx\nPT53sy9Y4Afzd+ywnorrsbdrl3gxo1zxhz/YeAPHAQdYw59sEsXeva2c8WMvnAswGR9+aA1xsmBx\nJuKwZYtZP0ccYdfI669b2nGXLomF6rXXYqesr6oyS+iEE6KP//WvLZMoEZ072/995pnplTkZLvvL\nWQAvvBA7rQvY79q40QLojnHjLCYifKqqLKZw8MEmnMFY48SJZgkOH27tx6RJNmjw6adtJPegQXbv\nnX12ep3JZLRpcQimskJ24pCpteEayXhxcJZDUBzq6sLi4NLk3IpvwVG7Qbp0iX1/zz3pldVZGfHi\n0KePX18bNiTOiAgGtIK4SdjcHE/OVeVYs6Z5A4LXXhvbUB56qLkYgwMO4+nd2/4jN/bDUV6eeuT4\nmDHWALqAfBSZiMO771pZ27e3/3rDBouDRA22BLP+1qyJnf6kqsqOTZRxd9BBia8v8OMmQWukKQwb\nZtbD9u0mZPFT4btU4meftWt/1y4/hVX4nH66Nf433WQpukFPwv33mwiUlZnb8NxzrbPTrp1lqZWU\n2Aj3TDK3EtFm1pCOwt1I2biVMrUc3PclEgcXkE4lDmANwief2M2dKAiXbiwkHpezX1Hhr4G9YoWJ\nw+bNfq800fkTicPHH5vrxgVuP/44NkDeo0fiicWaA+czj58vKIiLg8QLiMssS8asWTZuI7heeTyZ\niMO0aRazAb8jcOyxsVlxQd54IzyosKrK3FzZBv5HjMguOygRfftaBtmbb5qVG28Vjx9v19ell9qM\nqT/5id0fUencxYwbmQ1WP04cVqwwy8yNKTn7bBsNPniwpSw7CyN+9Ha2tGlxcDdSc7iVIFYcXEaB\nc/9EuZWSicOyZcnndco2oLv//vadZWVWnjVrzBXUsWOsbz1TcVi61J8uYft2C+YHy9jSaYT9+pnv\nPZU47LlnWMRSuZXc1O7xFkc8mYqDa5jdNXLssdYQRFkO774bni+osjK/Y0kypUcPc1O9/LJlEMXT\np489Xn/drtN99kkcLxGGS5YAG8V++um+a/eOO/zjglZ0rihqt5KbqiKT71u71rII1q6NbWCjAtJ1\ndWH3EKQWh1mz0p/tMp6jjjKBKi01yyGYSht0WWQrDps22TF77GHf8eGHNmq7pcWhpMTGDCSzXo49\nNnohlVTisGSJ1Veqqd2ztRx69zYXwhe+kNhymDEjnFF04YWty1/fs6dd82+95U+RH0Xv3iZ0t94q\ncUhFcK2K8ePzM5FlItq05eAau2zFoUOHzLI0nDhUV4fTYKNSWdesiZ5+oH17fxbWKFL1UNPBNVTB\noHdFhZ9nnuh3r1wZvX3pUuudbNpkLiU3PmPPPc3v2VJr8mZCdXXsKGBHUBzq6836ePttXwzmz09v\nQF+64vDpp/Zwg8PKy/0BS1ExB8+z8RXx4nDxxam/qznp2dOukxUrks+KCjbw7/XXcxfvKFSc5VBX\nZ9fA8OHN991tWhxcL2v7dnOjZCoOmbiUILk4lJZaw7Jpk/2hznJINHfSmjXRVkWucG6lDRv8dNn2\n7f0xFMFlR4MkWjJz8WLLRtqxw14HB+/lcyR0c1BW5gekH3zQsojq6nxxeP/92GkyEpGuOMybZ1ZY\nVMptlOWwbJldT/keO9JUevSwRIUjjkgdf/rmN0308nkPFAIuWWHsWBOGTNusptCmxSHoVurSpXnE\nYelSa/CjLIdNm+yczl2VLOawZk3y4GZTcW6lNWt8C8UFLhNN7rVrlz3ib+zgFOK77WaNZToD8toK\nQcvBpWIGG/n33zcfeSrSFYcFCxIP1IuyHKJcSq0R1wk59tjUx1ZW2pTpIjllZXZPjh0bnp4k37Tp\nmENwnEM24pDJ8e77tmyxBj+ROLjRzGVlLS8OO3daOd1N68Shf/9ocVi71j4X73KaOtUyfDp39qcP\nj5pSua0SFAe38Hy8OOTSckgmDkHL4ec/t+yftiIOZWXWEUlHHERmnHxy8w8UbNPi0LGjNX7btlkj\n3ByWAyR2KznLwZ0/keuofXsTjnyKg3Mrffpp2HIYMCBaHFav9scCBJk2zc+Kqay0KTOaOsCmNREv\nDrvvHjvu4YMP/EkQk5GuOLiJEKNw1vBHH9kymZMm+
euUtwW+8Q3FEfJBSwTu27Q4HHigNVZTpzaf\nWwn83OPg1ADt2tnAMmc5lJebVRM8xtFSbiU3QK5PH18cgv7tujoTh/gGbvVqf36oykpruApNHBoa\nzCpcu9YsKycODQ0WpI+aIDGeZOLw3nsW5IbklkNJiZXHLVYE6bu1WgP33tv6YyNtjUWLbOBbc9Om\nxaG01J95sFu3zKbKzUYc3Ays1dXWuAbFyFkOQXGA6O9oDnEIWg7OreTcRT16WE/ZTdPx73/b9tWr\n7TfGWw5BgenZ094XklvJDYJbvNisKjeAECyrrFev9AaaBdcRj+f4483d0tBg35PMEmnf3oLWJSWW\n/fbhh5p7qJiJXy2yuWjTAWkwd8eiRXZTZpqWmmnMYcgQm/nQzX8TFKN27SwDKOhWguiFSzp0sJu+\nuS0HR1WVWQAuc8n1dhNZDkGBOeggG09QaJZDfb0/6nvLFl8cnGCkQzLLYf16uz4XLTIBTnbtVVRY\n+uwRR9hzp07Zj5gXIlvatOXgGDjQ8u0zGYiVjeXg/O6uFxm8waPcShD9HW7cQXMEpNeuDQ/ecmvx\nujUZXFlXrUptObi5fZKN7m5rOHFws9cGU1vdOt/pEC8OvXrFTr/dq1dyl5LDWQ5HHGFWXTrxDiFy\nTUGIQzbGT/TOAAAVUElEQVRkIw6HH24pna4xDVoOUQFpiP4O579uDrfShg3hoLgTB7dAvRODJUus\nl+x5sYuNBC2Hgw/2R4AXClHi4Bp5VyfpEBSHzz4zN11w6vKePcMTFkZRUWGWxuGH2zTecimJlqCo\nxSFTtxKYWyVqRtf4VNZkbqXmEAfnVtqwIfw9lZUWKJ81y967Bs25Vdwkgo5gxtNhh8UGSwsBZylE\nWQ5uuvN0CIqDm4N/+3Z/wGHnzvYdqZZ7bd/eLLN+/awcEgfREhS1OGQ72tCtVRBlOaTjVmoucdi8\n2Z6D5bzpJkuLq6y0XimEe8luKhC3z80XBbav0FIVk7mV3DxS6RCsNycOGzbYecFiGcuWxS4bG0VF\nhblJ3RgZiYNoCdp8QDpbmpJtk8xycL74lrYcysos6B0fyHRz+FRWmosDrEHbtcsarn79/B7wE0+Y\nz71z5/wv3tOSJBOHFSvSF4eg5TBtmqVaf/aZnaNHDxNr9x3JaN9e4iBangK+5ZNz2WXZfzZdt1JJ\nSXR6res55ttyWLs28XdUVlqjBSYMn3xi1kGHDr5baeZMW5c51WykbR0nDitXWkDeueTAXwsjHZw4\nNDTYVAff+Y4vDvvsY267bdtSi0O85aCAtGgJitat1BTSDUgnmvW1osIygzIZl5EpiSwHR2Wl7wt3\ns7e69FTXyH32mQVUC31ytPJyW9B9wwaLrbiAdH29CWy6abuu3hYtMkuhf/9Ycdi0yQQolSXiLIfu\n3W2Vt0JKGxZtB4lDFqRrOUS5lBz5vuFLS1OLg2PnTkvDdVaGG8y1YUNxiENZmTXgu+/ur3/d0GAC\n3qOHP7I8FU4cnLXhplt24rB+vVkDqToFw4fbgLmOHc1ya4kBUEIUrVupKURZDlET7zXn9LrxOHFI\n1Et14tCpkwlBUBxcYPWzz+w3FfoArPJyiwW4OJQTh5UrM5sKwomDc0+56ZY3b7aFfUpK0nNR5WqZ\nRyGagiyHLHDB2ajpM4JupZYUh3TcSmA9WTcewh0btByg8C2HROKwenVmiQtR4uAshz59LH24kKY6\nF4WNxCEL0rEcUrmV8o2zHJIFpMGC0PFupaDlAMVrOaxendlI8GTisMceEgfRtpA4ZEFUzME1DK3F\ncigtNR93oobdzRYbJQ7OcnDiUOiWQ1mZjUFw4uCylYJB+nRIJA7ufadOEgfRdpA4ZEEiyyG4raXF\nwbm+UlkOXbokjjk4t1IxWA7gC4HLVmqq5dCrl40TKS83MZblINoSEocsSJStFNzXGtxKkHgt38pK\nE6/27aMth6BbqdAtB/efufmjnFspW8vBxSqqqy2m45ICunZNPXWGEK2FfIvDKcB84ANgVIJjaoDp\nwHtAbZ7LkxMSjXMAf8bWiorWYTkkEqjKShMD16AF52Bq187cLCUlJgzFYjm4+aOCMYdsxCE4i+0B\nB/ji8MQTcMIJuSu3EPkkn6mspcCdwFeB5cC/gOeAeYFjugJ/Ak4GlgG757E8OSMqWynecjj1VH96\n65bAiVUycejc2W/QNm6MzVZat85fXa9YLId4cairy86tFJyocPBgq1vwF4sSoi2QTBx+EvfeA+qA\nfwKL0jj3UcBCYHHj+3HAWcSKwwXAeEwYANakcd4WJx3LoVu3lp12wpUnkfVy4IHwu9/B3/4WHXNw\nmU4dOxa+5eDEPricqlsoafcMuiulpWaBlZb69T5ihImFEG2NZG6lKqAy8KgCjgReAkamce4+QGA2\ne5Y1bguyD9ANeB14B7g4rVK3MOnEHFqaVG6lDh1sMfig5RCMOWzcaNbFzTfbNN2FTCLLYe3a8Cp6\nySgtNVdU8DOnnw6XXpq7sgrRXCSzHEYn2N4NeBV4PMW5vRT7AcqBI4BhQCfgbWAKFqOILcxovzg1\nNTXU1NSkcfr8UF7uT7PgcOKQzlrDzUEqt1LwuPiYg+sBd+oEX/96fsvZGnB15aYlLyuzUc1bt2Y2\nOaITB7mPREtRW1tLbW1tTs6VTcxhbZrHLQf6Bd73w3cfOZZirqStjY83gENJIQ4tTfv2/jgBh2tg\nWovlkMqtFDxu1y5rDN1vckuexv/GQsVNQOjEvqzM4g3V1ZnNa1Raaq6ogw7KfRmFSIf4jvNNN92U\n9bmyyVYaCqxL47h3MLfRQKACOA8LSAd5FhiCBa87AUcDc7MoU7NSVQXTp8dua22WQyq3ksONadi6\n1RcD51YqFnFw4zkcZWVmAWQaM3JCm4krSojWSjLLYXbEtmpgJXBJGuduAK4GXsYa/wexYPSVjfvv\nxdJcXwJmAbuA+2kD4gDhBVhaq+WQrltpyxbfyig2yyF+TeemiANIHERhkEwczoh77wGfApsyOP+L\njY8g98a9H9P4aNO0VsshHbeSEwcnJMGYQzFw3HHgBSJkLnZw8MGZnUfiIAqJZOKwuLkKUQi0tmyl\nTC2HbdvClkMm01UXEi7mkK3lUOgr54niQNNn5Ii26lZyo6ErKnyBK7aYQzxlZRaDkVtJFDMShxzR\n2iwHR6ryOCEIikixxRzicS45l9qaLhIHUUhIHHKE81mnu6Rkvtmxw55TpWI6cQjGJlzModjFoWvX\nzD4ncRCFhMQhR+zc2dIliGX79vSOi1+kCIovIB2Pa+QznVNK4iAKCYlDjmht4uAsh1S45U3lVvJx\nlkOmc0pJHEQhIXHIEW1ZHKLcShKH7CyHkpLM3VFCtEYkDjmitYlDulNNJ7IcduyQOGQjDtXVrSfu\nJERTkDjkiNYmDmeeaWtIp8LFHOItByjemEO24tC7N3z3u7kvjxAtgcQhR7Q2cXCruKUikeUAEodM\nYw5VVXDL
LbkvjxAtgcQhR7Q2cUiXqHEO6c7oWqhkm60kRCEhccgRbVkc4t1KznIILmZUTLj/slh/\nvxAgccgZbVUc2rWzqSKiLIdibRzd+g5CFDMShxzRVsUhyoVU7JaDxEEIiUPOaGho6RJkR5SVUOyW\nw4EHFm8arxAOiUOO2LWrpUuQHc5KaN/e3+bmYwpuKyYOO8ziMEIUMxKHHNHW3UpBIaivt+dM1k8W\nQhQWEoccUYjiIIQoXiQOOaKQxCHdeZmEEIWLxCFHtNWAdFTMQZaDECLZGtIiA+66Cz74oKVLkTmy\nHIQQUUgccsTee9ujrRGVtirLQQght1KRI8tBCBGFxKHIUcxBCBGFxKHIkeUghIhC4lDkaJyDECIK\niUORI7eSECIKiUORI7eSECKKfIvDKcB84ANgVJLjjgQagK/nuTwiDrmVhBBR5FMcSoE7MYE4ABgJ\nDE5w3C3AS4CmemtmZDkIIaLIpzgcBSwEFgP1wDjgrIjjfgg8BdTlsSwiAVExh8GDYc89W6Y8QojW\nQT5HSPcBlgbeLwOOjjjmLOArmGvJy2N5RARRlsOrr7bd9SmEELkhn+KQTkP/R+D6xmNLSOJWGj16\n9Oeva2pqqKmpaVrpRAxlgSshuGSoEKLtUFtbS21tbU7OlU8f/zHAaCzmAHADsAuLLzg+CpRhd2AL\ncDnwXNy5PM+TUZEPXnsNhg0DVa8QhUeJrdiVVTufT8vhHWAfYCCwAjgPC0oHCXq2/wxMICwMIo+0\n1anGhRD5JZ/i0ABcDbyMZSQ9CMwDrmzcf28ev1ukicRBCBFFW0kdlVspTzz7LJx9ttxKQhQiTXEr\naYR0kSPLQQgRhcShyPnqV+E3v2npUgghWhtyKwkhRIEit5IQQoicInEQQggRQuIghBAihMRBCCFE\nCImDEEKIEBIHIYQQISQOQgghQkgchBBChJA4CCGECCFxEEIIEULiIIQQIoTEQQghRAiJgxBCiBAS\nByGEECEkDkIIIUJIHIQQQoSQOAghhAghcRBCCBFC4iCEECKExEEIIUQIiYMQQogQEgchhBAhJA5C\nCCFCSByEEEKEkDgIIYQI0RzicAowH/gAGBWx/0JgJjALeAs4pBnKJIQQIgkleT5/KbAA+CqwHPgX\nMBKYFzjmy8Bc4DNMSEYDx8Sdx/M8L89FFUKIwqKkpASybOfzbTkcBSwEFgP1wDjgrLhj3saEAWAq\n0DfPZRJCCJGCfItDH2Bp4P2yxm2JuAyYmNcSCSGESElZns+fiS9oKPAd4Lg8lUUIIUSa5FsclgP9\nAu/7YdZDPIcA92Mxh3VRJxo9evTnr2tqaqipqclVGYUQoiCora2ltrY2J+fKd0C6DAtIDwNWANMI\nB6T7A68BFwFTEpxHAWkhhMiQpgSk8205NABXAy9jmUsPYsJwZeP+e4EbgWrg7sZt9VggWwghRAuR\nb8shV8hyEEKIDGnNqaxCCCHaIBIHIYQQISQOQgghQkgchBBChMh3tpIQQmRFt27dWLcuctiTiKO6\nupq1a9fm9JzKVhJCtEpKSkrQfZ8eiepK2UpCCCFyisRBCCFECImDEEKIEBIHIYQQISQOQgiRJTfc\ncAO333573r9nwoQJnH/++Xn/niASByGEyIK6ujr++te/ctVVVwEwZcoUTjrpJLp3707Pnj0599xz\n+eSTT9I+18iRI+nTpw9du3ZlyJAhTJs27fP9Z5xxBnPmzGH27Nl5+S1RSByEECILHn74YU477TTa\nt28PwPr167nqqqtYsmQJS5Ysoaqqim9/+9tpnWvTpk0cffTRvPvuu6xbt45LL72U0047jc2bN39+\nzMiRI7nvvvvy8lui0DgHIUSrpLWPcxg2bBiXXXYZF1xwQeT+d999l5qaGjZs2JDV+bt06UJtbS2H\nH344AJMnT+aiiy7io48+Ch2rcQ5CCNFKmD17Nvvtt1/C/W+88QYHHXRQVueeMWMGO3bsYO+99/58\n2/7778/ixYvZtGlTVufMFE2fIYRos5TkyPeRjYGyfv16qqqqIvfNmjWLX//61zz33HMZn3fDhg1c\nfPHFjB49Oub87vX69euprKzMvMAZInEQQrRZWtLrVF1dzcaNG0PbFy5cyIgRIxg7dizHHXdcRufc\nunUrZ5xxBsceeyyjRo2K2ee+q2vXrtkXOgPkVhJCiCw45JBDWLBgQcy2JUuWcNJJJ3HjjTdy4YUX\nZnS+7du3c/bZZ9O/f3/uvffe0P558+YxcODAZrEaQOIghBBZMWLECCZNmvT5++XLl/OVr3yFq6++\nmiuuuCJ0/MMPP8ygQYMiz1VfX88555xDp06dePjhhyOPmTRpEiNGjMhJ2dNB4iCEEFlwySWXMHHi\nRLZt2wbAAw88wKJFiz6PFVRVVdG5c+fPj1+6dClDhgyJPNfkyZN54YUXeOWVV+jatevnn3/rrbc+\nP2bcuHFceeWV+f1RAZTKKoRolbT2VFaAX/ziF/Ts2ZNrrrkm5bEnn3wyY8eOTZrhlIgJEybw6KOP\nMm7cuMj9+UhllTgIIVolbUEcWgsa5yCEEKJZkDgIIYQIIXEQQggRQuIghBAihMRBCCFECE2fIYRo\nlVRXV7tsG5GC6urqnJ8z3zV/CvBHoBR4ALgl4pixwKnAFuBbwPSIY5TKKoQQGdJaU1lLgTsxgTgA\nGAkMjjtmBLA3sA9wBXB3HstTENTW1rZ0EVoNqgsf1YWP6iI35FMcjgIWAouBemAccFbcMWcC/9v4\neirQFeiVxzK1eXTh+6gufFQXPqqL3JBPcegDLA28X9a4LdUxffNYJiGEEGmQT3FIN0gQ7w9TcEEI\nIVqYfAakjwFGYzEHgBuAXcQGpe8BajGXE8B84ERgVdy5FgJ75amcQghRqHyIxXVbFWVYwQYCFcAM\nogPSExtfHwNMaa7CCSGEaDlOBRZgPf8bGrdd2fhw3Nm4fyZwRLOWTgghhBBCCFEYnILFIT4ARqU4\nthB4CIu3zA5s6wa8ArwP/B1L93XcgNXNfGB4M5WxuegHvA7MAd4DftS4vRjrowOW6j0DmAv8d+P2\nYqwLRyk2YHZC4/tirYvFwCysLqY1biv4uijF3E0DgXKiYxaFxvHA4cSKw/8AP2t8PQr4XePrA7A6\nKcfqaCGFNVdWb+CwxteVmHtyMMVbH50an8uw2NwQircuAH4MPAo81/i+WOtiESYGQQq+Lr4MvBR4\nf33jo9AZSKw4zMcfGNi78T1YDyBoTb2EBfULlWeAr6L66AT8CziQ4q2LvsA/gKH4lkOx1sUioHvc\ntpzURWtWjXQG0RUDvfBTe1fh/+l7YHXiKOT6GYhZVFMp3vpoh/X6VuG724q1Lv4AXIelxjuKtS48\nTCjfAS5v3JaTumjNs7JqMFwYj+T1Uoh1VgmMB64BNsbtK6b62IW52boAL2O95iDFUhenA6sxH3tN\ngmOKpS4AjgNWAj2wOMP8uP1Z10VrthyWY0FJRz9iVa9YWIWZhgBfw
G4MCNdP38ZthUQ5Jgx/xdxK\nUNz1AfAZ8ALwRYqzLo7F5mRbBDwOfAW7PoqxLsCEAaAOeBqb067g6yKdQXSFyEDCAWnnJ7yecHCp\nAhiE1VUhTX5fAvwFcyEEKcb62B0/46Qj8AYwjOKsiyAn4sccirEuOgFVja93A97CMpCKoi6iBtEV\nMo8DK4AdWLzl21gmwj+ITkv7OVY384GTm7Wk+WcI5kqZgbkQpmOpzcVYHwcD72J1MQvzt0Nx1kWQ\nE/GzlYqxLgZh18QMLN3btZHFWBdCCCGEEEIIIYQQQgghhBBCCCGEEEIIIYQQQgjRltjU+DwAGJnj\nc/887v1bOT6/EEKIPOHmZKrBH1GbLqnmH4uf70kIIUQbwTXgU4D12Gjra7C5xW7FFkmZCVzReFwN\n8CbwLP5EZs9gM1++hz/75e+Ahsbz/bVxm7NSShrPPRsb1Xxu4Ny1wJPAPOCRQDl/h822OrPxs0II\nIfKIE4fgXDxgYvCLxtftsXUSBmIN+CbMDeWobnzuiDX47n285eDefwObuqAE6AkswSZDq8EEao/G\nfZOxmTW7EzujZud0f5wQ+aA1z8oqRK6Jn2RsOHAJ1vOfgs1Js3fjvmlYg+64BpvD5m1sZst9UnzX\nEOAxbErk1cAk4MjG99OwObS8xnMOwARjG/Ag8DVga6Y/TohcInEQxc7V2EJChwN7YROWAWwOHFOD\nzYJ6DLamwnRsXedkeITFyM2dvz2wbSc2NflObLrlp7A1C15CiBZE4iCKiY34UxyDLZrzffyg8774\nazUH6Qysw3r2+xO7tGI90UHrN4HzsHusB3ACZjEkmiJ5N2z2zBex9ZEPTflrhMgjrXklOCFyheux\nz8R66DOAPwNjsRjDu1ijvRpz6cSvnvUScBUwF5tC/u3AvvuwgPO/gYsDn3saWwd9ZuO26xrPP5jw\n6lseJlrPYhZJCfAfWf9aIYQQQgghhBBCCCGEEEIIIYQQQgghhBBCCCGEEEIIIYQQQohC5v8BJFxH\nUX/IRW0AAAAASUVORK5CYII=\n",
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- }
- ],
- "source": [
- "agent = PassiveTDAgent(policy, Fig[17,1], alpha=lambda n: 60./(59+n))\n",
- "graph_utility_estimates(agent, Fig[17,1], 500, [(2,2)])"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "It is also possible to plot multiple states on the same plot."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 12,
- "metadata": {
- "collapsed": false
- },
- "outputs": [
- {
- "data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYcAAAEPCAYAAACp/QjLAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzt3XdYVMf+BvAXFVCKglJUbNg1XokxUWP5iTFGYzSmeqPG\nGE1ii/cmmpuY5OYGEmMDe43dxMQSGxYM2MDeC6KAWLBhQwFFpO/398dZFpazwFIWkH0/z8PD7jmz\ns8MA++7MnD0HICIiIiIiIiIiIiIiIiIiIiIiIiIiKpeWA7gHIDSX/YMAhAA4B+AQgNYl1C4iIipF\nXQC0Qe7h8DKAatrbvQAcLYlGERFR6WuA3MMhO0cAt0zbFCIiMkaF0m5ANp8A2FHajSAiopLRAPmP\nHLoBCIMyeiAiolJWqbQbAGURegmUNYc4QwU8PDwkJCSkRBtFRFQOhAB4vjAPLO1ppXoANgH4EMDl\n3AqFhIRARPglAi8vr1JvQ1n5Yl+wL9gXeX8B8Cjsi7OpRw5rAHQF4ATgJgAvAJbafYsA/AhlKmmh\ndlsagHYmbhMREeXD1OEwIJ/9n2q/iIioDCntaSUqIE9Pz9JuQpnBvsjCvsjCvigeFqXdACOJdv6M\niIiMZGFhARTydZ4jByIiUmE4EBGRCsOBiIhUGA5ERKTCcCAiIhWGAxERqTAciIhIheFAREQqDAci\nIlJhOBARkQrDgYiIVBgORESkwnAgIiIVhgMREakwHIiISIXhQEREKgwHIiJSYTgQEZEKw4GIiFQY\nDkREpMJwICIiFYYDERGpMByIiEiF4UBERCoMByIiUmE4EBGRiqnDYTmAewBC8ygzB8AlACEA2pi4\nPUREZARTh8MKAL3y2N8bQGMATQAMB7DQxO0hIiIjmDocDgCIy2P/mwB+094+BsABgKuJ20RERPko\n7TUHNwA3s92/BaBOKbWFiIi0SjscAMAix30plVYQEZFOpVJ+/mgAdbPdr6PdpmJh4Z3tnqf2i4iI\nsgRrv4ou57t2U2gAYBuAfxjY1xvAGO33DgBmab/nJCIcUBARFYSFhQVQyNd5U48c1gDoCsAJytqC\nFwBL7b5FAHZACYbLABIBDDVxe4iIyAglMXIoDhw5EBEVUFFGDmVhQZqIiMoYhgMREakwHIiISIXh\nQEREKgwHIiJSYTgQEZEKw4GIiFQYDkREpMJwICIiFYYDERGpMByIiEiF4UBERCoMByIiUmE4EBGR\nCsOBiIhUGA5ERKTCcCAiIhWGAxERqTAciIhIheFAREQqDAciIlJhOBARkQrDgYiIVBgORESkwnAg\nIiIVswyHE9EnkKHJKNU2PE17ivuJ90u1DUREuTGLcAiPCUeGJgP3ntzDW2vfQrul7XDqzqlSa8+2\ni9tgO8kWvf/sXWptKIzHKY9Luwnl2v3E+/CL8CvtZhABeIbD4UT0CYTHhOdZRkQw6cAkPLfgOfzr\n73+h7eK2eM75ObR3a4+ktKQSaqnSDgBISEnAwI0DMW7nOPi86lNiz19UMYkxeGfdO6gzow6iH0eb\n9LlORJ9AfHK8UWWfpj01aVtKSkJKAr7f8z1azG+B/uv7QyOaPMuLCNadX4cW81tgz9U9JdTKkiMi\neiP7lPQUrDizAvee3CvFVj077j25h8DLgUWu55kMB78IP7zy+ysY4jdE98KbSUSwIWwD4pPjMXDT\nQGy5uAWb/rkJWy9uxZK+SzCx+0RUta6KlIyUEmnrjUc38MLiF+AV5IWOyzvCzsoOISND0P+5/oWe\nVtp2cRvO3TtXzC01bPfV3Xh+0fNoUr0JhrUZhhlHZhRr/akZqfgq8CsM8RuC/+z8D7qs6IJhW4bl\n+Zh0TTq+3/M9bCfZ4knqk3yfQyMa1agnKCoINafVRFxSXJHaX1Sbwjeh5YKWuJ1wG2dGnIFjFcc8\n/y7CYsLQaXkn+Bz2QW372rgQc6EEW6uWkJKAkLshRpcXEfhH+uN49HGD+3dd2YXWv7bGaP/REBGs\nClmFZvOaYcbRGXhv/XvF1exicz/xvt6blLSMNCw8sRA/Bf9U5LojH0ZiTeiaArXlq8Cv0GRuE/T6\nsxdSM1KL3AZT6gUgAsAlAOMN7HcCEADgLIDzAD7OpR7J5B/pLy6+LnIi+oQ0mNVAzt87L9l5B3lL\npZ8rSa1ptaTv6r6SlJYkIiIajUZXps/qPrIlYouY2sUHF6XOjDoyLmCcVPmlisw5OkfXjqepT8V6\ngrVeu4wx68gsqTG1hjSf11zSMtJM0WydRScXSc1pNWX3ld0iIhJ4OVA8V3oWqc4L9y/ofif3n9yX\nriu6Sp/VfeSFRS/IW2vfkluPbomTj5PciL9h8PF3Eu5I1xVd5bVVr4nNRBu5Gns1z+eLS4qTNr+2\nkWqTq4mISIYmQ37Z94vUmlZL4A05c+dMkX6ewkpISZAhm4dIs7nNZN+1fbrtbX5tIyeiT6jKZ2gy\nZOrBqeLk4yQLji+QDE2GTD4wWb7e+bVeGa8gL/k5+OcS+Rl2Xt4p9WfWF7fpbkaVv5twV95Z947A\nG9J+SXt5mvpUt+/+k/vy/l/vS6PZjWTxycXiOMVRuq7oKm0XtZUD1w9IhiZD3Ka7SXhMeJ7PEZMY\nI/OOzZPU9NQi/Wz5SU1PlUn7Jwm8IT/s+UFElP5oOrepvLT4JXGf5V7oumOfxsrn/p9Ljak1xHai\nbb7/50lpSeId5C2OUxxljP8YiX4cLS3mtZCzd84KAMnlNbVUVQRwGUADAJZQAqBFjjLeACZrbzsB\neAigkoG6RETk9O3T4uzjLIdvHBYRkd5/9patEVt1nbTs9DJxn+UuZ++clW93fSvJackGO/O9v96T\nv87/VehfnjGi4qKk7oy6svz0chERg22xn2Qv8UnxRtc568gscZ/lLtfironbdDe5Hn+92Nqb00/B\nP0mj2Y3k0sNLum13E+6K4xRHXaBlaDIK9E/41/m/BN6QX0/8Kldir4j7LHf5dte3kp6RLhmaDF25\n9/96Xz7Z8onEJMboPT7yQaTUn1lfvIK8JD0jXdouaivHbx3XK5OclizpGekiovwOWs5vKZ9u+VSs\nJlhJXFKcvLPuHem0rJPcenRLXv39VQm8HFjgvimqSw8vSfN5zeVjv4/lScoTvX19V/eVzeGbdffX\nnV8nv+z7Rfqt6ScdlnaQa3HXdPtWhaySARsGiIjIo+RH8tbat8R6grUuCDNdfHBRHic/Lrb2p2Wk\nyVeBX0ndGXXl70t/i5OPk0TERKh+F9n5R/qLq6+rfLf7O4lPihd4Q+ANSc9Il20Xt0nNaTXlm53f\n6N44jPEfI3OOztH9LkVERm8fLVMOTMn1OTLrabWglfRb009S0lN0+24/vq37Hzx265je60b2n8uY\n/8fjt45L64WtpdcfvWTFmRXSemFr+WTLJ1JvZj3ZdnGbJKcli9UEK722G8sv3E9qT68to7aPkpjE\nGGk6t6mcu3su1/I7L++UxnMay9tr3
9b72xi4caCsOLOizIbDy1BGBZm+1X5lNwLAfO3thgAic6lL\n4pPipdHsRrI2dK2uA0ZvHy1zjs4REZGT0Sd1f6T5GbRxkKwKWVXgX5yxHiU/kufmPyezj87Os1yj\n2Y0k8kFkvvWlpqfKijMrdMEgItJqQSsJuRtSLO3NacbhGdJ8XnO5m3BXtc/F10WO3Dwi5++dlzfX\nvCntl7TX/UPnZVPYJnH1dZUv/v5Cuq3sJvVn1pf5x+cbLDvv2DyBN+R/e/+n2xZyN0RqT68tS08t\n1W3ruaqn7Ijcobt/89FNgTdk4v6JEhETIW7T3XS/g7aL2kqzuc3k/b/e171oDNw40KR/B4aciD4h\ntabVkoUnFhrcP3LbSJl3bJ6IiPgc9JH6M+uLx0IPGbV9lN6LnYhIUFSQdFneRa7HX5cW81rIiG0j\nJD4pXir/UlkSUhJkmN8wGbV9lFhPsNb9nxRVTGKMdF3RVXqu6ikPEh+IiDISd5jiIA5THCQ1PVU0\nGo2sCV0jSWlJutGM23Q3OXj9oK6e07dPS+M5jaX/+v5SZ0YdvX252RKxRV5b9ZretssPL8vn/p/r\nwurA9QOSkp4iXZZ3kd/P/i4iIqvPrRa7SXbSeXlnmXxgsjj5OImrr6tef956dEs6LusoPVf1zPX5\nNRqN+B7yFRdfF/kj5A/RaDSSkp4iTj5OMmr7KHmU/EhXtta0WrmOfg15nPxYBm4cKE3mNJH91/br\ntg/aOEiWnV6mVzY+KV6epj6V0dtH6wIpJ99DvvK5/+dlNhzeA7Ak2/0PAczNUaYCgGAAtwEkAHg9\nl7pk4MaBMnr7aFUHjA0YKwkpCdJ4TmO94MjLJ1s+kSWnlhj9iysIjUYj76x7R4ZvHZ7vlFGHpR3k\ny7+/zLOcz0EfcZvuJs4+zhJ2P0y3vfPyznp/RMXR7s+2fiYdl3WUOjPq6L0LyW7U9lFS8aeKYvmz\npfT6o5e0W9JOAi4F5Fl34OVAcfF1kVO3T0lUXJTAGzL32Nxcy6dnpMuGCxsE3pDPtn4mV2OvSq1p\ntWRN6Bq9coM2DtK9ADx8+lBazm8pXZZ3kdYLW+uN2kREvIK8ZPT20Xrv5r78+0uZfnh6vn2Tl/ik\neIlLitPbdvr2aYPvGg9ePyjOPs7iF+6Xa33f7f5O4A3ps7qPtJjXQm4+upnr38elh5fEycdJ6s+s\nr/dzNJvbTFovbC19V/eVPqv7yKdbPlX974goL0jZ363nJTE1UW7E35Dm85rLNzu/0fv5tkRskUUn\nF0mj2Y0E3pBef/QSC28L+SPkDxm4caB0WtZJ7iTcUdXpe8hXXvntFYNvQgyJSYwR+0n2uv+D/df2\ni6uvq7yz7h3pt6af3khzztE58smWT+SHPT9Iw9kN5dTtUzJ+13jpsryLRMVFiedKT1l3fp2IKL+X\nWtNqydiAseLk46TX31djr8rj5MfyJOWJvL32bWm3pJ1qxG5o9NxhaQfZFLZJuq3sJotOLsrz57pw\n/4I0m9tMhm8dLompiXr75h2bJ0P9hurub7iwQWwn2kr1qdWl//r+qr+9TOfunpMGsxqU2XB4F/mH\nww8AZmlvNwJwFYC9gbrEsZejfP/D9+Ll5SVBQUEiIrL+wnp5e+3b8lXgVzJ40+A8fwHZjd4+Wvfu\nrLgtP71cPBZ65DqllV31qdUF3pDbj2+r9v157k9pvbC1uPi6yMT9E3Xz/pne+PMNg0Pjgvhm5ze6\ndyULji8Qj4UeMsZ/TJ7D2NT0VNl7da+ciD4hiamJMtRvqCw+uTjX8ldir4iLr4tekEXFReXbttT0\nVHlzzZtiP8lems9rbvCd7xd/fyEzDs+Qp6lPpdOyTvJV4FeSmJoothNtjfr9Tto/ScbvGp9vudzE\nPo0VeEPeXfeubptfuJ9U+KmCKrjP3jkrLr4u+QbpwesHZajfUBm+dbhqWi2np6lPpcovVVT9P9Rv\nqHy65VPdXHXg5UDptrKbbn9qeqokpCRI5+WdxWqCVb5t2nN1j1T5pYpUn1o9zzBdf2G9jN81XsYG\njJVZR2aJ9QRr6bO6j97aQlFlTkctObVEnH2cZeflnQbLnb1zVir8VEHaL2kv957cU+1fE7pGuq3s\nJn7hfuLk4yQ7IneIRqORGlNr6KZSd1/ZLXaT7GTwpsHy0uKXZKjfUNXoLTcfbPhArCZYyYuLX5SP\n/T7OtdzuK7vF2cdZVpxZYXB/5gj4gw0fSKsFraTezHpy6MYhOXzjsME3DUFBQeLl5SU//vijOPRy\nKLPh0AH600rfQb0ovQNAp2z39wB40UBdBv+Az909J04+TuLs4yz3n9w36pcmIjIuYJxMOzTN6PLG\nylxMDb0XalT5c3fPifssdzlw/YDe9puPboqzj7OMDRirCoVMRZ0a23dtn8Ab0nVFVwm5GyJOPk5y\n8cHFAtfjFeQl/9v7P4lPipeZR2aKiDIKSc9Il8TURPFY6FGkKY13170r4wLGGdw3Yd8E+X739zLM\nb5j8c/0/desW2Yf3eVl6aqm8u+5deWvtWxL7NLZA7UrPSJdef/SS5+Y/J12WdxERZWrT2cdZWi1o\npbemdSP+htSeXlv3TrU4JaQkqLblfNG4Hn9dak2rJSLKSMdjoYdUnVxVhvoNlQn7Jsi/d/xbRMTg\nwueRm0fEycdJNodvlqCoIKPbFZcUJ5MPTC72heGImAj52O9jcZjiIEduHsm1XIYmQ+Yem6t6J54p\nOS1Znv/1eXH2cdZbK8k8UGH64eni7OMsq8+tFsufLeWHPT8U6OCRrRFbZU3oGjl4/aC0W9JOLj64\nKB4LPeTmo5u6MuvOrxMXXxcJjgrOtR6NRiP1Z9aXDzZ8IOsvrDcYdLk5evNomQ2HSgCuQFmQtoLh\nBekZALy0t10B3AJQ3UBduXbA/OPzZf2F9UZ3mIgydJ+4f2KBHmOMARsG6I5cMNbAjQNlxuEZ0nVF\nV907rH5r+olXkFeejxu9fXSeUzO5SUlPkT1X90jjOY1l4v6J0nB2Q2nzaxu9KZiCWHZ6mQzZPEQ+\n9vtY4A3xPeQrgzcNlqF+Q2X09tEycOPAAh+RlV32heqcFp5YKPVn1pemc5sWasF1a8RW3TvRvBZT\nDRm/a7x0W9lN7iTckWqTq8mDxAfSYFYD2XBhg97vJjktWdotaSdTD04tcPuKi0ajEYcpDhL9OFr6\nrO4jI7eNlIBLAZKekS6h90Kl7oy64h3kLa/+/qre46LioqTWtFoG57RLU3xSfLEcjJGanioPnz7U\n23boxiEZtX2UVJtcTY7ePCoiku8ILi+xT2PFZqKNuE13kzoz6ujeNKwNXSu1ptUyat0wJjEmz/+D\nvKAI4WDoyKDikg5gDIBAKEcuLQMQDmURGgAWAZgEYAWAECjrD98AiC3Ik4x+aXSBG2Zd0RrJ6ckF\nflxeDlw/gEM3D2Hpm0sL9LhGjo3gFeyFhNQEXIu/huiEaITeD8Xa99bm+TiHyg54lPyowO30Oe
SD\nn/f9jP7P9cc3nb7Bz/t+RqP6jfDx8x8XuC4AqFu1LnZc2gEbSxsM+scgTDwwEU1rNMX5++dRvUp1\nhI4KhYWFRaHqBoAKFrl/FMfJxgl3n9zF0U+Pwt7a0Gxk3lo6t8Sbzd5EUloSohOi8RJeMupx/pH+\nWHt+LU4OPwknGyfYWtni9T9fxzvN38G7Ld/FhZgLug9sfbXzK9S2r42vO35d4PYVFwsLC7zW6DX0\nWd0HdlZ22NR/EywrWgIAWrm0gkNlByw8uRApGSkQEaRp0jA2YCyCrgVhfKfx6NO0T6m13ZBqlauh\nWuVqRa7HsqIlqlfRfy/asW5HtHNrB6+uXnC1cwWg/J0VlmMVRzR0bIixHcbidsJtrDi7Ajce3YDv\nYV/sHLwTrV1b51tHUZ7fHBQqNXMz5cAU+WbnN8VaZ9cVXWXlmZUFftzKMyul2uRq0m5JO9kSsUVa\nLWglm8I25fu4qQenyn8C/2NwvUJEJDwmXCbsm6C3LfpxtFSfWl2uxF7RLSh6B3nn+1mBvETERAi8\nIf6R/nI19qruMOPhW4fnO5ddVAkpCbrnK4oR20bI/OPzJSouSmYdmaXbfvHBRdFoNHIn4Y4sOL5A\nRJR3gm7T3WTv1b26clsjtsrIbSN1Uyi/nvhVPt3yqWy/uF0azm5YoMOVTeXPc39KnRl1DC7+7r+2\nXy7cvyAuvi4S+SBS/r3j3+K50lNmHplZpFEfKTL78O9Lf+sW7As6Ui0sFGHkUPi3dCVL+3MWj1lH\nZ+Fa/DXM6jUr/8JGCL4WjM+2fYbwz8NRqULBBmMJKQmIeBCB30J+Q8i9EKRr0nF42OF8320vPrUY\nWy9uxZ6oPYj6Igo17Wrq7e+yogsO3jgI8crqt+HbhsOhsgN8ehTfqTs0osGuK7vQs3HPYquzpP28\n72ekZqTi7N2zOHLrCB58/QCXYi+h1YJWCP44GPOOz8PG8I0IHRWKCfsnoKpVVcx/Y36u9flF+GH2\nsdm4EnsFv731G7q5dyvBn8YwEUFiWiLsrOxyLVNrei3cfXIXjas3xvFPj8OximMJtrD804gGl2Mv\no2mNpiX2nNrXkUK9zptyWqnMqlypcrFOK/2y/xf80OWHAgcDANhb2+Mlt5dw4MYBzD8xH/4D/Y2a\nhqlmXQ3+l/wBAI+SH+mFw4X7FxD5MBJWFa0QkxgDZ1tnRD6MxOaIzbg45mKB25iXChYVnulgAIDa\n9rUx6cAkWFa0RAWLCohOiMaXAV/CobIDphycgnP3zqGVSyu0WtAKHjU9EDQkKM/6atrVRPC1YAx7\nfliZCAZAeZHIKxgA4OuOX8O6ojVGvjgSFStULKGWmY8KFhVKNBiKyizDwbqite7cSompiUjTpMGh\nskOh6gqLCcOFmAsY8I8BRWqTh6sHXq7zMl5vnNtHPfQ5VHaAdUVrONs6IzEtUW/f5IOT8WX7L7H9\n0naExYShq21X+Bzywecvfa6aYyUlHKLioxD4YSCmHpqKyQcm42rcVUx9dSqGbR2Gzf/cjNsJt5GQ\nkoDxnQ2dBUafu4M7Wrm0gu9rviXQ+uIz7uVxpd0EKkPMMxwqZS1I/7D3B1SsUBHTXptWqLrmH5+P\n4S8Mh1VFqyK1qXvD7vBs4Gn04m37Ou3x1/t/weeQj97J56LiohBwOQDze89HVHwUwmLC0KRGE2wK\n34TIf+X2AXTz1s6tHXx7+OK1Rq9hz9U98D3si78H/Y22tdtiQrcJ6NesX4EW1V3tXBE6KtSELSYy\nvWfyrKxFVblSZaSkpyAtIw2rz68u9HUKnqQ+wZrzazDixRH5FzZCQYbyDpUd8GazN2FnZYfE1KyR\nw68nf8UQjyGoVrkaWjq3RFhMGOYem4vBrQfzqIdcONk44T8d/wMAmNh9Ih588wA9G/eEk40Tfvi/\nH4p0tBXRs8o8Rw7aaaWdV3aqTrlbEJvDN6NTvU6obV+7mFtoPFsrW920UnJ6MlacXYFDww4BUA7V\n3Bi+EZceXsL+oftLrY3PkkoVKnHqjQhmPHJITk/G7+d+R4+GPVThMOPIDKMuavNH6B/48B8fmqqZ\nRrG1zLqmwYawDfCo6YEmNZoAUMLh4I2DaOHc4plaCCOi0meW4WBdyRqPkh8h4HIAPvL4SC8cElMT\n8d+9/0VYTFieddxJuIPj0cfRt1lfUzc3T9mnlVaeXYnPXvhMt8/N3g32VvYY/sLw0moeET2jzDMc\nKlrj9J3T8HD1QL1q9fTCIfBKIJLTk/OdatoQtgF9m/aFjaWNqZubp8yRw52EOzh5+yT6Ns0KKwsL\nC6x/fz3ea1n2rqBFRGWbWYZD5UqVIRD0a9YPNpY2ekHgF+GHihYV8w2HrZFb8Xbzt03d1HzZWdkh\nMS0R6y6sQ7/m/VDFsore/p6Ne+pOlUBEZCyzDAfrStYAgH7NlXDIXNBNy0iD/yV/dG/YPc9weJT8\nCMduHUOPRj1KpL15sbWyRWJqIlaHrsbAVgNLuzlEVE6YZTjUtKuJ4S8MR+PqjfVGDseij6F+tfpo\nWr2pKhzSNel4c82biHwYicArgehcr3O+nzgtCbaWtrj48CIux17GK+6vlHZziKicMMtwqGpdFYv6\nLgKgvLhmBsGeq3vQo2EP1VQTACw9vRTbIrfhyM0j2B65XW9uvzTZWdlh55WdeL3J65w+IqJiY5bh\nkF32INgdtRvdG3bXm2rKFHglELXta+Ne4j3suroLvRr3Ko3mqtha2SJNk1ZmwoqIygezD4cqllWQ\nlJaEhJQEnLlzBp3rdVaNHDI0Gdh3bR8G/WMQ9kTtgXVFa7g7updiq7PYWtqiUoVKZSasiKh8MPtw\nqGBRAdaVrLHzyk60c2sHG0sb2FrZ6oVDyL0Q1LSriba12mL31d1lam6/UfVGGNF2RKFPHEhEZIjZ\nhwOgTC1NPTRV93mAnCOHvVF78Yr7K3Cr6gaNaNCtQdk4DTMANK7eGPN6zyvtZhBROWOW51bKKTYp\nFrFJsTg47CAAdTgEXQvCsOeHwc3eDQDKzDn6iYhMhSMHrTY12+hOu509HEQER28dRad6nVC3Wl3M\neG0G6lStU5pNJSIyOY4ctDxqeuhuZw+HS7GXYG9lr7vS2tiXx5ZK+4iIShJHDgA61+uMEW2zrslg\na5l1Guxjt46hfZ32pdU0IqJSwZEDgANDD+jdzz5yOHrrKNq7MRyIyLxw5GBAzlNqdKjToZRbRERU\nshgOBthY2iAxNRFJaUkIiwlDm5ptSrtJREQliuFgQA2bGohNikXo/VA0rdFUdRpsIqLyjuFggFVF\nK9hZ2SH4WrDeUUxEROaC4ZALF1sX7Lq6C61dWpd2U4iIShzDIReudq44cP0ARw5EZJZMHQ69AEQA\nuARgfC5lPAGcAXAeQLCJ22M0V1tXpGSkwMOV4UBE5seUn3OoCGAegFcBRAM4AWArgPBsZRwAzAfQ\nE8AtAE4mbE+BuNi6oKZdTTjbOpd2U4iISlxe4fBVj
vsCIAbAQQBRRtTdDsBlANe099cC6Af9cBgI\nYCOUYACAB0bUWyJcbV05aiAis5XXtJI9ALtsX/YAXgIQAGCAEXW7AbiZ7f4t7bbsmgCoDiAIwEkA\ng41qdQl4teGrGNZmWGk3g4ioVOQ1cvDOZXt1AHsArMmnbjHi+S0BvACgOwAbAEcAHIWyRqHfGO+s\n5nh6esLT09OI6gvv5bov42W8bNLnICIqTsHBwQgODi6WuiwK+bgzAPL72HAHKAGTef3K7wBoAEzN\nVmY8gCrICqKlUEYmG3LUJSLGZA0REWWysLAACvk6X5ijlboBiDOi3Eko00YNAFgB+CeUBenstgDo\nDGXx2gZAewBhhWgTEREVo7ymlUINbHMEcAfAR0bUnQ5gDIBAKC/+y6AsRmeeG3sRlMNcAwCcgzKq\nWAKGAxFRqctruNEgx30B8BDAE5O1JnecViIiKqCiTCsVds2hpDEciIgKqKTXHIiIqJxjOBARkQrD\ngYiIVBgORESkwnAgIiIVhgMREakwHIiISIXhQEREKgwHIiJSYTgQEZEKw4GIiFQYDkREpMJwICIi\nFYYDERGpMByIiEiF4UBERCoMByIiUmE4EBGRCsOBiIhUGA5ERKTCcCAiIhWGAxERqTAciIhIheFA\nREQqDAci7p36AAANXUlEQVQiIlJhOBARkQrDgYiIVEwdDr0ARAC4BGB8HuVeApAO4B0Tt4eIiIxg\nynCoCGAelIBoCWAAgBa5lJsKIACAhQnbQ0RERjJlOLQDcBnANQBpANYC6Geg3L8AbAAQY8K2EBFR\nAZgyHNwA3Mx2/5Z2W84y/QAs1N4XE7aHiIiMVMmEdRvzQj8LwLfashbIY1rJ29tbd9vT0xOenp5F\nax0RUTkTHByM4ODgYqnLlHP8HQB4Q1lzAIDvAGigrC9kupqtDU4AngL4DMDWHHWJCAcVREQFYWFh\nARTydd6U4VAJwEUA3QHcBnAcyqJ0eC7lVwDYBmCTgX0MByKiAipKOJhyWikdwBgAgVCOSFoGJRhG\naPcvMuFzExFRETwrh45y5EBEVEBFGTnwE9JERKTCcCAiIhWGAxERqTAciIhIheFAREQqDAciIlJh\nOBARkQrDgYiIVBgORESkwnAgIiIVhgMREakwHIiISIXhQEREKgwHIiJSYTgQEZEKw4GIiFQYDkRE\npMJwICIiFYYDERGpMByIiEiF4UBERCoMByIiUqlU2g0gIjKkevXqiIuLK+1mPBMcHR0RGxtbrHVa\nFGttpiMiUtptIKISZGFhAf7fGye3vrKwsAAK+TrPaSUiIlJhOBARkQrDgYiIVBgORESkUhLh0AtA\nBIBLAMYb2D8IQAiAcwAOAWhdAm0iIiqy7777DrNnzzb582zbtg0ffPCByZ8nO1OHQ0UA86AEREsA\nAwC0yFHmKoD/gxIKEwAsNnGbiIiKLCYmBqtWrcLIkSMBAGFhYXjxxRdRvXp1ODg4oFOnTjh48KDR\ndQ0YMABubm5wcHBA586dcfz4cd3+vn374sKFCwgNDTXJz2KIqcOhHYDLAK4BSAOwFkC/HGWOAHik\nvX0MQB0Tt4mIqMhWrlyJN954A9bW1gAANzc3rF+/Hg8fPkRcXBw++OADvPfee0bV9eTJE7Rv3x6n\nT59GXFwchgwZgjfeeAOJiYm6MgMGDMDixSX33tnU4eAG4Ga2+7e023LzCYAdJm0REVExCAgIQNeu\nXXX3q1WrBnd3d1hYWCAjIwMVKlRArVq1jKrL3d0dX375JVxdXWFhYYHPPvsMqampiIyM1JXx9PSE\nv79/sf8cuTH1J6QL8gmWbgCGAehkorYQERWb0NBQNGvWTLXdwcEBiYmJqF27Nvbu3Vuous+ePYvU\n1FQ0btxYt6158+a4du0anjx5Ajs7u0K321imDodoAHWz3a8LZfSQU2sAS6CsTRj8vLy3t7futqen\nJzw9PYurjUT0jLIopnM8FOaD2PHx8bC3tze4/enTp/jpp5/w/vvv49SpU5mfVDbK48ePMXjwYHh7\ne+vVn3k7Pj4+13AIDg5GcHBwwX6QXJj69BmVAFwE0B3AbQDHoSxKh2crUw/AXgAfAjiaSz08fQaR\nmSnrp89wdXXFjh070LZtW4P7RQT29vY4fPgwWrc27iDMpKQk9OrVC82bN8eiRYv09sXGxsLJyQmP\nHz9WhcOzePqMdABjAAQCCAOwDkowjNB+AcCPABwBLARwBkqAEBGVaa1bt8bFixdz3Z+RkQGNRgMb\nGxuj6ktJScFbb72FevXqqYIBAMLDw9GgQYMSmVICSuZzDn8DaAagMYDJ2m2LtF8A8CmAGgDaaL/a\nlUCbiIiKpHfv3ti3b5/u/u7du3H27FlkZGTg8ePHGDduHJo1a6ZbN1i5ciXc3d0N1pWWlob33nsP\nNjY2WLlypcEy+/btQ+/evYv958gNPyFNRFQIH330EXbs2IHk5GQAylrAgAED4ODggGbNmiEmJgZb\nt27Vlb958yY6d+5ssK7Dhw/D398fu3btgoODA+zt7WFvb49Dhw7pyqxduxYjRoww+HhT4Cm7iahM\nKutrDgDw3//+Fy4uLvjiiy/yLduzZ0/MmTPH4BFO+dm2bRv+/PNPrF271uB+U6w5MByIqEx6FsKh\nrHgWF6SJiOgZxHAgIiIVhgMREakwHIiISIXhQEREKgwHIiJSYTgQEZEKw4GIqJB4mVAiItKT8zKh\nR48eRY8ePVCjRg24uLigf//+uHv3rtF1mdtlQomIyqWclwmNj4/HyJEjcf36dVy/fh329vYYOnSo\nUXWVxcuE8vQZRFQmlfXTZ3Tv3h2ffPIJBg4caHD/6dOn4enpicePHxeq/mrVqiE4OBht2rQBoJyc\n78MPP8TVq1dVZXn6DCKiMiK3y4Rm2r9/P1q1alWouvO7TGhJMPVlQomITMbip+KZ/BCvgo9QcrtM\nKACcO3cOEyZM0Dtlt7GKcpnQ4sRwIKJnVmFe1IuLo6MjEhISVNsvX76M3r17Y86cOejUqVOB6kxK\nSkLfvn3RsWNHjB8/Xm9f5nM5ODgUvtEFwGklIqJCMHSZ0OvXr6NHjx748ccfMWjQoALVZ46XCSUi\nKndyXiY0Ojoar7zyCsaMGYPhw4eryvMyoUREZiDnZUKXLl2KqKgo3VqBvb09qlatqivPy4SaBg9l\nJTIzZf1QVoCXCS0LGA5EZuZZCIeygp9zICKiEsFwICIiFYYDERGpMByIiEiF4UBERCo8fQYRlUmO\njo6ZR9tQPhwdHYu9TlP3fC8AswBUBLAUwFQDZeYAeB3AUwAfAzhjoAwPZSUiKqCyeihrRQDzoARE\nSwADALTIUaY3gMYAmgAYDmChCdtTLgQHB5d2E8oM9kUW9kUW9kXxMGU4tANwGcA1AGkA1gLol6PM\nmwB+094+BsABgKsJ2/TM4x9+FvZFFvZFFvZF8TBlOLgBuJnt/i3ttvzK1DFhm4iIyAimDAdjFwly\nzodxcYGI
qJSZckG6AwBvKGsOAPAdAA30F6V/BRAMZcoJACIAdAVwL0ddlwE0MlE7iYjKqytQ1nXL\nlEpQGtYAgBWAszC8IL1De7sDgKMl1TgiIio9rwO4COWd/3fabSO0X5nmafeHAHihRFtHRERERETl\nQy8o6xCXAIzPp2x5sBzKektotm3VAewCEAlgJ5TDfTN9B6VvIgC8VkJtLCl1AQQBuADgPIB/a7eb\nY39UhnKo91kAYQAma7ebY19kqgjlA7PbtPfNtS+uATgHpS+Oa7eV+76oCGW6qQEASxhesyhvugBo\nA/1w8AHwjfb2eABTtLdbQukTSyh9dBnl61xZNQE8r71tB2V6sgXMtz9stN8rQVmb6wzz7QsAGAfg\nTwBbtffNtS+ioIRBduW+L14GEJDt/rfar/KuAfTDIQJZHwysqb0PKO8Aso+mAqAs6pdXfgBeBfvD\nBsAJAM/BfPuiDoDdALoha+Rgrn0RBaBGjm3F0hdlOTWM+RCdOXBF1qG995D1S68NpU8ylef+aQBl\nRHUM5tsfFaC867uHrOk2c+2LmQC+hnJofCZz7QuBEpQnAXym3VYsfVGWz8rKD8OpCfLul/LYZ3YA\nNgL4AkBCjn3m1B8aKNNs1QAEQnnXnJ259EUfAPehzLF75lLGXPoCADoBuAPAGco6Q0SO/YXui7I8\ncoiGsiiZqS70U89c3IMyNASAWlD+MQB1/9TRbitPLKEEwyoo00qAefcHADwC4A+gLcyzLzpCOSdb\nFIA1AF6B8vdhjn0BKMEAADEANkM5p1257wtjPkRXHjWAekE6c57wW6gXl6wAuEPpq/J08nsLAL9D\nmULIzhz7wwlZR5xUAbAfQHeYZ19k1xVZaw7m2Bc2AOy1t20BHIJyBJJZ9IWhD9GVZ2sA3AaQCmW9\nZSiUIxF2w/Bhad9D6ZsIAD1LtKWm1xnKVMpZKFMIZ6Ac2myO/fEPAKeh9MU5KPPtgHn2RXZdkXW0\nkjn2hTuUv4mzUA73znyNNMe+ICIiIiIiIiIiIiIiIiIiIiIiIiIiInqWPNF+rw9gQDHX/X2O+4eK\nuX4iIjKRzHMyeSLrE7XGyu/8YznP90RERM+IzBfwowDioXza+gso5xbzhXKRlBAAw7XlPAEcALAF\nWScy84Ny5svzyDr75RQA6dr6Vmm3ZY5SLLR1h0L5VHP/bHUHA1gPIBzAH9naOQXK2VZDtI8lIiIT\nygyH7OfiAZQw+K/2tjWU6yQ0gPIC/gTKNFQmR+33KlBe8DPv5xw5ZN5/F8qpCywAuAC4DuVkaJ5Q\nAqq2dt9hKGfWrAH9M2pWNfaHIzKFsnxWVqLilvMkY68B+AjKO/+jUM5J01i77ziUF/RMX0A5h80R\nKGe2bJLPc3UGsBrKKZHvA9gH4CXt/eNQzqEl2jrrQwmMZADLALwNIKmgPxxRcWI4kLkbA+VCQm0A\nNIJywjIASMxWxhPKWVA7QLmmwhko13XOi0AdRpnnzk/Jti0DyqnJM6CcbnkDlGsWBICoFDEcyJwk\nIOsUx4By0ZzRyFp0boqsazVnVxVAHJR39s2hf2nFNBhetD4A4J9Q/secAfwflBFDbqdItoVy9sy/\noVwf2SPfn4bIhMryleCIikvmO/YQKO/QzwJYAWAOlDWG01BetO9DmdLJefWsAAAjAYRBOYX8kWz7\nFkNZcD4FYHC2x22Gch30EO22r7X1t4D66lsCJbS2QBmRWAAYW+ifloiIiIiIiIiIiIiIiIiIiIiI\niIiIiIiIiIiIiKg8+3+ftEQRU4HjfQAAAABJRU5ErkJggg==\n",
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- }
- ],
- "source": [
- "graph_utility_estimates(agent, Fig[17,1], 500, [(2,2), (3,2)])"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.4.3"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
diff --git a/rl.py b/rl.py
deleted file mode 100644
index 44673b528..000000000
--- a/rl.py
+++ /dev/null
@@ -1,85 +0,0 @@
-"""Reinforcement Learning (Chapter 21)
-"""
-
-from utils import * # noqa
-import agents
-
-
-class PassiveADPAgent(agents.Agent):
-
- """Passive (non-learning) agent that uses adaptive dynamic programming
- on a given MDP and policy. [Fig. 21.2]"""
- NotImplemented
-
-
-class PassiveTDAgent:
- """The abstract class for a Passive (non-learning) agent that uses
- temporal differences to learn utility estimates. Override update_state
- method to convert percept to state and reward. The mdp being probided
- should be an instance of a subclass of the MDP Class.[Fig. 21.4]
- """
-
- def __init__(self, pi, mdp, alpha=None):
-
- self.pi = pi
- self.U = {s: 0. for s in mdp.states}
- self.Ns = {s: 0 for s in mdp.states}
- self.s = None
- self.a = None
- self.r = None
- self.gamma = mdp.gamma
- self.terminals = mdp.terminals
-
- if alpha:
- self.alpha = alpha
- else:
- self.alpha = lambda n: 1./(1+n) # udacity video
-
- def __call__(self, percept):
- s_prime, r_prime = self.update_state(percept)
- pi, U, Ns, s, a, r = self.pi, self.U, self.Ns, self.s, self.a, self.r
- alpha, gamma, terminals = self.alpha, self.gamma, self.terminals
- if not Ns[s_prime]:
- U[s_prime] = r_prime
- if s is not None:
- Ns[s] += 1
- U[s] += alpha(Ns[s]) * (r + gamma * U[s_prime] - U[s])
- if s_prime in terminals:
- self.s = self.a = self.r = None
- else:
- self.s, self.a, self.r = s_prime, pi[s_prime], r_prime
- return self.a
-
- def update_state(self, percept):
- ''' To be overriden in most cases. The default case
- assumes th percept to be of type (state, reward)'''
- return percept
-
-
-def run_single_trial(agent_program, mdp):
- ''' Execute trial for given agent_program
- and mdp. mdp should be an instance of subclass
- of mdp.MDP '''
-
- def take_single_action(mdp, s, a):
- '''
- Selects outcome of taking action a
- in state s. Weighted Sampling.
- '''
- x = random.uniform(0, 1)
- cumulative_probability = 0.0
- for probabilty_state in mdp.T(s, a):
- probabilty, state = probabilty_state
- cumulative_probability += probabilty
- if x < cumulative_probability:
- break
- return state
-
- current_state = mdp.init
- while True:
- current_reward = mdp.R(current_state)
- percept = (current_state, current_reward)
- next_action = agent_program(percept)
- if next_action is None:
- break
- current_state = take_single_action(mdp, current_state, next_action)
diff --git a/search.ipynb b/search.ipynb
index 80c9743c5..caf231dcc 100644
--- a/search.ipynb
+++ b/search.ipynb
@@ -6,249 +6,6510 @@
"collapsed": true
},
"source": [
- "# The search.py module\n",
- "*Date: 14 March 2016*"
+ "# Solving problems by Searching\n",
+ "\n",
+ "This notebook serves as supporting material for topics covered in **Chapter 3 - Solving Problems by Searching** and **Chapter 4 - Beyond Classical Search** from the book *Artificial Intelligence: A Modern Approach.* This notebook uses implementations from [search.py](https://github.com/aimacode/aima-python/blob/master/search.py) module. Let's start by importing everything from search module."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [],
+ "source": [
+ "from search import *\n",
+ "from notebook import psource, heatmap, gaussian_kernel, show_map, final_path_colors, display_visual, plot_NQueens\n",
+ "\n",
+ "# Needed to hide warnings in the matplotlib sections\n",
+ "import warnings\n",
+ "warnings.filterwarnings(\"ignore\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## CONTENTS\n",
+ "\n",
+ "* Overview\n",
+ "* Problem\n",
+ "* Node\n",
+ "* Simple Problem Solving Agent\n",
+ "* Search Algorithms Visualization\n",
+ "* Breadth-First Tree Search\n",
+ "* Breadth-First Search\n",
+ "* Best First Search\n",
+ "* Uniform Cost Search\n",
+ "* Greedy Best First Search\n",
+ "* A\\* Search\n",
+ "* Hill Climbing\n",
+ "* Simulated Annealing\n",
+ "* Genetic Algorithm\n",
+ "* AND-OR Graph Search\n",
+ "* Online DFS Agent\n",
+ "* LRTA* Agent"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## OVERVIEW\n",
+ "\n",
+ "Here, we learn about a specific kind of problem solving - building goal-based agents that can plan ahead to solve problems. In particular, we examine navigation problem/route finding problem. We must begin by precisely defining **problems** and their **solutions**. We will look at several general-purpose search algorithms.\n",
+ "\n",
+ "Search algorithms can be classified into two types:\n",
+ "\n",
+ "* **Uninformed search algorithms**: Search algorithms which explore the search space without having any information about the problem other than its definition.\n",
+ " * Examples:\n",
+ " 1. Breadth First Search\n",
+ " 2. Depth First Search\n",
+ " 3. Depth Limited Search\n",
+ " 4. Iterative Deepening Search\n",
+ "\n",
+ "\n",
+ "* **Informed search algorithms**: These type of algorithms leverage any information (heuristics, path cost) on the problem to search through the search space to find the solution efficiently.\n",
+ " * Examples:\n",
+ " 1. Best First Search\n",
+ " 2. Uniform Cost Search\n",
+ " 3. A\\* Search\n",
+ " 4. Recursive Best First Search\n",
+ "\n",
+ "*Don't miss the visualisations of these algorithms solving the route-finding problem defined on Romania map at the end of this notebook.*"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For visualisations, we use networkx and matplotlib to show the map in the notebook and we use ipywidgets to interact with the map to see how the searching algorithm works. These are imported as required in `notebook.py`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%matplotlib inline\n",
+ "import networkx as nx\n",
+ "import matplotlib.pyplot as plt\n",
+ "from matplotlib import lines\n",
+ "\n",
+ "from ipywidgets import interact\n",
+ "import ipywidgets as widgets\n",
+ "from IPython.display import display\n",
+ "import time"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## PROBLEM\n",
+ "\n",
+ "Let's see how we define a Problem. Run the next cell to see how abstract class `Problem` is defined in the search module."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "class Problem ( object ): \n",
+ "\n",
+ " """The abstract class for a formal problem. You should subclass \n",
+ " this and implement the methods actions and result, and possibly \n",
+ " __init__, goal_test, and path_cost. Then you will create instances \n",
+ " of your subclass and solve them with the various search functions.""" \n",
+ "\n",
+ " def __init__ ( self , initial , goal = None ): \n",
+ " """The constructor specifies the initial state, and possibly a goal \n",
+ " state, if there is a unique goal. Your subclass's constructor can add \n",
+ " other arguments.""" \n",
+ " self . initial = initial \n",
+ " self . goal = goal \n",
+ "\n",
+ " def actions ( self , state ): \n",
+ " """Return the actions that can be executed in the given \n",
+ " state. The result would typically be a list, but if there are \n",
+ " many actions, consider yielding them one at a time in an \n",
+ " iterator, rather than building them all at once.""" \n",
+ " raise NotImplementedError \n",
+ "\n",
+ " def result ( self , state , action ): \n",
+ " """Return the state that results from executing the given \n",
+ " action in the given state. The action must be one of \n",
+ " self.actions(state).""" \n",
+ " raise NotImplementedError \n",
+ "\n",
+ " def goal_test ( self , state ): \n",
+ " """Return True if the state is a goal. The default method compares the \n",
+ " state to self.goal or checks for state in self.goal if it is a \n",
+ " list, as specified in the constructor. Override this method if \n",
+ " checking against a single self.goal is not enough.""" \n",
+ " if isinstance ( self . goal , list ): \n",
+ " return is_in ( state , self . goal ) \n",
+ " else : \n",
+ " return state == self . goal \n",
+ "\n",
+ " def path_cost ( self , c , state1 , action , state2 ): \n",
+ " """Return the cost of a solution path that arrives at state2 from \n",
+ " state1 via action, assuming cost c to get up to state1. If the problem \n",
+ " is such that the path doesn't matter, this function will only look at \n",
+ " state2. If the path does matter, it will consider c and maybe state1 \n",
+ " and action. The default method costs 1 for every step in the path.""" \n",
+ " return c + 1 \n",
+ "\n",
+ " def value ( self , state ): \n",
+ " """For optimization problems, each state has a value. Hill-climbing \n",
+ " and related algorithms try to maximize this value.""" \n",
+ " raise NotImplementedError \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(Problem)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The `Problem` class has six methods.\n",
+ "\n",
+ "* `__init__(self, initial, goal)` : This is what is called a `constructor`. It is the first method called when you create an instance of the class as `Problem(initial, goal)`. The variable `initial` specifies the initial state $s_0$ of the search problem. It represents the beginning state. From here, our agent begins its task of exploration to find the goal state(s) which is given in the `goal` parameter.\n",
+ "\n",
+ "\n",
+ "* `actions(self, state)` : This method returns all the possible actions agent can execute in the given state `state`.\n",
+ "\n",
+ "\n",
+ "* `result(self, state, action)` : This returns the resulting state if action `action` is taken in the state `state`. This `Problem` class only deals with deterministic outcomes. So we know for sure what every action in a state would result to.\n",
+ "\n",
+ "\n",
+ "* `goal_test(self, state)` : Return a boolean for a given state - `True` if it is a goal state, else `False`.\n",
+ "\n",
+ "\n",
+ "* `path_cost(self, c, state1, action, state2)` : Return the cost of the path that arrives at `state2` as a result of taking `action` from `state1`, assuming total cost of `c` to get up to `state1`.\n",
+ "\n",
+ "\n",
+ "* `value(self, state)` : This acts as a bit of extra information in problems where we try to optimise a value when we cannot do a goal test."
+ ]
+ },
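+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As a quick illustration (a sketch for this notebook only, not part of the search module), here is how one might subclass `Problem` for a hypothetical toy puzzle: starting from 0, reach the goal number using the made-up actions `'+1'` and `'*2'`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Hypothetical toy subclass of Problem, for demonstration only\n",
+ "class CountTo(Problem):\n",
+ "    def actions(self, state):\n",
+ "        return ['+1', '*2']\n",
+ "\n",
+ "    def result(self, state, action):\n",
+ "        return state + 1 if action == '+1' else state * 2\n",
+ "\n",
+ "count_problem = CountTo(0, 10)\n",
+ "print(count_problem.goal_test(10))   # True\n",
+ "print(count_problem.result(4, '*2')) # 8"
+ ]
+ },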
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## NODE\n",
+ "\n",
+ "Let's see how we define a Node. Run the next cell to see how abstract class `Node` is defined in the search module."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "class Node : \n",
+ "\n",
+ " """A node in a search tree. Contains a pointer to the parent (the node \n",
+ " that this is a successor of) and to the actual state for this node. Note \n",
+ " that if a state is arrived at by two paths, then there are two nodes with \n",
+ " the same state. Also includes the action that got us to this state, and \n",
+ " the total path_cost (also known as g) to reach the node. Other functions \n",
+ " may add an f and h value; see best_first_graph_search and astar_search for \n",
+ " an explanation of how the f and h values are handled. You will not need to \n",
+ " subclass this class.""" \n",
+ "\n",
+ " def __init__ ( self , state , parent = None , action = None , path_cost = 0 ): \n",
+ " """Create a search tree Node, derived from a parent by an action.""" \n",
+ " self . state = state \n",
+ " self . parent = parent \n",
+ " self . action = action \n",
+ " self . path_cost = path_cost \n",
+ " self . depth = 0 \n",
+ " if parent : \n",
+ " self . depth = parent . depth + 1 \n",
+ "\n",
+ " def __repr__ ( self ): \n",
+ " return "<Node {}>" . format ( self . state ) \n",
+ "\n",
+ " def __lt__ ( self , node ): \n",
+ " return self . state < node . state \n",
+ "\n",
+ " def expand ( self , problem ): \n",
+ " """List the nodes reachable in one step from this node.""" \n",
+ " return [ self . child_node ( problem , action ) \n",
+ " for action in problem . actions ( self . state )] \n",
+ "\n",
+ " def child_node ( self , problem , action ): \n",
+ " """[Figure 3.10]""" \n",
+ " next_state = problem . result ( self . state , action ) \n",
+ " next_node = Node ( next_state , self , action , \n",
+ " problem . path_cost ( self . path_cost , self . state , \n",
+ " action , next_state )) \n",
+ " return next_node \n",
+ " \n",
+ " def solution ( self ): \n",
+ " """Return the sequence of actions to go from the root to this node.""" \n",
+ " return [ node . action for node in self . path ()[ 1 :]] \n",
+ "\n",
+ " def path ( self ): \n",
+ " """Return a list of nodes forming the path from the root to this node.""" \n",
+ " node , path_back = self , [] \n",
+ " while node : \n",
+ " path_back . append ( node ) \n",
+ " node = node . parent \n",
+ " return list ( reversed ( path_back )) \n",
+ "\n",
+ " # We want for a queue of nodes in breadth_first_graph_search or \n",
+ " # astar_search to have no duplicated states, so we treat nodes \n",
+ " # with the same state as equal. [Problem: this may not be what you \n",
+ " # want in other contexts.] \n",
+ "\n",
+ " def __eq__ ( self , other ): \n",
+ " return isinstance ( other , Node ) and self . state == other . state \n",
+ "\n",
+ " def __hash__ ( self ): \n",
+ " return hash ( self . state ) \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(Node)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The `Node` class has nine methods. The first is the `__init__` method.\n",
+ "\n",
+ "* `__init__(self, state, parent, action, path_cost)` : This method creates a node. `parent` represents the node that this is a successor of and `action` is the action required to get from the parent node to this node. `path_cost` is the cost to reach current node from parent node.\n",
+ "\n",
+ "The next 4 methods are specific `Node`-related functions.\n",
+ "\n",
+ "* `expand(self, problem)` : This method lists all the neighbouring(reachable in one step) nodes of current node. \n",
+ "\n",
+ "* `child_node(self, problem, action)` : Given an `action`, this method returns the immediate neighbour that can be reached with that `action`.\n",
+ "\n",
+ "* `solution(self)` : This returns the sequence of actions required to reach this node from the root node. \n",
+ "\n",
+ "* `path(self)` : This returns a list of all the nodes that lies in the path from the root to this node.\n",
+ "\n",
+ "The remaining 4 methods override standards Python functionality for representing an object as a string, the less-than ($<$) operator, the equal-to ($=$) operator, and the `hash` function.\n",
+ "\n",
+ "* `__repr__(self)` : This returns the state of this node.\n",
+ "\n",
+ "* `__lt__(self, node)` : Given a `node`, this method returns `True` if the state of current node is less than the state of the `node`. Otherwise it returns `False`.\n",
+ "\n",
+ "* `__eq__(self, other)` : This method returns `True` if the state of current node is equal to the other node. Else it returns `False`.\n",
+ "\n",
+ "* `__hash__(self)` : This returns the hash of the state of current node."
+ ]
+ },
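+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As a quick illustration (added here for this walkthrough, with made-up states and actions), we can chain a few `Node` objects together by hand and inspect `depth`, `path()` and `solution()`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Build a tiny root -> child -> grandchild chain by hand\n",
+ "root = Node('A')\n",
+ "child = Node('B', parent=root, action='go_B', path_cost=5)\n",
+ "grandchild = Node('C', parent=child, action='go_C', path_cost=9)\n",
+ "\n",
+ "print(grandchild.depth)       # 2: two steps below the root\n",
+ "print(grandchild.path())      # [<Node A>, <Node B>, <Node C>]\n",
+ "print(grandchild.solution())  # ['go_B', 'go_C']"
+ ]
+ },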
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We will use the abstract class `Problem` to define our real **problem** named `GraphProblem`. You can see how we define `GraphProblem` by running the next cell."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "class GraphProblem ( Problem ): \n",
+ "\n",
+ " """The problem of searching a graph from one node to another.""" \n",
+ "\n",
+ " def __init__ ( self , initial , goal , graph ): \n",
+ " Problem . __init__ ( self , initial , goal ) \n",
+ " self . graph = graph \n",
+ "\n",
+ " def actions ( self , A ): \n",
+ " """The actions at a graph node are just its neighbors.""" \n",
+ " return list ( self . graph . get ( A ) . keys ()) \n",
+ "\n",
+ " def result ( self , state , action ): \n",
+ " """The result of going to a neighbor is just that neighbor.""" \n",
+ " return action \n",
+ "\n",
+ " def path_cost ( self , cost_so_far , A , action , B ): \n",
+ " return cost_so_far + ( self . graph . get ( A , B ) or infinity ) \n",
+ "\n",
+ " def find_min_edge ( self ): \n",
+ " """Find minimum value of edges.""" \n",
+ " m = infinity \n",
+ " for d in self . graph . graph_dict . values (): \n",
+ " local_min = min ( d . values ()) \n",
+ " m = min ( m , local_min ) \n",
+ "\n",
+ " return m \n",
+ "\n",
+ " def h ( self , node ): \n",
+ " """h function is straight-line distance from a node's state to goal.""" \n",
+ " locs = getattr ( self . graph , 'locations' , None ) \n",
+ " if locs : \n",
+ " if type ( node ) is str : \n",
+ " return int ( distance ( locs [ node ], locs [ self . goal ])) \n",
+ "\n",
+ " return int ( distance ( locs [ node . state ], locs [ self . goal ])) \n",
+ " else : \n",
+ " return infinity \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(GraphProblem)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Have a look at our romania_map, which is an Undirected Graph containing a dict of nodes as keys and neighbours as values."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "romania_map = UndirectedGraph(dict(\n",
+ " Arad=dict(Zerind=75, Sibiu=140, Timisoara=118),\n",
+ " Bucharest=dict(Urziceni=85, Pitesti=101, Giurgiu=90, Fagaras=211),\n",
+ " Craiova=dict(Drobeta=120, Rimnicu=146, Pitesti=138),\n",
+ " Drobeta=dict(Mehadia=75),\n",
+ " Eforie=dict(Hirsova=86),\n",
+ " Fagaras=dict(Sibiu=99),\n",
+ " Hirsova=dict(Urziceni=98),\n",
+ " Iasi=dict(Vaslui=92, Neamt=87),\n",
+ " Lugoj=dict(Timisoara=111, Mehadia=70),\n",
+ " Oradea=dict(Zerind=71, Sibiu=151),\n",
+ " Pitesti=dict(Rimnicu=97),\n",
+ " Rimnicu=dict(Sibiu=80),\n",
+ " Urziceni=dict(Vaslui=142)))\n",
+ "\n",
+ "romania_map.locations = dict(\n",
+ " Arad=(91, 492), Bucharest=(400, 327), Craiova=(253, 288),\n",
+ " Drobeta=(165, 299), Eforie=(562, 293), Fagaras=(305, 449),\n",
+ " Giurgiu=(375, 270), Hirsova=(534, 350), Iasi=(473, 506),\n",
+ " Lugoj=(165, 379), Mehadia=(168, 339), Neamt=(406, 537),\n",
+ " Oradea=(131, 571), Pitesti=(320, 368), Rimnicu=(233, 410),\n",
+ " Sibiu=(207, 457), Timisoara=(94, 410), Urziceni=(456, 350),\n",
+ " Vaslui=(509, 444), Zerind=(108, 531))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "collapsed": true
+ },
+ "source": [
+ "It is pretty straightforward to understand this `romania_map`. The first node **Arad** has three neighbours named **Zerind**, **Sibiu**, **Timisoara**. Each of these nodes are 75, 140, 118 units apart from **Arad** respectively. And the same goes with other nodes.\n",
+ "\n",
+ "And `romania_map.locations` contains the positions of each of the nodes. We will use the straight line distance (which is different from the one provided in `romania_map`) between two cities in algorithms like A\\*-search and Recursive Best First Search.\n",
+ "\n",
+ "**Define a problem:**\n",
+ "Now it's time to define our problem. We will define it by passing `initial`, `goal`, `graph` to `GraphProblem`. So, our problem is to find the goal state starting from the given initial state on the provided graph. \n",
+ "\n",
+ "Say we want to start exploring from **Arad** and try to find **Bucharest** in our romania_map. So, this is how we do it."
+ ]
+ },
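+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As a small aside we add here for illustration: the straight-line distance between two cities can be computed directly from `romania_map.locations` with plain `math` (the helper name below is ours, not part of the module). This is essentially the value that the heuristic `h` of `GraphProblem` returns:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import math\n",
+ "\n",
+ "def straight_line_distance(city1, city2, locations=romania_map.locations):\n",
+ "    # Euclidean distance between the (x, y) positions of two cities\n",
+ "    (x1, y1), (x2, y2) = locations[city1], locations[city2]\n",
+ "    return math.hypot(x2 - x1, y2 - y1)\n",
+ "\n",
+ "print(int(straight_line_distance('Arad', 'Bucharest')))  # 350 for the locations above"
+ ]
+ },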
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "romania_problem = GraphProblem('Arad', 'Bucharest', romania_map)"
+ ]
+ },
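+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Before visualising anything, we can poke at `romania_problem` directly. A few illustrative calls (added here; the expected values follow from the `romania_map` definition above):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(romania_problem.actions('Arad'))                          # neighbours of Arad\n",
+ "print(romania_problem.result('Arad', 'Sibiu'))                  # 'Sibiu'\n",
+ "print(romania_problem.path_cost(0, 'Arad', 'Sibiu', 'Sibiu'))   # 140, the road distance\n",
+ "print(romania_problem.find_min_edge())                          # 70, the cheapest single road\n",
+ "print(romania_problem.h(Node('Arad')))                          # 350, straight-line estimate to Bucharest"
+ ]
+ },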
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Romania Map Visualisation\n",
+ "\n",
+ "Let's have a visualisation of Romania map [Figure 3.2] from the book and see how different searching algorithms perform / how frontier expands in each search algorithm for a simple problem named `romania_problem`."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Have a look at `romania_locations`. It is a dictionary defined in search module. We will use these location values to draw the romania graph using **networkx**."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "{'Arad': (91, 492), 'Bucharest': (400, 327), 'Craiova': (253, 288), 'Drobeta': (165, 299), 'Eforie': (562, 293), 'Fagaras': (305, 449), 'Giurgiu': (375, 270), 'Hirsova': (534, 350), 'Iasi': (473, 506), 'Lugoj': (165, 379), 'Mehadia': (168, 339), 'Neamt': (406, 537), 'Oradea': (131, 571), 'Pitesti': (320, 368), 'Rimnicu': (233, 410), 'Sibiu': (207, 457), 'Timisoara': (94, 410), 'Urziceni': (456, 350), 'Vaslui': (509, 444), 'Zerind': (108, 531)}\n"
+ ]
+ }
+ ],
+ "source": [
+ "romania_locations = romania_map.locations\n",
+ "print(romania_locations)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let's get started by initializing an empty graph. We will add nodes, place the nodes in their location as shown in the book, add edges to the graph."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# node colors, node positions and node label positions\n",
+ "node_colors = {node: 'white' for node in romania_map.locations.keys()}\n",
+ "node_positions = romania_map.locations\n",
+ "node_label_pos = { k:[v[0],v[1]-10] for k,v in romania_map.locations.items() }\n",
+ "edge_weights = {(k, k2) : v2 for k, v in romania_map.graph_dict.items() for k2, v2 in v.items()}\n",
+ "\n",
+ "romania_graph_data = { 'graph_dict' : romania_map.graph_dict,\n",
+ " 'node_colors': node_colors,\n",
+ " 'node_positions': node_positions,\n",
+ " 'node_label_positions': node_label_pos,\n",
+ " 'edge_weights': edge_weights\n",
+ " }"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We have completed building our graph based on romania_map and its locations. It's time to display it here in the notebook. This function `show_map(node_colors)` helps us do that. We will be calling this function later on to display the map at each and every interval step while searching, using variety of algorithms from the book."
+ ]
+ },
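+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For the curious, here is a minimal sketch of the drawing idea using **networkx** and **matplotlib** directly (assuming both are installed). The actual `show_map` helper does considerably more, such as colouring nodes as the frontier expands:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import networkx as nx\n",
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "# Rebuild the map as a networkx graph from the adjacency dict\n",
+ "G = nx.Graph()\n",
+ "for city, neighbours in romania_map.graph_dict.items():\n",
+ "    for neighbour, dist in neighbours.items():\n",
+ "        G.add_edge(city, neighbour, weight=dist)\n",
+ "\n",
+ "# Draw nodes at the book's coordinates, with edge-weight labels\n",
+ "nx.draw(G, pos=node_positions, node_color='white', with_labels=True, font_size=8)\n",
+ "nx.draw_networkx_edge_labels(G, pos=node_positions, edge_labels=edge_weights, font_size=7)\n",
+ "plt.show()"
+ ]
+ },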
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We can simply call the function with node_colors dictionary object to display it."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {
+ "scrolled": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAABTsAAAPKCAYAAABbVI7QAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAIABJREFUeJzs3Xdc1eX///HnQYaCg8SFmCPcouLG1BQX5Uj9OHKVfBLtY0qOzJELREXNcFbmKC0zS1Nz5RZHoqklOTBH7r1yJvP8/uALv06gggJvODzut9u5+Tnv93Vd7+f7KPThxXVdb5PZbDYLAAAAAAAAALI4G6MDAAAAAAAAAEBaoNgJAAAAAAAAwCpQ7AQAAAAAAABgFSh2AgAAAAAAALAKFDsBAAAAAAAAWAWKnQAAAAAAAACsAsVOAAAAAAAAAFaBYicAAAAAAAAAq0CxEwAAAAAAAIBVoNgJAAAAAAAAwCpQ7AQAAAAAAABgFSh2AgAAAAAAALAKFDsBAAAAAAAAWAWKnQAAAAAAAACsAsVOAAAAAAAAAFaBYicAAAAAAAAAq0CxEwAAAAAAAIBVoNgJAAAAAAAAwCpQ7AQAAAAAAABgFSh2AgAAAAAAALAKFDsBAAAAAAAAWAWKnQAAAAAAAACsAsVOAAAAAAAAAFaBYicAAAAAAAAAq0CxEwAAAAAAAIBVoNgJAAAAAAAAwCpQ7AQAAAAAAABgFSh2AgAAAAAAALAKFDsBAAAAAAAAWAWKnQAAAAAAAACsAsVOAAAAAAAAAFaBYicAAAAAAAAAq0CxEwAAAAAAAIBVoNgJAAAAAAAAwCpQ7AQAAAAAAABgFSh2AgAAAAAAALAKFDsBAAAAAAAAWAWKnQAAAAAAAACsAsVOAAAAAAAAAFaBYicAAAAAAAAAq0CxEwAAAAAAAIBVoNgJAAAAAAAAwCrYGh0AyGiRkZHavn27Hj16lHisRo0acnNzMzAVAAAAAAAAnpfJbDabjQ4BZISzZ89q3759cnBwUOPGjeXk5CRJMpvN2rNnjy5duqTChQurXr16MplMBqcFAAAAAABAalHsRLawefNm5cqVSy+//PITC5lXrlzR+vXr1aVLFzk4OGRgQgAAAAAAADwvip2wehs3btRLL72k0qVLp6h9dHS0vv76a7311luytWWnBwAAAAAAgKyCYiesWnh4uMxmszw9PVPV7++//9aaNWvUsWPHdEoGAAAAAACAtMbT2GHVTpw4kepCpyTlypVLefPm1b1799IhFQAAAAAAANIDxU5YrevXr6tgwYLP3L9x48baunVrGiYCAAAAAABAeqLYCav1888/q0GDBs/c387OTrGxsWmYCAAAAAAAAOmJYiesVo4cOWRj83z/xO3s7NIoDQAAAAAAANIbxU5YrbR49hbP7wIAAAAAAMg6KHbCaplMpkwxBgAAAAAAADIGxU5YLVtbWz18+PC5xoiKikqjNAAAAAAAAEhvFDthtRo3bqwtW7Y8c//bt2/L2dk5DRMBAAAAAAAgPVHshNVycHBQZGTkM++7uX37djVq1ChtQwEAAAAAACDdUOyEVatXr55++umnVPc7e/ascufOrRw5cqRDKgAAAAAAAKQHip2waq6uripevLi2bt2a4j4XLlzQgQMH1LRp03RMBgAAAAAAgLRmMj/rGl8gCzl+/Lj27NmjJk2ayM3NLdk20dHRWrhwoV544QW1b98+gxMCAAAAAADgedkaHQDICGXLltWCBQu0fv16tW/fXs7OzipSpIjs7e1169YtXbhwQba2ttqxY4dcXFwodgIAAAAAAGRBzOxEtnDjxg2VK1dOv/76q0qUKKG7d+/q+vXrioqK0gsvvKDChQvLZDLp2rVrqlSpkkJDQ1WpUiWjYwMAAAAAACAVKHYiWxg6dKju3bunTz/99KltZ8yYoR9//FGbN2+WyWTKgHQAAAAAAABICxQ7YfWuXr2qihUrKjw8XMWKFXtq+5iYGFWrVk1jxoxRhw4dMiAhAAAAAAAA0gLFTli9QYMGKTY2VtOnT09xn9DQUPXo0UMRERFydHRMx3QAAAAAAABIKxQ7YdUuXbokDw8PHTlyRK6urqnq+8Ybb6hcuXIaO3ZsOqUDAAAAAABAWqLYCavm7+8ve3t7ffzxx6nue/78eXl6emrfvn166aWX0iEdAAAAAAAA0hLFTlit8+fPq2rVqjp27JgKFSr0TGOMHz9e+/bt08qVK9M4HQAAAAAAANIaxU5YrT59+ihfvnyaOHHiM4/x6NEjeXh46JNPPpGPj08apgMAAAAAAEBao9gJq3TmzBnVqFFDf/zxhwoUKPBcY61evVqDBw/WoUOHZG9vn0YJAQAAAAAAkNYodgIAAAAAAACwCjZGBwAAAAAAAACAtECxEwAAAAAAAIBVoNgJAAAAAAAAwCpQ7AQAAAAAAABgFSh2Av+ybNkymUwmo2MAAAAAAAAglSh2ItO7ePGievfurWLFisne3l5ubm7q1auXLly4YHQ0AAAAAAAAZCIUO5GpnT59WjVr1tThw4e1cOFCnTx5UosWLdKRI0dUq1YtnTlzJtl+UVFRGRsUAAAAAAAAhqPYiUytb9++srGx0ebNm9WkSRMVL15c3t7e2rx5s2xsbNS3b19JUqNGjdSnTx8NHjxYBQsWVL169SRJISEhqlKlipycnOTm5iY/Pz/99ddfFtf46quvVKJECTk6OqpVq1a6evVqkhyrV69WjRo1lDNnTpUqVUojRoywKKguWrRItWrVUp48eVSoUCF17NhRFy9eTMdPBgAAAAAAAP9GsROZ1q1bt7R+/Xr17dtXjo6OFuccHR317rvv6qefftLt27clxRcczWazdu7cqa+++kqSZGNjo2nTpunIkSNavHixfvnlF/n7+yeOs3fvXvn6+qp37946ePCgWrdurdGjR1tca8OGDerWrZv69eunI0eO6IsvvtCyZcv04YcfJraJiopSYGCgwsPDtWbNGt24cUNdunRJr48GAAAAAAAAyTCZzWaz0SGA5Ozdu1deXl5avny52rVrl+T8ihUr9J///Ed79+7VkCFDdOvWLf3+++9PHHP9+vVq06aN/v77b9nY2Khr1666fv26Nm3alNjGz89P8+fPV8KXxiuvvKJmzZpp1KhRiW1Wrlyp7t276969e8k+zOjYsWOqUKGCzp8/r2LFij3rRwAAAAAAAIBUYGYnMr3HPRk9oRiZcL5GjRpJ2mzdulXNmjVTsWLFlCdPHv3nP/9RVFSUrly5IkmKiIhQ3bp1Lfr8+/2BAwc0fvx45c6dO/HVtWtXPXjwIHGcX3/9VW3atFGJEiWUJ08e1axZU5J07ty557hzAAAAAAAApAbFTmRaZcqUkclk0pEjR5I9HxERIZPJJHd3d0mSk5OTxfmzZ8+qZcuWqlChgpYuXaoDBw7oiy++kPT/H2CUkonNcXFxGjNmjA4ePJj4+v3333XixAkVLFhQDx48kI+PjxwdHfX1119r3759Wr9+vcV1AAAAAAAAkP5sjQ4
APE7+/Pnl4+OjTz/9VAMHDrTYt/Phw4f65JNP9Nprryl//vzJ9t+/f7+ioqI0depU5ciRQ5K0Zs0aizYVK1bUnj17LI79+3316tV17NgxlS5dOtnrhIeH68aNG5owYYJKlSolSVq+fHnqbhYAAAAAAADPjZmdyNRmzZqlmJgYNW3aVFu3btX58+cVGhqqZs2ayWw2a9asWY/tW6ZMGcXFxWnatGk6ffq0vv32W02bNs2izXvvvafNmzcrODhYJ06c0Ny5c7VixQqLNqNHj9bixYs1evRoHT58WMeOHdOyZcs0ZMgQSVLx4sXl4OCgWbNm6c8//9TatWst9vcEAAAAAABAxqDYiUzN3d1d+/fvV6VKlfTmm2/qpZdeUteuXVWhQgXt27cvcSZlcqpUqaLp06crJCREFStW1Lx58zRlyhSLNl5eXpo/f74+++wzValSRcuXL1dAQIBFGx8fH61du1bbtm1T7dq1Vbt2bU2cOFHFixeXJBUsWFALFy7UypUrVbFiRQUGBiokJCTNPwsAAAAAAAA8GU9jBwAAAAAAAGAVmNkJAAAAAAAAwCpQ7AQAAAAAAABgFSh2AgAAAAAAALAKFDsBAAAAAAAAWAWKnQAAAAAAAACsAsVOZAlms1k1atTQ8uXLjY6SImazWc2aNdO0adOMjgIAAAAAAJBtUOxElrBq1SrFxcWpbdu2RkdJEZPJpBkzZmjcuHG6evWq0XEAAAAAAACyBZPZbDYbHQJ4kri4OFWrVk1BQUF6/fXXjY6TKu+//75u376tL774wugoAAAAAAAAVo+Zncj0li9fLnt7e7Vu3droKKk2ZswYrV+/Xnv37jU6CgAAAAAAgNWj2IlMzWw26/r16xo7dqxMJpPRcVItb968Cg4Olr+/v+Li4oyOAwAAAAAAYNVYxo5ML+GfaFYsdkrxy/Dr1asnPz8/9ezZ0+g4AAAAAAAAVotiJ5ABDhw4oJYtW+rYsWNydnY2Og4AAAAAAIBVotgJZJDevXsrV65cmj59utFRAAAAAAAArBLFTiCDXL9+XRUrVtS2bdvk4eFhdBwAAAAAAACrwwOKgAxSsGBBjRkzRv7+/uJ3DAAAAAAAAGmPYieQgf73v//p5s2bWrp0qdFRAAAAAAAArA7L2IEMtn37dr355puKiIiQk5OT0XEAAAAAAACsBjM7Yahbt24ZHSHDNWzYUPXq1VNwcLDRUQAAAAAAAKwKMzthmHnz5mnXrl3y9fWVp6ennJ2dE8+ZzWaZTKbHvs/qLly4oKpVq+qXX36Ru7u70XEAAAAAAACsAsVOGCI2Nlb58+dXVFSUnJ2d1a5dO3Xu3FlVq1ZVvnz5Ets9ePBAdnZ2sre3NzBt+ggODlZYWJhWrVpldBQAAAAAAACrwDJ2GGLZsmWqVKmSfvvtNwUGBmrdunXq2LGjRo0apZ07d+revXuSpGnTplntcu9BgwYpIiJCP/30k9FRAAAAAAAArAIzO2GItWvXasuWLRoyZIiKFCkiSZo1a5YmTZqkmJgYdenSRbVr11bXrl21adMmNWnSxODE6WPt2rUaOHCgDh06JAcHB6PjAAAAAAAAZGkUO5Hh7t+/r9y5c+vPP//USy+9pJiYGNna2iaenz59uqZOnapz586pQYMG2r59u4Fp01+rVq3UoEEDDR061OgoAAAAAAAAWRrFTmSoR48eqVWrVpo4caJq1qxp8eChfxY9jx07pooVK2rPnj2qXbu2kZHT3cmTJ+Xl5aXw8HC5ubkZHQcAAAAAACDLYs9OZKiRI0dq69atGj58uO7evWvxhPWEQmdsbKwmTJigMmXKWH2hU5JKly6t3r17a8iQIUZHAQAAAAAAyNIodiLD3LlzR9OnT9e8efN0+fJlde3aVZcvX5YUX+BMYDab1aBBAy1dutSoqBnuww8/1I4dO7Rz506jowAAAAAAAGRZLGNHhvHz89Off/6prVu3atGiRRowYIC6dOmimTNnJmkbGxurHDlyGJDSOEuWLNHEiRN14MCBbHfvAAAAAAAAaYFiJzLEzZs3VaRIEe3evVu1atWSFF/c8/f315tvvqnx48crV65ciouLk41N9pxwbDab5e3trU6dOundd981Og4AAAAAAECWQ7ETGaJPnz76448/tHXrVsXGxsrGxkYxMTGaMGGCpk2bpo8++kh+fn5GxzTc77//rqZNm+ro0aMqUKCA0XEAAAAAAACyFIqdyBBRUVG6d++eXFxckpwbMWKEZs6cqSlTpqh3794GpMtc/P39FR0drdmzZxsdBQAAAAAAIEuh2AnDJCxZv3nzpvz9/bVhwwZt2bJFnp6eRkcz1O3bt1WhQgWtW7dO1atXNzoOAAAAAABAlpE9N0dEppCwN6eLi4vmz58vT09POTo6GpzKeC+88IKCgoLk7+8vfhcBAAAAAACQcszshOESZnjevXtXefPmNTpOphAbGysvLy+99957evPNN42OAwAAAAAAkCVQ7ESGSng4kSSZTCaD02Rue/fu1X/+8x9FRERQBAYAAAAAAEgBlrEjQw0ePFiLFi2i0JkCderUUfPmzRUUFGR0FAAAAAAAgCyBmZ3IMJcuXZKHh4eOHj2qIkWKGB0nS7h69ao8PDy0c+dOlS9f3ug4AAAAAAAAmRrFTmQYf39/OTg4aMqUKUZHyVKmTp2q9evXa/369cyIBQAAAAAAeAKKncgQ58+fl6enpyIiIlSoUCGj42Qp0dHR8vT01Pjx49W2bVuj4wAAAABAhrt7966uXbum6Ohoo6MAWZqdnZ0KFSpk1c8GodiJDPG///1Pzs7OmjhxotFRsqQtW7aoV69eOnLkiHLlymV0HAAAAADIMHfv3tXVq1fl5uamXLlyseINeEZms1l///23Ll68qMKFC1ttwZNiJ9LdmTNnVKNGDR0/flwuLi5Gx8myOnTooCpVqmj06NFGRwEAAACADHPy5EkVLVpUjo6ORkcBrMLDhw916dIllS5d2ugo6YKnsSPdjRs3Tu+++y6Fzuf08ccfa8aMGTp79qzRUQAAAAAgw0RHR7PCDUhDuXLlsuotISh2Il2dOnVKK1eu1KBBg4yOkuWVKFFC7733nt5//32jowAAAABAhmLpOpB2rP3riWIn0tXYsWPl7++vF154wegoVuGDDz7Qr7/+qi1bthgdBQAAAAAAINOxNToArNcff/yhdevW6eTJk0ZHsRq5cuVSSEiI/P39FR4eLjs7O6MjAQAAAAAAZBrM7ES6GTt2rAYOHKh8+fIZHcWqtGnTRi+++KJmzZpldBQAAAAAwDPw9fVVsWLFkj0XGhoqk8mkzZs3Z3CqtJNwD6GhoUZHSeTr66uSJUsaHQMZgGIn0sXRo0e1efNm+fv7Gx3F6phMJk2fPl0TJkzQ1atXjY4DAAAAAACQaVDsRLoICAjQ+++/rzx58hgdxSqVL19evr6+GjZsmNFRAAAAAABIN7GxsYqJiTE6BrIQip1Ic7///rt27typvn37Gh3Fqo0aNUobN27Unj17jI4CAAAAAEgnJUuWVPfu3bVkyRJVqFBBTk5Oqlmzpn
bt2pXiMebOnauqVasqZ86cKlCggHr27Klbt24lnp83b55MJpNWrlyZeCw2NlavvPKK3N3dde/ePUnxE5tMJpMOHTokb29vOTo6ytXVVaNHj1ZcXNwTM5jNZk2dOlXlypWTvb29XF1d1a9fP929e9einclk0ogRIzRx4kSVKlVK9vb2OnTokCTpxo0b6tOnj9zc3OTg4KDy5ctrzpw5Sa61ZcsWVa9eXTlz5pS7u7s+//zzFH9WyPp4QBHSXEBAgIYMGSInJyejo1i1vHnzauLEifL399fevXtlY8PvLgAAAADAGu3cuVN//PGHgoKClDNnTo0aNUqtWrXSmTNn5Ozs/MS+w4YN08cff6z33ntPH330kS5evKiRI0fq8OHD2r17t3LkyCE/Pz9t3LhRfn5+qlWrltzc3BQUFKSwsDDt2rUryarNtm3b6u2339bw4cO1YcMGBQUFycbGRgEBAY/NMWLECAUHB6tv375q3bq1jh49qlGjRik8PFzbt2+3+Jl2wYIFeumllzRlyhQ5OTmpaNGiunv3rurVq6e///5bAQEBKlWqlDZs2KA+ffooMjIycRu9iIgItWjRQjVr1tSSJUsUGRmpgIAA3b9/Xzly5Hj2vwRkGRQ7kaZ+/fVX7d27V998843RUbKF7t27a/bs2friiy/k5+dndBwAAAAAQDq4e/euDh48qBdeeEGSVKRIEdWqVUvr1q1T165dH9vvzJkz+uijjzRmzBiNHj068XjZsmVVv359rV69Wm3btpUkzZkzR1WrVlX37t0VEBCgcePGKSgoSHXq1Ekybq9evRK3VWvevLnu3r2rjz/+WAMGDEi2+Hrr1i2FhISoR48eiQ/b9fHxUcGCBfXmm29qzZo1ev311xPbm81mbdy4Ubly5Uo8FhQUpLNnz+rQoUMqU6aMJKlp06b666+/FBgYqD59+sjW1lbjxo1Tnjx5tHHjxsRJWC+//LLc3d1VtGjRlH3gyNKYCoY0NWbMGA0bNsziGxLSj8lk0syZMzVy5Ejdvn3b6DgAAAAAgHRQt27dxEKnJFWuXFmSdO7cOUnxxcGYmJjEV2xsrCRp06ZNiouLU7du3SzO16lTR3nz5tWOHTsSx3R2dtbixYu1c+dO+fj4qEGDBho6dGiyeTp16mTxvnPnzrp//74OHz6cbPs9e/YoMjJS3bt3T9LP1tZW27dvtzj+6quvJqkrrF+/XnXq1FGpUqUs7sXHx0c3b97U0aNHJUlhYWFq0aKFxWrTF198UfXq1Us2G6wPxU6kmV9++UUHDx5Ur169jI6SrVSvXl1t27bVmDFjjI4CAAAAAEgBW1vbxILkvyUct7X9/4tx8+fPb9HGwcFBkvTo0SNJ0sKFC2VnZ5f4cnd3lyRdu3ZNklS6dGmL83Z2drp7965u3rxpMa6Xl5fKlSunyMhI9e/f/7HbpRUuXDjZ9xcvXky2fcL+oK6urhbHbW1t5eLiYrF/aHLtEu5lx44dSe6jY8eOkpR4L5cvX06SL7nMsF4sY0eaGTNmjEaMGKGcOXMaHSXbGT9+vCpUqCA/Pz9VqVLF6DgAAABIQ7GxsTpw4ICuX78us9msF154QbVq1ZK9vb3R0QA8o0KFCunGjRuKiopK8rV86dIlSakrzrVu3Vr79u1LfJ9QDHVxcZEkbdy40WJmaIKE8wkCAwN14sQJValSRQMHDpS3t7fy5cuXpN/Vq1f10ksvWbyXJDc3t2TzJRRrr1y5okqVKiUej4mJ0c2bN5PkMJlMyWYtVKiQpk+fnuw1ypUrJym+UJqQ59+ZkT1Q7ESa2L17tyIiIvTjjz8aHSVbcnFxUUBAgPz9/RUaGprsfxgAAACQtVy/fl07d+6UyWRSnTp1VL16dZlMJt2+fVvr169XVFSU6tSpoxdffNHoqABSydvbW8HBwVq1apU6dOhgce6HH36Qq6trYvEuJVxcXJIUDCWpWbNmsrGx0blz59SsWbMnjrFz505NmDBBwcHBeuONN1S1alX16dNHixcvTtL2+++/T9yzU5KWLFmi3Llzy8PDI9mxvby85ODgoCVLlqhJkyaJx7/77jvFxMSoYcOGT73HV199VTNnzlTx4sVVqFChx7arW7eu1q1bpwcPHiQuZT9//rx+/vln9uzMJih2Ik2MHj1aI0eO5LfLBnrnnXc0Z84cfffdd+rcubPRcQAAAPActmzZIrPZrLZt2yZZRlqgQAG9/vrrMpvN2rNnjw4cOJD4gBEAWUPTpk3VrFkz+fr66tixY6pTp47u3bunJUuW6Mcff9SXX3752CXkqeHu7q6hQ4eqX79++uOPP9SwYUPlzJlT58+f16ZNm+Tn5ydvb2/dvn1b3bp1k7e3twYPHiyTyaQ5c+aoU6dO8vHxUY8ePSzGnTt3ruLi4lSrVi1t2LBB8+bNU0BAwGOfDJ8/f34NGjRIwcHBcnJyUosWLRQREaGRI0eqfv36atmy5VPvZeDAgfruu+/UoEEDDRw4UOXKldODBw907Ngx7dy5M3Hy1ciRI7V06VI1b95cH3zwgaKiojRmzBiWsWcjFDvx3LZv367Tp08n+eaHjJUjRw7NnDlTXbt2VatWrZQ7d26jIwEAAOAZrF+/XqVLl1bp0qWf2M5kMqlu3bq6cuWKli5dmrhvHYDMz2QyadWqVRo3bpy++uorBQUFyd7eXp6enlq5cqXatGmTZteaMGGCKlSooE8++USffPKJTCaTXnzxRTVp0iTxqea9e/fW33//ra+++ipxpWDHjh3Vs2dP9evXT/Xq1bP4nvTjjz/K399fQUFBypcvn0aOHKlRo0Y9Mcf48eNVsGBBzZ49W59++qlcXFz01ltvKTg4OEWF3Xz58mn37t0aO3asJk2apIsXL8rZ2VnlypVT+/btE9tVqFBB69at0wcffKA33nhDbm5uGjp0qMLCwhQaGvoMnyCyGpPZbDYbHQJZl9lsVqNGjfT2229T7MwkunXrphIlSmjChAlGRwEAAEAq7d+/Xzlz5nzsUtDHOXfunE6ePKnGjRunUzLAOBEREapQoYLRMSApICBAgYGBio6OtniAErIea/664mnseC7btm3T5cuX1a1bN6Oj4P9MnjxZc+bM0cmTJ42OAgAAgFQ6c+ZMqgudklS8eHHdvn1bzGUBAGR3FDvxzMxms0aNGqUxY8bwG51MxM3NTR988IEGDBhgdBQAAACkwqlTp+Tu7v7M/b28vLRnz540TAQAQNZDsRPPbOPGjbp9+zYPw8mEBgwYoOPHj2vt2rVGRwEAAEAKhYeHq1q1as/c383NTZcuXUrDRABgKSAgQGazmQlPyNQoduKZmM1mjR49WgEBAcqRI4fRcfAvDg4Omj59ugYMGKDIyEij4wAAACAF7OzsnnsMe3v7NEgCAEDWRbETz2TdunV6+PChOnToYHQUPMZrr72mChUqKCQkxOgoAAAASIG02G+TPTsBANkdxU6kWsKszsDAQNnY8E8oM5s6daqmTJmiCxcuGB0FAAAAT2EymTLFGAAAZGVUqpBqP/74o8xms9q1a2d0FDyFu7u7+vTpow8++
MDoKAAAAHiK6Ojo556ZGRUVlUZpAADImih2IlXi4uI0ZswYBQYG8lvjLGL48OH6+eeftX37dqOjAAAA4Alq1Kih/fv3P3P/M2fOqFixYmmYCACArIdiJ1Jl+fLlsre3V6tWrYyOghRycnLSlClT5O/vr5iYGKPjAAAA4DFKlCihs2fPPnP/Tz/9VJMnT1ZEREQapgKsjNksXd8tHZsmHQqK//P67vjjAKwCxU6kWGxsrMaMGaOxY8cyqzOL6dixowoUKKDZs2cbHQUAAABP4O7uroMHD6a6359//qmmTZuqTp06atiwoXx9fXX69Ol0SAhkUXHR0onZ0ip3aVtz6eBQ6dCY+D+3NY8/fmJ2fDsAWRrFTqTY999/r3z58unVV181OgpSyWQyacaMGQoMDNT169eNjgMAAIDHqFatmq5fv65jx46luM+FCxcUHh6u5s2ba8iQITpx4oRKlCihmjV6rEGuAAAgAElEQVRrql+/frp8+XI6JgaygOj70pbG0q/vSw9OSzEPpLgoSeb4P2MexB//9X1pS5P49ulswYIFMplMyb42b96c7tf/p+XLl2vatGlJjm/evFkmk0m7du3K0DzA86LYiRSJiYlRQEAAszqzMA8PD3Xt2lUjRowwOgoAAACeoFmzZrp69arWrVv3xG2I4uLiFBoaqvDwcIuHh+bLl0+BgYE6duyYHBwcVKlSJQ0dOlQ3b97MiPhA5hIXLYW+Jt3cJ8U+fHLb2IfSzV+k0BYZNsNz6dKlCgsLs3jVrl07Q66d4HHFztq1ayssLExVq1bN0DzA87I1OgAyl0uXLum3335TbGysTCaTihcvrqpVq+rbb79V4cKF1aRJE6Mj4jkEBgaqfPny6t27t2rWrGl0HAAAADxGw4YNdefOHa1evVqxsbHy9PRU4cKFZWNjoxs3bujAgQMym81q0KCBChUqlOwYBQsW1Mcff6yBAwcqKChI5cqVU//+/TVgwADlyZMng+8IMMip+dKtX6W4yJS1j4uUbh2QTn0hlXknfbNJ8vT0VOnSpVPUNjIyUg4ODumc6P/LmzevvLy80mQss9ms6Oho2dvbp8l4wJMwsxMym83atWuXfvjhB509e1Y+Pj56/fXX1apVK+XOnVtLly7V7Nmz9eGHHzKrM4tzdnbW+PHj5e/vr7i4OKPjAAAA4Any5cundu3aqX379nr06JH279+vsLAw3bp1S23atFH79u0fW+j8p2LFiunzzz/Xnj179Mcff6h06dKaOnWqHj16lAF3ARjIbJaOTn76jM5/i30Y38/AhxYlLCFfuXKl3n77bRUoUEBubm6J59etW6c6deooV65ccnZ2Vrt27XTixAmLMerXr69GjRpp48aNqlatmhwdHeXh4aFVq1Yltunevbu++eYbnT17NnEZfULx9XHL2JctW6Y6derI0dFRzs7O6tSpky5cuGDRplixYvL19dXcuXNVrlw52dvba8OGDWn9MQHJotiZzd27d08LFixQ6dKl1b59e9WtW1e2tvETfk0mk9zd3dWxY0dt2bJF9+/f19GjRw1OjOf13//+V7Gxsfr666+NjgIAAIAUMJlM8vDwkLe3t5o2bapq1aopR44cqR6ndOnSWrRokTZv3qzt27erTJkymjt3rqKjeSALrNSNMCny2rP1jbwa3z+dxcbGKiYmJvEVGxtrcb5v376ytbXVN998o/nz50uS1qxZo1atWumFF17Q999/r08++UTh4eGqX7++rly5YtH/+PHjGjRokAYPHqzly5ercOHCat++feIDzAIDA+Xj46MiRYokLqNftmzZY/POmjVLnTp1UuXKlfXDDz9o9uzZCg8PV6NGjXT/vuVep5s2bUp8dsT69etVqVKltPjIgKdiGXs29uDBAy1fvlw9evSQjc2T6945c+ZUhw4dFBoaqri4OHl4eGRQSqQ1GxsbzZw5U+3atVPbtm2VL18+oyMBAAAgA1WuXFkrV67U3r17NWLECE2aNEljx45V586dn/pzAZBpHBgg3T745DYPL0gxqZzVmSDmoRT2luRY7PFtXvCUaiTd6zI1ypcvb/G+Xr16FjMpX375Zc2ZM8eizciRI1W2bFmtXbs28RcfderUUfny5RUSEqLJkycntr1x44Z27dqll156SZJUtWpVFS1aVEuXLtWQIUPk7u6uAgUKyMHB4alL1u/evavhw4fLz8/PIlOtWrVUvnx5LViwQP369Us8fufOHf32228pmoEOpCX+S5aNrVixQt27d0/V/6Fp1KiRTp06pb/++isdkyG91alTR6+++qrGjh1rdBQAAAAYpE6dOtq8ebPmzJmjGTNmyNPTU6tWrZLZwKW7QJoyx0p61n/P5v/rn75WrFihffv2Jb4SZm8m+OfDx6T4gmN4eLg6d+5sMcO7dOnS8vLy0vbt2y3aly9fPrHQKUmurq4qUKCAzp07l+qsP//8s+7fv69u3bpZzEYtUaKEypQpox07dli0f/nllyl0whDM7MymTpw4ocqVKz/T8pdWrVppzZo1atOmTTokQ0YJDg6Wh4eH/Pz8VKFCBaPjAAAAwCCNGzdWWFiY1qxZoxEjRmjChAmaMGGCGjdubHQ04PFSMqPy2DTp4FApLir149s4SOUGSOX7p75vKnh4eDzxAUWurq4W72/dupXscUkqUqSIwsPDLY7lz58/STsHB4dn2rP32rX4LQEaNWqUoqzJZQQyAsXObOr3339X+/btn6lvjhw5FBsbK7PZzAOLsrDChQtrxIgReu+997Rx40b+LgEAALIxk8mk1q1bq2XLlvruu+/0zjvvqESJEho/frzq1KljdDzg2bjUlmzsnrHYaSu51Er7TKn075/TEoqX/96bM+GYi4tLumVJGPvrr79OsvxekvLkyWPxnp8xYRSWsWdD0dHRsre3f64x6tWrp927d6dRIhilb9++unTpklasWGF0FAAAAGQCNjY26tKli44ePao33nhDHTp0UJs2bXTo0CGjowGpV6Cu5PCMy6hzFo7vn8nkzZtXnp6e+v777xUXF5d4/M8//9SePXvUsGHDVI/p4OCgv//++6nt6tevLycnJ506dUo1a9ZM8ipXrlyqrw2kB4qd2dD169efezp54cKFE6fPI+uys7PTzJkzNWjQID18+IwbdwMAAMDq2NnZqVevXjpx4oS8vb3VrFkzdevWTSdPnjQ6GpByJpNUcYiUwzF1/XI4ShWGxPfPhIKCghQREaHWrVtrzZo1Wrx4sZo3by4XFxcNHDgw1eNVrFhR165d05w5c7Rv3z4dPnw42XbOzs6aNGmSxo0bpz59+mjVqlUKDQ3VN998Iz8/P3333XfPe2tAmqDYmQ3dv39fTk5Ozz0OG5dbh8aNG6tWrVoWT+wDAAAAJClnzpwaMGCATpw4oQoVKsjLy0vvvPOOLly4YHQ0IGXce0r5q8fvwZkSNg5S/hqS+9vpm+s5tGrVSqtXr9aNGzfUoUMH9enTR5UrV9auXbtUpEiRVI/Xu3dvderUSUOHDlXt2rXVtm3bx7bt27evVqxYoYiICHXr1k0tWrRQ
QECAzGazqlat+jy3BaQZk5mKVbZz5coVnTt3TrVr136ucVavXq3WrVunUSoY6dy5c6pWrZoOHDigkiVLGh0HAAAAmdStW7c0efJkzZ07Vz169NDw4cNVsGBBo2PBykVERDzfQ1Wj70uhLaRbB6TYJ6xoy+EYX+hstE6yy/3s1wOygOf+usrEmNmZDRUoUECXL19+rjHOnDmjokWLplEiGK148eIaOHCgBg0aZHQUAAAAZGL58+fXxIkTdfjwYUVFRal8+fIaPXq07ty5Y3Q04PHscktNtkjVQySnlyRbp/+b6WmK/9PWScr9Uvz5JlsodAJZHMXObMjW1lbR0dHPtQz9wIEDql69ehqmgtEGDx6s8PBwbdq0yegoAAAAyORcXV01a9YsHThwQOfPn1eZMmU0efJk9oFH5mVjJ5V5R3r9pOS9UfKcJFUZG/+n9yap9cn48zZ2RicF8JwodmZTXl5e2rNnzzP1jYyMlL29vUyZdLNmPJucOXNq6tSpeu+99xQVFWV0HAAAAGQBJUuW1Jdffqnt27dr3759Kl26tD755BP+/yQyL5NJKviyVL6/5DEy/s+CdTPtw4gApB7FzmyqWLFiOn36tB49epTqvitXrlSTJk3SIRWM1rp1a5UsWVIzZ840OgoAAACykAoVKmjp0qVavXq11qxZo3LlymnhwoWKjY01OhoAIJuh2JmNdezYUYsXL1ZkZGSK+6xevVpeXl5ydHRMx2Qwislk0vTp0xUcHPzc+7oCAAAg+6lRo4Z++uknLVy4UPPmzVPlypX1ww8/PNcWWgAApAbFzmzMzs5Ob775ppYtW6bff//9iW2vXr2qRYsWydPTUyVKlMighDBC2bJl1bNnTw0bNszoKAAAAFmWr6+vTCaTxo0bZ3E8NDRUJpNJN27cMChZvAULFih37vR7CMsrr7yiHTt2KCQkROPHj1etWrW0YcMGip4AgHRHsTObs7OzU7du3RQbG6sWLVpo1apVOn36tG7duqULFy5o586d+uGHH3T8+HF169ZNL774otGRkQFGjhypLVu2aPfu3UZHAQAAyLJy5sypyZMn6/r160ZHMYTJZNKrr76q/fv3a9iwYRowYIAaNWqkXbt2GR0NAGDFKHZCkvTbb7/Jzs5OTZs21f3793XkyBFdu3ZN5cuXV/v27dWgQQMeSJSN5MmTR5MmTZK/vz/7LAEAADwjb29vlSxZUkFBQY9tc/ToUbVs2VJ58uRRoUKF1KVLF125ciXx/L59+9S8eXMVKFBAefPmVf369RUWFmYxhslk0meffaY2bdrI0dFRZcuW1bZt23ThwgX5+PjIyclJnp6e+vXXXyXFzy7973//qwcPHshkMslkMikgICBdPgNJsrGxUYcOHXTo0CH997//Vffu3dWiRYvEPAAApCWKnZAkzZ8/Xz179pSjo6MqV66sBg0aqHr16ipYsKDR0WCQrl27ytHRUfPnzzc6CgAAQJZkY2OjiRMnavbs2Tp16lSS85cvX9Yrr7wiDw8P/fLLL9q8ebPu37+v119/XXFxcZKke/fu6c0339TOnTv1yy+/yNPTUy1atEiyDH7cuHHq3LmzwsPDVbNmTXXp0kU9e/bUu+++q99++01FixaVr6+vJOnll1/WtGnT5OjoqMuXL+vy5csaPHhwun8etra28vX11R9//KGWLVuqVatW6tSpk44dO5bu1wYSmc3S7t3StGlSUFD8n7t3xx8HYBVMZjZNyfYiIiLUuHFjnTt3TnZ2dkbHQSZy8OBB+fj4KCIiQvnz5zc6DgAAQJbh6+urGzduaM2aNfL29lbhwoW1ZMkShYaGytvbW9evX9eMGTP0888/a8uWLYn9bt++rfz582vv3r2qXbt2knHNZrOKFi2qjz76SN27d5cUP7Nz2LBhCg4OliQdPnxYlStX1scff6xBgwZJksV1CxQooAULFqhfv366f/9+BnwayXvw4IFmzZqlKVOmqHXr1hozZgzPB0CyIiIiVKFChecbJDpamj9fmjxZunYt/n10tGRnF/8qVEgaMkTq2TP+PWDl0uTrKpNiZif05Zdf6q233qLQiSQ8PT3Vvn17jR492ugoAAAAWdbkyZO1dOlS7d+/3+L4gQMHtGPHDuXOnTvxlbBHfsJM0GvXrumdd95R2bJllS9fPuXJk0fXrl3TuXPnLMaqUqVK4v8uXLiwJKly5cpJjl27di3tb/AZOTk5aejQoTpx4oTc3NxUvXp1+fv7WyzjB9LE/ftS48bS++9Lp09LDx5IUVHxszmjouLfnz4df75Jk/j2GSAsLEydOnVS0aJFZW9vLxcXFzVr1kwLFy7MstuJrVy5UiEhIUmOJzycLTQ0NE2uk7AFR3KvlStXpsk1/i2t7yG9xgTFzmwvOjpaX331ld5++22joyCTCgoK0tKlSxUeHm50FAAAgCypVq1aat++vYYOHWpxPC4uTi1bttTBgwctXidOnFCrVq0kST169NC+ffs0depU7d69WwcPHlSxYsUUFRVlMdY/Jy4k7LWf3LGE5fGZibOzs4KCghQRESE7OztVqlRJw4cP161bt4yOBmsQHS299pq0b5/08OGT2z58KP3yi9SiRXy/dDRt2jTVq1dPt27d0qRJk7R582Z98cUXKlu2rPr06aM1a9ak6/XTy+OKnenB19dXYWFhSV4NGzbMkOunherVqyssLEzVq1c3OopVsTU6AIy1du1alSlTRuXKlTM6CjIpFxcXBQYGyt/fX9u3b+dBVQAAAM9gwoQJqlixotavX594rHr16vr+++9VokSJx66y2rVrl2bMmKGWLVtKkq5evarLly8/dx57e/tMN3OsUKFCCgkJ0cCBAxUUFKSyZctq4MCB6t+/v3Lnzm10PGRV8+dLv/4qRUamrH1kpHTggPTFF9I776RLpB07dmjQoEHq16+fZsyYYXGuTZs2GjRokB48ePDc14mOjpatrW2yP8NFRkbKwcHhua9hJDc3N3l5eRkd45nExsbKbDYrb968WfYeMjNmdmZz8+fPZ1YnnqpXr166f/++lixZYnQUAACALKl06dLq3bu3pk+fnnisb9++unPnjt544w3t3btXf/75pzZv3qzevXvr3r17kqSyZctq0aJFOnr0qPbt26fOnTvL3t7+ufOULFlSjx490qZNm3Tjxg09fNqMtwz04osvas6cOQoLC9ORI0dUunRpTZ8+XY8ePTI6GrIaszl+j87U/vt++DC+Xzo94mTixInKnz+/Jk+enOx5d3f3xK0pAgICki1W+vr6qmTJkonvz5w5I5PJpE8//VRDhgxR0aJF5eDgoL/++ksLFiyQyWTSjh071LFjRzk7O6tOnTqJfbdv364mTZooT548cnJyko+Pjw4fPmxxvUaNGql+/fravHmzqlevLkdHR3l4eFgsGff19dXChQt18eLFxCXl/8z4T/369VPhwoUV/a8ZtPfv31eePHk0fPjwJ36GKTFv3rwky9pjY2P1yiuvyN3dPfH7bMJnfOjQIXl7e8vR0VGurq4aPXr0U2fDm81mTZ06VeXKlZO9vb1
cXV3Vr18/3b1716KdyWTSiBEjNHHiRJUqVUr29vY6dOhQssvYU/JZJ/j2229Vvnx55cyZU5UrV9aqVavUqFEjNWrU6Nk/OCtAsTMbu3Tpknbt2qWOHTsaHQWZXI4cOTRz5kx98MEHhm5iDwAAkJWNHj1atrb/f3Fd0aJF9fPPP8vGxkavvvqqKlWqpL59+8rBwSFxxtUXX3yh+/fvq0aNGurcubPefvvtxxYPUuPll1/W//73P3Xp0kUFCxZ8bNHFSGXKlNHixYu1YcMGbdmyRWXLltW8efMUExNjdDRkFWFh8Q8jehZXr8b3T2OxsbEKDQ1V8+bNlTNnzjQff/z48Tp+/LjmzJmjFStWWFyjW7duKlWqlJYtW6aJEydKil/t2aRJE+XOnVuLFi3S4sWLde/ePTVo0EDnz5+3GPvUqVPq37+/Bg0apOXLl8vV1VUdOnTQyZMnJUmjRo1SixYtVLBgwcQl5StWrEg257vvvqtr164lOf/NN9/owYMH6tWr11Pv1Ww2KyYmJskrgZ+fnzp27Cg/Pz9dvHhRUvw2bWFhYVq8eLHy5MljMV7btm3VtGlTrVy5Ul27dlVQUJDGjh37xAwjRozQoEGD1KxZM61evVpDhgzRggUL1LJlyySF0gULFmjt2rWaMmWK1q5dq6JFiz523Kd91pK0adMmdevWTeXLl9cPP/ygwYMHa8CAATp+/PhTPzurZ0a2FRwcbPbz8zM6BrKQ7t27m4cNG2Z0DAAAAGRDYWFhZm9vb3OZMmXM3377rTk2NtboSMggR48eTXqwf3+zuWHDJ7/c3c1mk8lsjp+jmbqXyRTf/0nj9++f6nu5cuWKWVKKf64aM2aMObnSTY8ePcwlSpRIfH/69GmzJHO1atXMcXFxFm2//PJLsyTzgAEDkozj7u5ubty4scWxO3fumF1cXMz9/3F/DRs2NNva2pqPHz+eeOzq1atmGxsb8/jx4y1yubm5JbnOtm3bzJLM27Ztsxjz39euVq2a2cfHJ0n/f5P02Nf169cT292+fdtcvHhxc6NGjcyhoaHmHDlymCdMmGAxVsJnHBwcbHHcz8/PnDt3bvPt27eTvYebN2+aHRwczD169LDo9/XXX5slmX/88UeLvK6uruaHDx+m6HNJyWddt25dc6VKlSz+vg8cOGCWZG7YsOFTP8Nkv66sBDM7s7Fhw4Zp7ty5RsdAFjJ58mTNnTtXJ06cMDoKAAAAshkvLy9t3bpVn332maZOnapq1appzZo1MqfTUmNYgdjYZ1+KbjbH989i2rZt+9jnLLRr187i/YkTJ3Tq1Cl169bNYmako6Oj6tatqx07dli0L1OmjMqUKZP4vlChQipUqJDOnTv3TFnfffddbdu2LfHny3379um3337TOyncK/Xtt9/Wvn37krycnZ0T2zg7O2vx4sXauXOnfHx81KBBgyQPi0vQqVMni/edO3fW/fv3kyzpT7Bnzx5FRkaqe/fuSfrZ2tpq+/btFsdfffVV5cqVK0X39rTPOjY2Vvv371f79u0t/r6rV6+uUqVKpega1owHFAFIMVdXVw0dOlQDBgzQ2rVrjY4DAACAbKhJkybas2ePVq1apeHDh2v8+PGaMGGCvL29U9Q/Li5ONjbM+8nypk1LWZuhQ6WoqNSP7+AgDRgg9e+f+r5P4OLioly5cuns2bNpOm4CV1fXFJ+79n9L/Hv27KmePXsmaV+8eHGL9/nz50/SxsHB4Zn3023Xrp2KFCmizz//XFOmTNHs2bNVtGhRtW7dOkX9XV1dVbNmzae28/LyUrly5XT06FH179//sV//hQsXTvZ9whL4f7t161Zijn+ytbWVi4tL4vl/5k2pp33WN27cUHR0tAoVKpSk3b/vIzviOzyAVOnfv79OnTqlNWvWGB0FAAAA2ZTJZFKbNm108OBB9evXT35+furSpcsTZ3leuXJFU6dOla+vr0aPHp3kwSiwQrVrS3Z2z9bX1laqVStt8yi+ENaoUSNt2rRJkSl4QnzCnptR/yrY3rx5M9n2j5vVmdw5FxcXSVJwcHCyMyRXr1791HzPw87OTn5+flqwYIGuXbumJUuWqGfPnhZ7G6eFwMBAnThxQlWqVNHAgQN1586dZNtdvXo12fdubm7Jtk8oSF65csXieExMjG7evJn4+SZ40t9NahUoUEB2dnaJBet/+vd9ZEcUOwGkir29vaZPn64BAwbwREwAAAAYKkeOHOrWrZuOHTumkJCQx7aLi4vTu+++q2nTpqlIkSLaunWr3NzctHTpUkliKby1qltXSmbmW4oULhzfPx0MGzZMN2/e1AcffJDs+dOnT+v333+XJJUoUUKSLJZS//XXX9q9e/dz5yhXrpxKliypI0eOqGbNmkleCU+ETw0HBwf9/fffKW7/zjvv6M6dO+rYsaMiIyNT9GCi1Ni5c6cmTJig8ePHa/Xq1frrr7/Up0+fZNt+//33Fu+XLFmi3Llzy8PDI9n2Xl5ecnBw0JIlSyyOf/fdd4qJiVHDhg3T5iaSkSNHDtWsWVM//PCDxfevAwcO6PTp0+l23ayCZewAUs3Hx0ceHh4KCQnRhx9+aHQcAAAAZHN2dnZPXCJ66dIlHT16VCNHjkwspkyaNEmzZs1Sy5Yt5ejomFFRkZFMJmnIEOn996WHD1Pez9Exvl8azsT7p1deeUUhISEaNGiQIiIi5Ovrq+LFi+v27dvasmWL5s2bp8WLF6tKlSp67bXXlC9fPvXq1UuBgYGKjIzU5MmTlTt37ufOYTKZ9Mknn6hNmzaKiopSp06dVKBAAV29elW7d+9W8eLFNWjQoFSNWbFiRd26dUufffaZatasqZw5c6py5cqPbe/m5qbWrVtrxYoVat26tV588cUUX+vixYvas2dPkuMlSpSQq6urbt++rW7dusnb21uDBw+WyWTSnDlz1KlTJ/n4+KhHjx4W/ebOnau4uDjVqlVLGzZs0Lx58xQQEGCxB+g/5c+fX4MGDVJwcLCcnJzUokULRUREaOTIkapfv75atmyZ4nt5FoGBgWrevLnatWun3r1768aNGwoICFCRIkWy/VYd2fvu8VS+vr5q1arVc4/j4eGhgICA5w+ETCMkJEQhISE6f/680VEAAACAJ0rY2++fRYvixYvr1KlTCg8PlxS/9HT+/PlGRUR66dlTql49fg/OlHBwkGrUkN5+O11jDRgwQLt27ZKzs7MGDx6sxo0by9fXVxEREfr8888T9610dnbWmjVrZGNjo06dOmn48OHy9/dP8R61T9OiRQvt2LFDDx48kJ+fn3x8fDRkyBBduXJFdZ9hZqufn586d+6sDz/8ULVr107R/psdO3aUpBQ/mCjBggULVLdu3SSvb775RpLUu3dv/f333/rqq68Sl5B37NhRPXv2VL9+/XTy5EmL8X788Udt2rRJr7/+uhYtWqSRI0dq1KhRT8wwfvx4hYSE6KefflKrVq00ceJEvfXWW1q7dm26FxybNWumb775RhEREWrXrp0mTZqkjz/+WEWKFFG+fPnS9dqZncnMfP0sLTQ09Inf5Bo1aqRt27
Y98/h37tyR2Wx+7G8yUsrD4/+xd99RUV3v18D30JsNsSAIRpAiiNhFbGAhNqyUBAtqopGIGlRUYhQLqFHsmq9KswPW2INgB4wNOwYlNkZEiQ0QYRjm/cOf84bYEbgMsz9rzVLunHvvHpYIPPOcc2wxaNAgFjwrmRkzZiA1NfWttn0iIiIioorizz//xNKlS5Gamork5GSMHTsW7u7umDp1KlRUVLBu3TpYWloiOTkZrVu3Rr169RAUFPTWDssknJSUFFhbW5f8Ajk5QM+ewPnzH+7w1NF5Xeg8cAAohc5J+jReXl5ISEjA33//LUhHYmBgIGbNmgWJRFLq64WWt/T0dJibm+Pnn3/+aKH2i7+uKjB2diq4du3aISMj463HmjVrIBKJ4OPjU6LrFhYWQiaToVq1al9c6KTKa+rUqUhKSsKxY8eEjkJERERE9Ja8vDw4OzujXr16WLp0Kfbs2YM//vgDkyZNQteuXTFv3jxYWloCAJo1awaJRILJkyfDz88PZmZmOHDggMCvgEqFnh4QHw8sXgw0bAjo6r7u4BSJXv+pq/v6+OLFr8ex0FkuTp8+jf/973+Ijo6Gn5+f0k+9/lx5eXkYM2YMduzYgePHjyMiIgLdunWDjo4OvvvuO6HjCYr/khSchoYG6tatW+zx9OlTTJ48GQEBAfJ2cLFYDE9PT9SoUQM1atRAr169cPPmTfl1AgMDYWtri8jISJiZmUFTUxO5ublvTWPv3C9L3UQAACAASURBVLkzfHx8EBAQAAMDA9SuXRuTJk1CUVGRfMyjR4/Qt29faGtrw9TUFOHh4eX3CaFypaOjg5CQEPj6+qKwsFDoOERERERExWzduhW2trYICAhAhw4d0Lt3b6xatQoPHjzA6NGj4ejoCOD1BkVvHmPHjkV6ejr69OmD3r1746effsLLz1nvkSomdXVg9Gjg1i0gNhZYsACYPfv1n4cPvz4+enTJd2+nz+bg4IDJkydj2LBhJW7UUmaqqqp4+PAhxo4di27dusHPzw+NGjXCiRMnPriGsTJgsbOSefbsGfr164dOnTphzpw5AICXL1/CyckJWlpaOH78OJKSkmBoaIiuXbsW+6Z9+/ZtbNmyBdu2bcOlS5egpaX1znts3rwZampqSExMxMqVK7F06VJER0fLn/f29satW7cQFxeH3bt3Y8OGDbhz506Zvm4SzsCBA1G7dm2sXr1a6ChERERERMVIJBJkZGTgxYsX8mNGRkaoXr06zp8/Lz8mEokgEonkuxrHx8fj1q1bsLS0hJOTEzcwqkxEIqBdO2D8eGD69Nd/OjiU2WZE9H4ymQzZ2dkICwsTdPp4YGAgZDKZwk1h19DQwK5du5CRkYGCggI8ffoUe/bsee/u8cqExc5KpKioCN9++y1UVVWxadMm+QK8UVFRkMlkiIiIgJ2dHaysrLBmzRrk5ORg37598vMLCgqwceNGNG/eHLa2tu/9Qm/cuDFmz54NCwsLuLu7w8nJCfHx8QCA1NRUHDx4EGvXroWjoyOaNWuG9evXIy8vr+w/ASQIkUiE5cuXY86cOXj06JHQcYiIiIiI5Dp16oS6deti4cKFEIvFuHr1KrZu3Yr09HQ0atQIwOuCy5uZalKpFCdPnsTQoUPx/Plz7NixA66urkK+BCIi+kyKVbamDwoICEBSUhLOnDmDqlWryo+fP38et2/fRpUqVYqNf/nyJdLS0uQfGxsbo06dOh+9j52dXbGP69WrJy9ypaSkQEVFBa1bt5Y/b2pqinr16pXoNZFisLGxweDBgxEQEIDQ0FCh4xARERERAQCsrKwQERGBMWPGoGXLlqhZsyZevXoFf39/WFpaoqioCCoqKvJGkSVLlmDFihXo2LEjlixZAhMTE8hkMvnzRERU8bHYWUlER0dj0aJF2L9/v/wdyjeKiopgb2//zh2z9fX15X/X1dX9pHup/2cNE5FIJH8n9M20D1I+gYGBsLKywtmzZ9GqVSuh4xARERERAXj9xvyJEydw8eJF3Lt3Dy1atEDt2rUBvN6YVUNDA0+ePEFERARmz54Nb29vLFy4ENra2gDAQicRkYJhsbMSuHjxIkaMGIH58+fDxcXlreebN2+OrVu3wsDAoMx3Vre2tkZRURHOnj2Ldu3aAQDu3buHBw8elOl9SXjVqlVDcHAwxo4di6SkJO6kR0REREQVir29Pezt7QFA3qyhoaEBAJgwYQL279+P6dOnY9y4cdDW1pZ3fRIRkWLh/9wKLisrC/369UPnzp0xePBgPHz48K2Hl5cX6tSpg759++L48eO4ffs2Tpw4gYkTJxbbkb00WFpa4uuvv8bo0aORlJSEixcvwtvbW/6uKFVuw4YNg0gkwoULF4SOQkRERET0Xm+KmHfv3kXHjh2xa9cuzJ49G1OnTpVvRvTfQidnsRERKQZ2diq4/fv34+7du7h79y4MDQ3fOUYmk+HEiROYOnUq3Nzc8Pz5c9SrVw9OTk6oUaNGqWeKjIzE999/D2dnZxgYGGDmzJncuEZJqKio4OTJkwq3ix0RERERKSdTU1OMGTMGJiYmcHR0BIAPdnT6+vpi7NixsLS0LM+YVIpkMhnS09MhFouRn58PTU1NGBkZwdjYmEsWEFUSIhnfniIiIiIiIiL6oMLCQixcuBCLFy+Gq6srZsyYAVNTU6FjKYWUlBRYW1t/0TWkUimSk5ORkJCA3NxcFBUVQSqVQlVVFSoqKtDV1YWjoyOaNWsGVVXVUkpOVHGVxtdVRcVp7EQkmPz8fKEjEBERERF9EjU1NUybNg03b96EoaEhmjdvjvHjxyMzM1PoaPQRBQUF2LBhA2JjY/Hs2TNIJBJIpVIAr4ugEokEz549Q2xsLDZs2ICCgoIyzxQZGQmRSPTOR1ntteHt7Y0GDRqUybVLSiQSITAwUOgYVMmw2ElE5a6oqAjx8fFYvnw5Hj58KHQcIiIiIqJPVr16dcydOxfXr1+HSCRC48aN8fPPP+Pp06dCR6N3kEql2Lx5M8RiMSQSyQfHSiQSiMVibN68WV4MLWvbtm1DUlJSsUdcXFy53JuosmKxk4jKnYqKCl6+fIljx45hwoQJQschIiIiIvpsderUwdKlS5GcnIzMzExYWFhg3rx5yM3NFToa/UtycjIyMjI+uXgplUqRkZGB5OTkMk72mr29Pdq2bVvs0bJly3K595fgLD2qyFjsJKJy9WZKSJ8+fTBw4EDExMTg8OHDAqciIiIiIioZExMThIaG4tSpU7h06RLMzc2xfPlyFoMqAJlMhoSEhI92dP6XRCJBQkIChNzipKioCJ07d0aDBg3w/Plz+fErV65AW1sbkydPlh9r0KABBg8ejHXr1sHc3BxaWlpo3rw5jh49+tH7ZGRkYOjQoTAwMICmpibs7OywadOmYmPeTLk/ceIE3NzcUL16dbRp00b+/PHjx9GlSxdUqVIFurq6cHFxwdWrV4tdQyqVYvr06TA0NISOjg46d+6Ma9eulfTTQ/RBLHYSUbkoLCwEAGhoaKCwsBATJ06En58fHB0dP/uHDyIiIiKiisbS0hJRU
VE4ePAgDh8+DAsLC4SHh8t/Dqbyl56eXuJO29zcXKSnp5dyordJpVIUFhYWexQVFUFFRQWbNm1CdnY2Ro8eDQDIy8uDp6cnbGxsEBQUVOw6x48fx+LFixEUFISoqChoamqiR48e+Ouvv95779zcXHTq1AkHDx5EcHAwdu/ejSZNmmDIkCFYu3btW+O9vLzw1VdfYfv27Zg/fz4AYP/+/ejSpQv09PSwadMmbNmyBdnZ2ejQoQPu378vPzcwMBDBwcHw8vLC7t270b17d7i6upbGp5DoLWpCB6CyER0djXXr1nGtDxJUWloaioqK0KhRI6ipvf7vZv369QgICICWlhZ++eUXuLq6wszMTOCkRERERESlw97eHnv37kViYiICAgKwYMECzJkzB4MGDYKKCvuNSsuhQ4c+uv7/ixcvStxYIZFIsGvXLlStWvW9Y+rWrYuvv/66RNd/w8rK6q1jvXr1wr59+2BsbIzQ0FAMGDAALi4uSEpKwt27d3HhwgVoaGgUOyczMxMJCQkwMTEBAHTp0gWmpqaYO3cuNm7c+M57R0RE4ObNmzh69Cg6d+4MAOjRowcyMzMxffp0jBw5stjO9IMGDcKvv/5a7Brjx49Hp06d8Pvvv8uPOTk5oWHDhggJCcHSpUvx9OlTLFmyBKNGjcKiRYsAAN27d4eqqiqmTp36+Z80oo9gsbOSCgsLw8iRI4WOQUpu8+bN2Lp1K1JSUpCcnAxfX19cvXoV3377LYYNG4amTZtCS0tL6JhERERERKWuXbt2OHr0KOLi4hAQEIDg4GAEBQWhZ8+eEIlEQsdTCkVFRYKe/yl27doFY2PjYsf+vRt7//79MXr0aIwZMwb5+fkIDw+HhYXFW9dp27atvNAJAFWqVEGvXr2QlJT03nufOHECRkZG8kLnG4MHD8bw4cNx/fp1NGnSpFiWf7t58ybS0tIQEBBQrINZR0cHDg4OOHHiBIDXU+9zc3Ph7u5e7HxPT08WO6lMsNhZCb18+RIFBQXo16+f0FFIyU2bNg0hISFo0aIFbt68iXbt2mHDhg1o37499PX1i4199uwZLl26hE6dOgmUloiIiIiodIlEInTr1g1du3bF7t27MWXKFAQHByM4OJg/936hT+moPH36NOLi4kq0s7qqqqp8w6CyZGtrC3Nz8w+OGTZsGNasWYPatWvj22+/feeYOnXqvPOYWCx+73WfPHkCQ0PDt47XrVtX/vy//Xfso0ePAAAjR458Z7PVm+JrRkbGOzO+KzNRaWAPfSWkra2No0ePQltbW+gopOTU1dWxevVqJCcnY8qUKVizZg1cXV3fKnQeOnQIP/30EwYMGID4+HiB0hIRERERlQ2RSIT+/fvj0qVLGDNmDIYPHw4XFxecO3dO6GiVmpGRUYmXDlBRUYGRkVEpJ/p8L1++xIgRI2Bra4vnz5+/txMyMzPzncc+9Br09fXfuRTAm2M1a9Ysdvy/Hclvnp83bx7Onj371mPv3r0A/n+R9L8Z35WZqDSw2FkJiUQiTougCsPLywuNGzdGamoqTE1NAUC+q+HDhw8xe/Zs/Pzzz/jnn39ga2uLoUOHChmXiIiIiKjMqKqqYvDgwbhx4wb69++Pvn37YuDAgbh+/brQ0SolY2Nj6OrqluhcPT29t6aXC2H8+PEQi8X4/fff8euvv2LZsmU4dOjQW+NOnz5dbEOg7Oxs7N+/Hw4ODu+9dqdOnZCeno6EhIRix7ds2YLatWvD2tr6g9ksLS3RoEEDXLt2DS1btnzrYWdnBwCws7ODrq4uYmJiip0fFRX10ddPVBKcxk5EZS48PByjR4+GWCyGkZGRvBhfVFQEqVSK1NRUREZGokmTJrC0tERgYCACAwOFDU1EREREVEY0NDTwww8/YNiwYVi1ahWcnJzg4uKCwMBANGzYUOh4lYZIJIKjoyNiY2M/a6MidXV1tGvXrlyaiC5evIisrKy3jrds2RK///47QkNDsXHjRjRs2BDjxo1DbGwsvL29cfnyZdSuXVs+vk6dOujevTsCAwOhqamJBQsWIDc3F7/88st77+3t7Y1ly5ZhwIABCAoKgrGxMTZv3ozDhw9jzZo1xTYneheRSIRVq1ahb9++KCgogLu7OwwMDJCZmYnExESYmJjAz88P1atXx08//YSgoCBUqVIF3bt3x9mzZxEWFlbyTxzRB7Czk4jKXOvWrbF9+3ZUrVpVvkg1ANSrVw9jx45Fq1atEB0dDQBYtGgRgoKC8PTpU6HiEhERERGVC21tbUyaNAk3b96EmZkZWrVqBR8fHzx48EDoaJVGs2bNYGho+NHC3RuqqqowNDREs2bNyjjZa25ubnBwcHjrkZGRge+//x5eXl4YPHiwfHxERAREIhG8vb3lM+aA112aEydOREBAADw8PPDq1SscPHjwnZsZvaGrq4vjx4+je/fumDp1Kvr27YtLly5h48aNGDVq1Cfl79mzJ06cOIHc3Fx89913cHFxgb+/Px4+fFisqzQwMBABAQHYuHEjXF1dERsbK5/mTlTaRLJ/f3UQEZURmUyG7777DlKpFKGhoVBVVZW/UxoVFYWQkBAcOHAAtWrVgp+fH3r27ImuXbsKnJqIiIiIqPxkZWVhwYIFCA8Px8iRIzFlypS31k1URikpKR+dUv0hBQUF2Lx5MzIyMj7Y4amurg5DQ0N4eXlBQ0OjxPcrbw0aNED79u2xadMmoaOQAvnSr6uKjJ2dCkomk4F1alIkIpEILVu2xJkzZ1BYWAiRSCTfFfHRo0eQyWTQ09MDAISEhLDQSURERERKx8DAAAsXLsTly5eRnZ0NS0tLzJo1Cy9evBA6mkLT0NDA0KFD0b17d1SvXh3q6uryTk9VVVWoq6ujRo0a6N69O4YOHapQhU4iehs7OysJmUwGkUgk/5OoojI3N8eQIUPg6+sLfX19iMVi9OnTB/r6+jh06BDU1LiUMBERERERAKSlpSEwMBCxsbHw9/eHj48PtLW1hY5V7kqzA00mkyE9PR1isRgFBQXQ0NCAkZERjI2NFfZ3aXZ2UklU5s5OFjsV0Lx58/Ds2TMsWLBA6ChEny0hIQFjxoyBrq4u6tevj9OnT8PIyAiRkZGwtLSUj5NKpUhMTESdOnU+uM4MEREREVFld/XqVcyYMQNnzpzBL7/8ghEjRkBdXV3oWOWmMhdliIRSmb+uOI1dAa1cuRLm5ubyj/fv34/ffvsNS5YswdGjR1FYWChgOqIPc3R0RGhoKBwcHPD48WOMGDECixcvhoWFRbGlGW7fvo3Nmzdj6tSpKCgoEDAxEREREZGwbG1tsXPnTuzatQs7duyAtbU1Nm3aJF8WioiI/j92diqYpKQkdOnSBU+ePIGamhomTZqEDRs2QFtbGwYGBlBTU8PMmTPh6uoqdFSiT1JUVAQVlXe/73Ls2DH4+fmhZcuWWLt2bTknIyIiIiKqmI4ePYqff/4ZL168wNy5c9G3b1+FnYL9KSpzBxqRUCrz1xU7OxXMwoUL4enpCS0tLcTExODo0aNYtWoVxGIxNm/ejEaNGsHLywsPHz4UOirR
BxUVFQGAvND53/ddpFIpHj58iNu3b2Pv3r1clJ2IiIiI6P84OTkhISEBCxYsQGBgINq2bYu4uDhuYktEBBY7FU5iYiIuXbqEPXv2YMWKFRg6dCi++eYbAK+nNsyfPx9fffUVLly4IHBSog97U+TMzMwEgGLvRJ8/fx59+vSBl5cXPDw8cO7cOVStWlWQnEREREREFZFIJEKvXr1w4cIF+Pn5wcfHB126dEFSUpLQ0YiIBMVipwLJycmBn58fLC0t4e/vj1u3bsHe3l7+vFQqRd26daGiosJ1O0kh3LlzBz4+Prh58yYAQCwWY+LEiXB0dMTz589x6tQp/O9//4ORkZHASYmIiIiIKiYVFRV4eHjg+vXr8mYBV1dXXL58WehoRESC4JqdCuT69eto3LgxxGIxzpw5gzt37qBbt26wtbWVjzlx4gR69uyJnJwcAZMSfbrWrVvDwMAAgwYNQmBgICQSCebOnYuRI0cKHY2IiIiISOG8evUKa9euRXBwMJycnDBr1ixYWFgIHeuLlObagjKZDEnpSTgjPoPs/GxU0ayC1kat4WDsUKnXPSX6r8q8ZieLnQri/v37aNWqFVasWAE3NzcAgEQiAQCoq6sDAC5evIjAwEBUr14dkZGRQkUl+ixpaWnyndj9/Pwwffp0VK9eXehYREREREQKLScnB8uXL8eSJUvQr18/zJgxA/Xr1xc6VomURlFGIpUgLDkMvyb8ike5jyApkkAilUBdVR3qKuqorVsb/o7+GNlsJNRV1UspOVHFVZmLnZzGriAWLlyIR48ewdvbG3PmzEF2djbU1dWL7WJ948YNiEQiTJs2TcCkRJ/HzMwM06ZNg4mJCYKDg1noJCIiIiIqBXp6eggICEBqaipq1aoFe3t7/PTTT3j06JHQ0cpdTkEOnDc4Y2LsRNx+dhu5klwUSAsggwwF0gLkSnJx+9ltTIydiC4buiCnoGxnSkZGRkIkEr3zERcXBwCIi4uDSCTCqVOnyizH4MGDYW5u/tFxDx8+hK+vLywsLKCtrQ0DAwO0aNEC48ePlzdhfapbt25BJBJh06ZNn533yJEjCAwMLNVrUuXEYqeCiIiIQHx8PAIDA7Fu3Tps2LABAKCqqiof4+npiR07dsDS0lKomEQlMnfuXKSnp8v/XRMRERERUemoUaMGgoODce3aNUilUlhbW+OXX37Bs2fPhI5WLiRSCXps7oGz4rN4KXn5wbEvJS9xRnwGPTf3hET6eUW8kti2bRuSkpKKPVq3bg3g9XJfSUlJaNq0aZnn+JBnz56hdevWOHjwIPz8/HDgwAGsWbMGPXr0wJ49e5Cfn19uWY4cOYJZs2a9dbx+/fpISkrC119/XW5ZqGJTEzoAfdzOnTuhq6sLJycnNG3aFJmZmRg3bhwuX76MOXPmoHbt2igsLIRIJCpW/CRSJMeOHUN+fj5kMhnXyiEiIiIiKmV169bF8uXLMXHiRMyePRsWFhbw8/ODr68vdHV1hY5XZsKSw3Ah4wLypZ9WlMuX5uN8xnmEJ4djdMvRZZrN3t7+vZ2VVatWRdu2bcv0/p8iJiYG9+/fx9WrV2FjYyM/PnDgQMyZM6dC/O6mqalZIT5XVHGws1MBLF68GN7e3gAAfX19LFq0CKtXr8Yff/yBhQsXAgDU1NRY6CSF1r59e3Tp0qVCfLMkIiIiIqqsTE1NERYWhhMnTiA5ORmNGjXCypUry7VDr7zIZDL8mvDrRzs6/+ul5CV+TfgVQm5x8q5p7O3bt0fnzp0RGxuLZs2aQUdHB7a2ttizZ0+xc1NTUzF48GA0aNAA2traMDMzw48//liibt4nT54AeF0s/6///u5WUFCAgIAAmJqaQkNDAw0aNMCMGTM+OtW9ffv26Nq161vHjY2N8d133wEApk+fjqCgIPl9RSIR1NRe9++9bxr7+vXrYWdnB01NTdSqVQvDhg1DZmbmW/fw9vbG5s2bYWVlBV1dXbRq1QqJiYkfzEwVG4udFdyLFy+QlJSEUaNGAQCkUikAYOTIkfD398eqVavQp08f3LlzR8CUREREREREpEisrKwQHR2N/fv34+DBg7C0tERkZCQKCws/+RovXrzA7t27sWfPHvlj586dSEtLK8Pkny4pPQmPcku2RmlmbiaS0pNKOVFxUqkUhYWF8seb3/c/JDU1FX5+fpg0aRJ27tyJOnXqYODAgbh9+7Z8jFgshqmpKZYtW4Y//vgDP//8M/744w/07t37szO+mVbv7u6O2NhY5Obmvnfs4MGDsXDhQgwfPhz79u3D0KFDERwcjJEjR372ff/rhx9+kDeBvZnyn5CQ8N7xq1evhre3N5o0aYLdu3cjKCgI+/fvR+fOnfHyZfHi99GjR7F8+XIEBQUhKioKBQUF6N27N168ePHFuUkYnMZewVWtWhWPHz+Gvr4+gP+/Rqeamhp8fHxQq1Yt+Pv7Y9y4cYiKioKOjo6QcYlKzZt3UdnpSURERERUdpo1a4b9+/cjISEBAQEBWLBgAWbPno2BAwcW2xD33+7cuYNz586hSpUq6NWrF9TVi+9efuHCBWzfvh1GRkZwcHAok9wTDk3AxYcXPzgm/UX6Z3d1vvFS8hJDdw2FcVXj946xr2uPpV8vLdH1gdcF539zdHT86IZEWVlZOHXqFBo2bAgAaNq0KerVq4dt27bB398fAODk5AQnJyf5Oe3atUPDhg3h5OSEK1euoEmTJp+c0dnZGTNmzEBwcDCOHDkCVVVVNGvWDH369MGECRNQtWpVAMClS5ewbds2zJkzB9OnTwcAdO/eHSoqKpg1axamTp2Kxo0bf/J9/8vY2BhGRkYA8NEp64WFhZg5cya6dOmCzZs3y49bWFjAyckJkZGR8PHxkR/PyclBbGwsqlWrBgCoVasWHBwccOjQIbi7u5c4MwmHnZ0K4E2h813c3NywePFiZGVlsdBJlUpRURFatWqFI0eOCB2FiIiIiKjSc3R0xLFjx7Bs2TIsWLAALVu2xMGDB9+ayn3hwgWkpaVh0KBBcHFxeavQCQDNmzfHoEGDYGBggF27dpXXS3iLtEgKGUo2FV0GGaRFH++0/BK7du3C2bNn5Y+wsLCPnmNlZSUvdAKAoaEhDAwMcO/ePfmx/Px8zJ07F1ZWVtDW1oa6urq8+PnXX399ds5Zs2bh7t27WLduHQYPHozHjx9j5syZsLW1xePHjwEAx48fB/C6u/Pf3nz85vnycP36dWRlZb2VpXPnzjAyMnori6Ojo7zQCUBeDP7355QUCzs7K4H+/fujc+fOQscgKlWqqqoICAjAuHHjkJyc/M4fooiIiIiIqPSIRCJ0794d3bp1w65duzBx4kQEBwcjODgYHTp0wLVr15Cbm4suXbp80vUaNWoEXV1d7N27F3369CnVrJ/SUbn09FJMiZuCAmnBZ19fU1UTE9pOwPi240sS75PY2tq+d4Oi93lXM5SmpiZevXol/9jf3x+//fYbAgMD0bZtW1SpUgV3796Fm5tbsXGfo169evjuu+/ka2guW7YMEyZMQEhICObPny9f29PQ0LDYeW/W+nz
zfHl4X5Y3ef6b5b+fU01NTQAo8eeKhMfOzkqiRo0aQkcgKnX9+/eHoaEhVq9eLXQUIiIiIiKlIRKJMGDAAFy5cgXff/89hg4diq+//hqnT59Ghw4dPuta9erVg7GxMVJSUsoo7fu1NmoNdZWSNU2oqaihlVGrUk5UPqKiojBixAgEBATA2dkZrVq1Kta5WBrGjx+PqlWr4vr16wD+f8Hw4cOHxca9+bhmzZrvvZaWlhYKCooXpGUyGZ4+fVqibO/L8ubYh7JQ5cBip4IRcjc4ovImEomwfPlyzJ07F48elWxhcSIiIiIiKhlVVVUMHToUf/31F5o3b46ePXuW6DrNmjWTF8XKk4OxA2rr1i7RuXX06sDBuGzWGy1reXl5b82Mi4iIKNG1MjIy3rlxUnp6OrKzs+Xdk506dQLwutD6b2/WzOzYseN772Fqaoq//vqr2OZYR48efWsjoTcdl3l5eR/M3LhxYxgYGLyV5fjx4xCLxfKsVHmx2KlAbt68iZCQEBY8SalYW1tj6NChmDZtmtBRiIiIiIiUkoaGBlq0aPHOacGfSldXFzk5OaWY6uNEIhH8Hf2ho/55+1voqOvAv52/wm6W6uLigvDwcPz222+IjY3F999/jzNnzpToWuvXr0fDhg0xa9YsHDx4EMeOHcPatWvh7OwMLS0t+UY/TZs2hZubG3755RfMmTMHhw8fRmBgIObOnYshQ4Z8cHMiT09PPHr0CCNGjEBcXBzWrFmDH3/8EVWqVCk27s01Fi1ahD///BPnz59/5/XU1NQwa9YsHDp0CMOGDcOhQ4cQGhoKNzc3WFlZYdiwYSX6XJDiYLFTgYSHhyMjI0Nh/8MlKqmZM2fi4MGDJf4GTUREREREJZebmyvfdbuknJ2dceLEiVJK9OlGNhuJ5obNoamq+UnjNVU10cKwBUY0G1HGycrO6tWr0atXL0ybNg0eHh549epVsV3JP0efPn3Qv39/7Nq1C15eXujWrRsCAwNhb2+PxMRENG3aVD522Ua2lgAAIABJREFU06ZNmDRpEkJDQ9GzZ09ERkZi2rRpH914qVu3bli1ahUSExPRp08fbNy4EVu2bHnr31zfvn0xevRoLF++HA4ODmjTps17r+nj44PIyEgkJyejb9++mDp1Knr06IFjx45xc2clIJKxTVAhFBYWwsTEBHFxcR98R4Soslq/fj1WrVqF06dPQ0WF79MQEREREZWXu3fv4vnz57Czs/ui65R0o6KUlBRYW1uX+L45BTnoubknzmecx0vJy/eO01HXQQvDFjjgdQB6Gnolvh+RIvjSr6uKjBUDBXHo0CGYmpqy0ElKa8iQIVBVVUVkZKTQUYiIiIiIlEphYSFUVVW/+DpC9Vrpaeghfmg8FndfjIbVG0JXXReaqpoQQQRNVU3oquuiYY2GWNx9MeKHxrPQSaTg1IQOQJ8mLCwMI0eOFDoGkWBUVFSwcuVK9O7dGwMGDED16tWFjkREREREpBT09fVx5cqVL7qG0JNK1VXVMbrlaIxqMQpJ6Uk4Kz6L7IJsVNGogtZGrdHWuC2XjCOqJDiNXQFkZmbC0tIS9+7d++J1UogU3ahRo6Cjo4OlS5cKHYWIiIiISGns2LEDAwcOLPH5iYmJaNCgAerVq/fZ51bm6bZEQqnMX1ecxq4ANm7ciP79+7PQSQQgKCgIW7ZswdWrV4WOQkRERESkNLS0tJCXl1fi8x88eFCiQicR0edisbOCk8lknMJO9C+1atXCjBkzMG7cOMGnwhARERERKYsuXbogLi6uROeKxWIYGhqWciIiondjsbOCS0pKQlFRERwdHYWOQlRh/PDDD8jKysL27duFjkJEREREpBS0tLSgp6eH1NTUzzrv1atXiIuLQ7t27b7o/mx0ICo9lf3ricXOCi4sLAwjRozgQslE/6KmpoYVK1Zg4sSJyM3NFToOEREREZFScHJyQlpaGlJSUj5pfHZ2NrZu3Ypvv/32i36nVVdX/6Ip9ERUXF5eHtTV1YWOUWa4QVEFlpOTg/r16yMlJQV169YVOg5RhfPNN9/AzMwMc+fOFToKEREREZHSSExMhFgsRps2bWBiYvLW87m5uVi9ejWMjIzg6ekJFZUv67N68eIFMjMzYWRkBG1tbTYDEZWQTCZDXl4exGIx6tSpU2n3hlETOgC9X0xMDDp27MhCJ9F7LFy4EE2bNsXw4cNhZmYmdBwiIiIiIqXQrl07yGQynD17FmfOnIGGhob8ucLCQmhra+PGjRt4+vTpFxc6AcgLMg8ePIBEIvni6xEpM3V19Upd6ATY2VmhOTo6YsqUKXB1dRU6ClGFNW/ePCQlJWHPnj1CRyEiIiIiov9z7949NGvWDCkpKahdu7bQcYhIibDYWUGlpKTA2dkZ9+7dq9TrKBB9qfz8fNja2mL58uXo0aOH0HGIiIiIiOj/+Pr6QkNDAyEhIUJHISIlwmJnBeXv7w+RSIQFCxYIHYWowtu/fz9++uknXLlyBZqamkLHISIiIiIiABkZGbCxscHVq1dRr149oeMQkZJgsbMCkkgkqF+/Po4fPw5LS0uh4xAphN69e6NDhw6YMmWK0FGIiIiIiOj/TJo0Ca9evcLKlSuFjkJESoLFzgpo9+7dCAkJwcmTJ4WOQqQwbt26hbZt2+LSpUswMjISOg4REREREQF4/PgxrKyscOHCBZiamgodh4iUwJdvi0alLiwsDCNGjBA6BpFCMTc3x6hRo+Dv7y90FCIiIiIi+j+1atXCDz/8gLlz5wodhYiUBDs7K5gHDx7AxsYG9+/fh56entBxiBRKTk4OrK2tsWXLFnTo0EHoOEREREREBODJkyewsLDA6dOnYW5uLnQcIqrk2NlZwWzYsAGDBg1ioZOoBPT09LBw4UL4+vpCKpUKHYeIiIiIiADo6+tj3LhxmD17ttBRiEgJsLOzApHJZLC0tMSGDRvQtm1boeMQKSSZTAYnJye4u7vDx8dH6DhEREREREREVI7Y2VmBnDx5EmpqamjTpo3QUYgUlkgkwvLlyxEYGIisrCyh4xARERERERFROWKxswIJDw/HyJEjIRKJhI5CpNDs7Ozg4eGB6dOnCx2FiIiIiIiIiMoRp7FXEC9evICJiQlSU1NRu3ZtoeMQKbynT5/C2toaBw4cQPPmzYWOQ0RERERERETlgJ2dFURUVBS6dOnCQidRKalRowbmzJkDX19f8D0dIiIiIiIiIuXAYmcFER4ejhEjRggdg6hSGTFiBPLz87Fp0yahoxARERERKb3AwEDY2toKHYOIKjlOY68Arl27hu7du+Pu3btQU1MTOg5RpXL69GkMHDgQKSkpqFq1qtBxiIiIiIgUire3N7KysrBv374vvlZOTg7y8/NRs2bNUkhGRPRu7OysAMLCwuDt7c1CJ1EZaNu2Lbp164Y5c+YIHYWIiIiISKnp6emx0ElEZY7FToEVFBRg06ZNGD58uNBRiCqt+fPnIyIiAjdu3BA6ChERERGRwjp79iy6d+8OAwMDVK1aFe3bt0dSUlKxMWvWrIGFhQW0tLRQq1YtuLi4oL
CwEACnsRNR+WCxU2B79+5F48aNYW5uLnQUokqrbt26CAgIwPjx47lZERERERFRCWVnZ2PIkCE4efIkzpw5A3t7e/Ts2RNZWVkAgHPnzuHHH3/EzJkz8ddffyEuLg5ff/21wKmJSNmw2CmwsLAwjBw5UugYRJWer68v7t+/j99//13oKERERERECsnZ2RlDhgyBtbU1rKyssGLFCmhpaeHQoUMAgHv37kFXVxeurq4wNTVF06ZN8dNPP3HJNiIqVyx2Cig9PV2+eQoRlS11dXUsX74cfn5+yMvLEzoOEREREZHCefToEUaPHg0LCwtUq1YNVapUwaNHj3Dv3j0AQLdu3WBqaoqvvvoKXl5eWL9+PbKzswVOTUTKhsVOAUVGRsLd3R06OjpCRyFSCl27dkXz5s2xcOFCoaMQERERESmcYcOG4ezZs1iyZAkSExNx8eJFGBsbo6CgAABQpUoVXLhwATExMTAxMcG8efNgZWWFBw8eCJyciJQJi53lRCKR4NGjR3jw4AHy8vJQVFSEiIgITmEnKmchISFYvnw57t69K3QUIiIiIiKFcurUKfj6+qJXr16wsbFBlSpVkJGRUWyMmpoanJ2dMW/ePFy+fBm5ubnYt2/fJ12/qKioLGITkZLhwhllSCaT4fTp0xCLxdDW1kbNmjWhpqaGq1ev4vbt26hbty7s7OyEjkmkVExNTTFu3DhMnDgR27dvFzoOEREREZHCsLCwwKZNm9CmTRvk5ubC398fGhoa8uf37duHtLQ0dOzYEfr6+jh69Ciys7NhbW39Sdfftm0bPDw8yio+ESkJFjvLyM2bN3Hu3Dm0b98eDg4O7xzz7bff4uDBg9DX10fHjh3LOSGR8po8eTJsbGwQHx+PLl26CB2HiIiIiEghhIeHY9SoUWjRogXq1auHwMBAPH78WP589erVsXv3bsyePRsvX76EmZkZQkND0aFDh0+6/syZMzFw4EBuaEREX0Qkk8lkQoeobK5evYrMzMxPLqLcuHED9+7dQ/fu3cs4GRG9sXv3bgQEBODSpUtQV1cXOg4RERERkdLr2LEjvvvuOwwdOlToKESkwLhmZykTi8W4f//+Z3WLWVlZwcjICElJSWWYjIj+rW/fvqhfvz5WrlwpdBQiIiIiIgIwd+5cBAYGQiKRCB2FiBQYi52l7PTp0+jRo8dnn2djY4MHDx6AjbZE5UMkEmHZsmUIDg5GZmam0HGIiIiIiJRex44dYWZmhoiICKGjEJECY7GzFOXm5kJbW7vE57ds2RJnz54txURE9CFWVlbw9vbG1KlThY5CREREREQA5syZg7lz5+LVq1dCRyEiBcViZyk6cuTIF212Ympqirt375ZiIiL6mF9++QWxsbE4ffq00FGIiIiIiJRe27ZtYWdnh3Xr1gkdhYgUFIudpUgmk0FTU/OLrqGlpVVKaYjoU1StWhXz58+Hr68vioqKhI5DRERERKT0Zs+ejXnz5uHly5dCRyEiBcRiZwXDNTuJyt/gwYOhoaGB8PBwoaMQERERESm95s2bw8HBAatXrxY6ChEpIBY7S5FIJKoQ1yCizyMSibBixQpMnz4dT58+FToOEREREZHSmzVrFhYuXIjs7GyhoxCRgmGxsxQVFhZ+8TW4CDORMJo3b45+/fph5syZQkchIiIiIlJ6tra26NKlC5YvXy50FCJSMCIZ502XmrS0NLx48QLNmjUr0fmvXr1CmzZtYGNjA09PT7i4uHzxGqBE9On++ecfWFtbIz4+Hk2aNBE6DhERERGRUktNTYWjoyNu3ryJ6tWrCx2HiBQEOztLkZmZGdLS0kp8fnx8PPbs2YMOHTogJCQEhoaG8Pb2xqFDhyCRSEoxKRG9S82aNREYGAhfX1+un0tEREREJDALCwv07t0bixcvFjoKESkQFjtLmaGhYYkKnnl5ecjLy4OpqSnGjBmD48eP48qVK2jWrBlmzZqFevXqYdSoUYiPj4dUKi2D5EQEAKNHj8azZ88QExMjdBQiIiIiIqU3Y8YMrFq1CllZWUJHISIFwWnsZWDHjh1o37496tSp80njJRIJNm3ahCFDhkBNTe2dY+7evYuYmBhER0cjPT0dgwYNgoeHBxwdHaGiwpo1UWk6efIkvLy8kJKSAl1dXaHjEBEREREptTFjxqBq1apYsGCB0FGISAGw2FkGZDIZfv/9dzRq1Ag2NjYfHJuVlYW9e/fim2++gZaW1idd/9atW4iOjkZ0dDSePHkCd3d3eHh4oHXr1tzNnaiUeHl5oUGDBggKChI6ChERERGRUktPT0fTpk1x7do11K1bV+g4RFTBsdhZhi5fvozU1FRUr14dnTt3Lta1ef78edy5cwf6+vro1KlTibszr1+/Li985ufnw8PDAx4eHrC3t2fhk+gLiMViNG3aFKdPn4a5ubnQcYiIiIiIlNqECRMAAEuXLhU4CRFVdCx2loNnz57h5MmTyM7ORmhoKCZMmIAmTZrgq6++KrV7yGQyXL58GVFRUYiOjoaamho8PT3h4eHx0e5SInq3BQsW4NSpU9i7d6/QUYiIiIiIlNrDhw9hY2ODS5cuwdjYWOg4RFSBsdhZjp4/fw4TExM8f/68TO8jk8lw7tw5REVFISYmBtWqVZN3fFpYWJTpvYkqk/z8fDRp0gRLly5Fz549hY5DRERERKTUpkyZghcvXuC3334TOgoRVWAsdpaj/Px8VK1aFfn5+eV2z6KiIiQlJSE6Ohrbtm2DoaGhvPDZoEGDcstBpKgOHjyIcePG4erVq9DU1BQ6DhERERGR0srKyoKlpSXOnTtXqjMliahyYbGzHMlkMqiqqkIikUBVVbXc7y+VSnHixAlER0djx44dMDMzg4eHB9zc3DgNgOgDXF1d0a5dO0ydOlXoKERERERESm3GjBlIT09HeHi40FGIqIJisbOcaWtr459//oGOjo6gOSQSCY4cOYLo6Gjs3r0btra28PDwwKBBg1CnTh1BsxFVNGlpaWjTpg0uXboEIyMjoeMQERERESmtZ8+eoVGjRkhISOAybUT0Tix2ljN9fX3cunUL+vr6QkeRy8/PR2xsLKKjo7Fv3z60bNkSHh4eGDBgAGrWrCl0PKIKYfr06fj777+xZcsWoaMQERERESm1oKAgXL9+HZs3bxY6ChFVQCx2lrN69erh7NmzFbY7LC8vDwcOHEB0dDT++OMPtGvXDp6enujXrx+qVasmdDwiweTm5sLa2hqbNm1Cx44dhY5DRERERKS0srOzYW5ujvj4eNja2godh4gqGBWhAygbLS0tvHr1SugY76WtrY2BAwciJiYGYrEYw4YNw65du2BiYoK+ffti69atyMnJETomUbnT1dXFokWL4Ovri8LCQqHjEBEREREprSpVqmDy5MkIDAwUOgoRVUAsdpYzbW3tCl3s/Dc9PT14enpi9+7duHfvHgYOHIiNGzfCyMgIbm5u2L59O/Ly8oSOSVRu3NzcULNmTaxZs0boKERERERESs3HxweJiYlITk4WOgoRVTCcxk6f7Z9//sGuXbsQFRWFc+fOoVevXvDw8
ICLiws0NTWFjkdUpq5evQpnZ2dcv34dBgYGQschIiIiIlJaK1asQGxsLPbu3St0FCKqQFjspC+SmZmJHTt2IDo6GleuXEHfvn3h4eGBLl26QF1dXeh4RGVi/PjxePXqFTs8iYiIiIgElJ+fj0aNGiEmJgZt27YVOg4RVRAsdlKpEYvF2LZtG6Kjo3Hr1i0MGDAAHh4e6NSpE1RVVYWOR1Rqnj17BisrK+zbtw8tW7YUOg4RERERkdJau3Yttm/fjtjYWKGjEFEFwWInlYk7d+4gJiYG0dHREIvFcHNzg4eHB9q1awcVFS4VS4ovLCwMoaGhSEhI4L9pIiIiIiKBSCQSWFlZISIiAh07dhQ6DhFVACx2Upm7efMmoqOjER0djWfPnsHNzQ2enp5o1aoVRCKR0PGISqSoqAht27bFjz/+iGHDhgkdh4iIiIhIaa1fvx5hYWE4fvw4f8ckIhY7FUHv3r1hYGCAyMhIoaN8sWvXrskLnxKJBO7u7vDw8IC9vT2/KZHC+fPPP9G/f3+kpKSgWrVqQschIiIiIlJKhYWFsLW1xYoVK9CtWzeh4xCRwDj38gskJydDVVUVjo6OQkdRGDY2Npg9ezZu3LiBnTt3AgAGDBgAKysrzJgxA9evXxc4IdGna9OmDb7++mvMnj1b6ChEREREREpLTU0NgYGB+OWXX8B+LiJisfMLrFu3Dj4+Prh69SpSUlI+OFYikZRTKsUgEolgb2+P+fPn4++//8bGjRuRm5uL7t27o0mTJpg7dy5u3rwpdEyij5o3bx42bNjw0f8DiIiIiIio7Li7uyM3Nxf79+8XOgoRCYzFzhLKy8vDli1b8P3332PQoEEICwuTP3fnzh2IRCJs3boVzs7O0NbWxpo1a/DPP//gm2++gbGxMbS1tWFjY4OIiIhi13358iW8vb2hp6eHOnXqIDg4uLxfWrkTiURo3bo1QkJCcO/ePfz222/IzMxEhw4d0KJFC/z666+4c+eO0DGJ3qlOnTr4+eefMW7cOL6LTEREREQkEBUVFcyePRszZsxAUVGR0HGISEAsdpbQ9u3bYWpqCjs7OwwZMgQbNmx4q3tz2rRp8PHxwfXr19GvXz+8evUKzZs3x759+3Dt2jWMHz8eo0ePRnx8vPycSZMm4fDhw9ixYwfi4+ORnJyMEydOlPfLE4yKigrat2+PFStWQCwWY+HChUhLS0OrVq3Qtm1bLF26FGKxWOiYRMX8+OOPePDgAXbt2iV0FCIiIiIipdWvXz+IRCL+XE6k5LhBUQl16tQJffr0waRJkyCTyfDVV18hJCQEAwcOxJ07d/DVV19h0aJFmDhx4gev4+npCT09PYSGhiInJwc1a9ZEeHg4vLy8AAA5OTkwNjZGv379KsUGRSUlkUhw5MgRREVF4ffff4etrS08PDwwaNAg1KlTR+h4RDhy5AhGjBiB69evQ0dHR+g4RERERERK6cCBA5g8eTIuX74MVVVVoeMQkQDY2VkCt27dQkJCAr799lsAr6dhe3l5ITQ0tNi4li1bFvtYKpUiKCgIdnZ2qFmzJvT09LBz507cu3cPAJCWloaCggI4ODjIz9HT00OTJk3K+BVVfOrq6nBxcUFERAQyMjIwadIkJCYmwtLSEl27dkVoaCiePHkidExSYs7OzmjVqhV+/fVXoaMQERERESmtHj16oFq1aoiOjhY6ChEJRE3oAIooNDQUUqkUJiYm8mNvGmTv378vP6arq1vsvEWLFiEkJATLli1DkyZNoKenh4CAADx69KjYNejDNDU14erqCldXV+Tl5eHAgQOIiorCxIkT4ejoCA8PD/Tr1w/VqlUTOiopmZCQEDRr1gze3t5o0KCB0HGIiIiIiJSOSCTCnDlzMGbMGLi7u0NNjWUPImXDzs7PVFhYiPXr12PevHm4ePGi/HHp0iXY2dm9teHQv506dQp9+vTBkCFDYG9vDzMzM6SmpsqfNzc3h7q6Ok6fPi0/lpubi6tXr5bpa1Jk2traGDhwILZt2waxWIwhQ4Zg165dMDExQb9+/bB161bk5OQIHZOUhImJCSZMmAA/Pz+hoxARERERKS1nZ2cYGRlh48aNQkchIgGw2PmZ9u/fj6ysLHz//fewtbUt9vD09ER4ePh7d36zsLBAfHw8Tp06hRs3bmDs2LG4ffu2/Hk9PT2MHDkSU6ZMweHDh3Ht2jWMGDECUqm0vF6eQtPT08M333yD3bt34+7du+jfvz82btwIIyMjuLu7Y8eOHcjLyxM6JlVykydPxsWLF3H48GGhoxARERERKaU33Z2zZ89GQUGB0HGIqJyx2PmZwsLC4OTkhJo1a771nJubG+7evYu4uLh3njt9+nS0bt0aPXr0QMeOHaGrqyvfiOiNRYsWwcnJCf3794eTkxNsbW3RsWPHMnktlVn16tUxbNgwHDhwAH///Te6deuG3377DYaGhhg8eDD27t2L/Px8oWNSJaSlpYUlS5Zg3Lhx/MGKiIiIiEgg7du3h6WlJcLDw4WOQkTljLuxk1LJzMzE9u3bER0djatXr6Jv377w9PSEs7Mz1NXVhY5HlYRMJkOPHj3QrVs3TJw4Ueg4RERERERK6ezZs+jfvz9u3boFLS0toeMQUTlhsZOUVnp6OrZt24bo6GikpaVhwIAB8PT0RMeOHaGqqip0PFJwf/31FxwdHXHlyhUYGhoKHYeIiIiISCn17dsXzs7OGD9+vNBRiKicsNhJBODOnTuIiYlBVFQUMjIyMGjQIHh6esLBwQEqKlztgUrG398fmZmZWL9+vdBRiIiIiIiU0qVLl3D+/HkMHz4cIpFI6DhEVA5Y7CT6j9TUVHnh8/nz53B3d4eHhwdatWrFb470WbKzs2FtbY2YmBi0a9dO6DhEREREREpJJpPxdzkiJcJiJ9EHXLt2DdHR0YiKikJhYSE8PDzg4eGBpk2b8pslfZLNmzdj8eLFOHPmDJdHICIiIiIiIipjLHYSfQKZTIaLFy8iOjoa0dHR0NDQgKenJzw8PNC4cWOh41EFJpPJ0LFjRwwZMgSjRo0SOg4RERERERFRpcZiZznLzMxEkyZN8OjRI6GjUAnJZDKcOXMG0dHRiImJQY0aNeSFT3Nzc6HjUQV08eJFuLi4ICUlBfr6+kLHISIiIiIiIqq0WOwsZ8+fP0f9+vXx4sULoaNQKSgqKkJCQgKio6Oxfft2GBkZwdPTE+7u7jA1NS3R9SQSCTQ1NcsgLQnJx8cHKioqWLlypdBRiIiIiIjoX86fPw8tLS3Y2NgIHYWISgGLneWsoKAAenp6KCgoEDoKlTKpVIrjx48jKioKO3fuRKNGjeDh4QE3NzcYGRl90jVSU1OxbNkyPHz4EM7Ozhg+fDh0dHTKODmVh3/++QeNGzdGbGwsmjZtKnQcIiIiIiKll5iYiJEjR+LevXuoW7cunJ2dMX/+fNSsWVPoaET0BVSEDqBs1NXVUVhYCKlUKnQUKmWqqqpwdnbG2rVrkZGRgZkzZ+LixYto0qQJOnXqhNWrVyM/P/+D13j69Cn09fVhZGQEX19fLF26FBKJ
pJxeAZWlmjVrYtasWfD19QXfYyIiIiIiEtbz58/xww8/wMLCAn/++SfmzJmDzMxMjBs3TuhoRPSF2NkpAB0dHTx+/Bi6urpCR6FykJ+fjz/++ANRUVHYsGED1NTUPnrO/v37MWLECGzduhXOzs7lkJLKg1QqRatWrTB58mR88803QschIiIiIlIqL1++hIaGBtTU1HDkyBH571wODg4AgGvXrsHBwQHXrl1D/fr1BU5LRCXFzk4BaGtr49WrV0LHoHKiqakJV1dXbNmyBaqqqh8c+2Z5g61bt6Jx48awtLR857hnz55h8eLF2LlzJ7sEFYiqqipWrFiByZMnIycnR+g4RERERERK4+HDh9i4cSNSU1MBAKampkhPT4e9vb18jK6uLuzs7PD06VOhYhJRKWCxUwBaWlosdiopkUj0wec1NDQAAIcOHYKLiwtq164N4PXGRUVFRQCAuLg4zJw5E5MmTYKPjw8SEhLKNjSVKkdHRzg5OSEoKEjoKERERERESkNdXR2LFi3CgwcPAABmZmZo06YNfH19kZ+fj5ycHAQFBeHevXvs6iRScCx2/j/27jsqqrN7G/A9BRiqgnTBjr1GFBsqYgkajEoUG/beTTCvHQsSe2yJvhqFiAUUeRU0BjWKgp3YOxAbiqiggiB15vsjP/kklqACzwxzX2u5hMM5Z+5jlgb27Gc/AigUCrx69Up0DFIzr+e47tu3D0qlEi1atICOjg4AQCqVQiqVYuXKlRg+fDjc3NzQpEkTdOvWDVWqVClwn8ePH+PPP/8s8fxUeIsXL8aGDRsQGxsrOgoRERERkVYoV64cGjdujLVr1+Y3H+3Zswfx8fFwdnZG48aNERMTg40bN8LU1FRwWiL6HCx2CsDOTvoQf39/ODo6olq1avnHzp07h+HDh2Pr1q3Yt28fmjZtivv376NevXqwtbXNP+/nn39Gly5d0LNnTxgaGmLKlClIT08X8Rj0ATY2NvjPf/6DSZMmiY5CRERERKQ1fvzxR1y6dAk9e/bE//73P+zZswc1a9ZEfHw8VCoVRo4cidatW2Pfvn1YtGgRkpKSREcmok/AYqcAnNlJ/6RSqfLneR4+fBhffvklzM3NAQBRUVHw8vJCo0aNcPz4cdSuXRubNm1C2bJlUb9+/fx7HDhwAFOmTEHjxo1x5MgR7Ny5E2FhYTh8+LCQZ6IPmzhxIuLj47F3717RUYiIiIiItIKNjQ02bdoEOzs7jBw5EsuWLcO1a9cwZMgQREVFYdSoUdDT08O9e/cQERGB77//XnRkIvoo+0jcAAAgAElEQVQE/74tNBU5LmOnN+Xk5GDRokUwMjKCXC6Hnp4eWrZsCV1dXeTm5uLSpUu4desWNm/eDJlMhpEjR+LAgQNwdnZGnTp1AACJiYmYO3cuunTpgnXr1gH4e+D21q1bsWTJEri7u4t8RHoHXV1drFy5EmPHjkX79u2hUChERyIiIiIiKvWcnZ3h7OyMZcuW4fnz59DV1c1vNMnNzYVcLseoUaPQsmVLODs74/Tp03BychKcmog+Bjs7BeAydnqTVCqFsbExFixYgAkTJiApKQn79+9HYmIiZDIZhg8fjlOnTsHZ2RnLly+Hjo4Ojh07hszMTJQpUwbA38vcT58+jalTpwL4u4AK/L2boK6ubv48UFIvnTp1Qt26dbF8+XLRUYiIiIiItIqBgQEUCsVbhc68vDxIJBLUr18fXl5eWLNmjeCkRPSxWOwUgMvY6U0ymQwTJ07EkydPcPfuXcyaNQv//e9/MXjwYCQnJ0NXVxeNGzfGkiVLcPPmTYwcORJlypRBWFgYxo8fDwA4duwYbG1t8cUXX0ClUuVvbHTnzh1UqVKFncRqbPny5Vi+fDnu378vOgoRERERkVbIy8uDq6srGjZsiClTpuCPP/7I/5np9XgxAEhLS4OBgQGbR4g0DIudArCzk97H3t4ec+fORWJiIjZv3pz/LuObLl26hG7duuHy5ctYtGgRACA6OhqdOnUCAGRnZwMALl68iJSUFFSoUAFGRkYl9xD0UapUqYIxY8ZgypQpoqMQEREREWkFmUwGR0dHJCQkIDk5GX369EGTJk0wYsQIhISE4OzZswgPD0doaCiqVq1aoABKROqPxU4BOLOTCsPS0vKtY7dv30ZMTAzq1KkDOzs7GBsbAwCSkpJQo0YNAIBc/vco3j179kAul6N58+YA/t4EidTT1KlTcfLkSURGRoqOQkRERESkFebOnQu5XI6xY8ciISEBU6dORU5ODqZOnYru3bvDw8MDAwYM4CZFRBpIomIFpMQNHz48/10josJSqVSQSCSIjY2FQqGAvb09VCoVcnJyMGbMGFy9ehXR0dGQyWRIT0+Hg4MD+vbtCx8fn/yi6Ov7xMTEwNTUFNWqVRP4RPSmkJAQzJs3D+fOncsvWBMRERERUfGZPHkyoqOjcfbs2QLHY2Ji4ODgkL9HwuufxYhIM7CzUwDO7KRP8fp/rg4ODrC3t88/pquri+HDh+P58+cYPnw4/Pz84OTkBBMTE3z77bcFCp2v7dq1Cy1btoSjoyOWLFmCu3fvluiz0Ns8PDxgYWGBtWvXio5CRERERKQVli5divPnzyM8PBzA35sUAYCjo2N+oRMAC51EGobFTgG4jJ2KkkqlgpOTE/z9/ZGamorw8HAMHDgQe/bsga2tLZRKZYHzJRIJFi5ciAcPHmDRokW4desWGjdujBYtWmDlypV4+PChoCfRbhKJBKtWrcK8efPw5MkT0XGIiIiIiEo9mUyG6dOnY//+/QDAFVZEpQSXsQswe/ZsyGQy+Pj4iI5CBADIycnBoUOHEBwcjD179qBBgwbw9PSEh4fHO2eHUvGZPHkyXr58iQ0bNoiOQkRERESkFW7cuIEaNWqwg5OolGBnpwBcxk7qRkdHB25ubggICEBiYiImT56MqKgoVK9eHR06dMDGjRuRkpIiOqZWmDNnDvbu3YuYmBjRUYiIiIiItELNmjXfKnSyL4xIc7HYKYBCoWCxk9SWQqHA119/jW3btuHhw4cYMWIE9u/fj8qVK6NLly4IDAxEamqq6JilVpkyZeDn54dx48a9NYKAiIiIiIiKl0qlgkqlwrNnz0RHIaJPxGKnAJzZSZrCwMAAPXv2REhICBISEtC3b1/s3LkT9vb26N69O4KDg5Geni46ZqkzcOBAAMDmzZsFJyEiIiIi0i4SiQS//fYbOnXqxO5OIg3FYqcAXMZOmsjY2Bj9+vVDWFgY7ty5g65du8Lf3x+2trbw9PREaGgoi/hFRCqVYvXq1Zg+fTpevHghOg4RERERkVZxc3NDTk4OwsLCREchok/AYqcAXMZOms7U1BSDBw/G77//jvj4eLi6umLNmjWwtbWFl5cX9u7di+zsbNExNVqTJk3QuXNnzJ07V3QUIiIiIiKtIpVKMW/ePMyePZujpYg0EIudAnAZO5Um5ubmGDFiBA4fPozr16/DyckJCxcuhI2NDYYOHYoDBw4gNzdXdEyN5Ofnh8DAQFy7dk10FCIiIiIireLu7g49PT2EhISIjkJEH4n
4SEhGDGjBnw9vbG+vXrcejQIUydOhULFiyAu7s7duzYwUWLiAh16tRBUFAQNm3ahPnz56N79+74559/iuw3e/ZspKamYs6cOdi7dy/kcjn69++Pvn37ol+/fujfvz9cXV1x4MABBAcHIysr672vfe/ePXh6eqJp06YAgFu3biEgIAAuLi4q/z2JiLSJRCKBj48P/Pz8xI5CqiAQkdYaPXq04ObmJqSkpAhGRkZCWFiY8PTpU8HAwECIiYlRbhcEQahZs6awbdu2QsevXLlSaNCggfIxAGH27NnKx3l5eYKxsbEQGBj41gw///yz4OzsrHzctm1bYezYsYX26d69u1C7dm3l43nz5gkuLi5vPadCoRDs7Oze+bpEYnr06JFgZ2cndgz6H8uWLRMGDx4sfPfdd4Krq6sQHx8v5OfnC4IgCJcuXRJcXV2F0NBQkVMSkTrJy8sT1q1bJ9jY2AgTJ04UkpKSlNvS0tKE1atXC5mZmcU6z9atW4XExMQ3bj937pzQt29fwdbWVli8eLGQkpKist+BiIheycnJEWrUqCH8888/YkehMuLITiJC9erVMXDgQPj7+2Pr1q3o0qVLofk6ExMT8ejRI0yYMAFVq1ZV/syePRuRkZGFztWkSRPl/+vp6cHa2hoJCQnK54KCgtChQwflberTp09HbGyscntoaCjatm1b6Jz/+/h/JSQkYMKECZDL5TAzM4OpqSkSEhIKnZdIndjb2+Ply5dc8VFkeXl5SE5OVj6eOXMmdu3ahcGDByMvLw95eXnQ1dWFIAj44YcfYGVlhfr164uYmIjUjZ6eHj7//HOEhoZCV1cXDRo0wJo1a5CZmYk9e/Zg4sSJMDY2LtZ5Ro4ciaNHjyrnUVcoFMpb00eNGoWPPvoIDx8+xJw5c1C9evXy/tWIiLSOgYEBJk6cyNGdlYCe2AGISD14eXlh9OjRqFq1KhYuXFho2+t5OX/55Re0a9funefR19cv9FgikSiPv3jxIjw8PDBv3jysXLkS5ubmOHDgAHx9fcuUffTo0YiPj8fKlSvh5OQEQ0NDdO/enXPrkdrS0dGBs7MzIiIi4OrqKnYcrRQQEIDDhw/j2LFjGDp0KFatWgVjY2NIJBLUqlUL1apVQ/PmzdG3b1/ExcUhNDQU169fFzs2EakpCwsLrF69GhMmTMC0adNw6NAh7N+/H7q6usU+h0QiwdChQ7Fnzx5kZ2dj+fLlMDIywqxZs+Du7l6icxERUem8HkSzdOlSWFlZiR2HSokjO4kIANC9e3cYGBggKSkJAwYMKLTN1tYWDg4OiIyMhFQqLfJTXOfPn4eDgwO+/vprtGzZEjKZrNB8ngDQoEEDXLx4sdBz//v4f507dw5TpkyBm5sbXFxcYGpqynn1SO3J5XLO2ymS48eP44svvkD9+vWxfPlybNy4sdC8xXp6ejhy5AiGDRuG69evo1mzZti7dy/Mzc1FTE1EmsDFxQV//PEHPDw8YGRkVOLjdXV18eLFC2zbtg1+fn64evUqBg8ezKKTiKiCWFtbY+DAgdiwYYPYUagMOLKTiAC8Gk3wzz//QBAEGBoaFtk+f/58TJkyBebm5vj444+Rl5eH69ev48mTJ/jqq6+K9RpyuRxPnjzBjh070LZtWxw7dgy//vproX18fHwwatQotGzZEl26dEFQUBAuXboECwuLd553+/btaN26NTIyMvDll1/CwMCgZH8AogrGRYrEkZWVBW9vb8ydOxfTp08HAERHRyM9PR0LFy6ElZUVZDIZevbsiR9//BHZ2dmlKiyISHudPXsW/fr1K/XxY8aMgYODA3r06KHCVEREVFw+Pj5wc3PDzJkzi9y5SJqBZScRKZmamr5129ixY2FiYoLly5fjq6++grGxMVxcXDB58uRin79v376YOXMmpk2bhqysLPTq1QsLFy7ExIkTlfsMGTIEUVFRmDNnDjIzM9GvXz/MmDEDAQEBbz2vv78/xo8fj+bNm8Pe3h7z589HYmJisXMRiUEmk+Hvv/8WO4bW+eWXX+Dq6govLy/lc3/99RdevHiBmjVr4smTJ7CysoKjoyMaNGjwxi9/iIjeJTU1FZaWlqU+3tDQEAUFBSpMREREJdG0aVPIZDIEBQVh6NChYsehUpAIgiCIHYKIiEjbnD17FrNmzUJISIjYUbTKxYsXERMTA3d3d+jp6WHp0qVYtmwZzpw5g0aNGiElJQXOzs74/PPP8e2334odl4g00MGDB9G3b1/Rz0FERKX3+++/Y+nSpe+dUo3UE+fsJCIiEgFvYxdHmzZtMGjQIOjp6SEvLw/16tXDX3/9hUaNGkGhUMDCwgK9evVC1apVxY5KRBqKY0mIiDRf3759kZCQwLJTQ7HsJCIiEoGtrS2ys7Px/PlzsaNohZcvXyr/X0/v1Sw++vr66N+/P5o3bw4A0NHRQVpaGqKiolC9enVRchIRASxMiYjEpquriylTpsDPz0/sKFQKLDuJiIhEIJFIOLqzgkyfPh3ff/89YmJiALz6278uEnR0/vtRSKFQYMaMGcjPz8fnn38uSlYi0nw6OjrIzs4u9fEKhQJ5eXkqTERERKXh5eWFY8eOIT4+XuwoVEIsO4mIiEQil8tZdpazzZs3w8/PD35+fvjyyy9x6dIl5OfnQyKRFNrv1q1b8PLywp9//on9+/eLlJaIKoPu3bvjxIkTpT7+3Llz6NixowoTERFRaZiZmSE6Oho2NjZiR6ESYtlJREQkEo7sLF8pKSkICgrC0qVLsX//fly+fBne3t4IDg7GixcvCu1bp04dtGrVClu2bEGtWrVESkxElYGxsTGysrJKfSt6QkICL6yJiNSEqalpkS/JSf2x7CQiIhIJy87ypaOjg169esHFxQXdu3dHaGgoZDIZJkyYgB9//BFRUVEAgLS0NAQFBWHMmDHo1q2byKmJqDLo1q0bgoODS3zckSNH0Lp163JIREREpcGiUzNJBM5+TUTl6IcffsDjx4+xcuVKsaMQqZ0LFy7Ax8cHly9fFjtKpZWVlQVjY+NCz61cuRJff/01evTogS+++AJr165FdHQ0Ll26JFJKIqqMYmJicPXqVQwaNKhYF8t//PEHnJyc0KBBgwpIR0REVHnpiR2AiCq358+fc1Vjord4PbJTEAR+a1xO/l10FhQUQFdXF9OnT0enTp0wcuRI9OnTB5mZmbh9+7aIKYmoMqpduzZMTEywe/duVKtWDR9++GGhRdGAV6uuX7x4EY8fP0br1q05jQYRkQbJyMjAhQsXUL16ddSvXx8mJiZiR6L/x7KTiMrV8+fPUb9+fbFjEKklS0tLAEBycjKsrKxETlP56erqQhAECIKA5s2bY+vWrWjdujV27NjB9ykiKhdWVlYYMmQIOnTogBs3bqBhw4aF3ovy8/PRunVrtG3bVuyoRERUAsnJyfDw8EBiYiLi4+Ph5uaGTZs2iR2L/h9vYyeicvX6LYaj1ojerFWrVli1ahXatWsndhStkpKSgjZt2qBevXo4ePCg2HGIqBKLiIhA+/bt8ejRIxgYGIgdh4iISkGhUODIkSPY
sGEDWrVqBalUioULF2LVqlUwMjLCuHHj8NVXX8HT01PsqAQuUERE5UwikbDoJHoHLlJUvt72na4gCBg2bBiLTiIqd/7+/hgxYgSLTiIiDebp6YkvvvgCzZs3x5kzZ/DNN9+gV69e6NWrFzp16oTx48djzZo1Ysek/8eyk4iISERyuZxlZzlJTExEbm7uGwtPS0tLzJs3T4RURKRN8vPzERAQAG9vb7GjEBFRKT148ACXLl3CuHHjMG/ePBw7dgwTJ07E7t27lfvUqFEDhoaGSExMFDEpvcayk4iISEQc2Vk+8vPz8cknn2DlypVvHV3OUedEVN5er7DesGFDsaMQEVEp5ebmQqFQwMPDA8Crz5AeHh5ITk6Gj48PlixZgmXLlsHFxQXW1tZvvbOIKg7LTiIiIhGx7CwfixYtgr6+PmbOnCl2FCLSYps3b+aoTiIiDde4cWMIgoBDhw4pnztz5gxkMhlsbGxw+PBh2NvbY/To0QD4hbo64AJFREREInrx4gVq1qyJly9f8oORipw8eRIjRozA9evXYWdnJ3YcItJSz549Q4MGDRAbGwtTU1Ox4xARURls3LgRa9euRffu3dGiRQvs3LkTdnZ22LRpE548eYJq1arxvV6N6IkdgIiISJuZm5vDyMgI8fHxLOZUID4+HiNHjsTWrVv59yQiUW3duhXu7u68+CUiqgTGjRuHtLQ0bN++Hfv374elpSXmz58PAHBwcADwar54a2trEVPSaxzZSUREJLJ27dph6dKl6NSpk9hRNJpCocBHH32EFi1aYMmSJWLHISItJggC6tevj4CAALRt21bsOEREpCLx8fFITU2FXC4HAKSmpmL//v346aefYGhoCGtrawwaNAj9+vXjl10i4pydRKQyBQUFhR7zuxSi4uG8naqxbNkyZGRkYMGCBWJHISItJ5FI8ODBAxadRESVjI2NDeRyOXJzc7F48WLIZDJ4enoiMTER7u7uqFOnDrZs2YKxY8eKHVWr8TZ2IlIZXV3dQo8lEgkSExORnZ0Nc3NzfrNF9BZyuZxlZxmdP38eK1euxNWrV6Gnx483RERERKR6EokECoUCCxcuxJYtW9ChQweYm5sjOTkZZ8+eRVBQEMLCwtChQwccPXoUvXv3FjuyVuLITiJSiezsbIwfPx55eXkAgNzcXKxbtw7e3t4YN24cpk2bhps3b4qckkg9cWRn2aSkpGDYsGHYtGkTatasKXYcIiIiIqrErl69ih9++AG+vr5Yv349/P39sW7dOsTExGDFihWQy+Xw8PDAjz/+KHZUrcWyk4hUIj4+Hps2bYK+vj5yc3Oxdu1aTJs2DSYmJpDJZLh48SJ69OiBmJgYsaMSqR2WnaUnCALGjBkDd3d39O3bV+w4RERERFTJXbp0Cd26dYOPj49yQSIHBwd069YN9+7dAwD07t0bDRs2RHZ2tphRtRbv8yIilUhJSYGZmRkA4OHDh9i4cSNWrVqFiRMnAng18rN///74/vvvsW7dOjGjEqkdqVSKyMhIKBQK6Ojwe8iSWL16NeLi4rBnzx6xoxARERGRFrC0tERoaCjy8/NhYGAAAAgLC8O2bdvg6+sLAGjTpg3atWsHIyMjMaNqLV5REZFKJCQkoHr16gCgfNMfNWoUFAoFCgoKYGRkhE8//RS3bt0SOSmR+jE1NUW1atUQFxcndhSNcvXqVSxevBi//fab8oMmEZHY5s+fj0aNGokdg4iIysmwYcOgq6uL2bNnw9/fH/7+/pg7dy5kMhkGDRoEALCwsIC5ubnISbUXy04iUonU1FRER0fDz88PS5YsAQDk5ORAR0dHuXBRWlpakRXbiegV3speMqmpqfDw8MBPP/2EunXrih2HiDSEp6cnJBKJ8sfKygp9+vTB/fv3xY5WIU6fPg2JRIKkpCSxoxARabSAgADExcVhwYIFWLVqFZKSkjB79mzUqVNH7GgE3sZORCpiZWWFZs2a4eDBg0hOToZcLsfTp09haWkJ4FXRGRoaCrlcLnJSIvUkk8kQFhaGrl27ih1F7QmCgPHjx6Nnz54YPHiw2HGISMP06NEDgYGBAIC4uDjMnDkTAwcORGhoqMjJ3i03N5ej2ImI1ET79u3RunVrPHv2DM+fP0fjxo3FjkT/wpGdRKQSXbp0wV9//YV169Zh/fr1mDlzJmxtbZXbw8PDkZ6ejt69e4uYkkh9yeVyjuwspo0bN+L+/ftc4ZKISsXQ0BB2dnaws7ODq6srpk+fjvv37yMrKwvR0dGQSCS4evVqoWMkEgmCgoKUj+Pi4jB8+HBYWlqiSpUqaNasGU6dOlXomF27dsHZ2RmmpqYYMGBAodGUV65cQa9evWBlZYVq1aqhQ4cOuHDhQpHX/OmnnzBo0CCYmJjgP//5DwDg3r17cHNzg6mpKWxsbDB06FA8e/ZMedzt27fRvXt3VKtWDaampmjatClOnTqF6Oho5Rdq1tbWkEgk8PT0VMnflIhIG+np6cHR0ZFFpxriyE4iUokTJ04gLS1NOUfJa4IgQCKRwNXVFTt37hQpHZH6k8lkCAkJETuG2rt9+zbmzJmDs2fPwtjYWOw4RKTh0tLS8Ntvv6Fx48bFfk/JyMhA586dYWNjg3379sHBwaHInOTR0dH47bffsG/fPmRkZMDDwwNz5szB+vXrla87cuRI+Pn5QSKRYO3atfj4448RHh4OKysr5XkWLFiAb7/9FitWrIBEIsHTp0/RqVMneHt7Y8WKFcjLy8OcOXPQr18/XLx4ETo6Ohg2bBiaNm2Ky5cvQ09PD7dv34aRkRFq1qyJ4OBguLu74+7du7CwsOD7KBERVUosO4lIJfbu3Yv169ejd+/eGDJkCPr27QsLCwtIJBIAr0pPAMrHRFQY5+x8v4yMDAwePBg//PAD6tevL3YcItJQR48eRdWqVQG8el+pWbMmjhw5Uuzjd+7ciWfPnuHChQvKYtLZ2bnQPvn5+QgICICZmRkAYPz48diyZYtye7du3Qrtv2bNGgQHB+Po0aMYMWKE8vkhQ4Zg7NixysfffPMNmjZtiu+//1753LZt22BhYYGrV6+iVatWiImJga+vr/J9UiqVKve1sLAAANjY2BQqVYmIqGxeX+8CvOZVB7yNnYhU4t69e/jwww9hYmKCuXPnYvTo0dixY4dydenXCwEQ0Zs5Ozvj4cOHXMTrHSZPnozWrVtj1KhRYkchIg3WqVMn3Lx5Ezdv3sSlS5fQrVs39OrVC48ePSrW8Tdu3ECTJk3eWRbWrl1bWXQCgL29PRISEpSPExISMGHCBMjlcpiZmcHU1BQJCQmIjY0tdJ4WLVoUenzt2jWcOXMGVatWVf7UrFkTABAZGQkAmDFjBsaOHYtu3bphyZIlWrP4EhGRmCQSCZYsWQJ/f3+xoxBYdhKRisTHx8PLywuBgYFYsmQJcnNzMWvWLHh6emL37t2FPuATUVFVqlSBlZVVsS+2tU1gYCAuXLiAtWvXih2FiDRclSpVIJVKIZVK0apVK2zevBkvX77Ehg0boKPz6vLo3yN08vLyCh3/721vo6+vX+ixRCK
BQqFQPh49ejSuXLmClStXIiQkBDdv3oSjoyNyc3MLHWdiYlLosUKhgJubm7Ksff0THh6OPn36AADmz5+Pe/fuYcCAAQgJCUGTJk148U1EVAFatWoFPz+/Yv07QeWLZScRqURaWhqMjIxgZGSEUaNG4ciRI1i1ahUkEgnGjBmDfv36ISAgoMiHeCL6L97K/mYPHjzAjBkzsHv3buWtp0REqiKRSKCjo4PMzExYW1sDAJ4+farcfvPmzUL7u7q64p9//im04FBJnTt3DlOmTIGbmxtcXFxgampa6DXfxtXVFXfv3kXt2rWVhe3rH1NTU+V+MpkMU6dOxeHDh+Ht7Y1NmzYBgHI1d95FQESkej179kR+fn6RBeuo4rHsJCKVyMjIUF4g5OfnQ1dXF5988gmOHTuGP/74A/b29vDy8lLe1k5ERclkMoSFhYkdQ61kZWVh8ODBWLx4MZo0aSJ2HCKqBHJycvDs2TM8e/YMoaGhmDJlCtLT09G3b18YGxujTZs2+P7773H37l2EhITA19e30PHDhg2DjY0NBgwYgLNnz+Lhw4c4cOBAiS5u5XI5tm/fjnv37uHKlSvw8PBQFpHvMmnSJKSmpmLIkCG4dOkSoqKicPz4cYwfPx5paWnIysrCpEmTcPr0aURHR+PSpUs4d+4cGjZsCODV7fUSiQSHDx9GYmIi0tPTS/bHIyKit5JIJPDx8YGfn5/YUbQey04iUonMzEzl3FR6eq/WPlMoFBAEAZ06dcLevXtx69YtODo6ihmTSK1xZGdRX3zxBerXr4/x48eLHYWIKonjx4+jRo0aqFGjBlq3bo0rV65gz5496NKlCwAob/lu2bIlJkyYgMWLFxc63sTEBH///TccHBzQt29fuLi4YN68eSWam9zf3x/p6elo3rw5PDw84OXlBScnp/ceZ29vj/Pnz0NHRwe9e/eGi4sLJk2aBENDQxgaGkJXVxfPnz/H6NGjUa9ePQwcOBBt27bFjz/+CABwcHDAggULMGfOHNja2mLy5MnFzkxERO83cuRIhISEKOdRJnFIBE4mQEQqkJKSAnNzc+VcV/8mCAIEQXjjNiL6rwMHDmD9+vU4fPiw2FHUQlBQEGbNmoXr168XWuiDiIiIiEhdzZo1Czk5OVi1apXYUbQWy04iIiI1ERoaiv79+/NWdgBRUVFo06YNDh8+jJYtW4odh4iIiIioWGJjY9GsWTNER0ejWrVqYsfRShxmRUTl4vVoTiIqvrp16yI2Nhb5+fliRxFVbm4uPDw88J///IdFJxERERFplFq1aqFHjx4ICAgQO4rWYtlJROXiwoULOHfunNgxiDSKoaEhatSogejoaLGjiOqrr76CnZ0dfHx8xI5CRERERFRiPj4+WL16NRQKhdhRtBLLTiIqF8eOHcOJEyfEjkGkcbR9kaJDhw5hz5492LJlS4kW+yAiIiIiUhft2rVD9erVORe/SFh2ElG5eP78OapXry52DCKNI5PJtHbOzsePH2Ps2LHYuXMnLC0txY5DRERERFQqEokEPj4+8PPzEzuKVmLZSUTlgmUnUelo68jO/Px8DB06FD4+PujQoYPYcYiI3qlt27Y4dOiQ2DGIiEiNDR48GPfu3cOdO3fEjqJ1WHYSUblg2UlUOnK5XCvLzvnz58PY2BizZs0SOwoR0TvdvXsXsbGx6N27t9hRiIhIjRkYGOCzzz7j6E4RsOwkonLBspOodLRxZOfx48exZcsWBAYGQkeHH02ISL1t3rwZnp6e0NPTEzsKERGpuc8++wxBQUFISkoSO4pW4RUFEZULlp1EpePk5IS4uDjk5uaKHaVCPHv2DKNGjcK2bdtga2srdhwionfKycnB9u3b4eXlJXYUIiLSADY2NhgwYAA2btwodhStwrKTiMoFy06i0tHX10fNmjURFRUldpRyp1AoMHLkSIwdOxbdu3cXOw4R0XsdOHAAjRo1grOzs9hRiIhIQ/j4+OCnn35CXl6e2FG0BstOIioXLDuJSk9bbmVfunQpcnJy8M0334gdhYioWDZv3gxvb2+xYxARkQZp1qwZpFIpgoODxY6iNVh2EpHKZWVlAQCMjY1FTkKkmbSh7Dx79ixWr16NnTt3ct47ItIIsbGxuHLlCgYNGiR2FCIi0jA+Pj5cqKgCsewkIpXjqE6ispHJZAgLCxM7RrlJSkrC8OHDsXnzZjg6Ooodh4ioWLZs2YKhQ4fyy1wiIiqxfv364dmzZ7h8+bLYUbQCy04iUjmWnURlI5fLK+3ITkEQMGbMGAwePBhubm5ixyEiKhaFQoEtW7bwFnYiIioVXV1dTJ48maM7KwjLTiJSOZadRGVTmW9jX7VqFRISEvDtt9+KHYWIqNhOnDgBCwsLfPDBB2JHISIiDeXt7Y0//vgDT548ETtKpceyk4hUjmUnUdnUqlULiYmJyvlvK4vLly/ju+++w65du2BgYCB2HCKiYtu0aRPGjh0rdgwiItJg5ubmGDZsGH7++Wexo1R6LDuJSOVYdhKVja6uLpycnBAZGSl2FJVJTU2Fh4cHfv75Z9SpU0fsOERExZaUlIRjx45h2LBhYkchIiINN2XKFGzYsKHSDWpQNyw7iUjlWHYSlV1lupVdEASMHTsWH330Edzd3cWOQ0RUItu3b0efPn1gbm4udhQiItJw9erVQ8uWLbFz506xo1RqLDuJSOVYdhKVXWUqO9evX4/w8HD88MMPYkchIioRQRCwefNm3sJOREQq4+PjAz8/PwiCIHaUSotlJxGpHMtOorKTyWQICwsTO0aZ3bp1C19//TV2794NIyMjseMQEZXIlStXkJWVhc6dO4sdhYiIKomePXsiPz8fp0+fFjtKpcWyk4hUjmUnUdlVhpGd6enpGDx4MFauXAm5XC52HCKiEtu0aRO8vLwgkUjEjkJERJWERCLB1KlT4efnJ3aUSotlJxGpHMtOorKTy+UaX3ZOmjQJ7du3x4gRI8SOQkRUYhkZGQgKCoKnp6fYUYiIqJIZOXIkzp07V6kWJFUnLDuJSOVYdhKVnYODA168eIH09HSxo5TK1q1bceXKFaxZs0bsKEREpbJnzx60b98e9vb2YkchIqJKxsTEBN7e3li7dq3YUSollp1EpHIsO4nKTkdHB87OzoiIiBA7SomFhobC19cXu3fvhomJidhxiIhKZdOmTVyYiIiIys2kSZOwbds2vHz5UuwolTlX4AAAACAASURBVA7LTiJSOZadRKqhifN2ZmVlYciQIfj222/RqFEjseMQEZXK/fv3ERkZiY8//ljsKEREVEnVqlUL3bp1Q0BAgNhRKh2WnUSkciw7iVRDE8vO6dOnw8XFhaOhiEij+fv7Y9SoUdDX1xc7ChERVWLTpk3DmjVroFAoxI5SqbDsJCKVys7OhkKhgLGxsdhRiDSeTCZDWFiY2DGK7bfffsPx48exfv16rlxMRBorLy8P27Ztg7e3t9hRiIiokmvXrh3MzMxw5MgRsaNUKiw7iUilXo/qZNFBVHaaNLIzMjISU6ZMwe7du1GtWjWx4xARldqhQ4cgl8shl8vFjk
JERJWcRCKBj48P/Pz8xI5SqbDsJCKV4i3sRKojl8s1ouzMycnBkCFDMHfuXLi6uoodh4ioTDZv3sxRnUREVGEGDx6MO3fu4M6dO2JHqTRYdhKRSrHsJFIdOzs7ZGVlITU1Vewo7zR79mw4OjpiypQpYkchIiqTJ0+eICQkBJ988onYUYiISEsYGhri888/x+rVq8WOUmmw7CQilWLZSaQ6EokEUqlUrUd3HjhwAPv27YO/vz+nryAijRcQEIDBgwfDxMRE7ChERKRFJkyYgD179iA5OVnsKJUCy04iUimWnUSqpc7zdsbGxmLcuHHYuXMnLCwsxI5DRFQmCoWCt7ATEZEobG1t0b9/f2zYsEHsKJUCy04iUimWnUSqpa5lZ15eHoYOHYoZM2agXbt2YschIiqz06dPw9TUFC1atBA7ChERaSEfHx+sW7cOeXl5YkfReCw7iUilWHYSqZa6lp3z5s2DqakpZs6cKXYUIiKVCA4Ohre3N6fkICIiUXzwwQeoW7cu9u7dK3YUjceyk4hUimUnkWrJZDKEhYWJHaOQP//8E9u2bcO2bdugo8OPEkSk+QRBwNq1azFp0iSxoxARkRbz8fGBn5+f2DE0Hq9QiEilWHYSqZZcLlerkZ1Pnz6Fp6cnAgMDYWNjI3YcIiKVkEgkkEgk0NXVFTsKERFpsf79++Pp06e4fPmy2FE0GstOIiqz5ORk7N+/HwcOHICBgQESExNx6dIlCIIgdjQijWdlZQWFQqEWKzMWFBRgxIgRGD9+PLp27Sp2HCIiIiKiSkVXVxeTJ0/m6M4ykghsI4iolG7cuIGoqChYWFigU6dOhUZDxMbG4vLly9DX10evXr1gbGwsYlIizdayZUusWbMGbdq0ETXHokWLcPLkSRw/fpyjn4iIiIiIysGLFy9Qt25d3LlzB/b29mLH0UgsO4moVA4ePIi6devCxcXlnfvl5ubit99+Q+/evWFtbV1B6Ygql2HDhuGjjz7CyJEjRcvw999/Y8iQIbh+/To/dBERERERlaNJkybBwsICixYtEjuKRuJt7ERUYgcPHsQHH3zw3qITAAwMDDBixAj89ddfSE1NrYB0RJWP2CuyJyYmYsSIEdiyZQuLTiIiIiKicjZ16lRs2LAB2dnZYkfRSCw7iahErl+/DmdnZzg6Ohb7GIlEAg8PDxw+fLgckxFVXmKWnQqFAqNHj1aOLiUi0lSJiYnYtGkTfvnlF/z88884f/682JGIiIjeqF69emjevDl27twpdhSNpCd2ACLSLA8fPoS7u3uJj9PR0UHdunXx+PHjEhWlRPSq7AwLCxPltX/88Uc8f/4cixcvFuX1iYhUYf/+/Vi+fDnu3r0LExMTODg4ID8/H7Vr18ann36Kfv36wcTEROyYRERESj4+Pvjyyy8xZswYSCQSseNoFI7sJKJiS0xMhJWVVamPb926NS5duqTCRETa4fXIzoqeZvvSpUtYtmwZdu3aBX19/Qp9bSIiVZo1axZat26NqKgoPH78GCtWrMDgwYORn5+PZcuWYfPmzWJHJCIiKqRXr17Iy8vD6dOnxY6icVh2ElGxhYSEoGPHjqU+XiKRQEeHbztEJWVhYQEDAwMkJCRU2Gs+f/4cHh4eWL9+PWrXrl1hr0tEpGpRUVF48eIFZsyYgerVqwMAOnbsiFmzZmHdunUYMGAApk2bhl9//VXkpERERP8lkUgwdepU+Pn5iR1F47B1IKJi09HRKXNZqaenV+Gj04gqg4qct1MQBIwdOxZ9+/bFwIEDK+Q1iYjKi0QigaWlJdavXw/g1XtcQUEBBEGAo6Mj5s2bB09PTxw/fhx5eXkipyUiIvqvkSNH4ty5c4iKihI7ikZh2UlExaaKklIikfBCgqgUKrLsXLduHaKjo7F8+fIKeT0iovJUp04dfPrpp9i1axd27doFANDV1S00/1ndunVx7949TtlBRERqxcTEBF5eXli7dq3YUTQKFygiogoVGRkJKysrSKVSyGQySKXSQj92dnacfJnoDSqq7Lx58ybmz5+PkJAQGBoalvvrERGVJ0EQIJFIMGnSJCQmJmLkyJFYuHAhPvvsM3z44YeQSCS4ceMGduzYgYkTJ4odl4iIqIjJkyfjgw8+wIIFC2Bqaip2HI0gEXg/KREV09mzZyGXy2Fra1vqcwQFBaF79+6IiIgo8hMeHo7MzMwiBejrH3t7e875SVpr165dCA4Oxp49e8rtNdLS0tC8eXMsWLAAQ4cOLbfXISKqSKmpqUhLS4MgCEhOTkZQUBB27tyJmJgY1KlTB6mpqfDw8MCqVaugq6srdlwiIqIiPv30U3Tq1AlTpkwRO4pGYNlJRMUmCAL27t0Ld3f3Uh3//PlzXL9+Hd27d3/rPqmpqYiMjHxjEZqamgpnZ+c3FqE1a9ZkEUqV2rVr1+Dl5YVbt26Vy/kFQcDIkSNhbGyMjRs3lstrEBFVpNTUVPj7+2PhwoWoUaMGCgoKYGtrix49emDAgAHQ19fHjRs38MEHH6BBgwZixyUiInqrc+fOYcyYMXjw4AGve4uBt7ETUbG9Xk09Pz8fenolf/s4ffo0+vXr9859zMzM4OrqCldX1yLb0tPTCxWhV69exa+//oqIiAgkJyejTp06RUpQmUyGmjVrliovkTqRyWSIiIhQ3pKpagEBAbh58yYuX76s8nMTEYlhyZIlOHfuHH755RdYWFhg7dq1OHjwILKysnDy5EmsWLECw4YNEzsmERHRe7Vv3x7VqlXDkSNH0KdPH7HjqD2O7CSiEklPT8eBAwdKfHEQFhaGuLg4dOnSpVxyZWZmIioqqtBI0Nf/Hx8fj9q1axcpQaVSKWrXrs3FCEhj2NnZ4dq1a3BwcFDpee/du4fOnTvj9OnTcHFxUem5iYjE4uDggA0bNsDNzQ0AkJiYiBEjRqBz5844fvw4Hj9+jMOHD0Mmk4mclIiI6P0CAwOxbds2/PXXX2JHUXssO4moxJ48eYKQkBB88sknxRphFhYWhvDwcOXFRkXLzs7Gw4cPi5SgERERiIuLg6OjY5ESVCqVok6dOjAwMBAlM9GbdOzYEYsWLVLplwaZmZlo1aoVZsyYAS8vL5Wdl4hITBEREfj000+xevVqdOzYUfm8jY0Nrly5gtq1a6N+/fr47LPPMG3atHIbNU9ERKQqOTk5cHJywvHjxzlA4T1YdhJRqSQnJ+Po0aNo0KDBG285B4AXL17g1KlTMDc3R9euXSs4YfHk5uYiOjq6SAkaERGBR48eoUaNGm9cOb5u3bowMjISOz5pGS8vL7Rt2xbjxo1T2TnHjRuHrKwsBAYG8kKfiCoFQRBQUFCAQYMGwczMDBs3bkRmZiYCAwPx7bffIj4+HgDg6+uL6Oho7Nq1i9PdEBGRRliwYAHi4uKwfv16saOoNf6rTkSlYmlpieHDhyMyMhJBQUHQ1dWFoaEhDA0NkZ6ejry8PJiZmaFv375qfQFhYGAAuVwOuVxeZFteXh5iY2MLFaEnT55EREQEoqOjYWNjU6QElUqlcHZ2RpUqVUT4baiyk8lkCA8PV9n5f
v31V/z999+4du0ai04iqjQkEgn09PTwySef4PPPP0dISAhMTEyQmpqKZcuWFdo3NzdXrT+nEBER/dtnn32G+vXrY/r06bh//36hxYpMTU3RuXNnLmAEjuwkIhXKy8tDbm4uqlSpUumLk4KCAsTGxhYZDRoREYGoqChYWlq+cdV4qVSKqlWrVkjGrKws7NmzB7du3YKpqSk+/PBDtGzZkhd1GiwoKAg7duzAvn37ynyu8PBwtGvXDn/++Sc++OADFaQjIlI/iYmJ8Pf3R0JCAkaPHo0mTZoAAO7fv4/OnTtj48aN7108kYiISF1cv34dO3fuRNeuXfHRRx8VKjaTkpJw5swZCIKAHj16wMzMTMSk4mLZSUSkYgUFBXjy5EmREjQ8PByRkZEwMzN7axGqyn+QHj16hKVLlyI9PR2BgYHo3bs3AgICYGNjAwC4cuUKjh8/jqysLMjlcrRp0wbOzs6FimrOYaZebt26heHDh+POnTtlOk9OTg7atWsHLy8vTJo0SUXpiIg0Q1paGn777TecPHkSO3fuFDsOERFRsRw8eBDOzs5o2LDhO/dTKBTYs2cP2rRpg9q1a1dQOvXCspOIqAIpFAo8ffq0SAn6+v+rVKlSpAB9fat89erVS/RaBQUFiIuLQ82aNdG8eXN07twZixcvVt5i7+npiaSkJBgYGODx48fIzs7G4sWLlSNcFAoFdHR08OLFCzx79gx2dnYwNzdX+d+Eii8jIwNWVlbIyMgo0+0pPj4+ePToEYKDg1lmE5FWio+PhyAIsLOzEzsKERHRex06dAjNmjWDo6NjsY/Zt28f2rVrB1tb23JMpp5YdhIRqQlBEBAfH//GEjQ8PBz6+vpFStBevXrB2tr6vYWVnZ0dZs6cienTpytLsgcPHsDExASOjo5QKBTw9fXF1q1bce3aNTg5OQF4dZvfggULEBISgvj4eLRo0QIBAQGQSqXl/eegt3B0dMT58+dL/S3t77//junTp+P69eslLtCJiIiIiKhi/fPPPwCgnIqluARBwK+//ophw4aVRyy1xrKTiEgDCIKApKSkIiXoV199hUaNGr2z7MzIyICNjQ38/f0xZMiQt+6XkpICGxsbXLhwAS1btgQAtG/fHpmZmfjll1/g6OgIb29v5OXl4dChQzA2Nlb570nv17VrV8yZMwc9evQo8bExMTFo2bIlDhw4gDZt2pRDOiIi9fP6cocj2YmISBMFBwfD3d29VMfeuXMH+vr6qFevnopTqTeuUkFEpAEkEgmsra1hbW2Ntm3bFuuY1/NtPnz4EBKJRDlX57+3vz43AOzfvx/6+vqQyWQAgJCQEFy4cAE3b95Ufou4cuVKuLi44OHDh++dK4bKx+sV2Utadubl5cHDwwNffvkli04i0ipTp07F119/XeTfQSIiInX34sWLMk0l1qhRI+zdu1fryk6uR09EVEkpFAoAQGhoKKpVqwYLC4tC2/+9+ND27dsxb948TJ8+Hebm5sjJycGxY8fg6OiIJk2aID8/HwBgZmYGOzs73L59u2J/GVJ6XXaW1Ndff43q1atjxowZ5ZCKiEg9RUVFYdeuXVq9Ii0REWmus2fPokuXLmU6R1nm+tdUHNlJRFTJ3bt3DzY2Nsr5GQVBgEKhgK6uLjIyMjB//nwEBwdj4sSJmD17NoBXq3WHhoZCLpcD+G9xGh8fD2tra6SmpirPxdsCK5ZMJsOZM2dKdMzRo0exY8cOXL9+XSs/7BCR9tqyZQuGDx8OQ0NDsaMQERGViq6ubpmOr1q1KrKysrRqGjKWnURElZAgCHjx4gUsLS0RFhYGJycn5aiW10XnrVu34OPjgxcvXmDdunXo3bt3ofIyPj5eeav661veY2NjoaurW2SU6Ot94uPjYWVlBT09/vNSXko6sjMuLg5jxozBrl27YG1tXY7JiIjUS0FBAbZs2YI//vhD7ChERESloopldgwNDZGdnc2yk4iINNuTJ0/Qq1cvZGdnIzo6GnXq1MH69evRuXNntG7dGoGBgfjhhx/Qvn17fPfdd6hWrRqAV/N3CoKAatWqITMzE1WrVgXw328Tb926BWNjY+Vq7f87qrN37964f/8+atWqVWTleKlUCicnJ+jr61fcH6IScnZ2RnR0NPLz899bKhcUFGD48OGYOHEiOnfuXEEJiYjUw7Fjx+Dg4IDGjRuLHYWIiEg0qampWjedC8tOIqJKyMHBAbt27cKNGzcQFxeHa9eu4eeff8alS5ewevVqTJ8+HSkpKbC3t8eKFStQr149yGQyNG7cGIaGhpBIJKhXrx4uXryIuLg42NvbA3i1iJGrq6vy9vZ/k0gkuHnzJnJycvDw4UPlivEPHjzA4cOHERERgSdPnsDBwaFICSqVSlGnTh3eZlgMRkZGsLW1RUxMDJydnd+57+LFi6Gjo4P//Oc/FZSOiEh9bN68Gd7e3mLHICIiKrVatWohMjLyvZ/73yU3N1frprKSCKoYE0tERBrl/v37CA8Px99//43bt28jKioKMTEx8PPzw4QJE6Cjo4MbN25g2LBhcHNzw8cff4xffvkFx48fx6lTp9C0adNSvW5ubi5iYmIQERGB8PBwZSEaERGB2NhY2NnZvbEIrVu3rlbddvE+PXv2xBdffIHevXu/dZ9Tp05h2LBhuH79OmrUqFGB6YiIxBcfH4969eohNjZWefcCERGRJgoODoa7u3upjk1LS8OFCxfQq1cvFadSbyw7iYhISaFQFPrWb9++fVi2bBmioqLQsmVLzJ8/Hy1atCiX187Pz0dsbGyREjQiIgIPHz6EtbV1kRJUKpXC2dkZJiYm5ZJJXU2cOBENGjTAlClT3rg9ISEBrq6u8Pf317oPNkREALBixQrcvXsXW7ZsETsKERFRmRw+fBjdunUr1eCPAwcO4KOPPtK6qcRYdhJRmXl6eiIpKQmHDh0SOwqVIzFXXi8oKMCjR4+KlKARERGIioqCubl5kRL09Y+pqakomctLfn4+Zs+ejZcvX6JPnz6QSCRwcnJSzkmnUCjg5uaGZs2a4bvvvhM5LRFRxRMEAQ0bNsTGjRvRoUMHseMQERGVSW5uLn799VeMGjWqRNdj4eHhePToEbp161aO6dQTy04iLeDp6YmtW7cCAPT09FC9enW4uLjgk08+wfjx48v8LY8qys7Xi+hcuXKl3EYOUuWkUCjw5MmTIiVoeHg4IiMjYWpq+sYSVCqVwtzcXOz4xRYfH4/z589DR0cHnTt3RvXq1ZXbHjx4gDt37sDY2Bg3b97E4cOHcfr0aa37BpeICADOnz8Pb29vhIaGivYlHRERkSqlpKTg8OHDGD58eLHm3wwPD0dYWBjc3NwqIJ364QJFRFqiR48eCAwMREFBARITE3Hy5EnMmzcPgYGBOHHixBtvA87NzYWBgYEIaYmKT0dHBzVr1kTNmjXRtWvXQtsEQcDTp08LlaB79+5V3ipvZGT0xhJUJpPBwsJCpN+oqMuXL+PFixcYOHDgGy/c69Wrh3r16iEjIwOHDh3C6tWrWXQSkdZ6vTARi04iIqosLCwsMHDgQOza
tQu1atVC+/bt3/jvXEpKCk6fPg0LCwutLToBjuwk0gpvG3l5584duLq64quvvsKCBQvg5OQET09PxMbGYu/evejZsyf27NmD27dvY/r06Th//jyMjY3Rr18/+Pn5wczMrND527RpgzVr1iAjIwOffvop1q1bp5xXRBAELF++HOvXr0dcXBykUilmzZqFESNGAECRN+rOnTvj9OnTuHLlCubMmYPr168jNzcXTZo0wfLly9G2bdsK+MtRZSYIAhISEoqMBn39X11d3TeWoFKpFFZWVhV2EX358mXo6OgUe8SzIAjYvXs3evToAUtLy3JOR0SkXl6+fInatWvj/v37sLW1FTsOERGRyj179gznz5+HRCKBnp4edHR0oFAokJOTA0tLS3Tu3Bm6urpixxQVy04iLfCu28z79euHqKgo3LlzB05OTkhJScHcuXMxaNAgCIIABwcHyGQytGzZEosWLUJKSgrGjRuHxo0bIzg4WHn+4OBg9O7dG/PmzcOTJ0/g5eUFd3d3rF69GgAwZ84cBAUFwc/PD/Xq1cOFCxcwbtw47N69G25ubrhy5QpatWqFo0ePomnTpjAwMICFhQVOnjyJJ0+eoEWLFpBIJFi7di127NiB8PBwWFlZVejfkbSHIAhITk4uUoK+/snPz39jCSqVSmFra6uyIjQ+Ph43b97Ehx9+WOL8O3bsUH6ZQESkLTZu3IgjR45g3759YkchIiIqd4IgQKFQaH25+b9YdhJpgXeVnbNnz8bq1auRmZmpXOTk4MGDyu0bN26Er68vHj9+rFzo5fTp0+jatSvCw8MhlUrh6emJ33//HY8fP0bVqlUBANu3b4e3tzdSUlIAAFZWVvjzzz/RsWNH5bmnTZuGsLAwHDlypNhzdgqCAHt7eyxfvpxFDokmJSUFkZGRb1w5PjMz840lqFQqRY0aNYo1x85re/fufeut6+9z//595Ofno1GjRiU+lohIU7Vp0wZff/21Vt+6R0REpO04ZyeRlvvfFbb/t2gMDQ1FkyZNCq1o3a5dO+jo6ODevXuQSqUAgCZNmiiLTgBo27YtcnNzERkZiZycHGRnZ6N3796FXisvLw9OTk7vzJeQkICvv/4ap06dQnx8PAoKCpCVlYXY2Niy/NpEZWJhYQELCwu0bNmyyLbU1NRCRei5c+cQEBCAiIgIpKamwtnZ+Y0rxzs6OhYqQgsKCiCRSEo9SrR+/foICgpi2UlEWuPOnTt49OhRiUfDExERUeXCspNIy927dw9169ZVPv7fhYr+twz9t+KWMAqFAgBw8OBB1KpVq9C29y2iMnr0aMTHx2PlypVwcnKCoaEhunfvjtzc3GK9NlFFMzMzg6urK1xdXYtsS0tLQ2RkpHIU6OXLl7Fz505EREQgOTkZdevWVZafhoaGmDlzZpmyGBkZIScnB4aGhmU6DxGRJti8eTM8PT2hp8dLHCIiIm3GTwJEWuzOnTs4evQo5s6d+9Z9GjZsCH9/f6SlpSlHd4aEhEChUKBBgwbK/W7fvo2MjAxlWXrx4kUYGBjA2dkZCoUChoaGiImJQbdu3d74Oq9XfS8oKCj0/Llz57B69Wrl7Wjx8fF4+vRp6X9pIhGZmpqiWbNmaNasWZFtGRkZiIqKUhah9+/fR/Xq1cv0enZ2dkhOToa9vX2ZzkNEpO5ycnKwfft2XLx4UewoREREJDKWnURaIicnB8+ePYNCoUBiYiJOnDiBb7/9Fs2bN4evr+9bjxs+fDjmzZuHUaNGYeHChXj+/DkmTJiAQYMGKW9hB4D8/Hx4eXnhm2++QVxcHGbPno1x48Ypy09fX1/4+vpCEAR06tQJ6enpuHjxInR0dDB+/HjY2NjA2NgYx44dg5OTE4yMjGBmZga5XI7t27ejdevWyMjIwJdffqksRokqExMTEzRu3BiNGzcGABw4cKDM56xSpQoyMjLKfB4iInW3f/9+NG7cGM7OzmJHISIiIpEVf5UEItJox48fR40aNVCrVi10794dBw4cwLx583DmzJkit67/W5UqVXDs2DG8fPkSrVq1Qv/+/dG2bVv4+/sX2q9z585wcXFB165dMXDgQHTr1g3Lli1Tbl+0aBHmz5+PFStWwMXFBT179kRwcDDq1KkDANDT08Pq1auxadMm2Nvbo3///gAAf39/pKeno3nz5vDw8ICXl9d75/kkqgxUsaJ7amoqzM3NVZCGiEi9bd68GWPHjhU7BhEREakBrsZORESkhm7fvg0DAwPUq1ev1OfYu3cvBgwYUKIV4ImINE1MTAyaN2+OR48ewdjYWOw4REREJDJe/RAREamhxo0b486dO6U+/vXCYCw6iaiy27JlCzw8PFh0EhEREQDO2UlERKS2jI2NCy38VRJnzpxBp06dyiEVEZH6KCgowJYtW7B//36xoxAREZGa4HAPIiIiNdW9e3fs3bsXJZ1xJjU1FUlJSbCysiqnZERE6uHEiROwsrJCs2bNxI5CREREaoJlJxERkZoyNDTEhx9+iF27dhW78ExNTcXvv/8Od3f3ck5HRCS+TZs2wdvbW+wYREREpEa4QBEREZGaS0lJweHDh9GiRQs0aNDgjfsoFAr8/fffSE5Ohru7u0pWcyciUmdJSUmQSqWIjo6Gubm52HGIiIhITbDsJCIi0hB37tzBgwcPYGRkBFtbW1SpUgWpqal4+vQpAKBTp068dZ2ItMaqVatw7do1BAYGih2FiIhIpZ49e4ZRo0bh/PnzyMzMLPG0Vv/m6emJpKQkHDp0SIUJ1RvLTiIiIg2Tm5uLpKQkZGZmwszMDJaWllx1nYi0iiAIaNy4MdauXYsuXbqIHYeIiKhEPD09sXXr1iLPt27dGhcvXoSvry+OHj2Kffv2wdTUFHZ2dqV+rdTUVAiCoFV3QXA1diIiIg1jYGAAe3t7sWMQEYnm8uXLyMnJQefOncWOQkREVCo9evQocneCgYEBACAiIgLNmzeHTCYr9fnz8/Ohq6sLMzOzMuXURBwGQkREREREGmXTpk3w8vLi/MRERKSxDA0NYWdnV+jHwsICTk5O2L9/P7Zt2waJRAJPT08AQGxsLAYOHAhTU1OYmppi0KBBePz4sfJ88+fPR6NGjRAQEABnZ2cYGhoiIyMDnp6e6NOnj3I/QRCwbNkyODs7w9jYGI0bN8b27dsr+tcvVxzZSUREREREGiM9PR1BQUG4e/eu2FGIiIhU7sqVKxg2bBgsLCzg5+cHY2NjCIKAAQMGwMjICCdPnoREIsHkyZMxYMAAXLlyRfnl38OHD7Fz507s2bMHBgYGMDIyKnL+uXPnIigoCD/99BPq1auHCxcuYNy4cahevTrc3Nwq+tctFyw7iYiIiIhIY+zZswcdO3bkdB5ERKTRjh49iqpVqxZ6btKkSfj+++9haGgIY2Nj5Vydf/31F27duoXIyEg4OTkBAHbu3AmpVIoTJ06gR48eAF7N7R8YGAhbW9s3vmZGRgZ+/PFH/PnnEHzM9wAAELRJREFUn+jYsSMAoE6dOrh8+TJ++uknlp1EREREREQVbdOmTfjyyy/FjkFERFQmnTp
1woYNGwo997ZFhEJDQ2Fvb68sOgGgbt26sLe3x71795Rlp6Oj41uLTgC4d+8esrOz0bt370JTweTl5RU6t6Zj2UlERERERBohNDQUUVFR+Pjjj8WOQkREVCZVqlSBVCot1r6CILx1nup/P29iYvLO8ygUCgDAwYMHUatWrULb9PX1i5VFE7DsJCIiIiIijeDv74/Ro0dXqgsyIiKi92nYsCGePHmC6Oho5QjMqKgoxMXFoWHDhiU6j6GhIWJiYtCtW7dySis+lp1ERERERKT2cnNzsW3bNpw9e1bsKERERGWWk5ODZ8+eFXpOV1cX1tbWRfbt0aMHmjZtiuHDh2P16tUQBAFTpkyBq6triUpLU1NT+Pr6wtfXF4IgoFOnTkhPT8fFixeho6OD8ePHl/n3UgcsO4mIiIiISO0dOnQI9evXh1wuFzsKERFRmR0/fhw1atQo9JyDgwMeP35cZF+JRILff/8dU6dORZcuXQC8KkDXrFnz1tvb32bRokWwtbXFihUr8Pnnn6NatWpo1qxZpZoPWyIIgiB2CCIiIiIiondxc3PDkCFDMGrUKLGjEBERkRpj2UlERERERGrt8ePHaNKkCR4/fowqVaqIHYeIiIjUmI7YAYiIiIiIiN4lICAAQ4YMYdFJRERE78WRnUREREREpLYUCgWkUil2796NFi1aiB2HiIiI1BxHdhIREWmY+fPno1GjRmLHICKqEKdOnYKpqSmaN28udhQiIiLSACw7/6+9+4/Vuqz/B/68ETkczoFNzrAfgMQRISg4SSAWzjlxobDmPFGK0YaDTQJmbZoZmzSiWBlqLsBsUpow1MCs4a9Vp0z/MGQHiMLDDx2K6CjAgiO/jp3780f7su8JEPCc0+HcPB5/8b7u68frvv86e3Jd7wsA2smuXbvyta99LRdeeGHKysrSt2/fXHPNNXn66adbNe9tt92W559/vo2qBDizLV26NNOnTz/t22YBgLOTY+wA0A62b9+esWPHpmfPnvnOd76TmpqaNDc35/e//33uuuuuvPHGG8eMOXLkSLp169YB1QKcmfbu3Zvq6uq89tpr6d27d0eXAwB0AnZ2AkA7mDlzZorFYtauXZsvfelLGTJkSIYOHZrZs2dnw4YNSZJCoZDFixentrY2FRUVmTNnTv79739n2rRpGThwYMrLy3PRRRflrrvuSnNz89G5//sYe3Nzc+bPn5/+/funrKwsw4cPz69//eujn3/mM5/Jrbfe2qK+ffv2pby8PL/61a+SJMuWLcvo0aPTs2fPnH/++fniF7+YnTt3tudPBHBSy5cvzzXXXCPoBABOmbATANrY3r178+yzz2b27NmprKw85vPzzjvv6L/nzZuXCRMmZOPGjZk1a1aam5vTt2/fPP7443nllVfyve99LwsWLMjPf/7zE65333335Yc//GF+8IMfZOPGjbnuuutSW1ub9evXJ0mmTJmSRx99tEVgumrVqpSXl2fixIlJ/rOrdN68edmwYUNWr16d3bt3Z/LkyW31kwCctmKxmAcffDDTp0/v6FIAgE7EMXYAaGNr1qzJmDFj8sQTT+S66647Yb9CoZDZs2fnxz/+8fvOd8cdd2Tt2rX53e9+l+Q/OztXrlyZv/71r0mSvn375uabb87cuXOPjrniiivSr1+/LFu2LHv27MlHPvKRPPPMMxk3blyS5KqrrsqFF16YBx544LhrNjQ0ZOjQodmxY0f69et3Wt8foC38v53x27ZtS5cu9mgAAKfGXw0A0MZO5/8RR40adUzbT37yk4waNSp9+vRJZWVl7r333uO+4zP5z3H0t956K2PHjm3Rftlll2XTpk1JkqqqqowfPz7Lly9Pkrz99tv5wx/+kClTphztX19fn2uvvTYDBgxIz549j9Z1onUB2tvSpUtz0003CToBgNPiLwcAaGMXXXRRCoVCXnnllZP2raioaPH82GOP5etf/3qmTp2a5557LuvXr8/MmTNz5MiR953neLcU//9tU6ZMyapVq3Lo0KGsWLEi/fv3z2WXXZYkeffddzN+/Pj06NEjjzzySF5++eU8++yzSXLSdQHaw4EDB/LYY49l6tSpHV0KANDJCDsBoI317t0748ePz6JFi9LY2HjM5//85z9POPbFF1/MmDFjMnv27IwcOTKDBg3Kq6++esL+vXr1ykc/+tG8+OKLx8wzbNiwo8/XXnttkmT16tVZvnx5vvzlLx8NQxsaGrJ79+4sWLAgl19+eT7+8Y/n73//+2l9Z4C2tHLlylx66aXp379/R5cCAHQywk4AaAdLlixJsVjMqFGj8stf/jKbN29OQ0ND7r///owYMeKE4wYPHpz6+vo888wz2bp1a+bPn5/nn3/+fdf6xje+kYULF2bFihXZsmVL5s6dmxdeeKHFDezdu3dPbW1tvvvd76a+vr7FEfYLLrggZWVlWbRoUV577bU89dRTufPOO1v/IwB8QEuXLs20adM6ugwAoBPq2tEFAEApGjhwYOrr67NgwYJ885vfzM6dO1NVVZWampoTXgqUJDfffHPWr1+fG2+8McViMV/4whdy66235mc/+9kJx9xyyy3Zv39/br/99uzatStDhgzJqlWr8qlPfapFv6985St56KGHMnLkyAwdOvRoe58+ffLwww9nzpw5Wbx4cUaMGJF77rknV199det/CIDTtGXLljQ0NOTzn/98R5cCAHRCbmMHAADOGHfccUfee++9LFy4sKNLAQA6IWEnAABwRnjvvffSv3//1NXVtdiBDgBwqryzEwAAOCM8/fTTqa6uFnQCAB+YsBMAADgjPPjggy4mAgBaxTF2AACgw7311lv5xCc+kR07dqSysrKjywEAOik7OwEAgA738MMPZ9KkSYJOAKBV7OwEAAA6VLFYzODBg/PII4/k0ksv7ehyAIBOzM5OAACgQ/3pT39KWVlZxowZ09GlAACdXNeOLgAAADg7HD58OHV1dWlqajrads4552TZsmWZNm1aCoVCB1YHAJQCYScAANCu3nzzzbz00kspKyvLuHHj0qNHj6OfHTx4MFu3bk1VVVVef/31DBgwoAMrBQA6O+/sBAAA2k19fX327NmTq6666qQ7N+vq6tKzZ8+MHj36f1QdAFBqhJ0AAEC7+Mtf/pLGxsZ89rOfPeUxa9asSdeuXTNy5Mh2rAwAKFUuKAIAANrcoUOHsnnz5tMKOpPkkksuyeuvv5533323nSoDAEqZsBMAAGhzdXV1mThx4gcaO2HChNTV1bVxRQDA2UDYCQAAtLmDBw+2uIjodJSVleXw4cPxxi0A4HQJOwEAgDa1bdu2DB48uFVz1NTU5G9/+1sbVQQAnC2EnQAAQJt68803M2DAgFbNccEFF2Tnzp1tVBEAcLYQdgIAAG3q8OHDKSsra9Uc5557bpqamtqoIgDgbCHsBAAA2tR5552Xd955p1Vz7Nu3L7169WqjigCAs4WwEwAAaFPDhw9PfX19q+b485//nIsvvriNKgIAzhbCTgAAoE2Vl5fn4MGDrZqjsbExPXv2bKOKAICzhbATAABoczU1NVm3bt
0HGrtp06YMHTq0jSsCAM4Gwk4AAKDNDRo0KA0NDWlsbDytcQcOHEh9fX2GDRvWTpUBAKVM2AkAALSL66+/PitXrsy//vWvU+q/f//+PP7447nhhhvauTIAoFQVisVisaOLAAAASlNzc3OefPLJlJeXZ9y4cenWrdsxfZqamlJXV5f9+/entrY2XbrYkwEAfDDCTgAAoN01Njamrq4uTU1NOffcc9OtW7ccOXIkTU1N6dq1a6688koXEgEArSbsBAAA/qeKxeLR0LNQKHR0OQBACRF2AgAAAAAlwctwAAAAAICSIOwEAAAAAEqCsBMAAAAAKAnCTgAAAACgJAg7AQAAAICSIOwEAAAAAEqCsBMAAAAAKAnCTgAAAACgJAg7AQAAAICSIOwEAAAAAEqCsBMAAAAAKAnCTgAAoFU+9rGPZeHChf+Ttf74xz+mUChk9+7d/5P1AIDOpVAsFosdXQQAAHBm2rVrV77//e9n9erV2bFjR3r16pVBgwZl8uTJuemmm1JZWZl//OMfqaioSI8ePdq9niNHjmTv3r350Ic+lEKh0O7rAQCdS9eOLgAAADgzbd++PWPHjk2vXr0yf/78jBgxIs3NzdmyZUt+8YtfpKqqKjfeeGP69OnT6rWOHDmSbt26nbRft27d8uEPf7jV6wEApckxdgAA4Li++tWvpkuXLlm7dm1uuOGGDBs2LJ/85CdTW1ubJ598MpMnT05y7DH2QqGQlStXtpjreH0WL16c2traVFRUZM6cOUmSp556KkOGDEn37t1z+eWX59FHH02hUMj27duTHHuM/aGHHkplZWWLtRx1B4Czl7ATAAA4xt69e/Pcc89l1qxZqaioOG6f1h4jnzdvXiZMmJCNGzdm1qxZeeONN1JbW5uJEydmw4YNueWWW3L77be3ag0A4Owi7AQAAI6xdevWFIvFDBkypEV7v379UllZmcrKysyYMaNVa1x//fWZPn16qqurM3DgwNx///2prq7O3XffnSFDhmTSpEmtXgMAOLsIOwEAgFP2wgsvZP369bnkkkty6NChVs01atSoFs8NDQ0ZPXp0ix2jY8aMadUaAMDZxQVFAADAMQYNGpRCoZCGhoYW7QMHDkyS9715vVAopFgstmhramo6pt9/H48vFounfTS+S5cup7QWAHB2sLMTAAA4RlVVVT73uc9l0aJFaWxsPK2xffr0ydtvv330edeuXS2eT2To0KF5+eWXW7StWbPmpGsdOHAg+/btO9q2fv3606oXACgdwk4AAOC4lixZkubm5nz605/OihUrsmnTpmzZsiUrVqzIhg0bcs455xx33JVXXpnFixdn7dq1WbduXaZOnZru3bufdL0ZM2bk1VdfzW233ZbNmzfniSeeyAMPPJDkxJchjRkzJhUVFfnWt76Vbdu2ZdWqVVmyZMkH/9IAQKcm7AQAAI6ruro669aty9VXX50777wzF198cUaOHJl77rknM2fOzI9+9KPjjrv77rtTXV2dK664IpMmTcr06dNz/vnnn3S9AQMGZNWqVfnNb36Tmpqa3Hvvvfn2t7+dJCcMS3v37p3ly5fnt7/9bYYPH56f/vSnmT9//gf/0gBAp1Yo/vcLbgAAAM4Q9913X+bOnZt33nknXbrYqwEAvD8XFAEAAGeMxYsXZ/To0enTp09eeumlzJ8/P1OnThV0AgCnRNgJAACcMbZt25YFCxZkz5496devX2bMmJG5c+d2dFkAQCfhGDsAAAAAUBKcBQEAAAAASoKwEwAAAAAoCcJOAAAAAKAkCDsBAAAAgJIg7AQAAAAASoKwEwAAAAAoCcJOAAAAAKAkCDsBAAAAgJIg7AQAAAAASoKwEwAAAAAoCcJOAAAAAKAkCDsBAAAAgJIg7AQAAAAASoKwEwAAAAAoCcJOAAAAAKAkCDsBAAAAgJIg7AQAAAAASoKwEwAAAAAoCcJOAAAAAKAkCDsBAAAAgJIg7AQAAAAASoKwEwAAAAAoCcJOAAAAAKAkCDsBAAAAgJIg7AQAAAAASoKwEwAAAAAoCcJOAAAAAKAkCDsBAAAAgJIg7AQAAAAASoKwEwAAAAAoCf8HebVl/k0i9zQAAAAASUVORK5CYII=",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "show_map(romania_graph_data)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Voila! You see, the romania map as shown in the Figure[3.2] in the book. Now, see how different searching algorithms perform with our problem statements."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## SIMPLE PROBLEM SOLVING AGENT PROGRAM\n",
+ "\n",
+ "Let us now define a Simple Problem Solving Agent Program. Run the next cell to see how the abstract class `SimpleProblemSolvingAgentProgram` is defined in the search module."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "class SimpleProblemSolvingAgentProgram : \n",
+ "\n",
+ " """Abstract framework for a problem-solving agent. [Figure 3.1]""" \n",
+ "\n",
+ " def __init__ ( self , initial_state = None ): \n",
+ " """State is an abstract representation of the state \n",
+ " of the world, and seq is the list of actions required \n",
+ " to get to a particular state from the initial state(root).""" \n",
+ " self . state = initial_state \n",
+ " self . seq = [] \n",
+ "\n",
+ " def __call__ ( self , percept ): \n",
+ " """[Figure 3.1] Formulate a goal and problem, then \n",
+ " search for a sequence of actions to solve it.""" \n",
+ " self . state = self . update_state ( self . state , percept ) \n",
+ " if not self . seq : \n",
+ " goal = self . formulate_goal ( self . state ) \n",
+ " problem = self . formulate_problem ( self . state , goal ) \n",
+ " self . seq = self . search ( problem ) \n",
+ " if not self . seq : \n",
+ " return None \n",
+ " return self . seq . pop ( 0 ) \n",
+ "\n",
+ " def update_state ( self , state , percept ): \n",
+ " raise NotImplementedError \n",
+ "\n",
+ " def formulate_goal ( self , state ): \n",
+ " raise NotImplementedError \n",
+ "\n",
+ " def formulate_problem ( self , state , goal ): \n",
+ " raise NotImplementedError \n",
+ "\n",
+ " def search ( self , problem ): \n",
+ " raise NotImplementedError \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(SimpleProblemSolvingAgentProgram)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The SimpleProblemSolvingAgentProgram class has six methods: \n",
+ "\n",
+ "* `__init__(self, intial_state=None)`: This is the `contructor` of the class and is the first method to be called when the class is instantiated. It takes in a keyword argument, `initial_state` which is initially `None`. The argument `initial_state` represents the state from which the agent starts.\n",
+ "\n",
+ "* `__call__(self, percept)`: This method updates the `state` of the agent based on its `percept` using the `update_state` method. It then formulates a `goal` with the help of `formulate_goal` method and a `problem` using the `formulate_problem` method and returns a sequence of actions to solve it (using the `search` method).\n",
+ "\n",
+ "* `update_state(self, percept)`: This method updates the `state` of the agent based on its `percept`.\n",
+ "\n",
+ "* `formulate_goal(self, state)`: Given a `state` of the agent, this method formulates the `goal` for it.\n",
+ "\n",
+ "* `formulate_problem(self, state, goal)`: It is used in problem formulation given a `state` and a `goal` for the `agent`.\n",
+ "\n",
+ "* `search(self, problem)`: This method is used to search a sequence of `actions` to solve a `problem`."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let us now define a Simple Problem Solving Agent Program. We will create a simple `vacuumAgent` class which will inherit from the abstract class `SimpleProblemSolvingAgentProgram` and overrides its methods. We will create a simple intelligent vacuum agent which can be in any one of the following states. It will move to any other state depending upon the current state as shown in the picture by arrows:\n",
+ "\n",
+ ""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class vacuumAgent(SimpleProblemSolvingAgentProgram):\n",
+ " def update_state(self, state, percept):\n",
+ " return percept\n",
+ "\n",
+ " def formulate_goal(self, state):\n",
+ " goal = [state7, state8]\n",
+ " return goal \n",
+ "\n",
+ " def formulate_problem(self, state, goal):\n",
+ " problem = state\n",
+ " return problem \n",
+ " \n",
+ " def search(self, problem):\n",
+ " if problem == state1:\n",
+ " seq = [\"Suck\", \"Right\", \"Suck\"]\n",
+ " elif problem == state2:\n",
+ " seq = [\"Suck\", \"Left\", \"Suck\"]\n",
+ " elif problem == state3:\n",
+ " seq = [\"Right\", \"Suck\"]\n",
+ " elif problem == state4:\n",
+ " seq = [\"Suck\"]\n",
+ " elif problem == state5:\n",
+ " seq = [\"Suck\"]\n",
+ " elif problem == state6:\n",
+ " seq = [\"Left\", \"Suck\"]\n",
+ " return seq"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now, we will define all the 8 states and create an object of the above class. Then, we will pass it different states and check the output:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Left\n",
+ "Suck\n",
+ "Right\n"
+ ]
+ }
+ ],
+ "source": [
+ "state1 = [(0, 0), [(0, 0), \"Dirty\"], [(1, 0), [\"Dirty\"]]]\n",
+ "state2 = [(1, 0), [(0, 0), \"Dirty\"], [(1, 0), [\"Dirty\"]]]\n",
+ "state3 = [(0, 0), [(0, 0), \"Clean\"], [(1, 0), [\"Dirty\"]]]\n",
+ "state4 = [(1, 0), [(0, 0), \"Clean\"], [(1, 0), [\"Dirty\"]]]\n",
+ "state5 = [(0, 0), [(0, 0), \"Dirty\"], [(1, 0), [\"Clean\"]]]\n",
+ "state6 = [(1, 0), [(0, 0), \"Dirty\"], [(1, 0), [\"Clean\"]]]\n",
+ "state7 = [(0, 0), [(0, 0), \"Clean\"], [(1, 0), [\"Clean\"]]]\n",
+ "state8 = [(1, 0), [(0, 0), \"Clean\"], [(1, 0), [\"Clean\"]]]\n",
+ "\n",
+ "a = vacuumAgent(state1)\n",
+ "\n",
+ "print(a(state6)) \n",
+ "print(a(state1))\n",
+ "print(a(state3))"
+ ]
+ },
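+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "If the agent is already in one of the goal states, there is nothing left to do. Note that this relies on the small `else` branch we added to `search` above (not part of the original class): the empty action sequence makes `__call__` return `None` instead of raising a `NameError`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "b = vacuumAgent(state7)\n",
+ "print(b(state7))  # None: both squares are already clean, so no action is needed"
+ ]
+ },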
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## SEARCHING ALGORITHMS VISUALIZATION\n",
+ "\n",
+ "In this section, we have visualizations of the following searching algorithms:\n",
+ "\n",
+ "1. Breadth First Tree Search\n",
+ "2. Depth First Tree Search\n",
+ "3. Breadth First Search\n",
+ "4. Depth First Graph Search\n",
+ "5. Best First Graph Search\n",
+ "6. Uniform Cost Search\n",
+ "7. Depth Limited Search\n",
+ "8. Iterative Deepening Search\n",
+ "9. Greedy Best First Search\n",
+ "9. A\\*-Search\n",
+ "10. Recursive Best First Search\n",
+ "\n",
+ "We add the colors to the nodes to have a nice visualisation when displaying. So, these are the different colors we are using in these visuals:\n",
+ "* Un-explored nodes - white \n",
+ "* Frontier nodes - orange \n",
+ "* Currently exploring node - red \n",
+ "* Already explored nodes - gray "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 1. BREADTH-FIRST TREE SEARCH\n",
+ "\n",
+ "We have a working implementation in search module. But as we want to interact with the graph while it is searching, we need to modify the implementation. Here's the modified breadth first tree search."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def tree_breadth_search_for_vis(problem):\n",
+ " \"\"\"Search through the successors of a problem to find a goal.\n",
+ " The argument frontier should be an empty queue.\n",
+ " Don't worry about repeated paths to a state. [Figure 3.7]\"\"\"\n",
+ " \n",
+ " # we use these two variables at the time of visualisations\n",
+ " iterations = 0\n",
+ " all_node_colors = []\n",
+ " node_colors = {k : 'white' for k in problem.graph.nodes()}\n",
+ " \n",
+ " #Adding first node to the queue\n",
+ " frontier = deque([Node(problem.initial)])\n",
+ " \n",
+ " node_colors[Node(problem.initial).state] = \"orange\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " \n",
+ " while frontier:\n",
+ " #Popping first node of queue\n",
+ " node = frontier.popleft()\n",
+ " \n",
+ " # modify the currently searching node to red\n",
+ " node_colors[node.state] = \"red\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " \n",
+ " if problem.goal_test(node.state):\n",
+ " # modify goal node to green after reaching the goal\n",
+ " node_colors[node.state] = \"green\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " return(iterations, all_node_colors, node)\n",
+ " \n",
+ " frontier.extend(node.expand(problem))\n",
+ " \n",
+ " for n in node.expand(problem):\n",
+ " node_colors[n.state] = \"orange\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ "\n",
+ " # modify the color of explored nodes to gray\n",
+ " node_colors[node.state] = \"gray\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " \n",
+ " return None\n",
+ "\n",
+ "def breadth_first_tree_search(problem):\n",
+ " \"Search the shallowest nodes in the search tree first.\"\n",
+ " iterations, all_node_colors, node = tree_breadth_search_for_vis(problem)\n",
+ " return(iterations, all_node_colors, node)"
+ ]
+ },
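+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A brief aside on the `all_node_colors.append(dict(node_colors))` idiom used above: `dict(node_colors)` copies the mapping, freezing the current frame. Appending `node_colors` itself would store a reference, and every later color change would silently rewrite all previously recorded frames. A minimal sketch:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "colors = {'Arad': 'white'}\n",
+ "frames = [colors]            # stores a reference, not a snapshot\n",
+ "colors['Arad'] = 'red'\n",
+ "print(frames[0]['Arad'])     # red -- the stored frame changed underneath us\n",
+ "frames = [dict(colors)]      # dict(...) copies the mapping, freezing the frame\n",
+ "colors['Arad'] = 'gray'\n",
+ "print(frames[0]['Arad'])     # red -- the snapshot is unaffected"
+ ]
+ },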
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now, we use `ipywidgets` to display a slider, a button and our romania map. By sliding the slider we can have a look at all the intermediate steps of a particular search algorithm. By pressing the button **Visualize**, you can see all the steps without interacting with the slider. These two helper functions are the callback functions which are called when we interact with the slider and the button."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "all_node_colors = []\n",
+ "romania_problem = GraphProblem('Arad', 'Bucharest', romania_map)\n",
+ "a, b, c = breadth_first_tree_search(romania_problem)\n",
+ "display_visual(romania_graph_data, user_input=False, \n",
+ " algorithm=breadth_first_tree_search, \n",
+ " problem=romania_problem)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 2. DEPTH-FIRST TREE SEARCH\n",
+ "Now let's discuss another searching algorithm, Depth-First Tree Search."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def tree_depth_search_for_vis(problem):\n",
+ " \"\"\"Search through the successors of a problem to find a goal.\n",
+ " The argument frontier should be an empty queue.\n",
+ " Don't worry about repeated paths to a state. [Figure 3.7]\"\"\"\n",
+ " \n",
+ " # we use these two variables at the time of visualisations\n",
+ " iterations = 0\n",
+ " all_node_colors = []\n",
+ " node_colors = {k : 'white' for k in problem.graph.nodes()}\n",
+ " \n",
+ " #Adding first node to the stack\n",
+ " frontier = [Node(problem.initial)]\n",
+ " \n",
+ " node_colors[Node(problem.initial).state] = \"orange\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " \n",
+ " while frontier:\n",
+ " #Popping first node of stack\n",
+ " node = frontier.pop()\n",
+ " \n",
+ " # modify the currently searching node to red\n",
+ " node_colors[node.state] = \"red\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " \n",
+ " if problem.goal_test(node.state):\n",
+ " # modify goal node to green after reaching the goal\n",
+ " node_colors[node.state] = \"green\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " return(iterations, all_node_colors, node)\n",
+ " \n",
+ " frontier.extend(node.expand(problem))\n",
+ " \n",
+ " for n in node.expand(problem):\n",
+ " node_colors[n.state] = \"orange\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ "\n",
+ " # modify the color of explored nodes to gray\n",
+ " node_colors[node.state] = \"gray\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " \n",
+ " return None\n",
+ "\n",
+ "def depth_first_tree_search(problem):\n",
+ " \"Search the deepest nodes in the search tree first.\"\n",
+ " iterations, all_node_colors, node = tree_depth_search_for_vis(problem)\n",
+ " return(iterations, all_node_colors, node)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "all_node_colors = []\n",
+ "romania_problem = GraphProblem('Arad', 'Bucharest', romania_map)\n",
+ "display_visual(romania_graph_data, user_input=False, \n",
+ " algorithm=depth_first_tree_search, \n",
+ " problem=romania_problem)"
+ ]
+ },
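+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Tree search keeps no record of visited states, so on an undirected map every expansion regenerates the node's own parent, and the same city can re-enter the frontier over and over. A quick sketch of the effect (the particular neighbor expanded first may vary). Graph search, covered next, avoids this by maintaining an `explored` set:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "problem = GraphProblem('Arad', 'Bucharest', romania_map)\n",
+ "child = Node(problem.initial).expand(problem)[0]\n",
+ "print([n.state for n in child.expand(problem)])  # 'Arad' reappears among the successors"
+ ]
+ },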
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "collapsed": true
+ },
+ "source": [
+ "## 3. BREADTH-FIRST GRAPH SEARCH\n",
+ "\n",
+ "Let's change all the `node_colors` to starting position and define a different problem statement."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def breadth_first_search_graph(problem):\n",
+ " \"[Figure 3.11]\"\n",
+ " \n",
+ " # we use these two variables at the time of visualisations\n",
+ " iterations = 0\n",
+ " all_node_colors = []\n",
+ " node_colors = {k : 'white' for k in problem.graph.nodes()}\n",
+ " \n",
+ " node = Node(problem.initial)\n",
+ " \n",
+ " node_colors[node.state] = \"red\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " \n",
+ " if problem.goal_test(node.state):\n",
+ " node_colors[node.state] = \"green\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " return(iterations, all_node_colors, node)\n",
+ " \n",
+ " frontier = deque([node])\n",
+ " \n",
+ " # modify the color of frontier nodes to blue\n",
+ " node_colors[node.state] = \"orange\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " \n",
+ " explored = set()\n",
+ " while frontier:\n",
+ " node = frontier.popleft()\n",
+ " node_colors[node.state] = \"red\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " \n",
+ " explored.add(node.state) \n",
+ " \n",
+ " for child in node.expand(problem):\n",
+ " if child.state not in explored and child not in frontier:\n",
+ " if problem.goal_test(child.state):\n",
+ " node_colors[child.state] = \"green\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " return(iterations, all_node_colors, child)\n",
+ " frontier.append(child)\n",
+ "\n",
+ " node_colors[child.state] = \"orange\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " \n",
+ " node_colors[node.state] = \"gray\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " return None"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "all_node_colors = []\n",
+ "romania_problem = GraphProblem('Arad', 'Bucharest', romania_map)\n",
+ "display_visual(romania_graph_data, user_input=False, \n",
+ " algorithm=breadth_first_search_graph, \n",
+ " problem=romania_problem)"
+ ]
+ },
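+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Because graph search skips states it has already explored, it records noticeably fewer visualization frames than the tree version on the same problem. A quick comparison (exact counts depend on expansion order):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "tree_iterations, _, _ = breadth_first_tree_search(GraphProblem('Arad', 'Bucharest', romania_map))\n",
+ "graph_iterations, _, _ = breadth_first_search_graph(GraphProblem('Arad', 'Bucharest', romania_map))\n",
+ "print(tree_iterations, graph_iterations)"
+ ]
+ },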
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 4. DEPTH-FIRST GRAPH SEARCH \n",
+ "Although we have a working implementation in search module, we have to make a few changes in the algorithm to make it suitable for visualization."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def graph_search_for_vis(problem):\n",
+ " \"\"\"Search through the successors of a problem to find a goal.\n",
+ " The argument frontier should be an empty queue.\n",
+ " If two paths reach a state, only use the first one. [Figure 3.7]\"\"\"\n",
+ " # we use these two variables at the time of visualisations\n",
+ " iterations = 0\n",
+ " all_node_colors = []\n",
+ " node_colors = {k : 'white' for k in problem.graph.nodes()}\n",
+ " \n",
+ " frontier = [(Node(problem.initial))]\n",
+ " explored = set()\n",
+ " \n",
+ " # modify the color of frontier nodes to orange\n",
+ " node_colors[Node(problem.initial).state] = \"orange\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " \n",
+ " while frontier:\n",
+ " # Popping first node of stack\n",
+ " node = frontier.pop()\n",
+ " \n",
+ " # modify the currently searching node to red\n",
+ " node_colors[node.state] = \"red\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " \n",
+ " if problem.goal_test(node.state):\n",
+ " # modify goal node to green after reaching the goal\n",
+ " node_colors[node.state] = \"green\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " return(iterations, all_node_colors, node)\n",
+ " \n",
+ " explored.add(node.state)\n",
+ " frontier.extend(child for child in node.expand(problem)\n",
+ " if child.state not in explored and\n",
+ " child not in frontier)\n",
+ " \n",
+ " for n in frontier:\n",
+ " # modify the color of frontier nodes to orange\n",
+ " node_colors[n.state] = \"orange\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ "\n",
+ " # modify the color of explored nodes to gray\n",
+ " node_colors[node.state] = \"gray\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " \n",
+ " return None\n",
+ "\n",
+ "\n",
+ "def depth_first_graph_search(problem):\n",
+ " \"\"\"Search the deepest nodes in the search tree first.\"\"\"\n",
+ " iterations, all_node_colors, node = graph_search_for_vis(problem)\n",
+ " return(iterations, all_node_colors, node)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "all_node_colors = []\n",
+ "romania_problem = GraphProblem('Arad', 'Bucharest', romania_map)\n",
+ "display_visual(romania_graph_data, user_input=False, \n",
+ " algorithm=depth_first_graph_search, \n",
+ " problem=romania_problem)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 5. BEST FIRST SEARCH\n",
+ "\n",
+ "Let's change all the `node_colors` to starting position and define a different problem statement."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def best_first_graph_search_for_vis(problem, f):\n",
+ " \"\"\"Search the nodes with the lowest f scores first.\n",
+ " You specify the function f(node) that you want to minimize; for example,\n",
+ " if f is a heuristic estimate to the goal, then we have greedy best\n",
+ " first search; if f is node.depth then we have breadth-first search.\n",
+ " There is a subtlety: the line \"f = memoize(f, 'f')\" means that the f\n",
+ " values will be cached on the nodes as they are computed. So after doing\n",
+ " a best first search you can examine the f values of the path returned.\"\"\"\n",
+ " \n",
+ " # we use these two variables at the time of visualisations\n",
+ " iterations = 0\n",
+ " all_node_colors = []\n",
+ " node_colors = {k : 'white' for k in problem.graph.nodes()}\n",
+ " \n",
+ " f = memoize(f, 'f')\n",
+ " node = Node(problem.initial)\n",
+ " \n",
+ " node_colors[node.state] = \"red\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " \n",
+ " if problem.goal_test(node.state):\n",
+ " node_colors[node.state] = \"green\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " return(iterations, all_node_colors, node)\n",
+ " \n",
+ " frontier = PriorityQueue('min', f)\n",
+ " frontier.append(node)\n",
+ " \n",
+ " node_colors[node.state] = \"orange\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " \n",
+ " explored = set()\n",
+ " while frontier:\n",
+ " node = frontier.pop()\n",
+ " \n",
+ " node_colors[node.state] = \"red\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " \n",
+ " if problem.goal_test(node.state):\n",
+ " node_colors[node.state] = \"green\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " return(iterations, all_node_colors, node)\n",
+ " \n",
+ " explored.add(node.state)\n",
+ " for child in node.expand(problem):\n",
+ " if child.state not in explored and child not in frontier:\n",
+ " frontier.append(child)\n",
+ " node_colors[child.state] = \"orange\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " elif child in frontier:\n",
+ " incumbent = frontier[child]\n",
+ " if f(child) < incumbent:\n",
+ " del frontier[child]\n",
+ " frontier.append(child)\n",
+ " node_colors[child.state] = \"orange\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ "\n",
+ " node_colors[node.state] = \"gray\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " return None"
+ ]
+ },
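+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As the docstring notes, the behavior of best-first search is determined entirely by `f`. For example, minimizing `f(n) = n.depth` makes it expand shallow nodes first, like breadth-first search; the next sections plug in `g(n)`, `h(n)` and `g(n) + h(n)` instead:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# best-first search with f(n) = depth behaves like breadth-first search\n",
+ "iterations, colors, node = best_first_graph_search_for_vis(\n",
+ "    GraphProblem('Arad', 'Bucharest', romania_map), lambda n: n.depth)\n",
+ "print(node.solution())"
+ ]
+ },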
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 6. UNIFORM COST SEARCH\n",
+ "\n",
+ "Let's change all the `node_colors` to starting position and define a different problem statement."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def uniform_cost_search_graph(problem):\n",
+ " \"[Figure 3.14]\"\n",
+ " #Uniform Cost Search uses Best First Search algorithm with f(n) = g(n)\n",
+ " iterations, all_node_colors, node = best_first_graph_search_for_vis(problem, lambda node: node.path_cost)\n",
+ " return(iterations, all_node_colors, node)\n"
+ ]
+ },
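+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Since uniform cost search expands nodes in order of increasing path cost, the node it returns carries the cheapest route. On this map the optimal Arad-to-Bucharest cost is 418, via Sibiu, Rimnicu Vilcea and Pitesti:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "iterations, colors, node = uniform_cost_search_graph(GraphProblem('Arad', 'Bucharest', romania_map))\n",
+ "print(node.solution(), node.path_cost)  # expected path cost: 418"
+ ]
+ },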
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "scrolled": false
+ },
+ "outputs": [],
+ "source": [
+ "all_node_colors = []\n",
+ "romania_problem = GraphProblem('Arad', 'Bucharest', romania_map)\n",
+ "display_visual(romania_graph_data, user_input=False, \n",
+ " algorithm=uniform_cost_search_graph, \n",
+ " problem=romania_problem)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 7. DEPTH LIMITED SEARCH\n",
+ "\n",
+ "Let's change all the 'node_colors' to starting position and define a different problem statement. \n",
+ "Although we have a working implementation, but we need to make changes."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 25,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def depth_limited_search_graph(problem, limit = -1):\n",
+ " '''\n",
+ " Perform depth first search of graph g.\n",
+ " if limit >= 0, that is the maximum depth of the search.\n",
+ " '''\n",
+ " # we use these two variables at the time of visualisations\n",
+ " iterations = 0\n",
+ " all_node_colors = []\n",
+ " node_colors = {k : 'white' for k in problem.graph.nodes()}\n",
+ " \n",
+ " frontier = [Node(problem.initial)]\n",
+ " explored = set()\n",
+ " \n",
+ " cutoff_occurred = False\n",
+ " node_colors[Node(problem.initial).state] = \"orange\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " \n",
+ " while frontier:\n",
+ " # Popping first node of queue\n",
+ " node = frontier.pop()\n",
+ " \n",
+ " # modify the currently searching node to red\n",
+ " node_colors[node.state] = \"red\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " \n",
+ " if problem.goal_test(node.state):\n",
+ " # modify goal node to green after reaching the goal\n",
+ " node_colors[node.state] = \"green\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " return(iterations, all_node_colors, node)\n",
+ "\n",
+ " elif limit >= 0:\n",
+ " cutoff_occurred = True\n",
+ " limit += 1\n",
+ " all_node_colors.pop()\n",
+ " iterations -= 1\n",
+ " node_colors[node.state] = \"gray\"\n",
+ "\n",
+ " \n",
+ " explored.add(node.state)\n",
+ " frontier.extend(child for child in node.expand(problem)\n",
+ " if child.state not in explored and\n",
+ " child not in frontier)\n",
+ " \n",
+ " for n in frontier:\n",
+ " limit -= 1\n",
+ " # modify the color of frontier nodes to orange\n",
+ " node_colors[n.state] = \"orange\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ "\n",
+ " # modify the color of explored nodes to gray\n",
+ " node_colors[node.state] = \"gray\"\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " \n",
+ " return 'cutoff' if cutoff_occurred else None\n",
+ "\n",
+ "\n",
+ "def depth_limited_search_for_vis(problem):\n",
+ " \"\"\"Search the deepest nodes in the search tree first.\"\"\"\n",
+ " iterations, all_node_colors, node = depth_limited_search_graph(problem)\n",
+ " return(iterations, all_node_colors, node) "
+ ]
+ },
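+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "With the cutoff logic above, a small limit stops the search before any path reaches Bucharest. The shortest route from Arad takes three steps, so a limit of 2 returns `'cutoff'`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "result = depth_limited_search_graph(GraphProblem('Arad', 'Bucharest', romania_map), limit=2)\n",
+ "print(result)  # 'cutoff': Bucharest is more than two steps away from Arad"
+ ]
+ },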
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "all_node_colors = []\n",
+ "romania_problem = GraphProblem('Arad', 'Bucharest', romania_map)\n",
+ "display_visual(romania_graph_data, user_input=False, \n",
+ " algorithm=depth_limited_search_for_vis, \n",
+ " problem=romania_problem)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 8. ITERATIVE DEEPENING SEARCH\n",
+ "\n",
+ "Let's change all the 'node_colors' to starting position and define a different problem statement. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 27,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def iterative_deepening_search_for_vis(problem):\n",
+ " for depth in range(sys.maxsize):\n",
+ " iterations, all_node_colors, node=depth_limited_search_for_vis(problem)\n",
+ " if iterations:\n",
+ " return (iterations, all_node_colors, node)"
+ ]
+ },
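+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "With the depth bound now threaded through to `depth_limited_search_for_vis` (see the fix above), iterative deepening retries the depth-limited search with limits 0, 1, 2, ... and stops at the first bound deep enough to reach Bucharest:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "iterations, colors, node = iterative_deepening_search_for_vis(GraphProblem('Arad', 'Bucharest', romania_map))\n",
+ "print(node.solution())  # found once the depth bound reaches 3, the shallowest goal depth"
+ ]
+ },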
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "all_node_colors = []\n",
+ "romania_problem = GraphProblem('Arad', 'Bucharest', romania_map)\n",
+ "display_visual(romania_graph_data, user_input=False, \n",
+ " algorithm=iterative_deepening_search_for_vis, \n",
+ " problem=romania_problem)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 9. GREEDY BEST FIRST SEARCH\n",
+ "Let's change all the node_colors to starting position and define a different problem statement."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 29,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def greedy_best_first_search(problem, h=None):\n",
+ " \"\"\"Greedy Best-first graph search is an informative searching algorithm with f(n) = h(n).\n",
+ " You need to specify the h function when you call best_first_search, or\n",
+ " else in your Problem subclass.\"\"\"\n",
+ " h = memoize(h or problem.h, 'h')\n",
+ " iterations, all_node_colors, node = best_first_graph_search_for_vis(problem, lambda n: h(n))\n",
+ " return(iterations, all_node_colors, node)\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "all_node_colors = []\n",
+ "romania_problem = GraphProblem('Arad', 'Bucharest', romania_map)\n",
+ "display_visual(romania_graph_data, user_input=False, \n",
+ " algorithm=greedy_best_first_search, \n",
+ " problem=romania_problem)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 10. A\\* SEARCH\n",
+ "\n",
+ "Let's change all the `node_colors` to starting position and define a different problem statement."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 31,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def astar_search_graph(problem, h=None):\n",
+ " \"\"\"A* search is best-first graph search with f(n) = g(n)+h(n).\n",
+ " You need to specify the h function when you call astar_search, or\n",
+ " else in your Problem subclass.\"\"\"\n",
+ " h = memoize(h or problem.h, 'h')\n",
+ " iterations, all_node_colors, node = best_first_graph_search_for_vis(problem, \n",
+ " lambda n: n.path_cost + h(n))\n",
+ " return(iterations, all_node_colors, node)\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "all_node_colors = []\n",
+ "romania_problem = GraphProblem('Arad', 'Bucharest', romania_map)\n",
+ "display_visual(romania_graph_data, user_input=False, \n",
+ " algorithm=astar_search_graph, \n",
+ " problem=romania_problem)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 11. RECURSIVE BEST FIRST SEARCH\n",
+ "Let's change all the `node_colors` to starting position and define a different problem statement."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 33,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def recursive_best_first_search_for_vis(problem, h=None):\n",
+ " \"\"\"[Figure 3.26] Recursive best-first search\"\"\"\n",
+ " # we use these two variables at the time of visualizations\n",
+ " iterations = 0\n",
+ " all_node_colors = []\n",
+ " node_colors = {k : 'white' for k in problem.graph.nodes()}\n",
+ " \n",
+ " h = memoize(h or problem.h, 'h')\n",
+ " \n",
+ " def RBFS(problem, node, flimit):\n",
+ " nonlocal iterations\n",
+ " def color_city_and_update_map(node, color):\n",
+ " node_colors[node.state] = color\n",
+ " nonlocal iterations\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " \n",
+ " if problem.goal_test(node.state):\n",
+ " color_city_and_update_map(node, 'green')\n",
+ " return (iterations, all_node_colors, node), 0 # the second value is immaterial\n",
+ " \n",
+ " successors = node.expand(problem)\n",
+ " if len(successors) == 0:\n",
+ " color_city_and_update_map(node, 'gray')\n",
+ " return (iterations, all_node_colors, None), infinity\n",
+ " \n",
+ " for s in successors:\n",
+ " color_city_and_update_map(s, 'orange')\n",
+ " s.f = max(s.path_cost + h(s), node.f)\n",
+ " \n",
+ " while True:\n",
+ " # Order by lowest f value\n",
+ " successors.sort(key=lambda x: x.f)\n",
+ " best = successors[0]\n",
+ " if best.f > flimit:\n",
+ " color_city_and_update_map(node, 'gray')\n",
+ " return (iterations, all_node_colors, None), best.f\n",
+ " \n",
+ " if len(successors) > 1:\n",
+ " alternative = successors[1].f\n",
+ " else:\n",
+ " alternative = infinity\n",
+ " \n",
+ " node_colors[node.state] = 'gray'\n",
+ " node_colors[best.state] = 'red'\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " result, best.f = RBFS(problem, best, min(flimit, alternative))\n",
+ " if result[2] is not None:\n",
+ " color_city_and_update_map(node, 'green')\n",
+ " return result, best.f\n",
+ " else:\n",
+ " color_city_and_update_map(node, 'red')\n",
+ " \n",
+ " node = Node(problem.initial)\n",
+ " node.f = h(node)\n",
+ " \n",
+ " node_colors[node.state] = 'red'\n",
+ " iterations += 1\n",
+ " all_node_colors.append(dict(node_colors))\n",
+ " result, bestf = RBFS(problem, node, infinity)\n",
+ " return result"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "all_node_colors = []\n",
+ "romania_problem = GraphProblem('Arad', 'Bucharest', romania_map)\n",
+ "display_visual(romania_graph_data, user_input=False,\n",
+ " algorithm=recursive_best_first_search_for_vis,\n",
+ " problem=romania_problem)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "scrolled": false
+ },
+ "outputs": [],
+ "source": [
+ "all_node_colors = []\n",
+ "# display_visual(romania_graph_data, user_input=True, algorithm=breadth_first_tree_search)\n",
+ "algorithms = { \"Breadth First Tree Search\": tree_breadth_search_for_vis,\n",
+ " \"Depth First Tree Search\": tree_depth_search_for_vis,\n",
+ " \"Breadth First Search\": breadth_first_search_graph,\n",
+ " \"Depth First Graph Search\": graph_search_for_vis,\n",
+ " \"Best First Graph Search\": best_first_graph_search_for_vis,\n",
+ " \"Uniform Cost Search\": uniform_cost_search_graph,\n",
+ " \"Depth Limited Search\": depth_limited_search_for_vis,\n",
+ " \"Iterative Deepening Search\": iterative_deepening_search_for_vis,\n",
+ " \"Greedy Best First Search\": greedy_best_first_search,\n",
+ " \"A-star Search\": astar_search_graph,\n",
+ " \"Recursive Best First Search\": recursive_best_first_search_for_vis}\n",
+ "display_visual(romania_graph_data, algorithm=algorithms, user_input=True)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## RECURSIVE BEST-FIRST SEARCH\n",
+ "Recursive best-first search is a simple recursive algorithm that improves upon heuristic search by reducing the memory requirement.\n",
+ "RBFS uses only linear space and it attempts to mimic the operation of standard best-first search.\n",
+ "Its structure is similar to recursive depth-first search but it doesn't continue indefinitely down the current path, the `f_limit` variable is used to keep track of the f-value of the best _alternative_ path available from any ancestor of the current node.\n",
+ "RBFS remembers the f-value of the best leaf in the forgotten subtree and can decide whether it is worth re-expanding the tree later.\n",
+ " \n",
+ "However, RBFS still suffers from excessive node regeneration.\n",
+ " \n",
+ "Let's have a look at the implementation."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 36,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "def recursive_best_first_search ( problem , h = None ): \n",
+ " """[Figure 3.26] Recursive best-first search (RBFS) is an \n",
+ " informative search algorithm. Like A*, it uses the heuristic \n",
+ " f(n) = g(n) + h(n) to determine the next node to expand, making \n",
+ " it both optimal and complete (iff the heuristic is consistent). \n",
+ " To reduce memory consumption, RBFS uses a depth first search \n",
+ " and only retains the best f values of its ancestors.""" \n",
+ " h = memoize ( h or problem . h , 'h' ) \n",
+ "\n",
+ " def RBFS ( problem , node , flimit ): \n",
+ " if problem . goal_test ( node . state ): \n",
+ " return node , 0 # (The second value is immaterial) \n",
+ " successors = node . expand ( problem ) \n",
+ " if len ( successors ) == 0 : \n",
+ " return None , infinity \n",
+ " for s in successors : \n",
+ " s . f = max ( s . path_cost + h ( s ), node . f ) \n",
+ " while True : \n",
+ " # Order by lowest f value \n",
+ " successors . sort ( key = lambda x : x . f ) \n",
+ " best = successors [ 0 ] \n",
+ " if best . f > flimit : \n",
+ " return None , best . f \n",
+ " if len ( successors ) > 1 : \n",
+ " alternative = successors [ 1 ] . f \n",
+ " else : \n",
+ " alternative = infinity \n",
+ " result , best . f = RBFS ( problem , best , min ( flimit , alternative )) \n",
+ " if result is not None : \n",
+ " return result , best . f \n",
+ "\n",
+ " node = Node ( problem . initial ) \n",
+ " node . f = h ( node ) \n",
+ " result , bestf = RBFS ( problem , node , infinity ) \n",
+ " return result \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(recursive_best_first_search)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "This is how `recursive_best_first_search` can solve the `romania_problem`"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 37,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "['Sibiu', 'Rimnicu', 'Pitesti', 'Bucharest']"
+ ]
+ },
+ "execution_count": 37,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "recursive_best_first_search(romania_problem).solution()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "`recursive_best_first_search` can be used to solve the 8 puzzle problem too, as discussed later."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 38,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "['UP', 'LEFT', 'UP', 'LEFT', 'DOWN', 'RIGHT', 'RIGHT', 'DOWN']"
+ ]
+ },
+ "execution_count": 38,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "puzzle = EightPuzzle((2, 4, 3, 1, 5, 6, 7, 8, 0))\n",
+ "assert puzzle.check_solvability((2, 4, 3, 1, 5, 6, 7, 8, 0))\n",
+ "recursive_best_first_search(puzzle).solution()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## A* HEURISTICS\n",
+ "\n",
+ "Different heuristics provide different efficiency in solving A* problems which are generally defined by the number of explored nodes as well as the branching factor. With the classic 8 puzzle we can show the efficiency of different heuristics through the number of explored nodes.\n",
+ "\n",
+ "### 8 Puzzle Problem\n",
+ "\n",
+ "The *8 Puzzle Problem* consists of a 3x3 tray in which the goal is to get the initial configuration to the goal state by shifting the numbered tiles into the blank space.\n",
+ "\n",
+ "example:- \n",
+ "\n",
+ " Initial State Goal State\n",
+ " | 7 | 2 | 4 | | 1 | 2 | 3 |\n",
+ " | 5 | 0 | 6 | | 4 | 5 | 6 |\n",
+ " | 8 | 3 | 1 | | 7 | 8 | 0 |\n",
+ " \n",
+ "We have a total of 9 blank tiles giving us a total of 9! initial configuration but not all of these are solvable. The solvability of a configuration can be checked by calculating the Inversion Permutation. If the total Inversion Permutation is even then the initial configuration is solvable else the initial configuration is not solvable which means that only 9!/2 initial states lead to a solution.\n",
+ " \n",
+ "Let's define our goal state."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 39,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "goal = [1, 2, 3, 4, 5, 6, 7, 8, 0]"
+ ]
+ },
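+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As an aside, here is a minimal sketch of the inversion-parity check described above. The function name `is_solvable` is our own; the library's `EightPuzzle.check_solvability` performs the same test."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def is_solvable(state):\n",
+ "    \"\"\" checks 8 puzzle solvability by counting inversions: pairs of\n",
+ "    numbered tiles (the blank is ignored) that appear in decreasing order \"\"\"\n",
+ "    tiles = [t for t in state if t != 0]\n",
+ "    inversions = sum(1 for i in range(len(tiles))\n",
+ "                     for j in range(i + 1, len(tiles))\n",
+ "                     if tiles[i] > tiles[j])\n",
+ "    return inversions % 2 == 0\n",
+ "\n",
+ "print(is_solvable((2, 4, 3, 1, 5, 6, 7, 8, 0)))  # True: 4 inversions\n",
+ "print(is_solvable((1, 2, 3, 4, 5, 6, 8, 7, 0)))  # False: 1 inversion"
+ ]
+ },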
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Heuristics :-\n",
+ "\n",
+ "1) Manhattan Distance:- For the 8 puzzle problem Manhattan distance is defined as the distance of a tile from its goal state( for the tile numbered '1' in the initial configuration Manhattan distance is 4 \"2 for left and 2 for upward displacement\").\n",
+ "\n",
+ "2) No. of Misplaced Tiles:- The heuristic calculates the number of misplaced tiles between the current state and goal state.\n",
+ "\n",
+ "3) Sqrt of Manhattan Distance:- It calculates the square root of Manhattan distance.\n",
+ "\n",
+ "4) Max Heuristic:- It assign the score as the maximum between \"Manhattan Distance\" and \"No. of Misplaced Tiles\"."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 40,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Heuristics for 8 Puzzle Problem\n",
+ "import math\n",
+ "\n",
+ "def linear(node):\n",
+ " return sum([1 if node.state[i] != goal[i] else 0 for i in range(8)])\n",
+ "\n",
+ "def manhattan(node):\n",
+ " state = node.state\n",
+ " index_goal = {0:[2,2], 1:[0,0], 2:[0,1], 3:[0,2], 4:[1,0], 5:[1,1], 6:[1,2], 7:[2,0], 8:[2,1]}\n",
+ " index_state = {}\n",
+ " index = [[0,0], [0,1], [0,2], [1,0], [1,1], [1,2], [2,0], [2,1], [2,2]]\n",
+ " x, y = 0, 0\n",
+ " \n",
+ " for i in range(len(state)):\n",
+ " index_state[state[i]] = index[i]\n",
+ " \n",
+ " mhd = 0\n",
+ " \n",
+ " for i in range(8):\n",
+ " for j in range(2):\n",
+ " mhd = abs(index_goal[i][j] - index_state[i][j]) + mhd\n",
+ " \n",
+ " return mhd\n",
+ "\n",
+ "def sqrt_manhattan(node):\n",
+ " state = node.state\n",
+ " index_goal = {0:[2,2], 1:[0,0], 2:[0,1], 3:[0,2], 4:[1,0], 5:[1,1], 6:[1,2], 7:[2,0], 8:[2,1]}\n",
+ " index_state = {}\n",
+ " index = [[0,0], [0,1], [0,2], [1,0], [1,1], [1,2], [2,0], [2,1], [2,2]]\n",
+ " x, y = 0, 0\n",
+ " \n",
+ " for i in range(len(state)):\n",
+ " index_state[state[i]] = index[i]\n",
+ " \n",
+ " mhd = 0\n",
+ " \n",
+ " for i in range(8):\n",
+ " for j in range(2):\n",
+ " mhd = (index_goal[i][j] - index_state[i][j])**2 + mhd\n",
+ " \n",
+ " return math.sqrt(mhd)\n",
+ "\n",
+ "def max_heuristic(node):\n",
+ " score1 = manhattan(node)\n",
+ " score2 = linear(node)\n",
+ " return max(score1, score2)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We can solve the puzzle using the `astar_search` method."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 41,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "True"
+ ]
+ },
+ "execution_count": 41,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Solving the puzzle \n",
+ "puzzle = EightPuzzle((2, 4, 3, 1, 5, 6, 7, 8, 0))\n",
+ "puzzle.check_solvability((2, 4, 3, 1, 5, 6, 7, 8, 0)) # checks whether the initialized configuration is solvable or not"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "This case is solvable, let's proceed.\n",
+ " \n",
+ "The default heuristic function returns the number of misplaced tiles."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 42,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "['UP', 'LEFT', 'UP', 'LEFT', 'DOWN', 'RIGHT', 'RIGHT', 'DOWN']"
+ ]
+ },
+ "execution_count": 42,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "astar_search(puzzle).solution()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In the following cells, we use different heuristic functions.\n",
+ " "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 43,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "['UP', 'LEFT', 'UP', 'LEFT', 'DOWN', 'RIGHT', 'RIGHT', 'DOWN']"
+ ]
+ },
+ "execution_count": 43,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "astar_search(puzzle, linear).solution()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 44,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "['LEFT', 'UP', 'UP', 'LEFT', 'DOWN', 'RIGHT', 'DOWN', 'RIGHT']"
+ ]
+ },
+ "execution_count": 44,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "astar_search(puzzle, manhattan).solution()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 45,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "['LEFT', 'UP', 'UP', 'LEFT', 'DOWN', 'RIGHT', 'DOWN', 'RIGHT']"
+ ]
+ },
+ "execution_count": 45,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "astar_search(puzzle, sqrt_manhattan).solution()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 46,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "['LEFT', 'UP', 'UP', 'LEFT', 'DOWN', 'RIGHT', 'DOWN', 'RIGHT']"
+ ]
+ },
+ "execution_count": 46,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "astar_search(puzzle, max_heuristic).solution()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "And here's how `recursive_best_first_search` can be used to solve this problem too."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 47,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "['LEFT', 'UP', 'UP', 'LEFT', 'DOWN', 'RIGHT', 'DOWN', 'UP', 'DOWN', 'RIGHT']"
+ ]
+ },
+ "execution_count": 47,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "recursive_best_first_search(puzzle, manhattan).solution()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Even though all the heuristic functions give the same solution, the difference lies in the computation time.\n",
+ " \n",
+ "This might make all the difference in a scenario where high computational efficiency is required.\n",
+ " \n",
+ "Let's define a few puzzle states and time `astar_search` for every heuristic function.\n",
+ "We will use the %%timeit magic for this."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 48,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "puzzle_1 = EightPuzzle((2, 4, 3, 1, 5, 6, 7, 8, 0))\n",
+ "puzzle_2 = EightPuzzle((1, 2, 3, 4, 5, 6, 0, 7, 8))\n",
+ "puzzle_3 = EightPuzzle((1, 2, 3, 4, 5, 7, 8, 6, 0))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The default heuristic function is the same as the `linear` heuristic function, but we'll still check both."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 49,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "3.24 ms ± 190 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%timeit\n",
+ "astar_search(puzzle_1)\n",
+ "astar_search(puzzle_2)\n",
+ "astar_search(puzzle_3)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 50,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "3.68 ms ± 368 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%timeit\n",
+ "astar_search(puzzle_1, linear)\n",
+ "astar_search(puzzle_2, linear)\n",
+ "astar_search(puzzle_3, linear)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 51,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "3.12 ms ± 88.7 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%timeit\n",
+ "astar_search(puzzle_1, manhattan)\n",
+ "astar_search(puzzle_2, manhattan)\n",
+ "astar_search(puzzle_3, manhattan)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 52,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "22.7 ms ± 1.69 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%timeit\n",
+ "astar_search(puzzle_1, sqrt_manhattan)\n",
+ "astar_search(puzzle_2, sqrt_manhattan)\n",
+ "astar_search(puzzle_3, sqrt_manhattan)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 53,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "3.91 ms ± 434 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%timeit\n",
+ "astar_search(puzzle_1, max_heuristic)\n",
+ "astar_search(puzzle_2, max_heuristic)\n",
+ "astar_search(puzzle_3, max_heuristic)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We can infer that the `manhattan` heuristic function works the fastest.\n",
+ " \n",
+ "`sqrt_manhattan` has an extra `sqrt` operation which makes it quite a lot slower than the others.\n",
+ " \n",
+ "`max_heuristic` should have been a bit slower as it calls two functions, but in this case, those values were already calculated which saved some time.\n",
+ "Feel free to play around with these functions."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For comparison, this is how RBFS performs on this problem."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 54,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "140 ms ± 9.89 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%timeit\n",
+ "recursive_best_first_search(puzzle_1, linear)\n",
+ "recursive_best_first_search(puzzle_2, linear)\n",
+ "recursive_best_first_search(puzzle_3, linear)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "It is quite a lot slower than `astar_search` as we can see."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## HILL CLIMBING\n",
+ "\n",
+ "Hill Climbing is a heuristic search used for optimization problems.\n",
+ "Given a large set of inputs and a good heuristic function, it tries to find a sufficiently good solution to the problem. \n",
+ "This solution may or may not be the global optimum.\n",
+ "The algorithm is a variant of generate and test algorithm. \n",
+ " \n",
+ "As a whole, the algorithm works as follows:\n",
+ "- Evaluate the initial state.\n",
+ "- If it is equal to the goal state, return.\n",
+ "- Find a neighboring state (one which is heuristically similar to the current state)\n",
+ "- Evaluate this state. If it is closer to the goal state than before, replace the initial state with this state and repeat these steps.\n",
+ " "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 55,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "def hill_climbing ( problem ): \n",
+ " """From the initial node, keep choosing the neighbor with highest value, \n",
+ " stopping when no neighbor is better. [Figure 4.2]""" \n",
+ " current = Node ( problem . initial ) \n",
+ " while True : \n",
+ " neighbors = current . expand ( problem ) \n",
+ " if not neighbors : \n",
+ " break \n",
+ " neighbor = argmax_random_tie ( neighbors , \n",
+ " key = lambda node : problem . value ( node . state )) \n",
+ " if problem . value ( neighbor . state ) <= problem . value ( current . state ): \n",
+ " break \n",
+ " current = neighbor \n",
+ " return current . state \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(hill_climbing)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We will find an approximate solution to the traveling salespersons problem using this algorithm.\n",
+ " \n",
+ "We need to define a class for this problem.\n",
+ " \n",
+ "`Problem` will be used as a base class."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 56,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class TSP_problem(Problem):\n",
+ "\n",
+ " \"\"\" subclass of Problem to define various functions \"\"\"\n",
+ "\n",
+ " def two_opt(self, state):\n",
+ " \"\"\" Neighbour generating function for Traveling Salesman Problem \"\"\"\n",
+ " neighbour_state = state[:]\n",
+ " left = random.randint(0, len(neighbour_state) - 1)\n",
+ " right = random.randint(0, len(neighbour_state) - 1)\n",
+ " if left > right:\n",
+ " left, right = right, left\n",
+ " neighbour_state[left: right + 1] = reversed(neighbour_state[left: right + 1])\n",
+ " return neighbour_state\n",
+ "\n",
+ " def actions(self, state):\n",
+ " \"\"\" action that can be excuted in given state \"\"\"\n",
+ " return [self.two_opt]\n",
+ "\n",
+ " def result(self, state, action):\n",
+ " \"\"\" result after applying the given action on the given state \"\"\"\n",
+ " return action(state)\n",
+ "\n",
+ " def path_cost(self, c, state1, action, state2):\n",
+ " \"\"\" total distance for the Traveling Salesman to be covered if in state2 \"\"\"\n",
+ " cost = 0\n",
+ " for i in range(len(state2) - 1):\n",
+ " cost += distances[state2[i]][state2[i + 1]]\n",
+ " cost += distances[state2[0]][state2[-1]]\n",
+ " return cost\n",
+ "\n",
+ " def value(self, state):\n",
+ " \"\"\" value of path cost given negative for the given state \"\"\"\n",
+ " return -1 * self.path_cost(None, None, None, state)"
+ ]
+ },
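+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To see what the `two_opt` move does, here is a quick standalone sketch of the same segment reversal (the example tour and the fixed indices are our own; `two_opt` picks its indices at random):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "tour = ['A', 'B', 'C', 'D', 'E']\n",
+ "left, right = 1, 3  # fixed here for illustration\n",
+ "tour[left:right + 1] = reversed(tour[left:right + 1])\n",
+ "print(tour)  # ['A', 'D', 'C', 'B', 'E']"
+ ]
+ },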
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We will use cities from the Romania map as our cities for this problem.\n",
+ " \n",
+ "A list of all cities and a dictionary storing distances between them will be populated."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 57,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "['Arad', 'Bucharest', 'Craiova', 'Drobeta', 'Eforie', 'Fagaras', 'Giurgiu', 'Hirsova', 'Iasi', 'Lugoj', 'Mehadia', 'Neamt', 'Oradea', 'Pitesti', 'Rimnicu', 'Sibiu', 'Timisoara', 'Urziceni', 'Vaslui', 'Zerind']\n"
+ ]
+ }
+ ],
+ "source": [
+ "distances = {}\n",
+ "all_cities = []\n",
+ "\n",
+ "for city in romania_map.locations.keys():\n",
+ " distances[city] = {}\n",
+ " all_cities.append(city)\n",
+ " \n",
+ "all_cities.sort()\n",
+ "print(all_cities)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Next, we need to populate the individual lists inside the dictionary with the manhattan distance between the cities."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 58,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import numpy as np\n",
+ "for name_1, coordinates_1 in romania_map.locations.items():\n",
+ " for name_2, coordinates_2 in romania_map.locations.items():\n",
+ " distances[name_1][name_2] = np.linalg.norm(\n",
+ " [coordinates_1[0] - coordinates_2[0], coordinates_1[1] - coordinates_2[1]])\n",
+ " distances[name_2][name_1] = np.linalg.norm(\n",
+ " [coordinates_1[0] - coordinates_2[0], coordinates_1[1] - coordinates_2[1]])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The way neighbours are chosen currently isn't suitable for the travelling salespersons problem.\n",
+ "We need a neighboring state that is similar in total path distance to the current state.\n",
+ " \n",
+ "We need to change the function that finds neighbors."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 59,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def hill_climbing(problem):\n",
+ " \n",
+ " \"\"\"From the initial node, keep choosing the neighbor with highest value,\n",
+ " stopping when no neighbor is better. [Figure 4.2]\"\"\"\n",
+ " \n",
+ " def find_neighbors(state, number_of_neighbors=100):\n",
+ " \"\"\" finds neighbors using two_opt method \"\"\"\n",
+ " \n",
+ " neighbors = []\n",
+ " \n",
+ " for i in range(number_of_neighbors):\n",
+ " new_state = problem.two_opt(state)\n",
+ " neighbors.append(Node(new_state))\n",
+ " state = new_state\n",
+ " \n",
+ " return neighbors\n",
+ "\n",
+ " # as this is a stochastic algorithm, we will set a cap on the number of iterations\n",
+ " iterations = 10000\n",
+ " \n",
+ " current = Node(problem.initial)\n",
+ " while iterations:\n",
+ " neighbors = find_neighbors(current.state)\n",
+ " if not neighbors:\n",
+ " break\n",
+ " neighbor = argmax_random_tie(neighbors,\n",
+ " key=lambda node: problem.value(node.state))\n",
+ " if problem.value(neighbor.state) <= problem.value(current.state):\n",
+ " \"\"\"Note that it is based on negative path cost method\"\"\"\n",
+ " current.state = neighbor.state\n",
+ " iterations -= 1\n",
+ " \n",
+ " return current.state"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "An instance of the TSP_problem class will be created."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 60,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "tsp = TSP_problem(all_cities)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We can now generate an approximate solution to the problem by calling `hill_climbing`.\n",
+ "The results will vary a bit each time you run it."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 50,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "['Arad',\n",
+ " 'Timisoara',\n",
+ " 'Lugoj',\n",
+ " 'Mehadia',\n",
+ " 'Drobeta',\n",
+ " 'Craiova',\n",
+ " 'Pitesti',\n",
+ " 'Giurgiu',\n",
+ " 'Bucharest',\n",
+ " 'Urziceni',\n",
+ " 'Eforie',\n",
+ " 'Hirsova',\n",
+ " 'Vaslui',\n",
+ " 'Iasi',\n",
+ " 'Neamt',\n",
+ " 'Fagaras',\n",
+ " 'Rimnicu',\n",
+ " 'Sibiu',\n",
+ " 'Oradea',\n",
+ " 'Zerind']"
+ ]
+ },
+ "execution_count": 50,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "hill_climbing(tsp)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The solution looks like this.\n",
+ "It is not difficult to see why this might be a good solution.\n",
+ " \n",
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## SIMULATED ANNEALING\n",
+ "\n",
+ "The intuition behind Hill Climbing was developed from the metaphor of climbing up the graph of a function to find its peak. \n",
+ "There is a fundamental problem in the implementation of the algorithm however.\n",
+ "To find the highest hill, we take one step at a time, always uphill, hoping to find the highest point, \n",
+ "but if we are unlucky to start from the shoulder of the second-highest hill, there is no way we can find the highest one. \n",
+ "The algorithm will always converge to the local optimum.\n",
+ "Hill Climbing is also bad at dealing with functions that flatline in certain regions.\n",
+ "If all neighboring states have the same value, we cannot find the global optimum using this algorithm.\n",
+ " \n",
+ " \n",
+ "Let's now look at an algorithm that can deal with these situations.\n",
+ " \n",
+ "Simulated Annealing is quite similar to Hill Climbing, \n",
+ "but instead of picking the _best_ move every iteration, it picks a _random_ move. \n",
+ "If this random move brings us closer to the global optimum, it will be accepted, \n",
+ "but if it doesn't, the algorithm may accept or reject the move based on a probability dictated by the _temperature_. \n",
+ "When the `temperature` is high, the algorithm is more likely to accept a random move even if it is bad.\n",
+ "At low temperatures, only good moves are accepted, with the occasional exception.\n",
+ "This allows exploration of the state space and prevents the algorithm from getting stuck at the local optimum.\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 62,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "def simulated_annealing ( problem , schedule = exp_schedule ()): \n",
+ " """[Figure 4.5] CAUTION: This differs from the pseudocode as it \n",
+ " returns a state instead of a Node.""" \n",
+ " current = Node ( problem . initial ) \n",
+ " for t in range ( sys . maxsize ): \n",
+ " T = schedule ( t ) \n",
+ " if T == 0 : \n",
+ " return current . state \n",
+ " neighbors = current . expand ( problem ) \n",
+ " if not neighbors : \n",
+ " return current . state \n",
+ " next_choice = random . choice ( neighbors ) \n",
+ " delta_e = problem . value ( next_choice . state ) - problem . value ( current . state ) \n",
+ " if delta_e > 0 or probability ( math . exp ( delta_e / T )): \n",
+ " current = next_choice \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(simulated_annealing)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The temperature is gradually decreased over the course of the iteration.\n",
+ "This is done by a scheduling routine.\n",
+ "The current implementation uses exponential decay of temperature, but we can use a different scheduling routine instead.\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 63,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "def exp_schedule ( k = 20 , lam = 0.005 , limit = 100 ): \n",
+ " """One possible schedule function for simulated annealing""" \n",
+ " return lambda t : ( k * math . exp ( - lam * t ) if t < limit else 0 ) \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(exp_schedule)"
+ ]
+ },
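+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For instance, here is a sketch of a linear-decay schedule (the name `linear_schedule` is our own, not part of search.py); it can be passed to `simulated_annealing` through the `schedule` parameter in place of `exp_schedule()`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def linear_schedule(k=20, limit=100):\n",
+ "    \"\"\"Temperature falls linearly from k to 0 over 'limit' steps\"\"\"\n",
+ "    return lambda t: (k * (1 - t / limit) if t < limit else 0)\n",
+ "\n",
+ "# usage: simulated_annealing(problem, schedule=linear_schedule())"
+ ]
+ },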
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Next, we'll define a peak-finding problem and try to solve it using Simulated Annealing.\n",
+ "Let's define the grid and the initial state first.\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 64,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "initial = (0, 0)\n",
+ "grid = [[3, 7, 2, 8], [5, 2, 9, 1], [5, 3, 3, 1]]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We want to allow only four directions, namely `N`, `S`, `E` and `W`.\n",
+ "Let's use the predefined `directions4` dictionary."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 65,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{'E': (1, 0), 'N': (0, 1), 'S': (0, -1), 'W': (-1, 0)}"
+ ]
+ },
+ "execution_count": 65,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "directions4"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Define a problem with these parameters."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 66,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "problem = PeakFindingProblem(initial, grid, directions4)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We'll run `simulated_annealing` a few times and store the solutions in a set."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 67,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "solutions = {problem.value(simulated_annealing(problem)) for i in range(100)}"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 68,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "9"
+ ]
+ },
+ "execution_count": 68,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "max(solutions)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Hence, the maximum value is 9."
+ ]
+ },
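+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As a quick sanity check, this agrees with the largest entry in the grid:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(max(cell for row in grid for cell in row))  # 9"
+ ]
+ },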
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let's find the peak of a two-dimensional gaussian distribution.\n",
+ "We'll use the `gaussian_kernel` function from notebook.py to get the distribution."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 69,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "grid = gaussian_kernel()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let's use the `heatmap` function from notebook.py to plot this."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 70,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAeAAAAHwCAYAAAB+ArwOAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAIABJREFUeJzsvW3ofW2b13Wse+//dd3JqFP4ImYaNUmxEHqazIhqyKIaCpUgSwos4oYsdMIeyBcW9CYIhMAI7hypIErC6IGoQAhMCPMBfWETIY4x04hmMYyS13Vfe/93L/Y+9z7Wsb7H0/mw1vr9fuuA//+31vlwnOd+Wp/1Pc6HNd1uNzrssMMOO+yww9a1b23dgcMOO+ywww77iHYA+LDDDjvssMM2sAPAhx122GGHHbaBHQA+7LDDDjvssA3sAPBhhx122GGHbWAHgA877LDDDjtsAzsAfNhhhx122GEb2AHgww5byaZp+rPTNP0DIu03T9P0hzr4vk3T9De0+jnssMPWswPAhx122GGHHbaBHQA+7LCd2DRNPzBN0++fpun/nqbpJ6dp+q0s71dP0/S/TNP0s9M0/blpmn73NE1fPPL+4KPYn5ym6S9P0/Qbp2n6kWmafnqapn9tmqa/8Kjz66dp+tFpmv6PaZr+32mafkfE/yP/Nk3Tb52m6c9M0/QXp2n6d6dpOq4fhx3WYMcP6LDDdmAPmP23RPQniegHiejXEtGPTdP0Dz2KXInoXyaiX0REf9cj/7cQEd1ut7/3UeZvvt1u33e73X7f4/yvJaJvP/z9TiL6D4nonyaiv52I/h4i+p3TNP0yzz+z30BEP0xEfxsR/Toi+ud6vPbDDvuoNh17QR922Do2TdOfpTvgLiz5CyL640T024nov7jdbr+Ylf83iOhX3G63fxb4+jEi+vtut9tveJzfiOiX3263P/04/xEi+u+J6Ptut9t1mqafT0Q/R0S/5na7/eFHmT9GRP/27Xb7r4L+/5Hb7fY/PM5/CxH947fb7dc2vCWHHfah7bx1Bw477IPZr7/dbn+gnEzT9JuJ6J8nol9CRD8wTdPPsrInIvqfH+V+BRH9Lror0J9H99/uH3Pa+n9ut9v1cfxXHn//PMv/K0T0fQn/P8WO/08i+gGn/cMOO8ywIwR92GH7sJ8iop+83W7fz/79/Nvt9qOP/P+AiP53uqvcX0BEv4OIpo7tR/z/EDv+xUT0Mx3bP+ywD2cHgA87bB/2vxLRz03T9K9P0/RXTdN0mqbpV03T9Hc88ksI+S9P0/QriehfEPX/PBH9Mqo3zz8R0b86TdNfPU3TDxHRbyOi3wfKHHbYYUE7AHzYYTuwR6j4HyOiv4WIfpKI/iIR/R4i+oWPIv8KEf0mIvpLdJ9MJeH3bxHRf/yYxfxPVHTB809E9F/TPSz9J4jovyOiH69o57DDDnvYMQnrsMMOc01O8jrssMPa7VDAhx122GGHHbaBHQA+7LDDDjvssA3sCEEfdthhhx122AZ2KODDDjvssMMO28CGbMQxTT/vRvT9I1wfNrPoMtCjXFu5FhvVRovfkVGv0RG1jP9I2ai/3uWiZbdq97A2+1m63f4/90c6aCes7yei74xxfRizT8Fy0Y854i/zlXkv/cv6rW1jTZ/fdPDRy+/FL1LVRtRvxF9PXxmfvV9rxudh9fbdUKkjBH3YB7URYBzdxqeOPnv6arW3sCNuz5vEjPW+iT1sT3YA+N3be7lwbAmLmotbz/6OhGVv37W+RgDkgNJh+7YDwG/W9qJe9mQjYL4lfNdUqZ+oX3uj+/1ebhbfws3nYSPtAPBh9D4uQHuAbw/w9ARhax96+MnY3ucX7N0+0mt9H3YA+E3anseFtlInvX29h/eu1T4ahHv62vNv9LC92PHpf3jb6kIR8dezb6OVb63tDbrSZP9qZtAWH9G6Z4rP6v2U8Lumr97+evftsD3YoYDfnH2Uu/63AN/aUO0ewsy11tLvTN0z9R+i6PVd2eJmlGi7oZjDRtkB4A9tW4R3e14Ee4M8c/HKvndvGbrIWl5P9oZojzdsW/Qrakdg863Y8Um9KdviIhOxPfoaceEbDRvPRvxcs5tgIOOvMRomHRGWjvqM+or07SP4OmyUHQB+d9Y7JNvD1x7hOxK8Pd7ftX6aWju1YM7COFO+9LUHiCO+PgI4DwhvaQeA34ytPTbl+dqjGt9yacqeNqDoYahfWSjXwrgniFvVcG9V3dNXDz+HbWnHGPCbsL0q1h5+1la9UV/R97x2HDQz5rwXa+nziJuZPX6/IvYWf4OHjbC3dgU4TLW1LiJrXhz3eGHMlKvpQ9ayfekRbqxVxxlF3FMN9wxJ9/BTfO1NnR+h6LXtAPDubU938B+1L5lymbZ7ttfTb82FmL/enjCOlusJ0LXCv2v6idgB4bXtAPCubS3IrOWjh581wTtq/XBtG2uZ1qfMjGVu0bFRr421lOzaqnoNCEfHgw8Ir2kHgHdre4HvXvrRw0dvtbvmjlkZy6rRqKH+R2f2couAzfIdKbM3EO9BmfcMjx/Www4A79L2EqLdAzR7+OipdkevG65tq6ePDLTla8wCuReMPYhabfUC8V6U+TE7+q3YAeDd2VozJN8CfNfow5ZLl2r8r2EtS5CyS496qbJeSvStKNke65db+nBYD9vTr/6wXSjfPYSsR/dhi5nTWb97sxooZ9RxRqlavlpV5Bph6V6KemT9iI/DWu2tXg3eob0FcO25/lrQXWNiVrad3lY72YooHlK22ukBYy/fa2MPID4g/N7tAPAubGv4fmTw9oTuXidlZc3qV3RdLrcIRDXfPcZwW0AYBfkoEO5F0R8QHmEHgDe30fDdqu6Wbe8hjJ7117O9GqsZ4+UWDS9bbfVSrVr9VphaMIvWHdF2qb9lSPuwGjsAvKntWTlupZhHqd29LLeq9TvavD5kx3uJYiFmzbenjnvBeBSIvfojwtIHhN+a7eGX/0Ftr/DdAtqj1O4eZllnfPVqK2s1a3m5tYSYpe8aBdkyntsC0x6h7RqYjgK4126k/mEZOwC8ibVeSLcIOe+tzb2GzjN+Wnz3tJqwMree63pbx3xrodcKxJa6veuNrBupf1jUDgCvbnsM374H8G451h3xkfUXsZqfb8smG9yi4WWtzR4TsGpVsVVvlNKuvTHYQkV7dSP1D4vYAeBV7S3Bd+0w9Qi1u0WoPeOn1m+LZduIhpeLeSDV/GbGfL36Wt0R4em9g3gUwEv9A8ItdgB4NdvbmG1NvTXbWrPvLe1F6md99WgnYiPGfqOTsTJART5qFO5IVbwmUNcMZ7eOKR9m2QHgVWxP8N0DeHuHmNe8YfDqRn3U+OxtmTajypbIhyny10PhWvV61BnhzwPxHuAdqau1eZhlB4CH24gL/VpAXAt6W/e5pZ5XN+OnxXcPy+xixc2DKfLdEnK26mogrAF4RBVnAFmjpPcC71L3CEn3tAPAQ61F1dTUXQOkWyverW8WInWjPrL+RlvNUiOiPrObPR814KiFWqaNEYD
spWxrQ9KW1dY7DNkefvXv1EaEndcA6Rrg3ePrs+p49by6UR+1fntaZnZzMUudIr+eQo4qXKteiyquCSe3KOLsuPIa4NbqWPWs/h2G7ADwEBsxE3h0nTVAvdVNwl5memf9jPDhXRw9f7Vjwd44cA1YZb01YayV7xlm3hLcVp1IPdS/w6QdAO5uIy7qI4EzGmZ7gu6an02kbkvZWmsFdstYcGYcWKvbc/x3NBBHg3VLcJd6x5hwix0A7mq9L/B7gtRI8O7pRqK2jlUvmp9tb4RFZjBz09QfKtMr9Nxbufb2vQWIe4wN9wxje20dRnQAuKPtEb5rg20vr2Xr8eJIvue/1bdnNWO9xbJjvlb4uWa2c0Qd1wIzC1cExWx4ugbEa0O7to5V77ADwM229XjvHsqOAv3eAe3lWT6zfnpbSyi6x5hvFq6yjgfNPZTl5SOqOAPiHmDtPZas1bHqfWw7ADzUat7eNYHaA1p77Ve0bG/oRj7zDPzW/Ilm1gG37m4V3W2qx7hujXptVZ2ZceJI/UzZTL80v5bVwvSAsLQDwE22Rth5axi1Kt61+jRyvHjUmPDeQtCZ8LPVh8iYr6WQR43r1ijdNULZa6rhHmW18qXOEY6O2gHgKusddt5a9baAd8v+rKnqNR9Wea9exkfUvFBgbXsWVItF4JqB8jdKGvJjQbYGxtlytWVbQDxCpfeazEVKHavex7MQgKdp+oeJ6N8johMR/Z7b7fbvDO3Vrm1v8N0TeHsDei049wpNt0zaivjIWA8/kclWRO2KNwLTUj4C5B4wbinHy44E8dbA1vx6dbx6H8fcX+k0TSci+veJ6B8kop8moj8yTdN/c7vd/rfRndufvYWQc225NcA7ur9bh9Ct8l49aVOw3Ai7Pf56ffXC0d8oPrw1vRl1uyZkewF2KxCvGb7mdQ4Iaxa5GvxqIvrTt9vtzxARTdP0nxPRryOiDwbg2hDhiIv9aODtBby9yrT0wSprlbfqEG0LWM+8vnmA7gFYrfxoGFvA2grsGjhbVG7vkPQB4RqLAPgHiein2PlPE9HfKQtN0/QdIvrO/ewXdujaW7IeYee9qMi1Q8Mj4dzSh0wbVvliewZu1tBrubHjFsBq5WthHFGFPRVmDdiivmrrjYAwsgPCWYsA2Pu13RNut+8S0XeJiKbpBxb5b9tqLrp7CiX3Ur2jYVnTXgS6a4xXF3tPoM2YdZkYpXgj5SL1sr6zk7ZqoV6rhrdSzFpZr45X7/1aBMA/TUQ/xM7/OiL6mTHd2ZvVjueNuLiPAtZIEI4KM4+GbrTcR4Vt1DQoZ4CMynrlamCcAeiIEHYWqDVquFUxo35Kf1pZy7fVl/dtEQD/ESL65dM0/fVE9H8R0T9JRL9paK92YbWhxjXguxa01oRzzWscOWa8E+BGfqE9zVu91GzyPURALh2pBW0NjCNw7FWGl/P8WCDupYazcG1VwweEi7k/79vtdpmm6V8iov+R7suQfu/tdvtTw3u2qe0FvnsG16gwc1axj1Trg4EbhetaEL4E2uoO6IhKbgFtBIge/DKqOBtS9mAYUa1bqOEDwj1sut36D9fex4C/093vOjYaviNDziNUb482e/e7VwRAlhkAXA9oe4BwBqpW2SHqGV2fZEPyYo064pWR+dnyNf0a4WPEexH1o5XTylrlrTpvwb5Lt9vPuBeUtQNc79S8sWLL9qIca3z2UMW9FW9NnzqDV/tV9YJxbT3tWmfVl3W04Vsvr9r4ZyMndWXDwBl1GlXFnprtoYgz+V75Gp9RP1o5yz62Ej4APLMa9avVGTVJqQbOa6vetcPM2fyOwEUf4R4BHAkv9zStvSYoSxhL0Fpjxt5s514gtvqU9VGbXxPGHg1hC6YfF8IHgJ/WC749w857BtlotbtD6PaA7QgIR+pkhckIQ1Cu7pP8PL2xXg+MXn62PO9TC2hHquEeCh35yZSzykfqvW07ALyLMd/RIec1wdvzJmFj6EaBu6UK3vIXbKnc2n51AbLVeAbGvVVxTf5oNRyBZ01Y2ypHoKxW3vL/9u2DA3iP8PX89FS9ewFvz340QFe62juAa2C9lQrW1K/3Gqr6q4WqM8q3RRX3BHEvtWxBtMdYNCqjldPKWuWtOm/XPjCA14bv2iHnUXBtAfqodgZCtyeUrXQvr6UsqtcDxFsAPd2epow95ZsBNVEM3DUg7gnp3iFu9GU6IJyxDwrgtw7fXmHcEXle/zaGbitwR6jiaH60TMSi8FwTslIRR85TFoWx7FQG1DVQj6jabJ70q6nhVtUu+xItg/rC7WNA+IMCuMZaws5emZqwdKTs1nkj2hgA3hrI7kUJR8uja1k0/IvKaXnRUHMNbLXz6jC1taypVnFaoNOUopdX276EtBWSbrXIndoWIZN92wcEcI36XXPMtzak20NZ9gDmiDBzB+j2Vr61QLbSvbyacqh89Bq41+ulNXac6m/NWHGtIq4db65Rw9Gx3p5jzLVltHKoT7IO79/btA8G4PcC3x6gXBO8NX0iqgJvFLpbAbgVvj1+sU3KkfnYI5ylhfsYCU97YWMrLwJtD7ZRNZyBZ2bctzYcfcyO1uwDAViDbya8q5VvhW8GzCMB2wrejdRuDXRbABsBa2/4jvilRkBslUHXxWjaGlYdpi7fQUsVy4YyalaDag3oe8A7q3Zb81EZq6xV3qqzf/sgAI6My0bqRODrgVeWqR1T7QnYKBwj4K250egI3R4AXlv9tqriVouCOArhNS2ylAnVCVmNKs4o3YiC9VRvzbh1NCR9QHi0fQAArxl2zqheL78Gvj0BOwq8ndRuDWjXAnArkL08yyQnaupq9aKwjZSTZXqAfBUYt6pimY7UJy+XAXR0bNhTwzWTvGryURmrrFXeqrNfe+cA7gXflnJae3uCb2u4uaYfSfi2AHbPELbSvTzPamGk1es1w1mmyXYzeZk6XnrINBAT5dWpB+IsVDPpVl60nMyryUdlPo69YwD3hK8HT69MLXx7hpZHq94B4B0N3VoA18B2tAK2ymdmPVvl0XUykuZdX0defzVVbKWHLKOItdB0RIG2wFZri4w6e4Lw+1fB7xTAa4adZblM2NnKy4Z6W9JHg7ez2l0bwHtRw5kyXvm1J15lICvV6lbiKNSup4iRQ5SuqWFNMUfSS1u1Y8l7gTAFy3p19mfvEMAfBb490rU+bADeHiB9CzDW0qx0L6/GuL/smG8Uwl77vceAW80LaZuW3WWLj/FqMK0Bce9QdVQxy7yafFTGK0tK+bdh7wzAW8LXy4sq2pGQzaher552s9FB7W4N4FEwzqRF8nqZBWPtGofSPajWQjdzI5A1b+JWGsREfnhaLlWSaR6ILYBG4ZxJ19oq6aTkyXqRfFSm1vYfjn5HAF4Tvl6ZteHbo6wHWa+NSvD2BOwIAG+hhL28SL5lNeFnK70XdHneSOj2sKbwNAJqRO16ijkD55p0DcKZvEg+KqOV08p6dfZh7wTAveBbW64Vvi3jtBmlisrW1OFpg8A7AsCjlHAmz0qz0r28jFmKV5bpMd5rnfcCasYPUr1a+Nmr3wXEBN
I0oHoKtxbO0fQDwj3tHQA4CtKIWfDMlEF50bfaK9dTDa8I317A3KNKrjnPpGXyM5YBTcZP5rz3caSs1q9W/65JEKOJUtbkKQ+EHkDXgDC37N1VFJTvB8JvHMAefLWXlx3P1cpYoemoKm4N/fL0DJCzdRrB2wu2o6EdSbeOa861tEheiyH4oPyI4uXl0HlEAWePkWWv+aWf2RuRZhDXqmEUkiaR70GYgmWt9EiZTB4ZZShQzvLp1dnO3jCAR8PXG/ftCd8MZHl6TVqLn5XBuyWUa457nHvp2TLIasLOMl1Law01ZwBaA9uoRRWzlu/2ywpLR8PL2gSt7Ljw3iGcKaeV9epsY28UwDVjvlq9teAr69TAtwXILeAlCsG3Fwh7ALgVujXAzeRl0iJ5GeN+0HUqC+IaCEfgXANwrX6t9VDHpiEQF7NmS2tqOApuEn7eAoSRvW0Iv0EA1475rg1fq04LUL36KM3y7fkYDN63BGWtnlU+cq6lWenZcp7qReU0UGegK89bgKz5j9TpabXq2DQ0PmzBEkE1qoZbVbNMj0KYWwTC0rQybxfCbwzAbyXsHOnL2vC1yjeEm0cAckSel5bJjx57eZm0SJ5lqF6L+o2cR+bmeODN5O/B0NhwWhFbajgzNoxg2JJGRnpWRcu8SD4q49m+IfyGANwTvi3lvPZknjb+itK8q3EWpgPhuxZUe/pqhW0rgCPnWlomf5TVhGO9ctqxVdZLs/ojy1u+vDxKlJdpphUI8waisCQjzyrfA8KeP5mO6tdCuBam20L4jQC4N3wjL1uWsUAZGfcdqXIj8I2EoRvBuzZIe/qO1o0eZ/KsNCu91bjf6HgvSouo4Oyx5l+DWDRNWgiIScvcDKimhaSlAwnLrBL2Qs8ZCEdDzt6b3hvCXnvbQfgNALgWvtHykfD03uBr+YuAtlH17gWuvfx5aZn8bB4619Iy+Z5pwJV5HnS9857gRTYCoBmzVH9EgZc01crvkhfWIEqgjDVmHAWuBmFkUdBmfEbLWOX2CeGdAzgbFvbqRl5uBNAobwR8o3Vrxnt3Dt6asiP65aVZx5k8K81KrzUNuDzPAnHkvOa4B3gjUO4N7giIPSCbxseGJUQJpCEIk1K/FsKobSIdwla4mStxmUesTGTiVguEeR/G244BHIGv1v0ofGU5D74aZDPwRf6yoEXteaCtUL29gZnxs1adXmnWcSYvku7leWapX56PID0KvLI/Wlo2T1oNeC2AamVr/blqGIWkEXRlGQqUpYo0Av60fJku86RtAWGrbn/bKYBbws57h2+ryl0ZvqNhl60zCtAt5aPHNedeeo15F30JVZS2NniRZYBbA17NeoFYlg31s2dIOqqkyUkj4E/Ll+nSMoDOlMmWXwfCOwNwi+rV6kfg6+VrkNXqZMLOkXIWUKPwrQQvShsJ3DXbyJbJ5FvHkXMtLZLnWasCjo73lnoehFFZrdxIpTvCMuoZ1VXNCklr0PQgTIo/AmkWTLMQlh+WB2HUJvrALYjuA8I7AnDLeG9r/dq3wavn5SNVK+taZTrDVzY9AqQ9fY3oQzav5jhyrqVF8jxbC041KtDKa1WV8oYik59pw/pLlXWgRSAsLQJhWRYZasMDs9YX5K8Gwl4bNTYWwjsBcBSe2e5a0NLK9Aw9W/VqJkzxtMHK9z2Atkefo+VrjiPnXrpWNqIQi0VD0Fb4uZxr5RCAon0OAylRp8YntxpFK+sOhzBqmMiHMAIpArWVhtqR+dJa7wy1+i3jwaU+KT7abGMAZ1Sr1dU9j/v2hm8mRN0AXw1AHqDWArBsZ0S/onmZ/Oi5lmalZ8pIuKI8C7TauQwpy/RMmgXqLLCy13atfAt4W03tvxwXLqaNC0fC0b0gTMKHzLeUbkQFk1MGtc8t88Xor4Y3+iplw8VZ+EbK9YCvVj8CX1Q3Cl8N3MnJViMBOervKN+1Zbw06zhy7qVH8jUgaWVqwRs5roVwFqBZq/XvhbJluWj9ISHp7JhwDwi/l0lZvD9EvUC8MoBrxmmz8EXlewIf+Y1OsEJpmbCzln6mZb0OIWcJji1A27PtDGwjZb206HHk3Eu3TKujqWAJ2pJmgdg6zkIY9b8GUKOhbZkGZA/UUd+qbQVhEmW0tD1PyiJQR7M+IB4M4NaJVVn41vq1ABoJPVv5EqwozTsvx5qvBvjuBY577lc2L3ocOdfSMvnFUDgX5aNrkQbYkteieknJqwWoBuJWv6PMg7QFc2gjISwtAmqeJo+RLwJteuHfHhDW6lgmOZAD8iAAT7QNfFGdbOhZy9Pga4WUkWmwtSzyeivgq6X1BmNPnyP91/Tfq28d15xLG/QLftMWBRqq4ylXq73av8VHsWhd07IQLhaFtQdc+WI063WXlbH+Y7lz30TRFSc7/fnWdCsCXy9fg6xWp2XSleZX1omo5Ar47gGMvdsZcXNg/fXSao6zaRlD9a0wtDwvZSIh6IwKjlxLtbKRa36Lf2Qt4eOMr2j4ulkJE81DqhJQGWWMfGvtbRWK1sp6dfrbDgHsdaln6LlHHs+Pwjcaeu4E3zUBNwrAW/QL/Y3mRfK9PCKi8w0kEtG54QJxYQ3JNi/TPN2DbclDcOXlrbQatedB1mJDr+trBsQ1ihqdpxVxBMLFMZEPxgjMSdTR0nqEokdCmJR6/WxnAK6FL6q3VugZXjWNNnrANzHmOwJqa8B1jb7JOpqPiB8vzTwWkEVwPV+XacK+Bcp8vpx8X7zM87pzFucAzNZxjQKu/Tt7bSLNglMvEGcsEv6uzauGMNH8AySljGaWX288WPqw/G9hY9veCYAj3RgJXw2yHnyRzwiQe8C3WAf4toJ0DwDuDeXM31Qag60ErQAjAioR0SkA40jZawEvK/NZpj3PHwU4mDmUowrYu8ZqZTN1a8ug8q2WUboteU0QJsIh54zqtSBMogy3SCg6k4fytba1stLGQXgHAB4N30z7Gnw1/xKQqFxP+Mp2g/DdEn6j2xl5Y5D5q6Y9gGvAVoJWQvNkhJvPCRhfhBrmfq8PsPK2r5fTEs4czBzKz+NJB7KnYiPXbA/UqIxn2fKt1kMFe2nQNAgTza9hPK8m9FzMKpMdD5a2BYRJqV9vGwN4jeZlGxHISkOARXUtIGfMg3TSDT8OgwP8HQ3AaPk9gRjmAZWrADcCWw2yGSVcyl9RSPrRhgS0ZZ/VnPImBPcd51YzploLM698tK7mIwNLqz8E/KK0aggTqCDB6YWjM+PBUcAiy94hZfxHy/a9S9sIwC3gs3xkZz1reWclPTOhCqV551Zb/DwZdt4qrbff3tCWeZGyBMoT0ULpBoDLYSshi+AaAe7pxBTsdQlU6YMDueSVtNKnAubS3+vlPC97vs5D10gdc2Xc+peSedz6Xj/zNwCWj5obgGYIE+WWJ5EoQ4Qb2kMoGplVZn0IrwzgXs1F4auBNNqX6Lgvslr4oqu7Nb4Mio+A2kgAr1Un0ldZRq2jq1wEXA22SwUszk8AxMEf/+mkl7s+Xoj0f72eFmFoBGYO5TCQa2BcLAtYVB6Vi+T3sCiUs+DVfIchTGQr2HLts
ZYUIR+eX62tLUPRXlvSBxl+YrYCgGubyIz7trabCUuj/ChstfKonuZDUb9bQK4XBFvr9L45kHnPczCeK6BrAfekKWEBQQTYM+nq92TkcbvSS+1Kf5dHXoG2BDQHs4QyV8omkLMwJnGMzrW0GusNXc9aFK92nIawBCE/L06KRceDs4DTIKyVyX5QoyBc/BTLf3kGAbh8sLWWHUNda9Yzyvdgq/XDm3SF8gLw3QJma4K+Z1krbfaXgTegcjPA5bCVUERgjcIWGapboMzzrnR69gWBWUL5+lTD1yeQkUJOwZgoBmYL1jItWkeztQAdBWumbBWECeRlIRxRxppF3/CaULRXLjs+XfwVe7M7YfUe97V8aHVbQs+yjAVoq0wQvsXO4HgkfEcBeA0wh9LwmO63zlc4jiuhi4CrwfakHMs63CxF7NmFKWHu/6l6H76vdIJg5lDWgCwVslTHJowpqIo1kLaJA0fHAAAgAElEQVSCtqdlYFrjK9OG+folMIl0mGrhZ8+ndYeUDUVLXzWhaM9q6uRsZwDOwjdSLgpoD+4Skigte26V4cfOpKsWQPWA4xoAXg3CNwhdIl/pPv8awNVgK0EbUcORvGLXGXSvMI+3eaHTs08czBzKUil7QEYwfr1gPm5MvirmlgFtpGwtrDMw9er3OA5DGC1PIsLwtCCIymiWCUVHIexZNhSt1elnOwJwVsmiOt7L4fmR0HNPy9wYBPuAWB05PgfSM6CLtLMmfKsArIeZa8GLVK4GXQ+4GmTDE7Lo8gRp1rjqTVlFleXypvMLwpplFGWkbCS9Bn7SF7GypNTLpmvAPYO0mVnLkyxYamUy48O1dzzSRoSix9oOAByBDepmJgys+cjWbVG/Eb9nUD857rvnYy1fKxNJ7wFeI8xcC10LuC3h55bQM6rLlS4RDkOX84XiZQqZq2OuqKUyluPGcv3xIjx9dzRXxM800o+1a+p219q7RVRy9MZAgz0p6e5rRzOjuWmw1T4MzSJgl8fcb0YFy3wvXI5MzgTvZxsDuCd8Zbmowjwr6Vb7WdjKfG+C1w7g2wK/iJ+sz57QJgqBtxW6NUpYKy+tZTKWFo7mgCXCkC3lZBi6lEXlZmU4jM8nPzwtx4kpCGJue4DxCLXd6geanJTFzdtIIzohi4w6yK9n2bo1EK7pl28bArgWvjWmQVZLtyZeRfqEYIpmPVsADy43KseR9Bq4tQKxZ3vNNxXLiVUozIzAWwvdTOjZVsPWGLBPER5+1saAkeot51L5onHhxSQtBmRY5kQLVTyfVT2ftPWZKA/iKHwjdbLWAkt5Hj0mevU/mg4NvQlInfaYFR1tMwraSCh6HxDeAMAtk6K0+rJs79CzVU7CE8FUmgXoM1U911ee7xGAI9t3fcwVLwcvDzNbarcWupnQs6WMpY+MaeFn2Y5UsLxPEqLFBwJtKY/UsReiJqIZiJ/vRwuI0flezFK4WQjzc/4+aOnueDCRvj5YmxXtWUYZy+PoHVXteHDE+oWkVwRwZnJTL/ha9c9KupWvwTUKY01lB9V1BkI1dboCb4N2O4EXqd1W6CLI1k7I0tKipq395W2jsWEEWARkTR3z13p9vKuWKibS1hYnQSzPe+VlrQa0UR8SttE6LoSJsGpF8JRp2rllvIwH9lY1WquCeX1q6sMKAB41qzhikZfnhZ5b/Uvfko7SnNCz5wJBCNXx8nrDN3pzMAK+gTFerngRPCPgtZYd1Yai0bmsmzU0I1pCssWqZ03POxS258zpy/n+mdOUA1wmj5Sy2THbSF5pC7Xr5XHTbiJCnEGgrZkVnfGPrGZCVosKzpStB/EgAE9UD16tS6PUr9eHXupXS0uGnrsCarAfLU+mZyAeamOperPgRYo2onYtpZsLRc9//FroOaOEta0oZShaC0NboedSVhvv5dBfhKBpOaZMRG5omk/WMtXwvbHtQtMtynct1eyGoi2CW/CUZaw6GaXcyyJ9z/gq9iZ3wsrAN+NLqx+deOX518yb9VwRepbFI3k94VsL4F4+zfJ2uDkL3qwSxvC1Q9W8DC8n/WjnyCQ8tbqRbSj9WdD2WDAKQZc2EahL/dPjfy00vXjNPCz9tDPNdtUqSdY1tTW/xqIg9fJbVLMLYSKsgnnFaOjZy0dlZPuoXZku81A+KoP6McZ2BOBsV2T51sldVn5Ji6pdBH+LkkRm6NmDGT+38r2yI+Hcs10EXiJC4eZW8HpAfuXlgdsyAzoSgtbKyLW+9zQ84aocZ2dBIxifHm1cgfItNp9FvZyw9QTxbAkTGB+Wa4it39caYqtF6Xq+SJSvCam7EEZ3MZFZ0dxqlwh54W9pNXdR/ZcYRWwnALa6URN6RgC0/GbVr+VPwlhLSyw5KscRqGn1miGXrD+67WcaHueVE6ws8GqhZCvEXBuiLobBLMHbNwyNtpwk0idclTo1s6C18DPvM1K+C+CSA2KmiOWmHrPNPOj8uO4qYWl0rqVploFo1hfR3J81Hu3V18qGxoOJYhOyivValoR8e/myD55Zk7II9K/dNgaw13wEvpl8lOeBUqZp58i8mc5GXQQgeR7Ji5RvAmCwfPbcLTNXvWicV85sjipeD7wxMGMlPC/TZ0KWNG3bSS8Ebc2EjsyCjsC4+JKhaVnfAzF7Ufc/YtmSOT4sw9K1lvWRBbQFVa080bwNeZ7Jm1mPULRn0fJbLUvq8aVZetzIauCbLVe77Eim1U7E0vx+YunOgxb4scZuD1qyfgR0Xp3ewI208TzXVa8MN8t1vBK82vguTvfC0Hb4uS0MHVPDkfzM+l8ESF4+MvEKjwHrYWye/0qfg7gEtYlek7WevhcwFuPDZ7LD0rNyRn5Plav5Q2WIpXkhaFkmOjasvm5U0ApFo7oZQCOwa5aFLMr32ugL4Y0AXNusVy/rtzX0rMFV+glOtkJuZRULylr5SJ0o0GtvAqw2IjcBQfhq63mz8EXw9MLMHnhrn46EYBoNO9eYNit5TZvDXj4W8Yxf/6Ob/HGI9/Pz8/wz0UMJEz2XLOmdeJQTabWws8rLOrxeNAQd8eOVN/kSXRvMLQssBOVe0GsNRRfrB+ENABxpcm31i0zrp6d+NR/lOKF+PXjJ8xTIOqVly6DyIT/+0iI01hsBbyTU7M2E9qBrh6Lbws/ZMWAUipYTsrQtJXn5SOjZGuvlylZCf6ZwFzcERlg6MDYMQ9IjLRtKtupRwFfNTQHKD6ngYp4Kjo4FRy0yIzpitTDtA+EVARxtSgOkVz/7UryxX1lO+kftWepXHgdmPctmMqDV6si03vDtCeBn+kv1oqVF3lhvNtwcAW/LTOjarSl5Xcv4GK5V13vyEU+vmXSFQsvu2l8S4WQjT274MYOzMzY8X7JkQLjPdbbeasLSkXoZhTyzlh2ySDkvZkG5FtSyrV4quPguVvclGQjgGtcZ+K459ivry3zUhkVRxTSQ1oA2CtRM2R6QRnXUdDzRCs1wRuFmK6yM07wwtB2evr+EnBKOhKB5PVRWM60MmmzF28zOgi51IjDWxno11ftKfeWV1zY/B3A+nQhNRuOWHheutZrxXs8f
AZ88rSacbfl4mnxiUmSHLJRvzYCOWFQFexD2bggiVgfjQQAeHNIxlx3V1NfSMv49nwH1W5qyeO2BtzYtW3Y4gHHI2VK9NeHmjOL11G4Wur1C0NIHMu1pSFr4mcieBS3Vsh5iluHs+bpgNL5sjTujELTmh1WyTS5XImU/6VaLKtdImubTSuM+PNUcet2S2BEV7EE3q4JHh6Jr1gfHb+QGAbjGakPPlp+zkm75Lmk91W9gEpYFWguIWh2ZVgNTKz0K10iZRbo/0Uob60XQ1MB7LzduXLjk4b+ZMLSthiOG1v5K39o4b6kf234y8PQjwuO9HOIlzxrvlSFo/n4t1h2fLnQ9nRc7aS2WKz08p0RERrlG61sg1tKjZa1ylmqeWc2yJGQRFVwDQWnSf8Znj/ax7QTALaHn0S8h4j86nkyUUr+RMhHAeumaHyu9pi+oD4s8f7zXCjlr478y7V5Xh29kGVIuBF03KYuXleU0sxRkpkym3J5MbixS0ojo+b15prPx4W+dr3MI8ycrzZ0t0yMK00tHv7GMHw/QJV1yTutzekKWWZDlZ9cFI/NC31Z6TXtjbAcAziz7qfXlLQmKbrJh5XsK23irLSXrgRGl1aTXADyqesMAnsNXm+WcmWiF1XE8PM3L4zwrBL1Uy1r+6y2OTcaS9TJ51uSrcm7Ngn5tKYl3u9JC0NHdrlCeG2Z+vm5cdqamhZsyS7pZCSOzwBqFs5UXha7lxxs7Tk3I0o5b1wVHx4szELX6FCnbbhsDODvumlG/0ZcWuQHITvhKjP1KMKE8EmVqoBhWoAPrqL5yk63ioJXquC48jfP8EHXJl3la/isvH362VLGEkTX5qvjiY8NoXNga643OgLZAbMFWhqpf9ZYhaVT2UeH+R8ySLlY9OSszuSoL1do8L6ws02RdVGdmHFrasSy7hQrOwNlrr49tCODWSU9RfxoMtTZb1a82Dpzc8QoBS9ZB6dE6EZCvAmB7shURQfh6IWcExvhELRu8LbtkafnF0NiwLIPOLZNlpeItadbkq1JGwjYz1iuBbO12VepoM6P5+yXTrPRikVnS8MlKtWaNE2cnWbXkZSZolTSTWR9JBWvl620jANeEnVvVb2ac1mpXq++1abgpxxrzZRkPrNE8lL8qmGOTrazxXh/G8XFepJBf+flxYe6Lp8n8UneZ50M3si64WO1TkFAo2oJxJPwsQ87c0GQsPaycG5+GYFZcLB9xGIRwBn4Ri4SmCZTxxnqtdJmm1Z1V2FoFa3VqVLDXJint5mxlAEeAVwGy1MznaJuofnSpkSRYctmRBUAvHeVFoRn1YeWHARyfbOWt442q3kxoWrYz94nD0K3h59YwNK+LwMTrZWZBIyBbm21EVPEcxDjPfo34gRPW63cczgzunvV4JautFUb53CwgZ/IiE7q6qWBkEsgRQKNO1QK9VtW2q+GVANwKwtpyVl0Lpp4U1ep7/g2XGkRRvQgwtTZb4ez50MrM0nOTrVonWnmqNzqhS/pd5uX2itbKvd7GiALOjQlr4efSXmT7SSsMLZcUWVD1FHEkLI3KeptvqBYdF26BcBaypbmsr1ZljMp3UcG1m29YUI6AsHY9r9fHNjW8AoB7znKutZ7jzRr9KnxHoFsL3IjKjfqrVc0wj435MrNmOiPTw9B2uHpe1wc6T5d+X3l2CFrL08rJPP28ZUJJf0MPSUBLguL+Tgt/KK2kF2t6X04E1woXCy1TIopPluLWA6bRNi21G0lbGFfB3LyxYE31RlQwsmi4uZcK5vUp7WMQgCfKg1frijf22xp+luWlgkWK9pORJo8DjzyzgIfSRingaFlPNas+4mO+0c010HjvvWm7nOZP1r2X08PgvHxJ575fZedgtSZlST/zj6VtFrQVhs5uxLFUxb4iRttOIkOzl9UZzcQVu+03ZE71hRKOqtpeV1sL7qgMKqvla8O1XtqikRFPSiqW3ewj65dbto/l2v+mdsKKwrfGrMlXvdWvdgyqZJWulh6Gn1E2Wj6jjDvCNzvZKluOaAnje5qukLlfXhaly3xZZl4Oh6A9NWwZUpE8PRuCtmZAyzI8TbbxyrOXIekznQv8O0BXWm8IE9WFn0sTPep4NwLWWHBEdatjwZ55qtcKW3uAtCAdhWvtjULMs2nTNP1eIvpHiegv3G63X7VBF4yyGfVrtZNRv6iNRvUbUboRhYvK9VC5NXUQfB/WCt/IZKusOp77yoMXLz3SJ2XN/+J8eYzOX+n2BQLtBY1AzI8zM6CLH00583yerm07yftaC1fu88o+9YQD06ogXGMRxZup40G7WQUjp5ElSahui0Kuscis6n4W+Tj/IyL63UT0n3RvPQTFvZk3+Uoeg2KekI6Wj0IzUqcFuCaA+yrfzGSrCFAz4WZP8ebC0GNnQkvzQs+8PQ5RBGQMY/3pR6V9bdcrNKPZW9/rqV5rlnTKaiBcTpF5Ys2zyExoq05ETWvADqtgTcmidAk2DXSZdcLyWCtjtYesP4Tdj+92u/3BaZp+addWXYso2Ij6jcx0js5mRm1Yfeqofr08y3cGmrV11b7WLTXKQjWqZjPlXnl99oq+/11CNxOG5mW8NGRS8Za00oamdksdpHw56JBa9UCMNtrQHj2Y2Y6yVjUbTlVbZYmSZq0TulCdyCzpkh6aEU2UB2ZmtrRXJgvO7PKneusWMJmm6TtE9J372V+zZtOd2vKArOUlNgixVG4mveRlFbBlrb4WdefKl6gOvkRostQLdMgs+EbKvfLqniOM0u9vkR6mRuV4WXks67xFk7OmtRnTaAMR2y+eKV1r6IlLRMbsaK6EpUUUaMZqQKvVzUzMiraxcKI5XPN73H9LyVrrRsHb7fZdIvouEdE0/RI0Hz3YbFb9aukRWHpmTdaSsjFoGtiiyne0AkZ9SfsbF3ZGCvQL+noB2Yyv+0usUcL60iSezuvI/NJ2MRyKrp8JzZVgzQxoro4zM6B5fmmHp5fcyAMXrNnP87rz8d7ulg1Hr2E1oNfqojpe+Bmys3ZjjuiSJHmOfMhjC/Iobx0VvKYMHdRkhEZamjf5Clnl5CsPrFqeZq2QRX3L3AB48H3YKPhaZYiQarZ93etg//e3wQY3r4/T4xOuEJhlWXSOTIOyFYaWY8MSyN4M6PIa0JIjOTbL4Y3SLJiiusOtF4S9ZUKt1ntGdVQdP20SBbTjHutvazb2iIwFexCmZJu6lxUs0lSL+q3xXVtfU7/Oj84CYUQZZ2BqtYF81QDdgi94sMIa8O01cetern7Lylc6VsPzvzElzMvO03wQc4h5k6/KsT3pSt8D+vW4Qn+8dw5vrHTnY8NtKvdEF/oefankXenrSJ0WCNdM0tLUp+VHs5pJYJHJWSaHIhOiNJVLwXzNb1QF11obiN2Pb5qm/4yIfoSIftE0TT9NRP/m7Xb78Y5NPCy7dEjme+FnWSe69EhrgwLpzIXkdET9bgrT7L91ws6jyhDh2dF+er8JWfwvLyPTrRnQUtlqdS60VMEyHD0H8kshRx9FWPrjhZp5efR66kA7B/r36AuW+/UT6BK4X9L36EpX+no
B6a8fNwZflAZMqwpHa8uYapYUaVargLV8l2mygLWJRlaFWm3VbE/Z0n4diF063m63fyrlkYhe4YfeFlGx1sYbWUP1NZ+BH1pW/fK8zD+truWzti0B3zVmO39JX0M4fknfmwEuD+j8dpWyLG+b55X0+9tsK2ENtks1bCtflM/ByNPmIegCV+1xgvFHEfogRuuI50pXMw59r94Xj9cpYa4B90v6GkCY6CTqWyDedHa0Zi0KGJUzVbDcnlKqWM20GdHR8la+d8fQ4yaA6I3thNVT/fawqL/A26cpXK1sVAF7baK6XtudbLTyfbYDYHhP7w9fpHrRTcD9rdSVcEmTeTwdjQ/L89rZz3IM1i+PFShKr1WrPe0F/ItIf91IyDRefq7MUdpyIhuy8oCRxd7RlsJFYOPpGQVqpWsWHTeO9m1m0XW5suHaMHTUonAdM3N6BwCOwi4LWY1A3Jd27rUvj523EYEvCtqmsPAa/5ZPNloj7PwFfY+IXrD9gr4HIYpnSLdv3CHT7x8VBnU5n/+9mMCVqvj5dbgKKF/iy22u55dKLXY5LUPP93bLOC4f650rYx5OLmVK36Uibnm27zy8bavj+/cAKd2vF3VK2nxM+Gt1jFg1LxxdnqykgUxTlFYaz8umR0y7GUhbdjJWLVQz+0P3AHcf2xjAkV2lMvk9lh7JNiKTr4hCIYca0PYy7wbgjcD3i0eYuc+YsD/W66nj+1uJAc/Ll/SS5oWhieaw5aA9XV66itsJXFOuZ57/maV/a+b3emZh6FNk4tUcxq8yOAStg9h+6EJ5Z8fa1zSf1DWH8JnQmLAw0MXr5fGanmvhH+faU5Q0aGYtC2XNh7SIeg9PxmpdksQ7FYGppWCju2P1V8EbAjgDxuzmGMii6hcp3cjSI6VJCdIsaHspVa0/pJzvAL5c5X75BG9/+Fpl7m/HspxML/3Uws8R6BbgarDlgD0HhS8qdzlJGL/Or+dv0elyfQJZKmQLxnK8WIK0vGdIvaKynnHgS9V6oquYeJW1OYS1MWHR6NOulxPeqONy7gdZIswKjR81ws9TwiHwysZ7qODWdbpemXUgvBGAI7OUs/maUq0xVF9Tv8G2Ii8rCmate1HYR+Bs+loHvuXC3wO+rRO07vW0dJxWjs1Z0NcrBK4G20leEzIX1Mfn+elR5yYEWgEzB3IZy+RARsq4vLYCYp5eXq8GYk/lzlU2Vs18ZnPJLxOv+Gzn+/sfBfMcwqFx996Tsrhq1cZeR17Fa5ZFuZOxohbZeIOcfE9po3LRfrXbBgDOwrdF/Xpju7J+1J+0wMYbJS2qfL06lh9P5UahLH2tDN8sJMt4LxEpPtpUbyt4kdJFCrcAcQZbfqypX28SDa93Ev7PdzDfzjqQX+r4ughTz8eC5VjxC7ASxK/u4BnQvR41WGY7FzCjJUjYyvKju49QnR4Q1sRixDRoS39Rk+1GZkWr5oWhI3DzgMzLtJjlow+EVwRwzVhsYp/lUBkNyBb4tT4kJl95ajYCQlk+Wj+saqM+5HKjbeD75eNSeFLr9Fs3fH+rcLnShxO99NwrjYFahJdPl886cDXYakD2zLsIn1/tTOxiz4EsYfxSxnMYS4VL9IKu/uAFezerF+DnY7ElBO2GhhULA5XZmbS7H2FZCBdoZmBrqWPNT41ironULkw+JYk7RkBFX9Lasd7oZCxUzoMwKb5jtoEC1qylKz2XIWnU02zw+r6IMs368OqqEJ6HkU5igLHA93kOYWWDcF5Xn7Usy2wBX6l6LcWbAu9VnMtjXmawlW/38usCJoINnCvFwS3Ta5ZkFVXrAbWo+uwDIR6F574u59fypIvILNd5C5jWjZRUoSlV6ph1rdDUcLjN1jXB2dnTI5YT1ftcAcC91+hafs8g/yzOo/kyrWLylZaegaHmP+q3RnnLdgp8ndDzveh8rFPCKjNea4GyTNCSy4wyS5dk2DoD6OInAl4+rhuGrkwnka6de2aoXzrRUomJ/n46I1W8DE+jfaDvTeghaLTV5Kv+coz3Sme27CinhPESpKWV5VihSVjYARHNJ2VdLyeaz4xWbuKzqlibmNV6lfcUcLTss4I1g4woF4auKWMp7awK5vUI1PV7OMAmyoEXdcMKP7dAPfKSI8ujAn4ykE2HghW/KD/iwypD5MK3qF8UlkWqNaZCW5citc+evvd5WQ+Fm2WoWapdCF2kcj0FjK4FnjArvLNClbJ9CeQHrKfH8SxE/QhPE9EMxHc3LxDLhy5EQtBR42t941D21v2+xoBrlPaj4ixadEWbdKA9oyPKMwpoWaaXKrbmHZiTsbSlPwiOPVRuq/LNtE30hnbCisDXspa1v1L9Wgoa+Q5OvuJ5WZWq+dD8oXoppYv+3X80kUlXRGi2sL2pRQaukXwiem5X2Tcs3QBeBFdP/cpjbTw4YkgRSdCWPADdBYwfaQXGZ5qDuIwTcxDfm7o+3rH4rlmljpw4dSL88ARuMaV7h7DnLzz+K+xKr9CzvjzpUwymCKRIhWoqODPGbBn6/p2d/FlBT3VmZixbgK5RymN2vdJa3tCizUfKabQiioWbCeRp5Z3+RFWvVseCrNaFDNxNpSv/zR+ykJ90hScsaROqauH7pZq3hDPyd3/pywldvM883KyFmk3wcsBq0EWh515jwREVjGBb8ng+0QzQCxCfiApeZGh63iW+dGkeXkbqmM9mvtLZeHiCNBuy5WETFmS/YP9H7X7jcLEnZV1O99+ZtlVlsawq9q4/rSq4tKX5C4lGLQwj82smSNUuSYr0tY9tCGCt6cxSoB47X0W2oYzkEwZeDYy1JmsUdBTsMG857ktEqRnPRP6EqpJnzWZugW8EzhHV64F3Nr4rwauBmGgJXaSEPfUbGafTVLAE6sXI4wqYpz/K8/C0DE3fm+VjxKewCvZMKl0M2q+f0F4+Bele34Ps4mEMwK702lVsVh7uluWMB8tohJZHIJ+XQ9bj6p8KQRPZYWhy0pFj7zxqURXcF8IbK+CIjeqi5VeDuBN+jjSZgaJMy/qW+VSR9zAeeob5Bsx4GVSe59nLfawtInW48vxX2/XwReHmxRivpXij4I2qYO96YF285TGHrlY32Mb8mWhi1vSAGdP8EYscgvP8C8wraXw/7JJeVPq927pCRnthL8qzSVlEr7HhxUMb2Onis4rkkShHwbKtFvYfAZ4VWvZmR1ttRepY1g/CGwG4h/r1/JW0lcPPpUhW9UbLegrYg3k4b/mIQTv0nB1jxflafWu28zJPKud+ytcNN0vQaoo3MhZsHWscQCpXlpehaKmiAmO/C2UmL/KP84l0NTwXh/OxYf68Xr5m2Bqj5eHoEy23orzn32dUo60ridBEq7tqvg9f6MoYQV0avwGQS/g+FyVsbVVp3Qx5eUS2WrbgbEEV+ZH5i3raYwrVCg/LAjM7thxVwahsnW0A4F5U2jL8rEy+0u4DNNWpAdRTyl4bnj8vT1lyZIWeiSxlGQemVh+FnYnQMqQ4fOUTlMq5BV4iWqpeTfHycwldMtL4XyI7/GxdA7QLpoS0NetZgzFPI5FOy3pFDb9APJ
+kpT0i8QVNfi73jZagfYWT9f2g5xAu21Zam3NY6jcSmi7fs+tJqO/Lmb51vj4gXN44sEEHf4s0SMo6Mo9EOV7fgrPmJ6K6TfN2xtKcjwhDe/3rbysC2GtqhPqNnmvtJNb+RlxbII0o5oj6zShjqw5YckREqUlXGpCzwJzPaNbHdL01wV88Lq36mHBM9aoTrCLglXUI/PWUMOKAd70pnyuHbamnQNOEMVLF3AfwySdq3Y0HXjmIcxOdylpgBE4Lwlf4Rr5AKc2CrObrlc/3z77M1T+aFU1nek7KQvC1YBgBNoH8MDQVH6gvbmXvLnFEGNoCfQbeNW8Y9jDYaprJqt9IOWvzjsjDHIKvI6NekdsW9WvlWRB+pj/UL73Gp9BmG7XwnYeZ52pWA2akLTtEbW9VyduZ3QhYY70ctlHwjhwH5ibHcWV5qXa05UiyX/LCLsFrgZj1bbrcN/Tgarg8HpFO9NyrWQtJa6apVw3C2uYaWrjZgqy1RvgJ3WUlul5Oi3kVn4n0ULQFXw/KWl7J9xQzMut7aN4AWGFoy6KKtPjroWA9H/WKeyCAM6577JYVDSdH29T8GeHnFgUbBXVW/Ubbe/59hZ6JCIaeNbgS0exvZJyVT7Lyws6WSq6BbyTknAo3W+BFateDrheOlnleOlLBUjWV48j4r6aAlaVKEhLqJC0jmivHeZfh5+89x41liPl79AWdaD47WoMwCjdbkLXGfzXlzNcHc4OhaCJb1TOhcfUAACAASURBVGpiDKlcBNpaBaxZikk1D2iQgI002KKCIxDO2yAAZ2YIe1tKyjIRpcrLSYVbo6wTNwg93tEaBSzb9wAN28EbbhAR2GzjoZIVmGbgu/S1DDtnQtTasiUUgq6Gbw14tfCzVLkIuJ76jSiRjArmalcb/9XK8dcl2XOdl0dquIwNf33iS4r0cV6pjLXJVmWcV8ISgRVBsway95eshbQvi/fnOR5cEvgGHfJzlNeEiArWQJtRwAj8VhnV+AMaUCfQ3YTneMQuWDX2JnbCGrVPdNQknHmadRxwGwGoBsSMb5mOui19qtB+0IaWG24U42qXX5DkxhXz8kulrPmSQJa+UYgbtXNPW7bLbyC6w1dTxTyNRDpSx5nZ0MXsIcjdm1TD1/MrbIugyU3L58uS7uXOi+8gqivr8bq8XhnTJSLYzitv/jrkgyBKOaLXePDzYQ18gw4JSE3RRrmFgB4Bp6bCvfI9lHXKUjI8YP1BvjGAkdV0KRt+rrXE7GdkFpA1QMt6XvsI5iFoz9Uv0WsMOLLkSB/3zS5HyreDlLEcE5Yq2YLvF199joGXjHwy0jQQkyjH//IyBPIsQxdkKwxtqV8CaRII8nvG3wPjt8Ih/MVX34BZ0rEZzcXkmHBRzXO1uqyLYC5VclG1WqhaAvuefnqA++XrQic6abOiWY8Wu2TJz6mkES3f44gyRuWyYOaWhXQqDJ2ZfIU6VhuGzrYV681GVjORKhp+9tpCPjvMfs7AWNbV/GjqNaKALfU7K4O3m7wfeypUbnBhLU+yASuhiZYeefBFMLbg+wV93a56W8GrQTc6ASuqfr31v2jcV8JYu/Aj6HKfnj3eH2tc+BVuRiCOQRiVlSFitBsWClsjyEbV+KIseGoSEVfCM2c2ZC1lHFHMBPJRe7JfKD9kslPRMLTML+e1kFw/VL0RgKNLimpD1JoijsR3veOASUhqsEQqFXXHUs2e0vXSiEiu+dWecmSFfGvOvYlUlh80czoDXz7TWYVvuXJzmH5FGKwczEQ2nBGIPSXM82S6laYpGu4vMu7L/3oKODD2O6srypdx4bstJ2fdoXqfUDWHLoYwAiKHbmQMWM6M1kLimaVNvOyVzvffGn9qEtqmUgOtl0ZOujQPoB68w+Vbt6aMNCyhHJX041XwBgBuhSrRvNu14edI+cCTjzLNRspbCjiifrU0FcKvpQBl4tXzGEy8siZOZWEslTFWu7kQtQXf1+YdDny/ppzqbQWvBt3IBCxP/fJ8pIDLuQVdFIaOKGAJ2qR9IppNzqJv0wzCV0K7Wi0fuoCWFfHZz0glI5ByBYsmXcnw8jx9qX552aeifgD3cnntlrWYkEVkw5dbVBlLa1W2mmIOW/RhCprqjUKydolRHwivDGALvr3Vrzw/g3wv/Bw0DYzZep4CjqR7cJ5BWN9ukmg5iQlNaOqvjPExV7eRMV8d6NcZfMt4LxHRxNVtFr5ybNgLTxNIL2lkpPM0ctKKoQttASuRvv5Xpkv1aoFXCztLMPPXy88fJseFOYRftoTwlc4z1amN1RbLzoz2AM0Nl+W+5qFouDa4PDFJquBiSNmSSOuhgKPXtzRw5Z0BOs76qbEMWNshvCKA157x3PLSEO2S1TW4RkGNFLCW7ilgDcwP48uOiF7ql5u8W+djwnd318UF6ATSZFltLTA/1vJkGxrQeT4RzZQvEXuIQg/4SpUrwawp4RYVrKW9BZPXS3D95BAu24FKCF9ZJf5QhGJSsXLoSYWK8uSDHpaTsvB48L3ty+y89O0K+ilV8NPKPtFEmDFI2ZJIyyrgWgZaBn1lw9C1HdJ8tIC0DcIrAdiDr9cNL/ycNeTDUuBK+LmlWZRuwbklzKzVYROviHLq1wofZ0LPlmq12pB5OGTtj/l+KuFmBNivHm9MOeehaQ/aSPUiOJMoR0a6PPbCz6hc5kEMUmldRX2prs4gTapdqV6/EvW+Ivj9f0L4q8/0vW+L10JziKFnBEsVOh9/9fMsNS2BXAC+GONd3KDOVfA97QJV8GxzDvTc4F4K2Kor8y3TrnVmfQlWD27eJhyZTTpQPa1fXvm4DQZwBJCoC1mwFh/e5K7a/aYNt5rSzdTlXdR8a/WIsB8XzMtNN4jm6tcLL9/TtDFiHKaOTbSqHfedb7pRDd+vlPSIIpaQRTDOKGCZz9O0c2TaRRdBlf9F48IyzYJBi4n3REL4dLnS1w/GlolZ3ObjvPMdr/i5VLMy2nP/e3p0yQbp/Zzt+Qx83l/SS1HD9cqzseALXc+n5RaVGlRbFLAGY5RvmfWdTAvYqFqtBW62D1rbRFkQDwLwRNtushEBbu3NQbCa98+rG0lHIJZ/NXWtbLpBNIfqvSoOA0sFi9b7toz7anlylys5Przs24rw9RQyKfkE/lpKmOdHTKpgTQFLuBI71hRwOUeqWRpSxt9m598GdfgM6eL+q/ujDc/n62J29LyqDsFyvlwv/Do/0zz8jCdZ2Up3NsbL/KLjZxtMBc8mZEkVjKBqgVlTwNIP0fJz7M20mWX3hi75WfU5Igwt/RO9kZ2wIup3ZPgZpQXekp7vmgXmWgWMzmdll5tuEMXUrwQsz5P1ahStVs5+0hEOOz/rsQlXRAC+MrSsQVWbHa2FmyPgjUK3lwIu5SMKWEuTdVBomavjqziXxsPOSgiamzUxKwO6SJ5Ut7Kcp3TvZVCdMp683GHr2fZZjFtrzwyOgFZTxVYdaZay9sxT1jPzNuXIWK8wNFX48G1jAPew6EtAwI5svqHcyXhK1jJPCUfUrkxLQdlXvxrk7mVsiN6b1MLQc8BHx4cj6
4l52PmZx5Qv3GBDg28Uykgdk1GW5xFI539lunXsmVQ2UQWspREtweuBtvQDqdykaRCWm2jw89MDfEXdcvChMDLREp5auWIWoK80n3Rl5ckdsooKXowFE3sjLNAiZWspYATn+QvFbVppIdMg56lVqYgtWLasN+4L4Q0BHGk683CEmocvWPnBt0YqWE2tWtC1ICq7orUj8zXfypaTRf1as5C1sV2Z502Ykmo1Mj6sgdnK+/L69RO+X37dGb58kw4PzATOLWVMohz/y8sQyNOMfye0UHJUASMQo4s/Ur9neqlc/jqscLqE9qMdbXb0EpLlfL48yVpWVPKWs535uO1F+EDjwxHI6yq4gLlMyIJjwfcOYbDKPJmPIIzKWMpYmsco1VftIwpbrAbUfVvfkWXWCY9oR7ZROfs5o44RZLV0C95RH0QkZz4Xs0JpUv3yckgZyzzuh0Oclzsp56htBPNXOjt/jPkSEQajBswIVGvgK9N4v0ikkShPIk07rzWkhGrNWgcs25PHPJ+rb+BjerRVlpRdTsslRVxV3ovP1a0MI5cxXzl2W8qVdG1p0pW9+OKHt8v7YbVNRE8VfH0sSZqp4Au7jmnqVeahsp7glOmk5HW1SBhaql7PegG9343BRgDeivud2u3hxlLDEdDycuhYwreoXz7bWTzrl0hfVxtVv5nlSly18rIWVPFGHPLhCnzSFb1CzxKSaMKVN87L84mlkVHHA68HXQU+pmqUVspqy5Ak7CSMkZpCypf3k+ejsny8V/PD+1pMwP2Foc/3CVRsTw45oWru5vX94qaN/75mQsfHdLn/kofWH8snJPFZ10T0jFY9H9TAN+YgwjdOmsLVPk8iXF+7OZIm62o2DN7aG5B5RGHLgx7ytgEJtSatyVda+lmkRX3LtIaHL3hpPB2Fj4mda/4QaFFdBGnpV+x6RUTmlpP36hEF6oeU8XKlJWQ1oOuTuvByo9lTjSRYvdnOCNZovDeqeqPg1ZQw0Ry4GRWshR+Lz5btJ2UfeN2MWVeigK/zs/7n2czoe3UM1AxorZnQ1r7SHMh39fw6jyjwMiOaiJ4PaljsES1nRL8cLT8vCVTELAL5EeWrgTxtEVkeWY7U2l7vOtjLilbbXLaeBeTIE5US7SGoWrBFdWWTFmhRmxZ4n8dL9VuOi0nlev+rAVFfD8zL8TwJ0rm/ZUhZg7tZR5t05alaoiWUeXoUvqgcGedI7co0YmnyNx9RwVL9Fj/y4mzB2ANxFLr8Pcia/J4zP9NXROfHOLHcLYvfQHLTFe0StNqMaRl61lS2Fs6W/Srl4POH5ZIk/qQkBEkLvhLCZNTlFgGxZyaoa5cjbWHtba8IYKupqPod1T7K7/DwBaupCPs1uKPyWjmRbu35zI+XqnWpTl/pesjanjSllbOXLan1xVrf2VONOBwRUBGULajKXbK0chHwSrhqKthSwFpaRP2WPKSAuVkgRn2R8JY+L5SbEf0VK1/C1+IpSmciul4+38eEHzOjX6DFypWDloORA1MLMXP1bG13qU3WKsdynHgxO/p0ne2ONVPB6CEN5ZhIhy+CqaaIUTlp3TmYWY7Ucxw4E4Zug/BKAB7RTNRnZverRPg5Y5Yi1vK0crKOLIvSwfN+73+vs6UOUv2ipUX3fH3SFII2P8agX8LdB/Ny3Fc+2WgGQQ2UCLQWfDnMiTDMyWiPRBqBOjKNn8tjdK7lWQq2tIWgK0PRsp5My4SgC1S9mwfehlbmdL9t/pKILqeXEr6K+Q3fEztgZUArFe18VvQrpMx9yZ2uuI+MCi7rgmdLkoho8ZAGSwHz900CmkA9EumeGOXtaRbildXBSD3vaUlEeEKXZmMgvAKAvSYyajez/KhXmwlDEPVC0JH0iAL2yj2MLz16FcWQndUTFwoOSJ4uw33cN68392WDWbZb/srQMxHhSVdES0BKVWpBUqpYouXvTVO6FpxRPzwVjNqOhHTlTGJkIqxr1kf+5ASviHG4y7YtAJR6/Bp9Zcd0nxl9Pc8VbVk+JCEnZ0ij9bmyzlX4KuU1mL5eht6uHP+9ElPYTAU/J2MR0fMhDfx90t4/nq7BmkC+51uW6a6GkfPaCVIjOljncyCAa11LONb6GVxPKtcMeLVmPD+R8LN6rE++smCnjdsWCy0FYup1WQdvuIF8ayHqqnFfTaGW8LIWTo48K5iMfDLKk5Im8wmkeyYvtFb4GSlZpJrReUb9yv7JcDR6cINcD3xZli2haCJS1wdL0MrxWRQ6jq0NfvnRJm6hMHcBrYTzHOb3PaJnk7GI6LkkyQo9y3Si+edNII+fEyiHfGdMrSvHgSOOegO1JsydZ85KIWjNem4nGSmvTcCS/jo9/QjVt8LQ2rls3wo/L9p7Tb4iosXkKzRxSk6okiC11K8MWUNgsmO09tgCM9rYA477WqDlEEVAbYGvp3o1EBMri6DMz+UxiXLctC0i5bEWauZ5sg46r7UzLQGLTFPdIq1s0nG9PB9n/wxFE73Ggy3Qag9M0CZTSRXMzzlYZZhbW0+srUGGk7HQs4IlYC1Ao7JI/WoK2cqTloJ2ZivKlq0qS8eGSXdoGwPYM969EWHj5PgvUrvROkTLehHQyr8ucFG962LnK6IlWMvxK01Xv0iVzutpY7rzdiww42M79AxBak26siDbA74ytK2dE0gnkUcij0CeNKRkLKCSkS7VraV2+Wv/tki3IIsu+tF88b2fiOh0IqJ7oJZOp1fl1/dyORMaPYJQwlXuhCWXF83LvX5vVvia9wuFqMv2lDwMfe/co35RwREFnIGwBXFu0RuzMOMswhdwZp6O1PLgBiut3jYEsLc2N+MDKdxebQRNwtkDtRaClsea0rWAO6snFC8LPxPNJ39okOXHHKqyDgemthtWDLKR0LOy5AiB1go3a3UivjLw1cLTxMoQ6eBFKpiIbtGL2YVo8i6Q8kKspWVNPmAhonR5nyI3GPyYpZX1wdcz3W/WlKVJWiTmlTYHtdwJC02s4u1o48Q8fF3a1MaWnyqahaGLPVUwf1awBc+oekUQttJXMa/RSKc26TjsxRuzSJdRmcj2k4m34yz+ec1bKtZStlYXXQX82veZSA8/S2De//pgRuqX10Hq14O0DGujtFmeDD1H4WmFqMtxWWqEHl0o94KWO2RpfmUeAqwBXQ7cy+tteNo34prySSpfIjoz9TtZcC3pUuleQNrVKM+ttGNBWNY9if6dlOPLMo0vTXp29cRDvwboSF8eNB+b1ZYbLVXwPX0ebkZwXmxJKcD8CkNflk9JqoGnpZJlPeSHQPmIqXX5OPCaD0mI+OmngjcC8MBn8YbM8h17jmNVc9l7hwjYk/5R+PmZB1SBBmY0uxnV8SCLQs859Uvz0HMxCTEJQQ3OBI5lfelDgtCCumxbtofS6AVeDl0JW2QIyMXH+fTyC7/1/AJ7BemyLDdwc2CqLl5GtkWi3pWWF3/U10eaFopGG3TItcEItDzcrIGSH8uJVk+IGqCV7czGi1kY+nJR7nSs0DLKl+nyGOVrbUbKFdtehFLfXbVytiMFbEG5JWzc4SV64eSa5j3l
nAlBm39f6lcLP2tjvMUQmF95y0cRyjq2mr7AOrJfi7+P0DPRQ/0S2eoXwbYmBG2p5qKCySiH+knsL0uT0OUwvYALFwLyJ/Edu1xeYdlvLq/8UmwG4qJkpUK2FPPVyCd6qVNNIaNJY1Z9+f7x8gwG5+s9DE1EdL5eoQq+u10uUbq7wsuDpPLV1g+jCV0WaGU7i349wtBEtHxAgwZfTeV6EAbvpwvXVmWsWnRDjtLJUXs79/GzAYB7jcFGn2gUqZ/c/zkCVa+cVk/Lt4618mi7SRB+fuYZKldTr/M0bUOOum0lVVV8veY23LDSJGRRuNk7lpO8yKij5dMrj4NXg66ELQpH83T+bHcO3kXa9VH2IsaMLSuvoWb5UcQ3N/6d52uREdhZWosKnpfT94mW478ItMWXB1oZukY3AOoDGi7sNkoLNyMIk1JG+tL8aia/RykgR2R1bYjaCsOsI813pIC5ZbpVym40AQtBGXXfgyU61/wiBbw4no//yq0n738xTLMTpDxf1o5aqE4ps2z7aj/lKApf7R8KN19Jf2oSKW0Q6W1K1aso3gJYBF0J28h9+DdX/O1HMC52JhCajqrhiEVuUDVFbIFZSUMPbEDbVEq1eU+bh4vljlj3MvMnGCHQ8jaK33ud5Q2ABm0ehiYiPA5svUcWODMhaA3e3cxaD7weJONPTsrbigDuEWKu7W7nCVileLRKLXwjPlA+CD8XQ+Hn+zl+AtK8DA5PSzBLv7JcVP0u23/40jbcsP4RLSHIYYng7ClfIv/xhQi2om0LvAi6/Cdf+/NHQEYwnoWme8K31PfSpX+kdPm1mNcVac8JWef7EEZ5dvB8VvJ8i0lLBfNjTQVHxpIl0LkKRuk8DE1E860p+XOCvRCz97eUle9rLbwtc8v3nIjVc3lRG4R3qoC59Vay8iVzXwMnYFnp0RC0lz7z8fjhs7W/WvhZG+PlFwAN2Fp9pGqttmUbnvp92lX8lUqWp0WUcTRfg7qmelFZusOXj/Fq4C0/cflTz1zjVF5KRc1AXI5nIO5lkXHgiNI9sTSpyARUeChaquC7q9f3MzM7WZsFrc2olqBdrPcFftVxaL41JdqUIwNhme+do3T+3hPpnyE384tsgXXcgxLi7dRDeCUAj5pg5Rl/eZXtoBBzpEmrTgS2lg8Ugp61uww/F9OgG1sGZEOzlLHGfmUblvotKrn4Mreb1OCnQdWqq5XTQs0efDmwH+ea6uXgRUr3AtK4WeJRs7KdwbN/qAwDcTcIR6I5GgS0i77MA2klFM0f1oAnO+lhYTlp6t6Es3RI1EGg1aAt2y5haCJaPif4uUEHLSEr3x/5vloqNgphad1D1NkdseTErEi9LLzrIPytdI20ZcHXck8w+H7CgjGCoszX8rS2vPqWL+PJR1qY+OXWV7nzGco6mHkaalv6smZHE9FL/WpKl5Q0LY93T1O2Mh/5LH6t36wC38vlBd/LdQ7fbx7/eLMlTXZJNo3yviHdJ/+rjUNfro/+ixsK9+YDvefoBoXYMc8nUQ59DpE8es2aP7G1wfObPfQd9I95ffuG9rIoz/uBTPZptnsWeMCKadHhLHTeenkedolea1vjvja49VZ1q+3dLNOyzxPuqLojXyovhKyde/WD+fLJR0Q2NLXj7NIha+MOqYpln+ZjxK+Zz0Rg2ZEGAE/VklMueyz9gnIo5IxUr1S8SAHz9B7GRRIRqWqYqFIJy1AxcoqOS+fQsfzuozwJ4TM9lyWdLtfFFpVoj+ZybE2acpcOAaWsqeOIaj7RhehEr4cyPF/345Msu2KVD1Z7z/kHfwHpsj7y1UPlhkRnz7By1iLKm5wycxsE4Ik2fSxgug3lbTAVppPv+bBCzV4Y2wxBvx6+IO+IrfCzHL9F6bKuDeZ5+ciSJrSz1rOMtukGUrpWqBmpL6SyWkBMOK/AV4ac+TivB94hY8CWsa+QnKRVDsMgtkKgMnogy/DJVwgY8m8px8uzPDkWfD2dH125En9QgzejGY3lWqCVvpZhaw5wLTQ+3yeaiJbjwOV9lRAlWr4f3KJ8iwC9CyetHbFaGhm5wcYniv4qttXfVdYL2iu89NYxYJSugVrkoUcPzt05oV6QjsLHHJZyuZEVykMTrlAbafXLL+QS0giYFkS1NK0eb8uAL59oJVWvB96RCti0K9H5hDf7CKlhtGa3GLpgy7LlfdWUrgSNV15RwdcnEM+z7+W9K7FJU6UuAu2rDVsdRzcCIaLlIwrRwxmQgtVg6aXzc/neyvooX1qzekZjtz0AiwD/rh/GMLo70v+AGdBeuDkKXwu0ZvsXQuO/xbRxWqx6/WVFWLUud8dCx3K50TL/pX6JCG+6QbSEY+YfCZ+a/wi0FUhL+MqQM4frSAXcbCAkXba2dCHMv8PcDwczL8thgcoiWGt5SDU/9pc+XWj2oAa+LhhPjsKTpu7d42p1DnBvTbFUxy8/WDWXtNIH/ojClwIWm3Ig82CphaaRD68N6bNY6EssnURB6N1FyLSxW09K27ECrlW68glJLb4oF2r2QBlN99qzQtDC+OYbSPUiNaulexOrMEyX64OhytWO2eMGJ+0CK4FJ7Fj+lWmWyuXHXphaAXMEvhyuWyrgc9SfADHcyMO75vE8ra524Y6qOQ0uj5sA/qCG61nOXL6KEPALlpHtJYmWM5rl5h2aOi7taaqZ94eI8CMK5Xss34dsCBm9r6ieZrxbq941FlsXrFFbYRZ0D4sScJRvekEPATkCTC9NK2OBdpH32jVGmxGZgSjKlxOrMuZtUKAeP5YeEdESnsU0eEbzyjEKV6N8AumVypeUYwu+1kxo9E+rx9NKWU2Ry8sXf12z2dHyPUI3KTzP+yy9zyyaZ6SfLtfXzZ74HqLZykTzm9SMyaEZ6UdLl+1qv9Vvna+vMHRy6KrJhsk5S8VvoSH7zV3aSAH3eAGl67XPEO5krfCNhKqTlh3/9cLMxTLrgNFYsPTphr3lxhvowowu0OXYChkjRasde6FnWZbalK+nguVLlXkt9gn4hqawxwxFF5Ukw84lLI3gWX4HV3EeAUrxK/2zCVrT5T4ZqzyoYT42q81otidH3bu/DDF7m3rc/cXS0a5Ys3HgFsuoYK2Mp4i1dilST3OeVblrPuZQ974T21FXIhYJTWdfkhei5n/Vi464mz4vIUeUU8JnWM9+hjAqg7e3vKjliQhPvrLCyQiu/K9Mk/WkLwLHKPTM6ykWhW9UAZNxzg1tSyC7idJ4SFoNTwMQQwjzcdirku5d3OXMZl4W5V0UX+X384Dz+UrP5wXL7Sk1oKIwtBdiluFjOd4r61rp87d2/ozgp6GnI2nviwY/FL5Hx7JOLYQ/mL0x6rWoXUt2AmuNaEeUrQfcdNsMZmf8zUdhYxlORmFmLRRnqdmXD3l+WdSFs6f55CuimGq14KupX2LHUlVbbSMlTFj9WvCNhHyRCo5MyAqP61ZY+XXJh0OU5ww/ISyhOysMztFnI9VssRPI49Dgvk7imIHiHmV5LUl6gRMv/7HGe19dw+oYbU3JZ1VH0/nNAbdy0w2fjuRZRP1qdbJ5VZZ
RrVpZr1ORNvqMKW8wBryX9cHclC9o79sTDb6ams6OAT9M7owjoSvDxjI9cpzZvYqXR8Dmdefh58/z8LMGzStIR2FjS8ly0KJ8WU7261HWg28xDt+LckziXP5DL4toCW5ZhvcFtUOBY94naWU3r0Un5PtJIp1AHoF66HPWbsi0dtjxxPydLvOZy0T6mK01X2JxMzn7bSyPz+ImOJI+a/MxzHRW5n6Y140ekTrLhyZGovVD1sqMzAvoZ29kEpa0nQp3T/22+HDLz/d/Lsa3n/TGc3n6y33sAgLHcEFdlI7Kn9gVfJIXXCJdyaKLMa8ny5Y8DdISzhda+gzCV4LWC0Uj8PJuEciXW1hm6pZ83l95TKzuLF+8biIxKUu+l/z9LCZvcNDnq0UkeBq6mdL+PsqcrzR7yMf8+3xRgWqBVttwBrXBLZuuTgazJmJZFrmOtUL7LP5VOcnWW1vY+bYTAPe6+0BLkLyynawGstF61hiwVkW5E/bGc2X6/Ny+IHhLl6yLD5ywdTHCz+hv5AKs5ck0pOBI5Mt0IrhRxSyfYvAtx/wvgjJPt/6RqKelE9n90tIQhBf7RhdDNzqWCtY+b5RmfTf4X+Sf6DHj3gadHsVZftf1mc7L30wkfZ4m1tGz9f/qvtDq3BHSrylZcFv5XbXT1kBtb39lAI96w6xPtbFN9KXJKNxaMGdMmYD1yvZ/zDLdK5MJv3lrjVH4eWbogmuBlJ9HL9paOQvELJ3v8aypXxSuzcCXn1vdQaFhWTaytInAsexn2CwVzBvRGs7eTGk++Tnrx3SZ/3y0YRtpFmi1OtpcCqRkZbqmdtUlgcpckLBFoJopX20DHhW7A1sRwCPgOzLuD4pGYOzlZSZhRceAhWk7YGkTsOZN+3fk3mxmlI7AjNo3Zz8TOObn3oXWCmnKNA3EUv2y+nybSQTfiziOwFcqVQJpKJSM8rS6KK2URa+BRH5IBUtDNz3oc4p+ZrI8grN2oyWYxoc/lkvkLix9fuPIYYyjPRcIZQ32LK1rSQAAIABJREFU1k2AekPQOgbshauj0bsWEKfr8gq9JupKP1qn2hi0kxD0WrbyU5DW9OU8gGHZJL7I3OvEwmBeiBrV0yZszZQDmv1c/krVql28LWVlqSmZpoQqeTpXv0R2GFobT+XHHHpEPih599A/6Vu+HKSyNSXsjQc/09jNyLMyunGx3mfl/VbLeX+LGQAv48DnK54zcT/2fyvFtAlb0TkYyFRgi/X+z/kgbIOeaquZZFVzTes2HrzTeULCdgBg+UZpkBwVvu4c2rAmMGTUr9eG8yX3dsKSx/dz/OP3drC6d2OpErJhbDn+OzNDsbgKOHLRtvzIkClQYtbEKy0EjZSx7IoHRw2yGpyRD55mKWHeBlLEzzz5vnAVbIX0NeVqhZq1PPTXAb33nGAiHai8vDzWFa6MUAUiQ4sb5+Vv9ny+4mWIgevGonwmfc2yzdazsXo2rQTgrR+W/DbuhmaWCQs9TM6ALmaBNgJd6SO7XKmcZ8d/J0kO7WJNtLywygu+Fmouf7U0TQE/8qX69cxSkURLCJJy7kFXS9eUL0+zIGwp4gurI5/8NHshFnhlOS0Nhaa1Gyfthg3kRcaBI8uUIBiNdNROOcZha3wDvHTufDmt4S6tbDbvMNV2oIB7mzflPPBNaZ4MlfDXY6xECTHxkJQ203lRJ3FXzn1b6XFIP8ozBfI0pH7kuaeM+YU+o4BlGZEfVb/oXeIgjsK3nEfC0KSU56rbm3jlhZs94888RjcxixdA4q8F2ojSJZEn64K+nC46FOfpy+84qufNqJbp8thqY1YmMgacNW8uS039JvukHHtl92crAHjfb4BqI+/oeoR2FDudL/pifK2OA1o8O/OSSpdlPDNfgqZ0Sh6q6120IxdscPHOqF8iHWBIDWvwlQbuCcw8bTa2dezdQCBFL98bGIbWOinLRULTqHPoWNYReXI9cOQmFOVHbnJrrAbMwsG2Zd+sWu6/2uYNKuAVP70eX76WiQjeLOhZHfwjjO52hcpm/EWWaWjLLGbn/KptqRmkngica3XQxVu7KEtAP8rJ2b2e+tXGgC0FGhkD5qaFo5Eilm1KBS77gPrupS/C0FqHiPThAi/ULOtbSleeizy+6YtcDicnYlnL7bhFlg5pM6ctH94cDlUNv5zO/2ppqI527rWVrZcyC4L7pP5gAHt3Bft8U1azHi8/OQkr7jYO7poyZqjbEsjWeF7kgq7V1epZ/okWuz95ZoWjiTD4EAit8LKVznmmgdwKRWs3EtIWfXmEoRcbc3jDBlYjqLyndK0Xysd/xTBIZC3usy7p+6ZnQVoFZvnbLxOyel9us/60iandN+iwbGRENu/7DSpgzTYMdbcq5ZovcqJO7TiTnDgVWdKE0qNjx4sJWNwiapdoCc1o/eSFXJt8JWHnzRi2ZhrzblgTsKRpQLRmWaN2tb4RSEevE4WhnxYBo/U5os9MKyvbc/IyG3JoE7GkRTf2iJZx+9V7LDirdIcNs60p2sYzZSCA9zb2mxm472S9x0+cMt4a4Naxp3lX8OxMLV2WQcdEQnlYCrRGSXmgrbmQlyJXH1iaRdSnNG+pkTzn9aR/S2kjQGdfn9ygZNEBdI6iENGIhqeeg30owyGRzTOQxcrEx3Mj/qRpKyNCKyve/ZhusZ48yPkaBOD3uW3Y06K7wWh1IuUybQS2m2tRui0TS/j4WPTisrhmWBdMK89L1wypLMVPJvxMlJuEVY4tQCrdWuRrqlmDMDq2YCzzYfmHw5v1nmbvETUoe1GQTN7DNFhGZ0K/zpeNReZfaP5meafsG/jsQAzKkfw3bT1e3CeKMnBnIejM3UPkwQsDlO4bWJrcOv6baisYclvm+1CfWebi6YUm5UX7AtKcNso610hXLOhG1W8Gvmg8WJb1FKylgi3TQulmZ7RIh5WG5H3xpbWD2lXy5DyE7EzomrW71uM8m5Vxj92wotayE9YHs50BeOfWvF1kl16Y/rwlSNaPtTVE3RqWW8yA1szqpgfdqBnl4d7G5CtAzVpgJ/14vrywdm3bZtnoe5+PsMbrWSCubbfCeg4DfTx7f5HVA8CeWbP03sAdXu2dc+YOvNaafUYunJnxwgi8A/C1QJa51kdnQVttWn3R1hdbb4mmrj0r0YJZ1CDy8fPohBfZaA1nP8xaiiQtsy64powVmnYVtTY0NXoi1mFhOwA8ykZNYNjBj6D2UW1Vpl2wMxNteJkateQVT17otXW60eaj8NXys8OjmTJam0SkrwdG59HGIj4b7/NiW7fmGslA1wpN78Z2cF1ax/q+UBfA0zT90DRN/9M0TT8xTdOfmqbpt3XtwWFdTJ3tuEMzVfmI0GCv0CZadtTp3qK2TE2Y20r3dsTiJse3vXuiZ/oaX9XonIHO4efsZhyt1gTkdwVN/mL2tgJHt4gCvhDRb7/dbn8jEf0aIvoXp2n6m8Z2aw0b9O3byZe6ejaksJoLR+aisEoYeoD/FvBGzAJjD0uN4SbKWpuMmNb6de31efA1wGhP8rdorTfnW1zTdnIdHW0ugG+325+73W
5//HH8l4joJ4joB0d3bPd3NGvvkdrxC2k/9oznza9qkT1oh4TIooolshSFp5e/V5AW6RYo6y3VscpHIJuZyWy1lWmnxbQ2zaVIxdCQQ/brlR1zNmz5DG37XRqlertbzXXnTawR3j/FU2PA0zT9UiL6W4noD4O870zT9EenafqjRD/Xp3dP633/T9R8iRkhIaJhs0a70gkeL8vZX2BU1/JXbdE10bJp7+JR/p5AWqRboKy8Vfz0cFn+euXdNhvq1rbTYlofp8hnegJlsl8v7rvxq3kRDrzfhyy/W6u57vS4pg23TRsPWRjA0zR9HxH9fiL6sdvttiDs7Xb77u12++Hb7fbDRL+gZx/flu3kM79e6378FkBHXFCagb3/m9ywWXu19YAt9zHyaXJVH8leWMX6cT3rl8c3A1ciokvzncdhgywE4GmaPtEdvv/p7Xb7L8d2qdjxqWfsc+uPTLGaC40HVVN1jwBqy7ILRUF9Os//1rpvLRMFs1auBexSjRelj/KH2ypP2/HtbYF56w4Ylu5by+DMdhaZBT0R0Y8T0U/cbrffNb5L78RGfbl3+KPRgNpF3daOQaGmow+xWOHiLcGX2nnU8VVjxaemwi2AR0PrJWR/RqFl7dyzyPp8a2gi0J4Xao7YMnwd/23w9mW9IUM+NbbD69JbsIgC/ruJ6J8hor9/mqY/8fj3o4P7tS9rHQep8b2C1V5YsvW08umLR+OFFNatHYvkSYmXIVWiZXKzVaueBUgL9si0tyTT90U/shED/vmcQJpVp9FuzM/lZI/7lu9wy02oLMNhbSlqqx4R0fWivCEHLBus75vnfmVvt9sfove4B1jULnR/l8r73vIjL756GfB3uZxme0Ff6TSbmXylszo7k5dd1ju5sz4t36H2z6fX0o8z6bNWT6T/Ds4ir5S16iTbmEDfCpguNIdUpMlP9Aqa8WPLysfO/Zd2vxHnqB7PjwCem6Z6NVNvUKKT5twGAv4tpb1iuHo3irXGNgf3ivtZr2Q72wkr9ZCzx99eu9sGbfMvoW/XQePBqT4krmqhi1JmqURNeNrLQzOekRoG52h8FJW3mJCBJFK90qfnp+XhnaG61uzkyIz06HBCxpg/ORchunKg5EdWCGRmVb8pcFcs6fuoNgjA7+9ORbXa6fjZ/QbNsv6VyBqDWo4r4TCbVdYLwXljYLzNxf2DpWQiKsdTSDzMKesrVCyqDi1FipiEZGbsVU50ahnWlmCP3hCUOvIGg7/loSVIsgHvBsj7LL1hCg38DTCP/gYsy4wRW78ddfXDxQli8m1aD3A22jcUZeBABby3mWi1z5wBNnINnLW3bdKfBVKvrGfa2FRkzMoa45ot/bCuW5mJNjxNXsgRZSxFDfr06TwPs1rvJArfetBDcETjusivTOfg11S1dTOQDT8/+3Jevk+zTkjjQJVwPoFy0qd19xAMTV/POhSvdA4oYfziaidV1czZcFdH9FgD/OaBbT3mZKztLATdYhsCv3VTjuhDApzyNaFnTxnPLxYY6JqPkq4rA/2CckMwjISaTxSDqmeBi7aECYebpQ5lHe+Yn2sw5V3TVDFaIqQtG4qoX81mdU/KxCu0+Un2s/LqRcLcII//jCRotTDyazKWD92oZWCcDlFvtIXr24J11XO/UqUHA9jrzJv6NPrbwFnUHMbe2FPWes745CZnnM4sAmJPFWvlkTIu9ZV2Sxj10zkXhpZQBq5ncDzTEpayOW3Ml+dF4fvJOJY3EegG45Moz+15w4Lg591wRWHr3TgFlbHchCM6M1la5LfSMgQEfV9OdEE349kbfVm3Ns3zm6qzZ2bkgf2OFPAA67HdZEsYOjMuE1S/kbt5eczLRseq5PKMK51ndZdKm51HlIx14bbqRMKZ3jiiEYbmoNLgNKtHc7BqkEUQ1sLLCLokyntA9tR4Rs+V8DORswUlCg2jmyNPnsvPyPpuCJ8tS5C4Gka/JZkeUbHexCzL36sDzqeVeTpUS/hZttP9qVSRJ3Hva2h0BQBv+RyXnTbTAmunzPVynt39Ru7WFwAMKGYNqDIdt2eMgT1k0qXAUrso87+yjFVH86OlaRO1zneYZNYAE+khaQS8Us4bs9VCzjxPawuBHR0j6Fo3Gp9oGX6e+OclPw/rJgulnUGad+PEzQA0vwHUfhs1ytZK92AcuTle+NRuyiM39TXzTqLjyXsWsab1X3HzDhVwz+nFlU2v9QVLtGMpUL1OVO3iyz+aHZoJvc0sAjrtAhyVaxE1DXzJMHRkGdCsfqCMzPfGfxGUvVC0JyxLWU3NWwbDz9Z57WfmfXZRZSzM++7Xbr/afXjIg26N9Qg1d7WOE2o3tncI4GINM9t6f7lGP+XoscRAznjUliREJlZp51qaTNdC1s8uizv6eQj6/rU0J2JFL7wy/OyFrjNh6PM8pIpmQyNgIRVshaI1JczrWUBE4NXgi2Y9a9CNqF8efl5UlmpWi0JEP2tNYVufpfi8y0/oej65N5Za+v37vAQzT28d7gmDOzMxc5R22Q2s92crAbhmgw2i+Sc36o3svGbZUsGRMAyqL9MCExesSVhaegTM0ZnQqC2siOf+yrjbVbsYE+njwLJcFs5am1oYuiQz2DzPKTYT2oOwBkikctEYsFae9w/5tiaIpceAT48bFQTa7A2QB1mZFlXGzPgELD7Oah0j82dGz3+j2s1uNmp0uZzwNpRoTknkehOxHnNgVgV1z8bq2bQDBbzmu94R4jWz/ay81rtLZaJFZKLHQoEqP3JtiZKXLlWAbKdczJ7ncmA1Ejb0LsiemtKUMYIFgsnDvL2ONRhrECblXIMrerkI0sgfV9u8r7Lf2mtx1a92IxO9AYpCVr5o+R1RbtyiE7CsSE92bDi6sQ26OV4o5NpNOLIWVcq99kFoK7hrWxHAIxRsq8/GD7EmZJNVwZE7VWZX5e5XrunVLhhaaFgLmWnjyi//rwsKB27kQnM5zS+KswsnumCXMhpc0QXYKy/9gwt4mYzFoaOpYO5mAa5HHoIrUqy8rDWOrAGbCMO4/JXw1fqO4Pssy9Wv7Jx8Mdpn4d1hyPLcP/oM+bm4GbieXzeAy5n8OJyMZvyj3w1Pt6JNWnoI7izypW7CkbymmOleXo2l/aFIadRJNOI6BvgrK+BRYeT+s9NmrtE/mV/TtZayqJ4cA774Y0laenR50Ssdg1a2j28C5heyy+k0X4dpXWgllEmUsy78SDFZ6kvrx3k+I9qDMFKR6HiLEDS6GZDHkRA0VL/8PUWAJZq/59rNl7yRQtELSxkDWN/OtNiAg2h5o+oNzWgRHm7o5lcLaVtLmub9UD6RcjNeG7Gzlh2ha9+wiah73d64jS87CEETvZdwQthWmMwAF+ILi48Naz96HbTZtlEbV3FRNFUMT7MULBl5llqy4M6sNQTNjz+Jv0gh8+5p3UaQ9vzxvsp+o9eCQs8L9Ys6J9PkMbrpQXWsMkThGfJy/JebF2ZGpoWLpU8vXVu6BNMv574PZOl5ef5gl3rPdgJgyzJz4DPhh85qvFYFR+4gUZlFGAmP88hxIak6y3E2HR3P2
gXhueV42fwCMlMPbBz4hpRoMaScZDqvZ4U5Sxovp7UNFFkkFM27h0LQSIGSSEeKNaqMCfiQ/sk4ln0PWUT9Wu8xgTre0IEGcEUZX06viX+XEx4uKef3v8swM/qua+mRMeHs0qXQul9rEpanXr1Q9ah5LmoD2Wt47whsu7+dAnjtqeOdwhstX+BOExs+X07PH6KcCV3749cmnXgXGQxv3Id5CPtxfP7WfFcsBEmiOQyRYrLUlKaevboapIlCs6KLedDjUCZagtgCciQELctkQ9Bh9SvfLy1ddtb6TMjI02Ar653m8wz4jZ8MJ0fCzGiIRZq3RCmajmwW+Wp5EEMkP1pWExDasF6VRXbBqrVxsn0DALfAtdcdT+ANrf1SRMZOvO5Elxst7mKxJtEU62UGzVz6vBs+aGeznNkFRZ0VzWahXtCFVIOtVEhWqFlTZujCj+qAevJRezIkrUHsLNI5qC0QI8CifxqcZRkS5dGx9GfCV0YsinmRiOhnROJvRBkDKN8nX90vh3KylK9m52BGN7vFT3ZM2UqftcMiXdfL6TUBq0TGojf/0bHfqHVnV4QBpdEML7Id7SMS8RV7E7vQ2O509h9xh8rwNJmv+Szp/C+JNGHXy5nO5ytdLyc6na50oROd6PrKpxOd6PL4m08vF4KTSC8XmFLmTNdn3ZefV3+ujxL3Opd5+vmRfvlMtzPRVG5ITuw9KBfVK3hvruwv0fw3dmV/z+DY+2zQZ/GwiZSvxhUlvuwbVo8f9zJtprQ10Use87QwfOWxBCaJPFmOQB3Nf8Sv8DMXjThShG4gSzovO4f1HMyvdD10jdK1tmQ7zSFora4GXw/Wux3v9Tq2XgR2IwB/Q/PLwZq+OrYNLr6wTA2Es/cLz3on+kxEp/OVLpcTnc4SonP4nRkVoulLOMdAS1TAvIR2KXNi/i+nE50uV7qeic4SphyqCIQc1Cdavuc8j7+HRPi9l+dXpdzDVAjzvj8sAtsL4W+tlo5MtmGBt5zL4zB8eaNa1ACF8PmxBVj5otBf6YvY8aNcCT/fhzuWYeaiMqMqt5xrcLWWLmnpWIELhfwcdmLp3kMYpNWAsyd8w3V6kX37XbJ2pIAt8yimlan1rRTjlmm6tvtI6SJ4P/9Oz7zr5USn8wOG1xOdThyMc4VbftSR9Bcsy4XgGgZtUbtIHfMLzyy9PJzh8pk+IXBKpcvVLPpr5aELPH+f+bkzvEaEIfzN5Q4qTQ17QM0AF5k221k7R8ch5csdIvgiRYvKa2URnLVhB+SXpZe1v89d2AT8LKWqLVeSflAZXSEv09H4rxzCKfb5cnrJegTEjPrVzlGdHlyEPqw5OrzCCKCiDvVrZ6eTsIj6vpkNvqJjwZExkpoveqM974wJP6XIW6PL071ZmdbFR5vIhfo2S2djwXBvaCJd5WTywIX5afyiTqTDhOcLQztDaROeJPhkWe5ejgUji8yC9uDLfcnNNqBJGEtDihYpVk/hZvKAfxm1Rd/lebr+fY6CWWsPlUFjzYsy13KTGrgr9Cxyjaq9PtXOq3nHtqEC7hmGRn4jsrNzH6yxWUvBat3T6lrKuISdztfFOHAxTX1y5SvT+RgyV8koPH3vGgslM9XMj5HilmFoors6uZ4/z8PQ8gJrhaat950rUZ7ufXWsPKaQiwq+XJfQulzxt08LSdeqX+krqoJlnlS+RMlxX3ku1SrKJ9IVLpF/o8XriRd9O9Nz8tV97HcJTQumaGcrC8xy6VJkTFlLX9wggJUPiwlY3hhwZHxXM6scAm8axrJwVFSVetYS1e3uCjYEMLJIOHgUuG90v1x2tMjLyY4BmyHoV95nMf5brGYcmMNSghaFp+d+5qFqPulKg/bLN0s/f4sul890psdkLPn6UWga/eXHMpSMwtSyDs9HEAdmjgcH6hf7RPlYDvqlSEXL07SQMxHBPZ6r4IsiD0gFS3hKNRv5x+sJIPMnHxXjY74aTGVYOgLmSDg5O17Mbw5mdjnRYgesDFyjoWfNTyZUfWH/qm3NaGnfMPfOADzCNGBbUtROdvO8cpk0IhvIXvMPEF+vJ6ITV6b+TGdtRvNcDWM1fc+7PutzX1IRL9NfKpiI9MlYmgqW75l875BqLuWIlmDmdYiV0coCK7d26kcYhHDWUHtRBczBS7Qc8yWqhC8CpVSr2rGV5pVnUOaTr4r6tWBaYKdBVoKZCKtWXJ//ZnS1jdKJlAlYGfMAa+VpZYaEmyP7M0vF22LjlfEgAN/o9QZYatVTszzfopb0M0olg+aLWe+kFwb1QtGpEPRDb5XQ8/ny+KuHjFuWDhWf93KvTkvQzn3NJ2ZxFfxSx/fJWMVvWZIUUsHlvZGAvijnF8JA56Yp4iQ4I2pYKl3+kVtpSCHXKuASbiZiavcsQs6lYg18I1DWYEriWFO6Mo3llZ2v+IMXMHTtvc01YHMVzeHN/Zbypb4bZkZtifHf2QMYrDCz9lfWRedR5dzFRu0BPWoGdPEb6/cKCjgLw0qpl6pbCWgLuJ5a9SAszyOhaSMETZcTyXFgIqLTCS//wWO2d2fa0qFXngbt5brhe17xu/TF275wyCMVTOCvVME8LTreK6GKVC5aWxy0AuHziejy8HM+E124rw6KGP0SvLXAMtxc+kakLDXqCV8Ues7CWWuf9ZmrXyILuMvvLAerDFXbYWlf5c7Lz8FshrTl+O/lTHD8l0hPs8r2gq+nikO/Ja1QLVAzP+D+4ekdz4LubSvMhC5l+d9oeXQeaRfe5d5/fOVuODI70r5g4DEpfoxmNC/HsPT6qO35hez8WpLEX06F+nnmyWNUDuVznxkFyHzzHbOkwiSi2faVRHOlemZpcga0YA0sh3yU4/NpCd9P5wr4ctPK8U7LdOSDlyVRTn7WKI19rmjfZyIMx8hTiqK/m9olTZotws+eAkZWC9XotW5IODpj0WcErLlL493QTfIAsxRnJgwdrVMsMtjKfYGJWK2CnAJdQKFRmX8Wf5HfgKExWzm7OTaj+d64NaOZq1srjC2VhhoCP5VQ+n1G9POTQhdtmYaUMXoPtXIov9EmuoPt5l0XHm1ak7D410KmS/sEjlG4uZzD8d7iXLvpKPlfOuUy6drNTlT9nl+PHUQbb8zHav3w8avscrZ0VuUu83wwywetLLafRBYNPVvDq9FyGngvRh40WTCqRFuo31MZ67YSgInaIDzKWuha0YwVYpZpWtfCIegzyXFgIpptSxkB7b0Ze+kQ8kUkZjFTCTPrm3rIMen5mPIjTY4F3wvqNyfWNpS8HDnlUH4nk+PCn873DTvguQHjyC9oppbZ9dsFL5Gp6BfgI4rBVwOnpZq9crwPLO0FX3puvPEKJesTsfgxgiyaZCXTZZ0eS5qKwQiXBVov1Bwpi0zzYVkaxtL4ryDqpAaYY8aMVwQwUZty1dJLWgTwVpkkjOVnjRQrcp+FMCn1ZDoCkBgHJqLnrlhR0BLNoalBu9QvhsaLiebjv/dzfTLX6dk+3p6SiF67Y/GLM38vT0r6q0Nz08oV+zYRfQXSI8Y/H3E+PY4tEMu0M9FzDDnUvLhOS+iW
NBW85RidW9C04GtB2gOsBn6ZR+wvzcd+ieYwjKhfOQ4sfcn6xfezfQDmYpklTVf05LPZVpTgbxao2bJdjU9kisyAbrFtoLwygHuat7woE9pOKHArrEwsz+I5gimJNAT0SGj66eO+LWVZD1zukk9nfRtIBFoiGRa+wguGVef+0pZriuUNgLU95b2NuQqehaK9LSf5+2ZB1gtBW+maFThcRBqAJwIxEYaxlY7KcZPQJQqCt/Rdwq4Gvt6x5Uemke0non6JEACdCVCEJm8tjz0wyzqyHSv8vBj/RaDVIKqV80LQvJ2s2k1ZhP69J2BlQ971tgGANdiNClF3CjNr4K2tkw1BZyBc2hRhaCJaPB1Jm518d61fALJwrlXB/Ph5YeKPKkTLkkgcZyAr1fGZdMVrAZorcxQtcS5EBcRojFhCdzGDWjYH+imh+2yz9E9GDhCMJZB7wffboG4EzkbaC77Ldb+ZpUNS/aKx3yhkNTCjdqDfy2m5/IiP/1oKWMsnpwzyQ0reRfzrbgiMMk2OB/eC6Zt+HGEEqJ3AmfbdeSIWr6/BU2tLAhxBVvP/THuFoYtdzyei0wt83tIhbalSyUNjZvP6Mn2ucC048+OyLOlK5ycMrufPRMQUHn8/UVp5b14Nzk1+Pqi+rAeG4FRDyrekKWAu30YOY76EqeQR4ZC1NBW6pS/8HEG2pMu0LHwlaLUQs0zPKOPzfMvJMvFqCdU4ZIkwTMtxMVT/nr4EM68j20Hql8N3EX72FHAkXStjgTRTrhrK2S0oW32OfWLSG1yGFHljM29+9FvTaJkfQuRHgcqVtKfP+XKkiMWWGi3zssdyuVJsGRTIe4znzR7UII95WkkvLhBYJHzkhZ1oCV2tHQ9UvIz0c1rWnc4vaJYlQ+Uf0WvJUPmHyhUfk+xzsA+L90K+pqjytd4j9J7L90z2h2jZJ6LZlpPoiUdEL/VajnmZcszB/ErH9bXfkbXDlQVmzWD4uVg0wmqVjZapvXSGQByB4MBr98L6QRnd269kLcuLZPki91onYiVM+7ylikHvcEbBEjifhZlFmwufZyK2EQd/OENkB6u7S/xowWJySdLL17y+TM8ug7q/rIAK5u8RP7ZC0ShfszIRi1/4uZXPA/mT349IXkln6nkCZd0nFP3/7Z1NqC1detf/9e7d770BCUHMIKZDIihiCNgBCYFMJGTQJiFOFeJI6IlCRCXoSBw4cCKZZNKoKESU4AdIQCRgmiBotKMxJLRCEMUYoRUJGqTv+57zloPaa++nnno+11r1cc6pP1xO1Vpx+L3lAAAgAElEQVTP+tj7nFu//X/Wqtqlr6KLUK7BuBzzcg7IKHzpuRbjuWNeJqSupS9ckNxvkbQOq7lfbe1Xuv+dp5h5f8vx52CW0s9Fi/SzlWqOpqAtE0BjVksvR7+CsFaZTyfraUcAZ1UDzhU2YllpyVJvgVOr0+IBv70L5wtwv33n9h/8loaWb0Oag3F57+/jP7/2HGgtxQw8Ut92uvlRNt/odZm3ucC+N5geS+81Fd/hXKAn6T1rL0AypBLPP0RpfdG4MrfoeJJrpz9LDC/XwMvPuTP14KuloKVyLdaAvXTPr7apKrMO6639Tn1fFudFkpsOg/n5Mnv4hph+LpJS0Va5FufBmNdpfaRhHbHzkht9YnW1UI2sMbdpZwB70JMIVSPPIfNxlHVgVE5HgiIf2gM1by/9hNAH2Q1dxF2wBFruWul6MYfzVP9wwfzcc9h0E5b2cI6p7RLEAPR7g+8v+N7RdPwNVg4hFlhCtkUFDk/KOR1fGpPCmMdobWi9Vma5YAm0dO40hoOQtnnH2r8T2mnH79iYViwr03Y9c/drQ1Zzu/LjKCXIe+5XcuMSmPnmqwd8Wfo564C9Oi3GquvmjD8NHFtlXv26a7yeVgRwLTy32qCl9dGhb6+LaAoaTlzYAV+B6/PslqTyDUnlnmBAd61zaOouGJjDlJ5f8HTr48LK5Z3TUsqaflED1X1d75aKvoLsip4akGDyXrXc0xtV+T1YwNQ+SGllwNIdR/9kJeDSYw5mCbTauQfHKHB5uQRZrY/bTz/1vExFL4/lh2PwOFpeju0Utdw3ILvpUg5g8ehJMf38aOQ7Xc/hWu1rU9Dd09e9PiV7/fRfi14RwEDMNmZcsLVuzF2uRcEO68Ce45AAKvXB66S3zHPH/CLNy6/AYzf0/BuS9LXc+fcBTz/nLvgxhYczlcBd+pYA/8ziJcCXcaU16/s5SUUDwPU9MBS48t9Vr8dNXiEDnLpbz+GWc8nhWulpOvfo/3kJuPxYSkF76WepLAPf97e2HL7S7UjcPdMYAl8p9Uz/fjhktdQzUKCog1lqr23KKudT2yW0yzHtm6afAcxvPdK++9dywBJUJSBacZaaeRhd/13zCxiyfde96JUBXNQrlVzT50oP5MhIgySv06DLzzXYan3e0tBF9BuSLpfY9wFP3S4f0jG1mz9AQ9vEVcTT3HQT1lRfuQZ9v7/mM/l7g6l7lN63UnaBLB7PH/DRImtcr110DjyOA5aWcajSMsn18vOM8/XG4POhfWB+LD1wA5hvrvJSvcv08HXRB48r4tmZzLjlnPbNyxdPviqKOF2pXKrLlPF6zymHOeVNMNIu07aH+81rIwADOUe6Bgg7pZaLsl3Vulh+ztuVei89XVJWwCINXf6vL8E4Txt7KWpg6XSXrth2waWPKXbpgvmxtCsawHw9+NFw7lqzKWhp41UNNGl7K+WsxXK3DKUNWEyRl4bmQJXKaLkG1Ah8i6sF7I1WXhr69tN+4IbseLU1YWDuQjXHrLlfCeDerupyzsv55qupg5v7lb56MJJ+1uoiKWhpPKufKmnQyzyAg5/3cNBc9S90QwAD9RDMtos+VcuiorARK6pI5t2K8UALck7jpdTmvfz2yH/lCxqA4oBlmM4dKE9dy06X1j3Ol31E1pOl9bB5/x/j46mzqVxbD+bvbe3/AArB0g9PJUOo11LJVhoakFPRIOfRDwLaa5fcLi+3XCoHJLAE6HtW7gH3vVBupKk/fQfxgRscrD487c1agLTZSt6gxevo+TLNvYS2++SrKSjmgDW48j6ktrzcArOnKjhvuVnKg3lR8yeMrQEM6DCNut7IOrA3ltdnoF3EcUhApPVSP1pbaVqSewKLXVywL6BPxlq6YC1tvHTBPI6nzfi3IEmbsPixDtqre0xvTcJ74ONvTP9x0vfHRso9FWjwC58GYsnhepCm8/OuBREXLEGX/9TAW86jrteLi8CX/ORPu6K7nj/gYwZj+9aiDJi1uOlXYvcPyHAWy7Vbj6R7fyMOuMa9RsCZgasYx9d/tU8M1i1G26zf9tAOAI6q525oCdA1z6RuEDfbgDx16SW1OODZ8fyWJOqCASy+JeleDvrFCA8HOw39SDXzzVEcoFoKOwba5YcBfjxz25f5pqzFb5S7VG3jVSnPpJr570Iq5yDmMZYrlqBb44DpuQXdUs/BW44liAJ94au56ety09WHy5TT9kD6Ae9EOE5t48975sAtY2sOl/av9Tkr1zZfFUVcbdQBa2URiFt
ttX5E1QJRs/CZ24+2c7/AbgCuccFR2Ea08v3AkRgepwGal1sOGAj0cTtgLhiYb6aamkr39C4d7FR3Yec8Ze2noqfyCGgfY38A8A6f3F4yuwVK+takVmkQ5Y6X6qK0k/qgztdyx6UthPG0efM5SXUaZKUyC7Zg5xp835E+JIdL14cpfInzfboAn7xfOl+eQo6s+3oOV3OwUh+0nh4v2xrp7Nvar/i1g0+D7GQt0HqxWhuprRXXrMj9vxZgJfWabL8XfWAH3Ko1vns4KQuK0lASYDUIa2CWxpw5svnXFAKYrQXrtxTJD+Ao57SunNN2VNrTs3jKWj623Pl8HPFbk+4FkF2jVp6NiarnTmpLfL4SdOkxd7xSmeV0wc41J8vnJjlprW/gvulqOn64R+DhQgHZgZYYno3hQORxVh+eMy7ttGUbLQM0PXiDvGgqD7b02IOn5VJ78mu/rC+TNZH11513BHAEcJE0cemn9zqwI+v3JgGWlnspaK8PD8L8fOGmruDPhwZwXwvWbhuajrnTlV1qEU9LT9O5iHVaKvoTvMPkdfVNXVJ/dGf0bFPWYkYVmn2oMeqpYwU7j7hfXl7aQTjPzF069hxv+Sm53NLec720X+k+30g6+h2/3egjfHj3WOf9MG3Hm51TqGqp5+iasLW2S8dtccZ05zN1v7MHb2hOlqd7tRQylHip3HLRXmxY2fVfSS3Q3P5LH3Z2wBIcrVuSWlyptQ7M+6VjNuyG9qSliWkdSAxIHHe2HAhS3eznZXZLUhF3wYDsZq26xxSWqWcL5loqmtblPgCQndFkU1YIwldMtyhdyHFUEpx5GloCsQZdWg7WDsJY2pz4fKQ6ywFrQLYcsFbvuWMpHX075vC1djxb6eWa1PPyeAlRWqY54wjgAZDHThL3y7920Eo9S1COpJ8zKWirD+mfKR5Qm14u52veftSulQBsPcmklyLrxdHbkaS2hrwQ6Y9McjC8Pw5QbUzNAUM5X4B5fktSEd0RbbtgfU13PuV5qpjugp7a6OvIGmjpmi8/53Wzfm4Qvjw9A98Q7hGWfp/0vt9yzN9LClYOWQhl9Ly0t9Z9aT0g/86z6fCMA74KdZLj5eeSS+4B3/dL+H64PBwtd7uRHcvFFQPyuqx1a1Lknl/JGdNzsY7c9wso7rfIcr+AfD2KuNcIrLuJMiOyQYoDtkYWlK0vefD0KaIMXNEB0xeQ/cKF2p3IHlQzT8XS5iZ0zSU1kS6cYGWaA9agKkHdcsGLPh4uGJDXgou024amrmynO7W5Ltpmxqit+4CPH1C+DffJeywhnN3tzFPLvK6899rvQ3LAHoyldkDsupB1wBHwlnIO4lrXK9WRx1RKzpfCVwLsJ7d0czkGINZx18r7BGA62KltvTMu6uJ+tbJaByyZS6tv7+8xxLEa2ke+/ehIj69cFcBUrd965KWhA6BU4yPrzJ2/HUnrx3LItFyCrwfnoAsGcP+qwiK68YlvgpqmYzvdqfyyiLXWgK0xrLoPeIcLnphLDkKY6gr7O3+BOTQ0B0xBLMVY6WZeDuGYvKaQ+N+WBF8NulIZd8sWnGvgeyu31nw5UDNrtpk67dahFmcsbfqSvnJQdL+W27UgqzldLVZrx+ulMaJQFjvz0scecGthm21X78I3AjCwzv21PW9bsq5yybcp+uFN61aCabbchC89vtzulL0VESBfLlNguTBEnS5fD5aepkX7pW2lY6+ObtIq4uno2VgChHElX95QZH3nL39fqaIOOAJdWs77oG+D9Tcn/Z3RMskN8w8evcAr1b9jfbEvXKDw/eT952aPmLTg+wk+nh23g9laO65zxvT43u628YruzRDdL5XmSHm9B1Srv1ap/dSmnzMDW314AF3DUU/aEMBADpi9gF2zDlyprCv23K8EU16uxWufH2ZjPlzwvZrenkQachdc9Hwvm8OatpPWh/ktR7St90hL657jRxn/xiaWL75V3XdHb3U7UK0k15xpy6VBl9bzcskFS4C1nK1UDyFecb4ABNjx9LH+wIsSJ4FQq5McbBEfT5qf1hc/BnB/6AYguN8pcH7sgVKDnuZkI7D1AF8Fa8nG8+OjqX0z1w6XnB7ru5IrLWU168DaNyI5c+V/G7XvpuZmtfEkp6uVS07NccHPT+TqflkCMXq7kRbL09hWKlrqh6/zTrud55uvuCuWXDJuac6Pv/Epnq/A5dL4cayAg0+PumHJ+ba4XwrlyPy08wsrk6BbfmpO2AKx5YqVutia79Ltlnp6u5HkWqlr/gD56VmWu+Wg5h8ItPuBdSd8gzx/5rP0pQuWk80AkgMcRl0W8N3ZGbn9qHVQPobWX5+d1Dt95o/uQI7AuhboFsS14+DtSJm/Ac+pWrHaxVgq81wwsLgtqeyKpg+0kG43ugiOVYudynV3C8iwnD/96h3esXoJ9BKEn2+XwdmacNkdXb5L+ErS0RSEXLSOf9CR0tCai9WgK2Uzyjz4RiyQGD5HLsn90mMNxi3gBTsPwveT95PrldZ8Nfg+7gN+gNG691eD5nINOJZ6foy9bFvqxHGeL7ONV7O9Gdp9v15KOQpIr99apZyx9sQrK9YDpGf/a0Da7zamnQAM1IEzek8wv79Xu983Mo+KB3Rk3lXNqUYdMC/nsVLfvOwKSA/neC5fWXjrWEsnLx1pKZffCO8xllJbDt2n24XLugUJUJwv5I1Zk24Qfn/7FiXtfdMuKBy6vIw7YM35cvBrtyKV8yLNCUu/iogDlqAcBbHmeoHlQzhIf/xbjbQ1XwAifDn4JPhG1oR5P/4tTXbquZyLaXLtoRuRnc/ZsqgD5mVSP9q/sEZhEH7csv5bFIFm1P32044A1lTjgrW2mjJp6MCQVNbwEiij/dJ23AFDOOfxvJy74OsI7eEcAICLDFqeTp7XyeXS7UfPuMzSyFJb2u4TvLt9IFg+F3rqC7NYDcJXPONjAM+XKy6Xsns6+NQsyQHzD0meA7bKFr8nEgfoLtiT54AtJ6yVWeAFUq7Xgq98n68O3wg0I+nrTFo6k7a+g5lsvBK/79dyqGBlPBZKucU9CeAZpdtEHS8fhNZJwNbaRMaMzqNeOwN4i41WlrhDpmXacSANrf3haUDU4iL9Si5YAzTvZxbzSEXz25IALO4N1m83kp/zTKWt+drrvLKj5s74AwBpE1eBMHXND32MsrHsw7vJ/V+envH89BkuF0wpaQ5aaa0XpD7qgK24UgYs/xRpHS2T5LnfiBOOOGDucjloJRgH1nsB3OCqg02D7/xe36meAzd6S5EEagmo2gM3eJqapp6LxNuOpmDf0Wpu1ivPgjbjdKtcsZeKXuPLFyLut/8TtA7ogAHbBa+Rhj6YLNBKsfxCfFXKeV8StDH/usIi+oQsYHm7Ed3JLKWqH8CWn7DFH3PpbcR6gD7zweD2AUMk5yx4+vH02f1B/8GPXnJf3o7lSDbDirfaaH83EnRpeQa85WflWrDmfIH5/oD8ZigGOgWKdBzrnt3HPORxeL9W6vk+Lkk9m7cdlZ+Wo/XKIcRokiBvOe8q0FpPi5I6ytrwouM8epJrJRLRvL
43RO/7g6OA7bgbugxbFHWwEjij/UuOiNelyx8umH5d4WwalyUYAX29F7DhJ8Hwk/IM58UY83TzFc+L2JKalh5ZWS590iYuYdLLzVkQHl/JXe0Tae/9DqiT5nXWRqzS/+ONefTvvSY+f+lYgi59TbzOAm+JrUw5081WGnytdHEkLS255PpzeR7Tr2kJdgCLjVfiFy5kHK1WbvVVDVFBqfbSJweutdPPvdwvbbf7oyiLIiCWAJd1qrU7prlD5mPzeShPxeJTjbgRz/XQMs3l8r5r4DvT4+sKueiGrPk05V3QRdbjJ7X1Y6k/DZwasLWNWaWvC55wXwPGFc+YP0ELl+kDh5qS1n7HGoglsHrQLeUgdUXRz7jSHMlrXJRz91vKOFB5eQS8t/IW+GrApPUAFvAtu6Ol9rU7nrNwBogjZ0+8AiBvvCo/OWAlaHqwlfrSFIG11zYE9E9J0B7pZ0/rfUvSBgAu8oCa2Y2cvSeYx2ThXsZErF1N914/2u/Xqvfq1PLbxYAW00dWMjZqEJXqOcAz9w9bY1kQniArg3gSWQMGyKXz9h3HsyGZG6aQoZC1QMw/WD0JPymMAflvQXLBkqS3LOJ+pZ9S+jmxCctON8ubrYAlTCVgWrcaae21dV2tLyk+sgOaul9ATj0/83t+uZuVYCuVU2ltpHrPJUuqdtB89zPvtMi6nahmZ7OnLEjbQL8hgIH6j+ySvBRypg96dZNAHyCq5EatmIg0lxsdV3PBVt0TcN+QdX3GZ7dbkfiu6MuFruna7w2t52At4HzGdbGZKrOj2epr0se3y+E0YzcFzUWm/XzFww0/K2lpfk6hyR2w5nzp76qHC+YxWkpac8KeA6bgZecFvEB7yrkXfC0XXet0p1/RA7bmvcLR1LPmcIH531oEohlQhmHaosijJ7mkTxG0jfZJROq7Zve1No+8NgawJ+/biKIP8MiModVxF83HMrbkeG41Ksm4W/1a9dx5eXVXTFdM4csagOm2HSCwoQl8I420u/mRmpuXz79zuMja0fzJDbTeOi9NQT+2hj3OpxJ6+bzg+fKMy+UJ1+fyGj67P0FrlpaWHDB9f6U4DXYcxJYLjkpzvxEn7K0Dl3ICXkB2vRxcUfiWFDLgO+Oa25asFLf3QYCmmgFl45jwwA3znl8Lwlodj5Nkud8IvKN9L8TXRzlANfX4koReQO3zyWQnAGeAmYmV2tWkoSu/ISkypYwiDtjqtyk9PX9CFlV5Qpb0tYWSqOu1djdLD9+YdlfLj5qcQOuv82r11WJrwzQtjQLiW5zodIE5eLm7pR+IJOcr/elaf1vS340EW15+Feq89DMBsZRuBtDN9bbAWd+cVeeMedn0KxHqvAduSN/1W35KYPTqeH3U/WZU1TcPsr75SOqQg7QGrLXA7/fm7eiANQBm0shrpaElBT4IULfTQ1kHnJnPwu1CgPLyMZUAu094dvh8a365HwMPByulq8vtR8WDzuumeGmTV4m37jem/a8itjb8fAUuT25+JHZrUlZSdoPWSXOQ6q20s1bPUtKW6wUkVxhL63rw5eus5VjvU4enNt6j36vaP4BFPID7/b6LB25wSa42UsfrIzCMuGutvJlDPdPP2nlUPT+RxLVzCjrqbmlcSxqau2JJmTS0MxSXx3cpLgL1GgfM69X0tP69wXddMINkBnr27UvFySzTyY/bkPQ1YaDseH2s+T4jtwZcEtLLlPTtw8jtCVrUDd/T0s9soxZ3uNbab9T90t+Z95bzt5qfZ6HLyrR1XkB2vdNL6+N8y4aqSB/UKevx+ngfiMuOjveM6+z/j5h61p54lUkLS9Jio+yy2KSNq85HSz9bAwDrp5+jsX1BvTOANbXeG6w9hIPLqm9IQ2t8jqYJJRh7v/ea8aL1V4B/Y9JiKOX+YE80/axBFsD9gi2JQ1YSXfPl55eZB1+uC0t6wmUGYghpaQpiXNkasbURC5hfiGkZ3/lc8z/Yc7/02NuABRm803FurXd6STropDRzDKaPtDMAN74Fzur5wv0q8I24Tgt4GZcadboRpdrwACv9LKn23l+pD2s+0b7qtRKAYzchT4q6YKrK5zZXzYVfAa2FOKMLyOFmO6ut5YS4tOlb8TP4LteDL9en+bcmVf4p0TVcD7KW0536mpxyca1efETFMfMNWgXMDxA/blkSQfzE1oipy5WgKwGZx2l/Z1zSr0YDrwRnxe0CuKeZAd3xAmDwksrrXG/UGXvxUThXnbNbjtRnPWtApL/fqDP24jWIS6qBuyjKBe3hFy2bpGrdckTZF7v7gzjom+ABUqJPRdrXBKjnij2od/yCBul3qTlSDZhWPL9APwl1UhsTzvJTsqz7g6Mqu50j6eQIhAscizOmG7EoUOnmLOpouRuW5zwH8aPs1pqlpsutS4DgiimMI19D6GVLNGkpZ17PgXsro9AFluDla7zT8Ry8pYwDax5bD98CW0B+4IYGX2tuveEbuuVIAinYuQdiCG2sPqxyTVqc2s6apBZrPQkrKu3Wo4j7jYxVB/2NUtAReGWdcC9gSlaynGsbtCp3Q0ecsNamKOKCsmlrCgFTy6dkSd+alBWFpuekabq5QFB6AMdyDMxTxrOy+aYxLupyOZgfpU/3Pp5Jf3cQPz/j+XqZwfhyA6sK46kz2wW33obEz4X0MiBDd/opb66apvg49tLNFiSn2HlamkLb/65fH75R4EcfxOHCl95yRH+fnmvV+JVpYzldrTzigKucsbdW6+12jt77a43Xqvo+N1wDLpOs2aFclIW0119NLL8KOvOJOhTtj9ZLZ3vOuGZM3v/s88kjFf2R8rzoKBQk6D07jUubAsVIm0NInOJ8Vf1KDgbvIhb6wKS00+ZF6iLgneplZ/s4lx0uj/du6aH9lnMAszJtJ7IFV6k/b0e2JWn3M4Xv/U3lqWfAZ0jEHduTW55XQbNG2fRz2FIztW6+4vLGbQP6hgAusiAoAS26Y9nqQ3O5kc1alV/QYDlUSdpvQmN8xAFb4/GxNbAvLvT6/cEzBZhI13+ni5u/Zvt4tOTkoFa5z/emR5pZ/qDAN2nRtHQ5LrvDZ6lpPM2fMV3gVlLUzBkDxB0DtvulU7V+BwJsgQdwgTh0y08O3sexD16tvDYtnb1vuCbVPf0qloCmG64W9/ve32gn9WxBVqvP9KXBPuJ0tTauNPtOlXnucqub5XOIfCmDFlunHQAM5CEcqY/cPhSVB3HpOPg9wRZoJWXXhjOyUpum+kGYPhkrssb70Pz5zROUC8wnyBVQz8d7gJOCm7aRdkA/EtXLFLSelr5CSk3ff97S0wAWKWr6VYgFyFMcFn9DM7es/H2NQjlfPdCAO8XK67tAgZC1/qs5ZN+d9oRv+ZKGVvhqr2H2WrVNVwDcp11FQHr/xQjnnmvWxrYUnYt7/dDSy5Zz9YCrfZqwxs7UtcTa2gnAQJ90cMu4miuWYitdcOm2yHKmETB7zjazPqz1EZYM4Su/X9iBML/HNwdhro/vl8t52RKSwHxdWHK5wBzMVjl3xLN7hQmIS+pchDFuzvgGYwAzh0yhPJ2Xg/g79Hzl5
x+x84fLLXMvr4/+fGLnGjgf740NXl4eTR979+jSslJufX1hdGzVzbubrkjquSgCxyycPZcrKet+EagHsPziBc8J116YMrczdXlhzdoRwED9QzVofXQzVsQ5e/GaCwZCX1NoORXLAUfa0/ro+nCt7v0vn5S12JQFhCBM3SaF8DMuoU1WrXqkj+ewpmCe75R+wFVyvzyellswBoDnyxRXgAxgAWVgCdOp7vFxiMN19noFd1teD31P6E8JuuU8C95Sv7brXS9GeA2RTVdS6vn+iwier/VPU9T5NrtfqYy7X88NR8bO1LXE+toZwEDc3WZdsAVQ6xakmnGdDwyWI4VSJ8WsJQnMEsRFsA/3cvVJWUB6t+7cCU8uljvjZ6HME3WqWrqZanGfL+lHA3GJLmnoefkzaftYKy6QkoBcHPJ0/LR46MnjyyHmYJ29DuFBKXyzEAduef3zOmvtV9401Qpe3seW8P1A0tYlhj8IpAm+rW5WKqu9ZmTAHB7HevKV5FSzk7eA3Nv99t9BfQAAA/HNVFp9r7VeDdB8DOqSYY/d6oJrfkMe8CPlVhmfm/D9waIqIEyhJz3xSr7Xl67nLl2tJ+qGl9CdA1sDMa2j5XPX+wDs9FMHcomjcLz3fYn/kdD2UpkG3HnZHIylbOmUdfDSNlJZBJBlrB7w5aAtZXzu9N7i+2tcA75RQHvQXsMBS3WmntD3uc/ZW48sRaG6xu1LqwG45Pz34HvjQzRmkoCsva5S7nxNYXa3s9WXJisFrbVtSk/PU9EfXZX11OfLAsIUVo+pLL/IgZbRW084ICXxNPJrEQV8pg2VdGvPdHwVyuQ0c/mpQVmCJW2T3QlN597L+fL58vdFur2oCr6PycfAZpVb/dWoxf2mxvRg5t37m5V1exOXVJ8d/wkHeBJWmUh0mIgLzt4XLNVzqGrn3hwbb0nS4KjVc3kvW4rV+uyyNhzcGQ3Mv9xeeCHS1wjSZzfP14TLxivL5T5iNGD5qeZlWwrA5Xqvn4KWXTF3vI/Us+R+az9QSLDldZKj9eo5SEt9DXil2Ihj1m4RagG0eF7jfAH5/xp3tLxOA6JX3uqAvTpT0c1Xtelnq30Emj1dbd3cN7KoGRBLbbMA7+GCI0CW5mc8GYv+jjzgWpusPEVTzzXzMlUD4Y9VMNIvY5hLbxMVv+1I6oumkx9lc9BT2AIPENPUNG2j7oImoAX01DNNaVNpa9j0tSzLLuL53P350H28L8uYyG1JWjrZSlfHHO1URp9epfWlQbsLfIs4ZCUwr/3PUtSRS87bVObWI9oxjdHOM+Ot4X7bnItLtmEY3gP4RQDvbvH/cBzHv1I3nEeCCCR7bMbSYqKbsbT46IcF1gRKs55Q9uSlrSWJY9c54a1EwSjVSTug+ZryI56v986dLR2v1E9lSxgD9DuO531I68Elns49+vqpuIP2Us9SjARRXp8Br1QmxdQ42tJOuhWpvDZaxu8dDsN3/iavB1lpHOuakIFzrTMGUL/5qufu5ky77eELxGjxAcAPjuP4O8MwfA7AvxyG4Z+N4/iv64e1QJXdkEXrsrckeRWupIcAACAASURBVLdBSS5YKiu/COP50FI4lDJep9XzGE+9YO1qXwg/PC2F6eNcUoGh3Jd+K9Iz65eOxEHL5yeBVkpFA8t0dImXVMay0tNa2pkeR5ywBl6vXAOj52Y9YLaknAGIz3qmryPsfKdG27pWrbxnX1KbhUZS2cP98jbhiSTqs+rTn3v5HsdxBPA7t9PP3f5lvm9QUYVbvOsILlgrM8bS3K7WRAK1VBdV2s0G+lPbrQ9hnkbO7HSegzS621leHy7HEnAlR0shO738+Zpv6aPEln4eMfM3jLv5SLpZKreAS48lR8whyvvQ3C1t02t3dNRFR/qq3u08TaAPaHtCu9VNR2G+aCQd87LsbUPerUfRnddZ99sP5iECDsNwAfDLAH4/gJ8ex/GXhJgvAfjSdPa7G6eV3ZCluWBLWRfszY+XB74lSQKyBV2tjdV3VJH14Wi7e5vY7mhAhoYnKY08De8TXdp9XcRTvVrZS5I0dy39TOOjqWgvBS251lLnrc9q5bRP6bVl4au/dw585Qm0OdC1QOuN36Pcr7wp8nCM3s61VX3nE7rqjeP4DOALwzB8C4B/MgzD94zj+Gss5ssAvgwAw/CdQYfc4oIjWuOWJK1MO1a6AmTwZjZnWX8LvdaLa8A8S6sfb01Ycri0TnoOtPbFC8DDTdNyPwW9TGvz1DM/ntrO09Clfe716+lnei65Y2tTVmZt2AJvGac25RyN81LdobQzsHS/HjB7QdbqKxNv1UvlprRvPco+9zlTLzlobfNVi/vt/2EgRb9xHH97GIavAPgigF9zwoPSYNVrLTjS1lsbltaWI7chJR5PGXXBnkPmMVQ9U9Bm+pnHrAdhDlO+UWoZv9zZDDwgyNsu4Sk/8YrG8tQ0rQe0tPL8liTedgliOw0tvU9emQVcWh9xwp7jpeVaypjGe7Gaa86knelrCK/5Aj58qXq6WatdTaxWl4lfNNKOrbKI1nkwxpbwBWK7oL8VwKc3+H4TgB8C8Nf7TiMD4Uy9FlN7i1HEBSPWNwevBlbNAWfdb7ZNr7+3FSGsbZySVGDmgWoeL9/n68XyMXk9hfgybr7ZyoI2HysqHiut/fI4C7rlpxSTAS/tpzU1TfuNwFeEdit8H29w3OnWuOMMnDPzlMpNZdxv5rnPljwHm3W/0TH6KOKAvw3A372tA38E4GfHcfw5u8mIdTZKZeJ6uWCpjeeCAw/miHAdiXIphiviWmms1Vc0Zjbu9uloKX38qFveXiTDUd6kRZ2rVs4d7fTSlulk7Zakx7G/A1r6MLLnTujyM7P2a7nhls1Wkbj0hivAhm8UoHDiLXltWhywNUdzQt6xVSYpcqtSD0fcy1V/im5PwhrH8VcBfG/9RIC2rxLMuOAaJ50FuueCpXLnwRyWC651wBaErblk+srM6a4+EKbpWanO2hEtuWHL4Ur98jno9/7On4ZF+5peon7vb3YHdGQjm5eK9txwJg2dcbzl2Esj0zY1KWctjrpeAO23Gnl1cGJq+rL6tcaT6qS+TK3lfrO7kaMP3uB1rannOnivuQOKKAriKAxbxB1urbzd08FpWM08rkf7puWaeqw1WyCegXwJ4QvZIR35PmFrVzJ3mJG2NN2rSQJgRNTBAnO4bi1p/pkUdCQVrUG55fakFvjy12+tAQMB+D7euHbQZWFq1UXirP4jZaasAVrV2kcNINeFL7AZgIsi0JPo4d1r2/uWJO9cmxuNByl3bkmiU+7hgjXXmlnnTbnaGslO+PnpisuVDVq1MUt2s1pKWnLUtIw7ZKmOlvMUtJRS5qnnZUz/HdCP1+ann/lx7U7oKHhpnbUuXPqKxHpp7K7O1yrX6no4ZknR9lIdL7POF6rZzZy5yEjtswCMut/oXOq1MYCBPmlpIO+Wa29JstaSI+lvY57l7+DKjr3mViwCdVJMDWgt0If6q/0CB3nTU604fCM7nS+sbtlGT0Fr9XTDlQ3i3A5oqY1WbgGX1tekoKX6
6EatKFA90NI58M1WANoesiGVa7ERsHrtI9Dn9VzeBwUI9QtpX7oQvfWIn0vtI4puvorIu4D1WS/eAcBF2XXZ2rVgzaFqpNNirPpGFywNVSTBzHO4NVCs6dNrF5o3gfDTBTAe1oHLfK2VywKz5Hx5meWcaT3wADEv15xtqZtehv8NSHyzFU+bzz8w5Fwwj1+maXXg0vio2y0/LcdLyzXw0njrfuHIzmltpzOA7eBbC2ar3msX6YuXSceiJChJIJQ6yjya0oOrNVFel22vtanTjgAG2iEsxfScQ7Y9hzRYefAZ0dbasBZD5aWupTqrXaTO/c9p6QFhQHbDz08XXK7P01qxwJvM/b3ypqh5e83dPursbz/S0tC0veZ8I+nn6C5oLZb2SV+zdBzZCa253fIzsi7MYb3G/cKRnc4A7O/zbQEwnD68OqtPXscVjffOZ8q6XwumkXoqz2Fb5bXq29/OAAbaAaj1RckVvSWJAzR6bo0BoVyQBGJgCTdrrZgPG4G0VZeBbcapS3oabuPm1oW13cuaG+bpZl5mwVZPJVs7oO1nQWvumcY9XvJxdkHTWG1HM4+xUs28fc2GLKnMcr2AkXIGlvDNwFaKscq3aiPNE6Scn5v/f72dz09KPVcEalHwWRPmdVn32//hHwcAMJDbUdzqgrNrwZH+O7lgDZyWA44CzoJhdi131Q1a83Xhz54u6jOkn3HB5RL7Hl/gAWvaXrutiMdrII6knzUY1977K93OlJXkirPAleIst8v7yIKXxoYcbsD1AkbKeRpgGwDXlEtjRsqtOG0MVV5gb/fLY/ixNnZE28IXOAyALWUBm2lr1XOAZndES+XGeFHQWulqqR+rnP69WWvQvEyT5c61DxdiHElJ3+A7uzXppsv1edq9GmQQv9VIcpQvQdY6eKaP+bm8GctKP9NjbX23/PTccuae4RBoHfje5y8536IM3CKAjvTJpf2as9CV5uBpE/cbGawH/CLud83xZR0IwJlUtOWCPQhqMQFIitLIdjXqnQ1ZgA+uzKYtrdxbK/ZA3LL2677N/uassi4MwIUwT0lbtxXxcynFTI8jO6AtVzxNX38EJU9Dl7GptI1jUqxVl30QB20jrf9G1oY1YLfeLxy5xQgQ1nunQWJuNFJW266mHEI5L9NiurlfyaVq7tbbCa2Nx+N5fS/3ux58gdUAzBfmo8O0pKK1Og/OERfMXS8/p7HaLuikC9ZAXMpqXWYUzlq5NHabGXto8dYMwNPngOv0aTtyq5K0mar2e32XdfpaL63XYWunnqUPA9bmq5pUtJd65n1Jx5H7gbX6iFO2XHJkN3Q65QwgtNmqpizaJts/L+dlMOIioJX6uSvy1CurjMu6gNT2afXP2/eAL++j06Mo+0gig6baTVkWSKPjtLpgrU8I9cHbkjQQZ1yq166U9wJpdy3vF565X17GNmhZ0twtreP3/vKvGSztcs537nYz9/5yONdIArYG3uwuaKk+sjacSU+7O6QV11vKXNfLzzV4ZWOkNh4Ys2W1oDWhy6V1Wo6P6n57O9q2i+bGKegoiKNOdU0XzKHqnUtjc5JaaWknLAtiD7AtKWXPBdfA3P3Mo6ekyy7p65W5WrZBiwJ2mYJegpg7U+uJV/OyvPNdK/1MX3uk3EtB0+MIdLW4KHijbWZlbK03dIvR1HE7TNdoky2Dc+6B2QQxzW5q7vdToUwaiJ5HnW7PjVVWfatzj2mnNWD3alvZLttvdke0NR4HcsPDOShcLfBaQG0BcwSmuzjmZUpa2iXN14atW4v4ubfeCzxATEGrueIW50sByDeQteyEju6A5scRJxyFbqnznHIqBa3cXlTO3VuMAPs8C9jW9tl++Nje3LS5qpJSzxrFeZnmdiPxUr9rul8vrt+Fb8dNWB4soyDccy04Mget38CHBc53YMl7GPUQYnq1KXFeHy0fGCRdMV04S4Pr83RhNXS5zGH2GlS7E1p6HzjAo9ClxxFXHL1v2HtKltSf5nqBTvCFE8PrvfZef1Ybb1yrjRUvHS8kBda6X97GG28r99u7ja6dr0oBCKXbZPusXQu2IN7JBbc64BDMjPNIm2w9lQR371d3j7lB+OkKXJ/UDVr8CVryE6/8FLO31jtvk0k9Lx0vT0OXvou0VHRGUlst/Uzj+U/aLrM+HAFz6OlZ0bVeAKldzjxGapOJt2Ij8ZHxW88laN+lbbyiHfF6CdBSvdSHpb3cb1/4ArsDGMhDLhJH+1xrLVirl+YkxdDNWQ6EyzHYuZdmrklLZ6G8Syr6JuHpWTQlzZ+gRTdpWWlpafOWt9ZLU9DTjJagBqKpZz0NXebH5W02q3kcJT/nQKXts9DlMdEU9L1cAO90Hry9iB73gGambY/z2v6ltmDHqqQGkQdiSPHauVemzSdTp9VvC1/gEAAG8hC24mvG8mCpydtsxfuW5lCOA+vB/DxSF63fG7rWZx8rBoDkhvna8NPT5b5JS9stPcFSd77AA8R8rXeqk9eSCxwzm64oiKXzUlaUSbFL7rdHGjqzKSu7Nqztbgagb7ICYN5eRI9bz6N1Pcesga1XJ6rmoRsSoCPy4rV6a804ctvR9vAFDgNgoB3Cazycg/flpaattWKpLLHhS4KtBVpvTTUD2T1dbpH3mYu6YZaWppuyliC+3NeIrXt/+a1IUopZcrK5+3/lb0EqcyptilpS0Fr7ml3QNDYKXamfjOMFUJduts55XS1kawGcmWstbKU4XjfTSAI+VY4BGYCe243edhRNeUs6LnyB1QA8ou5+3t4QrhmH1lsAjZ5DKaPxQDoVXZQBreV0Jci2tG2V9FaF6sv7d0tLK0/RoiC+i60R81TzNJT9kI3lGnLdLujH+TwFXZN+LvPWFH0Qx9obsqS6MHgBhNLN9HgtyNLz3v31nBePW4jCF8axtfEKrM66UHi3HW1za1B7X5/iIA/ioG9Yza09rZIgyOU50podzlJ7aROWRJHgrUne2nAUlkdzuhFZcL6fs1uWni747PqMj67PoiO+0HICYmtjlnY7EhD5zl87FV3GK/H0nJZN5bn/L5EUNAd2BLilnfYISw5w0yWzW4qAAHgBqPf1Qjleo27N/loBK7U3xSHLj3kZj9cGiThoKZ7Xr5V6zlwIs2n2hzZMQZdJtjjUVhfce0MW75fXW4DWdkg7vxINPhqgM23QWJc53qIOwH19+PoEPF0Wu6Uv16f7Bp6F2jK81eI7teVNYcuvVYz1Lb8oDbi8TWbtl8fwPsUU9PMcuuYGKwBp10uPt4QvyDmSdV4chHOpjQZoUZKDkxp6rpWeZ6Cm9X00tc1xhzXgXmniqLL9RF1wdD2YHnup6OCtSfRYgmMUWN0g55RHFP3QEK2blRM3rKwP0x3TjzLdEUduOdJuV8o8epKuDxdZu6Gj0lyzBlt6Hln7pXGhFLSSap7KjA1WAEKbrLTjaF20Ta/jNdpIr8UUhawEXJ561j4NcGXWj7V6Xq6NYc2lxf32+XCwA4CBnBuW2u7hgrMbsHgfHMJam3LspKKLNDi2ulXpXFJLTM1nrDR86TlZH3664rPrE8q
DPPitSyU1Te8jfsZ8w1YNjIHYNx9ZaWfJxVpu2Nqs5aWjM6loDlVaJ6agBbcLYJFqBpSdzdOgj5+9odkC0bXm1LO9Ku9xk9p/Zikmsys6Aj4+F6nty4AvsBuAizw37MHOitVAm52X1Ta7Icvb1AWEUtEahKS/nYhbjawDZ91tr/Xk6K9Oi5PeK2XHdAGx9UUP958VMJ5GtHdBT+XyYye1NWBe5ymShs5uzErdnuS43VI3SzMDNnjp8dYglI6z7daYM5xyUdHHTVobryL/8TVA8z69teHoeBF5/fRPie8MYKAvhC3RfiIuuCbtrI3HZfXD6wLfG8yPNffbmkqOgLqHIjDNltPXfy8TQEx2TfP0NHfFGRgDgJeKfsSsm34u6vFYShpjpqizbhfAYnPV1Hkb6LY6XrO/KGSfhBjebibtliNAhqLnbrMbs7jWSD3XOt911qMPAGDAT0lHIczjrPreG7L4OZ8Dr7dS0YH14DIFIJdetlLWGRhrMWuCmcqDrgRfrX0BMds1TdPTLTAGHs7XSkVP01o65RIL5NPPRdk0dOZ+YBo/izWgO50/1nYBY313GqAP7LT6KOx6ATjTV+8PAqKsW44sl6rFRODrud+IrLbRC9E+8AUOA+CItCvumrJSxlKcB2WpDy2G1hkQpmH02IMwnPJMf/wYRn1tX9GYaKzannzRw03Sc6bN3dP3oGWR9h3F3q5m67uNpYd0SPVW3/P4XPqZtuHQpcdSmhkION7yMwuZVmAdDb5IHEfrxWBr3TcCox4xNbcdtY7Zq01cBwNwNq0steGgbnXBtanoyGuxwMzrEl/a0Ao6SWu7XO0DQDYmGkvfIy01zXdOs/Q0ANcV32Pw+H5iyR1P5ctd0eW8tKPi34YUTUtHdkDzc9URPy9BS+Ebgi4A9T5e/rMVvF59z357jkXLWuYlStp0JTXwUs+8TaRPCa41kF3D/a4LX2A1AJdfaE33FriifXoQjrTL7oqO7oLm55bzDa4594DwHmnkHpLeNg2+FnR5f/efenq6HGspagAmkIH5VyV6j6GcYrZ7FOUUw9yw4XCBJXBpvQndafDHzyyErbq9YNur715zEmVtuuqZeqaqTT1HwWx9ePDiMvOxxj/Ek7DoC8wMlYVwdN1Wqss4Va/fllR06RtOG2M9uBbCMOoz4Jb66aUMaKNteJwWcz9n9xMDM2cs7aLm7ngqo/UylIH45qvej6IE5s6WzpMfP83KFZcLxKDr/TwiBLcYu0cbUZFNVxJINbhGnK4FZG0cq1zrM1PfGl9/sdswBZ2FcQ8IW/UahLXybCo6CuHSD4SxJFA7m7Jq3G0vYB7BOXsg5mUWfDUQ33dPA1qausCHu2MAC4cM8JQ2cbxXefNVgfRUl/tvzOF6L3/SoQvIsOVxC5cLtEGX/2yB35ptesxvrTmLkjZdFWnwze4gjkBcirf6tNpk2nlzbukjrg0BTCVdESW1pqN5jNVf7a5oz9VK7SywW+DG7bgRwmB1Wtke6WkJgBYUvTKrP+tPyAP07P0Q1owBE8gAFlAGoIK5xBdxONZK6odvLtNgCwSBC7RBl/+kf38RYFl1RwRwz7mJ4vD9lB3zOqvcO5dEJ+fBnddJbaU6LUaL8+YQaZ/XTgAG5lcwS5mNWZFUdDSdLPXrtbXgKb0GXh9JWQPNEO4N2T2cbxS6NXXRn2IfujsGCLAUKANzME/nczhTLb7RyZC1a5uO9yhTYAvIwAWwcLn02PtZjlug3Bt82ba1Y6/RhygLvhDq6M8a+GoxPE4aX+srqmPDF9gVwEURCHoAi8RGFIGzlYqW5EE5CmGpz0YI87je8TwGLFZSD/eerVtV869HlCTd5kQlPZnrUfd4hrXdh/1/THLCn/EyD7qADtXIT15WA+a1QFwbL/3s0adXJkqCL5W3PkvPI/+Jov/Rog5Xa9N7Pmu1X+oAAAbWhzCPsUBbm4rmZZE1Yw/CtB2UNgkIF0UBuLcifxZSvPVr0WKi5VY/5oeS4VEGgO6qpilrYO6SiygMP7ouH5OZ1QKuRQvosjdSgm3kuAXMLbDbA7JrzClTJ0qDr3cOVs615q7nyPiaMv17setdEA8CYCB/td1qbA/wEnA1wFr9av0Ayyt8JYRr09KtjrIV6hGgWu2yv15eHoEwhLG0+vuxAGVgCeYiDmhJFM5RMEvu+In9PWmA1eqyZT1+rgnlzBj8dW45Z1G18O2ZeqbK7nq22vN2WlvvA4Sm9eALHArAwBI4XK0umPYd3RWt9VkLYcndSm2AJYgt91wB4aItYdqiDEgj0I7+tMaLAJfXQ4gBPR/m5/cx2d/kVbjX0Ek1TzHOo02tMg3E3nELfMvxWnBeC5iZNj3mIGor+FJFgRyBrwfXlwtf4HAALrKufD3Xg6MQ1sqj68HR1LM0P5BYfkzPAxAuigBXBIMSG20T6cP61UdiNRB7cK2BslXG6yEca/Pl9WBl91jj8aRRRaDLyyJQ7lHWAjt+3guqR52bqC3hmwXy23a+RSsBeETbZihgPQjzGGueGQhL9RYhMlC2nDcQhjBtUuSlmy3HHD3vLQ2e2nmmLyumBcLSMSC/dwiUQaj3FPnw48VrIPaOI6C16tb4uRY0a+dRO4aoveEb3axl1dfWeVoTvqXvQzwJi77QFhhrfffuE8hdvSNtJaAWZZ2xVp5IR/NpanCxfgI6SDQYe+UZV17zoeGtK/J+7AVgqcyLiZavDegeP1vbLtQbvpa8NV6pTOs3k3qOjCm9SWvAN7uh7KENU9A1MOZXe6nPyM7krAum9WumoqV5axCOwjoBYUtZkHrte4i/bS2flaz++U8rxioD5u+D9wFGOqdlEOqk+qi0dhnwWnUWaKT6HkBqAS0/P+JP93e9Bny9tpmymtQzV+QPvuY/RbZNPXiLNgQwFd9k5Mm60q4FYa2tB2Gp3irLbNxaAcJbu8ke/Wkgbv0pjZEBrgZYLwVtnYOVS3VSjBcbibNAy89rj1ugq9Vp9UcBbev4qlrhK6kVvlRR+EbrMjFSXKRNtp+cdgJwUSaNbEE4Gp+BcLTO+wAQhTCEsloIU10RhjDXlu52b2Vg7JV5xwic0zKrnM8zKivWg3EvEGcgbNUd9edafYrq5Xwjbam8siyssheWLeHbD7xFOwMYyLthrY/opqxMX1kIe/VeGZ+3BWFLUlvA3SHdax33CJLm5bnfTB0vQ+IYgXNaZpX3kNZfbxDX1O8BuSOANgxeYHv4Zsv4sRbDX6wH1ygQW+HbH7xFBwBwUcQNW0CNumnJBcPo1xpTqrMAzsta0tFWe+28IiVdoy1BbEEz29aK0eJrjhE4p2W8nNdl5f1ueoGXnnvwlWJbAFfTZosxatuI8sArldWs+VIdGb7SG3Zc+AKHAjCwDoQ9SEb6kaBptbFAqs1JiuN9WzFcnSAslUV+orJNVi19Zsf34o6UAbBkzVGrq4Fu5LgVwFbdlnBeM1aU91xnWqbBV1ItkLWymk1XNdDL/sfbH77A4QAMtEM4Gr/Vpiyvned6IZTxdDR/OAeU2ASEqVrgtpesPxEN2FKdV8br4cRo8+
NtaRkv53WtikJXKmsBcY+ynnBdu002VlU05SyVZVxyaxmE+h51rannY8AXOCSAgTYIa2vKEWhbEI7WWSlirV0NmDmEodTRc6oynw63Klmw7eFyLVkwreknU1ZzDOOcllnlvC4r7/eQBS8/73EcgVZt3VZtatubOjJ8uTSIZVLPkT4zkD4OfIHDAhjQQUqVvdpaEI3EtEIYqAOuVMbB6u2Q5mOXGMcN06GjsK3VVpCW6rQyqW1v8Eagu6YDtvrzwMvLIsf0/Egg7hXbUqfKSjlrMKZla7lcaywrhpd77aR6KUaL02KjbdfRgQFc5LlhDcJauwiErb4s6HsQ9vrskaKuTUkDVWlpSUdJP1twjcJYq+8JXi8VTculuhpZv59aGG8J32hZb1ivVaeKPtIwAloJklvCl+ulwHdb8Ba9AAAD/SHsxWWuxNG1Yq9tLYSlMi8lLSnhhnkzCjWpziuTprIlwCWA1sDWAu2W67+175/UJlK2Noil2FoQb1UXjTfFXS9Ql3KOxktlEYBKsLdiuDz4SsrA19M+8AVeDICBvhCWYj0IW/UchhDqJChKbS248vZemZWu1sAcWBumL5OrJ4i98VtWH3hZpj5yHKlDoIyWS3WSou+rFafV9YavdlxTvzWcW+JN1bpeWpaNj5ZZ5ZEY/gas7Xy1+Ei79fWCAAzEHW1Ea0I4O34Uwt78rdckjWeNFXDDNJz+RGMZOtVlyjL1XjtrzEz9FspCOANgft7juEfZ3vGmesIXQp1UpsFSUgt899De/8FsvTAAAzboJABZbbR4KybjhL314AiEgdjDOqS4UmatGVt9BNaGyzSoekExoujnEK+t10/kWKpD4hxOOa2jiv4vjryvWkwrjI8I4payaDw/VmWBN1PW2+W2OF8uXue1e53rvlQvEMBAPYRhtLP69iCste8BYVpuwZLHAXPQRtPP/I+yAcRHkgfbSGwNhKPnRdF1YD7fFmWumZGynmB+SVC2jk1F1nq9shp4rg3f2jpen43TYiPtttULBTBQB2EtFvABa0E4WtcTwrysvAbPNfM2YPGADudEWro04cNstUa8hVohDCz/TL1yKPVZee+tVp+BKz9vATE9XqO+F2xTf7MWeGl5i+v1+mmJjaadLfhKWhO+xwBv0QsGMLBuOvroEAbstWLLNXspa62sjAk0g7hGNf14n8U0iEaOM3XlHE6MFSvV95TVp1TXC8Za3JaA7tmnq5Z0My3fwuGuDd8InF8nfIEXD2Dg9UEYiIGVlmug9ByytzYstSljBtPS9GVRvQSHS9ULwlpMkeV81/jf6v0OIuCVynrA+EgAjsS6ksAL1K3RrpFyjpb3WPON1Gv9vw74Aq8CwEBfCNfGaeNlIayNnXHImT4iQLcceALEdAjujjnApHpelqnXFP0gYPWZGc+Ksdqu+YElcw21yqOQra1bA7w9+gop6nppuQfADDi3gK8FuV7wfV16JQAG+kFYiuVxnlPWxuN10XQ0ELt/mJZn3LTloKkst13hiHvBsUXWryrahn+AgFGvxVjltE6rzyryvvaCr3ceOabnkfKeMLbGdhVd59XKs2nfKNxryqPwXcv5arFWvNVmf70iAHvaE8LROg22NE4CpARnXu5BNvKwEGvsUt4I4qOmprOgtiDt9WmBmNZTeevcGWXAK5XXwteqW/O45sOAKS/VXFPe28me8D2CXhmAvXTx3hAGfJdL++FX4ozr1cqjIK5p1wjiiNYGNHezLWu9Vt+0DEpbD8RSbIusPiKuVyp7aTCmx9VulzdeA3xrut7oHDJ1kXopxoq14q02x9ErAzCwhBnXnhDm9VZdy/ovB2TNJq8sKxPMhwAAEv9JREFUcDuAmE/Rc8V7p6g9CGddb8Tx9v4f671/GTfslb1EGIe0N3jXKs/EZeEr6W3BF3iVAC468ppwDwgDNgRpm0i55Ya9GKmci77Gzq64VRH32rNvC8Jw5tIK4+j7+tLdMD2PlDdBl3dwBPD2nIcV540r1UdjpDgv3mpzPL1iAAMvC8LAHLDaurAWF3XJUl+9Qez1W+mKqfZaK+7lejPO2poLl7TZK6st4OvF9ARwpo0rze0C6wAvCsIad7sHfM+0M9UrB3BPrQ1hXh/98JCFcE0by5UDtsu22iRBTLvlrKfDPbFy6zhTV3OeKYvUSbFUXru1HLBU/lIgHFY21WzVZV1vdMzWvrgyfwg1aWdNrx++wJsAcC8XrMVvBeEawGXcs9dGcsNenFQu1TU44r2ccKt6OOAS21s94CuV9TzfxO0CMejW1q2VPu4xn0wfkXopJhOnxXptjq03AGDg5UMY0B2oFpcFdG2bmrVgK21d1JCeLl1tAWb+q5T+PLQ/MQ/CMOrX0lrwlcq2BnBKreC1ANUK3tpxrf5O+O6hNwJgYAkOKgvCUptaCNO+WiDd6mx5/zWuNQri2v7pBXBFGK8B6l4QLvVwYnrIew+yYD4KgFOqWd/N1K0J3pa66Lxq6qWYTJwW67V5GXpDAPZkXQklN1wDYR5jgUqrp/3XuuFSFwWlVRcBsRSbqatcK34p8iAcjWkZvzamFr5SWU8ghxXdzdxS1wPeawC7Fa418NV+UW8PvsCbBHBNOlpr1wPCUkx0XZifR93wGnXczUbcc7auwhXTaZZujgjnPSAcfR/WcL9SWet5Smu73V79tIx/wvfoeoMABuohHI3fAsLA3HVGgZ2po2N4dTD6bYF9GYfXVbhiaQgNzl69FcPLpLY8XmqTiemhPdLQUtlq4LXcLrAOeNcaZ6v51dRLMZreLnyBNwtgoA7CWps9IMzra4FpOU7e1qrz+q1xvbzeSk8DTSnqvZzx3unn0n9LXO/0dFenC2yTZm7pi9e3OFur7Vau9lzzjeoNAxjYB8JAPWQj9dk1Zq2vCIhLfcQRg8RE+uL1EVBXwphOWdPeqes1IXw0AFt9hhVNMXv1e7rJI82zpo9MX1as1+bl6o0DGKiHMIR2EQhLcRHI0vE8sPYCbbbeAyRvX+O+o2M1wBjwAbA3kHsp8xqyKWitvLvDLap1utl6z+FFHGCt462p791fNEaKy8Z6bV62TgADqF8TjsA1GueBT+qntxves74FtivDGGiDxJFhfQQAV8tb0wX6gm7ttdO9wVszZrQfK9aKt9q8fJ0AvusIEJbiIm4ZyLlhK36t+hLjwbMVthvAGJCvFzWgPQKcW1PQVl3315bZSKVNwIIyr+8Bqd7p3y3mXBujxWmxVrzV5nXoBPBMR4YwoENP6kdyw7TNViBu6SOaoub1Jcaqp/0UNQAZQvdliB4QWhPUa7vgJnHgSgOtsX65hXtco4+11nJP+K6hE8AL1UI4Gt8Ca66sW65p0+rAtT5oTKQPKZ1utYnOi8Z0csdUGpSj9VbcmoqMt+qcjgremj4jMT366AXWSMwJ3x46ASyqBsJamzWdcCTGg1mkjec8M320jJMZt8RYrliKKXEcAJ2ADGE4PvTeKemiTecRAS6wHiB7p4l79pMFb6RNbUwmTou14q02r08ngFXtBWHAd7Geo4zE9HCqkZge0LTmL7Up7Wpio
MStCGSqVujxP7OjwHyhWuBG49aAbm3MmmNtvZZ7wrenwgAehuEC4KsA/vs4jj+63pSOJA/CgAxWCO0i8KKxEcBK/WVjom3o+JkYGtcjRgJmDeS9vr15bgTkrA4JXAm2QPwCL8XWAmNvt9grZg9He8K3tzIO+CcAfA3AN680l4Mqshabadd7XbinG6ZlvUAsxbUC04rJuGmpbxobSVeXWA0yBwHzZsrAFshdqPeGrhS3pmvX+tra9faIteKtNq9bIQAPw/B5AD8C4K8B+POrzuiQqnHCVru1U9JSXBSWLY6YxkUcpxSXjSlxEixr2vWIpW00IAEvE87W6wHqLrKtF/0IpKS2PUHZu79e4F0jLhurxUfavW59FIz7KQA/CeAzLWAYhi8Nw/DVYRi+Cvy/LpM7lrb4I4mO8akQu7Vj2CLuSYhreU2R/mtjtXlp7YAJZvTfERWZo/c6vfcnEq/9LdT+nqOAjv4/q53va4GvpRO+mlwHPAzDjwL4+jiOvzwMwx/V4sZx/DKAL09tfu9RryaN8pyw5oIhtLPWkGtja1PCUlxtWprG0disI65NYXtxkbSy1F6LteK1+Wnta/7bZFx0y3/LyEW3xgH1cMV7fGDcInXe2rb3nLXYmnirzdtRJAX9AwB+bBiGHwbwHsA3D8PwM+M4/vi6UzuqaiBstYumozOxWhywBFukv9qUM43tuU7cGkdjvfZWH6WNlYK2LjIROEt9UrV+1s26mchFc+00tFa+lyvcK4W+RpwW2zPeavO2NIxj/D/wzQH/RW8X9OSAv9Q4taPL25ilgVhrl4lvjW2J69G+95zWeD+scu9za+3fRk1fPZW5KG7tiLdynNF5rbVZac8PAj1ivTZeu9eiL2Mcf8tNT533AVdLSy0XWSnpqLvV4lvT19m0NI9tbR+NbXG8tX3S2Izj1cblffI5SPL62kIZZ+zNseZi3MOFvSRwrbEuu0fK2WrjtXt7SgF4HMevAPjKKjN5laqBMIQ21jpy79jMHKLrvzSWxnuxrevKPWL5PLQ6Kw0ttePKAHoPtaaevT7WTHseOXarlHa235p4q43X7m3qdMDNstaEa9tZa8nR2B7OOdpvBvqZufX64JHptyji5Hmd1yfv1+pH629NZS+QrWnoninqI8C01fFm+zjh+9J1AriLPJgCOSdc2rVAxovPuGHet9ZvqyOm8a3uOTIPC57ZjVjS+JF+ubTfyx7qmYbufeHeGtC94o8+v5p4q43X7m3rBHA3eU7YcoNQ2u61jizNJ+Mkax3mWqlsq2/apgbIUjve1roAeY45I+l33zOdHb2QrrEmbLVby0326rtnP3uA2mrT0u7UCeCuqoWw1bYHKHvHWy5NA08G6Dy+FsY0PgpXC6yek41unorCWWujqRa2tRfJtdeFezqxPSDda1wtfgsHWwter+0p4ATwCrIcLVAHYatdjRuW5pcBsQYsq/8ecI3Ms1f/VNH0s7cRSxrb6i/aZi31XhN+ic64Jr6nw9zrtVltvHZe21NFJ4BXUw1Ma9tloWqN0xvcUputPgRE21hOl7fz2kbaS/1Y/UUl/T57Xwh7rgtvmfLcYh30iODt3cZr57U9RXUCeFW1QBhK2z3T2L3b1K4V8zatMLbG8ebnjZnpR1Lmv2jrhW/NFPYe7niLVOzeHxR6t2lp57U9xXUCeHXVQthq23Nnde9xeoLY6q/nB4FoO6+t1l7qR+vL6vsIylxgX0pKurbNVs7yKJumTvj21gngTbQGhK22NQ66JwStNla77KYtq02NK+btvLaR9lo/Ul+StrwPGKi7iEY/JLQA12u/VQp2S7Ad4fVG2kban5J0AngzeSAF+q4LW+1aXGItvKXxrHbZ9DRtw9ttnW723geujGveUz3XfyP9Hckhb+0oj+J6vbaR9qc0nQDeVBZIgbbNWVDabrWeTMeyxpPaRjZsSXU9XG4Ph1u7Bmz16Y3RU61p7ugFODLOWinQLd3uGnM5Wjuv7amITgBvrlYIw2i/tRsuWmO9NeOKo31KbaOvQ2vP+9D6kfqy+vTG2FrZi+1LTUuv1WdL2z0AesJ3C50A3kUtEPbar7kJaY8xpba9gJpNN/P2Uh9SP1pfWp+a1loP3mr3dI+0tNfPGtBt6bel7R5jem0j7U9FdQJ4N60JYa997zXlaFso7VvWwFv71dpG2tM+rH5oX1TZ/35HufD1XguO9rmWYzuii2zJeOzV9lRWJ4B31ZEhDKVtK7TWcMTRfmv7bkkxZ6HsjbGFWi7CWwE30ofX/qiblo7qfCN9nMroBPDuikAU2N6VemNvAeKasTMwXXvNV+pP69Mb42h6iWvCrf3vCV6v/dpjR/o4ldUJ4EPIgyiwHkij7fccW2sfdcVavQfjSB+8H68/3ifX1vf9Wqq94G59u1Kknz3h9dLbR/o4VaMTwIdRK4Qjfay1uau0RaC91sfaIG+dH+2jKJNejv5Xi1zoekC65wU169aPAN3IGC+9/RZzONWiE8CH0tEhHG2Phj56gVjro8bRrgFkr29Le14Ua1LjW64LR2L2BudrmcOpVp0APpy2gjCMPnruDl4LpBnH6rliKyY6Fu/P61Pqm2uv/55bbMLKjtXqdiNjHQF6R5nHCd8tdAL4xaoVwhG1uuGj9BFx5ZGYMhac8XifkX6tsSS1/tftvclrDfBG+3wpwHop8D21lU4AH1JReK7thHv2AaOfHm62xzxojBcXdcVSv5H+Pe15Ea1xR703Zr0maPaay1av51QvnQA+rLaCcLQPOP1EnWxrP703U/WGsTWu1X9knK3VeiFe44Edrw2aL20up3rqBPCh1SONHO2nVyoYG/UThXmPfjJxdNyizH8z7wLYE9C9L7Zr7IbOxL1G2B2tn1M9dQL48OoJTzh99YBw6QdOXz0ButX7Q+MisXR8qpb/dke6SK65IzoTezRIvdZ+TvXWCeAXoV6QifTVC3rROW2ZIi/q8WznbGzNXI6mrXZHv2bw9uzrhO9L10v633/KVRTCPZTZNbwFhKP9RPvK9FdikYinc6E60n/JHhu99gJvtL/XDN9TR9eR/refMtVrPTjaV09I9UyRw+mr9+1EWbCucevRFv9Ne+6qzsIhE98TTq8dvifIj64TwC9KRwRnpi8E+juyG0awz5Y2kiIXUu81rH3bUs2FvDd4M32+VPhGdd7r+xJ0AvhV6qgQjva39caqmodr1IA42y6jPS64te7pNYA32le0v6P2dWpNfbT3BE5l1fvisHVf0f72uNA8JfusdX21bY+glvln270V+Eb1Uv9mTmk6AfzmdeT1pCiEe88t+wCJFif4UmDc43VGlf0gFO2zl97S3E6tqRPAL1J7/MeO6rU4iLUeKmG1P9pFsccHhGzbNZ6edeT07gnMt6wTwKewj3PtrTUuZFtDuPSx98W21xzWfB2v4W+2t06YvzSdAD6V0B5rwcC+bqIGwj1BvOXFsud4Nf2skbE58rrvCcy3rhPAL1bnf/LttPYjFyN9rfX7WQP0a8P3yEswp07FdQL41Eo6+geEl/iB4wigXKPPE5Sn3qZOAJ9K
6rxYxrWma21t/xI/gAD7zfvoHxTP/5cvUSeAT70yrXUhOtoF7ogAPeKcPL3EOZ96LRrGcezf6TD8TwD/tXvH6+r3APhfe0/ilet8j7fR+T5vo/N93kYv8X3+znEcv9ULWgXAL1HDMHx1HMc/svc8XrPO93gbne/zNjrf5230mt/nMwV96tSpU6dO7aATwKdOnTp16tQOOgH80Jf3nsAb0Pkeb6Pzfd5G5/u8jV7t+3yuAZ86derUqVM76HTAp06dOnXq1A46AXzq1KlTp07toDcP4GEYvjgMw38ahuE3hmH4S3vP5zVqGIa/PQzD14dh+LW95/KaNQzDdwzD8AvDMHxtGIZfH4bhJ/ae02vUMAzvh2H4N8Mw/Ifb+/xX957Ta9UwDJdhGP79MAw/t/dc1tCbBvAwDBcAPw3gjwH4bgB/chiG7953Vq9SfwfAF/eexBvQE4C/MI7jHwLw/QD+zPn3vIo+APjBcRz/MIAvAPjiMAzfv/OcXqt+AsDX9p7EWnrTAAbwfQB+YxzH/zyO4ycA/gGAP77znF6dxnH8RQD/e+95vHaN4/g/xnH8d7fj/4vpwvXt+87q9Wmc9Du308/d/p27WTtrGIbPA/gRAH9z77mspbcO4G8H8N/I+W/ivGCdegUahuG7AHwvgF/adyavU7fU6K8A+DqAnx/H8Xyf++unAPwkgM/2nshaeusAHoSy85PsqRetYRh+F4B/BODPjeP4f/aez2vUOI7P4zh+AcDnAXzfMAzfs/ecXpOGYfhRAF8fx/GX957LmnrrAP5NAN9Bzj8P4Ld2msupU80ahuFzmOD798Zx/Md7z+e1axzH3wbwFZx7HHrrBwD82DAM/wXT0uAPDsPwM/tOqb/eOoD/LYA/MAzD7xuG4WMAfwLAP915TqdOVWkYhgHA3wLwtXEc/8be83mtGobhW4dh+Jbb8TcB+CEA/3HfWb0ujeP4l8dx/Pw4jt+F6br8L8Zx/PGdp9VdbxrA4zg+AfizAP45pg0rPzuO46/vO6vXp2EY/j6AfwXgDw7D8JvDMPzpvef0SvUDAP4UJrfwK7d/P7z3pF6hvg3ALwzD8KuYPsT//DiOr/I2mVPr6nwU5alTp06dOrWD3rQDPnXq1KlTp/bSCeBTp06dOnVqB50APnXq1KlTp3bQCeBTp06dOnVqB50APnXq1KlTp3bQCeBTp06dOnVqB50APnXq1KlTp3bQ/we5egeI3ld27AAAAABJRU5ErkJggg==",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "heatmap(grid, cmap='jet', interpolation='spline16')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let's define the problem.\n",
+ "This time, we will allow movement in eight directions as defined in `directions8`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 71,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{'E': (1, 0),\n",
+ " 'N': (0, 1),\n",
+ " 'NE': (1, 1),\n",
+ " 'NW': (-1, 1),\n",
+ " 'S': (0, -1),\n",
+ " 'SE': (1, -1),\n",
+ " 'SW': (-1, -1),\n",
+ " 'W': (-1, 0)}"
+ ]
+ },
+ "execution_count": 71,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "directions8"
+ ]
+ },
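+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Each entry maps a compass direction to an `(x, y)` offset. As a minimal sketch (not necessarily how `PeakFindingProblem` computes successors internally), a neighboring position can be obtained by adding the offset to the current position and discarding moves that fall off the grid; `neighbor` below is a hypothetical helper, not part of the library:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def neighbor(position, offset, grid):\n",
+ "    \"\"\"Hypothetical helper: apply a direction offset to a position,\n",
+ "    returning None when the move leaves the grid.\"\"\"\n",
+ "    x, y = position[0] + offset[0], position[1] + offset[1]\n",
+ "    if 0 <= x < len(grid) and 0 <= y < len(grid[0]):\n",
+ "        return (x, y)\n",
+ "    return None\n",
+ "\n",
+ "# Directions that lead to an in-bounds neighbor of the corner cell (0, 0)\n",
+ "[d for d, off in directions8.items() if neighbor((0, 0), off, grid) is not None]"
+ ]
+ },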
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We'll solve the problem just like we did last time.\n",
+ " \n",
+ "Let's also time it."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 72,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "problem = PeakFindingProblem(initial, grid, directions8)"
+ ]
+ },
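+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Simulated Annealing is stochastic, so a single run is not guaranteed to reach the global peak. The cell below therefore runs it 100 times, collects the distinct solution values in a set, and we then take the best of them."
+ ]
+ },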
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "533 ms ± 51 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%timeit\n",
+ "solutions = {problem.value(simulated_annealing(problem)) for i in range(100)}"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "9"
+ ]
+ },
+ "execution_count": 14,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "max(solutions)"
+ ]
+ },
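+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As an optional diagnostic (not part of the original experiment), we can tally how often each solution value occurs across independent runs to get a feel for how reliable annealing is on this surface:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from collections import Counter\n",
+ "\n",
+ "# Frequency of each peak value found over 100 independent annealing runs\n",
+ "Counter(problem.value(simulated_annealing(problem)) for i in range(100))"
+ ]
+ },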
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The peak is at 1.0 which is how gaussian distributions are defined.\n",
+ " \n",
+ "This could also be solved by Hill Climbing as follows."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "206 µs ± 21.6 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%timeit\n",
+ "solution = problem.value(hill_climbing(problem))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "1.0"
+ ]
+ },
+ "execution_count": 16,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "solution = problem.value(hill_climbing(problem))\n",
+ "solution"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As you can see, Hill-Climbing is about 24 times faster than Simulated Annealing.\n",
+ "(Notice that we ran Simulated Annealing for 100 iterations whereas we ran Hill Climbing only once.)\n",
+ " \n",
+ "Simulated Annealing makes up for its tardiness by its ability to be applicable in a larger number of scenarios than Hill Climbing as illustrated by the example below.\n",
+ " "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let's define a 2D surface as a matrix."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 73,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "grid = [[0, 0, 0, 1, 4], \n",
+ " [0, 0, 2, 8, 10], \n",
+ " [0, 0, 2, 4, 12], \n",
+ " [0, 2, 4, 8, 16], \n",
+ " [1, 4, 8, 16, 32]]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 74,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAeAAAAHwCAYAAAB+ArwOAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAIABJREFUeJztvX/MdV1a13et93ned4AMZZpIUueHjka0NSRCO1Ia0taMNB2RiqZJiwYTf2WSWuPQ0FLxD9umfzVNiH+UNHkLRBONaIttLdUaGiGUhCIzCAYcNRMYwxTiSA2BSWfeee9ndv84977vfda5fq9rrb32Ptc3eXKfvda11tr3eZ7nfM73Wj92WZYFUqlUKpVKjdVre99AKpVKpVL3qARwKpVKpVI7KAGcSqVSqdQOSgCnUqlUKrWDEsCpVCqVSu2gBHAqlUqlUjsoAZxKpVKp1A5KAKdSg1RK+WQp5eursj9SSvnRgL6XUspvae0nlUqNUwI4lUqlUqkdlABOpSZRKeXdpZTvL6X8s1LKz5dS/vSm7mtKKT9WSvmVUsovlVL+u1LKG491P/IY9tOllM+UUv7DUsrvKqV8qpTy7aWUTz+2+f2llG8opfzjUso/L6X8WU3/j/VLKeVPl1J+rpTyy6WU/7aUkp8fqVSD8j9QKjWBHmH2vwHATwPAewDgdwPAt5ZS/t3HkFcA8J8AwK8DgH/jsf5PAgAsy/JvPcb8jmVZ3rksy199vP6XAOCLHvv7cwDwPwDAtwDAvwYA/yYA/LlSym+W+t/oDwDABwDgXwWAbwKAPxbxu6dS96qSZ0GnUmNUSvkkXAD3sCl+AwB+EgC+DQD+x2VZfsMm/jsA4Lcuy/JHkb6+FQD+7WVZ/sDj9QIAX7Esyycer38XAPwtAHjnsiyvSilfCgC/CgBfuyzLjz/GfAwA/utlWf4XZf+/Z1mW/+Px+k8CwL+/LMvvbnhLUqm71su9byCVujP9/mVZ/s/1opTyRwDgTwDAbwSAd5dSfmUT+wIA/q/HuN8KAN8JFwf6JXD5v/sxYaz/d1mWV4+vP/v4859u6j8LAO809P8Lm9f/BADeLYyfSqUYZQo6lZpDvwAAP78sy7s2f750WZZveKz/7wHgH8LF5f4LAPBnAaAEjq/p/32b178BAH4xcPxU6u6UAE6l5tDfBYBfLaX856WULy6lvCilfGUp5Xc+1q8p5M+UUv5lAPiPqvb/FAB+M/gl9Q8A8J+VUv7FUsr7AOAjAPBXkZhUKqVUAjiVmkCPqeJ/DwC+CgB+HgB+GQC+GwC+7DHkPwWAPwQAvwaXxVQ1/P5LAPiLj6uY/wPHLUj9AwD8r3BJS/8UAPzvAPA9jnFSqdSjchFWKpUSVS/ySqVS7UoHnEqlUqnUDkoAp1KpVCq1gzIFnUqlUqnUDkoHnEqlUqnUDupyEEcpX7IAvKtH16lUKnUA1VuoLdfaOq4N1r61X889cvfDxWvLvGNGxVL6JCzLL4sddToJ610A8OE+XadSqdT0er26rj9qufrXiXKuTtPGM6am3FKH1dfXWBsqjovXtrX2pdHvlEMgU9CpVCp1YlnAM4usAIyE71glgFOpVGpX7X0k/7yAGq+xfxcJ4FQqlbo77QV9L+zP+SUhAZxKpVKhOics+qj3ezV67temBHAqlUodQtxCqyhxi6k0bSx1qQRwKpVKdZW0CjjlVxTg9/mikABOpVKpaeRxuTM549YvF5btR1Grn/dz6QngVCqVCpMVQD0//F8nXu+lGe6h1r4p8gRwKpVKdVNU+jkSXp55Xk1fljrt2L3d777KGfJUKpUKUeuH/t4p01Yw94S5RaPfR//RlQngVCqV6iLL8ZNSW0qaoyIjwTjSwXvc7wj4RpwVfVGmoFOpVGp3jUqZas+PtvZlleXLSLTmgC9AOuBUKpUKUE+Aah+I0Hs8i0akyWtp3g/PfcVCd6sEcCqVSoXL+/QjqU4zHpV+bv24124/sj79yPrkI2l8Stbfvx94VyWAU6lUqknWx+95Fel+90w/e9UypqVtf/CuSgCnUqmUWx4QjnK/HrUeBCLVed1vS+pZ+3uMA++qBHAqlUq5pAFF1GIjD2Q1c8fRp221ztFa4Htc8K7KVdCpVCplltWladSaCp7d/daawf/tB1+AOd6BVCqVOpA8DhSAd8eWOuvYVHvP+c49n3wU5X7nd76rEsCpVCqllvZwiJaFWR4Yag/esK4WjrhP7FrzfvSC7/7gXZUATqVSKZV6wVfrKj2uWNM+YrEXVzcLfOcB76oEcCqVSrGyfOi3Pg2pdR6WGr/nimoLRjSxVvgey/VulQBOpVIpVFa35VkV7anzuF+q3DO/25J6luKtccdzvVslgFOpVOpGI+DrSelyfVpTzK1zzVJdROrZm3KeG7yrchtSKpVKXSlihW2Ped9aniMnW1dEa/ryxkb1dwz4AqQDTqVSqUd59622LrqKTgNr+m4d03reM3df1D140s7HgS9AAjiVSt29Wg6MiIQv17f2HkadeEW1wa6lLw4Rx1EeC7yrEsCpVOpOFQleLD7qIA5u7EjIRhwMYj16837hC5AATqVSdyXrQRTa9r3gG7HoqnUvsLZNxP5nC8CPC95VCeBUKnUH6gVerN2IU7Ba9/VGrMDeqkcqnhrv+OBdlQBOpVInVit4pT72OAVLGxd92hUo6xK+WiWAU6nUyRS1jaZ1L3CPfcD1dS+HG3E/Uj9YPRaz6lzwBUgAp1Kp02gv8GJtZoFvrehTsEbA93zgXZUATqVSB1bkoRE9wCvF9IZvrzaWPjz1qw4C3/r2H3zNUqlUanJpoQvQ5na59qO3Id0bfCcDbydSJoBTqdTksgAXoN3tcn2M3obUc/529JzvQVLOA6mYAE6lUhNqD+hy/XjBK8UkfJ+1E3h3pGACOJVKTaQe4NX02+PwjTrG4oqtMMMU3caTuqbGngC+UfTD+nk19hZSqVTKIStwAcZAl2sfvQWpro8A3V5zvhPP93pp15GSCeBUKjVQHuACxEFX6qt17y8W1yPFOyrt7F3BrakfAF4r4QYTUfU84FLKh0op/6iU8olSyp/pfVOpVOoser36Y9HLzR/tON6+qPZUOyy+jqtjsHouth4Pq9P272nTsn1qR/ha/tm8BFt8sMQhSykvAOC7AODfAYBPAcBPlFL+xrIs/6D3zaVSqaPJ63BXRTpdqT+r26XatKakW0DXq03EPdV1AN3BGxET0e7zcd1+DQB8YlmWnwMAKKV8HwB8EwAkgFOpFLRB1/rJ1ppilvoYuf1Iit8rHd1rvrcTfKW/7onT0Jqh3gMAv7C5/hQA/Ot1UCnlwwDw4cvVlwXcWiqVmlejnK5lrD3Bi8Va4BsNWK7uJPCNAu+OK6E0Q2Pv3HJTsCxvAsCbAAClvPumPpVKHVUjYWsZrwW4XHsLdLH4HuD11mnbHCTlHAHdVuB6qens6lMA8L7N9XsB4Bd13adSqePpqMCV+vLMB7fMBXvBW9f3hLIlbkfXy/3VRaegvW06DfM
TAPAVpZTfBAD/DwB8MwD8oa53lUqlBqoVuADHc7lc20joYmXRKWWuLiLdbBkbIAy8I6C7Y/pZNfyyLA+llD8FAH8bAF4AwPcuy/Kz3e8slUp1VLpcXTvvqucW9xjhhrl2k6ebqb+KPdPP1naBKWhYluVvAsDfNN5CKpWaRqOBaxnzqNDFYkeA11vXAlcO/B3B2+KCtTGe2CDtbMBTqVRfndXpSn1Ywduy2pkqiwJcRN3ErtfqeCPnfGdPQadSqaMpoatrExHfulCp98Ksg4HX63hnSj/P0XUqlRqrFvCeaREV167V6WLlmoVYo93wxOlmC3i9Llgb44kNVAI4lTq8vOBN6OpjJWhpYnpB8aCOd4/UcwTxNH1ELsJKpVKzaZTbPSJ0e6SWtWNZY6LqJgWv1u16oNsDuIOJmABOpQ6lEW53ZuiOnM+l2nv2A/eY+43sZyLwtkBX+8+8B/m2faYDTqXOpFnAe1ToalPLlvYt0JXqpbYR88RYfTB4ezpgTb02JqLNvMOkUimfZgGvps/ovbpndLvW+hOlmkelnidPO08ydCqVojULeI8I3T3cribmZI43Os3she6ItLO1baagU6kj6gjg9aaZZ4XurG44atV0XT+R251h8VU64FTq3nV08PZ2u9pY7VizQre+nsTtYt3NAt2RC6+0faQDTqWOoBnA2yPN3Au6Z3S61nhL3Q6LqiKgO9IBe+KDlABOpXbRiH28ezjeiLSxNrYndD0xPaFb1x/Y7Vrnf6X6GR3wPt2lUilZR3a9MzjekVDVxCR4xRht24g6S0xEmwYlgFOpYTqy6+3leM8K3Tqm5x7hidPMoxZe7b3oyvlXkgBOpborweuP2wuonhjp/qPnkQO3EHmg2wrX3sBNB5xK3bNGgFczztHAO2ofriam1elKfe4IXaxsr7neVuCOnAfW9JcOOJXaU7O7XusCq2jw9nK7e6xgrssmcrpYd73mea1w7QVjbUxLfJASwKlUqGYHL9d+VvDuAVWp3vpl4Y6gGwncSNhG0U7TTzrgVGqk7iXdHDUHq2k3W711DvjOodsDuCdyvzsPnUqdRQlfW0zvudmZtg1p6ncG797zvKPmf62xLX2mA06lRuhs8G1Zsbx32jYS2gndZrgexQF72wQoAZxKudQCXoB953tbXG8P8PbcI9vidhO6zWVceUudpt4bO6KfPt2lUvegdL14zMjVwZFQ7pnO7rhtKAqoe6egpbromJb4YCWAUymT7mGVsxUyWEwUIKOgfFDoRgC1N4StsS11lhhLXHT7nANOpSJ1NteLxUe43hGu1Vt3Euj2grC2naU/Tz+9YlriO2mS20ilZlbrfO9s42jHOit8I1PdO4J3hhS0p7ylzhJjiYvuIx1wKhWhe3S+1rT0aLh6U83ePiaG7mgIR5ZLddExLfGdNMltpFIz6mzwjZjvjXK9PcEbPS7A1NCNdLR7gVhTr42xxEW3XZUOOJVqUcJ3zHxqJJSjoe6ErhWIo+Z4ZwJuBIwtcdbYFr2EBHAq5VeudO4D31ZnawWvB8hBTneP1LIXsBaA7gXbns53RwomgFOpK50dvhJ46xjvQRWRgI0Cb2e3e8ZU80jXGxnjiY3sJx1wKmXV7PDl2kbAV6rvlTo+GHhnh+4RgHvHrnerSW4jldpbs8PX2kazx5eTxflq+pgBvpOCd2S6WdtXZHlUvTXOGjuinzHdplJH0hHgG5l2rmM41xo5p6spnwy8FnBGxWLX3pgeZVLd6FRzOuBU6qiaHb6te3ylmJ7wjSifALxakJ4h3Tw61dzDzbZSLYKKOQecSkm6B/hqnClW3wO+1HgW1zsBePeE8N7A7QXiXnHeeKvq/hPAqRSnUft8vX3NAl8NNK3lra43GLytoG2BqMfl7g3hljpLTI+4qHZBSgCn7lCj4CuNE/XfT7PPl6vrAV8vRLV9DQLvDC7YG2Mp85RLdZp6bUzv2Og+0gGnUnuqBb69D9nQ1mnisHLt4qy6TNvXAPhGuN+90s0WkI52vzPBdgL6TXALqdRIzTDvuyd8OSBa5lcj5oFbUs6B4J3Z/Y5IN/eAbY/U8VHcL0A64FTqVglfvM664CoS1No2ja43GrwjoTsKuHu73jtwvLUmvKVUqofuDb7WeizOA1RLe63DbXC9reCNSEVb+tG2by3zlLfUWWI8sZ741nYBSgCnUqyOCl/PXt/Z4Iv14Ug3t4I3Iv0c4XJngXBEvTXOEx9NN0t/mYJOpVZ53e+I/x7W7UaauB7wpcaMgC8X3+B6PZCNTD/PlnqOLNfWa2N6x0a066CJbiWVOqr23G5kiWmBr7RCmWvfMt+7KgC+HuBSUNwbvHtBdy/na20TSTZPX+mAUymA/u53ptSzZcWzJE86OhK+RucbBVmvE7bUea61Mdq2UnlLnSXGEzu6TUdNdjup1L1oJHy5/lrTya3w5cZ3ut4e7rcVyBHXrWWecqlOU2+NGxUf3X6rdMCp1MzuVxvvWfHM9eFNMU8IXw6aLRBuAe+RIdxSZ4mxxLW260k4ru8EcCrl0Z6pZ400kKbAGrHiWRNnga9zpbMGoNY6bz0XH3HtjfGWS3Waem1MS7y3TUu7YE1yG6lUtFog17Pv1tSzFKOBL9Ve+lTHnGsdxzn2TvBthbFUpqm31HmutTHathF1mnprXGu7KKINImMCOJV6Uu//vXvO+0pxEmgtW4cGOt8oCLc64vp1j+vWMq68pc4S0xI/epwBmvjWUimv9nS/vT4drP1GzPtyZVh/VJ3GOU8GXwuQufiIa2+MVN5Sp6m3xrW2m8n95hxw6j6156EbXB+W+2pNPVOi4Mt9mmv36dbul4Nvw4KrmSCsfR1xrY2JLNfWa2Na4lvaTUy5iW8tlRol7X8DL9xHp541c79YW+scb13GtafGEOCrgaYHtFrgWsBsqfNcW8o85VKdpl4bE9GmpV1rW0npgFOpker1X0mCb63RqWcOvpIb7gjf3u53NghbY1vqLDGe2Na2kf8VvX0lgFP3J49Dndn9Wj8FvaddtZxSRSkIvhT8ejljT1n92lKnuW4t48pb6jxx3viIthPSbsJbSqWOpohPn+h5X8tpV1x/HHy1874Hge9MEPbGeMulOkuMJW7vdlHta6UDTqUk7eV+NbL+1/QAXCrTAJ7bXhQkDHxY+SwO2ALbSPD2gG4klL3xEe0nJd2kt5VKWdVz6xGnI7lfa1ndnwWyQe43+qc31lJvrdNca2M85VKdJcYSt3e7qPbHHDqVOoIi3W/UJ1hdbz3xSlOmgW3n1POon9Y67WtLnebaUsaVt9RZYlriI9r3pJvUd6agU/ejXouvog/d8IwhxVgPyeDKWlY9HxS+VuC2QliK1bS3lHHlUp2m3hrX2mbvtsGa6FZSqSOJ+6+jdb/Rx01aFl55z3XWyNhub+j2cMKWOs21NsZTHlXvjY1sH0W01n7SAadSlCLcb3S7vceKdL91O+bTiGqSENbHaNtq6zT12piW+Kg+JqbcxLeWSmm0x+KriE+eVvdb1/VyvxJ8pXYOK3AUCHtee661MZ5yqU5Tb42bqW1Ee0
zpgFOp0dprJXarWu67oS0HOWubnvDlxudeW+o015YyrrylzhPnjY/sY1LSTXpbqVQvaf7JR4LU+2kWOffL9YuNQbncDu63BZDePlqd72gH3Apirs7ypScqbqa2Ee0xpQNOpWr1/Are8rQj6315j5zEylpWYTdstWoBqqfN3hC21LXEeMu19dqYlvjZxvX0nQBOnV89Ur4zpJFncr/WbUerFAuvNIDiYGht0wJfL4QtdZprSxlXLtVp6q1xUe0i2k9CvkluI5U6gqzuF4u3ut+6vof7jbYoyq//2yYRLtfTZpQD7gHeSPc7q/P19BFJtc6ETACnDqoIEI1W66EbXGyL+61jWt0vIyvwtLEREPbGaMeX6jTXljJPubZeGxPZrrWPGT4CKk14S6nUXtpj8ZVFmiMnJfX+L+9wv1wdBWUuVtO/d0yvA7bUSWN7yrhybb02xhMb2U/0P3FPfzkHnEpFypp+9vTpXZxFuV/OoWrTz9R+Y0ca2+N+ufYW59r6UyrTvrbUYdeWMq5cqtPUW+Na20S0n4x4k91OKtVDo/+ZY+O1uusId869D1JaWfMeDnC/HpfaA74zgDcy7Tyj+90zzdzaTzrgVMoi60MNWtViPzTbglpgK2mA+21xu9y4e8B3Rgcc4X73cqF7gjlYk95WKsUpeqtQ70cORi++ksbz9luDtRXURPdeJ+Z1zVR7y/1o4euF7ewOeKT7PRLYKaUDTqV6auTc74yLrwynXlkAh5VFO2MJUB5XzL221HmuqTJPubZeGxPRprWPiSk38a2lUhGa6WuxR9rUeHT6uWGxFSYrzDR1dYxlXE8qmrun1lS059pSxpVLdZYYS1xk+9n+m6cDTqVGy5t+7qWW9LNFhsVX2JDeOi+wLWNZxuReS2NGwJgq75163iNlvJf7DtaEt5RKjdTei68ijp2U2rfAlbsPxfujdbqaeGudFOuN04zbKx2tjfGUS3Wa+tb4vceL6mO+oVKpCM14VvPe47Tej/aTujH9zNV5nSNX1wJlb6pc+1qqk+K1baTyljpPXGubvcbrJPGWSinfCwDfCACfXpblK/vfUioVpZH/4zTp55H3Y3nwQt2GKsf6aFx8pS3rAWWLtI47XbA/NqL96PEoKWdlXlPE/AUA+FDDraRSk2p0+lka35N+ltLSvdLPRmndI1fG9eVJG1vcr0Yj4PtSEcPFYeVSnaa+jpFiuTbesTRfHLzjdZI45LIsP1JKeX//W0mljqDZ0s+9FbT6WVunjfeAlpMF+NJ97OWCLWVcuVSnqffGHmksTqNXQZdSPgwAH75cfVlUt6nUQTULQL3awQ7sqQc41q9s/fLS2rdnHO/9eNqNahOssFtYluVNAHgTAKCUdy9R/aZS86vVYo1UnZKe5b5SXWR1vy2uWBvTEj96HG/fuQ84lTqLrNuPWtz3QZz76lgx58rV9b6fvRSRep49Jd3beUe1n3OoVCr1rD1XSE+svUGmkeUeZ/t9IqGsqbfGzdZ3Z4mroEspfwUAfgwAflsp5VOllD/e/7ZSKUpv730DEynSrXZyvg99ujWp9R4eqp/e8fZ+L0bDt3U19F59W/vC/kSloJdl+YO+O02lUj5FwXCSr/lR4tzkXq50Noe7qtU9jkhH93KuIxd/NUqzDziVSgHAYeZHj6LRjrAej7q2Ot570l7wtbhia5+trrhBM353S6VS9y6vs/QswIpysdt+qD6pmJmcdC8H2WOh1ojFXx37muWvPJWaSK3/LaxOmRsvXfeNLJCNhis2tjSG5x6igWxZJW3pQ9NPNEx7r4YeSMVMQadSpxIG7AkgzqV1vXV1jGV8aaGUd9yWtPUMKe8jLNSK6q+O3yEVnQ44lWpSz1UhKZNanGaPFLAm3awdd6YU9apZFmr1dsSevgIfxpBKTabcijSFq5Xkda2j3K5WXH89F2zN4IgB7OlrqS7a7WpivIu+qD9BSgCnUsPlhWdPCyR9qelEg6jUdAuYpZ9acYD2vNaMpb32KmqeWNtXBHg9ae2d0tAJ4FQqtVGH7EIUDDyQjZa2/9kc8Qg33ZqSjgKvRjtuPdoqAZxKHV49PkmoT+wa0I7nrkSnmC2x3kVYWocuLczSuF2PI45Qr7nUqJR0Sx8tDvflYv+Tc8CpVKpdlCN2kMGSJvXEemBlWe2sgTFVZoF0Sz97ybNKunX+NjIVDUDDtKMSwKkTa6ZPqBQpC9h6zvW2zCtTZZZUdetccK954K0s7rHXQq2oVPRA0JK3sMuoqVSz3oY5VwLPeE87qt4+o9lOo2lTbx/yjGMRNx5XhvWhfa1pO4OioNxar4WuVy8N32iKbpx0wKnUVDopwD2rdiPbRLpgrWO3rIrGFOGCuf56Ombvth9PvdjW6HJfPuB/OigBnErdtXZeWtuSYg68DdV4VhhjcZr6yMVZe8zCWOeDe7peD3QHKgGcSqUErQuxHqrrVR3mz1rmY3vMAXudcc/FWTOodeV0D9erdbxR0H356vZProJOpVIX7XxymDa92xOyUnttPRfrKZP6bn3N9bu3WhZpoeWdoIsBdvunQQng1IGVR1L6pXnvBn1aW+dtqXaWMTT1Hmes7VPqj3stjaFt20PWYyu1fazlHHzZPg3QDQSspARwKpWqZPliY/x07wHTnqlnaQyqThOP9U/VU7Ga8paxeilqnlhyvRrwDgRurQRwKrWrZtpP0pJREByIFhpR0LSmmKPngaV70brfKHfsgbMk7wlWVJkHvuS4AnidwH3t5SvVH+26iARwKnVX8loeaSFWgCLcWIs77OWIPWXYWFQ9FTujwtLRBOC04FUIB2usEsCpVGqMvO511E9OFhhr4y1lrWCObEvJm8yJcr7B4B2hBHAq1VUzpZg94j6da1e8w3F+kuONgLsnPS3dS09HbE1V91LLQxai4Suop8tlxx06WiqVmlDYh1enFea9XKvkZjVzuZ75X01MzzIgylpS8b3UvGcYgS+XchZcrxe6L16+Ev+UPIoydR/KrUix4t7P2vEO0p6pZo2jbYFwXRdVpqm3fFnpIcuWJQq+aKwOvBphcI1UAjiVUmnmM5q9dkf75SU4DT0CmD1SzVrQUjHWPrG+qDKtS6a0pzOWZIUvIS14e8EWvafuI6RSp9XR53e32vkTeKSbbUk1a2OjXLK2LzCU7fFXXf9XUW9PaoevBrwR0H3x8uHpTz4NKZW6S41KyXOf4g4X3HILFjhHttH2YxlDqx7paW8iJUId4cvJC90tbJ+g61ACOJW6C23BrP2wwGDOlTV+Uvd0rL3aRI1prQOkzlJmAbOnj5HJIQd8LeCNgi2mBHAqlaqEQdZqr4JdcC+Xq4FwFGCl2Mg6qWwmtbhfBKJcylkL3h6wxZQATqWGarbFXJ6UNdfG+YEV6VyjY6UYi6NudcK1IvjQG9Ca+d+r+jb4UpLAOwq6WyWAUyfXjF/5ZxX2XkmAXttgcQYXjJW1uF2uzupO9wC+5r48ddrxNPWj1AhfyfVaofvy5SvxT8nnAadSKb+0aWhMxk9ub4o4Eoxcf5r2lrGi7ivSGY+CseSGa/cbAF9KW
vDWcI1UAjiVSgmSFmNhLngtUx5PGeXwvDDnYnqAFZMlVtPG8p5IZbMoAL4a8PYCbq0EcCp1SLVuN/J+yg78dG51eFFOVtO3B/hRoLXICuM91TAXy8FXHHbgedAJ4FTq1Np+4FDQ9mxRovoLdMFS3V7p5Ii0cWQK2ut0veqWjhb+nSjdrwe+HsebZ0GnUilEUYdxvE28xiR9KjekoqPT01ydBnhSe21MT2fscfFcjLdeK83cbyUtfLmUswW6Pc6ETgCnUmGabYuRRRZoS67a66iZYeqyFtDu4YA1/Xp+euSFcqSLtqoCngW+ZJcTnAudAE6l7lbUhxMFWIsLdm5LqrvRwBEr88ZLgLWAUXq910+rwlxuTDet8JVcbz6MIZUK05kemBChqPQ0BVvJOg2GcKQ7toCOGkeCYXS51YGPEvffsgan4jzn2zIavlw/vnOhkUcW5j7gVCoVIwra0qd2YypIM4rRAAAgAElEQVRaYrk1XRoZT8VIwNOOEVVe188obvGVIvVcywtfrSLnghPAqdSV7tkxa4BpXUndkIredmVN52rLPC5Zajc6lWyFq/bLgOaLx86qAWiFr/5s6D5p6QRwKuXSHqD2fiL2/CS1WC5HKnrbzALhnmAeCd/6/rgyb5+WfwK9wbsFqOB+tTDk4MtpxFxwAjiVujtp54EpJ6txwVT5IAhjZRQ4rQ54JHwfkGtrHxHy9tfpe6p23tcD31ELsAASwKlU6kqeRVoUqDXlnSDc0wFT8S0/6365Oo07tt6nFK+RB9LS4RuPkuZ+tfDl4Gp6RvCLV+yfovx3nQBO3amOvGd3L1m2JK3SQNggDpDUa48D5oA1g9OtX0uxknqnljVi0s9baeZ9KfhS/anmgTeAjVICOJUSdURYew/WsNRZFl5xMrjgeiisTFsvxVniW1LDHpBaIDtberrz8gkrfCVFQ3erBHAq1U1HXVHNAZSrG5SK3jaVnLCmnoqLdsLbsVtAbfkiwKk3mIO0TT9L7jcSvj3BuyoBnEoNU7STbnG5ddsIF8zVdYTw9rUmHa1J63pgLf1sSTNb6ywx2jY9AU2kn7m5X82TjTzw9YD3BTxc/dH+W04Ap1IpRByg6zrt3O5ACFuATLXxuGLuJ3bfUfDl+sbUAlMPkAckg2r3i6+UZhZhKcFbw/ZFw5uZAE6lUg55UtFcXTCEt68lIEeUaYGqAbi2DpAybTxVNzINrVwBverFVSpaTj1z7a/KFeCNgC2mBHAqFaIzLNSypqm5VdGTQXj7utXtetPP2P22wNTqdDVjTiTNsZOUbueKafiy/XSA7lYJ4FTqSUddNKVV70/ZCSDckoZuKYtOKUuQjEhL90pRc8L+iym2H1ncbwR8W8D7El5pn8WQAE6dWb2AenZQc5JcMifPgi0jhCmNhDA3fi9wtrha6/gTy3KQBlln+GVfwqubPxYlgFN3qFnSxb1B3nqqlacPS9paW7eA+cQsyglLALSknLkyTapaWxb15QGEsonEbT3aSpr7RRdiEfDVuN4W2GJKAKcOrt4wnQXWvaQBrHWuuAeEAboszrLWa2HNtbP0ifUl9UcpwjnvDO5t+tmTesbgK4E3Eri1EsCp1LTqAX/NJ6jnU7YVwg/Kus4rpKMcsPQzGrTS70nF7iVsBbThEA0pzgJfSr2gu1UCOJXqorPNE2NAjXTCdT1XN8HiLE06m/vpGZN7jSna9WN9t8IcOUyDWv3Mud/ruBj4WvQCXl39yYcxpFKHU6vj9X4aYmlorC/PnHJPCA84tMPigDXQle4Fq+faePs6kTRHSt6UEW+KxvXWsH3R4JITwKmTyupAsXgtEDVxZ51LloDqaaOFsENeyNVl1G1409XSPVjgq9FsgBbSz173a4Uvp1bYYkoApw4sD9RaQXiG1LLW3XrjpFS0pk0N4aAV0nXzCCdsSTdHpqDBWO5JWWvG2FHaOeKneOSXkFxvD/CuSgCnUofRkeAfDeG6vq7baYV0K3wl8FP3TPXLlWP9TAZV6fSrl8z2JMn9UvCl1BO8qxLAqdShwNZTkS4Yi6udLNauZfX0TmdIa+Z6LcCV4N1SvqcM/824k6+keIA2+GrBi80FP7fNRVipU2uP9HOkRkHfs3CqVnTKOhLCdX3HFdIaCEt1dUwdZ4Ev16c0lqcfr6h/6itYHWc+S48TvLo2wpcdN2Dh1VYJ4NQJFQG3FlgfwVFHfMqOhvAO88KtZaPmdFthOgrGAKanIGkWX1nngTH4clCNhu5WCeBUSq2WldJHEeWYqU/iaAhzkMX67jwvjLli6/ywta0lNV2/xq4xWcE6IIWtffpRi/ul4Ev2l3PAqVRPjXCrZ4O0Rl4IY9oJwly9NPcquVOre/XAdwSYOwgDrMb9RsLX6ni3zwt+AQ/5NKTUmcUBjQLqKAh6gT7TnHaEC6biPeloTUyHxVnaOWHPXC9Xr32tGctap5E2vvG7rbT4ypJ6tsJXUg1crxLAqdSNZnCsnnvYc+659bGEnqMuLfWd54Rb5ou519Q9adu0yNsn98+wnv99hGhr+plzv1r4Sq43Ari1EsCplEqR87+zLNLyfJBE2SVtOvpAEJbKpL6xWCnlrZHXGfeUYmvRU6jxIQ0SICn4cv1FQnerBHDqYPKkn6Pi70mco41IRVPxXgg/VPXcCmmDJLhSsdr23GstfGeBKiflf7UVrFj6WeN+pXlfC3yt4N0+tjAfxpBKPcniVKO3H93bOdEjIexdnBUI4WhnzJVjdVHOeFJZ3e9VWwG+VMpZA94tbFseW5gATqVcGg3NPdy6xwVz7aIhzI0ljR+wOpors6SgqZhWHQCwreLc71WcEpBa8EYpAZw6kPZKP8+Yqp7BNc8IYSwdTbVvWB2tKa/LNCllb50US5VRioI3uyDregGWNf2MPenoedgtmLWLsPBfutXlckoAp06uGUC1lQbmI++59ZN2BIRbtykFQrjuoiUFTcVY67BrSQdIVVvSz9Kq5+vY2xXTnqck8co54NSpFOl+PWNIcWed/209SzpqT3HrNqUgCGtdZuRcrwRADSBHQFT6b7huQTKsgAaQ3S8HX2ze9/qadr3ifbEPY9ApAZw6sSi47ZV+3iOVHQF4CcLSB2ovCGN9tDxXWKnWFDTXB9WPN1Zq71HgP2Mq/dyy+Oop3gFf7bOB82EMqTvSXu53lrnfWe6Dk/dTfTSEubaOdLRnrrV3qjmijVbkk44UMQp53a8Xvug95MMYUimrItyvtt/IQzqOfrgHpch9xa3nR3PtlRDWQC5yDpgaQ7qvCIUmgR5v0OpmMRAHwZdyvR7orm3yLOjUSTRijnSvedjRkG6RBnreVDTXVjsHbSUP9wCHhiGj5oBbYiPaeRUIa+vKZ7QP5Zyv1I6K8c79rkoApyaWBJojbT3y9je7s7UqCsLRx1YGbE+K1GypZo+u0tD0e6qZ/71po3S/VJu1nefxhDkHnEqxYOp18lXv9HO09rgPzSf8TBDW1ima9FqENaP7DZjjtT6AQXK/XOpZ+2jCqMcTaiUCuJTyvlLKD5VSPl5K+dlSykfC7yKVulF0enY2
94vd51nnf61qhbAUo5kPHnhmdMRK5mjYtvyTErcjYSucr93vi+rnVaxiz68HvlQ/nucCa8+C1rzNDwDwbcuy/GQp5UsB4GOllB9cluUfqO8qlQrVLNt5ervflv61cZb38m1lvw+KfrV9aVT3pRmfUkvbgG41cZ1ukZUIVUW54QELN3GI+6VSzxb4ep8JHPV0JNEBL8vyS8uy/OTj618DgI8DwHtCRk+lUHm3HVnd70xbj2ZJW0ep9QMqelHWABeMDSvVj14kRcnzz17bZjv/q3j+r8b9auB7cxvKJyNJT0fa7XnApZT3A8BXA8CPI3UfLqV8tJTyUYD/L+buUneoGUHU4n5nniO2ynLfZ0uLM/LsBU51V69n+EZKDeBSyjsB4PsB4FuXZfnVun5ZljeXZfnAsiwfAPiSyHtM3Y1aVj1HuN+9tBfg7wiSKbvqfx6WNLThn5a0+tm69QjTyOcCW6QCcCnldbjA9y8vy/LXu9xJKsUqatUzFx8NvZndrxW+6X6nkTr129B2gOxHSyJzyMq5X7lv+SEPWl1gH7QIq5RSAOB7AODjy7J8p/luUilRvaB01EcO9gZ8T/hGaaa/j46K+jV7vV0tLviq3OcgpcVXGlkf0PBcrrvnlscUahzw1wHAHwaAD5ZSfurxzze4R0ylrtQKjJ4Lr0a439Fw6w1fbf+zZAICZHGaM6xeHvV8EcM4mu1HtTzuV7ulSIJv1DOCxbdoWZYfBVAfbZlKGTQDfK19zyKP+50Fvi2K+ns5uMNuBWtL+8a3TnsAx1beOVjrs4E1Y7VCd6s8CSs1sSLnfT1jaGJb9uWOnDeeCTjRXxQ0fcz+hepRPf6aev/Va9PUzAEc+qH88Is4ySoSvpf+Uqnh6uV8uXYj3W/kf6s99ip7fv89P0pa/74C4TzybTiCC2bOgI4SlX6Odr/R8L30mUoN1Uzw7eF+W+Z+W/47Rj0zuTd8e2w166XAmbegudLw8TUQ1tyf8Xfg5ng1Zz97HhMotafg2wO8qzIFnRqoHit0ve16LLyKVgSkR+cfOY0CaM+DjANkXUkc9Vce/baoXfAz2LD533oBVi39IwT9e3X3gO+l/1Squ6IOhvA42b1Tz3u63yM5X6k/qr2m3zpm0Mde5C64nrcctVLaeRCHvnv94RncIwq17tkC37pPbe4kAZzqrN7w9bQZlXrec4wzwdeinfY4a+BjAdSM25Wa/on7FmBJh2+gQwnP89WMoYFvzKKuVKqLIo9DHDHv2yP1PGLfbwuoRyy2ioBvpPvV3scEOy81AOy939gK4asvGfoFWPX+X83xky0nX7W3i0lN5xxwqoMiIRO1uIiLH7XqeaanL3m0h7tsgS+m4Pd7tJPtsfK5RY7+rEdQXobxAc8DSs/qaa+O9L8/Nb2izwvey/lS8aPmk6NP4NK0k9QLvpGrnq00fJ2pc6j1+6AWrpayHq7Z+aWDW4ClVWT62Zp6jgTv83ipVLOiPyi9W1VmPpxjxLGWPeDr+YjoDd/eK9WD0889tiC1grX1k187n+08AxpAf6QkFRcJzB7wBUgAp5o1C3w9bUadCz3DKUwzuV5P354xuPE6Hb7xEinjbsNaL8W2QtgLa+NfJ7cAK+Lxgzd9NrjfXvC9jJlKmdXrw7zHIQ0RbrnH3O1I93sU1yv11Wu6wLn4ygtLKfXcuvrYCmFsvJYFYDdxG7eKrobm9wGvYLSkn63QjIDvNrZEPY4wlXpWTxc1A3xHrXpumWyz/h30hu+oef+Wv7OO7hcbgnLBIyFsVYTLNa6Ats7/rrKufpbcbyt8WxxyAjil1D3Cl1LkfGNE+6g58Vnha9EIt+9v1jRGj7lcy/gNsj4BaYWkZ/UzB0T9qVq5Dzg1hXpu92id7+0N39bU84iFV5axKc0O3sgsAjeWY/GVtBipdQGWF8LWFdDe8VmXr1+AZdn/+9RG8eCFllXT9Li5DSnVXb33WfZwvVy70XPEvVfqRvTZc65X2791tbOlLwm+A+Z+63hLWtoyvgW4rXAWIYynn5/neh9uyiS9UM4D8320u9/oBVkJ4FSlEQccHBW+PQDaMs5I+PZw1F74en5vCc4OYXO9mvlfqh8prm7TA7gtZZic87yXIXSpZGrxlcf9joTv5R5SqS7zj95xRsHXOoblQ7/3Xl5LXGu7HnOqnpQz13crXAPcrwVSUiwG79YlCty4mvGs97zRa1eroB/ndpGV0Wv6uXaqEeDTLLyixsltSKlOOgp4pT488LUAdRR8KWljPe9R65iW/jV99pgqCEo9Y11YwGRxuh4ItkjTt+aen177D+CgJIHQ4n61fUf1QykBfJcaBV7tWDPANyK2ddFWr9Rz9JclT99e18uNoXlvAuFrAZCnPy0EvWXUGJzMEObnf6Wymxjh6UW6pxbx7rcFvq3uOAF8d7oH+HrGaz0Vq8fBHhHzy73gG9nvHvBtUEt6VlzA5LyH1jKLtE7eMP9bp581YPXM/WpXPec2pFSgeqy0bR2vBbxc+97zxK2QGuWSLf3u5XqlmIgvS1R9gPv1zvtaIdw679zqmqWxb/p5dpza+V9JXthJK59xh51PQ0qFaEbwavr1ut7eK6T3nvfdG757g5dqZ3W+DfCl5n69Md5rTlZga8a2uHyFPI8kBMCB6nW/VvjmKuiUQbOlmrX9nhW+lEa5ZM0YLf1Z+uz5dzUAvt56S91aX/c/cq63w/wvdvwklX7m9v5a535b4JuroFNKjZ4ztIzZK+UstY2Ar2Xc1rRpD5fs7T96/jhyWkHz3jXCl5LV/WqdrzYF3JJStqSZ6/vDrsl2FfCQVdFWF2xxv9pDN7C2mvIoJYBPo6OmmzV9eeAb2Ua76CpyzlKK7QnfmcFLxXeAbwtYLbGY06X6kcpHwJm9V3z+VxK199frfrn424cztD6M4Rb2+TSku9LIdLNlvJ4pZ8t9aNq0wtc6nja21SVb40emm7nxJoQvVu8Fryb9zF17pE1Va8ZmIYwvsnopPHbw0hUFwxj3G/mEI6vLxpQAPrTS9ca0i4DvrPO+0a63N3i5tjuknSUXzA1tLdc44x5zwJr5XhHCOsdXy/LwhcuQ/njtvK+8CjrukJEE8CF1ZPBq+hs132ttEwHwkfO+EdvBLH1p4nqBF4sLPOVK64IpSG/LOahqU8C908yeexO2H11e37pgy+Kr5zay+5VgbV8FHX+6VwL4cJpxdbO275YPZ6l9b/hG9GGBb4tLPhp4ufY7wVeq5xyiJYaqk5wv1gcVaymTJEGZSC9zq58laQ/nsKSetfPAUr+tSgAfRkd2va1AmGFr0lEWXUXBd0bwUvEd4Kt1txqX3DsFrZXm/rRjMOnn14S5Xqyccr/efb9c6lm/CEsP3vrLQS7COpVmdL0j0s1S+6h0ptSmF3xboOr5rxvpekd/qdK+B53hi7XZOwVtSTVbvgxYwEw8fEF69q8WctLTizSPJ9Rca+/LOhdN95OaWGd2vS3g5dr3dr3WfkbCt3WB2IzgpdoEud66K9HtCT/rMq1bjkhBv0RiW+FscdzC6udt+Tb9TG0T4tyvZ95XA99
R4H3uL3VSWf5qjzTXq70HbZue8KXU+sXK0ucR0s1c253ga3GP2vrW/jz3tI2L6k+Rfq6lcb+adLJFLY8njLoHud/UpPJ+SM/sejX9zDrf6+mnh6PV9sn1q2lridkLvADd4atJM2vipdR0HYfVczEWp2rtT8wKVCB9mgMm0tLI4qtbd3q78Mrifq0PVKCcby/wPvefmlAj4Hsm15vw1fcrtbPGjPp7CXS9dXceF9ySgpbSxFoAasDpTWfXZeh4G/erWP3MLb56ijGcSuWZ97WknXXnTbc669Rk8sD3zK5Xat97sRXXV/SeXE+/1ri9HS/XfoDrrbtscb6aWCyeKueAisGU6lsDbK+TvrnW7/0FuH3wwuW17H6pWKneA1/P/mGvEsBT6UjwnRW8XDuuzR7wbXHJVkjv7Xi5tpbfJSjlXF974Outt7rdWl6Ycn1Z7uklkO5XWnx13Y3N/VoXXnHwtbreXg9lSABPoZlSziNc7yxzilJdLzdL9R2519cSv9fcveW9CXS99bUFpJo4rcO21GmcrhamFjhz9xDofuuVz/W+4K00qWcrfKPAu8bnPuDTa1b49nS9Uvs94Wv5++jx3877ZWGmdDMVPxi+UqymP+0YotNkrrE2lDzpZW3MozTuV3uq1U3fwlORtCueo+Gbc8CHV++081ng22N1dG/49lhIFeWS91hgxbXrAN6628gUdE8IS/+NeqSgpb6urunFV5L7xeRxv1r4ck9J8oA3OhWdAN5VM8B3dvBK7T0f9r3TpSNje8DXC16urfXLyyD4asqpWGkMrp5LQdf1VNo4MgVtdd8vH57g+9rLVy73a32EoHRIRit8W58L7FECeDclfMeDl6uz9jcDqK3wne2L0ADXW197X2vdcA1N7rVUV/cttbe00/bFud9KGvcrPfFIcyY0DnD9nG/L1idKt18Ocg74ZNL+VY1a5dyzfY+2vV2vNX4kfO8EvFjXo+Bbg9QyDlanAaPkbDXtNOMFul9MlscRtsBX63rzecCnl9X9RsK3t+ud7cPe2+c9wHcG8AJ0gW8LiLX1GkCvr6Vy6p6tDpXrx+x26zb0sZOt7pdzpdp9wRb4zvAsYIAE8AEU7Xxb+2n5J5Pwtd1Ly4EcVHttPwdyvXX3rS6Yq8dAKsVqyjEnTckLZqovSzvieEkAm/ul5nU1W4Z0q5Nj4OsFr/ZfdgJ4uCygPIvzbfmwbxk3ar6Xa7O386Vie3yZmdz11tfRLtjqbuv6+l64txODstaxcu2ofiRnXq183j7z1+N+r4ePm/e1zvfm4wjvSrPCd8a0cUvbURAZuThL22+C9+Za40StEObqLUDWALO+d6y9Fs5cO/H6ee4XwLbv9/KaBqwmfdwC39ZHEebTkA6vM8J3Rsc8m+u1xrfM+Ub+7t52ncGLDRHlerevW4G8LZOcM9a2h5P1tjPM/UrP+6X2/Nb1dVscuDjora434klIdZ95EtbpNTN87831RsUfAb4Tgbcua4FvC5AxWHJtJAcMVX19rYEs1sbV7hEkiPtd4fuyWgm9PXKyJfUcDd8I8OZBHIdUtPvtDd8ebXumuaPv5+zwPZnrra81r7fXWvh6HTD1WqqTwKxpY4Uz2ub6zOeI1HNv+Fpd70joXo+bmkizw3d0O6ntyPsZPT/cAt/oL0EHAW997X3d6oCxsggH3ORkkXHEsfCFVwCX1PPLzUKsp/IXr9jjIr0PYvDC1wve1nOgMwU9jaK2B2nV0/l62iV842I1GuH4uTYHhy/X/7YMA3PLeFp3zMVIYMYkjVVtO8IeK7iWY+c96/fzao6i7A/fkedAX+4hNYki3O9ZnK8XvFy/e6ape8RGOd8JXS82jBZeXGwP51v/pNpJ5dK1xrVyfXjc7wpfwf1KTzvCDtywH0WpHyMSvB7oWtokgLtK637vCb735Hqt8Vhsr7RzFKwHgle6toK4Bcga+FLu0upePVBtgfVVDL7war1+ek3s+W2Z942Gb9QZ0JFOOAG8u44I35nAK/V9lG1J0fDt/aVjMHjrsijXS732QJgro0BN1Wmu63G4NhZYr+6XOe/5xdYBC6uePfO+rfCNOwmr3wIsgARwR42a+z06fEeDl2vXG1qtsa3wjfoCcWDXu329lwP21PV0wFf98Ht+rauet9D0wtd2KIf+MA5L2XW9fDxlLsI6hCLcr7ftCAfoHadXn7O5Xio+Gr4nBG99PaMDll5b6rbX0bC++R0u7pc7bnJ74AYH2bX8qe0g+FrBu8eDGAASwJNrpoMyPP9UEr72+ISv+ZpzxNr4CAdsuTfp95F+JyuIqTGv2mwWXj2K2vMLgM/7AvBbjq6HHw/fqLOg63FqpQPeVb336krtZ04731PK2Ro/I3w7gxcr04KKi/W4XazMCmGqHeeGLe7YDFVFPy8BLHt+63nfp7hq0dWl62tXbHW+ONjtc715FvTdaMTc75lAJNVxfUptR71PR4HvSV0vV6d1qBEOmCvD+rPUuaAK16JgrThu8vkaXxRFLZaithtty6LhawFvyznQEQu0EsC7qMX93hN8vb+rt21vp2+J9UL1AK63F4j3dsB1mfTaUre99sKaBfbtcZPcWc9ah7uWX35yW5H4uWEshhqnfo1f51GUJ1SE++3hoD1/zb3hO5Pr7Q1eKt7y3tTtD+B6sSF6uFyubk8HrC2n6iwOV9OmHgcAsAM3uLOeuXlfDJ4R8G3bB0yvkK77ofqw1gPkHPDEannLjwQXLn6WRVbediPhq23bCt87c73b1xEO2ApiCpQRoFW3ked9uf2+1IpnDKyt8PU8H/j5euxJWBYlgIdKervP4uyoeO/v19J2RIqdatP63kTDdwfwYmU9XHAUcLVlGuDW9VjMaBd8c41vOeL2+0rpZaocg69mvtd2AAfteFsO5MD645UOeLBGHbwRMe4Mzk4zbmvb6Dlxzzia2JZ/OwnfrvD11FkcsFRX9yO1V7e5Pu0K4Bq06zW23xcAX/EslWM/a/kXZPUBb889wAAJ4IHqBRlrmyPBd5Z2o7MCJ3C+kddWKHuA3OKApf5ncMBXZbenXT2nmh/QRVfPQ8grnqlyW2ral3JuOYyjbl/LshVJ+78rATyFjgQMa9+R6fHR7Xq/973hS93LwEcGSjFncMCUg9W8puq8oKVinsqutxxh8F0lzftqVkK3wNfrem0HcfTZhpSLsIaqZUVv9JgJ35h2vTMIkfDdwfVaQWu9bgFxtAOWYinIamK0YMbirTEIfFdJh21wK54t8NUutuK3IdnAq4EuBdxchHV6zXYgBBXfCzBSPzO1S/ii3fe8jn7tdcCan1SZBGWsfQucWWeMH7ahge86v2uFLwdXTcq5F3itW5BsME4HPEi93G/vvxorNLSxkYd0eAHqbdvbKbd+sZHg2xm82BBRcPXWeRxwDwhHu2APnFlAX5/zvD1so4bvqtHw5ff/0qCmYuv4uk0dqynX1ucc8CG05wrm1r4t/Y6E78h2rfC1jKdxvrUmhi/Xd0/4Yv1GOGBuDO6epN+bi1e34Q/bAADypCuAa1heutTvAb68bn8kIRZ7W0873l
mfCZwAnlJRfy0RQNXGzuB8Z3S9VLm2D+37X8ftuNCqhwuOeK0BLVfXwwFTdRKo1a7YftgGt7KZ2+srzfl64RvxVKQ6DruWyuv+OeUirCHqkQb1jGf5a9wbvrOA19uuF3wj44LgK4HWeu2NjXTAvSC8vqbgzMFaE1vXm4BNH7YhwZdatWxNO9tWQceBV14FTUG47XGEOQd8WFk/4C3xCV+5XZRTbn1fNHFYzCD4WuHcCtvtdZTrxcq8UKbiuPvzulx1m+fDNqSTrrC9vq3wxQDrfRyhNB9MxWqusb7qPjXa9ptzwN012v1aZF2BrInV3nNvsO3RrpfrtcRK8D3IKucernf7ugeE19eacovT5eq8cDbAd7viOdr5ahdbtYI38jGEe2xFSgBPpQh4RS/2oeIitipZ47k2PdqNdr1Y7I4p50iXy8Va46LqPU5Y44Kp19iYFhijoMXK8GMmsWf7RsK35WlImm1Iaz/bmNt63aKsug+qjbauVs4Bd9XILTAjF1KNgu9IiHLtItuMeu93gm/LtQfE3tfWsigYe143u1ysnX67kXTEpLTVyLLYCk9H6xZjcTHc6227bdvrehuE6z5pJYBPLMtf255/xQlfXWzCtxt8sbG5f0peByy95hwxV68tewmg3W6E7fWtYUo5zFtQ43VyOlq7Ejr2TOg6jiur+9FoHSvngHfTCPfbIzba/UakqLk2Pdp52hwYvhykPNd7gngv57v+1Dhdiwvm6g3MUpMAACAASURBVNG42+1G0gMWJPhiKWQKol74jjgPuo7Dr2MewmBVAtis3guoJLW637PDd5RTHrUqHYsZAF9rfStsqbojQFh6TdVpXLEKyPIZz9ijBTXw9aSdW07Fouq3P29f38Kai9+22SpyEVbYHHAp5YsA4EcA4B2P8f/Tsiz/hfmO7kIzud/Z4XvPrlcbOwi+kde9nS71OhrC2nItiDWwxa7VMfTTjVrgSzlYySljbdb6y69gT0djsdt4Kvb52r8Iq9dKaI2degsAPrgsy2dKKa8DwI+WUv7Wsiz/d5c7mlozul/tPZ0ZvmdzvVRcI3wlV2u9jnS61Os9IOz5aYWvxuVKMFY8WpCC76oe8OVWOUedBd3rHGh+FbR+PjjMAS/LsgDAZx4vX3/8o+v9rhQJjh77eL0aBd9oiEaPtSd8B7jeumwkiC2Qldp5wdwLvsDUUfVYTA3sRvjWoJXdMA1Z76Eccp2clqbint9GzhFjAI6ZCw7dhlRKeQEAHwOA3wIA37Usy48jMR8GgA9frr5MeZupNo10pj37PxN8oxdlnTDlvL2m/jqOAt9tn1I5d611vVtVe30BAOpTrgBk+D7FbWC6jXu+boevfvWzdTGW9yhK/Vww1UeLVABeluUVAHxVKeVdAPA/l1K+clmWn6li3gSANwEASnn3CR3ynqcrRTpaqj/LwRJSTAR8jwZeKr4ldjL4RoO41e1i9Xs5YK0jphytKeb6oI16ry92vjPlcuWyW+fLwVWa65UXYN3GrOXXP+d+HnCXgziWZfmVUsoPA8CHAOBnhPATadTc76iFV5oY7xeBFofMxUvtotvsfQBK8Hzv0UDc4oAtdVIbKX5H+FIPV6AWXGlgqpnvleaHL7eqB69tLljniLexdTweG7P4ah2zwBdU8eInainlywHg7Uf4fjEAfD0A/Demuzq1ot2YJjYy9eyFb48tSVT8yDYHX2iFddsLtlxs9OuIsh4/LfDVwJZtcw1fas53PeGqhiq2pcgz36txyWvdtvw2/rYe63OVdv7Xsh2JLuMXXElp6siDOH49APzFx3ng1wDgry3L8gPK/k8gr/u1ttsr9Rx1H63w3dv1Wvu6M9dbX2tee9pY4YrV94axFcQt8H0qv4XverZzDV89aHGYavcDa+Z5ow7iwCAqwZlq93zdby9w5Crovw8AX20a/TSSIOoBpOXDHFPkgQ5Ri65aoLM3eLk6S18tsQdyvVGA1by2Aper88LYGtMCW6yMWO3MPdXIstKZc728E8ahvMbj5Zo54Fsob1/bUtD4Iq26PdaWKsOEgTwfxrCrIuaMW/5qerr2yIVZnM4IXyzuQPDlxu3xmqvn4jwxLW3X19L7iMVLZQA38N0Kc74ANvg+t7mO3ZZR/W7L6njt/PDabq27Ltc74tvXEXPAur2/df8J4GYd1f16nWdU33s4X+7voudcLxW/03zvWVxwRJnWAUswtThf6rXVCT+V6+d8JedLOVxubrgue+7bcoa0DF4rdCPngK3bkKypaEkJYFQt8N174ZVGHvhiioZv5Pt64hXOWLezwnZ73RPCHhh7fmpATIFVE8PAl5vzlVPM+GIry0Ir7dzwtp9tHVa+7evyuu04Si1sex5D+QJexa2CTlkUCYmo8TWQlNpg7TyA5sa3vke9wUvFt8Z2hu8eYI54HQnhKBh7QKyFLdamEb60y+WBKjlkrLwuq/u9/Ho+8GqgG3Uwh6UM65tSPo7QrR6pZ4u8zsrrYj1zuh5AjziIY5Z0MxY74Nm9vcBsHYOK8QA5CsJUmygwd4Yvt+BKcqhSGplOQ9PpaWnrUn0P2z7WOiy+/sm5XCtwpXlfCqyWIyhXpQN2aWTqudc+Wk4R874e+FKKAKDU5g5cb30dFeuBsgeylnoPhFt+auta4SusduZOt2p1s1pIY22x2G3Z5de1zAnbFmVhMfxrec6Xd786GKcDnlotqWcvpD2p517umIrjxrTGn2SRFdZ1D9hysVEgbgEuVtYLwtY6C2yxstX1ApDw5U63srhZ7aIsy57h27Fo8GJAtUJ35Epo/yrodMBGHWXhlbeviL49/1wSvk2SoKmN7V3XCl9sjL0csKWuLudgXJdtnS8ACd9VUtp5Kw6qT/0xK6Iv9T74tmxPWsuuf/pWQ2/bcPF1m/p9lISBOx2wSdGLoaxjaf8aNEDQxESknkdsSZoJvFR8J/ha3Gp9HRXb87W2vgW0UozXDXMuWOuEt/Bl0s5W5yutdLY4ZMu88GXM2O1Ja911OT3/q4GtdCBHHa8px5T7gNXyrgqW2re+tVFfCnqlnq19pOtVqwdALbF7OOBoCK+vPS7YA2QOtliZEr7cuc6eeVzLPmDPyul63Ou28opnaVFW3X4VB+Y6to7Druv2t3U8jBPAYfLA1xLf8oHvWfwUsZ3Iu+hKuheuP+v2ol7gpWI7wHcEiKMAq3kdAWEPjHtBd/uauq7LjPCV4EhBFUADbQq0Nkg/t5PBq0lP4z/1Z0NrD+LAAIuB1bcKOgGsUK8tR1aAeORZIBWxqKpXahqLk+JPssIZ6zrCvVrqtH1Gwnc0hDEoSrEcdKnXXJkRvtK2IgmqFjdribv8apq0NJeGjlmUhcXcvn5Ay7dtqXpt3VYJYFGt8N3zxCtPTK95X0k94Huifb1Y1xHQ9NZp2mjKI4HL1UltPIBuAXEwfD3p5MjUtG5fMe/A6/aXt+YW9GubtX6tu/5pe0rStp47kMOaggYAePmKhnECuKsiUs97K+Kv3gpw671Y3reR8A2UBFht7BHhq+kfq6PeI+09RsEXqvJG+FLywBfvI
zo1LT+iUOeEr/tay65/6t0wBl0JuOj+YAawLx6EOeAlAcxoZOrZEjvS/Upjt7rjFud7hynnumwkbLVx3tdWSEc4YC90ubrOzhfAttrZ4ny51LFvNTTvrJ+v9eDFoFq7Vg641lXQNWAxqL540O3nfVHxu+ia3SOAR6eeo7cdSTERq56j4Gq5B6mvXk5254VW0rU3djSIPfVRdRbY1tcW6G7LpL6N8PWudo6MsY5/iZcd83WcvCDr8jbiQN5em1ZBb2C7BS0G2Bqmz33g5ZiKzgDfG4BbU8E9U88el+rptxWe0j8ZDeQi4HtC11tfRzvdPVxvSxkVI7XRwJark8q41zWgAVTw9a52HhEDQK+OvtTZU9PPfV7Hb8vX+LWsjtn+3MZqYFtDtoZr4aZ/NYdjJYBr9drvK7XTxHpdcoQTbe3TWm9xoXfqeqXrVsB6+tvbAff4GQ3iGr7MgxU0q53fAZ9nIPcK3gFv3bR5Ca/gDfj8pjwC0PqFXJe3g4f0tv02vi6XoFsDV4KtCFkMrJzr5UCcKeitWuHraaeFYetpUZa+JbiOrsdiuHLLe90aO2iVs3TdE8RU3BkhbK3zvH76077aeYUvBrU34K2bNu+At1DobfvZguwN+DwB1pgFWnw5nX7moKsB7tbZrsC9Am0NzRqwGFR1x0Ff95UOeFUEfKNWMXvf7oiFV0eBb6859hO5Xq4uEsqt8PVCuBXK3pgWEG/hq3iwAvcsXws0+6Wl7XuHKfDWZXV/6/Xl7SRccgXdGrhbd4sC9xVSRl1TrlcLYoAEsF4tb0EPKGju5wx/ba1bjA6ecra2p+q0wG1p73G72HhaByy11wBbM04kfG/GtW01Arhd9ftc/gylOv42rl453DflrJkTrsu2/UWB9wm627eEgi4VU9dpyqnYBDBAjHPde89vxIlXs7nf1rRzK3w7ghfrfhbna+2Lio92w1EOOLJOA9yb1/KcL0D7VqOtEwaAx+t459vijp/LcRi/gFdQu+G1TgNd1uVqYMulpSnYGlZBJ4C7p54tb11P9+vZ82vpr1YP+J5kvpdzYJprD2y1cdGvR0O45ae2TgPlBvh6tho9L6i6hW9k2vmNxwVd2oVeXL/Y77iWYeDdul0VdCngSrDlQIvBtcUN3/cirL3g29v9ev66rIDl2h8FvpY+JzhYQ7rW9DMSypHA5epaYFtfc1DlylSv9audL0308N22aYHvukgLAFu01edgj3WsLXhv3DHidl9c3koAMECXc78aF4xd1/G1OBDfrwOeDb5eQMyYeo7smyprBWqmnE39asq1cMXqLaDVxGjdrKZOUy/G4/ClpHWgeJpZB8p3bLYhbVdMx6WptWlpHXhX6AKA7HQtwI2aA5YcL1Z/nw54phXPmrEscaP/qjxfCCglfNHrvUHsfR1Rpo3hHK32Z4jLrV4LW40AgF3xXF9jaVkrBOutSNpU9xvVfmNsFTY1z7y2qfslfycEvKjbfQWyy7W4YA60rU4Ya3t/Djg6/YvJ8sFPKerEq97ul1OLc6bGOhh8sa6tMPbU7f26B3yl9tg1VhfhfKXXV9cbelSywHe7FWcLtrVs+1rqY9vGts2oTidf168x8sEb1673CtKPc7zb+d0bx6txu9s6qn5bvi3jXtdtqBhN/X0BWAuPFvdrfat6ut/Rc82RcI6Grzb139H1YmVaN2uJnRm4WP1eDri7C35OOwOAaa+vZ7UzAGxcqexkrc43avU0l24mU81bWFIgXn+2zAPXryPngrF29wPgEfCNahflfqU2rQ7V2/cM8O3oerHuI697g5jqs6cD9jjhlp/R8L26vk47A/SDrxWEEkytMF+vb+emr+eUAa5XN9fpZtLx1uClQMxBl1vpzDlkqh6Ieq6M0vkB3HqQg6Uvqn2P/ahSjPVeW+Ac6YwTvs2A3V5HwFTz+ggQ1tZZQczA97Wred54+N4unrpts24R0sI3ymlz6WZunpcEL5Z2plywdh4Yq6Pq69d13FZaCJ97EdYM8LXEe6GiGTtycZTld7XAOfr333m+FxvKC1sudtTrnkDuBWFrHVWmKScWXAFAJ+dbQw2HLwdvALhZTFXHS/Xciu31HgFuF1mR4K1BW19TLpgq06aksbq6vq7DrqXyrc7rgCPh29J+xKIvacw9U8+WcT3xB4WvF7ZcXTRkqfII4GJlURD2xjS9Hg9faTWzBF/pUA9pDzBfL6ebb1Y2b1c1U+ClFl9h0PXMAXtPwfI64fM54NYTnqx9euCrcXXauOi/mhY4WwDb+iWBupeDp5zr6x4g7gltC0S5Om2bKDC3whd5sAKADF8A+5OFtoCLhK8uRa378nC5h+pJTZLrrV8DU+ZJR9flgMRzr7kFWVQZ1narczngHm5zDwdLacS2I04tgLV8UYn853YS+HLjWl9H9ucpo+q4WM0YUr91mQbSYvktfFdt9/pSeoF8Mr/clHHAe46Rtithe3Jv66i+rNfXzvd2rvcdbwnzvBhUI+eBe5yG5XXA5wHwaOcr9bHHwqtWRcKZkzWjEJkdOCh8R7jWiD68btgb63XA2r6MzhfgdsHVU5lrr6+8+tgKSGpOt80Z61POrOv1LMDSzAN7FmVR9XUMdr0V9x3s+AD2ONSIXycKvlrN7H6lfl8ydVw/WLxmPKqvSeE7GsSRsdp6D4QjfnrKuNdP1zr4tj7ZyFtPPRO4Fb7U/DB2L+946/O3e3o/B/SWIu08cN1mew1CmWbhlXcFtOR2sfrjzgF7U8PaX2VU6nkv99sC5yhnLPWjcbSTL7aqr71gjgJxbzjPBGELaKV6Ab7UnC9A22MF/fUyfFeY1vCl5o8pqK/zvU99eV2vZgGWNA/csgraOvdrnQfG+jueA24BYxR8e7pfLWSkmMh5Va4tB26uLmJl9uTwtcCWa9sC397AlepHQdgacwD4Pm/fsW0P4oFKp5WpPcUm17yB781c71ugn+eVwCuloS1zwBhsKdBq5n41EF51DABHuNEZ4Rvl4jUnSXH1Un+963rBd9KUs3RtBbGmfLTbxcp6Qdha1xm+q8bs9eXT0tHwJV3zq8fDPj73dpvrresAqQfiWjsHrElD168tC7LqeE5zAzgqDTwCvla1nmFskeW+W7Ydaeui0tva8RplhWlLf9q+qDisvJfz1ZRp/om0/pTqsPvUwvcpXr/auV5wRelF9clNAfq57rr+cpu30Nz2tb7exq5j0yujOcg/IOM+PD2n9/HtiYEvNefbejAH54Bb54GxGEoPMNMccK8511Hw7bHwStOf9X2LPHQjok4aX/OlBIsZlHbu5Xy5uh7O1VKPxba64b0csOq1f7UzAL8dKGLLjzxXK6eOsaMl6362J1ttnS8731svumoFb+14tfPA2lXQEozrGOyaKsO0rwMu0Hex0yjjHpF69oImoo2n7Qj3q/mycVD4jgBxD/h6QcvVzQ7fp/uPg699xbPlmk8tS/DF0s7YSmcSvutcrwTYtx7fVwnMFHip+V4OxBHzwFR8LQ2E505Be2W93R7zvlHxLX1JAJvJ/SZ8TXWj4DuzAx4FX+Z4yVVb+D6V
GeBbp469cK6BW/eznRNeH9Jgge+TK6YWW9Xg/BxRrnHE2pXQEogx6HLApcBrnQeu22CaJwUdpVnga7mPXu7XMqZl7663Ttrzq6njYgLgi3XbC7aW2FHO1VPfWtcTvm4Q4081AriGb33E5KU5BVYavtyiKmq7EbYtSNpOxKWvw+C7dcAUYNfUNFR1WLmUmt5eg1C+lgFSbnXB2n3AnBM+lwOOcnSe/jTtZnK/o6V1yd4vIyeCLxWnKef6sbTxOlpr/CjHi5UZ4btNOz9f8/CtF2FJK54BtIuh8GstYLm08/Y1Bd83PveF6/neNZ1cwxfbeqRJN2vAS7ldbsUzBmmqvq6ryzGwco4Xiz+HA97j9nqu0Pb03XI/PeZ+o7IDmt8rcLvRVi3wtfTtebulsSQgauM0X0qwuh7OVxoHuz/LlxQEvqs4+D43vwbpU9vKDT+X4+74+le57u9FNQbW9nYO2l631l/Bf+N8ARDnC4DDMwK+1i1IGvC2zgOnAwbw3Vqr+/XARQtIz9GLkedgc2N5XbXX/Q7c69sK22i3a30d7WhbynrAd4QDRuC7zvu+ePn8Kbo9aAPgeksPVkalli9D49uCuNSzdtEVtopZV3e92nl7utWN86VWOnPOdxsPQKesAenDuyBLgm6UA+Zgi7nj4wLYe0s9U89Wacbx3IsFlD2cfIT7PQl8I+oSvreKhu9TvzR8pVOuAPBFVJdheqWeuUM7dHVu+GKQxVZAf+7xvcXiPe4YiDLOFXNlgJRvy7jXGFQ5CNcxx0tBt9xKBHyj3C8WG3HkpKQe7pfrs2XuF5j6hK+7/ojw5Rywpkx8vaDwfepKccQkAP1owFXauVztvC+3L3gL0eex6ZXSbvhSK525cit4gagHJgaQOAAaupj7xcBMxWwlrX42amcARww/E3y16u1+e8i77Yjro9M/v3uGbzS494Kv1+0q4as533kVBV/t4RtYH5cyDXCv221XQFNbmqg9wutxkyh8a4eLQRaLAei7IIuCMQbXiFXQyvnfRXLDczvgqGFng+8o92vZW9vb/XJxHJixdh1WPB8NvlIfUllL2zPC91GW852fyhCQrore76t1yZQrpsB8DePrBVcq+NaQpVLU27S0dzEWIPXbMoBbEFNlGhdMpKVruD7UUH7U2wyEvzDHHHCv7qPcX6SL1P6uEe6XUw9nzD3tKGrshO/VawucNW1bQcvVeX9SdRb4Alauf7iC5qCN63J+v+/lVm5XMtvT0rb0NQbim/lgbJ+vBF/NIizqhCwqdQ1MHFbPXQPyU5uShmvYbkGLwfWBAW4d/4V9HXDp13XToRQWWd2vN7YVmFFwtszbamRxv532+lrqLfHauj1ccJRrpupGQFgqq+uvyq+PmNxqu+L5cv3KvN1IuyWpTj1jsdsxsTrMQdf3WW83em537ZAB4Op4yavHCW4hCJvXlGOlnKvG+daulhoDmPjtNcDtfddl22t4hi4H3Bq2lNul3LHSAPd2wNGKhG/UftZoRT5SUEuKiCcVafsYBF8LkHs4YQmsmngtnK1uWBungaclXut2sX5VDljeblTP+wLotxvVwgCIbUnCYRmReqaOqqzPd36Ad7z1+auznUmXW28dqh3sKyIOK9fOCUtgBriFKwZYAroccLegvYFwBde3QdYaozTARwLwKPh62mHjefb99lTE7xwx9ztAUUC1xHohq+nH6mCtfXjgq/l9te01YObaXJXL8F1l2W50GYpOPWOuVZNSvrRp35pErXh+nvfdPFhhC1Jq7vatzU8AGb7c3G+dugakvQW8igVYNXRr4G5hewVmuBYGXSYTbdZBADwq7expr43XxFndrxaC3jrPgRyWcSeY942ItYLY0o+1zFvHxffsRypT1+u2GwHcPt1Igq98zjPubum0MQ5PyjFvX79RHSeJpZvred83Pvf29VONapBqXDAGX8rtYtDWOGIg6qAqA6QNXKArAZeC7fZ1DVjK+XIgPokDjtwLq+mTaz/b3K+lv4iFU545Yq7NBPD1wlgLXwm6s8BXWwdIXB1rccCa+5DaAAC24nmVZtEVwG06+bmchi8d87Dp8xaQmpQyB2ntwq3nRVeI86VAyK12luDLPaxhWwdAg5lbkAVVDFw7XQm6GHAfkLK6HKvfCos9AYCPAF+sjfZwDqnd2dzvVpPBtyWWksUhW+rrMk2dBPKWdhI8rf1KDvimXF7xvIpbdMWtgq4XXNVx122eHe+lXD6j2TPve/vEJATy9XYjKsXMAVMDX8xBAxLLwZZKNTPg3bpdCbo1cCnYeiFc6+CLsHpvpTmboue0I/qPXlXd0J3F+VrGsrpiC8hb4cn1KZVJfUs/sb6tcLfe60sckgC6M54v3V9D9KoPBpScG962rftaX2NjUwCn2nDzvmULQCR1S8IQc8sYTKEqr6+18KXuB56vKcdrBa8Gut7537fhsA44YjVudPvoQzewuFHHTnr7pOJ2cr+1IgGrdcY9X3tdsBd4mrrWn9a6ugx9jR+2IS262sqTeqaAWgPS4n7reWhN6plMSVepZxSqq2vd/sTiuMcQWh/WULtjLq76osA5Xg66rc7XM/8LcDgH3PtYxxHzvp7+tW16PiIRU+t4A92v5ToqttWBa/tsdbpR0NY6XW18XYfFU/eHwbdKPQMAmnrGjpC8hSQN2lo8YLUPUKAXb1GLq9gY7KQrzNFyZdRWJC98LQ9rgOtrDLyY29VAFwOrNv3MpZ6xugM44FFw65F6nsn9Riy+0n4JodyvVo3utwW+1r6pOo8rtsS2uERrDHZvmlisXANhz++Glt/Cd1XEs33xgzP4hyfUfdcLseoxqPljzZwwt98XPWyD+sPt2aXqNfDFjq/UwNcA3hq68HhNQdcD4d5bkACGA3iPox+9fVhSz57+tW2itmB53K8nfU0Be2f49nC7FqBa+vC6YatjtjhXyf1q+8FiOVjflOMnXWmf7buV7mQrft4XBy73IAbu4A5r6pnf73sFRmoRFVVXQ7FOWT8A/jCGliclAVzBVwNei/PlgHuybUgF+i568sDE0of13nsuZurRnydF3Op+d1QUfK3A1bpcbCypzANWbXttf5rfwxKL1aPtVsLw+30Bbh+ygEHtKfYGiLdzxlthgL3c6isUktt2FGTX/p7jArYcaRdY1S4XA3I9h1xDtu7b8KQkK3gxsGoh7FkFLa2A3sYfIAXtVQR8e4+tje259ajjPCw5bicwt7rd6DptWlbbVgtnL3SlGIuLjYKwywG37fel5nQpID8Pz7nhGqz1eDRM1/J6DGqh17af+ktC87yvpo5LKXNPUKqBDNf9UenmFbwAz8DlwKuFbl2+LavL6zoqptbBFmFpFXW7ke5XqyO4Rk/6mdL2PQ5c+Sz9ExgBY02cFrTe/rd1FuhqxpLaSIDF4rwO+On1der5tat53uuPyK0DvnTDO91a3HzwNgbrn5oX5l5fQ5sCM/7Epcvb8djPA/KQhfon5Xg1dVwf29fA9A3VawB0SxHneqm5Xyt4W5wvl34GOCWA93S+3PhRB29YdSfpZwtwRzjjltfavr0
OVxMbkYKuy7l+re6eff2cegYAVep5KyqlzLna53bY3O6tw63jsXHrOO7Eq9vFWrz7JeFJpYIpmGJ11FYl7VhVOZZy1rjeGrbYa20qelvGldd1terYk6WgI29zVvd7pvRzkPttAaqlL22dNQ0tvda6QQ9srRCPSD17xqnL0Nf4fl8A32lXVEp5K2nVM+VKNc7YcuIVNw564IbWzVpSz9KCKmkRF5K65uDbCl4JuhYI13VUTK2TOGDr7R3F/bYq4mxnrp3nC8NgjXa7lnux9ql1qNbUsKYNN2Y0jLF40QHrnu9rPe0KEwVbPPahase7X6x8vZ/t/W7r+BXQxIEblJuVAFvP4WKglRZiOeD72c/xrpeCcA1R76KsbRlX3ksTA7gHfPdwv5qxWtyvd1zKvdbSxHVw3L3crhSrifM6ZEs9VacBcg+n622P9SHd0/p6+3zfR2lXPW+FzedqQXuJxVPU3JwuNl/r2XZ0fb/XK6rRhVcA/J5b7o8mhS3t7RXg+/bjIqzPfs7mejHHq90HrNkDPAK2mCYF8EzwbXW/vcEeQZetImHqTD9b3a3lLeDA4AWrJ56rj3CtUW3qtpoY7ZcFqv6q/PEjEjlwYyvPgRu18FQz/zQkzP3W/VErmuttR2sdte3o6h65s545KFKQtYAag2u9P7gac1md7oOcctakoKX0NBDXox2upAkBPOEtTaXeD144uCJSzxFjcbER7lc7ngeyLS65BcJX94LPonHuF0A+cONqCMTVWoRBddvv+horr1PRVIqactwvHvftPP36mPuF6vWrqryOrfvZxtVldR/1WEh9BHw1h3BY5n731kS0897KLO5XK2v6uaVvSpHOODj93NP9euqiXnPjaAEmxbS4YCm+pR9LClrpfrmFVwDynl9M2hOv+JiHq/625VvYUwdqrPGXt+M2VX15O57dLwDI7reOkeaL1z9bJ0sdS4k54nWv72bO97Nv2eFbQ1i7EItLSc+mCQDccgszLRbqlX723r+2XS9n3OGpIel29gAAFONJREFUR7V6wNgCUK0kF6gZL9LRemJb+9CmoAHActzkVprTrZ6Hu00rU5LcMeZSMZhu72lbh9/XA12+PfEKAyKAnI7GoKuJwR7UwKS41wVXEnw/+3jbGISlvcAa9zurdgTwBOwf7n5b5V2c5TkqU7I+HRTpdj3A5RThfq1lkWljT5ueKWjq9VPsA2iOm8ROvAKgockdrmHZdiTPBVPzt5TLvYau5H4BqrlfgGsIAshOdxujATMG4hq+G9e8wpdbcPVZwCG6hS9XD8zrI2gHCkYM2TPV6pHW/fZc/dzzi4F19fMO7jcitsX99gBxXWcBs7YPawraC2HJAT+V8cdNbkWdeIWtRqYXY92ukNaoXji1HZ8qx8Bc3+v6egtqyv2anS0QMRo3LI1FON+3Gee7ha/W9VLp5aOBd9UAQs3gdDHtPfe7l7a/2yz3BMd3v56xrA5Sau9xw9pxLW2pdtrUNAC0nHgFAGQZB1qL+73c8u2iqnqcOt28ff0Sheyrq7ZXC7pq90stgOKgu42VQKyFb/WH2ue7QhdAhi8FXA7CR1QnOpZ+XU/nfveSljKR6Wcqfqud3S8Hjkgwex2v95+lFqYtaWuq3nIPaqeLtbGdeHXdnTTPe+1YMbBaVC/Oql+v19c/6ScePbe5TVev5Vcrn7ffLziHS0G1XqSFtQEgIUv1WZ9wVTvfbbca+GpTz0fVa3vfgE1Rjs37mEJtfMTiMCk9bR3PqwHzwi3uN2pMTVzkfWjhpe1HC1HrPVHlmr8j75eRlzQMa/eLxiDu9zKcZjGVbu53jcf6xg/hoMH8PDY1n/zq9oELANcQBLgGJ2xioKp/qOrqfjCIA/ITAzrAzeMEsT28ALd19wZfgNPaxB6/Vu90bVT/Fnt4QPVIN1tTzJpyD5Couoi/NmtaWhuvSSdTcVdtrvf9ag/dsB45KaWVPdK42fqe6vQzdt83bnl94AIAvc1o+5pKFwNTL80XMylrasUz5Xa3aWguDoiyM+hAn8hRqedI96vtPxre0fd5oLlgS9vWOE0bayJDKusx5+tJS2tiqWuqP/L15shJ5dwvJmw7kSXNTLnf535wh7zWX34tvLwGM97/A9Judb6vbvf9wuYaSyXXIIUqhuurLpcWeQGQi64ouGrgu97CGeELYEhBl1JelFL+XinlB3reEK69AUGN3zqhJ/WvrY9sp5k/Dpz/bQFshNmPyqq3uN+odLQkq6sd5X4raVc+U4uqNAutLJJS19iRktfl13PEWDnWzxPwscVXAPSpV1gMVk+5X+oaNj+3b8mj++Xmfan5Xs1hHGeFL4BtDvgjAPDxXjdCywKRlk+rmZIBlnsZtSit0/yvZ0isbhSMLaC1ppw5tbpfj7O13AcXw76+feACwK37vap7Uad6uYVWOKS5eV2qreSQsXvh4CqV36TVKZiu1/UcMAZSyv1SKW0ulf1YVqeeuf25np9nhS+AEsCllPcCwO8FgO/uezu1op3vHvO4I9PP2k/3vTMKj2pJN1v6peqivq/1SkP3+G7TA+Ct7ldx5GT9uMFLF7j75eZ+OVFOV2qLwbTeosSlpbfbkOrxto8cvFp8haWUMZhSgKaASjloDOKPP+vUM8Ctc/XCd7sn+IzSOuA/DwDfDgBfoAJKKR8upXy0lPJRgM8E3JoVEr2cWHT6eTZNAuNaUQ7X4357mX2vI9bCcNQ/yRb32yjszGdTe2Qe13wPW0CK88o01LF6bBHXlarU781cbZ0qrl+vfTxU11vVfTA/l8fXD48/t7AF4vXZoWqRCOBSyjcCwKeXZfkYF7csy5vLsnxgWZYPALwz7Abj1HvxlVbW+d+RGjz/61VvGHvbtECn5d68c7jS2B4XbClrPPP5+brP4iv5Gb3btPV1Cnn7unbGz28F7pCpZ/4+qYYuwK27XX9iKWUsBa1xzOs4jz+3e34Bnt3v1sEC8rqOkVZAn1kaB/x1APD7SimfBIDvA4APllL+Ute7umv3GwXoyKcUdbBcPeZwveMbFwmFpZ97pKE9jlpzX9o+NGVY+hmuF18BAHvwBn5LtKtt3Wp02x8PXCzNfH0vt+X19XbvLwDgbhdzsxRMAW5Ty1g/2lT1GrKZ+926W2wRFoAewvcgEcDLsnzHsizvXZbl/QDwzQDwd5Zl+ZZ+t9TDEe7lfo+yd3hntcwFj5jv1bSXxmr9gmGdo40CuaYf1xww7X634o6d9EBV42yp/uutR5Sb3cZvf8pnRSOOmlv9DNVPzBnXLrcuoxZura9rMG9+YnO/9RA1bKX9vfcEX4DpTsI6MlBG3HuETYza/7vTHPio+d6Itj1A3KqWxVZUH1IdmUlA5jjhduvR7RB8SnpbZoG0tPqZbnftZjFI12CWti4BwPWDF1bV8Fx/Ui54206CLQX5bf3jn+22IwB8WxFUr6VDNgDuC74Axv/yy7L8MAD8cJc7cWt0+rlF1nsd+YVk4PxvD4fbMgbWxgtpT0qWqmt1ra3yThNwEL7p8/KJL6WfNYuvsO1I13U82LWqH0O4/Sk99xdzwN
ghHGv6+UlY+rku4+aBOUDXq6glMMOz492+rud8LRDetr8nTeSAj+J+I4Hf8vhBS78tGux0e8/39uizpf+W9LPUhzVNTfVnTTtLqo6dXMWlnwFu3aJnTpd75i96T0zK2zunzH0RuOp/m35ehQ2JwRnglmgYtLEYaqzHuu3K5+15z3VzDLzYrd9j6nnVJAD2AkPzv3+W1c/ROslToSwuloPe6PSzJ7anY49oZ/kyoK0zLr7SPPP3qp5ID3uecERBWeoLP8GKdsZ1ObcV6WXlPAEAX1RFlWPzwnU5l36uob0CdzP3yzleawr63jQBgI/ifDmNOIDjJBo5z+ltM1P6uefiKU8/XCxVZhwbO/nq6hqFlvTwBX4e13R/V4C9TTNv4+oYbBvTWne7/agamHO6NYjrcmx1NADuchkwb0+9WiUttMJWQN+z691qZwDPCqkZ5n8t7VsXYFkt0bY8cP9vj/neqFRxa59WEFOxrYAdtQiLbGtf/QzAp23rOM1Z0NRDFbi+6nlk6jCNerw6pj71qtZ2/rdQaWMszawpX0VtNZIWYcH1vt968ZQmBU3NFd+jdgRwK+T2SD/3nP+11ke3k9p3+FLSG7jasTXfM6IWjkXE76WIRVgA3dPPlzq726XmhLk5Ynwrkm4hGHV0JQDcHr4BQLtXbDEWVc7BHCurwPx01OTDdTgAn3bevqbmg+9ROwF4BHyPJsvvNGvm4ASKnFedLf2sTXRo2kmQxcoUi69mTz/X/T6XXaeit6+p+d/69drPi6slxkCvYoaGcixtLSzQoo6dpB6akCloWTsA+GzwmHn+d/vpN8E9RS9CsvQ5Kv3ckpKWYrWrlq39WfoJex+ZtK3i1CvcYcrpZ6vqOVv8yUr1lqTtNqVbMG/LsfYAcDv/C0ADEksXa8uxhVvMIqw6/Uy5XyrdXJfduwYDeCQE9krhplTqmdK19Lf3orCW8Ufee4dFWPXe363q5/5aJM3Jco8V3I55nVJud9DYnDJ3z6UmFhDXEeWUc96GIOnnbZMaqNS8MHdL96ZBudwD70sNUc8FWB5FLcDqpN7zv5r2Fkj3cL1e9XD6lhjxvcI/erHTr7bpZ2qrUaQsJ2fRz/W9hSp33vPNSVivXj3t/wUAfLvRes0truLmfzVzwNvXr/D08zaFTC2sytOueHX+SJjVTe69AEvq2/K+jVgYFnACVk9wjurT0n/0/K3Uv3UeN2IVNFWGzg/Lq5/r+V8A/iAMqmxb3mv+F0D3BQADc90WO5YSTQ7U87ZSOcBtGpoqr1dBI2Cun3r0VL5psl5j6WmAnPut1SkFXWBe+PaW5vfu9d7c2Xs+2nnu6YSj5389as4wyO635/yv72CO23ld6n6kgzuohzFsX98swNKsUpbKt3WYk66FgLlOP1OQpcx0rnzGNcFBHBZpPwHOPP97hHsU1GP+d4btR1JfVN2ssyrRq6A30s7/XrrQp58l0HLP/q1jtn3S/fFjYTHYAqyres0CLNhcc+XaxVxUynobtoEvNhy1wrkeKt3vsw4G4Nl0Ahi6FXgAx1ajU9Wtfc84/6tNL3vS0C73rjv7+apO8fCFaFlcMrW1iFowxp1j/eSc6wM4MHHlmjS0Zv63ilk219iDF7BrQOoTvLdKAJ9Wk21BotQbfD00q2PVqueqcucY2PyvJK8zbdF2xbRusZbuiwQKbm4B1iqtK9b2R/S7XflMpZupLrC9wamLTghgCTazLcA6kga/DxFwHr3lqGWcqAVYkbKmycV58gpIzPyv5ulH1KlTa/v66UdWaZ5+hD2akLrX+v5uYrYroAH4NLMEVGoBFtcfsg0J236EQXV7q9wWpNSzDgTgo0Jwti1Imr6Dx5zxry56LljrAvdcgOUZk6uzvCfE8ZPybcjQwuKsklZXSwuwuHvSrIC+qteuXN6Kmu+t64R53qfyR2Hbj1ZRJ1xB9TrTz7QOBODemjhNS8rzCWzdahT8vsy+5Wi0Sx6RDraUR2cdNE2ZhzLgQ/FbjzRttQuw1jJsLOyaArXmOMoX9f4eaZEVpofqj6U/wa7W8791KHf0ZP06dVECOFQeWLXsAY5oN5EiFv+MdNs9XW+rtAuv6nJNn6b70D39aKt6AZZV3vlf6wIs3xwwvgJ6XYB1swJ6uy93Kw60WPta2KrnepzH7Uea+V9u6pkrv3cdBMAz5jDvGJRRurcFWD2des/MQss2pACNWAGtFeeGr5/7K7tmqsy8AhqABu3azroAqz5sg4Aw52qpldGpZx0EwFr1WIA1k+4I3hF/VXsswPLKunWIuj6QIhZgkX3D7SP+XPco7Nm19qHuR7OlqBYH2rV++1MoX6prKp3MHTuZzpfXyQDcQwf+hEvJ6rUAK1p7p9Y1dWQb38ewbi5W7tt7JKVmBbS2PR9H/A6at80CWq7dA1EOzwuwuC6lc57TBeNKAKcc6nQIxyhFLP5ugfJe3+mk3ztySsBxApZuKP2pWBZx+3W5/rEtSLr+K1f88Op2CxIAf4SkBrSeum26eV0Fjfxa0vGS6YJlHQDA9+xAIz4RT562PlKauVbv+7UuxOL6sI6pDRcWZuldpN3dYmlqbdrae1+qLUiUWkBrXVHN9CdlsKlV0KlbHQDAI9QLUkchgmYLUsDv4nWFR3kbvSugo36/XlubIvpsdL5bcS5TAiMXoz2oQ3PsJAZX9UEgEY6W29+rXGlNPfVIuo2UXglgl07uKiN1FHhqdKbfRVLEFwflGdDba+0WJM1DEHoIByu/B5iKr19fqQXC3EIsqg/CIT880CdgbRdbcSdfpQumNTmAj3jCwlE06BSsaE1+e6T2OrVq1HhHngroLAnM24cwXEkDUOm7hmfrEjHfi71OtWlyAFu0tyvde3yNJrrH3innMwPhoL+PdgGWa9vOAFmdtfVhEehjCAFkyEpvj3deWanchuTXiQDcQzN/0s18bwfX3quUPWctR99DZ71wbk0aLcupV5zEuWlsBTSA7ulGVL22H0T1HmBD05RBCeBdlRDdVSHznBE3otTewB24B7iWdQ9w9CEcdMztKVgtIk/BApCdrlfEHuC3mXvJ/b4xSgBPpYlSxCN1dIj1HiMCkr2+SHR6PyP29gLQ242sfUSp9XAPVpIjZg7b4BzydjW05QzohLGsBPBQ9QJsgntoX0dKXPRaazfJQyfiQG2bd9ZvWXJkAFrmbLXDKeM8LjilVwI4lRqpUZA6wJcEz2MIe4g7BStSve7/Rq0rpJFyyx7gBLNeEwN41CfInbrH1H3K4/R3gnmvU7Ci2nJA7XoOtOWWB66cyj3Adk0M4JRPB7A+PXSnv3bKL+5QDU5SzDCnCzDF0uSErF8J4FQqQp3PTD60Bh1DaetnP3LtOfaNqIO45th6fXolgFNjNNkq29T9ioK117lKT0LqIgvDg3ifTjdeCeBUqpfOtr3qBNrjVC33mNy2odQplABOpVLxIh7EkLrVi9Z87w4Z7YmS6IdWAvgwytXaqVQqdSaVZYn/plpK+WcA8E/CO+6rXwcAv7z3TZxc+R6PUb7PY5Tv8xgd8X3+jcuyfLkU1AXAR1Qp5
aPLsnxg7/s4s/I9HqN8n8co3+cxOvP7nCnoVCqVSqV2UAI4lUqlUqkdlAB+1pt738AdKN/jMcr3eYzyfR6j077POQecSqVSqdQOSgecSqVSqdQOSgCnUqlUKrWD7h7ApZQPlVL+USnlE6WUP7P3/ZxRpZTvLaV8upTyM3vfy5lVSnlfKeWHSikfL6X8bCnlI3vf0xlVSvmiUsrfLaX89OP7/F/tfU9nVSnlRSnl75VSfmDve+mhuwZwKeUFAHwXAPweAPjtAPAHSym/fd+7OqX+AgB8aO+buAM9AMC3LcvyrwDA1wLAf5z/nrvoLQD44LIsvwMAvgoAPlRK+dqd7+ms+ggAfHzvm+iluwYwAHwNAHxiWZafW5bl8wDwfQDwTTvf0+m0LMuPAMA/3/s+zq5lWX5pWZaffHz9a3D54HrPvnd1Pi0Xfebx8vXHP7maNVillPcCwO8FgO/e+1566d4B/B4A+IXN9acgP7BSJ1Ap5f0A8NUA8OP73sk59Zga/SkA+DQA/OCyLPk+x+vPA8C3A8AX9r6RXrp3ABekLL/Jpg6tUso7AeD7AeBbl2X51b3v54xaluXVsixfBQDvBYCvKaV85d73dCaVUr4RAD69LMvH9r6Xnrp3AH8KAN63uX4vAPziTveSSjWrlPI6XOD7l5dl+et738/ZtSzLrwDAD0OucYjW1wHA7yulfBIuU4MfLKX8pX1vKV73DuCfAICvKKX8plLKGwDwzQDwN3a+p1TKpVJKAYDvAYCPL8vynXvfz1lVSvnyUsq7Hl9/MQB8PQD8w33v6lxaluU7lmV577Is74fL5/LfWZblW3a+rXDdNYCXZXkAgD8FAH8bLgtW/tqyLD+7712dT6WUvwIAPwYAv62U8qlSyh/f+55Oqq8DgD8MF7fwU49/vmHvmzqhfj0A/FAp5e/D5Uv8Dy7LcsptMqm+yqMoU6lUKpXaQXftgFOpVCqV2ksJ4FQqlUqldlACOJVKpVKpHZQATqVSqVRqByWAU6lUKpXaQQngVCqVSqV2UAI4lUqlUqkd9P8DnGSSkMm/7/MAAAAASUVORK5CYII=",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "heatmap(grid, cmap='jet', interpolation='spline16')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The peak value is 32 at the lower right corner.\n",
+ " \n",
+ "The region at the upper left corner is planar."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let's instantiate `PeakFindingProblem` one last time."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 75,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "problem = PeakFindingProblem(initial, grid, directions8)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Solution by Hill Climbing"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "solution = problem.value(hill_climbing(problem))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0"
+ ]
+ },
+ "execution_count": 21,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "solution"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Solution by Simulated Annealing"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "32"
+ ]
+ },
+ "execution_count": 22,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "solutions = {problem.value(simulated_annealing(problem)) for i in range(100)}\n",
+ "max(solutions)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Notice that even though both algorithms started at the same initial state, \n",
+ "Hill Climbing could never escape from the planar region and gave a locally optimum solution of **0**,\n",
+ "whereas Simulated Annealing could reach the peak at **32**.\n",
+ " \n",
+ "A very similar situation arises when there are two peaks of different heights.\n",
+ "One should carefully consider the possible search space before choosing the algorithm for the task."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## GENETIC ALGORITHM\n",
+ "\n",
+ "Genetic algorithms (or GA) are inspired by natural evolution and are particularly useful in optimization and search problems with large state spaces.\n",
+ "\n",
+ "Given a problem, algorithms in the domain make use of a *population* of solutions (also called *states*), where each solution/state represents a feasible solution. At each iteration (often called *generation*), the population gets updated using methods inspired by biology and evolution, like *crossover*, *mutation* and *natural selection*."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Overview\n",
+ "\n",
+ "A genetic algorithm works in the following way:\n",
+ "\n",
+ "1) Initialize random population.\n",
+ "\n",
+ "2) Calculate population fitness.\n",
+ "\n",
+ "3) Select individuals for mating.\n",
+ "\n",
+ "4) Mate selected individuals to produce new population.\n",
+ "\n",
+ " * Random chance to mutate individuals.\n",
+ "\n",
+ "5) Repeat from step 2) until an individual is fit enough or the maximum number of iterations is reached."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Glossary\n",
+ "\n",
+ "Before we continue, we will lay the basic terminology of the algorithm.\n",
+ "\n",
+ "* Individual/State: A list of elements (called *genes*) that represent possible solutions.\n",
+ "\n",
+ "* Population: The list of all the individuals/states.\n",
+ "\n",
+ "* Gene pool: The alphabet of possible values for an individual's genes.\n",
+ "\n",
+ "* Generation/Iteration: The number of times the population will be updated.\n",
+ "\n",
+ "* Fitness: An individual's score, calculated by a function specific to the problem."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Crossover\n",
+ "\n",
+ "Two individuals/states can \"mate\" and produce one child. This offspring bears characteristics from both of its parents. There are many ways we can implement this crossover. Here we will take a look at the most common ones. Most other methods are variations of those below.\n",
+ "\n",
+ "* Point Crossover: The crossover occurs around one (or more) point. The parents get \"split\" at the chosen point or points and then get merged. In the example below we see two parents get split and merged at the 3rd digit, producing the following offspring after the crossover.\n",
+ "\n",
+ "\n",
+ "\n",
+ "* Uniform Crossover: This type of crossover chooses randomly the genes to get merged. Here the genes 1, 2 and 5 were chosen from the first parent, so the genes 3, 4 were added by the second parent.\n",
+ "\n",
+ ""
+ ]
+ },
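+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A minimal sketch of both variants follows. The helper names `point_crossover` and `uniform_crossover` are illustrative only, not part of the `search` module; the module's own single-point `recombine` is shown later."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import random\n",
+ "\n",
+ "def point_crossover(parent1, parent2):\n",
+ "    # split both parents at one random point and merge the two halves\n",
+ "    c = random.randrange(len(parent1))\n",
+ "    return parent1[:c] + parent2[c:]\n",
+ "\n",
+ "def uniform_crossover(parent1, parent2):\n",
+ "    # pick each gene from either parent with equal probability\n",
+ "    return [random.choice(pair) for pair in zip(parent1, parent2)]\n",
+ "\n",
+ "print(point_crossover(list('12345'), list('abcde')))\n",
+ "print(uniform_crossover(list('12345'), list('abcde')))"
+ ]
+ },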
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Mutation\n",
+ "\n",
+ "When an offspring is produced, there is a chance it will mutate, having one (or more, depending on the implementation) of its genes altered.\n",
+ "\n",
+ "For example, let's say the new individual to undergo mutation is \"abcde\". Randomly we pick to change its third gene to 'z'. The individual now becomes \"abzde\" and is added to the population."
+ ]
+ },
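+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A quick sketch of that example (illustrative only; the `search` module's own `mutate`, shown later, also applies a mutation probability):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import random\n",
+ "\n",
+ "individual = list('abcde')\n",
+ "gene_pool = 'abcdefghijklmnopqrstuvwxyz'\n",
+ "# replace a randomly chosen gene with a randomly chosen value from the gene pool\n",
+ "c = random.randrange(len(individual))\n",
+ "individual[c] = random.choice(gene_pool)\n",
+ "print(''.join(individual))  # e.g. 'abzde' if gene 3 mutated to 'z'"
+ ]
+ },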
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Selection\n",
+ "\n",
+ "At each iteration, the fittest individuals are picked randomly to mate and produce offsprings. We measure an individual's fitness with a *fitness function*. That function depends on the given problem and it is used to score an individual. Usually the higher the better.\n",
+ "\n",
+ "The selection process is this:\n",
+ "\n",
+ "1) Individuals are scored by the fitness function.\n",
+ "\n",
+ "2) Individuals are picked randomly, according to their score (higher score means higher chance to get picked). Usually the formula to calculate the chance to pick an individual is the following (for population *P* and individual *i*):\n",
+ "\n",
+ "$$ chance(i) = \\dfrac{fitness(i)}{\\sum_{k \\, in \\, P}{fitness(k)}} $$"
+ ]
+ },
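+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A minimal sketch of this fitness-proportionate (\"roulette wheel\") selection, assuming non-negative fitness scores. The `roulette_selection` helper below is hypothetical and for illustration only; `genetic_algorithm` uses the `search` module's own `select`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import random\n",
+ "\n",
+ "def roulette_selection(r, population, fitness_fn):\n",
+ "    # each individual is picked with probability fitness(i) / sum of all fitnesses\n",
+ "    fitnesses = [fitness_fn(x) for x in population]\n",
+ "    return random.choices(population, weights=fitnesses, k=r)"
+ ]
+ },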
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Implementation\n",
+ "\n",
+ "Below we look over the implementation of the algorithm in the `search` module.\n",
+ "\n",
+ "First the implementation of the main core of the algorithm:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 51,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "def genetic_algorithm ( population , fitness_fn , gene_pool = [ 0 , 1 ], f_thres = None , ngen = 1000 , pmut = 0.1 ): \n",
+ " """[Figure 4.8]""" \n",
+ " for i in range ( ngen ): \n",
+ " population = [ mutate ( recombine ( * select ( 2 , population , fitness_fn )), gene_pool , pmut ) \n",
+ " for i in range ( len ( population ))] \n",
+ "\n",
+ " fittest_individual = fitness_threshold ( fitness_fn , f_thres , population ) \n",
+ " if fittest_individual : \n",
+ " return fittest_individual \n",
+ "\n",
+ "\n",
+ " return argmax ( population , key = fitness_fn ) \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(genetic_algorithm)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The algorithm takes the following input:\n",
+ "\n",
+ "* `population`: The initial population.\n",
+ "\n",
+ "* `fitness_fn`: The problem's fitness function.\n",
+ "\n",
+ "* `gene_pool`: The gene pool of the states/individuals. By default 0 and 1.\n",
+ "\n",
+ "* `f_thres`: The fitness threshold. If an individual reaches that score, iteration stops. By default 'None', which means the algorithm will not halt until the generations are ran.\n",
+ "\n",
+ "* `ngen`: The number of iterations/generations.\n",
+ "\n",
+ "* `pmut`: The probability of mutation.\n",
+ "\n",
+ "The algorithm gives as output the state with the largest score."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For each generation, the algorithm updates the population. First it calculates the fitnesses of the individuals, then it selects the most fit ones and finally crosses them over to produce offsprings. There is a chance that the offspring will be mutated, given by `pmut`. If at the end of the generation an individual meets the fitness threshold, the algorithm halts and returns that individual.\n",
+ "\n",
+ "The function of mating is accomplished by the method `recombine`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 52,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "def recombine ( x , y ): \n",
+ " n = len ( x ) \n",
+ " c = random . randrange ( 0 , n ) \n",
+ " return x [: c ] + y [ c :] \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(recombine)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The method picks at random a point and merges the parents (`x` and `y`) around it.\n",
+ "\n",
+ "The mutation is done in the method `mutate`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 53,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "def mutate ( x , gene_pool , pmut ): \n",
+ " if random . uniform ( 0 , 1 ) >= pmut : \n",
+ " return x \n",
+ "\n",
+ " n = len ( x ) \n",
+ " g = len ( gene_pool ) \n",
+ " c = random . randrange ( 0 , n ) \n",
+ " r = random . randrange ( 0 , g ) \n",
+ "\n",
+ " new_gene = gene_pool [ r ] \n",
+ " return x [: c ] + [ new_gene ] + x [ c + 1 :] \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(mutate)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We pick a gene in `x` to mutate and a gene from the gene pool to replace it with.\n",
+ "\n",
+ "To help initializing the population we have the helper function `init_population`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 54,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "def init_population ( pop_number , gene_pool , state_length ): \n",
+ " """Initializes population for genetic algorithm \n",
+ " pop_number : Number of individuals in population \n",
+ " gene_pool : List of possible values for individuals \n",
+ " state_length: The length of each individual""" \n",
+ " g = len ( gene_pool ) \n",
+ " population = [] \n",
+ " for i in range ( pop_number ): \n",
+ " new_individual = [ gene_pool [ random . randrange ( 0 , g )] for j in range ( state_length )] \n",
+ " population . append ( new_individual ) \n",
+ "\n",
+ " return population \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(init_population)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The function takes as input the number of individuals in the population, the gene pool and the length of each individual/state. It creates individuals with random genes and returns the population when done."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Explanation\n",
+ "\n",
+ "Before we solve problems using the genetic algorithm, we will explain how to intuitively understand the algorithm using a trivial example.\n",
+ "\n",
+ "#### Generating Phrases\n",
+ "\n",
+ "In this problem, we use a genetic algorithm to generate a particular target phrase from a population of random strings. This is a classic example that helps build intuition about how to use this algorithm in other problems as well. Before we break the problem down, let us try to brute force the solution. Let us say that we want to generate the phrase \"genetic algorithm\". The phrase is 17 characters long. We can use any character from the 26 lowercase characters and the space character. To generate a random phrase of length 17, each space can be filled in 27 ways. So the total number of possible phrases is\n",
+ "\n",
+ "$$ 27^{17} = 2153693963075557766310747 $$\n",
+ "\n",
+ "which is a massive number. If we wanted to generate the phrase \"Genetic Algorithm\", we would also have to include all the 26 uppercase characters into consideration thereby increasing the sample space from 27 characters to 53 characters and the total number of possible phrases then would be\n",
+ "\n",
+ "$$ 53^{17} = 205442259656281392806087233013 $$\n",
+ "\n",
+ "If we wanted to include punctuations and numerals into the sample space, we would have further complicated an already impossible problem. Hence, brute forcing is not an option. Now we'll apply the genetic algorithm and see how it significantly reduces the search space. We essentially want to *evolve* our population of random strings so that they better approximate the target phrase as the number of generations increase. Genetic algorithms work on the principle of Darwinian Natural Selection according to which, there are three key concepts that need to be in place for evolution to happen. They are:\n",
+ "\n",
+ "* **Heredity**: There must be a process in place by which children receive the properties of their parents. \n",
+ "For this particular problem, two strings from the population will be chosen as parents and will be split at a random index and recombined as described in the `recombine` function to create a child. This child string will then be added to the new generation.\n",
+ "\n",
+ "\n",
+ "* **Variation**: There must be a variety of traits present in the population or a means with which to introduce variation. If there is no variation in the sample space, we might never reach the global optimum. To ensure that there is enough variation, we can initialize a large population, but this gets computationally expensive as the population gets larger. Hence, we often use another method called mutation. In this method, we randomly change one or more characters of some strings in the population based on a predefined probability value called the mutation rate or mutation probability as described in the `mutate` function. The mutation rate is usually kept quite low. A mutation rate of zero fails to introduce variation in the population and a high mutation rate (say 50%) is as good as a coin flip and the population fails to benefit from the previous recombinations. An optimum balance has to be maintained between population size and mutation rate so as to reduce the computational cost as well as have sufficient variation in the population.\n",
+ "\n",
+ "\n",
+ "* **Selection**: There must be some mechanism by which some members of the population have the opportunity to be parents and pass down their genetic information and some do not. This is typically referred to as \"survival of the fittest\". \n",
+ "There has to be some way of determining which phrases in our population have a better chance of eventually evolving into the target phrase. This is done by introducing a fitness function that calculates how close the generated phrase is to the target phrase. The function will simply return a scalar value corresponding to the number of matching characters between the generated phrase and the target phrase."
+ ]
+ },
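+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As a quick sanity check, Python's arbitrary-precision integers can reproduce the counts above:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# verify the brute-force counts quoted above\n",
+ "print(27**17)  # 2153693963075557766310747\n",
+ "print(53**17)  # 205442259656281392806087233013"
+ ]
+ },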
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Before solving the problem, we first need to define our target phrase."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 55,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "target = 'Genetic Algorithm'"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "collapsed": true
+ },
+ "source": [
+ "We then need to define our gene pool, i.e the elements which an individual from the population might comprise of. Here, the gene pool contains all uppercase and lowercase letters of the English alphabet and the space character."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 56,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "# The ASCII values of uppercase characters ranges from 65 to 91\n",
+ "u_case = [chr(x) for x in range(65, 91)]\n",
+ "# The ASCII values of lowercase characters ranges from 97 to 123\n",
+ "l_case = [chr(x) for x in range(97, 123)]\n",
+ "\n",
+ "gene_pool = []\n",
+ "gene_pool.extend(u_case) # adds the uppercase list to the gene pool\n",
+ "gene_pool.extend(l_case) # adds the lowercase list to the gene pool\n",
+ "gene_pool.append(' ') # adds the space character to the gene pool"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We now need to define the maximum size of each population. Larger populations have more variation but are computationally more expensive to run algorithms on."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 57,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "max_population = 100"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As our population is not very large, we can afford to keep a relatively large mutation rate."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 58,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "mutation_rate = 0.07 # 7%"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Great! Now, we need to define the most important metric for the genetic algorithm, i.e the fitness function. This will simply return the number of matching characters between the generated sample and the target phrase."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 59,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "def fitness_fn(sample):\n",
+ " # initialize fitness to 0\n",
+ " fitness = 0\n",
+ " for i in range(len(sample)):\n",
+ " # increment fitness by 1 for every matching character\n",
+ " if sample[i] == target[i]:\n",
+ " fitness += 1\n",
+ " return fitness"
+ ]
+ },
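+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As a quick check, the target phrase itself should score a perfect fitness of 17:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(fitness_fn(target))  # 17, since every character matches"
+ ]
+ },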
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Before we run our genetic algorithm, we need to initialize a random population. We will use the `init_population` function to do this. We need to pass in the maximum population size, the gene pool and the length of each individual, which in this case will be the same as the length of the target phrase."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 60,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "population = init_population(max_population, gene_pool, len(target))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We will now define how the individuals in the population should change as the number of generations increases. First, the `select` function will be run on the population to select *two* individuals with high fitness values. These will be the parents which will then be recombined using the `recombine` function to generate the child."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 61,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "parents = select(2, population, fitness_fn) "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 62,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "# The recombine function takes two parents as arguments, so we need to unpack the previous variable\n",
+ "child = recombine(*parents)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Next, we need to apply a mutation according to the mutation rate. We call the `mutate` function on the child with the gene pool and mutation rate as the additional arguments."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 63,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "child = mutate(child, gene_pool, mutation_rate)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The above lines can be condensed into\n",
+ "\n",
+ "`child = mutate(recombine(*select(2, population, fitness_fn)), gene_pool, mutation_rate)`\n",
+ "\n",
+ "And, we need to do this `for` every individual in the current population to generate the new population."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 64,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "population = [mutate(recombine(*select(2, population, fitness_fn)), gene_pool, mutation_rate) for i in range(len(population))]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The individual with the highest fitness can then be found using the `max` function."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 65,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "current_best = max(population, key=fitness_fn)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let's print this out"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 66,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "['J', 'y', 'O', 'e', ' ', 'h', 'c', 'r', 'C', 'W', 'H', 'o', 'r', 'R', 'y', 'P', 'U']\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(current_best)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We see that this is a list of characters. This can be converted to a string using the join function"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 67,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "JyOe hcrCWHorRyPU\n"
+ ]
+ }
+ ],
+ "source": [
+ "current_best_string = ''.join(current_best)\n",
+ "print(current_best_string)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We now need to define the conditions to terminate the algorithm. This can happen in two ways\n",
+ "1. Termination after a predefined number of generations\n",
+ "2. Termination when the fitness of the best individual of the current generation reaches a predefined threshold value.\n",
+ "\n",
+ "We define these variables below"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 68,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "ngen = 1200 # maximum number of generations\n",
+ "# we set the threshold fitness equal to the length of the target phrase\n",
+ "# i.e the algorithm only terminates whne it has got all the characters correct \n",
+ "# or it has completed 'ngen' number of generations\n",
+ "f_thres = len(target)"
+ ]
+ },
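+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The second condition is handled by `fitness_threshold` from the `search` module. A minimal sketch of its expected behavior follows (named `fitness_threshold_sketch` here to avoid shadowing the real function; treat it as an illustration, not necessarily the module's exact source): it returns the fittest individual when the threshold is met, and `None` otherwise."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def fitness_threshold_sketch(fitness_fn, f_thres, population):\n",
+ "    # no threshold given: never halt early\n",
+ "    if not f_thres:\n",
+ "        return None\n",
+ "    fittest_individual = max(population, key=fitness_fn)\n",
+ "    # return the fittest individual only if it meets the threshold\n",
+ "    if fitness_fn(fittest_individual) >= f_thres:\n",
+ "        return fittest_individual\n",
+ "    return None"
+ ]
+ },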
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "collapsed": true
+ },
+ "source": [
+ "To generate `ngen` number of generations, we run a `for` loop `ngen` number of times. After each generation, we calculate the fitness of the best individual of the generation and compare it to the value of `f_thres` using the `fitness_threshold` function. After every generation, we print out the best individual of the generation and the corresponding fitness value. Lets now write a function to do this."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 69,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "def genetic_algorithm_stepwise(population, fitness_fn, gene_pool=[0, 1], f_thres=None, ngen=1200, pmut=0.1):\n",
+ " for generation in range(ngen):\n",
+ " population = [mutate(recombine(*select(2, population, fitness_fn)), gene_pool, pmut) for i in range(len(population))]\n",
+ " # stores the individual genome with the highest fitness in the current population\n",
+ " current_best = ''.join(max(population, key=fitness_fn))\n",
+ " print(f'Current best: {current_best}\\t\\tGeneration: {str(generation)}\\t\\tFitness: {fitness_fn(current_best)}\\r', end='')\n",
+ " \n",
+ " # compare the fitness of the current best individual to f_thres\n",
+ " fittest_individual = fitness_threshold(fitness_fn, f_thres, population)\n",
+ " \n",
+ " # if fitness is greater than or equal to f_thres, we terminate the algorithm\n",
+ " if fittest_individual:\n",
+ " return fittest_individual, generation\n",
+ " return max(population, key=fitness_fn) , generation "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The function defined above is essentially the same as the one defined in `search.py` with the added functionality of printing out the data of each generation."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 70,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "def genetic_algorithm ( population , fitness_fn , gene_pool = [ 0 , 1 ], f_thres = None , ngen = 1000 , pmut = 0.1 ): \n",
+ " """[Figure 4.8]""" \n",
+ " for i in range ( ngen ): \n",
+ " population = [ mutate ( recombine ( * select ( 2 , population , fitness_fn )), gene_pool , pmut ) \n",
+ " for i in range ( len ( population ))] \n",
+ "\n",
+ " fittest_individual = fitness_threshold ( fitness_fn , f_thres , population ) \n",
+ " if fittest_individual : \n",
+ " return fittest_individual \n",
+ "\n",
+ "\n",
+ " return argmax ( population , key = fitness_fn ) \n",
+ " \n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "psource(genetic_algorithm)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We have defined all the required functions and variables. Let's now create a new population and test the function we wrote above."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 71,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Current best: Genetic Algorithm\t\tGeneration: 985\t\tFitness: 17\r"
+ ]
+ }
+ ],
+ "source": [
+ "population = init_population(max_population, gene_pool, len(target))\n",
+ "solution, generations = genetic_algorithm_stepwise(population, fitness_fn, gene_pool, f_thres, ngen, mutation_rate)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The genetic algorithm was able to converge!\n",
+ "We implore you to rerun the above cell and play around with `target, max_population, f_thres, ngen` etc parameters to get a better intuition of how the algorithm works. To summarize, if we can define the problem states in simple array format and if we can create a fitness function to gauge how good or bad our approximate solutions are, there is a high chance that we can get a satisfactory solution using a genetic algorithm. \n",
+ "- There is also a better GUI version of this program `genetic_algorithm_example.py` in the GUI folder for you to play around with."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Usage\n",
+ "\n",
+ "Below we give two example usages for the genetic algorithm, for a graph coloring problem and the 8 queens problem.\n",
+ "\n",
+ "#### Graph Coloring\n",
+ "\n",
+ "First we will take on the simpler problem of coloring a small graph with two colors. Before we do anything, let's imagine how a solution might look. First, we have to represent our colors. Say, 'R' for red and 'G' for green. These make up our gene pool. What of the individual solutions though? For that, we will look at our problem. We stated we have a graph. A graph has nodes and edges, and we want to color the nodes. Naturally, we want to store each node's color. If we have four nodes, we can store their colors in a list of genes, one for each node. A possible solution will then look like this: ['R', 'R', 'G', 'R']. In the general case, we will represent each solution with a list of chars ('R' and 'G'), with length the number of nodes.\n",
+ "\n",
+ "Next we need to come up with a fitness function that appropriately scores individuals. Again, we will look at the problem definition at hand. We want to color a graph. For a solution to be optimal, no edge should connect two nodes of the same color. How can we use this information to score a solution? A naive (and ineffective) approach would be to count the different colors in the string. So ['R', 'R', 'R', 'R'] has a score of 1 and ['R', 'R', 'G', 'G'] has a score of 2. Why that fitness function is not ideal though? Why, we forgot the information about the edges! The edges are pivotal to the problem and the above function only deals with node colors. We didn't use all the information at hand and ended up with an ineffective answer. How, then, can we use that information to our advantage?\n",
+ "\n",
+ "We said that the optimal solution will have all the edges connecting nodes of different color. So, to score a solution we can count how many edges are valid (aka connecting nodes of different color). That is a great fitness function!\n",
+ "\n",
+ "Let's jump into solving this problem using the `genetic_algorithm` function."
+ ]
+ },
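+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For contrast, here is a minimal sketch of the naive fitness function described above. It only counts the distinct colors in an individual and never consults the edges, which is exactly why it is ineffective:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "def naive_fitness(c):\n",
+ "    # ['R', 'R', 'R', 'R'] scores 1, ['R', 'R', 'G', 'G'] scores 2;\n",
+ "    # the edges of the graph are ignored entirely\n",
+ "    return len(set(c))"
+ ]
+ },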
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "First we need to represent the graph. Since we mostly need information about edges, we will just store the edges. We will denote edges with capital letters and nodes with integers:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 72,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "edges = {\n",
+ " 'A': [0, 1],\n",
+ " 'B': [0, 3],\n",
+ " 'C': [1, 2],\n",
+ " 'D': [2, 3]\n",
+ "}"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Edge 'A' connects nodes 0 and 1, edge 'B' connects nodes 0 and 3 etc.\n",
+ "\n",
+ "We already said our gene pool is 'R' and 'G', so we can jump right into initializing our population. Since we have only four nodes, `state_length` should be 4. For the number of individuals, we will try 8. We can increase this number if we need higher accuracy, but be careful! Larger populations need more computating power and take longer. You need to strike that sweet balance between accuracy and cost (the ultimate dilemma of the programmer!)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 73,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[['R', 'G', 'G', 'G'], ['G', 'R', 'R', 'G'], ['G', 'G', 'G', 'G'], ['G', 'R', 'G', 'G'], ['G', 'G', 'G', 'R'], ['G', 'R', 'R', 'G'], ['G', 'R', 'G', 'G'], ['G', 'G', 'R', 'G']]\n"
+ ]
+ }
+ ],
+ "source": [
+ "population = init_population(8, ['R', 'G'], 4)\n",
+ "print(population)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We created and printed the population. You can see that the genes in the individuals are random and there are 8 individuals each with 4 genes.\n",
+ "\n",
+ "Next we need to write our fitness function. We previously said we want the function to count how many edges are valid. So, given a coloring/individual `c`, we will do just that:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 74,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "def fitness(c):\n",
+ " return sum(c[n1] != c[n2] for (n1, n2) in edges.values())"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Great! Now we will run the genetic algorithm and see what solution it gives."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 75,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "['R', 'G', 'R', 'G']\n"
+ ]
+ }
+ ],
+ "source": [
+ "solution = genetic_algorithm(population, fitness, gene_pool=['R', 'G'])\n",
+ "print(solution)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The algorithm converged to a solution. Let's check its score:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 76,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "4\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(fitness(solution))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The solution has a score of 4. Which means it is optimal, since we have exactly 4 edges in our graph, meaning all are valid!\n",
+ "\n",
+ "*NOTE: Because the algorithm is non-deterministic, there is a chance a different solution is given. It might even be wrong, if we are very unlucky!*"
+ ]
+ },
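+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Since the algorithm is stochastic, a simple way to guard against an unlucky run is to rerun it until an optimal coloring is found. A small sketch (with our graph, the optimal score equals the number of edges):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "while fitness(solution) < len(edges):\n",
+ "    # rerun until every edge connects nodes of different colors\n",
+ "    solution = genetic_algorithm(population, fitness, gene_pool=['R', 'G'])\n",
+ "print(solution, fitness(solution))"
+ ]
+ },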
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Eight Queens\n",
+ "\n",
+ "Let's take a look at a more complicated problem.\n",
+ "\n",
+ "In the *Eight Queens* problem, we are tasked with placing eight queens on an 8x8 chessboard without any queen threatening the others (aka queens should not be in the same row, column or diagonal). In its general form the problem is defined as placing *N* queens in an NxN chessboard without any conflicts.\n",
+ "\n",
+ "First we need to think about the representation of each solution. We can go the naive route of representing the whole chessboard with the queens' placements on it. That is definitely one way to go about it, but for the purpose of this tutorial we will do something different. We have eight queens, so we will have a gene for each of them. The gene pool will be numbers from 0 to 7, for the different columns. The *position* of the gene in the state will denote the row the particular queen is placed in.\n",
+ "\n",
+ "For example, we can have the state \"03304577\". Here the first gene with a value of 0 means \"the queen at row 0 is placed at column 0\", for the second gene \"the queen at row 1 is placed at column 3\" and so forth.\n",
+ "\n",
+ "We now need to think about the fitness function. On the graph coloring problem we counted the valid edges. The same thought process can be applied here. Instead of edges though, we have positioning between queens. If two queens are not threatening each other, we say they are at a \"non-attacking\" positioning. We can, therefore, count how many such positionings are there.\n",
+ "\n",
+ "Let's dive right in and initialize our population:"
+ ]
+ },
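+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As a quick illustration (not part of the algorithm itself), we can decode the example state \"03304577\" into row/column placements:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "state = '03304577'\n",
+ "for row, col in enumerate(state):\n",
+ "    # a gene's position is the row, its value the column\n",
+ "    print('queen at row {} is placed at column {}'.format(row, col))"
+ ]
+ },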
+ {
+ "cell_type": "code",
+ "execution_count": 77,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[[2, 6, 2, 0, 2, 3, 4, 7], [7, 2, 0, 6, 3, 3, 0, 6], [2, 3, 0, 6, 6, 2, 5, 5], [2, 6, 4, 2, 3, 5, 5, 5], [3, 1, 5, 1, 5, 1, 0, 3]]\n"
+ ]
+ }
+ ],
+ "source": [
+ "population = init_population(100, range(8), 8)\n",
+ "print(population[:5])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We have a population of 100 and each individual has 8 genes. The gene pool is the integers from 0 to 7, in string form. Above you can see the first five individuals.\n",
+ "\n",
+ "Next we need to write our fitness function. Remember, queens threaten each other if they are at the same row, column or diagonal.\n",
+ "\n",
+ "Since positionings are mutual, we must take care not to count them twice. Therefore for each queen, we will only check for conflicts for the queens after her.\n",
+ "\n",
+ "A gene's value in an individual `q` denotes the queen's column, and the position of the gene denotes its row. We can check if the aforementioned values between two genes are the same. We also need to check for diagonals. A queen *a* is in the diagonal of another queen, *b*, if the difference of the rows between them is equal to either their difference in columns (for the diagonal on the right of *a*) or equal to the negative difference of their columns (for the left diagonal of *a*). Below is given the fitness function."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 78,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "def fitness(q):\n",
+ " non_attacking = 0\n",
+ " for row1 in range(len(q)):\n",
+ " for row2 in range(row1+1, len(q)):\n",
+ " col1 = int(q[row1])\n",
+ " col2 = int(q[row2])\n",
+ " row_diff = row1 - row2\n",
+ " col_diff = col1 - col2\n",
+ "\n",
+ " if col1 != col2 and row_diff != col_diff and row_diff != -col_diff:\n",
+ " non_attacking += 1\n",
+ "\n",
+ " return non_attacking"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Note that the best score achievable is 28. That is because for each queen we only check for the queens after her. For the first queen we check 7 other queens, for the second queen 6 others and so on. In short, the number of checks we make is the sum 7+6+5+...+1. Which is equal to 7\\*(7+1)/2 = 28.\n",
+ "\n",
+ "Because it is very hard and will take long to find a perfect solution, we will set the fitness threshold at 25. If we find an individual with a score greater or equal to that, we will halt. Let's see how the genetic algorithm will fare."
+ ]
+ },
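+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As that sanity check, we can compute the sum of pairwise checks directly:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "# number of queen pairs checked: 7 + 6 + ... + 1\n",
+ "sum(range(8))"
+ ]
+ },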
+ {
+ "cell_type": "code",
+ "execution_count": 79,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[2, 5, 7, 1, 3, 6, 4, 6]\n",
+ "25\n"
+ ]
+ }
+ ],
+ "source": [
+ "solution = genetic_algorithm(population, fitness, f_thres=25, gene_pool=range(8))\n",
+ "print(solution)\n",
+ "print(fitness(solution))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Above you can see the solution and its fitness score, which should be no less than 25."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "This is where we conclude Genetic Algorithms."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### N-Queens Problem\n",
+ "Here, we will look at the generalized cae of the Eight Queens problem.\n",
+ " \n",
+ "We are given a `N` x `N` chessboard, with `N` queens, and we need to place them in such a way that no two queens can attack each other.\n",
+ " \n",
+ "We will solve this problem using search algorithms.\n",
+ "To do this, we already have a `NQueensProblem` class in `search.py`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 80,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ " Codestin Search App \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "