@@ -99,53 +99,44 @@ def new_program(percept):
99
99
100
100
#______________________________________________________________________________
101
101
102
def TableDrivenAgentProgram(table):
    """Return an agent program that selects its action by looking up the
    entire percept sequence seen so far in *table*, a dictionary of all
    {percept_sequence: action} pairs.  Practical only for tiny domains.
    [Fig. 2.7]"""
    history = []  # every percept received so far, in arrival order

    def program(percept):
        history.append(percept)
        # Unknown sequences fall through to None (no action).
        return table.get(tuple(history))

    return program
116
113
117
def RandomAgentProgram(actions):
    """Return an agent program that ignores every percept and answers
    with a uniformly random member of *actions*."""
    choose = random.choice  # bound once; percepts play no role here

    def program(percept):
        return choose(actions)

    return program
124
119
125
120
#______________________________________________________________________________
126
121
127
def SimpleReflexAgentProgram(rules, interpret_input):
    """Return an agent program that acts on the current percept alone:
    interpret_input abstracts the percept into a state, and the first
    rule in *rules* matching that state supplies the action. [Fig. 2.10]"""
    def program(percept):
        matched = rule_match(interpret_input(percept), rules)
        return matched.action

    return program
138
def ModelBasedReflexAgentProgram(rules, update_state):
    """Return an agent program that maintains internal state: each
    percept is folded into the state via update_state, and the first
    rule matching the new state supplies the action. [Fig. 2.12]"""
    def program(percept):
        # Revise the model from the previous state/action and this percept.
        program.state = update_state(program.state, program.action, percept)
        return rule_match(program.state, rules).action

    # Nothing perceived and no action taken yet.
    program.state = None
    program.action = None
    return program
149
140
150
141
def rule_match (state , rules ):
151
142
"Find the first rule that matches state."
@@ -157,20 +148,10 @@ def rule_match(state, rules):
157
148
158
149
# The two locations for the Vacuum world.
loc_A = (0, 0)
loc_B = (1, 0)
170
151
171
152
def RandomVacuumAgent():
    "Randomly choose one of the actions from the vacuum environment."
    actions = ['Right', 'Left', 'Suck', 'NoOp']
    return Agent(RandomAgentProgram(actions))
174
155
175
156
176
157
def TableDrivenVacuumAgent ():
@@ -186,22 +167,28 @@ def TableDrivenVacuumAgent():
186
167
((loc_A , 'Clean' ), (loc_A , 'Clean' ), (loc_A , 'Dirty' )): 'Suck' ,
187
168
# ...
188
169
}
189
- return TableDrivenAgent ( table )
170
+ return Agent ( TableDrivenAgentProgram ( table ) )
190
171
191
172
192
def ReflexVacuumAgent():
    """A reflex agent for the two-state vacuum environment. [Fig. 2.8]

    Sucks if the current square is dirty, otherwise moves toward the
    other square.  Returns None (no action) for any percept it has no
    rule for."""
    def program(percept):
        # Percepts arrive as (location, status) pairs.  Unpack inside the
        # body rather than in the parameter list: tuple parameters are
        # Python 2-only syntax (removed by PEP 3113); this form runs on
        # both Python 2 and 3.
        location, status = percept
        if status == 'Dirty':
            return 'Suck'
        elif location == loc_A:
            return 'Right'
        elif location == loc_B:
            return 'Left'
    return Agent(program)
194
180
195
def ModelBasedVacuumAgent():
    """An agent that keeps track of what locations are clean or dirty.

    Same as ReflexVacuumAgent, except that once both known squares are
    clean it does 'NoOp' instead of moving."""
    # Last observed status of each square; None until first visited.
    model = {loc_A: None, loc_B: None}

    def program(percept):
        # Unpack (location, status) inside the body: tuple parameters are
        # Python 2-only syntax (removed by PEP 3113); this form runs on
        # both Python 2 and 3.
        location, status = percept
        model[location] = status  # update the model here
        if model[loc_A] == model[loc_B] == 'Clean':
            return 'NoOp'
        elif status == 'Dirty':
            return 'Suck'
        elif location == loc_A:
            return 'Right'
        elif location == loc_B:
            return 'Left'
    return Agent(program)
205
192
206
193
#______________________________________________________________________________
207
194
0 commit comments