

Commit a6e3192

antmarakis authored and norvig committed
Commenting Fixes (aimacode#294)
* Update search.py: commenting issues fixed (spacing and punctuation were off sometimes)
* Update agents.py
* Update canvas.py: grammar
* Update grid.py
* Update learning.py: added period
* Update logic.py: fix quoting
* Update mdp.py: fixed quoting
* Update nlp.py: capitalization and punctuation fixes
* Update planning.py
* Update probability.py
* Update rl.py: 'th' to 'the'
* Update search.py
* Update text.py
* Update utils.py
* Update utils.py
* Update utils.py
* Update learning.py: typo
* Update utils.py
1 parent 9054eef commit a6e3192

File tree

13 files changed: +106 -106 lines changed

agents.py

Lines changed: 10 additions & 10 deletions
@@ -329,7 +329,7 @@ class Direction():
     To change directions:
     d = d + "right" or d = d + Direction.R #Both do the same thing
     Note that the argument to __add__ must be a string and not a Direction object.
-    Also, it (the argument) can only be right or left. '''
+    Also, it (the argument) can only be right or left.'''

     R = "right"
     L = "left"
@@ -428,8 +428,8 @@ def default_location(self, thing):
         return (random.choice(self.width), random.choice(self.height))

     def move_to(self, thing, destination):
-        '''Move a thing to a new location. Returns True on success or False if there is an Obstacle
-        If thing is grabbing anything, they move with him '''
+        '''Move a thing to a new location. Returns True on success or False if there is an Obstacle.
+        If thing is holding anything, they move with him.'''
         thing.bump = self.some_things_at(destination, Obstacle)
         if not thing.bump:
             thing.location = destination
@@ -451,7 +451,7 @@ def move_to(self, thing, destination):
     def add_thing(self, thing, location=(1, 1), exclude_duplicate_class_items=False):
         '''Adds things to the world.
         If (exclude_duplicate_class_items) then the item won't be added if the location
-        has at least one item of the same class'''
+        has at least one item of the same class.'''
         if (self.is_inbounds(location)):
             if (exclude_duplicate_class_items and
                     any(isinstance(t, thing.__class__) for t in self.list_things_at(location))):
@@ -526,7 +526,7 @@ class Wall(Obstacle):
 # Continuous environment

 class ContinuousWorld(Environment):
-    """ Model for Continuous World. """
+    """ Model for Continuous World."""
     def __init__(self, width=10, height=10):
         super(ContinuousWorld, self).__init__()
         self.width = width
@@ -538,7 +538,7 @@ def add_obstacle(self, coordinates):

 class PolygonObstacle(Obstacle):
     def __init__(self, coordinates):
-        """ Coordinates is a list of tuples. """
+        """ Coordinates is a list of tuples."""
         super(PolygonObstacle, self).__init__()
         self.coordinates = coordinates

@@ -715,7 +715,7 @@ def init_world(self, program):
         self.add_thing(Explorer(program), (1, 1), True)

     def get_world(self, show_walls=True):
-        '''returns the items in the world'''
+        '''Returns the items in the world'''
         result = []
         x_start, y_start = (0, 0) if show_walls else (1, 1)
         x_end, y_end = (self.width, self.height) if show_walls else (self.width - 1, self.height - 1)
@@ -765,8 +765,8 @@ def percept(self, agent):
         return result

     def execute_action(self, agent, action):
-        '''Modify the state of the environment based on the agent's actions
-        Performance score taken directly out of the book'''
+        '''Modify the state of the environment based on the agent's actions.
+        Performance score taken directly out of the book.'''

         if isinstance(agent, Explorer) and self.in_danger(agent):
             return
@@ -818,7 +818,7 @@ def in_danger(self, agent):

     def is_done(self):
         '''The game is over when the Explorer is killed
-        or if he climbs out of the cave only at (1,1)'''
+        or if he climbs out of the cave only at (1,1).'''
         explorer = [agent for agent in self.agents if isinstance(agent, Explorer) ]
         if len(explorer):
             if explorer[0].alive:
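For reference, a minimal usage sketch of the Direction semantics documented in the first hunk (assuming the repository's agents module is importable; the starting heading is illustrative):

from agents import Direction

d = Direction(Direction.R)   # a Direction facing right (assumed constructor)
d = d + "left"               # the argument must be the string "right" or "left"
d = d + Direction.L          # equivalent, since Direction.L is just the string "left"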

canvas.py

Lines changed: 2 additions & 2 deletions
@@ -12,8 +12,8 @@
 class Canvas:
     """Inherit from this class to manage the HTML canvas element in jupyter notebooks.
     To create an object of this class any_name_xyz = Canvas("any_name_xyz")
-    The first argument given must be the name of the object being create
-    IPython must be able to refernce the variable name that is being passed
+    The first argument given must be the name of the object being created.
+    IPython must be able to refernce the variable name that is being passed.
     """

     def __init__(self, varname, id=None, width=800, height=600):
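A minimal sketch of the naming rule this hunk clarifies, intended for a Jupyter notebook (the subclass and variable names are illustrative): the Python variable name must match the string passed in, so IPython can reference the object from the generated JavaScript.

from canvas import Canvas

class MyCanvas(Canvas):
    pass

my_canvas = MyCanvas("my_canvas", width=800, height=600)  # variable name matches varname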

grid.py

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 # OK, the following are not as widely useful utilities as some of the other
 # functions here, but they do show up wherever we have 2D grids: Wumpus and
-# Vacuum worlds, TicTacToe and Checkers, and markov decision Processes.
+# Vacuum worlds, TicTacToe and Checkers, and Markov Decision Processes.
 # __________________________________________________________________________
 import math

learning.py

Lines changed: 4 additions & 4 deletions
@@ -499,9 +499,9 @@ def __init__(self, weights=None, inputs=None):

 def network(input_units, hidden_layer_sizes, output_units):
     """
-    Create of Directed Acyclic Network of given number layers
+    Create Directed Acyclic Network of given number layers.
     hidden_layers_sizes : list number of neuron units in each hidden layer
-    excluding input and output layers.
+    excluding input and output layers
     """
     # Check for PerceptronLearner
     if hidden_layer_sizes:
@@ -523,7 +523,7 @@ def network(input_units, hidden_layer_sizes, output_units):


 def BackPropagationLearner(dataset, net, learning_rate, epoches):
-    "[Figure 18.23] The back-propagation algorithm for multilayer network"
+    """[Figure 18.23] The back-propagation algorithm for multilayer network"""
     # Initialise weights
     for layer in net:
         for node in layer:
@@ -826,7 +826,7 @@ def cross_validation_wrapper(learner, dataset, k=10, trials=1):
     """
     Fig 18.8
     Return the optimal value of size having minimum error
-    on validataion set
+    on validataion set.
     err_train: a training error array, indexed by size
     err_val: a validataion error array, indexed by size
     """

logic.py

Lines changed: 6 additions & 6 deletions
@@ -670,7 +670,7 @@ def sat_count(sym):

 class HybridWumpusAgent(agents.Agent):

-    "An agent for the wumpus world that does logical inference. [Figure 7.20]"""
+    """An agent for the wumpus world that does logical inference. [Figure 7.20]"""

     def __init__(self):
         raise NotImplementedError
@@ -789,7 +789,7 @@ def unify(x, y, s):


 def is_variable(x):
-    "A variable is an Expr with no args and a lowercase symbol as the op."
+    """A variable is an Expr with no args and a lowercase symbol as the op."""
     return isinstance(x, Expr) and not x.args and x.op[0].islower()


@@ -819,7 +819,7 @@ def occur_check(var, x, s):


 def extend(s, var, val):
-    "Copy the substitution s and extend it by setting var to val; return copy."
+    """Copy the substitution s and extend it by setting var to val; return copy."""
     s2 = s.copy()
     s2[var] = val
     return s2
@@ -932,7 +932,7 @@ def fetch_rules_for_goal(self, goal):

 def fol_bc_ask(KB, query):
     """A simple backward-chaining algorithm for first-order logic. [Figure 9.6]
-    KB should be an instance of FolKB, and query an atomic sentence. """
+    KB should be an instance of FolKB, and query an atomic sentence."""
     return fol_bc_or(KB, query, {})


@@ -995,7 +995,7 @@ def diff(y, x):


 def simp(x):
-    "Simplify the expression x."
+    """Simplify the expression x."""
     if isnumber(x) or not x.args:
         return x
     args = list(map(simp, x.args))
@@ -1058,5 +1058,5 @@ def simp(x):


 def d(y, x):
-    "Differentiate and then simplify."
+    """Differentiate and then simplify."""
     return simp(diff(y, x))
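The extend() behavior shown in this diff, demonstrated stand-alone; the original substitution is copied, never mutated:

def extend(s, var, val):
    """Copy the substitution s and extend it by setting var to val; return copy."""
    s2 = s.copy()
    s2[var] = val
    return s2

s = {'x': 1}
s2 = extend(s, 'y', 2)
assert s == {'x': 1} and s2 == {'x': 1, 'y': 2}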

mdp.py

Lines changed: 4 additions & 4 deletions
@@ -80,7 +80,7 @@ def T(self, state, action):
                 (0.1, self.go(state, turn_left(action)))]

     def go(self, state, direction):
-        "Return the state that results from going in this direction."
+        """Return the state that results from going in this direction."""
         state1 = vector_add(state, direction)
         return state1 if state1 in self.states else state

@@ -110,7 +110,7 @@ def to_arrows(self, policy):


 def value_iteration(mdp, epsilon=0.001):
-    "Solving an MDP by value iteration. [Figure 17.4]"
+    """Solving an MDP by value iteration. [Figure 17.4]"""
     U1 = {s: 0 for s in mdp.states}
     R, T, gamma = mdp.R, mdp.T, mdp.gamma
     while True:
@@ -134,14 +134,14 @@ def best_policy(mdp, U):


 def expected_utility(a, s, U, mdp):
-    "The expected utility of doing a in state s, according to the MDP and U."
+    """The expected utility of doing a in state s, according to the MDP and U."""
     return sum([p * U[s1] for (p, s1) in mdp.T(s, a)])

 # ______________________________________________________________________________


 def policy_iteration(mdp):
-    "Solve an MDP by policy iteration [Figure 17.7]"
+    """Solve an MDP by policy iteration [Figure 17.7]"""
     U = {s: 0 for s in mdp.states}
     pi = {s: random.choice(mdp.actions(s)) for s in mdp.states}
     while True:
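A stand-alone sketch of the value-iteration loop behind [Figure 17.4], matching the Bellman update that value_iteration() applies (the two-state chain MDP here is illustrative, not from the source):

def value_iteration(states, R, T, actions, gamma=0.9, epsilon=0.001):
    """Repeat the Bellman update until the largest utility change is small."""
    U1 = {s: 0 for s in states}
    while True:
        U, delta = U1.copy(), 0
        for s in states:
            U1[s] = R(s) + gamma * max(sum(p * U[s1] for (p, s1) in T(s, a))
                                       for a in actions(s))
            delta = max(delta, abs(U1[s] - U[s]))
        if delta < epsilon * (1 - gamma) / gamma:
            return U

# Toy chain: from 'a' the action 'go' reaches the rewarding state 'b'.
U = value_iteration(states=['a', 'b'],
                    R=lambda s: 1 if s == 'b' else 0,
                    T=lambda s, a: [(1.0, 'b' if a == 'go' else s)],
                    actions=lambda s: ['stay', 'go'])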

nlp.py

Lines changed: 5 additions & 5 deletions
@@ -34,7 +34,7 @@ def Lexicon(**rules):
 class Grammar:

     def __init__(self, name, rules, lexicon):
-        "A grammar has a set of rules and a lexicon."
+        """A grammar has a set of rules and a lexicon."""
         self.name = name
         self.rules = rules
         self.lexicon = lexicon
@@ -44,11 +44,11 @@ def __init__(self, name, rules, lexicon):
                 self.categories[word].append(lhs)

     def rewrites_for(self, cat):
-        "Return a sequence of possible rhs's that cat can be rewritten as."
+        """Return a sequence of possible rhs's that cat can be rewritten as."""
         return self.rules.get(cat, ())

     def isa(self, word, cat):
-        "Return True iff word is of category cat"
+        """Return True iff word is of category cat"""
         return cat in self.categories[word]

     def __repr__(self):
@@ -293,8 +293,8 @@ def expand_pages( pages ):
     return expanded

 def relevant_pages(query):
-    """relevant pages are pages that contain the query in its entireity.
-    If a page's content contains the query it is returned by the function"""
+    """Relevant pages are pages that contain the query in its entireity.
+    If a page's content contains the query it is returned by the function."""
     relevant = {}
     print("pagesContent in function: ", pagesContent)
     for addr, page in pagesIndex.items():
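A minimal sketch of the Grammar queries documented in these hunks (assuming the repository's nlp module; the toy rules and lexicon are illustrative):

from nlp import Grammar, Rules, Lexicon

g = Grammar('Toy',
            Rules(S='NP VP'),
            Lexicon(NP='I | you', VP='sleep | eat'))
print(g.rewrites_for('S'))   # the possible right-hand sides for S
print(g.isa('sleep', 'VP'))  # True iff 'sleep' is of category VP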

planning.py

Lines changed: 8 additions & 8 deletions
@@ -7,9 +7,9 @@

 class PDLL:
     """
-    PDLL used to define a search problem
-    It stores states in a knowledge base consisting of first order logic statements
-    The conjunction of these logical statements completely define a state
+    PDLL used to define a search problem.
+    It stores states in a knowledge base consisting of first order logic statements.
+    The conjunction of these logical statements completely defines a state.
     """

     def __init__(self, initial_state, actions, goal_test):
@@ -22,7 +22,7 @@ def goal_test(self):

     def act(self, action):
         """
-        Performs the action given as argument
+        Performs the action given as argument.
         Note that action is an Expr like expr('Remove(Glass, Table)') or expr('Eat(Sandwich)')
         """
         action_name = action.op
@@ -36,10 +36,10 @@ def act(self, action):

 class Action:
     """
-    Defines an action schema using preconditions and effects
-    Use this to describe actions in PDDL
-    action is an Expr where variables are given as arguments(args)
-    Precondition and effect are both lists with positive and negated literals
+    Defines an action schema using preconditions and effects.
+    Use this to describe actions in PDDL.
+    action is an Expr where variables are given as arguments(args).
+    Precondition and effect are both lists with positive and negated literals.
     Example:
     precond_pos = [expr("Human(person)"), expr("Hungry(Person)")]
     precond_neg = [expr("Eaten(food)")]
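Completing the docstring example above into a full schema; the effect lists and the Action(...) call below follow the same pattern but are a sketch, not quoted from this diff (the expr import location is an assumption):

from utils import expr   # assumed location of expr
from planning import Action

precond_pos = [expr("Human(person)"), expr("Hungry(Person)")]
precond_neg = [expr("Eaten(food)")]
effect_add = [expr("Eaten(food)")]    # illustrative effect lists
effect_rem = [expr("Hungry(person)")]
eat = Action(expr("Eat(person, food)"),
             [precond_pos, precond_neg],
             [effect_add, effect_rem])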

probability.py

Lines changed: 9 additions & 9 deletions
@@ -357,7 +357,7 @@ def pointwise_product(factors, bn):


 def sum_out(var, factors, bn):
-    "Eliminate var from all factors by summing over its values."
+    """Eliminate var from all factors by summing over its values."""
     result, var_factors = [], []
     for f in factors:
         (var_factors if var in f.variables else result).append(f)
@@ -367,40 +367,40 @@ def sum_out(var, factors, bn):

 class Factor:

-    "A factor in a joint distribution."
+    """A factor in a joint distribution."""

     def __init__(self, variables, cpt):
         self.variables = variables
         self.cpt = cpt

     def pointwise_product(self, other, bn):
-        "Multiply two factors, combining their variables."
+        """Multiply two factors, combining their variables."""
         variables = list(set(self.variables) | set(other.variables))
         cpt = {event_values(e, variables): self.p(e) * other.p(e)
                for e in all_events(variables, bn, {})}
         return Factor(variables, cpt)

     def sum_out(self, var, bn):
-        "Make a factor eliminating var by summing over its values."
+        """Make a factor eliminating var by summing over its values."""
         variables = [X for X in self.variables if X != var]
         cpt = {event_values(e, variables): sum(self.p(extend(e, var, val))
                                                for val in bn.variable_values(var))
                for e in all_events(variables, bn, {})}
         return Factor(variables, cpt)

     def normalize(self):
-        "Return my probabilities; must be down to one variable."
+        """Return my probabilities; must be down to one variable."""
         assert len(self.variables) == 1
         return ProbDist(self.variables[0],
                         {k: v for ((k,), v) in self.cpt.items()})

     def p(self, e):
-        "Look up my value tabulated for e."
+        """Look up my value tabulated for e."""
         return self.cpt[event_values(e, self.variables)]


 def all_events(variables, bn, e):
-    "Yield every way of extending e with values for all variables."
+    """Yield every way of extending e with values for all variables."""
     if not variables:
         yield e
     else:
@@ -453,7 +453,7 @@ def rejection_sampling(X, e, bn, N):


 def consistent_with(event, evidence):
-    "Is event consistent with the given evidence?"
+    """Is event consistent with the given evidence?"""
     return all(evidence.get(k, v) == v
                for k, v in event.items())

@@ -527,7 +527,7 @@ def markov_blanket_sample(X, e, bn):

 class HiddenMarkovModel:

-    """ A Hidden markov model which takes Transition model and Sensor model as inputs"""
+    """A Hidden markov model which takes Transition model and Sensor model as inputs"""

     def __init__(self, transition_model, sensor_model, prior=[0.5, 0.5]):
         self.transition_model = transition_model
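The consistent_with() predicate from this diff, shown stand-alone: an event is consistent when it agrees with the evidence on every variable they share.

def consistent_with(event, evidence):
    """Is event consistent with the given evidence?"""
    return all(evidence.get(k, v) == v
               for k, v in event.items())

assert consistent_with({'A': True, 'B': False}, {'A': True})
assert not consistent_with({'A': True, 'B': False}, {'B': True})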

rl.py

Lines changed: 4 additions & 4 deletions
@@ -24,7 +24,7 @@ def __init__(self, init, actlist, terminals, gamma, states):

     def T(self, s, a):
         """Returns a list of tuples with probabilities for states
-        based on the learnt model P. """
+        based on the learnt model P."""
         return [(prob, res) for (res, prob) in self.P[(s, a)].items()]

     def __init__(self, pi, mdp):
@@ -62,15 +62,15 @@ def __call__(self, percept):

     def update_state(self, percept):
         ''' To be overridden in most cases. The default case
-        assumes th percept to be of type (state, reward)'''
+        assumes the percept to be of type (state, reward)'''
         return percept


 class PassiveTDAgent:
     """The abstract class for a Passive (non-learning) agent that uses
     temporal differences to learn utility estimates. Override update_state
     method to convert percept to state and reward. The mdp being provided
-    should be an instance of a subclass of the MDP Class.[Figure 21.4]
+    should be an instance of a subclass of the MDP Class. [Figure 21.4]
     """

     def __init__(self, pi, mdp, alpha=None):
@@ -106,7 +106,7 @@ def __call__(self, percept):

     def update_state(self, percept):
         ''' To be overridden in most cases. The default case
-        assumes th percept to be of type (state, reward)'''
+        assumes the percept to be of type (state, reward)'''
         return percept

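A stand-alone sketch of the temporal-difference update that PassiveTDAgent learns with ([Figure 21.4]); the alpha and gamma values and the toy states are illustrative:

def td_update(U, s, s1, r, alpha=0.1, gamma=0.9):
    """Nudge U[s] toward the TD target r + gamma * U[s1]."""
    U[s] += alpha * (r + gamma * U[s1] - U[s])

U = {'s0': 0.0, 's1': 1.0}
td_update(U, 's0', 's1', r=-0.04)  # U['s0'] moves toward -0.04 + 0.9 * 1.0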
