Thanks to visit codestin.com
Credit goes to github.com

Skip to content

Commenting Fixes #294

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 18 commits into from
Mar 3, 2017
Merged
20 changes: 10 additions & 10 deletions agents.py
Original file line number Diff line number Diff line change
Expand Up @@ -329,7 +329,7 @@ class Direction():
To change directions:
d = d + "right" or d = d + Direction.R #Both do the same thing
Note that the argument to __add__ must be a string and not a Direction object.
Also, it (the argument) can only be right or left. '''
Also, it (the argument) can only be right or left.'''

R = "right"
L = "left"
Expand Down Expand Up @@ -428,8 +428,8 @@ def default_location(self, thing):
return (random.choice(self.width), random.choice(self.height))

def move_to(self, thing, destination):
'''Move a thing to a new location. Returns True on success or False if there is an Obstacle
If thing is grabbing anything, they move with him '''
'''Move a thing to a new location. Returns True on success or False if there is an Obstacle.
If thing is holding anything, they move with him.'''
thing.bump = self.some_things_at(destination, Obstacle)
if not thing.bump:
thing.location = destination
Expand All @@ -451,7 +451,7 @@ def move_to(self, thing, destination):
def add_thing(self, thing, location=(1, 1), exclude_duplicate_class_items=False):
'''Adds things to the world.
If (exclude_duplicate_class_items) then the item won't be added if the location
has at least one item of the same class'''
has at least one item of the same class.'''
if (self.is_inbounds(location)):
if (exclude_duplicate_class_items and
any(isinstance(t, thing.__class__) for t in self.list_things_at(location))):
Expand Down Expand Up @@ -526,7 +526,7 @@ class Wall(Obstacle):
# Continuous environment

class ContinuousWorld(Environment):
""" Model for Continuous World. """
""" Model for Continuous World."""
def __init__(self, width=10, height=10):
super(ContinuousWorld, self).__init__()
self.width = width
Expand All @@ -538,7 +538,7 @@ def add_obstacle(self, coordinates):

class PolygonObstacle(Obstacle):
def __init__(self, coordinates):
""" Coordinates is a list of tuples. """
""" Coordinates is a list of tuples."""
super(PolygonObstacle, self).__init__()
self.coordinates = coordinates

Expand Down Expand Up @@ -715,7 +715,7 @@ def init_world(self, program):
self.add_thing(Explorer(program), (1, 1), True)

def get_world(self, show_walls=True):
'''returns the items in the world'''
'''Returns the items in the world'''
result = []
x_start, y_start = (0, 0) if show_walls else (1, 1)
x_end, y_end = (self.width, self.height) if show_walls else (self.width - 1, self.height - 1)
Expand Down Expand Up @@ -765,8 +765,8 @@ def percept(self, agent):
return result

def execute_action(self, agent, action):
'''Modify the state of the environment based on the agent's actions
Performance score taken directly out of the book'''
'''Modify the state of the environment based on the agent's actions.
Performance score taken directly out of the book.'''

if isinstance(agent, Explorer) and self.in_danger(agent):
return
Expand Down Expand Up @@ -818,7 +818,7 @@ def in_danger(self, agent):

def is_done(self):
'''The game is over when the Explorer is killed
or if he climbs out of the cave only at (1,1)'''
or if he climbs out of the cave only at (1,1).'''
explorer = [agent for agent in self.agents if isinstance(agent, Explorer) ]
if len(explorer):
if explorer[0].alive:
Expand Down
4 changes: 2 additions & 2 deletions canvas.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,8 @@
class Canvas:
"""Inherit from this class to manage the HTML canvas element in jupyter notebooks.
To create an object of this class any_name_xyz = Canvas("any_name_xyz")
The first argument given must be the name of the object being create
IPython must be able to refernce the variable name that is being passed
The first argument given must be the name of the object being created.
IPython must be able to reference the variable name that is being passed.
"""

def __init__(self, varname, id=None, width=800, height=600):
Expand Down
2 changes: 1 addition & 1 deletion grid.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# OK, the following are not as widely useful utilities as some of the other
# functions here, but they do show up wherever we have 2D grids: Wumpus and
# Vacuum worlds, TicTacToe and Checkers, and markov decision Processes.
# Vacuum worlds, TicTacToe and Checkers, and Markov Decision Processes.
# __________________________________________________________________________
import math

Expand Down
8 changes: 4 additions & 4 deletions learning.py
Original file line number Diff line number Diff line change
Expand Up @@ -499,9 +499,9 @@ def __init__(self, weights=None, inputs=None):

def network(input_units, hidden_layer_sizes, output_units):
"""
Create of Directed Acyclic Network of given number layers
Create a Directed Acyclic Network with the given number of layers.
hidden_layers_sizes : list number of neuron units in each hidden layer
excluding input and output layers.
excluding input and output layers
"""
# Check for PerceptronLearner
if hidden_layer_sizes:
Expand All @@ -523,7 +523,7 @@ def network(input_units, hidden_layer_sizes, output_units):


def BackPropagationLearner(dataset, net, learning_rate, epoches):
"[Figure 18.23] The back-propagation algorithm for multilayer network"
"""[Figure 18.23] The back-propagation algorithm for multilayer network"""
# Initialise weights
for layer in net:
for node in layer:
Expand Down Expand Up @@ -826,7 +826,7 @@ def cross_validation_wrapper(learner, dataset, k=10, trials=1):
"""
Fig 18.8
Return the optimal value of size having minimum error
on validataion set
on the validation set.
err_train: a training error array, indexed by size
err_val: a validation error array, indexed by size
"""
Expand Down
12 changes: 6 additions & 6 deletions logic.py
Original file line number Diff line number Diff line change
Expand Up @@ -670,7 +670,7 @@ def sat_count(sym):

class HybridWumpusAgent(agents.Agent):

"An agent for the wumpus world that does logical inference. [Figure 7.20]"""
"""An agent for the wumpus world that does logical inference. [Figure 7.20]"""

def __init__(self):
raise NotImplementedError
Expand Down Expand Up @@ -789,7 +789,7 @@ def unify(x, y, s):


def is_variable(x):
"A variable is an Expr with no args and a lowercase symbol as the op."
"""A variable is an Expr with no args and a lowercase symbol as the op."""
return isinstance(x, Expr) and not x.args and x.op[0].islower()


Expand Down Expand Up @@ -819,7 +819,7 @@ def occur_check(var, x, s):


def extend(s, var, val):
"Copy the substitution s and extend it by setting var to val; return copy."
"""Copy the substitution s and extend it by setting var to val; return copy."""
s2 = s.copy()
s2[var] = val
return s2
Expand Down Expand Up @@ -932,7 +932,7 @@ def fetch_rules_for_goal(self, goal):

def fol_bc_ask(KB, query):
"""A simple backward-chaining algorithm for first-order logic. [Figure 9.6]
KB should be an instance of FolKB, and query an atomic sentence. """
KB should be an instance of FolKB, and query an atomic sentence."""
return fol_bc_or(KB, query, {})


Expand Down Expand Up @@ -995,7 +995,7 @@ def diff(y, x):


def simp(x):
"Simplify the expression x."
"""Simplify the expression x."""
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm actually not a stickler for triple quotes on short doc strings. But the PEP does suggest triple quotes, and all your other work was very helpful, so I'll agree to this.

if isnumber(x) or not x.args:
return x
args = list(map(simp, x.args))
Expand Down Expand Up @@ -1058,5 +1058,5 @@ def simp(x):


def d(y, x):
"Differentiate and then simplify."
"""Differentiate and then simplify."""
return simp(diff(y, x))
8 changes: 4 additions & 4 deletions mdp.py
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ def T(self, state, action):
(0.1, self.go(state, turn_left(action)))]

def go(self, state, direction):
"Return the state that results from going in this direction."
"""Return the state that results from going in this direction."""
state1 = vector_add(state, direction)
return state1 if state1 in self.states else state

Expand Down Expand Up @@ -110,7 +110,7 @@ def to_arrows(self, policy):


def value_iteration(mdp, epsilon=0.001):
"Solving an MDP by value iteration. [Figure 17.4]"
"""Solving an MDP by value iteration. [Figure 17.4]"""
U1 = {s: 0 for s in mdp.states}
R, T, gamma = mdp.R, mdp.T, mdp.gamma
while True:
Expand All @@ -134,14 +134,14 @@ def best_policy(mdp, U):


def expected_utility(a, s, U, mdp):
"The expected utility of doing a in state s, according to the MDP and U."
"""The expected utility of doing a in state s, according to the MDP and U."""
return sum([p * U[s1] for (p, s1) in mdp.T(s, a)])

# ______________________________________________________________________________


def policy_iteration(mdp):
"Solve an MDP by policy iteration [Figure 17.7]"
"""Solve an MDP by policy iteration [Figure 17.7]"""
U = {s: 0 for s in mdp.states}
pi = {s: random.choice(mdp.actions(s)) for s in mdp.states}
while True:
Expand Down
10 changes: 5 additions & 5 deletions nlp.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ def Lexicon(**rules):
class Grammar:

def __init__(self, name, rules, lexicon):
"A grammar has a set of rules and a lexicon."
"""A grammar has a set of rules and a lexicon."""
self.name = name
self.rules = rules
self.lexicon = lexicon
Expand All @@ -44,11 +44,11 @@ def __init__(self, name, rules, lexicon):
self.categories[word].append(lhs)

def rewrites_for(self, cat):
"Return a sequence of possible rhs's that cat can be rewritten as."
"""Return a sequence of possible rhs's that cat can be rewritten as."""
return self.rules.get(cat, ())

def isa(self, word, cat):
"Return True iff word is of category cat"
"""Return True iff word is of category cat"""
return cat in self.categories[word]

def __repr__(self):
Expand Down Expand Up @@ -293,8 +293,8 @@ def expand_pages( pages ):
return expanded

def relevant_pages(query):
"""relevant pages are pages that contain the query in its entireity.
If a page's content contains the query it is returned by the function"""
"""Relevant pages are pages that contain the query in its entireity.
If a page's content contains the query it is returned by the function."""
relevant = {}
print("pagesContent in function: ", pagesContent)
for addr, page in pagesIndex.items():
Expand Down
16 changes: 8 additions & 8 deletions planning.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,9 @@

class PDLL:
"""
PDLL used to define a search problem
It stores states in a knowledge base consisting of first order logic statements
The conjunction of these logical statements completely define a state
PDLL used to define a search problem.
It stores states in a knowledge base consisting of first order logic statements.
The conjunction of these logical statements completely defines a state.
"""

def __init__(self, initial_state, actions, goal_test):
Expand All @@ -22,7 +22,7 @@ def goal_test(self):

def act(self, action):
"""
Performs the action given as argument
Performs the action given as argument.
Note that action is an Expr like expr('Remove(Glass, Table)') or expr('Eat(Sandwich)')
"""
action_name = action.op
Expand All @@ -36,10 +36,10 @@ def act(self, action):

class Action:
"""
Defines an action schema using preconditions and effects
Use this to describe actions in PDDL
action is an Expr where variables are given as arguments(args)
Precondition and effect are both lists with positive and negated literals
Defines an action schema using preconditions and effects.
Use this to describe actions in PDDL.
action is an Expr where variables are given as arguments(args).
Precondition and effect are both lists with positive and negated literals.
Example:
precond_pos = [expr("Human(person)"), expr("Hungry(Person)")]
precond_neg = [expr("Eaten(food)")]
Expand Down
18 changes: 9 additions & 9 deletions probability.py
Original file line number Diff line number Diff line change
Expand Up @@ -357,7 +357,7 @@ def pointwise_product(factors, bn):


def sum_out(var, factors, bn):
"Eliminate var from all factors by summing over its values."
"""Eliminate var from all factors by summing over its values."""
result, var_factors = [], []
for f in factors:
(var_factors if var in f.variables else result).append(f)
Expand All @@ -367,40 +367,40 @@ def sum_out(var, factors, bn):

class Factor:

"A factor in a joint distribution."
"""A factor in a joint distribution."""

def __init__(self, variables, cpt):
self.variables = variables
self.cpt = cpt

def pointwise_product(self, other, bn):
"Multiply two factors, combining their variables."
"""Multiply two factors, combining their variables."""
variables = list(set(self.variables) | set(other.variables))
cpt = {event_values(e, variables): self.p(e) * other.p(e)
for e in all_events(variables, bn, {})}
return Factor(variables, cpt)

def sum_out(self, var, bn):
"Make a factor eliminating var by summing over its values."
"""Make a factor eliminating var by summing over its values."""
variables = [X for X in self.variables if X != var]
cpt = {event_values(e, variables): sum(self.p(extend(e, var, val))
for val in bn.variable_values(var))
for e in all_events(variables, bn, {})}
return Factor(variables, cpt)

def normalize(self):
"Return my probabilities; must be down to one variable."
"""Return my probabilities; must be down to one variable."""
assert len(self.variables) == 1
return ProbDist(self.variables[0],
{k: v for ((k,), v) in self.cpt.items()})

def p(self, e):
"Look up my value tabulated for e."
"""Look up my value tabulated for e."""
return self.cpt[event_values(e, self.variables)]


def all_events(variables, bn, e):
"Yield every way of extending e with values for all variables."
"""Yield every way of extending e with values for all variables."""
if not variables:
yield e
else:
Expand Down Expand Up @@ -453,7 +453,7 @@ def rejection_sampling(X, e, bn, N):


def consistent_with(event, evidence):
"Is event consistent with the given evidence?"
"""Is event consistent with the given evidence?"""
return all(evidence.get(k, v) == v
for k, v in event.items())

Expand Down Expand Up @@ -527,7 +527,7 @@ def markov_blanket_sample(X, e, bn):

class HiddenMarkovModel:

""" A Hidden markov model which takes Transition model and Sensor model as inputs"""
"""A Hidden markov model which takes Transition model and Sensor model as inputs"""

def __init__(self, transition_model, sensor_model, prior=[0.5, 0.5]):
self.transition_model = transition_model
Expand Down
8 changes: 4 additions & 4 deletions rl.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ def __init__(self, init, actlist, terminals, gamma, states):

def T(self, s, a):
"""Returns a list of tuples with probabilities for states
based on the learnt model P. """
based on the learnt model P."""
return [(prob, res) for (res, prob) in self.P[(s, a)].items()]

def __init__(self, pi, mdp):
Expand Down Expand Up @@ -62,15 +62,15 @@ def __call__(self, percept):

def update_state(self, percept):
''' To be overridden in most cases. The default case
assumes th percept to be of type (state, reward)'''
assumes the percept to be of type (state, reward)'''
return percept


class PassiveTDAgent:
"""The abstract class for a Passive (non-learning) agent that uses
temporal differences to learn utility estimates. Override update_state
method to convert percept to state and reward. The mdp being provided
should be an instance of a subclass of the MDP Class.[Figure 21.4]
should be an instance of a subclass of the MDP Class. [Figure 21.4]
"""

def __init__(self, pi, mdp, alpha=None):
Expand Down Expand Up @@ -106,7 +106,7 @@ def __call__(self, percept):

def update_state(self, percept):
''' To be overridden in most cases. The default case
assumes th percept to be of type (state, reward)'''
assumes the percept to be of type (state, reward)'''
return percept


Expand Down
Loading