diff --git a/agents.py b/agents.py index 746e83978..4c7801430 100644 --- a/agents.py +++ b/agents.py @@ -35,13 +35,13 @@ # # Speed control in GUI does not have any effect -- fix it. -from utils import * +from utils import * # noqa import random import copy import collections -#______________________________________________________________________________ +# ______________________________________________________________________________ class Thing(object): @@ -51,7 +51,8 @@ class Thing(object): .__name__ slot (used for output only).""" def __repr__(self): - return '<{}>'.format(getattr(self, '__name__', self.__class__.__name__)) + return '<{}>'.format(getattr(self, '__name__', + self.__class__.__name__)) def is_alive(self): "Things that are 'alive' should return true." @@ -108,7 +109,7 @@ def new_program(percept): agent.program = new_program return agent -#______________________________________________________________________________ +# ______________________________________________________________________________ def TableDrivenAgentProgram(table): @@ -129,7 +130,7 @@ def RandomAgentProgram(actions): "An agent that chooses an action at random, ignoring all percepts." return lambda percept: random.choice(actions) -#______________________________________________________________________________ +# ______________________________________________________________________________ def SimpleReflexAgentProgram(rules, interpret_input): @@ -159,7 +160,7 @@ def rule_match(state, rules): if rule.matches(state): return rule -#______________________________________________________________________________ +# ______________________________________________________________________________ loc_A, loc_B = (0, 0), (1, 0) # The two locations for the Vacuum world @@ -214,7 +215,7 @@ def program(location, status): return 'Left' return Agent(program) -#______________________________________________________________________________ +# ______________________________________________________________________________ class Environment(object): @@ -237,7 +238,10 @@ def thing_classes(self): return [] # List of classes that can go into environment def percept(self, agent): - "Return the percept that the agent sees at this point. (Implement this.)" + ''' + Return the percept that the agent sees at this point. + (Implement this.) + ''' raise NotImplementedError def execute_action(self, agent, action): @@ -305,7 +309,8 @@ def delete_thing(self, thing): except(ValueError, e): print(e) print(" in Environment delete_thing") - print(" Thing to be removed: {} at {}" .format(thing, thing.location)) + print(" Thing to be removed: {} at {}" .format(thing, + thing.location)) print(" from list: {}" .format([(thing, thing.location) for thing in self.things])) if thing in self.agents: @@ -419,7 +424,7 @@ class Obstacle(Thing): class Wall(Obstacle): pass -#______________________________________________________________________________ +# ______________________________________________________________________________ # Vacuum environment @@ -502,7 +507,7 @@ def default_location(self, thing): "Agents start in either location at random." return random.choice([loc_A, loc_B]) -#______________________________________________________________________________ +# ______________________________________________________________________________ # The Wumpus World @@ -538,7 +543,7 @@ def thing_classes(self): # Needs a lot of work ... 
-#______________________________________________________________________________ +# ______________________________________________________________________________ def compare_agents(EnvFactory, AgentFactories, n=10, steps=1000): """See how well each of several agents do in n instances of an environment. @@ -559,7 +564,7 @@ def score(env): return agent.performance return mean(list(map(score, envs))) -#_________________________________________________________________________ +# _________________________________________________________________________ __doc__ += """ >>> a = ReflexVacuumAgent() @@ -590,4 +595,3 @@ def score(env): >>> 0.5 < testv(RandomVacuumAgent) < 3 True """ - diff --git a/csp.py b/csp.py index 94469a935..c671f2f26 100644 --- a/csp.py +++ b/csp.py @@ -1,11 +1,14 @@ """CSP (Constraint Satisfaction Problems) problems and solvers. (Chapter 6).""" -from utils import * +from utils import * # noqa import search from collections import defaultdict from functools import reduce +import itertools +import re + class CSP(search.Problem): @@ -44,7 +47,8 @@ class CSP(search.Problem): display(a) Print a human-readable representation >>> search.depth_first_graph_search(australia) - + """ def __init__(self, vars, domains, neighbors, constraints): @@ -70,8 +74,8 @@ def nconflicts(self, var, val, assignment): "Return the number of conflicts var=val has with other variables." # Subclasses may implement this more efficiently def conflict(var2): - return (var2 in assignment - and not self.constraints(var, val, var2, assignment[var2])) + return (var2 in assignment and + not self.constraints(var, val, var2, assignment[var2])) return count_if(conflict, self.neighbors[var]) def display(self, assignment): @@ -149,7 +153,7 @@ def conflicted_vars(self, current): return [var for var in self.vars if self.nconflicts(var, current[var], current) > 0] -#______________________________________________________________________________ +# ______________________________________________________________________________ # Constraint Propagation with AC-3 @@ -180,7 +184,7 @@ def revise(csp, Xi, Xj, removals): revised = True return revised -#______________________________________________________________________________ +# ______________________________________________________________________________ # CSP Backtracking Search # Variable ordering @@ -251,17 +255,21 @@ def backtracking_search(csp, """[Fig. 
6.5] >>> backtracking_search(australia) is not None True -    >>> backtracking_search(australia, select_unassigned_variable=mrv) is not None +    >>> backtracking_search(australia, +    ...                     select_unassigned_variable=mrv) is not None True -    >>> backtracking_search(australia, order_domain_values=lcv) is not None +    >>> backtracking_search(australia, +    ...                     order_domain_values=lcv) is not None True -    >>> backtracking_search(australia, select_unassigned_variable=mrv, order_domain_values=lcv) is not None +    >>> backtracking_search(australia, select_unassigned_variable=mrv, +    ...                     order_domain_values=lcv) is not None True >>> backtracking_search(australia, inference=forward_checking) is not None True >>> backtracking_search(australia, inference=mac) is not None True -    >>> backtracking_search(usa, select_unassigned_variable=mrv, order_domain_values=lcv, inference=mac) is not None +    >>> backtracking_search(usa, select_unassigned_variable=mrv, +    ...                     order_domain_values=lcv, inference=mac) is not None True """ @@ -285,7 +293,7 @@ def backtrack(assignment): assert result is None or csp.goal_test(result) return result -#______________________________________________________________________________ +# ______________________________________________________________________________ # Min-conflicts hillclimbing search for CSPs @@ -313,12 +321,11 @@ def min_conflicts_value(csp, var, current): return argmin_random_tie(csp.domains[var], lambda val: csp.nconflicts(var, val, current)) -#______________________________________________________________________________ +# ______________________________________________________________________________ def tree_csp_solver(csp): "[Fig. 6.11]" -    n = len(csp.vars) assignment = {} root = csp.vars[0] X, parent = topological_sort(csp.vars, root) @@ -339,7 +346,7 @@ def topological_sort(xs, x): def make_arc_consistent(Xj, Xk, csp): unimplemented() -#______________________________________________________________________________ +# ______________________________________________________________________________ # Map-Coloring Problems @@ -417,7 +424,7 @@ def parse_neighbors(neighbors, vars=[]): PI; PA: LR RA; PC: PL CE LI AQ; PI: NH NO CA IF; PL: BR NB CE PC; RA: AU BO FC PA LR""") -#______________________________________________________________________________ +# ______________________________________________________________________________ # n-Queens Problem @@ -507,17 +514,14 @@ def display(self, assignment): print(str(self.nconflicts(var, val, assignment))+ch, end=' ') print() -#______________________________________________________________________________ +# ______________________________________________________________________________ # Sudoku -import itertools -import re - def flatten(seqs): return sum(seqs, []) -easy1 = '..3.2.6..9..3.5..1..18.64....81.29..7.......8..67.82....26.95..8..2.3..9..5.1.3..' +easy1 = '..3.2.6..9..3.5..1..18.64....81.29..7.......8..67.82....26.95..8..2.3..9..5.1.3..'  # noqa -harder1 = '4173698.5.3..........7......2.....6.....8.4......1.......6.3.7.5..2.....1.4......' +harder1 = '4173698.5.3..........7......2.....6.....8.4......1.......6.3.7.5..2.....1.4......'  # noqa _R3 = list(range(3)) _CELL = itertools.count().__next__ @@ -531,6 +535,7 @@ def flatten(seqs): return sum(seqs, []) for v in unit: _NEIGHBORS[v].update(unit - set([v])) + class Sudoku(CSP): """A Sudoku problem. 
@@ -564,7 +569,8 @@ class Sudoku(CSP): 8 1 4 | 2 5 3 | 7 6 9 6 9 5 | 4 1 7 | 3 8 2 >>> h = Sudoku(harder1) -    >>> None != backtracking_search(h, select_unassigned_variable=mrv, inference=forward_checking) +    >>> None != backtracking_search(h, select_unassigned_variable=mrv, +    ...                             inference=forward_checking) True """ R3 = _R3 @@ -596,8 +602,9 @@ def show_cell(cell): return str(assignment.get(cell, '.')) def abut(lines1, lines2): return list( map(' | '.join, list(zip(lines1, lines2)))) print('\n------+-------+------\n'.join( -        '\n'.join(reduce(abut, list(map(show_box, brow)))) for brow in self.bgrid)) -#______________________________________________________________________________ +        '\n'.join(reduce( +            abut, list(map(show_box, brow)))) for brow in self.bgrid)) +# ______________________________________________________________________________ # The Zebra Puzzle diff --git a/games.py b/games.py index fd3ebc4f0..303ffd47e 100644 --- a/games.py +++ b/games.py @@ -1,11 +1,11 @@ """Games, or Adversarial Search. (Chapter 5) """ -from utils import * +from utils import *  # noqa import random -#______________________________________________________________________________ +# ______________________________________________________________________________ # Minimax Search @@ -35,7 +35,7 @@ def min_value(state): return argmax(game.actions(state), lambda a: min_value(game.result(state, a))) -#______________________________________________________________________________ +# ______________________________________________________________________________ def alphabeta_full_search(state, game): @@ -105,11 +105,12 @@ def min_value(state, alpha, beta, depth): # Body of alphabeta_search starts here: # The default test cuts off at depth d or at a terminal state cutoff_test = (cutoff_test or -                   (lambda state, depth: depth > d or game.terminal_test(state))) +                   (lambda state, depth: depth > d or +                    game.terminal_test(state))) eval_fn = eval_fn or (lambda state: game.utility(state, player)) return max_value(state, -infinity, infinity, 0) -#______________________________________________________________________________ +# ______________________________________________________________________________ # Players for Games @@ -141,7 +142,7 @@ def play_game(game, *players): if game.terminal_test(state): return game.utility(state, game.to_move(game.initial)) -#______________________________________________________________________________ +# ______________________________________________________________________________ # Some Sample Games diff --git a/learning.py b/learning.py index 5812753bc..b6741d1e8 100644 --- a/learning.py +++ b/learning.py @@ -1,6 +1,6 @@ """Learn to estimate functions from examples. (Chapters 18-20)""" -from utils import * +from utils import *  # noqa import copy import heapq @@ -8,7 +8,7 @@ import random from collections import defaultdict -#______________________________________________________________________________ +# ______________________________________________________________________________ def rms_error(predictions, targets): @@ -26,29 +26,29 @@ def mean_error(predictions, targets): def mean_boolean_error(predictions, targets): return mean([(p != t) for p, t in zip(predictions, targets)]) -#______________________________________________________________________________ +# ______________________________________________________________________________ class DataSet: """A data set for a machine learning problem. It has the following fields: - d.examples A list of examples. 
Each one is a list of attribute values. - d.attrs A list of integers to index into an example, so example[attr] - gives a value. Normally the same as range(len(d.examples[0])). - d.attrnames Optional list of mnemonic names for corresponding attrs. - d.target The attribute that a learning algorithm will try to predict. - By default the final attribute. - d.inputs The list of attrs without the target. - d.values A list of lists: each sublist is the set of possible - values for the corresponding attribute. If initially None, - it is computed from the known examples by self.setproblem. - If not None, an erroneous value raises ValueError. - d.distance A function from a pair of examples to a nonnegative number. - Should be symmetric, etc. Defaults to mean_boolean_error - since that can handle any field types. - d.name Name of the data set (for output display only). - d.source URL or other source where the data came from. + d.examples A list of examples. Each one is a list of attribute values. + d.attrs A list of integers to index into an example, so example[attr] + gives a value. Normally the same as range(len(d.examples[0])). + d.attrnames Optional list of mnemonic names for corresponding attrs. + d.target The attribute that a learning algorithm will try to predict. + By default the final attribute. + d.inputs The list of attrs without the target. + d.values A list of lists: each sublist is the set of possible + values for the corresponding attribute. If initially None, + it is computed from the known examples by self.setproblem. + If not None, an erroneous value raises ValueError. + d.distance A function from a pair of examples to a nonnegative number. + Should be symmetric, etc. Defaults to mean_boolean_error + since that can handle any field types. + d.name Name of the data set (for output display only). + d.source URL or other source where the data came from. Normally, you call the constructor and you're done; then you just access fields like d.examples and d.target and d.inputs.""" @@ -68,7 +68,7 @@ def __init__(self, examples=None, attrs=None, attrnames=None, target=-1, if isinstance(examples, str): self.examples = parse_csv(examples) elif examples is None: - self.examples = parse_csv(DataFile(name+'.csv').read()) + self.examples = parse_csv(DataFile(name + '.csv').read()) else: self.examples = examples # Attrs are the indices of examples, unless otherwise stated. @@ -138,7 +138,7 @@ def __repr__(self): return '' % ( self.name, len(self.examples), len(self.attrs)) -#______________________________________________________________________________ +# ______________________________________________________________________________ def parse_csv(input, delim=','): @@ -152,7 +152,7 @@ def parse_csv(input, delim=','): lines = [line for line in input.splitlines() if line.strip()] return [list(map(num_or_str, line.split(delim))) for line in lines] -#______________________________________________________________________________ +# ______________________________________________________________________________ class CountingProbDist: @@ -196,7 +196,8 @@ def __getitem__(self, item): def top(self, n): "Return (count, obs) tuples for the n most frequent observations." - return heapq.nlargest(n, [(v, k) for (k, v) in list(self.dictionary.items())]) + return heapq.nlargest( + n, [(v, k) for (k, v) in list(self.dictionary.items())]) def sample(self): "Return a random sample from the distribution." 
@@ -205,7 +206,7 @@ def sample(self): list(self.dictionary.values())) return self.sampler() -#______________________________________________________________________________ +# ______________________________________________________________________________ def PluralityLearner(dataset): @@ -218,7 +219,7 @@ def predict(example): return most_popular return predict -#______________________________________________________________________________ +# ______________________________________________________________________________ def NaiveBayesLearner(dataset): @@ -241,14 +242,14 @@ def predict(example): """Predict the target value for example. Consider each possible value, and pick the most likely by looking at each attribute independently.""" def class_probability(targetval): - return (target_dist[targetval] - * product(attr_dists[targetval, attr][example[attr]] - for attr in dataset.inputs)) + return (target_dist[targetval] * + product(attr_dists[targetval, attr][example[attr]] + for attr in dataset.inputs)) return argmax(targetvals, class_probability) return predict -#______________________________________________________________________________ +# ______________________________________________________________________________ def NearestNeighborLearner(dataset, k=1): @@ -260,12 +261,12 @@ def predict(example): return mode(e[dataset.target] for (d, e) in best) return predict -#______________________________________________________________________________ +# ______________________________________________________________________________ class DecisionFork: - """A fork of a decision tree holds an attribute to test, and a dict + """A fork of a decision tree holds an attribute to test, and a dict of branches, one for each of the attribute's values.""" def __init__(self, attr, attrname=None, branches=None): @@ -286,8 +287,8 @@ def display(self, indent=0): name = self.attrname print('Test', name) for (val, subtree) in list(self.branches.items()): - print(' '*4*indent, name, '=', val, '==>', end=' ') - subtree.display(indent+1) + print(' ' * 4 * indent, name, '=', val, '==>', end=' ') + subtree.display(indent + 1) def __repr__(self): return ('DecisionFork(%r, %r, %r)' @@ -310,7 +311,7 @@ def display(self, indent=0): def __repr__(self): return repr(self.result) -#______________________________________________________________________________ +# ______________________________________________________________________________ def DecisionTreeLearner(dataset): @@ -377,7 +378,7 @@ def information_content(values): probabilities = normalize(removeall(0, values)) return sum(-p * log2(p) for p in probabilities) -#______________________________________________________________________________ +# ______________________________________________________________________________ # A decision list is implemented as a list of (test, value) pairs. 
@@ -411,14 +412,14 @@ def predict(example): return predict -#______________________________________________________________________________ +# ______________________________________________________________________________ def NeuralNetLearner(dataset, sizes): """Layered feed-forward network.""" - activations = [[0.0 for i in range(n)] for n in sizes] - weights = [] + activations = [[0.0 for i in range(n)] for n in sizes] # noqa + weights = [] # noqa def predict(example): unimplemented() @@ -438,13 +439,13 @@ def PerceptronLearner(dataset, sizes): def predict(example): return sum([]) unimplemented() -#______________________________________________________________________________ +# ______________________________________________________________________________ def Linearlearner(dataset): """Fit a linear model to the data.""" unimplemented() -#______________________________________________________________________________ +# ______________________________________________________________________________ def EnsembleLearner(learners): @@ -457,7 +458,7 @@ def predict(example): return predict return train -#______________________________________________________________________________ +# ______________________________________________________________________________ def AdaBoost(L, K): @@ -465,8 +466,8 @@ def AdaBoost(L, K): def train(dataset): examples, target = dataset.examples, dataset.target N = len(examples) - epsilon = 1./(2*N) - w = [1./N] * N + epsilon = 1. / (2 * N) + w = [1. / N] * N h, z = [], [] for k in range(K): h_k = L(dataset, w) @@ -474,7 +475,7 @@ def train(dataset): error = sum(weight for example, weight in zip(examples, w) if example[target] != h_k(example)) # Avoid divide-by-0 from either 0% or 100% error rates: - error = clip(error, epsilon, 1-epsilon) + error = clip(error, epsilon, 1 - epsilon) for j, example in enumerate(examples): if example[target] == h_k(example): w[j] *= error / (1. 
- error) @@ -501,7 +502,7 @@ def weighted_mode(values, weights): totals[v] += w return max(list(totals.keys()), key=totals.get) -#_____________________________________________________________________________ +# _____________________________________________________________________________ # Adapting an unweighted learner for AdaBoost @@ -529,15 +530,15 @@ def weighted_replicate(seq, weights, n): ['A', 'B', 'B', 'C']""" assert len(seq) == len(weights) weights = normalize(weights) - wholes = [int(w*n) for w in weights] - fractions = [(w*n) % 1 for w in weights] - return (flatten([x] * nx for x, nx in zip(seq, wholes)) - + weighted_sample_with_replacement(seq, fractions, n - sum(wholes))) + wholes = [int(w * n) for w in weights] + fractions = [(w * n) % 1 for w in weights] + return (flatten([x] * nx for x, nx in zip(seq, wholes)) + + weighted_sample_with_replacement(seq, fractions, n - sum(wholes))) def flatten(seqs): return sum(seqs, []) -#_____________________________________________________________________________ +# _____________________________________________________________________________ # Functions for testing learners on examples @@ -584,8 +585,9 @@ def cross_validation(learner, dataset, k=10, trials=1): else: n = len(dataset.examples) random.shuffle(dataset.examples) - return mean([train_and_test(learner, dataset, i*(n/k), (i+1)*(n/k)) - for i in range(k)]) + return mean( + [train_and_test(learner, dataset, i * (n / k), + (i + 1) * (n / k)) for i in range(k)]) def leave1out(learner, dataset): @@ -595,7 +597,7 @@ def leave1out(learner, dataset): def learningcurve(learner, dataset, trials=10, sizes=None): if sizes is None: - sizes = list(range(2, len(dataset.examples)-10, 2)) + sizes = list(range(2, len(dataset.examples) - 10, 2)) def score(learner, size): random.shuffle(dataset.examples) @@ -603,7 +605,7 @@ def score(learner, size): return [(size, mean([score(learner, size) for t in range(trials)])) for size in sizes] -#______________________________________________________________________________ +# ______________________________________________________________________________ # The rest of this file gives datasets for machine learning problems. orings = DataSet(name='orings', target='Distressed', @@ -619,15 +621,15 @@ def score(learner, size): iris = DataSet(name="iris", target="class", attrnames="sepal-len sepal-width petal-len petal-width class") -#______________________________________________________________________________ +# ______________________________________________________________________________ # The Restaurant example from Fig. 18.2 def RestaurantDataSet(examples=None): "Build a DataSet of Restaurant waiting examples. [Fig. 18.3]" return DataSet(name='restaurant', target='Wait', examples=examples, - attrnames='Alternate Bar Fri/Sat Hungry Patrons Price ' - + 'Raining Reservation Type WaitEstimate Wait') + attrnames='Alternate Bar Fri/Sat Hungry Patrons Price ' + + 'Raining Reservation Type WaitEstimate Wait') restaurant = RestaurantDataSet() @@ -646,14 +648,16 @@ def T(attrname, branches): T('Alternate', {'No': T('Reservation', {'Yes': 'Yes', 'No': T('Bar', {'No': 'No', - 'Yes': 'Yes'})}), + 'Yes': 'Yes' + })}), 'Yes': T('Fri/Sat', {'No': 'No', 'Yes': 'Yes'})}), '10-30': T('Hungry', {'No': 'Yes', 'Yes': T('Alternate', {'No': 'Yes', 'Yes': - T('Raining', {'No': 'No', 'Yes': 'Yes'})})})})}) + T('Raining', {'No': 'No', 'Yes': 'Yes'}) + })})})}) __doc__ += """ [Fig. 
18.6] @@ -683,7 +687,7 @@ def gen(): return example return RestaurantDataSet([gen() for i in range(n)]) -#______________________________________________________________________________ +# ______________________________________________________________________________ # Artificial, generated datasets. @@ -693,7 +697,7 @@ def Majority(k, n): examples = [] for i in range(n): bits = [random.choice([0, 1]) for i in range(k)] -        bits.append(int(sum(bits) > k/2)) +        bits.append(int(sum(bits) > k / 2)) examples.append(bits) return DataSet(name="majority", examples=examples) @@ -722,7 +726,7 @@ def ContinuousXor(n): examples.append([x, y, int(x) != int(y)]) return DataSet(name="continuous xor", examples=examples) -#______________________________________________________________________________ +# ______________________________________________________________________________ def compare(algorithms=[PluralityLearner, NaiveBayesLearner, diff --git a/logic.py b/logic.py index 88b1ea36e..4ae6fbeae 100644 --- a/logic.py +++ b/logic.py @@ -24,14 +24,14 @@ diff, simp Symbolic differentiation and simplification """ -from utils import * +from utils import *  # noqa import agents import itertools import re from collections import defaultdict -#______________________________________________________________________________ +# ______________________________________________________________________________ class KB: @@ -93,7 +93,7 @@ def retract(self, sentence): if c in self.clauses: self.clauses.remove(c) -#______________________________________________________________________________ +# ______________________________________________________________________________ def KB_AgentProgram(KB): @@ -118,7 +118,7 @@ def make_action_sentence(self, action, t): return program -#______________________________________________________________________________ +# ______________________________________________________________________________ class Expr: @@ -193,8 +193,9 @@ def __repr__(self): def __eq__(self, other): """x and y are equal iff their ops and args are equal.""" -        return (other is self) or (isinstance(other, Expr) -                                   and self.op == other.op and self.args == other.args) +        return (other is self) or (isinstance(other, Expr) and +                                   self.op == other.op and +                                   self.args == other.args) def __ne__(self, other): return not self.__eq__(other) @@ -326,8 +327,8 @@ def is_definite_clause(s): return True elif s.op == '>>': antecedent, consequent = s.args -        return (is_symbol(consequent.op) -                and every(lambda arg: is_symbol(arg.op), conjuncts(antecedent))) +        return (is_symbol(consequent.op) and +                every(lambda arg: is_symbol(arg.op), conjuncts(antecedent))) else: return False @@ -345,7 +346,7 @@ def parse_definite_clause(s): TRUE, FALSE, ZERO, ONE, TWO = list(map(Expr, ['TRUE', 'FALSE', 0, 1, 2])) A, B, C, D, E, F, G, P, Q, x, y, z = list(map(Expr, 'ABCDEFGPQxyz')) -#______________________________________________________________________________ +# ______________________________________________________________________________ def tt_entails(kb, alpha): @@ -447,7 +448,7 @@ def pl_true(exp, model={}): else: raise ValueError("illegal operator in logic expression" + str(exp)) -#______________________________________________________________________________ +# ______________________________________________________________________________ # Convert to Conjunctive Normal Form (CNF) @@ -509,7 +510,7 @@ def move_not_inwards(s): ((A | ~B) & ~C) """ if s.op == '~': -        NOT = lambda b: move_not_inwards(~b) +        def NOT(b): return move_not_inwards(~b)  # noqa          a = 
s.args[0] if a.op == '~': return move_not_inwards(a.args[0]) # ~~A ==> A @@ -605,7 +606,7 @@ def disjuncts(s): """ return dissociate('|', [s]) -#______________________________________________________________________________ +# ______________________________________________________________________________ def pl_resolution(KB, alpha): @@ -644,7 +645,7 @@ def pl_resolve(ci, cj): clauses.append(associate('|', dnew)) return clauses -#______________________________________________________________________________ +# ______________________________________________________________________________ class PropDefiniteKB(PropKB): @@ -701,7 +702,7 @@ def pl_fc_entails(KB, q): for s in "P>>Q (L&M)>>P (B&L)>>M (A&P)>>L (A&B)>>L A B".split(): Fig[7, 15].tell(expr(s)) -#______________________________________________________________________________ +# ______________________________________________________________________________ # DPLL-Satisfiable [Fig. 7.17] @@ -726,9 +727,9 @@ def dpll(clauses, symbols, model): unknown_clauses = [] # clauses with an unknown truth value for c in clauses: val = pl_true(c, model) - if val == False: + if val is False: return False - if val != True: + if val is not True: unknown_clauses.append(c) if not unknown_clauses: return model @@ -812,7 +813,7 @@ def inspect_literal(literal): else: return literal, True -#______________________________________________________________________________ +# ______________________________________________________________________________ # Walk-SAT [Fig. 7.18] @@ -836,7 +837,7 @@ def WalkSAT(clauses, p=0.5, max_flips=10000): raise NotImplementedError model[sym] = not model[sym] -#______________________________________________________________________________ +# ______________________________________________________________________________ class HybridWumpusAgent(agents.Agent): @@ -850,7 +851,7 @@ def __init__(self): def plan_route(current, goals, allowed): unimplemented() -#______________________________________________________________________________ +# ______________________________________________________________________________ def SAT_plan(init, transition, goal, t_max, SAT_solver=dpll_satisfiable): @@ -870,7 +871,7 @@ def translate_to_SAT(init, transition, goal, t): def extract_solution(model): unimplemented() -#______________________________________________________________________________ +# ______________________________________________________________________________ def unify(x, y, s): @@ -962,7 +963,6 @@ def fol_fc_ask(KB, alpha): """Inefficient forward chaining for first-order logic. [Fig. 
9.3] KB is a FolKB and alpha must be an atomic sentence.""" while True: - new = {} for r in KB.clauses: ps, q = parse_definite_clause(standardize_variables(r)) raise NotImplementedError @@ -995,7 +995,7 @@ def standardize_variables(sentence, dic=None): standardize_variables.counter = itertools.count() -#______________________________________________________________________________ +# ______________________________________________________________________________ class FolKB(KB): @@ -1036,9 +1036,9 @@ def test_ask(query, kb=None): q = expr(query) vars = variables(q) answers = fol_bc_ask(kb or test_kb, q) - return sorted([pretty(dict((x, v) for x, v in list(a.items()) if x in vars)) - for a in answers], - key=repr) + return sorted( + [pretty(dict((x, v) for x, v in list(a.items()) if x in vars)) + for a in answers], key=repr) test_kb = FolKB( list(map(expr, ['Farmer(Mac)', @@ -1051,14 +1051,14 @@ def test_ask(query, kb=None): '(Farmer(f)) ==> Human(f)', # Note that this order of conjuncts # would result in infinite recursion: - #'(Human(h) & Mother(m, h)) ==> Human(m)' + # '(Human(h) & Mother(m, h)) ==> Human(m)' '(Mother(m, h) & Human(h)) ==> Human(m)' ])) ) crime_kb = FolKB( list(map(expr, - ['(American(x) & Weapon(y) & Sells(x, y, z) & Hostile(z)) ==> Criminal(x)', + ['(American(x) & Weapon(y) & Sells(x, y, z) & Hostile(z)) ==> Criminal(x)', # noqa 'Owns(Nono, M1)', 'Missile(M1)', '(Missile(x) & Owns(Nono, x)) ==> Sells(West, x, Nono)', @@ -1107,7 +1107,7 @@ def fol_bc_and(KB, goals, theta): for theta2 in fol_bc_and(KB, rest, theta1): yield theta2 -#______________________________________________________________________________ +# ______________________________________________________________________________ # Example application (not in the book). # You can use the Expr class to do symbolic differentiation. This used to be @@ -1141,8 +1141,8 @@ def diff(y, x): elif op == '**' and isnumber(x.op): return (v * u ** (v - 1) * diff(u, x)) elif op == '**': - return (v * u ** (v - 1) * diff(u, x) - + u ** v * Expr('log')(u) * diff(v, x)) + return (v * u ** (v - 1) * diff(u, x) + + u ** v * Expr('log')(u) * diff(v, x)) elif op == 'log': return diff(u, x) / u else: @@ -1215,7 +1215,7 @@ def d(y, x): "Differentiate and then simplify." return simp(diff(y, x)) -#_________________________________________________________________________ +# _________________________________________________________________________ # Utilities for doctest cases # These functions print their arguments in a standard order @@ -1269,7 +1269,7 @@ def ppdict(d): def ppset(s): print(pretty_set(s)) -#________________________________________________________________________ +# ________________________________________________________________________ class logicTest: @@ -1331,7 +1331,7 @@ class logicTest: False ### An earlier version of the code failed on this: ->>> dpll_satisfiable(A & ~B & C & (A | ~D) & (~E | ~D) & (C | ~D) & (~A | ~F) & (E | ~F) & (~D | ~F) & (B | ~C | D) & (A | ~E | F) & (~A | E | D)) +>>> dpll_satisfiable(A & ~B & C & (A | ~D) & (~E | ~D) & (C | ~D) & (~A | ~F) & (E | ~F) & (~D | ~F) & (B | ~C | D) & (A | ~E | F) & (~A | E | D)) # noqa {B: False, C: True, A: True, F: False, D: True, E: False} ### [Fig. 7.13] diff --git a/mdp.py b/mdp.py index 28ee611d9..dbb5e4d54 100644 --- a/mdp.py +++ b/mdp.py @@ -6,7 +6,7 @@ dictionary of {state:number} pairs. 
We then define the value_iteration and policy_iteration algorithms.""" -from utils import * +from utils import *  # noqa class MDP: @@ -15,9 +15,9 @@ class MDP: and reward function. We also keep track of a gamma value, for use by algorithms. The transition model is represented somewhat differently from the text. Instead of P(s' | s, a) being a probability number for each -    state/state/action triplet, we instead have T(s, a) return a list of (p, s') -    pairs. We also keep track of the possible states, terminal states, and -    actions for each state. [page 646]""" +    state/state/action triplet, we instead have T(s, a) return a +    list of (p, s') pairs. We also keep track of the possible states, +    terminal states, and actions for each state. [page 646]""" def __init__(self, init, actlist, terminals, gamma=.9): self.init = init @@ -90,16 +90,17 @@ def to_grid(self, mapping): def to_arrows(self, policy): chars = { (1, 0): '>', (0, 1): '^', (-1, 0): '<', (0, -1): 'v', None: '.'} -        return self.to_grid(dict([(s, chars[a]) for (s, a) in list(policy.items())])) +        return self.to_grid( +            dict([(s, chars[a]) for (s, a) in list(policy.items())])) -#______________________________________________________________________________ +# ______________________________________________________________________________ Fig[17, 1] = GridMDP([[-0.04, -0.04, -0.04, +1], [-0.04, None, -0.04, -1], [-0.04, -0.04, -0.04, -0.04]], terminals=[(3, 2), (3, 1)]) -#______________________________________________________________________________ +# ______________________________________________________________________________ def value_iteration(mdp, epsilon=0.001): @@ -131,7 +132,7 @@ def expected_utility(a, s, U, mdp): "The expected utility of doing a in state s, according to the MDP and U." return sum([p * U[s1] for (p, s1) in mdp.T(s, a)]) -#______________________________________________________________________________ +# ______________________________________________________________________________ def policy_iteration(mdp): @@ -180,12 +181,20 @@ def policy_evaluation(pi, U, mdp, k=20): __doc__ += """ Random tests: ->>> pi +>>> pi  # doctest: +NORMALIZE_WHITESPACE -{(3, 2): None, (3, 1): None, (3, 0): (-1, 0), (2, 1): (0, 1), (0, 2): (1, 0), (1, 0): (1, 0), (0, 0): (0, 1), (1, 2): (1, 0), (2, 0): (0, 1), (0, 1): (0, 1), (2, 2): (1, 0)} +{(3, 2): None, (3, 1): None, (3, 0): (-1, 0), (2, 1): (0, 1), (0, 2): (1, 0), + (1, 0): (1, 0), (0, 0): (0, 1), (1, 2): (1, 0), (2, 0): (0, 1), + (0, 1): (0, 1), (2, 2): (1, 0)} ->>> value_iteration(Fig[17,1], .01) +>>> value_iteration(Fig[17,1], .01)  # doctest: +NORMALIZE_WHITESPACE -{(3, 2): 1.0, (3, 1): -1.0, (3, 0): 0.12958868267972745, (0, 1): 0.39810203830605462, (0, 2): 0.50928545646220924, (1, 0): 0.25348746162470537, (0, 0): 0.29543540628363629, (1, 2): 0.64958064617168676, (2, 0): 0.34461306281476806, (2, 1): 0.48643676237737926, (2, 2): 0.79536093684710951} +{(3, 2): 1.0, (3, 1): -1.0, (3, 0): 0.12958868267972745, + (0, 1): 0.39810203830605462, (0, 2): 0.50928545646220924, + (1, 0): 0.25348746162470537, (0, 0): 0.29543540628363629, + (1, 2): 0.64958064617168676, (2, 0): 0.34461306281476806, + (2, 1): 0.48643676237737926, (2, 2): 0.79536093684710951} ->>> policy_iteration(Fig[17,1]) +>>> policy_iteration(Fig[17,1])  # doctest: +NORMALIZE_WHITESPACE -{(3, 2): None, (3, 1): None, (3, 0): (0, -1), (2, 1): (-1, 0), (0, 2): (1, 0), (1, 0): (1, 0), (0, 0): (1, 0), (1, 2): (1, 0), (2, 0): (1, 0), (0, 1): (1, 0), (2, 2): (1, 0)} +{(3, 2): None, (3, 1): None, (3, 0): (0, -1), (2, 1): (-1, 0), (0, 2): (1, 0), + (1, 0): (1, 0), (0, 0): (1, 0), (1, 2): (1, 0), (2, 0): (1, 0), + (0, 1): (1, 0), (2, 2): (1, 0)} """ diff --git a/nlp.py b/nlp.py index 2077aec74..ae418757e 100644 --- 
a/nlp.py +++ b/nlp.py @@ -3,11 +3,11 @@ # (Written for the second edition of AIMA; expect some discrepanciecs # from the third edition until this gets reviewed.) -from utils import * +from utils import *  # noqa from collections import defaultdict -#______________________________________________________________________________ +# ______________________________________________________________________________ # Grammars and Lexicons @@ -55,16 +55,16 @@ def __repr__(self): E0 = Grammar('E0', Rules( # Grammar for E_0 [Fig. 22.4] S='NP VP | S Conjunction S', -    NP='Pronoun | Name | Noun | Article Noun | Digit Digit | NP PP | NP RelClause', +    NP='Pronoun | Name | Noun | Article Noun | Digit Digit | NP PP | NP RelClause',  # noqa VP='Verb | VP NP | VP Adjective | VP PP | VP Adverb', PP='Preposition NP', RelClause='That VP'), Lexicon( # Lexicon for E_0 [Fig. 22.3] -    Noun="stench | breeze | glitter | nothing | wumpus | pit | pits | gold | east", -    Verb="is | see | smell | shoot | fell | stinks | go | grab | carry | kill | turn | feel", +    Noun="stench | breeze | glitter | nothing | wumpus | pit | pits | gold | east",  # noqa +    Verb="is | see | smell | shoot | fell | stinks | go | grab | carry | kill | turn | feel",  # noqa Adjective="right | left | east | south | back | smelly", -    Adverb="here | there | nearby | ahead | right | left | east | south | back", +    Adverb="here | there | nearby | ahead | right | left | east | south | back",  # noqa Pronoun="me | you | I | it", Name="John | Mary | Boston | Aristotle", Article="the | a | an", @@ -110,7 +110,7 @@ def rewrite(tokens, into): return ' '.join(rewrite(s.split(), [])) -#______________________________________________________________________________ +# ______________________________________________________________________________ # Chart Parsing @@ -132,7 +132,8 @@ def parses(self, words, S='S'): """Return a list of parses; words can be a list or string. >>> chart = Chart(E_NP_) -        >>> chart.parses('happy man', 'NP') +        >>> chart.parses('happy man', 'NP')  # doctest: +NORMALIZE_WHITESPACE -        [[0, 2, 'NP', [('Adj', 'happy'), [1, 2, 'NP', [('N', 'man')], []]], []]] +        [[0, 2, 'NP', [('Adj', 'happy'), +        [1, 2, 'NP', [('N', 'man')], []]], []]] """ if isinstance(words, str): words = words.split() @@ -166,7 +167,7 @@ def add_edge(self, edge): self.predictor(edge) def scanner(self, j, word): -        "For each edge expecting a word of this category here, extend the edge." +        "For each edge expecting a word of this category here, extend the edge."  # noqa for (i, j, A, alpha, Bb) in self.chart[j]: if Bb and self.grammar.isa(word, Bb[0]): self.add_edge([i, j+1, A, alpha + [(Bb[0], word)], Bb[1:]]) @@ -195,9 +196,16 @@ def extender(self, edge): >>> chart = Chart(E0) ->>> chart.parses('the wumpus that is smelly is near 2 2') +>>> chart.parses(  # doctest: +NORMALIZE_WHITESPACE +...     'the wumpus that is smelly is near 2 2') -[[0, 9, 'S', [[0, 5, 'NP', [[0, 2, 'NP', [('Article', 'the'), ('Noun', 'wumpus')], []], [2, 5, 'RelClause', [('That', 'that'), [3, 5, 'VP', [[3, 4, 'VP', [('Verb', 'is')], []], ('Adjective', 'smelly')], []]], []]], []], [5, 9, 'VP', [[5, 6, 'VP', [('Verb', 'is')], []], [6, 9, 'PP', [('Preposition', 'near'), [7, 9, 'NP', [('Digit', '2'), ('Digit', '2')], []]], []]], []]], []]] - -### There is a built-in trace facility (compare [Fig. 
22.9]) +[[0, 9, 'S', [[0, 5, 'NP', [[0, 2, 'NP', + [('Article', 'the'), ('Noun', 'wumpus')], []], + [2, 5, 'RelClause', [('That', 'that'), [3, 5, 'VP', + [[3, 4, 'VP', [('Verb', 'is')], []], ('Adjective', 'smelly')], []]], + []]], []], [5, 9, 'VP', [[5, 6, 'VP', [('Verb', 'is')], []], + [6, 9, 'PP', [('Preposition', 'near'), [7, 9, 'NP', [('Digit', '2'), + ('Digit', '2')], []]], []]], []]], []]] + +### There is a built-in trace facility (compare [Fig. 22.9]) # noqa >>> Chart(E_, trace=True).parses('I feel it') parse: added [0, 0, 'S_', [], ['S']] predictor: added [0, 0, 'S', [], ['NP', 'VP']] diff --git a/planning.py b/planning.py index 89ef53fc6..c939b9808 100644 --- a/planning.py +++ b/planning.py @@ -1,6 +1,8 @@ """Planning (Chapters 10-11) """ +# flake8: noqa + from utils import * import agents diff --git a/probability.py b/probability.py index c5bd76313..a582d128d 100644 --- a/probability.py +++ b/probability.py @@ -1,14 +1,14 @@ """Probability models. (Chapter 13-15) """ -from utils import * +from utils import * # noqa from logic import extend import random from collections import defaultdict from functools import reduce -#______________________________________________________________________________ +# ______________________________________________________________________________ def DTAgentProgram(belief_state): @@ -21,7 +21,7 @@ def program(percept): program.action = None return program -#______________________________________________________________________________ +# ______________________________________________________________________________ class ProbDist: @@ -129,7 +129,7 @@ def event_values(event, vars): else: return tuple([event[var] for var in vars]) -#______________________________________________________________________________ +# ______________________________________________________________________________ def enumerate_joint_ask(X, e, P): @@ -157,7 +157,7 @@ def enumerate_joint(vars, e, P): return sum([enumerate_joint(rest, extend(e, Y, y), P) for y in P.values(Y)]) -#______________________________________________________________________________ +# ______________________________________________________________________________ class BayesNet: @@ -281,7 +281,7 @@ def __repr__(self): ('MaryCalls', 'Alarm', {T: 0.70, F: 0.01}) ]) -#______________________________________________________________________________ +# ______________________________________________________________________________ def enumeration_ask(X, e, bn): @@ -312,7 +312,7 @@ def enumerate_all(vars, e, bn): return sum(Ynode.p(y, e) * enumerate_all(rest, extend(e, Y, y), bn) for y in bn.variable_values(Y)) -#______________________________________________________________________________ +# ______________________________________________________________________________ def elimination_ask(X, e, bn): @@ -402,7 +402,7 @@ def all_events(vars, bn, e): for x in bn.variable_values(X): yield extend(e1, X, x) -#______________________________________________________________________________ +# ______________________________________________________________________________ # Fig. 
14.12a: sprinkler network @@ -413,7 +413,7 @@ def all_events(vars, bn, e): ('WetGrass', 'Sprinkler Rain', {(T, T): 0.99, (T, F): 0.90, (F, T): 0.90, (F, F): 0.00})]) -#______________________________________________________________________________ +# ______________________________________________________________________________ def prior_sample(bn): @@ -424,7 +424,7 @@ def prior_sample(bn): event[node.variable] = node.sample(event) return event -#_________________________________________________________________________ +# _________________________________________________________________________ def rejection_sampling(X, e, bn, N): @@ -451,7 +451,7 @@ def consistent_with(event, evidence): return all(evidence.get(k, v) == v for k, v in list(event.items())) -#_________________________________________________________________________ +# _________________________________________________________________________ def likelihood_weighting(X, e, bn, N): @@ -483,7 +483,7 @@ def weighted_sample(bn, e): event[Xi] = node.sample(event) return event, w -#_________________________________________________________________________ +# _________________________________________________________________________ def gibbs_ask(X, e, bn, N): @@ -522,7 +522,7 @@ def markov_blanket_sample(X, e, bn): # (assuming a Boolean variable here) return probability(Q.normalize()[True]) -#_________________________________________________________________________ +# _________________________________________________________________________ def forward_backward(ev, prior): @@ -539,7 +539,7 @@ def particle_filtering(e, N, dbn): """[Fig. 15.17]""" unimplemented() -#_________________________________________________________________________ +# _________________________________________________________________________ __doc__ += """ # We can build up a probability distribution like this (p. 469): >>> P = ProbDist() @@ -553,7 +553,7 @@ def particle_filtering(e, N, dbn): >>> P['rain'] #doctest:+ELLIPSIS 0.2... -# A Joint Probability Distribution is dealt with like this (Fig. 13.3): +# A Joint Probability Distribution is dealt with like this (Fig. 13.3): # noqa >>> P = JointProbDist(['Toothache', 'Cavity', 'Catch']) >>> T, F = True, False >>> P[T, T, T] = 0.108; P[T, T, F] = 0.012; P[F, T, T] = 0.072; P[F, T, F] = 0.008 diff --git a/rl.py b/rl.py index f30e542ba..d74062be4 100644 --- a/rl.py +++ b/rl.py @@ -1,7 +1,7 @@ """Reinforcement Learning (Chapter 21) """ -from utils import * +from utils import * # noqa import agents diff --git a/search.py b/search.py index 40816722b..66e2fdbba 100644 --- a/search.py +++ b/search.py @@ -4,16 +4,14 @@ then create problem instances and solve them with calls to the various search functions.""" -from utils import * +from utils import * # noqa import math import random import sys -import time import bisect -import string -#______________________________________________________________________________ +# ______________________________________________________________________________ class Problem(object): @@ -61,7 +59,7 @@ def value(self, state): """For optimization problems, each state has a value. Hill-climbing and related algorithms try to maximize this value.""" raise NotImplementedError -#______________________________________________________________________________ +# ______________________________________________________________________________ class Node: @@ -94,7 +92,8 @@ def child_node(self, problem, action): "Fig. 
3.10" next = problem.result(self.state, action) return Node(next, self, action, - problem.path_cost(self.path_cost, self.state, action, next)) + problem.path_cost(self.path_cost, self.state, + action, next)) def solution(self): "Return the sequence of actions to go from the root to this node." @@ -119,7 +118,7 @@ def __eq__(self, other): def __hash__(self): return hash(self.state) -#______________________________________________________________________________ +# ______________________________________________________________________________ class SimpleProblemSolvingAgentProgram: @@ -151,7 +150,7 @@ def formulate_problem(self, state, goal): def search(self, problem): raise NotImplementedError -#______________________________________________________________________________ +# ______________________________________________________________________________ # Uninformed Search algorithms @@ -180,8 +179,8 @@ def graph_search(problem, frontier): return node explored.add(node.state) frontier.extend(child for child in node.expand(problem) - if child.state not in explored - and child not in frontier) + if child.state not in explored and + child not in frontier) return None @@ -283,7 +282,7 @@ def iterative_deepening_search(problem): if result != 'cutoff': return result -#______________________________________________________________________________ +# ______________________________________________________________________________ # Informed (Heuristic) Search greedy_best_first_graph_search = best_first_graph_search @@ -297,7 +296,7 @@ def astar_search(problem, h=None): h = memoize(h or problem.h, 'h') return best_first_graph_search(problem, lambda n: n.path_cost + h(n)) -#______________________________________________________________________________ +# ______________________________________________________________________________ # Other search algorithms @@ -373,32 +372,35 @@ def simulated_annealing(problem, schedule=exp_schedule()): def and_or_graph_search(problem): """Used when the environment is nondeterministic and completely observable Contains OR nodes where the agent is free to choose any action - After every action there is an AND node which contains all possible states the agent may reach due to stochastic nature of environment - The agent must be able to handle all possible states of the AND node(as it may end up in any of them) - returns a conditional plan to reach goal state, or failure if the former is not possible""" + After every action there is an AND node which contains all possible states + the agent may reach due to stochastic nature of environment + The agent must be able to handle all possible states of the AND node(as it + may end up in any of them) returns a conditional plan to reach goal state, + or failure if the former is not possible""" "[Fig. 
4.11]" - #functions used by and_or_search + # functions used by and_or_search def or_search(state, problem, path): if problem.goal_test(state): return {} if state in path: return None for action in problem.action(state): - plan = and_search(problem.result(state, action), problem, path + [state,]) - if not plan == None: + plan = and_search(problem.result(state, action), + problem, path + [state, ]) + if plan is not None: return [action, plan] def and_search(states, problem, path): - "returns plan in form of dictionary where we take action plan[s] if we reach state s" - plan=dict() + "returns plan in form of dictionary where we take action plan[s] if we reach state s" # noqa + plan = dict() for s in states: plan[s] = or_search(s, problem, path) - if plan[s] == None: + if plan[s] is None: return None return plan - #body of and or search + # body of and or search return or_search(problem.initial, problem, []) @@ -426,7 +428,8 @@ def run(self, percept): self.a = None else: if current_state not in self.untried.keys(): - self.untried[current_state] = self.problem.actions(current_state) + self.untried[current_state] = self.problem.actions( + current_state) if self.s is not None: if current_state != self.result[(self.s, self.a)]: self.result[(self.s, self.a)] = current_state @@ -435,10 +438,10 @@ def run(self, percept): if len(self.unbacktracked[current_state]) == 0: self.a = None else: - # else a <- an action b such that result[s', b] = POP(unbacktracked[s']) - unbacktracked_pop = self.unbacktracked[current_state].pop(0) - for (s,b) in self.result.keys(): - if self.result[(s,b)] == unbacktracked_pop: + # else a <- an action b such that result[s', b] = POP(unbacktracked[s']) # noqa + unbacktracked_pop = self.unbacktracked[current_state].pop(0) # noqa + for (s, b) in self.result.keys(): + if self.result[(s, b)] == unbacktracked_pop: self.a = b break else: @@ -451,7 +454,7 @@ def lrta_star_agent(s1): "[Fig. 4.24]" unimplemented() -#______________________________________________________________________________ +# ______________________________________________________________________________ # Genetic Algorithm @@ -496,10 +499,10 @@ def mutate(self): "Change a few of my genes." raise NotImplementedError -#_____________________________________________________________________________ +# _____________________________________________________________________________ # The remainder of this file implements examples for the search algorithms. 
-#______________________________________________________________________________ +# ______________________________________________________________________________ # Graphs and Graph Problems @@ -589,7 +592,7 @@ def distance_to_node(n): g.connect(node, neighbor, int(d)) return g -#Simplified road map of Romania +# Simplified road map of Romania Fig[3, 2] = UndirectedGraph(dict( Arad=dict(Zerind=75, Sibiu=140, Timisoara=118), Bucharest=dict(Urziceni=85, Pitesti=101, Giurgiu=90, Fagaras=211), @@ -605,20 +608,24 @@ def distance_to_node(n): Rimnicu=dict(Sibiu=80), Urziceni=dict(Vaslui=142))) Fig[3, 2].locations = dict( - Arad=(91, 492), Bucharest=(400, 327), Craiova=(253, 288), Drobeta=(165, 299), - Eforie=(562, 293), Fagaras=(305, 449), Giurgiu=(375, 270), Hirsova=(534, 350), - Iasi=(473, 506), Lugoj=(165, 379), Mehadia=(168, 339), Neamt=(406, 537), - Oradea=(131, 571), Pitesti=(320, 368), Rimnicu=(233, 410), Sibiu=(207, 457), - Timisoara=(94, 410), Urziceni=(456, 350), Vaslui=(509, 444), Zerind=(108, 531)) - -#Principal states and territories of Australia + Arad=(91, 492), Bucharest=(400, 327), Craiova=(253, 288), + Drobeta=(165, 299), Eforie=(562, 293), Fagaras=(305, 449), + Giurgiu=(375, 270), Hirsova=(534, 350), Iasi=(473, 506), + Lugoj=(165, 379), Mehadia=(168, 339), Neamt=(406, 537), + Oradea=(131, 571), Pitesti=(320, 368), Rimnicu=(233, 410), + Sibiu=(207, 457), Timisoara=(94, 410), Urziceni=(456, 350), + Vaslui=(509, 444), Zerind=(108, 531)) + +# Principal states and territories of Australia Fig[6, 1] = UndirectedGraph(dict( T=dict(), SA=dict(WA=1, NT=1, Q=1, NSW=1, V=1), NT=dict(WA=1, Q=1), NSW=dict(Q=1, V=1))) + Fig[6, 1].locations = dict(WA=(120, 24), NT=(135, 20), SA=(135, 30), - Q=(145, 20), NSW=(145, 32), T=(145, 42), V=(145, 37)) + Q=(145, 20), NSW=(145, 32), T=(145, 42), + V=(145, 37)) class GraphProblem(Problem): @@ -648,7 +655,7 @@ def h(self, node): else: return infinity -#______________________________________________________________________________ +# ______________________________________________________________________________ class NQueensProblem(Problem): @@ -689,10 +696,10 @@ def conflicted(self, state, row, col): def conflict(self, row1, col1, row2, col2): "Would putting two queens in (row1, col1) and (row2, col2) conflict?" - return (row1 == row2 # same row - or col1 == col2 # same column - or row1-col1 == row2-col2 # same \ diagonal - or row1+col1 == row2+col2) # same / diagonal + return (row1 == row2 or # same row + col1 == col2 or # same column + row1-col1 == row2-col2 or # same \ diagonal + row1+col1 == row2+col2) # same / diagonal def goal_test(self, state): "Check if all columns filled, no conflicts." @@ -701,7 +708,7 @@ def goal_test(self, state): return not any(self.conflicted(state, state[col], col) for col in range(len(state))) -#______________________________________________________________________________ +# ______________________________________________________________________________ # Inverse Boggle: Search for a high-scoring Boggle board. A good domain for # iterative-repair and related search techniques, as suggested by Justin Boyan. 
@@ -780,7 +787,7 @@ def exact_sqrt(n2): assert n * n == n2 return n -#_____________________________________________________________________________ +# _____________________________________________________________________________ class Wordlist: @@ -819,7 +826,7 @@ def __contains__(self, word): def __len__(self): return len(self.words) -#_____________________________________________________________________________ +# _____________________________________________________________________________ class BoggleFinder: @@ -880,7 +887,7 @@ def __len__(self): "The number of words found." return len(self.found) -#_____________________________________________________________________________ +# _____________________________________________________________________________ def boggle_hill_climbing(board=None, ntimes=100, verbose=True): @@ -911,7 +918,7 @@ def mutate_boggle(board): board[i] = random.choice(random.choice(cubes16)) return i, oldc -#______________________________________________________________________________ +# ______________________________________________________________________________ # Code to compare searchers on various problems. @@ -956,7 +963,8 @@ def __repr__(self): def compare_searchers(problems, header, searchers=[breadth_first_tree_search, -                                  breadth_first_search, depth_first_graph_search, +                                  breadth_first_search, +                                  depth_first_graph_search, iterative_deepening_search, depth_limited_search, recursive_best_first_search]): @@ -977,13 +985,14 @@ def compare_graph_searchers(): depth_first_graph_search < 8/ 9/ 20/B> < 16/ 17/ 38/N> < 4/ 5/ 11/WA> iterative_deepening_search < 11/ 33/ 31/B> < 656/1815/1812/N> < 3/ 11/ 11/WA> depth_limited_search < 54/ 65/ 185/B> < 387/1012/1125/N> < 50/ 54/ 200/WA> -recursive_best_first_search < 5/ 6/ 15/B> <5887/5888/16532/N> < 11/ 12/ 43/WA>""" +recursive_best_first_search < 5/ 6/ 15/B> <5887/5888/16532/N> < 11/12/ 43/WA>"""  # noqa compare_searchers(problems=[GraphProblem('Arad', 'Bucharest', Fig[3, 2]), GraphProblem('Oradea', 'Neamt', Fig[3, 2]), GraphProblem('Q', 'WA', Fig[6, 1])], -                      header=['Searcher', 'Fig[3, 2](Arad, Bucharest)', 'Fig[3, 2](Oradea, Neamt)', 'Fig[6, 1]']) +                      header=['Searcher', 'Fig[3, 2](Arad, Bucharest)', +                              'Fig[3, 2](Oradea, Neamt)', 'Fig[6, 1]']) -#______________________________________________________________________________ +# ______________________________________________________________________________ __doc__ += """ >>> romania = GraphProblem('Arad', 'Bucharest', Fig[3, 2]) @@ -1006,9 +1015,9 @@ def compare_graph_searchers(): >>> board = list('SARTELNID') >>> print_boggle(board) -S A R -T E L -N I D +S A R  +T E L  +N I D  >>> f = BoggleFinder(board) >>> len(f) 206 @@ -1017,7 +1026,20 @@ __doc__ += """ Random tests ->>> ' '.join(f.words()) +>>> ' '.join(f.words())  # doctest: +NORMALIZE_WHITESPACE -'LID LARES DEAL LIE DIETS LIN LINT TIL TIN RATED ERAS LATEN DEAR TIE LINE INTER STEAL LATED LAST TAR SAL DITES RALES SAE RETS TAE RAT RAS SAT IDLE TILDES LEAST IDEAS LITE SATED TINED LEST LIT RASE RENTS TINEA EDIT EDITS NITES ALES LATE LETS RELIT TINES LEI LAT ELINT LATI SENT TARED DINE STAR SEAR NEST LITAS TIED SEAT SERAL RATE DINT DEL DEN SEAL TIER TIES NET SALINE DILATE EAST TIDES LINTER NEAR LITS ELINTS DENI RASED SERA TILE NEAT DERAT IDLEST NIDE LIEN STARED LIER LIES SETA NITS TINE DITAS ALINE SATIN TAS ASTER LEAS TSAR LAR NITE RALE LAS REAL NITER ATE RES RATEL IDEA RET IDEAL REI RATS STALE DENT RED IDES ALIEN SET TEL SER TEN TEA TED SALE TALE STILE ARES SEA TILDE SEN SEL ALINES SEI LASE DINES ILEA LINES ELD TIDE RENT DIEL STELA TAEL STALED EARL LEA 
TILES TILER LED ETA TALI ALE LASED TELA LET IDLER REIN ALIT ITS NIDES DIN DIE DENTS STIED LINER LASTED RATINE ERA IDLES DIT RENTAL DINER SENTI TINEAL DEIL TEAR LITER LINTS TEAL DIES EAR EAT ARLES SATE STARE DITS DELI DENTAL REST DITE DENTIL DINTS DITA DIET LENT NETS NIL NIT SETAL LATS TARE ARE SATI' +'LID LARES DEAL LIE DIETS LIN LINT TIL TIN RATED ERAS LATEN DEAR TIE LINE INTER +STEAL LATED LAST TAR SAL DITES RALES SAE RETS TAE RAT RAS SAT IDLE TILDES LEAST +IDEAS LITE SATED TINED LEST LIT RASE RENTS TINEA EDIT EDITS NITES ALES LATE +LETS RELIT TINES LEI LAT ELINT LATI SENT TARED DINE STAR SEAR NEST LITAS TIED +SEAT SERAL RATE DINT DEL DEN SEAL TIER TIES NET SALINE DILATE EAST TIDES LINTER +NEAR LITS ELINTS DENI RASED SERA TILE NEAT DERAT IDLEST NIDE LIEN STARED LIER +LIES SETA NITS TINE DITAS ALINE SATIN TAS ASTER LEAS TSAR LAR NITE RALE LAS +REAL NITER ATE RES RATEL IDEA RET IDEAL REI RATS STALE DENT RED IDES ALIEN SET +TEL SER TEN TEA TED SALE TALE STILE ARES SEA TILDE SEN SEL ALINES SEI LASE +DINES ILEA LINES ELD TIDE RENT DIEL STELA TAEL STALED EARL LEA TILES TILER LED +ETA TALI ALE LASED TELA LET IDLER REIN ALIT ITS NIDES DIN DIE DENTS STIED LINER +LASTED RATINE ERA IDLES DIT RENTAL DINER SENTI TINEAL DEIL TEAR LITER LINTS +TEAL DIES EAR EAT ARLES SATE STARE DITS DELI DENTAL REST DITE DENTIL DINTS DITA +DIET LENT NETS NIL NIT SETAL LATS TARE ARE SATI' >>> boggle_hill_climbing(list('ABCDEFGHI'), verbose=False) (['E', 'P', 'R', 'D', 'O', 'A', 'G', 'S', 'T'], 123) diff --git a/tests/test_grid.py b/tests/test_grid.py index b170a6321..d160ca6e9 100644 --- a/tests/test_grid.py +++ b/tests/test_grid.py @@ -1,7 +1,9 @@ import pytest -from grid import * +from grid import * # noqa -compare_list = lambda x, y: all([elm_x == y[i] for i, elm_x in enumerate(x)]) + +def compare_list(x, y): + return all([elm_x == y[i] for i, elm_x in enumerate(x)]) def test_distance(): diff --git a/tests/test_probability.py b/tests/test_probability.py index 903d511bf..40bdca660 100644 --- a/tests/test_probability.py +++ b/tests/test_probability.py @@ -1,10 +1,9 @@ import pytest -from probability import * +from probability import * # noqa def tests(): cpt = burglary.variable_node('Alarm') - parents = ['Burglary', 'Earthquake'] event = {'Burglary': True, 'Earthquake': True} assert cpt.p(True, event) == 0.95 event = {'Burglary': False, 'Earthquake': True} @@ -32,21 +31,25 @@ def tests(): p = likelihood_weighting('Earthquake', {}, burglary, 1000) assert p[True], p[False] == (0.002, 0.998) + def test_probdist_basic(): P = ProbDist('Flip') - P['H'], P['T'] = 0.25, 0.75; + P['H'], P['T'] = 0.25, 0.75 assert P['H'] == 0.25 + def test_probdist_frequency(): P = ProbDist('X', {'lo': 125, 'med': 375, 'hi': 500}) assert (P['lo'], P['med'], P['hi']) == (0.125, 0.375, 0.5) + def test_probdist_normalize(): P = ProbDist('Flip') P['H'], P['T'] = 35, 65 P = P.normalize() assert (P.prob['H'], P.prob['T']) == (0.350, 0.650) + def test_jointprob(): P = JointProbDist(['X', 'Y']) P[1, 1] = 0.25 @@ -54,39 +57,50 @@ def test_jointprob(): P[dict(X=0, Y=1)] = 0.5 assert P[dict(X=0, Y=1)] == 0.5 + def test_event_values(): - assert event_values ({'A': 10, 'B': 9, 'C': 8}, ['C', 'A']) == (8, 10) - assert event_values ((1, 2), ['C', 'A']) == (1, 2) + assert event_values({'A': 10, 'B': 9, 'C': 8}, ['C', 'A']) == (8, 10) + assert event_values((1, 2), ['C', 'A']) == (1, 2) + def test_enumerate_joint_ask(): P = JointProbDist(['X', 'Y']) - P[0,0] = 0.25 - P[0,1] = 0.5 - P[1,1] = P[2,1] = 0.125 - assert enumerate_joint_ask('X', dict(Y=1), - 
P).show_approx() == '0: 0.667, 1: 0.167, 2: 0.167'
+    P[0, 0] = 0.25
+    P[0, 1] = 0.5
+    P[1, 1] = P[2, 1] = 0.125
+    assert enumerate_joint_ask(
+        'X', dict(Y=1), P).show_approx() == '0: 0.667, 1: 0.167, 2: 0.167'
+
 
 def test_bayesnode_p():
     bn = BayesNode('X', 'Burglary', {T: 0.2, F: 0.625})
     assert bn.p(False, {'Burglary': False, 'Earthquake': True}) == 0.375
 
+
 def test_enumeration_ask():
-    assert enumeration_ask('Burglary',
-        dict(JohnCalls=T, MaryCalls=T), burglary).show_approx() == 'False: 0.716, True: 0.284'
+    assert enumeration_ask(
+        'Burglary', dict(JohnCalls=T, MaryCalls=T),
+        burglary).show_approx() == 'False: 0.716, True: 0.284'
+
 
 def test_elemination_ask():
-    elimination_ask('Burglary', dict(JohnCalls=T, MaryCalls=T),
-        burglary).show_approx() == 'False: 0.716, True: 0.284'
+    assert elimination_ask(
+        'Burglary', dict(JohnCalls=T, MaryCalls=T),
+        burglary).show_approx() == 'False: 0.716, True: 0.284'
+
 
 def test_rejection_sampling():
     random.seed(47)
-    rejection_sampling('Burglary', dict(JohnCalls=T, MaryCalls=T),
-        burglary, 10000).show_approx() == 'False: 0.7, True: 0.3'
+    assert rejection_sampling(
+        'Burglary', dict(JohnCalls=T, MaryCalls=T),
+        burglary, 10000).show_approx() == 'False: 0.7, True: 0.3'
+
 
 def test_likelihood_weighting():
     random.seed(1017)
-    assert likelihood_weighting('Burglary', dict(JohnCalls=T, MaryCalls=T),
-        burglary, 10000).show_approx() == 'False: 0.702, True: 0.298'
+    assert likelihood_weighting(
+        'Burglary', dict(JohnCalls=T, MaryCalls=T),
+        burglary, 10000).show_approx() == 'False: 0.702, True: 0.298'
 
 if __name__ == '__main__':
     pytest.main()
diff --git a/tests/test_search.py b/tests/test_search.py
index 8dce793ea..4c0eb9ed3 100644
--- a/tests/test_search.py
+++ b/tests/test_search.py
@@ -1,24 +1,35 @@
 import pytest
-from search import *
+from search import *  # noqa
 
 romania = GraphProblem('Arad', 'Bucharest', Fig[3, 2])
 
+
 def test_breadth_first_tree_search():
-    assert breadth_first_tree_search(romania).solution() == ['Sibiu', 'Fagaras', 'Bucharest']
+    assert breadth_first_tree_search(romania).solution() == ['Sibiu',
+                                                             'Fagaras',
+                                                             'Bucharest']
+
 
 def test_breadth_first_search():
-    assert breadth_first_search(romania).solution() == ['Sibiu', 'Fagaras', 'Bucharest']
+    assert breadth_first_search(romania).solution() == ['Sibiu', 'Fagaras',
+                                                        'Bucharest']
+
 
 def test_uniform_cost_search():
-    assert uniform_cost_search(romania).solution() == ['Sibiu', 'Rimnicu', 'Pitesti', 'Bucharest']
+    assert uniform_cost_search(romania).solution() == ['Sibiu', 'Rimnicu',
+                                                       'Pitesti', 'Bucharest']
+
 
 def test_depth_first_graph_search():
     solution = depth_first_graph_search(romania).solution()
     assert solution[-1] == 'Bucharest'
 
+
 def test_iterative_deepening_search():
-    assert iterative_deepening_search(romania).solution() == ['Sibiu', 'Fagaras', 'Bucharest']
+    assert iterative_deepening_search(romania).solution() == ['Sibiu',
+                                                              'Fagaras',
+                                                              'Bucharest']
 
 if __name__ == '__main__':
     pytest.main()
diff --git a/tests/test_text.py b/tests/test_text.py
index 0b01545b5..391a381e0 100644
--- a/tests/test_text.py
+++ b/tests/test_text.py
@@ -1,8 +1,7 @@
 import pytest
-from text import *
+from text import *  # noqa
 
-from random import choice
 from math import isclose
 
 
@@ -58,21 +57,25 @@ def test_ngram_models():
     P3 = NgramTextModel(3, wordseq)
 
     # The most frequent entries in each model
-    assert P1.top(10) == [(2081, 'the'), (1479, 'of'), (1021, 'and'), (1008, 'to'), (850, 'a'),
-                          (722, 'i'), (640, 'in'), (478, 'that'), (399, 'is'), (348, 'you')]
-
-    assert P2.top(10) == [(368, ('of', 'the')), (152, ('to',
'the')), (152, ('in', 'the')), (86, ('of', 'a')), - (80, ('it', 'is')), (71, - ('by', 'the')), (68, ('for', 'the')), - (68, ('and', 'the')), (62, ('on', 'the')), (60, ('to', 'be'))] - - assert P3.top(10) == [(30, ('a', 'straight', 'line')), (19, ('of', 'three', 'dimensions')), - (16, ('the', 'sense', 'of')), (13, - ('by', 'the', 'sense')), - (13, ('as', 'well', 'as')), (12, - ('of', 'the', 'circles')), - (12, ('of', 'sight', 'recognition') - ), (11, ('the', 'number', 'of')), + assert P1.top(10) == [(2081, 'the'), (1479, 'of'), (1021, 'and'), + (1008, 'to'), (850, 'a'), (722, 'i'), (640, 'in'), + (478, 'that'), (399, 'is'), (348, 'you')] + + assert P2.top(10) == [(368, ('of', 'the')), (152, ('to', 'the')), + (152, ('in', 'the')), (86, ('of', 'a')), + (80, ('it', 'is')), + (71, ('by', 'the')), (68, ('for', 'the')), + (68, ('and', 'the')), (62, ('on', 'the')), + (60, ('to', 'be'))] + + assert P3.top(10) == [(30, ('a', 'straight', 'line')), + (19, ('of', 'three', 'dimensions')), + (16, ('the', 'sense', 'of')), + (13, ('by', 'the', 'sense')), + (13, ('as', 'well', 'as')), + (12, ('of', 'the', 'circles')), + (12, ('of', 'sight', 'recognition')), + (11, ('the', 'number', 'of')), (11, ('that', 'i', 'had')), (11, ('so', 'as', 'to'))] assert isclose(P1['the'], 0.0611, rel_tol=0.001) diff --git a/tests/test_utils.py b/tests/test_utils.py index cddfff4d8..6fa9ba5f4 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -1,5 +1,5 @@ import pytest -from utils import * +from utils import * # noqa def test_struct_initialization(): @@ -47,15 +47,14 @@ def test_product(): def test_find_if(): - assert find_if(callable, [1, 2, 3]) == None + assert find_if(callable, [1, 2, 3]) is None assert find_if(callable, [3, min, max]) == min def test_count_if(): assert count_if(callable, [42, None, max, min]) == 2 - is_odd = lambda x: x % 2 - assert count_if(is_odd, []) == 0 - assert count_if(is_odd, [1, 2, 3, 4, 5]) == 3 + assert count_if(lambda x: x, []) == 0 + assert count_if(lambda x: x % 2, [1, 2, 3, 4, 5]) == 3 def test_every(): @@ -70,8 +69,8 @@ def test_some(): def test_is_in(): e = [] - assert is_in(e, [1, e, 3]) == True - assert is_in(e, [1, [], 3]) == False + assert is_in(e, [1, e, 3]) is True + assert is_in(e, [1, [], 3]) is False def test_argmin(): @@ -102,9 +101,15 @@ def test_argmax_gen(): def test_histogram(): - assert histogram([1, 2, 4, 2, 4, 5, 7, 9, 2, 1]) == [(1, 2), (2, 3), (4, 2), (5, 1), (7, 1), (9, 1)] - assert histogram([1, 2, 4, 2, 4, 5, 7, 9, 2, 1], 0, lambda x: x*x) == [(1, 2), (4, 3), (16, 2), (25, 1), (49, 1), (81, 1)] - assert histogram([1, 2, 4, 2, 4, 5, 7, 9, 2, 1], 1) == [(2, 3), (4, 2), (1, 2), (9, 1), (7, 1), (5, 1)] + assert histogram([1, 2, 4, 2, 4, 5, 7, 9, 2, 1]) == [(1, 2), (2, 3), + (4, 2), (5, 1), + (7, 1), (9, 1)] + assert histogram([1, 2, 4, 2, 4, 5, 7, 9, 2, 1], 0, + lambda x: x*x) == [(1, 2), (4, 3), (16, 2), (25, 1), + (49, 1), (81, 1)] + assert histogram([1, 2, 4, 2, 4, 5, 7, 9, 2, 1], 1) == [(2, 3), (4, 2), + (1, 2), (9, 1), + (7, 1), (5, 1)] def test_dotproduct(): diff --git a/text.py b/text.py index 7559474a2..d4e48aa65 100644 --- a/text.py +++ b/text.py @@ -4,7 +4,7 @@ Then we show a very simple Information Retrieval system, and an example working on a tiny sample of Unix manual pages.""" -from utils import * +from utils import * # noqa from learning import CountingProbDist import search @@ -13,7 +13,6 @@ import re - class UnigramTextModel(CountingProbDist): """This is a discrete probability distribution over words, so you @@ -71,7 +70,7 @@ def samples(self, 
nwords): nminus1gram = nminus1gram[1:] + (wn,) return ' '.join(output) -#______________________________________________________________________________ +# ______________________________________________________________________________ def viterbi_segment(text, P): @@ -99,7 +98,7 @@ def viterbi_segment(text, P): return sequence, best[-1] -#______________________________________________________________________________ +# ______________________________________________________________________________ # TODO(tmrts): Expose raw index @@ -124,7 +123,8 @@ def index_collection(self, filenames): "Index a whole collection of files." prefix = os.path.dirname(__file__) for filename in filenames: - self.index_document(open(filename).read(), os.path.relpath(filename, prefix)) + self.index_document(open(filename).read(), + os.path.relpath(filename, prefix)) def index_document(self, text, url): "Index the text of a document." @@ -155,15 +155,16 @@ def query(self, query_text, n=10): def score(self, word, docid): "Compute a score for this word on this docid." # There are many options; here we take a very simple approach - return (math.log(1 + self.index[word][docid]) - / math.log(1 + self.documents[docid].nwords)) + return (math.log(1 + self.index[word][docid]) / + math.log(1 + self.documents[docid].nwords)) def present(self, results): "Present the results as a list." for (score, d) in results: doc = self.documents[d] print( - ("{:5.2}|{:25} | {}".format(100 * score, doc.url, doc.title[:45].expandtabs()))) + ("{:5.2}|{:25} | {}".format(100 * score, doc.url, + doc.title[:45].expandtabs()))) def present_results(self, query_text, n=10): "Get results for the query and present them." @@ -209,7 +210,7 @@ def canonicalize(text): return ' '.join(words(text)) -#______________________________________________________________________________ +# ______________________________________________________________________________ # Example application (not in book): decode a cipher. # A cipher is a code that substitutes one character for another. @@ -358,19 +359,19 @@ def actions(self, state): # Find the best p, plainchar = max([(self.decoder.P1[c], c) for c in alphabet if c not in state]) - succs = [extend(state, plainchar, cipherchar)] # ???? + succs = [extend(state, plainchar, cipherchar)] # ???? # noqa def goal_test(self, state): "We're done when we get all 26 letters assigned." return len(state) >= 26 -#______________________________________________________________________________ +# ______________________________________________________________________________ # TODO(tmrts): Set RNG seed to test random functions __doc__ += """ Random tests: -## Generate random text from the N-gram models +## Generate random text from the N-gram models # noqa >>> P1.samples(20) 'you thought known but were insides of see in depend by us dodecahedrons just but i words are instead degrees' diff --git a/utils.py b/utils.py index 0fe21d7ab..766aabbdf 100644 --- a/utils.py +++ b/utils.py @@ -1,4 +1,4 @@ -"""Provide some widely useful utilities. Safe for "from utils import *". +"""Provide some widely useful utilities. Safe for "from utils import *". # noqa TODO[COMPLETED]: Let's take the >>> doctest examples out of the docstrings, and put them in utils_test.py TODO: count_if and the like are leftovers from COmmon Lisp; let's make replace thenm with Pythonic alternatives. 
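On the docstring TODO just above (replacing the Common Lisp leftovers): count_if, find_if, every and some are thin wrappers over builtins, so the Pythonic equivalents are one-liners. A sketch of what the TODO is asking for (the variable names are illustrative; none of this is in the patch):

seq = [3, min, max]

count = sum(1 for x in seq if callable(x))           # count_if(callable, seq)
first = next((x for x in seq if callable(x)), None)  # find_if(callable, seq)
all_ok = all(callable(x) for x in seq)               # every(callable, seq)
any_ok = any(callable(x) for x in seq)               # some(callable, seq)

assert (count, first, all_ok, any_ok) == (2, min, False, True)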
@@ -6,17 +6,14 @@ TODO: Priority queues may not belong here -- see treatment in search.py
 """
 
-from grid import *
+from grid import *  # noqa
 
 import operator
-import math
 import random
 import os.path
 import bisect
-import re
 
-from functools import reduce
 
-#______________________________________________________________________________
+# ______________________________________________________________________________
 # Simple Data Structures: infinity, Dict, Struct
 
 infinity = float('inf')
@@ -37,7 +34,8 @@ def __cmp__(self, other):
         return self.__dict__ == other
 
     def __repr__(self):
-        args = ['{!s}={!s}'.format(k, repr(v))
-                for (k, v) in list(vars(self).items())]
+        args = ['{!s}={!s}'.format(k, repr(v))
+                for (k, v) in list(vars(self).items())]
+        return 'Struct({})'.format(', '.join(args))  # repr must be a str
 
 
@@ -50,7 +47,7 @@ def update(x, **entries):
     return x
 
 
-#______________________________________________________________________________
+# ______________________________________________________________________________
 # Functions on Sequences (mostly inspired by Common Lisp)
 # NOTE: Sequence functions (count_if, find_if, every, some) take function
 # argument first (like reduce, filter, and map).
@@ -103,15 +100,15 @@ def some(predicate, seq):
     return predicate(elem) if elem is not None else False
 
 
-# TODO[COMPLETED]: rename to is_in or possibily add 'identity' to function name to
-# clarify intent
+# TODO[COMPLETED]: rename to is_in or possibly add 'identity' to function
+# name to clarify intent
 def is_in(elt, seq):
     """Similar to (elt in seq), but compares with 'is', not '=='."""
     return any(x is elt for x in seq)
 
 
-#______________________________________________________________________________
+# ______________________________________________________________________________
 # Functions on sequences of numbers
 # NOTE: these take the sequence argument first, like min and max,
 # and like standard math notation: \sigma (i = 1..n) fn(i)
@@ -125,14 +122,18 @@ def argmin(seq, fn):
 
 
 def argmin_list(seq, fn):
-    """Return a list of elements of seq[i] with the lowest fn(seq[i]) scores.’"""
+    """Return a list of elements of seq[i] with
+    the lowest fn(seq[i]) scores.
+    """
     smallest_score = fn(min(seq, key=fn))
 
     return [elem for elem in seq if fn(elem) == smallest_score]
 
 
 def argmin_gen(seq, fn):
-    """Return a generator of elements of seq[i] with the lowest fn(seq[i]) scores."""
+    """Return a generator of elements of seq[i] with the
+    lowest fn(seq[i]) scores.
+    """
 
     smallest_score = fn(min(seq, key=fn))
@@ -146,20 +147,26 @@ def argmin_random_tie(seq, fn):
 
 
 def argmax(seq, fn):
-    """Return an element with highest fn(seq[i]) score; tie goes to first one."""
+    """Return an element with highest fn(seq[i]) score;
+    tie goes to first one.
+    """
     return max(seq, key=fn)
 
 
 def argmax_list(seq, fn):
     """Return a list of elements of seq[i] with the highest fn(seq[i]) scores.
-    Not good to use 'argmin_list(seq, lambda x: -fn(x))' as method breaks if fn is len"""
+    Not good to use 'argmin_list(seq, lambda x: -fn(x))' as method
+    breaks if fn is len
+    """
    largest_score = fn(max(seq, key=fn))
 
     return [elem for elem in seq if fn(elem) == largest_score]
 
 
 def argmax_gen(seq, fn):
-    """Return a generator of elements of seq[i] with the highest fn(seq[i]) scores."""
-    largest_score = fn(min(seq, key=fn))
+    """Return a generator of elements of seq[i] with
+    the highest fn(seq[i]) scores.
+    """
+    largest_score = fn(max(seq, key=fn))  # max, not min
 
     yield from (elem for elem in seq if fn(elem) == largest_score)
 
@@ -169,7 +176,7 @@ def argmax_random_tie(seq, fn):
     "Return an element with highest fn(seq[i]) score; break ties at random."
     return argmin_random_tie(seq, lambda x: -fn(x))
 
-#______________________________________________________________________________
+# ______________________________________________________________________________
 # Statistical and mathematical functions
 
@@ -185,13 +192,12 @@ def histogram(values, mode=0, bin_function=None):
         bins[val] = bins.get(val, 0) + 1
 
     if mode:
-        return sorted(list(bins.items()), key=lambda x: (x[1], x[0]), reverse=True)
+        return sorted(list(bins.items()), key=lambda x: (x[1], x[0]),
+                      reverse=True)
     else:
         return sorted(bins.items())
 
-from math import log2
-from statistics import mode, median, mean, stdev
-
+from statistics import mean  # noqa (still used via 'from utils import *')
 
 
 def dotproduct(X, Y):
     """Return the sum of the element-wise product of vectors x and y."""
@@ -227,7 +232,9 @@ def weighted_sampler(seq, weights):
 
 
 def num_or_str(x):
-    """The argument is a string; convert to a number if possible, or strip it."""
+    """The argument is a string; convert to a number if
+    possible, or strip it.
+    """
     try:
         return int(x)
     except ValueError:
@@ -248,7 +255,7 @@ def clip(x, lowest, highest):
     return max(lowest, min(x, highest))
 
 
-#______________________________________________________________________________
+# ______________________________________________________________________________
 # Misc Functions
 
@@ -261,7 +268,9 @@ def printf(format_str, *args):
 
 
 def caller(n=1):
-    """Return the name of the calling function n levels up in the frame stack."""
+    """Return the name of the calling function n levels up
+    in the frame stack.
+    """
     import inspect
     return inspect.getouterframes(inspect.currentframe())[n][3]
 
@@ -294,9 +303,9 @@ def memoized_fn(*args):
 
 def name(obj):
     "Try to find some reasonable name for the object."
-    return (getattr(obj, 'name', 0) or getattr(obj, '__name__', 0)
-            or getattr(getattr(obj, '__class__', 0), '__name__', 0)
-            or str(obj))
+    return (getattr(obj, 'name', 0) or getattr(obj, '__name__', 0) or
+            getattr(getattr(obj, '__class__', 0), '__name__', 0) or
+            str(obj))
 
 
 def isnumber(x):
@@ -313,8 +322,9 @@ def print_table(table, header=None, sep=' ', numfmt='%g'):
     """Print a list of lists as a table, so that columns line up nicely.
     header, if specified, will be printed as the first row.
     numfmt is the format for all numbers; you might want e.g. '%6.2f'.
-    (If you want different formats in different columns, don't use print_table.)
-    sep is the separator between columns."""
+    (If you want different formats in different columns,
+    don't use print_table.)
+    sep is the separator between columns."""
     justs = ['rjust' if isnumber(x) else 'ljust' for x in table[0]]
 
     if header:
@@ -323,14 +332,13 @@ def print_table(table, header=None, sep=' ', numfmt='%g'):
-    table = [[numfmt.format(x) if isnumber(x) else x for x in row]
+    table = [[numfmt % x if isnumber(x) else x for x in row]  # '%g'-style
              for row in table]
 
-    maxlen = lambda seq: max(list(map(len, seq)))
-
     sizes = list(
-        map(maxlen, list(zip(*[list(map(str, row)) for row in table]))))
+        map(lambda seq: max(list(map(len, seq))),
+            list(zip(*[list(map(str, row)) for row in table]))))
 
     for row in table:
-        print(sep.join(getattr(str(x), j)(size)
-                       for (j, size, x) in zip(justs, sizes, row)))
+        print(sep.join(getattr(
+            str(x), j)(size) for (j, size, x) in zip(justs, sizes, row)))
 
 
 def AIMAFile(components, mode='r'):
@@ -351,7 +359,7 @@ def unimplemented():
     "Use this as a stub for not-yet-implemented functions."
     raise NotImplementedError
 
-#______________________________________________________________________________
+# ______________________________________________________________________________
 # Queues: Stack, FIFOQueue, PriorityQueue
 
 # TODO: Use queue.Queue
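On that closing TODO: Stack and FIFOQueue map directly onto list and collections.deque, but the search code mainly needs the priority queue, and queue.PriorityQueue requires the queued items themselves to be comparable. A minimal heapq-based sketch of a replacement (the class name and the counter-based tie-breaking are suggestions, not part of the patch):

import heapq
import itertools


class HeapPriorityQueue:
    "Priority queue on heapq; a counter breaks ties so items need not compare."

    def __init__(self, key=lambda x: x):
        self.key = key
        self.heap = []
        self.counter = itertools.count()  # strictly increasing tie-breaker

    def append(self, item):
        heapq.heappush(self.heap, (self.key(item), next(self.counter), item))

    def pop(self):
        return heapq.heappop(self.heap)[2]  # drop the priority and counter

    def __len__(self):
        return len(self.heap)


q = HeapPriorityQueue(key=len)
for city in ['Bucharest', 'Sibiu', 'Arad']:
    q.append(city)
assert q.pop() == 'Arad'  # the shortest name has the lowest key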