Commit 3ff5f40

antmarakis authored and norvig committed

Fix Randomised Testing (aimacode#652)

* add failure_test method to utils
* comment fix
* Update test_learning.py
* Update test_csp.py

1 parent 80190fd commit 3ff5f40

File tree: 4 files changed, +29 -8 lines

  learning.py
  tests/test_csp.py
  tests/test_learning.py
  utils.py

learning.py

Lines changed: 2 additions & 2 deletions

@@ -984,8 +984,8 @@ def flatten(seqs): return sum(seqs, [])
 
 
 def err_ratio(predict, dataset, examples=None, verbose=0):
-    """Return the proportion of the examples that are NOT correctly predicted."""
-    """verbose - 0: No output; 1: Output wrong; 2 (or greater): Output correct"""
+    """Return the proportion of the examples that are NOT correctly predicted.
+    verbose - 0: No output; 1: Output wrong; 2 (or greater): Output correct"""
     if examples is None:
         examples = dataset.examples
     if len(examples) == 0:

tests/test_csp.py

Lines changed: 10 additions & 3 deletions

@@ -1,5 +1,10 @@
 import pytest
+from utils import failure_test
 from csp import *
+import random
+
+
+random.seed("aima-python")
 
 
 def test_csp_assign():

@@ -331,10 +336,12 @@ def test_backtracking_search():
 
 
 def test_min_conflicts():
-    random.seed("aima-python")
     assert min_conflicts(australia)
-    assert min_conflicts(usa)
     assert min_conflicts(france)
+
+    tests = [(usa, None)] * 3
+    assert failure_test(min_conflicts, tests) > 1/3
+
     australia_impossible = MapColoringCSP(list('RG'), 'SA: WA NT Q NSW V; NT: WA Q; NSW: Q V; T: ')
     assert min_conflicts(australia_impossible, 1000) is None
 

@@ -351,7 +358,7 @@ def test_parse_neighbours():
 def test_topological_sort():
     root = 'NT'
     Sort, Parents = topological_sort(australia,root)
-
+
     assert Sort == ['NT','SA','Q','NSW','V','WA']
     assert Parents['NT'] == None
    assert Parents['SA'] == 'NT'
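The intent of the new min_conflicts assertion: [(usa, None)] * 3 schedules three independent runs on the usa map-colouring CSP, with None declared as the failure output, and failure_test returns the fraction of runs that did not return None. Requiring that fraction to exceed 1/3 means at least two of the three runs must find a solution, so a single unlucky randomised run no longer breaks the suite. A rough sketch of the same check written out by hand (illustrative only, not the test's actual code; assumes it runs inside the aima-python repo so csp is importable):

# Sketch of what failure_test(min_conflicts, [(usa, None)] * 3) > 1/3 checks.
from csp import min_conflicts, usa

runs = 3
successes = sum(1 for _ in range(runs) if min_conflicts(usa) is not None)
assert successes / runs > 1/3   # i.e. at least 2 of the 3 runs must solve the CSP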

tests/test_learning.py

Lines changed: 7 additions & 3 deletions

@@ -168,9 +168,13 @@ def test_decision_tree_learner():
 def test_random_forest():
     iris = DataSet(name="iris")
     rF = RandomForest(iris)
-    assert rF([5, 3, 1, 0.1]) == "setosa"
-    assert rF([6, 5, 3, 1]) == "versicolor"
-    assert rF([7.5, 4, 6, 2]) == "virginica"
+    tests = [([5.0, 3.0, 1.0, 0.1], "setosa"),
+             ([5.1, 3.3, 1.1, 0.1], "setosa"),
+             ([6.0, 5.0, 3.0, 1.0], "versicolor"),
+             ([6.1, 2.2, 3.5, 1.0], "versicolor"),
+             ([7.5, 4.1, 6.2, 2.3], "virginica"),
+             ([7.3, 3.7, 6.1, 2.5], "virginica")]
+    assert grade_learner(rF, tests) >= 1/3
 
 
 def test_neural_network_learner():
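grade_learner is the counterpart of failure_test for cases where the expected output is known: it already lives in learning.py (this commit does not touch it) and, as used here, scores the random forest by the fraction of (input, expected_class) pairs it classifies correctly, so the forest only has to get at least a third of the six iris examples right. A hedged sketch of that grading logic follows; it mirrors the behaviour the test relies on, not necessarily the exact source in learning.py:

from statistics import mean

def grade_learner(predict, tests):
    # Fraction of (example, expected_output) pairs the learner predicts correctly.
    return mean(int(predict(X) == y) for X, y in tests)

# Toy check: a constant "setosa" predictor gets 2 of the 6 cases above right,
# which is exactly the 1/3 threshold used by test_random_forest.
tests = [([5.0, 3.0, 1.0, 0.1], "setosa"), ([5.1, 3.3, 1.1, 0.1], "setosa"),
         ([6.0, 5.0, 3.0, 1.0], "versicolor"), ([6.1, 2.2, 3.5, 1.0], "versicolor"),
         ([7.5, 4.1, 6.2, 2.3], "virginica"), ([7.3, 3.7, 6.1, 2.5], "virginica")]
assert grade_learner(lambda x: "setosa", tests) >= 1/3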

utils.py

Lines changed: 10 additions & 0 deletions

@@ -416,6 +416,16 @@ def open_data(name, mode='r'):
     return open(aima_file)
 
 
+def failure_test(algorithm, tests):
+    """Grades the given algorithm based on how many tests it passes.
+    Most algorithms have arbitrary output on correct execution, which is difficult
+    to check for correctness. On the other hand, a lot of algorithms output something
+    particular on fail (for example, False, or None).
+    tests is a list with each element in the form: (values, failure_output)."""
+    from statistics import mean
+    return mean(int(algorithm(x) != y) for x, y in tests)
+
+
 # ______________________________________________________________________________
 # Expressions
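As a quick illustration of the grading scheme failure_test implements, here is a self-contained toy run: a randomised "solver" that returns None (its failure output) about half the time should score near 0.5. The flaky_solver name and the inline copy of the utility are purely hypothetical; inside the repo you would import failure_test from utils instead.

import random
from statistics import mean

def failure_test(algorithm, tests):
    # Same logic as the utility added above: fraction of runs whose output
    # is NOT the declared failure output.
    return mean(int(algorithm(x) != y) for x, y in tests)

def flaky_solver(problem):
    # Hypothetical randomised algorithm: succeeds ~50% of the time, else None.
    return problem if random.random() < 0.5 else None

tests = [("some-problem", None)] * 1000   # 1000 runs, failure output is None
print(failure_test(flaky_solver, tests))  # close to 0.5 on average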
