Commit 0879c4b

antmarakis authored and norvig committed
Learning: Grading Learners (aimacode#499)
* Update learning.py
* Update test_learning.py
* Update test_learning.py

1 parent b009d1f · commit 0879c4b

File tree

learning.py
tests/test_learning.py

2 files changed: 24 additions, 19 deletions


learning.py

Lines changed: 10 additions & 13 deletions
@@ -806,8 +806,9 @@ def flatten(seqs): return sum(seqs, [])
 # Functions for testing learners on examples


-def test(predict, dataset, examples=None, verbose=0):
+def err_ratio(predict, dataset, examples=None, verbose=0):
     """Return the proportion of the examples that are NOT correctly predicted."""
+    """verbose - 0: No output; 1: Output wrong; 2 (or greater): Output correct"""
     if examples is None:
         examples = dataset.examples
     if len(examples) == 0:
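In substance, err_ratio is the old test function under a clearer name: it returns the fraction of the given examples that the hypothesis gets wrong. A minimal standalone sketch of the same idea, with the DataSet plumbing and the verbose flag stripped out (the names here are illustrative, not repo code):

def err_ratio_sketch(predict, labelled_examples):
    """labelled_examples is a list of (inputs, desired_output) pairs."""
    if not labelled_examples:
        return 0.0
    right = sum(predict(x) == y for x, y in labelled_examples)
    return 1 - right / len(labelled_examples)

# A constant classifier that always answers 0 gets half of these wrong:
# err_ratio_sketch(lambda x: 0, [([1], 0), ([2], 1)])  ->  0.5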
@@ -826,6 +827,12 @@ def test(predict, dataset, examples=None, verbose=0):
     return 1 - (right / len(examples))


+def grade_learner(predict, tests):
+    """Grades the given learner based on how many tests it passes.
+    tests is a list with each element in the form: (values, output)."""
+    return mean(int(predict(X) == y) for X, y in tests)
+
+
 def train_and_test(dataset, start, end):
     """Reserve dataset.examples[start:end] for test; train on the remainder."""
     start = int(start)
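The new grade_learner returns the fraction of probe tests passed (a value between 0 and 1) instead of the raw count returned by the old version deleted further down in this diff; that is why the assertions in test_learning.py change from integer thresholds to ratios. A self-contained illustration, assuming mean resolves to an ordinary arithmetic mean such as statistics.mean (learning.py supplies its own import):

from statistics import mean

def grade_learner(predict, tests):
    """Grades the given learner based on how many tests it passes.
    tests is a list with each element in the form: (values, output)."""
    return mean(int(predict(X) == y) for X, y in tests)

# Toy predictor (not from the repo): label an example by its first attribute.
predict = lambda X: 0 if X[0] < 6 else 2
tests = [([5, 3, 1, 0.1], 0), ([6, 3, 4, 1.1], 1), ([7.5, 4, 6, 2], 2)]
print(grade_learner(predict, tests))  # 2 of 3 probes pass -> 0.666...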
@@ -863,8 +870,8 @@ def cross_validation(learner, size, dataset, k=10, trials=1):
                                               (fold + 1) * (n / k))
         dataset.examples = train_data
         h = learner(dataset, size)
-        fold_errT += test(h, dataset, train_data)
-        fold_errV += test(h, dataset, val_data)
+        fold_errT += err_ratio(h, dataset, train_data)
+        fold_errV += err_ratio(h, dataset, val_data)
         # Reverting back to original once test is completed
         dataset.examples = examples
     return fold_errT / k, fold_errV / k
@@ -908,16 +915,6 @@ def score(learner, size):
     return [(size, mean([score(learner, size) for t in range(trials)]))
             for size in sizes]

-
-def grade_learner(predict, tests):
-    """Grades the given learner based on how many tests it passes.
-    tests is a list with each element in the form: (values, output)."""
-    correct = 0
-    for t in tests:
-        if predict(t[0]) == t[1]:
-            correct += 1
-    return correct
-
 # ______________________________________________________________________________
 # The rest of this file gives datasets for machine learning problems.


tests/test_learning.py

Lines changed: 14 additions & 6 deletions
@@ -1,7 +1,7 @@
 from learning import parse_csv, weighted_mode, weighted_replicate, DataSet, \
     PluralityLearner, NaiveBayesLearner, NearestNeighborLearner, \
     NeuralNetLearner, PerceptronLearner, DecisionTreeLearner, \
-    euclidean_distance, grade_learner
+    euclidean_distance, grade_learner, err_ratio
 from utils import DataFile


@@ -76,10 +76,14 @@ def test_neural_network_learner():

     nNL = NeuralNetLearner(iris, [5], 0.15, 75)
     tests = [([5, 3, 1, 0.1], 0),
-             ([6, 3, 3, 1.5], 1),
-             ([7.5, 4, 6, 2], 2)]
+             ([5, 3.5, 1, 0], 0),
+             ([6, 3, 4, 1.1], 1),
+             ([6, 2, 3.5, 1], 1),
+             ([7.5, 4, 6, 2], 2),
+             ([7, 3, 6, 2.5], 2)]

-    assert grade_learner(nNL, tests) >= 2
+    assert grade_learner(nNL, tests) >= 2/3
+    assert err_ratio(nNL, iris) < 0.25


 def test_perceptron():
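Read against the six probe examples above, the new thresholds are deliberately loose: a grade of at least 2/3 means at least four of the six probes must be classified correctly, and an error ratio below 0.25 means fewer than a quarter of the iris examples are misclassified. A rough back-of-the-envelope check (illustrative numbers only; the iris dataset has 150 examples):

probes = 6
min_correct = probes * (2 / 3)     # grade_learner(nNL, tests) >= 2/3  ->  at least 4 probes right
iris_examples = 150
max_wrong = iris_examples * 0.25   # err_ratio(nNL, iris) < 0.25  ->  fewer than ~38 examples wrong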
@@ -90,7 +94,11 @@ def test_perceptron():

     perceptron = PerceptronLearner(iris)
     tests = [([5, 3, 1, 0.1], 0),
+             ([5, 3.5, 1, 0], 0),
              ([6, 3, 4, 1.1], 1),
-             ([7.5, 4, 6, 2], 2)]
+             ([6, 2, 3.5, 1], 1),
+             ([7.5, 4, 6, 2], 2),
+             ([7, 3, 6, 2.5], 2)]

-    assert grade_learner(perceptron, tests) >= 2
+    assert grade_learner(perceptron, tests) > 1/2
+    assert err_ratio(perceptron, iris) < 0.4
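Outside the test suite, the two graders can be used directly. A usage sketch along the lines of the tests above (it assumes the aima-data iris CSV is available, as the tests do):

from learning import DataSet, PerceptronLearner, grade_learner, err_ratio

iris = DataSet(name="iris")               # loads the iris data file, as in the tests
perceptron = PerceptronLearner(iris)

probes = [([5, 3, 1, 0.1], 0),
          ([6, 3, 4, 1.1], 1),
          ([7.5, 4, 6, 2], 2)]

print(grade_learner(perceptron, probes))  # fraction of probes answered correctly
print(err_ratio(perceptron, iris))        # fraction of iris examples misclassified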
