Fix flake8 for test files #303

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged · 2 commits · Mar 7, 2017
3 changes: 3 additions & 0 deletions .flake8
@@ -0,0 +1,3 @@
+[flake8]
+max-line-length = 100
+ignore = E121,E123,E126,E221,E222,E225,E226,E242,E701,E702,E704,E731,W503,F405
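The new `.flake8` raises the line limit to 100 columns and silences checks the codebase deliberately tolerates (for example E731 lambda assignments, E226 tight arithmetic spacing, and F405 for names that may come from star imports). As an illustrative sketch, not part of the PR, both lines below would normally be flagged but pass under this config:

    square = lambda x: x * x  # E731 (lambda assignment) is on the ignore list
    area = 3.14*2*2           # E226 (tight spacing around '*') is ignored too
    print(square(4), area)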
2 changes: 1 addition & 1 deletion tests/test_csp.py
@@ -1,5 +1,5 @@
 import pytest
-from csp import * #noqa
+from csp import *  # noqa


 def test_csp_assign():
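Adding the space turns `#noqa` into a well-formed inline comment (E262 wants inline comments to start with `# `) while still suppressing the F403 star-import warning on that line; the F405 entry in the config above then covers the names the star import provides. Recent flake8 releases also accept a code-qualified form, sketched here rather than taken from the PR:

    # Narrower suppression: silence only the star-import warning on this line.
    from csp import *  # noqa: F403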
1 change: 1 addition & 0 deletions tests/test_grid.py
@@ -17,5 +17,6 @@ def test_distance2():
 def test_vector_clip():
     assert vector_clip((-1, 10), (0, 0), (9, 9)) == (0, 9)

+
 if __name__ == '__main__':
     pytest.main()
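Many hunks in this PR only add blank lines: pycodestyle's E302 expects two blank lines before a top-level `def`, and E305 expects two after a function body before other top-level code, such as the `if __name__` block above. A minimal sketch of the spacing the checker wants, with hypothetical names:

    def first():
        return 1


    def second():  # two blank lines above this def satisfy E302
        return 2


    total = first() + second()  # and E305 covers top-level code after a def
    print(total)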
12 changes: 7 additions & 5 deletions tests/test_learning.py
@@ -1,12 +1,11 @@
 import pytest
 from learning import parse_csv, weighted_mode, weighted_replicate, DataSet, \
     PluralityLearner, NaiveBayesLearner, NearestNeighborLearner
 from utils import DataFile


 def test_parse_csv():
     Iris = DataFile('iris.csv').read()
-    assert parse_csv(Iris)[0] == [5.1,3.5,1.4,0.2,'setosa']
+    assert parse_csv(Iris)[0] == [5.1, 3.5, 1.4, 0.2, 'setosa']


 def test_weighted_mode():
@@ -16,20 +15,23 @@ def test_weighted_mode():
 def test_weighted_replicate():
     assert weighted_replicate('ABC', [1, 2, 1], 4) == ['A', 'B', 'B', 'C']

+
 def test_plurality_learner():
     zoo = DataSet(name="zoo")

     pL = PluralityLearner(zoo)
     assert pL([]) == "mammal"

+
 def test_naive_bayes():
     iris = DataSet(name="iris")

     nB = NaiveBayesLearner(iris)
-    assert nB([5,3,1,0.1]) == "setosa"
+    assert nB([5, 3, 1, 0.1]) == "setosa"

+
 def test_k_nearest_neighbors():
     iris = DataSet(name="iris")

-    kNN = NearestNeighborLearner(iris,k=3)
-    assert kNN([5,3,1,0.1]) == "setosa"
+    kNN = NearestNeighborLearner(iris, k=3)
+    assert kNN([5, 3, 1, 0.1]) == "setosa"
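The comma edits in this file are E231 fixes (missing whitespace after a comma). An illustrative one-liner, not from the PR:

    nums = [5,3,1]    # flagged: E231 after each tight comma
    nums = [5, 3, 1]  # clean under E231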
14 changes: 7 additions & 7 deletions tests/test_logic.py
@@ -1,6 +1,6 @@
 import pytest
-from logic import *
-from utils import expr_handle_infix_ops, count
+from logic import *  # noqa
+from utils import expr_handle_infix_ops, count, Symbol


 def test_expr():
@@ -56,10 +56,10 @@ def test_KB_wumpus():
     assert kb_wumpus.ask(~P[1, 2]) == {}

     # Statement: There is a pit in [2,2].
-    assert kb_wumpus.ask(P[2, 2]) == False
+    assert kb_wumpus.ask(P[2, 2]) is False

     # Statement: There is a pit in [3,1].
-    assert kb_wumpus.ask(P[3, 1]) == False
+    assert kb_wumpus.ask(P[3, 1]) is False

     # Statement: Neither [1,2] nor [2,1] contains a pit.
     assert kb_wumpus.ask(~P[1, 2] & ~P[2, 1]) == {}
@@ -112,7 +112,7 @@ def test_dpll():
             & (~D | ~F) & (B | ~C | D) & (A | ~E | F) & (~A | E | D))
             == {B: False, C: True, A: True, F: False, D: True, E: False})
     assert dpll_satisfiable(A & ~B) == {A: True, B: False}
-    assert dpll_satisfiable(P & ~P) == False
+    assert dpll_satisfiable(P & ~P) is False


 def test_unify():
@@ -159,7 +159,7 @@ def test_move_not_inwards():
 def test_to_cnf():
     assert (repr(to_cnf(wumpus_world_inference & ~expr('~P12'))) ==
             "((~P12 | B11) & (~P21 | B11) & (P12 | P21 | ~B11) & ~B11 & P12)")
-    assert repr(to_cnf((P&Q) | (~P & ~Q))) == '((~P | P) & (~Q | P) & (~P | Q) & (~Q | Q))'
+    assert repr(to_cnf((P & Q) | (~P & ~Q))) == '((~P | P) & (~Q | P) & (~P | Q) & (~Q | Q))'
     assert repr(to_cnf("B <=> (P1 | P2)")) == '((~P1 | B) & (~P2 | B) & (P1 | P2 | ~B))'
     assert repr(to_cnf("a | (b & c) | d")) == '((b | a | d) & (c | a | d))'
     assert repr(to_cnf("A & (B | (D & E))")) == '(A & (D | B) & (E | B))'
@@ -169,7 +169,7 @@ def test_to_cnf():
 def test_standardize_variables():
     e = expr('F(a, b, c) & G(c, A, 23)')
     assert len(variables(standardize_variables(e))) == 3
-    #assert variables(e).intersection(variables(standardize_variables(e))) == {}
+    # assert variables(e).intersection(variables(standardize_variables(e))) == {}
     assert is_variable(standardize_variables(expr('x')))

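The `== False` rewrites in this file fix E712 (comparison to `False`; use `is False`). Identity is also the more precise test here: `ask` returns a substitution dict on success, where the empty dict `{}` means proved with no bindings required, and `False` on failure, so the result should not be judged by truthiness. A small sketch of the distinction, not from the PR:

    result = {}                   # a successful proof with no bindings
    assert bool(result) is False  # truthiness alone would misread success
    assert result is not False    # identity comparison reads it correctly
    assert (0 == False) is True   # the coercion pitfall E712 guards against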
30 changes: 16 additions & 14 deletions tests/test_mdp.py
@@ -1,25 +1,27 @@
 import pytest
 from mdp import *  # noqa


 def test_value_iteration():
-    assert value_iteration(sequential_decision_environment, .01) == {(3, 2): 1.0, (3, 1): -1.0,
-                                                                     (3, 0): 0.12958868267972745, (0, 1): 0.39810203830605462,
-                                                                     (0, 2): 0.50928545646220924, (1, 0): 0.25348746162470537,
-                                                                     (0, 0): 0.29543540628363629, (1, 2): 0.64958064617168676,
-                                                                     (2, 0): 0.34461306281476806, (2, 1): 0.48643676237737926,
-                                                                     (2, 2): 0.79536093684710951}
+    assert value_iteration(sequential_decision_environment, .01) == {
+        (3, 2): 1.0, (3, 1): -1.0,
+        (3, 0): 0.12958868267972745, (0, 1): 0.39810203830605462,
+        (0, 2): 0.50928545646220924, (1, 0): 0.25348746162470537,
+        (0, 0): 0.29543540628363629, (1, 2): 0.64958064617168676,
+        (2, 0): 0.34461306281476806, (2, 1): 0.48643676237737926,
+        (2, 2): 0.79536093684710951}


 def test_policy_iteration():
-    assert policy_iteration(sequential_decision_environment) == {(0, 0): (0, 1), (0, 1): (0, 1), (0, 2): (1, 0),
-                                                                 (1, 0): (1, 0), (1, 2): (1, 0),
-                                                                 (2, 0): (0, 1), (2, 1): (0, 1), (2, 2): (1, 0),
-                                                                 (3, 0): (-1, 0), (3, 1): None, (3, 2): None}
+    assert policy_iteration(sequential_decision_environment) == {
+        (0, 0): (0, 1), (0, 1): (0, 1), (0, 2): (1, 0),
+        (1, 0): (1, 0), (1, 2): (1, 0), (2, 0): (0, 1),
+        (2, 1): (0, 1), (2, 2): (1, 0), (3, 0): (-1, 0),
+        (3, 1): None, (3, 2): None}


 def test_best_policy():
-    pi = best_policy(sequential_decision_environment, value_iteration(sequential_decision_environment, .01))
+    pi = best_policy(sequential_decision_environment,
+                     value_iteration(sequential_decision_environment, .01))
     assert sequential_decision_environment.to_arrows(pi) == [['>', '>', '>', '.'],
                                                              ['^', None, '^', '.'],
                                                              ['^', '>', '^', '<']]
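These hunks are pure E501 fixes against the 100-column limit: rather than aligning continuation lines under the opening brace, which pushes them far past the margin, the dict literal is opened at the end of the first line and its body indented one step, relying on Python's implicit line continuation inside brackets. A minimal sketch of the pattern with a hypothetical literal:

    # Deep alignment under '{' overflows long lines; a hanging indent stays short:
    utilities = {
        (3, 2): 1.0,
        (3, 1): -1.0,
        (3, 0): 0.12958868267972745,
    }
    print(utilities)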
64 changes: 38 additions & 26 deletions tests/test_nlp.py
@@ -1,12 +1,13 @@
 import pytest
 import nlp
-from nlp import loadPageHTML, stripRawHTML, determineInlinks, findOutlinks, onlyWikipediaURLS
+from nlp import loadPageHTML, stripRawHTML, findOutlinks, onlyWikipediaURLS
 from nlp import expand_pages, relevant_pages, normalize, ConvergenceDetector, getInlinks
-from nlp import getOutlinks, Page, HITS
+from nlp import getOutlinks, Page
 from nlp import Rules, Lexicon
 # Clumsy imports because we want to access certain nlp.py globals explicitly, because
 # they are accessed by function's within nlp.py

+
 def test_rules():
     assert Rules(A="B C | D E") == {'A': [['B', 'C'], ['D', 'E']]}

@@ -27,18 +28,18 @@ def test_lexicon():
             href="/wiki/TestLiving" href="/wiki/TestMan" >"""
 testHTML2 = "Nothing"

-pA = Page("A", 1, 6, ["B","C","E"],["D"])
-pB = Page("B", 2, 5, ["E"],["A","C","D"])
-pC = Page("C", 3, 4, ["B","E"],["A","D"])
-pD = Page("D", 4, 3, ["A","B","C","E"],[])
-pE = Page("E", 5, 2, [],["A","B","C","D","F"])
-pF = Page("F", 6, 1, ["E"],[])
-pageDict = {pA.address:pA,pB.address:pB,pC.address:pC,
-            pD.address:pD,pE.address:pE,pF.address:pF}
+pA = Page("A", 1, 6, ["B", "C", "E"], ["D"])
+pB = Page("B", 2, 5, ["E"], ["A", "C", "D"])
+pC = Page("C", 3, 4, ["B", "E"], ["A", "D"])
+pD = Page("D", 4, 3, ["A", "B", "C", "E"], [])
+pE = Page("E", 5, 2, [], ["A", "B", "C", "D", "F"])
+pF = Page("F", 6, 1, ["E"], [])
+pageDict = {pA.address: pA, pB.address: pB, pC.address: pC,
+            pD.address: pD, pE.address: pE, pF.address: pF}
 nlp.pagesIndex = pageDict
-nlp.pagesContent ={pA.address:testHTML,pB.address:testHTML2,
-                   pC.address:testHTML,pD.address:testHTML2,
-                   pE.address:testHTML,pF.address:testHTML2}
+nlp.pagesContent ={pA.address: testHTML, pB.address: testHTML2,
+                   pC.address: testHTML, pD.address: testHTML2,
+                   pE.address: testHTML, pF.address: testHTML2}

 # This test takes a long time (> 60 secs)
 # def test_loadPageHTML():
@@ -50,17 +51,20 @@ def test_lexicon():
 # assert all(x in loadedPages for x in fullURLs)
 # assert all(loadedPages.get(key,"") != "" for key in addresses)

+
 def test_stripRawHTML():
     addr = "https://en.wikipedia.org/wiki/Ethics"
     aPage = loadPageHTML([addr])
     someHTML = aPage[addr]
     strippedHTML = stripRawHTML(someHTML)
     assert "<head>" not in strippedHTML and "</head>" not in strippedHTML

+
 def test_determineInlinks():
     # TODO
     assert True

+
 def test_findOutlinks_wiki():
     testPage = pageDict[pA.address]
     outlinks = findOutlinks(testPage, handleURLs=onlyWikipediaURLS)
@@ -70,35 +74,39 @@ def test_findOutlinks_wiki():
 # ______________________________________________________________________________
 # HITS Helper Functions

+
 def test_expand_pages():
     pages = {k: pageDict[k] for k in ('F')}
-    pagesTwo = {k: pageDict[k] for k in ('A','E')}
+    pagesTwo = {k: pageDict[k] for k in ('A', 'E')}
     expanded_pages = expand_pages(pages)
-    assert all(x in expanded_pages for x in ['F','E'])
-    assert all(x not in expanded_pages for x in ['A','B','C','D'])
+    assert all(x in expanded_pages for x in ['F', 'E'])
+    assert all(x not in expanded_pages for x in ['A', 'B', 'C', 'D'])
     expanded_pages = expand_pages(pagesTwo)
     print(expanded_pages)
-    assert all(x in expanded_pages for x in ['A','B','C','D','E','F'])
+    assert all(x in expanded_pages for x in ['A', 'B', 'C', 'D', 'E', 'F'])

+
 def test_relevant_pages():
     pages = relevant_pages("male")
-    assert all((x in pages.keys()) for x in ['A','C','E'])
-    assert all((x not in pages) for x in ['B','D','F'])
+    assert all((x in pages.keys()) for x in ['A', 'C', 'E'])
+    assert all((x not in pages) for x in ['B', 'D', 'F'])

+
 def test_normalize():
-    normalize( pageDict )
-    print(page.hub for addr,page in nlp.pagesIndex.items())
-    expected_hub = [1/91,2/91,3/91,4/91,5/91,6/91] # Works only for sample data above
+    normalize(pageDict)
+    print(page.hub for addr, page in nlp.pagesIndex.items())
+    expected_hub = [1/91, 2/91, 3/91, 4/91, 5/91, 6/91]  # Works only for sample data above
     expected_auth = list(reversed(expected_hub))
     assert len(expected_hub) == len(expected_auth) == len(nlp.pagesIndex)
-    assert expected_hub == [page.hub for addr,page in sorted(nlp.pagesIndex.items())]
-    assert expected_auth == [page.authority for addr,page in sorted(nlp.pagesIndex.items())]
+    assert expected_hub == [page.hub for addr, page in sorted(nlp.pagesIndex.items())]
+    assert expected_auth == [page.authority for addr, page in sorted(nlp.pagesIndex.items())]

+
 def test_detectConvergence():
     # run detectConvergence once to initialise history
     convergence = ConvergenceDetector()
     convergence()
-    assert convergence() # values haven't changed so should return True
+    assert convergence()  # values haven't changed so should return True
     # make tiny increase/decrease to all values
     for _, page in nlp.pagesIndex.items():
         page.hub += 0.0003
@@ -111,17 +119,21 @@ def test_detectConvergence():
     # retest function with values. Should now return false
     assert not convergence()

+
 def test_getInlinks():
     inlnks = getInlinks(pageDict['A'])
     assert sorted([page.address for page in inlnks]) == pageDict['A'].inlinks

+
 def test_getOutlinks():
     outlnks = getOutlinks(pageDict['A'])
     assert sorted([page.address for page in outlnks]) == pageDict['A'].outlinks

+
 def test_HITS():
     # TODO
-    assert True # leave for now
+    assert True  # leave for now

+
 if __name__ == '__main__':
     pytest.main()
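The import trims at the top of this file fix pyflakes' F401 (imported but unused): `determineInlinks` and `HITS` were dropped from the explicit import lists because nothing in the tests calls them directly. A sketch of the rule on a hypothetical module:

    import os   # F401: reported if 'os' is never used below
    import sys

    print(sys.argv)  # 'sys' is used, so only the 'os' import is flagged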
20 changes: 11 additions & 9 deletions tests/test_planning.py
@@ -1,14 +1,12 @@
from planning import *
from planning import * # noqa
from utils import expr
from logic import FolKB


def test_action():
precond = [[expr("P(x)"), expr("Q(y, z)")]
,[expr("Q(x)")]]
effect = [[expr("Q(x)")]
, [expr("P(x)")]]
a=Action(expr("A(x,y,z)"),precond, effect)
precond = [[expr("P(x)"), expr("Q(y, z)")], [expr("Q(x)")]]
effect = [[expr("Q(x)")], [expr("P(x)")]]
a=Action(expr("A(x,y,z)"), precond, effect)
args = [expr("A"), expr("B"), expr("C")]
assert a.substitute(expr("P(x, z, y)"), args) == expr("P(A, C, B)")
test_kb = FolKB([expr("P(A)"), expr("Q(B, C)"), expr("R(D)")])
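Besides the `# noqa`, this hunk folds the comma-first continuation style into single-line literals. The short nested lists fit comfortably under the 100-column limit, and comma-leading wraps are the kind of layout pycodestyle's continuation-line checks (the E12x family) tend to complain about, likely why E121/E123/E126 sit in the ignore list. A hypothetical before/after sketch:

    # before (comma-first wrap):
    # xs = [[1, 2]
    #       , [3]]
    xs = [[1, 2], [3]]  # after: one line per literal
    print(xs)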
@@ -34,7 +32,8 @@ def test_air_cargo():
         p.act(action)

     assert p.goal_test()

+
 def test_spare_tire():
     p = spare_tire()
     assert p.goal_test() is False
@@ -44,9 +43,10 @@ def test_spare_tire():

     for action in solution:
         p.act(action)

     assert p.goal_test()

+
 def test_three_block_tower():
     p = three_block_tower()
     assert p.goal_test() is False
@@ -56,9 +56,10 @@ def test_three_block_tower():

     for action in solution:
         p.act(action)

     assert p.goal_test()

+
 def test_have_cake_and_eat_cake_too():
     p = have_cake_and_eat_cake_too()
     assert p.goal_test() is False
@@ -70,6 +71,7 @@ def test_have_cake_and_eat_cake_too():

     assert p.goal_test()

+
 def test_graph_call():
     pdll = spare_tire()
     negkb = FolKB([expr('At(Flat, Trunk)')])