import pytest

from rl import *
from mdp import sequential_decision_environment


# Action vectors for the 4x3 grid world, written as (dx, dy).
north = (0, 1)
south = (0, -1)
west = (-1, 0)
east = (1, 0)

# Policy to be evaluated by the passive agents: terminal states map to None,
# and the obstacle square (1, 1) has no entry.
policy = {
    (0, 2): east, (1, 2): east, (2, 2): east, (3, 2): None,
    (0, 1): north, (2, 1): north, (3, 1): None,
    (0, 0): north, (1, 0): west, (2, 0): west, (3, 0): west,
}
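
# Grid layout for reference, assuming the standard 4x3 GridMDP defined in mdp
# ((0, 0) is the bottom-left square, ### the obstacle, +1/-1 the terminals):
#
#     (0,2) (1,2) (2,2) (3,2)=+1
#     (0,1)  ###  (2,1) (3,1)=-1
#     (0,0) (1,0) (2,0) (3,0)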


def test_PassiveADPAgent():
    agent = PassiveADPAgent(policy, sequential_decision_environment)
    for i in range(75):
        run_single_trial(agent, sequential_decision_environment)

    # The trials are stochastic, so the learned utilities vary from run to
    # run; check only that they are in the right ballpark.
    assert agent.U[(0, 0)] > 0.15  # typically around 0.3
    assert agent.U[(0, 1)] > 0.15  # typically around 0.4
    assert agent.U[(1, 0)] > 0     # typically around 0.2
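
# A sketch of how exact reference utilities could be computed for comparison
# with the loose thresholds above, assuming the mdp module also exposes
# value_iteration (the helper name is illustrative, not part of the tests):
def _reference_utilities():
    from mdp import value_iteration
    return value_iteration(sequential_decision_environment)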


def test_PassiveTDAgent():
    agent = PassiveTDAgent(policy, sequential_decision_environment,
                           alpha=lambda n: 60. / (59 + n))
    for i in range(200):
        run_single_trial(agent, sequential_decision_environment)

    # The trials are stochastic, so the learned utilities vary from run to
    # run; check only that they are in the right ballpark.
    assert agent.U[(0, 0)] > 0.15  # typically around 0.3
    assert agent.U[(0, 1)] > 0.15  # typically around 0.35
    assert agent.U[(1, 0)] > 0.15  # typically around 0.25
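
# The passive TD update applied on each observed transition s -(r)-> s' is
#     U[s] <- U[s] + alpha(N[s]) * (r + gamma * U[s'] - U[s]),
# and the decaying alpha above satisfies the usual stochastic-approximation
# conditions (sum of alpha_n diverges, sum of alpha_n**2 converges), which is
# why the estimates settle down over many trials.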


def test_QLearning():
    q_agent = QLearningAgent(sequential_decision_environment, Ne=5, Rplus=2,
                             alpha=lambda n: 60. / (59 + n))

    for i in range(200):
        run_single_trial(q_agent, sequential_decision_environment)

    # The trials are stochastic, so the learned Q-values vary from run to
    # run; check only that they are in the right ballpark.
    assert q_agent.Q[((0, 1), (0, 1))] >= -0.5  # typically around 0.1
    assert q_agent.Q[((1, 0), (0, -1))] <= 0.5  # typically around -0.1
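

# A sketch of how a greedy policy could be read out of the learned Q-values.
# As the assertions above show, Q is a dict keyed by (state, action) pairs;
# the helper name is illustrative, not part of the rl module's API.
def _greedy_policy(Q):
    best = {}
    for (s, a), q in Q.items():
        if s not in best or q > best[s][1]:
            best[s] = (a, q)
    return {s: a for s, (a, q) in best.items()}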