diff --git a/README.md b/README.md
index 41d08f431..5adc57f63 100644
--- a/README.md
+++ b/README.md
@@ -64,7 +64,7 @@ Here is a table of algorithms, the figure, name of the algorithm in the book and
 | 2.7 | Table-Driven-Agent | `TableDrivenAgent` | [`agents.py`][agents] | Done | Included |
 | 2.8 | Reflex-Vacuum-Agent | `ReflexVacuumAgent` | [`agents.py`][agents] | Done | Included |
 | 2.10 | Simple-Reflex-Agent | `SimpleReflexAgent` | [`agents.py`][agents] | Done | Included |
-| 2.12 | Model-Based-Reflex-Agent | `ReflexAgentWithState` | [`agents.py`][agents] | | Included |
+| 2.12 | Model-Based-Reflex-Agent | `ReflexAgentWithState` | [`agents.py`][agents] | Done | Included |
 | 3 | Problem | `Problem` | [`search.py`][search] | Done | Included |
 | 3 | Node | `Node` | [`search.py`][search] | Done | Included |
 | 3 | Queue | `Queue` | [`utils.py`][utils] | Done | No Need |
diff --git a/agents.py b/agents.py
index eb085757a..d343fd55a 100644
--- a/agents.py
+++ b/agents.py
@@ -152,8 +152,8 @@ def ModelBasedReflexAgentProgram(rules, update_state, model):
     def program(percept):
         program.state = update_state(program.state, program.action, percept, model)
         rule = rule_match(program.state, rules)
-        action = rule.action
-        return action
+        program.action = rule.action
+        return program.action
     program.state = program.action = None
     return program

diff --git a/tests/test_agents.py b/tests/test_agents.py
index ded9b7d95..ea91f7eab 100644
--- a/tests/test_agents.py
+++ b/tests/test_agents.py
@@ -3,7 +3,7 @@ from agents import Agent
 from agents import ReflexVacuumAgent, ModelBasedVacuumAgent, TrivialVacuumEnvironment, compare_agents,\
     RandomVacuumAgent, TableDrivenVacuumAgent, TableDrivenAgentProgram, RandomAgentProgram, \
-    SimpleReflexAgentProgram, rule_match
+    SimpleReflexAgentProgram, rule_match, ModelBasedReflexAgentProgram

 random.seed("aima-python")


@@ -164,6 +164,64 @@ def interpret_input(state):
     # check final status of the environment
     assert environment.status == {(1,0):'Clean' , (0,0) : 'Clean'}

+def test_ModelBasedReflexAgentProgram():
+
+    loc_A = (0, 0)
+    loc_B = (1, 0)
+
+    model = {loc_A: None, loc_B: None}
+
+    class Rule:
+        def __init__(self, state, action):
+            self.__state = state
+            self.action = action
+
+        def matches(self, state):
+            return self.__state == state
+
+    # create rules for a two-state vacuum environment
+    rules = [Rule((loc_A, "Dirty"), "Suck"), Rule((loc_A, "Clean"), "Right"),
+             Rule((loc_B, "Dirty"), "Suck"), Rule((loc_B, "Clean"), "Left")]
+
+    def update_state(state, action, percept, model):
+        loc, status = percept
+
+        # the other location
+        loc2 = tuple(map(lambda x: x[0] - x[1], zip((1, 0), loc)))
+
+        # make an initial guess about the status of the other location
+        if not state or not action or not model[loc2]:
+            model[loc2] = random.choice(['Dirty', 'Clean'])
+
+        model[loc] = status
+
+        # the model assumes the environment stays clean if the agent sucked last step
+        if action == 'Suck':
+            state = percept
+            return state
+
+        # dirt may appear suddenly, so the model guesses the status randomly
+        if status == 'Clean':
+            status = random.choice(['Dirty', 'Clean'])
+            model[loc] = status
+
+        # moving right or left does not change the environment
+        state = (loc, model[loc])
+        return state
+
+    # create a program with ModelBasedReflexAgentProgram and an agent that runs it
+
+    program = ModelBasedReflexAgentProgram(rules, update_state, model)
+    agent = Agent(program)
+
+    # create an object of TrivialVacuumEnvironment
+    environment = TrivialVacuumEnvironment()
+    # add agent to the environment
+    environment.add_thing(agent)
+    # run the environment
+    environment.run()
+    # check final status of the environment
+    assert environment.status == {(1, 0): 'Clean', (0, 0): 'Clean'}

 def test_ModelBasedVacuumAgent() :
     # create an object of the ModelBasedVacuumAgent
@@ -227,4 +285,4 @@ def constant_prog(percept):
         return percept
     agent = Agent(constant_prog)
     result = agent.program(5)
-    assert result == 5
+    assert result == 5
\ No newline at end of file
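For reviewers, a minimal self-contained sketch of why the agents.py change matters: before this patch, `program` stored the chosen action in a local variable, so `program.action` stayed `None` and `update_state` never saw the previous action. The `Rule`, `rule_match`, and `update_state` below are simplified stand-ins (not the repository versions) used only to make the sketch runnable on its own.

# Sketch: the fixed ModelBasedReflexAgentProgram, copied from the hunk above,
# plus minimal stand-ins for Rule, rule_match, and update_state.

def ModelBasedReflexAgentProgram(rules, update_state, model):
    def program(percept):
        program.state = update_state(program.state, program.action, percept, model)
        rule = rule_match(program.state, rules)
        program.action = rule.action  # the fix: persist the action, not a local
        return program.action
    program.state = program.action = None
    return program

def rule_match(state, rules):
    # return the first rule whose condition matches the current state
    for rule in rules:
        if rule.matches(state):
            return rule

class Rule:
    def __init__(self, state, action):
        self._state, self.action = state, action

    def matches(self, state):
        return self._state == state

seen_actions = []

def update_state(state, action, percept, model):
    seen_actions.append(action)  # before the fix, this was always None
    return percept

rules = [Rule(((0, 0), 'Dirty'), 'Suck'), Rule(((0, 0), 'Clean'), 'Right')]
program = ModelBasedReflexAgentProgram(rules, update_state, {})

assert program(((0, 0), 'Dirty')) == 'Suck'
assert program(((0, 0), 'Clean')) == 'Right'
assert seen_actions == [None, 'Suck']  # the previous action now reaches update_state

With the old body (`action = rule.action; return action`) the final assertion fails with `seen_actions == [None, None]`, which is exactly the behavior the new test exercises through `TrivialVacuumEnvironment`.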