1- """Implement Agents and Environments (Chapters 1-2).
1+ """
2+ Implement Agents and Environments. (Chapters 1-2)
23
34The class hierarchies are as follows:
45
2324EnvToolbar ## contains buttons for controlling EnvGUI
2425
2526EnvCanvas ## Canvas to display the environment of an EnvGUI
26-
2727"""
2828
29- # TO DO:
29+ # TODO
3030# Implement grabbing correctly.
3131# When an object is grabbed, does it still have a location?
3232# What if it is released?
3333# What if the grabbed or the grabber is deleted?
3434# What if the grabber moves?
35- #
3635# Speed control in GUI does not have any effect -- fix it.
3736
3837from utils4e import distance_squared , turn_heading
@@ -90,8 +89,7 @@ def __init__(self, program=None):
9089 self .holding = []
9190 self .performance = 0
9291 if program is None or not isinstance (program , collections .Callable ):
93- print ("Can't find a valid program for {}, falling back to default." .format (
94- self .__class__ .__name__ ))
92+ print ("Can't find a valid program for {}, falling back to default." .format (self .__class__ .__name__ ))
9593
9694 def program (percept ):
9795 return eval (input ('Percept={}; action? ' .format (percept )))
@@ -122,10 +120,13 @@ def new_program(percept):
122120
123121
124122def TableDrivenAgentProgram (table ):
125- """This agent selects an action based on the percept sequence.
123+ """
124+ [Figure 2.7]
125+ This agent selects an action based on the percept sequence.
126126 It is practical only for tiny domains.
127127 To customize it, provide as table a dictionary of all
128- {percept_sequence:action} pairs. [Figure 2.7]"""
128+ {percept_sequence:action} pairs.
129+ """
129130 percepts = []
130131
131132 def program (percept ):
@@ -154,7 +155,10 @@ def RandomAgentProgram(actions):
154155
155156
156157def SimpleReflexAgentProgram (rules , interpret_input ):
157- """This agent takes action based solely on the percept. [Figure 2.10]"""
158+ """
159+ [Figure 2.10]
160+ This agent takes action based solely on the percept.
161+ """
158162
159163 def program (percept ):
160164 state = interpret_input (percept )
@@ -166,7 +170,10 @@ def program(percept):
166170
167171
168172def ModelBasedReflexAgentProgram (rules , update_state , trainsition_model , sensor_model ):
169- """This agent takes action based on the percept and state. [Figure 2.12]"""
173+ """
174+ [Figure 2.12]
175+ This agent takes action based on the percept and state.
176+ """
170177
171178 def program (percept ):
172179 program .state = update_state (program .state , program .action , percept , trainsition_model , sensor_model )
@@ -219,7 +226,9 @@ def TableDrivenVacuumAgent():
219226
220227
221228def ReflexVacuumAgent ():
222- """A reflex agent for the two-state vacuum environment. [Figure 2.8]
229+ """
230+ [Figure 2.8]
231+ A reflex agent for the two-state vacuum environment.
223232 >>> agent = ReflexVacuumAgent()
224233 >>> environment = TrivialVacuumEnvironment()
225234 >>> environment.add_thing(agent)
@@ -333,8 +342,7 @@ def run(self, steps=1000):
333342
334343 def list_things_at (self , location , tclass = Thing ):
335344 """Return all things exactly at a given location."""
336- return [thing for thing in self .things
337- if thing .location == location and isinstance (thing , tclass )]
345+ return [thing for thing in self .things if thing .location == location and isinstance (thing , tclass )]
338346
339347 def some_things_at (self , location , tclass = Thing ):
340348 """Return true if at least one of the things at location
@@ -437,13 +445,13 @@ def move_forward(self, from_location):
437445 """
438446 x , y = from_location
439447 if self .direction == self .R :
440- return ( x + 1 , y )
448+ return x + 1 , y
441449 elif self .direction == self .L :
442- return ( x - 1 , y )
450+ return x - 1 , y
443451 elif self .direction == self .U :
444- return ( x , y - 1 )
452+ return x , y - 1
445453 elif self .direction == self .D :
446- return ( x , y + 1 )
454+ return x , y + 1
447455
448456
449457class XYEnvironment (Environment ):
@@ -498,7 +506,7 @@ def execute_action(self, agent, action):
498506 agent .holding .pop ()
499507
500508 def default_location (self , thing ):
501- return ( random .choice (self .width ), random .choice (self .height ) )
509+ return random .choice (self .width ), random .choice (self .height )
502510
503511 def move_to (self , thing , destination ):
504512 """Move a thing to a new location. Returns True on success or False if there is an Obstacle.
@@ -724,7 +732,7 @@ def percept(self, agent):
724732 status = ('Dirty' if self .some_things_at (
725733 agent .location , Dirt ) else 'Clean' )
726734 bump = ('Bump' if agent .bump else 'None' )
727- return ( status , bump )
735+ return status , bump
728736
729737 def execute_action (self , agent , action ):
730738 agent .bump = False
@@ -753,12 +761,11 @@ def __init__(self):
753761 loc_B : random .choice (['Clean' , 'Dirty' ])}
754762
755763 def thing_classes (self ):
756- return [Wall , Dirt , ReflexVacuumAgent , RandomVacuumAgent ,
757- TableDrivenVacuumAgent , ModelBasedVacuumAgent ]
764+ return [Wall , Dirt , ReflexVacuumAgent , RandomVacuumAgent , TableDrivenVacuumAgent , ModelBasedVacuumAgent ]
758765
759766 def percept (self , agent ):
760767 """Returns the agent's location, and the location status (Dirty/Clean)."""
761- return ( agent .location , self .status [agent .location ])
768+ return agent .location , self .status [agent .location ]
762769
763770 def execute_action (self , agent , action ):
764771 """Change agent's location and/or location's status; track performance.
@@ -994,8 +1001,7 @@ def is_done(self):
9941001 print ("Death by {} [-1000]." .format (explorer [0 ].killed_by ))
9951002 else :
9961003 print ("Explorer climbed out {}."
997- .format (
998- "with Gold [+1000]!" if Gold () not in self .things else "without Gold [+0]" ))
1004+ .format ("with Gold [+1000]!" if Gold () not in self .things else "without Gold [+0]" ))
9991005 return True
10001006
10011007 # TODO: Arrow needs to be implemented