# agent.py
import random
import time
  3. class Agent():
  4. """
  5. Macro-Cognitive Agent definition
  6. Will have several paramters affecting performance:
  7. Memory decay rate (from lit)
  8. Learning / skill mastery rate(from lit)
  9. Knowledge interference levels?
  10. """
  11. def __init__(self, x, y, id, harvest_duration,
  12. refine_duration, learning_rate, world):
  13. self.x = x
  14. self.y = y
  15. # next method, iterator fn, seems like ruby's first fn
  16. self.position = next((square for square in world.squares \
  17. if square.x == self.x and square.y == self.y), None)
  18. self.id = id
  19. self.inventory = []
  20. # harvest duration may be a sim param, may be a skill param, that can
  21. # vary per agent
  22. self.harvest_duration = harvest_duration
  23. self.refine_duration = refine_duration
  24. self.action_type = None
  25. self.sub_action = None
  26. self.action_start_time = None
  27. self.action_end_time = None
  28. # learning rate will determine how quickly skills improve
  29. # higher rate, sooner to reach the next level of master
  30. # Note - this will be a step-wise learning model, with discrete jumps
  31. self.learning_rate = learning_rate
  32. #memory to be built
  33. memory = {}
  34. # TODO: parameterized skills to be built
  35. # move defintiions; note that world wraps around on both axes
  36. def move_left(self, sim_params):
  37. if self.x - 1 >= 0:
  38. self.x -= 1
  39. else:
  40. self.x = sim_params.world_size - 1
  41. def move_right(self, sim_params):
  42. if self.x + 1 <= sim_params.world_size - 1:
  43. self.x += 1
  44. else:
  45. self.x = 0
  46. def move_down(self, sim_params):
  47. if self.y - 1 >= 0:
  48. self.y -= 1
  49. else:
  50. self.y = sim_params.world_size - 1
  51. def move_up(self, sim_params):
  52. if self.y + 1 <= sim_params.world_size - 1:
  53. self.y += 1
  54. else:
  55. self.y = 0
  56. def find_targets(self, world, sim_params):
  57. # find squares adjacent to agent
  58. # check up square
  59. if self.y == sim_params.world_size-1:
  60. up_target = next((square for square in world.squares if square.x ==\
  61. self.x and square.y == 0), None)
  62. else:
  63. up_target = next((square for square in world.squares if square.x ==\
  64. self.x and square.y == self.y + 1), None)
  65. # check down square
  66. if self.y == 0:
  67. down_target = next((square for square in world.squares if square.x ==\
  68. self.x and square.y == sim_params.world_size-1), None)
  69. else:
  70. down_target = next((square for square in world.squares if square.x ==\
  71. self.x and square.y == self.y - 1), None)
  72. # check right square
  73. if self.x == sim_params.world_size-1:
  74. right_target = next((square for square in world.squares if\
  75. square.x == 0 and square.y == self.y), None)
  76. else:
  77. right_target = next((square for square in world.squares if square.x ==\
  78. self.x + 1 and square.y == self.y), None)
  79. # check left square
  80. if self.x == 0:
  81. left_target = next((square for square in world.squares if square.x ==\
  82. sim_params.world_size-1 and square.y == self.y), None)
  83. else:
  84. left_target = next((square for square in world.squares if square.x ==\
  85. self.x - 1 and square.y == self.y), None)
  86. return(up_target, down_target, right_target, left_target)
  87. def filter_move_actns(self, world, sim_params):
  88. # two agents cannot occupy the same point in space
  89. # check which moves are allowable before choosing one
  90. # this is based on which adjacent squares are occupied
  91. # find_targets will grab surrounding squares
  92. up_target, down_target, right_target, left_target = \
  93. self.find_targets(world, sim_params)
  94. # append moves to filtered_move list
  95. if not up_target.occupied:
  96. self.filtered_move_list.append(self.move_up)
  97. if not down_target.occupied:
  98. self.filtered_move_list.append(self.move_down)
  99. if not right_target.occupied:
  100. self.filtered_move_list.append(self.move_right)
  101. if not left_target.occupied:
  102. self.filtered_move_list.append(self.move_left)
  103. # def filter_reso_actns(self, world, sim_params)
  104. #!! agent resource action definitions
  105. def harvest(self, round_num, world, sim_params):
  106. # if agent has already started a harvest
  107. if self.action_end_time != None:
  108. # check if harvest is finished
  109. if round_num > self.action_end_time:
  110. # add harvested resource to inentory
  111. self.inventory.append(self.position.square_resource)
  112. # remove resource from world square
  113. self.position.square_resource = None
  114. # add 1 to count of harvested resources
  115. world.harvested_resource_count += 1
  116. # subtract 1 from count of raw / unharvested resources
  117. world.raw_resource_count -= 1
  118. # if all resources have now been harvested,
  119. # return current round number as num_rounds_to_completion
  120. if world.raw_resource_count == 0:
  121. sim_params.num_rounds_to_completion = round_num
  122. # reset self.action to none
  123. self.action_type = None
  124. self.sub_action = None
  125. self.action_end_time = None
  126. self.action_start_time = None
  127. # self.filtered_move_list = []
  128. # if agent hasn't finished req'd num of harvest rounds ...
  129. # don't change action yet
  130. else:
  131. pass
  132. #if agent hasn't started harvest, check if resource present
  133. else:
  134. if self.position.square_resource:
  135. self.action_start_time = round_num
  136. self.action_end_time = round_num + self.harvest_duration
  137. # if agent attempts to harvest, but no reso present ...
  138. # turn is basically wasted
  139. else:
  140. self.action_type = None
  141. # end harvest() definition
  142. def refine(self, sim_params, resource):
  143. pass
  144. def trade(self, world):
  145. up_target, down_target, right_target, left_target = \
  146. self.find_targets(world, sim_params)
  147. targets = [up_target, down_target, right_target, left_target]
  148. trade_targets = [target for target in targets if target.occupied]
  149. print("possible targets:")
  150. print(targets)
  151. time.sleep(3)
  152. move_list = [move_left, move_right, move_down, move_up]
  153. resource_actions = [harvest]
  154. # resource_actions = [harvest, refine, trade,]
  155. # skills will be a combination of a transformation (e.g., a --> b) and
  156. # a speed (e.g., 3 rounds)
  157. # This might involve interaction with memory...
  158. # skills = {}
  159. # full action list is a combination of all possible actions
  160. full_action_list = [move_list, resource_actions]
  161. # print agent position to screen
  162. def print_position(self):
  163. print("Agent ", self.id, " position x,y:", self.x, self.y)
  164. pass
  165. def act(self, world, round_num, sim_params):
  166. # filtered move list is built based on which adjacent squares are empty
  167. # it is rebuilt on each turn
  168. self.filtered_move_list = []
  169. self.position = next((square for square in world.squares \
  170. if square.x == self.x and square.y == self.y), None)
  171. # self.print_position()
  172. # grab agent's current square from world.squares
  173. # "next" grabs matching instance from iterator
  174. # self.position = next((square for square in world.squares \
  175. # if square.x == self.x and square.y == self.y), None)
  176. # if agent currently not doing anything ...
  177. # self.action_type: choose between moving, manipulating resources,
  178. # or something else (TBD); currently random
  179. if self.action_type == None:
  180. self.action_type = random.choice(self.full_action_list)
  181. # 1. if agents selects a move actn ...
  182. if self.action_type == self.move_list:
  183. # filter to moves to only those that lead to unoccupied squares
  184. self.filter_move_actns(world, sim_params)
  185. # choose from subset of filtered moves
  186. if len(self.filtered_move_list) == 0:
  187. pass
  188. else:
  189. # set current square to unoccupied
  190. self.position.occupied, self.position.occupant = False, None
  191. self.sub_action = random.choice(self.filtered_move_list)
  192. # sim params passed to movement, for dimensions of world
  193. self.sub_action(sim_params)
  194. # after running sub act, reset both to None
  195. self.action_type = None
  196. self.sub_action = None
  197. # grab new position and set square.occupied
  198. self.position = next((square for square in world.squares \
  199. if square.x == self.x and square.y == self.y), None)
  200. self.position.occupied, self.position.occupant = True, self
  201. # 2. if agent selects a reso action ...
  202. elif self.action_type == self.resource_actions:
  203. # start new reso action
  204. # # filter reso actions based on whether agent holds resos
  205. # if self.inventory not None:
  206. # self.filtered_reso_action
  207. self.sub_action = random.choice(self.action_type)
  208. self.sub_action(self, round_num, world, sim_params)
  209. # if agent was already in reso action, continue
  210. # could DRY up, duplicates call above
  211. elif self.action_type == self.resource_actions:
  212. self.sub_action = random.choice(self.action_type)
  213. self.sub_action(self, round_num, world, sim_params)