def _step(self, actions)

in gym-compete/gym_compete/new_envs/multi_agent_env.py

Advances the multi-agent environment by one step: runs each agent's before_step hook, simulates the shared scene with the joint actions, then combines each agent's goal reward with its movement reward (scaled by move_reward_weight) and returns per-agent observations, rewards, the done signal, and info dicts.


    def _step(self, actions):
        # Let each agent run its pre-step bookkeeping before the physics update.
        for i in range(self.n_agents):
            self.agents[i].before_step()
        # Advance the shared simulation scene with the joint actions.
        self.env_scene.simulate(actions)
        move_rews = []
        infos = []
        dones = []
        # Collect each agent's movement reward, done flag, and info dict.
        for i in range(self.n_agents):
            move_r, agent_done, rinfo = self.agents[i].after_step(actions[i])
            move_rews.append(move_r)
            dones.append(agent_done)
            rinfo['agent_done'] = agent_done
            infos.append(rinfo)
        # Goal (competition) rewards depend on all agents, so they are computed
        # jointly, along with whether the game has ended.
        goal_rews, game_done = self.goal_rewards(infos=infos, agent_dones=dones)
        rews = []
        for i, info in enumerate(infos):
            info['reward_remaining'] = float(goal_rews[i])
            # Final per-agent reward: goal reward plus weighted movement reward.
            rews.append(float(goal_rews[i] + self.move_reward_weight * move_rews[i]))
        rews = tuple(rews)
        done = self._get_done(dones, game_done)
        infos = tuple(infos)
        obses = self._get_obs()
        # Observations, rewards, and infos are returned as per-agent tuples.
        return obses, rews, done, infos
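
For context, a minimal usage sketch follows. It assumes gym and gym_compete are installed, that the older Gym API is in use (reset returning observations, step returning four values, with Env.step dispatching to _step), and that "sumo-ants-v0" names one of the registered two-agent environments; the id is illustrative, and any other gym_compete environment would work the same way. The tuples returned by env.step mirror the obses, rews, done, infos built in _step above.

import gym
import gym_compete  # noqa: F401 -- importing registers the multi-agent environments

# "sumo-ants-v0" is an assumed/illustrative environment id.
env = gym.make("sumo-ants-v0")
obses = env.reset()

# The joint action space is a Tuple space, so sampling yields one action per agent.
actions = env.action_space.sample()
obses, rews, done, infos = env.step(actions)

# rews and infos are per-agent tuples; each info dict carries the fields set in _step.
for i, (rew, info) in enumerate(zip(rews, infos)):
    print("agent %d: reward=%.3f, reward_remaining=%.3f, agent_done=%s"
          % (i, rew, info['reward_remaining'], info['agent_done']))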