hucc/agents/hsd3.py [607:758]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
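        # Condition the continuous subgoal policy on the chosen discrete action
        # via a one-hot encoding (on a copy, so obs_hi itself is left intact).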
        subgoal_obs_hi = copy(obs_hi)
        nd = self._action_space_d.n
        subgoal_obs_hi[self._dkey] = (
            F.one_hot(action_d, nd).float().view(-1, nd)
        )
        dist_c = self._model_pi_c(subgoal_obs_hi)
        if self.training:
            action_c = dist_c.sample()
        else:
            action_c = dist_c.mean
        action_c = action_c * self._action_factor_c

        assert action_c.ndim == 3, 'Subgoal policy not multihead?'
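        # Mask out unused subgoal dimensions (if a mask is configured) and pick
        # the policy head that matches each sample's discrete action.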
        if self._action_c_mask is not None:
            action_c = action_c * self._action_c_mask
        action_c = dim_select(action_c, 1, action_d)

        return {self._dkey: action_d, self._ckey: action_c}

    def action_hi(self, env, obs, prev_action):
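        # Take random high-level actions during the initial exploration phase;
        # afterwards query the learned high-level policy.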
        if self._n_samples < self._randexp_samples and self.training:
            action = self.action_hi_rand(env, obs['time'])
        else:
            action = self.action_hi_cd(env, obs)
        return action

    def action_lo(self, env, obs):
        action = self._pi_lo_det(obs)
        action = action * self._action_factor_lo
        return action

    def action(self, env, obs) -> Tuple[th.Tensor, Any]:
        step = obs['time'].remainder(self._action_interval).long().view(-1)
        keep_action_hi = step != 0
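        # A new high-level action is due whenever an environment's step counter
        # hits a multiple of the action interval; all other environments keep
        # their current one.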

        def retain(x, y, mask):
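            # Keep x where mask is set, otherwise take y (elementwise blend).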
            return mask * x + th.logical_not(mask) * y

        prev_gs_obs = env.ctx.get('gs_obs', None)
        action_hi = env.ctx.get('action_hi', None)
        obs_hi = copy(obs)

        tr_action_hi = env.ctx.get('tr_action_hi', None)
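        # Recompute the high-level action if any environment is at an interval
        # boundary (or none has been taken yet); the retain mask keeps the
        # previous action for environments that are not due for an update.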
        if action_hi is None or not keep_action_hi.all().item():
            with th.no_grad():
                new_action_hi = self.action_hi(env, obs_hi, action_hi)
            tr_new_action_hi = self._iface.translate(
                self._iface.gs_obs(obs),
                new_action_hi[self._dkey],
                new_action_hi[self._ckey],
            )
            if action_hi is None:
                action_hi = deepcopy(new_action_hi)
                tr_action_hi = deepcopy(tr_new_action_hi)
            else:
                c = self._ckey
                d = self._dkey
                # Replace raw actions
                action_hi[d] = retain(
                    action_hi[d], new_action_hi[d], keep_action_hi
                )
                action_hi[c] = retain(
                    action_hi[c],
                    new_action_hi[c],
                    keep_action_hi.unsqueeze(1).expand_as(action_hi[c]),
                )
                # Replace translated actions
                tr_action_hi['task'] = retain(
                    tr_action_hi['task'],
                    tr_new_action_hi['task'],
                    keep_action_hi.unsqueeze(1).expand_as(tr_action_hi['task']),
                )
                tr_action_hi['desired_goal'] = self._iface.update_bp_subgoal(
                    prev_gs_obs, self._iface.gs_obs(obs), tr_action_hi
                )
                tr_action_hi['desired_goal'] = retain(
                    tr_action_hi['desired_goal'],
                    tr_new_action_hi['desired_goal'],
                    keep_action_hi.unsqueeze(1).expand_as(
                        tr_action_hi['desired_goal']
                    ),
                )
        else:
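            # No environment is due for a new high-level action; only refresh
            # the translated subgoal for the new goal-space observation.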
            tr_action_hi['desired_goal'] = self._iface.update_bp_subgoal(
                prev_gs_obs, self._iface.gs_obs(obs), tr_action_hi
            )

        # Cache the high-level action and the current goal-space observation
        # for the next call.
        env.ctx['action_hi'] = action_hi
        env.ctx['tr_action_hi'] = tr_action_hi
        if 'gs_obs' not in env.ctx:
            env.ctx['gs_obs'] = self._iface.gs_obs(obs).clone()
        else:
            env.ctx['gs_obs'].copy_(self._iface.gs_obs(obs))

        with th.no_grad():
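            # Construct the low-level observation from the raw observation and
            # the translated high-level action, then query the low-level policy.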
            obs_lo = self._iface.observation_lo(
                obs['observation'], tr_action_hi
            )
            action_lo = self.action_lo(env, obs_lo)

        if self.training:
            return action_lo, {
                'action_hi': action_hi,
                'tr_action_hi': tr_action_hi,
                #'gs_obs0': env.ctx['gs_obs0'],
                'obs_hi': obs_hi,
            }

        # Additional visualization info for evals
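        # For each environment, recover the active feature subset and slice the
        # raw and translated subgoals down to that subset's entries.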
        subsets = [
            self._iface.subsets[i.item()] for i in action_hi['task'].cpu()
        ]
        sg_cpu = action_hi['subgoal'].cpu().numpy()
        sgd_cpu = tr_action_hi['desired_goal'].cpu().numpy()
        subgoals = []
        subgoals_d = []
        for i in range(env.num_envs):
            n = len(subsets[i].split(','))
            subgoals.append(sg_cpu[i, :n])
            feats = [self._iface.task_map[f] for f in subsets[i].split(',')]
            subgoals_d.append(sgd_cpu[i, feats])
        return action_lo, {
            'action_hi': action_hi,
            'tr_action_hi': tr_action_hi,
            'obs_hi': obs_hi,
            'st': subsets,
            'sg': subgoals,
            'sgd': subgoals_d,
            'viz': ['st', 'sg', 'sgd'],
        }

    def step(
        self,
        env,
        obs,
        action,
        extra: Any,
        result: Tuple[th.Tensor, th.Tensor, th.Tensor, List[Dict]],
    ) -> None:
        next_obs, reward, done, info = result
        action_hi = extra['action_hi']
        tr_action_hi = extra['tr_action_hi']
        obs_hi = extra['obs_hi']
        # Ignore the terminal flag if the episode merely hit its time limit;
        # separately track environments that terminated by falling over.
        fell_over = th.zeros_like(done, device='cpu')
        for i in range(len(info)):
            if 'TimeLimit.truncated' in info[i]:
                # log.info('Ignoring timeout')
                done[i] = False
            elif 'fell_over' in info[i]:
                fell_over[i] = True
        fell_over = fell_over.to(done.device)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
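
A minimal, self-contained sketch (not part of the file above) of the
retain/mask pattern used in action() to keep or replace per-environment
high-level actions; tensor values are made up for illustration:

import torch as th

def retain(x, y, mask):
    # Keep x where mask is set, otherwise take y (elementwise blend).
    return mask * x + th.logical_not(mask) * y

old = th.tensor([[1.0, 1.0], [2.0, 2.0]])  # current high-level actions
new = th.tensor([[9.0, 9.0], [8.0, 8.0]])  # freshly sampled actions
keep = th.tensor([True, False])            # env 0 keeps, env 1 resamples
print(retain(old, new, keep.unsqueeze(1).expand_as(old)))
# tensor([[1., 1.], [8., 8.]])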



hucc/agents/hsdb.py [257:408]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
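        # Condition the continuous subgoal policy on the chosen discrete action
        # via a one-hot encoding (on a copy, so obs_hi itself is left intact).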
        subgoal_obs_hi = copy(obs_hi)
        nd = self._action_space_d.n
        subgoal_obs_hi[self._dkey] = (
            F.one_hot(action_d, nd).float().view(-1, nd)
        )
        dist_c = self._model_pi_c(subgoal_obs_hi)
        if self.training:
            action_c = dist_c.sample()
        else:
            action_c = dist_c.mean
        action_c = action_c * self._action_factor_c

        assert action_c.ndim == 3, 'Subgoal policy not multihead?'
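        # Mask out unused subgoal dimensions (if a mask is configured) and pick
        # the policy head that matches each sample's discrete action.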
        if self._action_c_mask is not None:
            action_c = action_c * self._action_c_mask
        action_c = dim_select(action_c, 1, action_d)

        return {self._dkey: action_d, self._ckey: action_c}

    def action_hi(self, env, obs, prev_action):
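        # Take random high-level actions during the initial exploration phase;
        # afterwards query the learned high-level policy.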
        if self._n_samples < self._randexp_samples and self.training:
            action = self.action_hi_rand(env, obs['time'])
        else:
            action = self.action_hi_cd(env, obs)
        return action

    def action_lo(self, env, obs):
        action = self._pi_lo_det(obs)
        action = action * self._action_factor_lo
        return action

    def action(self, env, obs) -> Tuple[th.Tensor, Any]:
        step = obs['time'].remainder(self._action_interval).long().view(-1)
        keep_action_hi = step != 0
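        # A new high-level action is due whenever an environment's step counter
        # hits a multiple of the action interval; all other environments keep
        # their current one.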

        def retain(x, y, mask):
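            # Keep x where mask is set, otherwise take y (elementwise blend).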
            return mask * x + th.logical_not(mask) * y

        prev_gs_obs = env.ctx.get('gs_obs', None)
        action_hi = env.ctx.get('action_hi', None)
        obs_hi = copy(obs)

        tr_action_hi = env.ctx.get('tr_action_hi', None)
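        # Recompute the high-level action if any environment is at an interval
        # boundary (or none has been taken yet); the retain mask keeps the
        # previous action for environments that are not due for an update.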
        if action_hi is None or not keep_action_hi.all().item():
            with th.no_grad():
                new_action_hi = self.action_hi(env, obs_hi, action_hi)
            tr_new_action_hi = self._iface.translate(
                self._iface.gs_obs(obs),
                new_action_hi[self._dkey],
                new_action_hi[self._ckey],
            )
            if action_hi is None:
                action_hi = deepcopy(new_action_hi)
                tr_action_hi = deepcopy(tr_new_action_hi)
            else:
                c = self._ckey
                d = self._dkey
                # Replace raw actions
                action_hi[d] = retain(
                    action_hi[d], new_action_hi[d], keep_action_hi
                )
                action_hi[c] = retain(
                    action_hi[c],
                    new_action_hi[c],
                    keep_action_hi.unsqueeze(1).expand_as(action_hi[c]),
                )
                # Replace translated actions
                tr_action_hi['task'] = retain(
                    tr_action_hi['task'],
                    tr_new_action_hi['task'],
                    keep_action_hi.unsqueeze(1).expand_as(tr_action_hi['task']),
                )
                tr_action_hi['desired_goal'] = self._iface.update_bp_subgoal(
                    prev_gs_obs, self._iface.gs_obs(obs), tr_action_hi
                )
                tr_action_hi['desired_goal'] = retain(
                    tr_action_hi['desired_goal'],
                    tr_new_action_hi['desired_goal'],
                    keep_action_hi.unsqueeze(1).expand_as(
                        tr_action_hi['desired_goal']
                    ),
                )
        else:
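            # No environment is due for a new high-level action; only refresh
            # the translated subgoal for the new goal-space observation.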
            tr_action_hi['desired_goal'] = self._iface.update_bp_subgoal(
                prev_gs_obs, self._iface.gs_obs(obs), tr_action_hi
            )

        # Cache the high-level action and the current goal-space observation
        # for the next call.
        env.ctx['action_hi'] = action_hi
        env.ctx['tr_action_hi'] = tr_action_hi
        if 'gs_obs' not in env.ctx:
            env.ctx['gs_obs'] = self._iface.gs_obs(obs).clone()
        else:
            env.ctx['gs_obs'].copy_(self._iface.gs_obs(obs))

        with th.no_grad():
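            # Construct the low-level observation from the raw observation and
            # the translated high-level action, then query the low-level policy.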
            obs_lo = self._iface.observation_lo(
                obs['observation'], tr_action_hi
            )
            action_lo = self.action_lo(env, obs_lo)

        if self.training:
            return action_lo, {
                'action_hi': action_hi,
                'tr_action_hi': tr_action_hi,
                #'gs_obs0': env.ctx['gs_obs0'],
                'obs_hi': obs_hi,
            }

        # Additional visualization info for evals
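        # For each environment, recover the active feature subset and slice the
        # raw and translated subgoals down to that subset's entries.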
        subsets = [
            self._iface.subsets[i.item()] for i in action_hi['task'].cpu()
        ]
        sg_cpu = action_hi['subgoal'].cpu().numpy()
        sgd_cpu = tr_action_hi['desired_goal'].cpu().numpy()
        subgoals = []
        subgoals_d = []
        for i in range(env.num_envs):
            n = len(subsets[i].split(','))
            subgoals.append(sg_cpu[i, :n])
            feats = [self._iface.task_map[f] for f in subsets[i].split(',')]
            subgoals_d.append(sgd_cpu[i, feats])
        return action_lo, {
            'action_hi': action_hi,
            'tr_action_hi': tr_action_hi,
            'obs_hi': obs_hi,
            'st': subsets,
            'sg': subgoals,
            'sgd': subgoals_d,
            'viz': ['st', 'sg', 'sgd'],
        }

    def step(
        self,
        env,
        obs,
        action,
        extra: Any,
        result: Tuple[th.Tensor, th.Tensor, th.Tensor, List[Dict]],
    ) -> None:
        next_obs, reward, done, info = result
        action_hi = extra['action_hi']
        tr_action_hi = extra['tr_action_hi']
        obs_hi = extra['obs_hi']
        # Ignore the terminal flag if the episode merely hit its time limit;
        # separately track environments that terminated by falling over.
        fell_over = th.zeros_like(done, device='cpu')
        for i in range(len(info)):
            if 'TimeLimit.truncated' in info[i]:
                # log.info('Ignoring timeout')
                done[i] = False
            elif 'fell_over' in info[i]:
                fell_over[i] = True
        fell_over = fell_over.to(done.device)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
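
A minimal sketch (assumed semantics, not taken from the repo) of how a
multi-head continuous action is reduced to one head per sample, mirroring the
dim_select(action_c, 1, action_d) call above; dim_select_sketch is a
hypothetical stand-in and may differ from hucc's actual dim_select helper:

import torch as th

def dim_select_sketch(x, dim, index):
    # For a (batch, heads, dim) tensor, pick index[i] along dim 1 for every
    # sample i and drop the selected axis.
    idx = index.view(-1, 1, 1).expand(-1, 1, x.shape[-1])
    return x.gather(dim, idx).squeeze(dim)

action_c = th.randn(4, 3, 5)        # (batch, heads, subgoal dimensions)
action_d = th.tensor([0, 2, 1, 2])  # chosen head per sample
print(dim_select_sketch(action_c, 1, action_d).shape)  # torch.Size([4, 5])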



