in src/algos/rnd.py [0:0]
def learn(actor_model,
          model,
          random_target_network,
          predictor_network,
          batch,
          initial_agent_state,
          optimizer,
          predictor_optimizer,
          scheduler,
          flags,
          frames=None,
          lock=threading.Lock()):
    """Performs a learning (optimization) step."""
    with lock:
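        # Random Network Distillation: embed the next observations with a fixed,
        # randomly initialized target network and with a trained predictor network.
        # The full-observation branch reshapes to (unroll_length, batch_size, 128),
        # the embedding size used by these networks.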
        if flags.use_fullobs_intrinsic:
            random_embedding = random_target_network(batch, next_state=True)\
                .reshape(flags.unroll_length, flags.batch_size, 128)
            predicted_embedding = predictor_network(batch, next_state=True)\
                .reshape(flags.unroll_length, flags.batch_size, 128)
        else:
            random_embedding = random_target_network(batch['partial_obs'][1:].to(device=flags.device))
            predicted_embedding = predictor_network(batch['partial_obs'][1:].to(device=flags.device))
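        # Intrinsic reward is the per-step L2 prediction error between the two
        # embeddings. Both are detached here, so the reward is a pure signal and
        # gradients only reach the predictor through rnd_loss below.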
        intrinsic_rewards = torch.norm(predicted_embedding.detach() - random_embedding.detach(), dim=2, p=2)

        intrinsic_reward_coef = flags.intrinsic_reward_coef
        intrinsic_rewards *= intrinsic_reward_coef
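        # Flattened per-step actions and intrinsic rewards; not consumed below,
        # apparently kept for logging/diagnostics.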
        num_samples = flags.unroll_length * flags.batch_size
        actions_flat = batch['action'][1:].reshape(num_samples).cpu().detach().numpy()
        intrinsic_rewards_flat = intrinsic_rewards.reshape(num_samples).cpu().detach().numpy()
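        # RND loss: train the predictor to match the (detached) random target,
        # scaled by rnd_loss_coef.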
        rnd_loss = flags.rnd_loss_coef * \
            losses.compute_forward_dynamics_loss(predicted_embedding, random_embedding.detach())
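        # IMPALA learner pass: run the learner model over the whole unroll, take the
        # last baseline as the bootstrap value, then shift batch and learner outputs
        # by one step so actions, rewards and value predictions line up per timestep.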
        learner_outputs, unused_state = model(batch, initial_agent_state)

        bootstrap_value = learner_outputs['baseline'][-1]

        batch = {key: tensor[1:] for key, tensor in batch.items()}
        learner_outputs = {
            key: tensor[:-1]
            for key, tensor in learner_outputs.items()
        }
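        # Combine extrinsic and intrinsic rewards; with no_reward set the agent
        # trains on the intrinsic reward alone. The sum is clipped to [-1, 1].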
        rewards = batch['reward']
        if flags.no_reward:
            total_rewards = intrinsic_rewards
        else:
            total_rewards = rewards + intrinsic_rewards
        clipped_rewards = torch.clamp(total_rewards, -1, 1)
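        # Zero the discount at episode boundaries, then compute V-trace value targets
        # and policy-gradient advantages for off-policy correction.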
        discounts = (~batch['done']).float() * flags.discounting

        vtrace_returns = vtrace.from_logits(
            behavior_policy_logits=batch['policy_logits'],
            target_policy_logits=learner_outputs['policy_logits'],
            actions=batch['action'],
            discounts=discounts,
            rewards=clipped_rewards,
            values=learner_outputs['baseline'],
            bootstrap_value=bootstrap_value)
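        # Standard IMPALA losses (policy gradient, baseline, entropy), with the
        # baseline and entropy terms weighted by their cost flags, plus the RND
        # predictor loss.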
        pg_loss = losses.compute_policy_gradient_loss(learner_outputs['policy_logits'],
                                                      batch['action'],
                                                      vtrace_returns.pg_advantages)
        baseline_loss = flags.baseline_cost * losses.compute_baseline_loss(
            vtrace_returns.vs - learner_outputs['baseline'])
        entropy_loss = flags.entropy_cost * losses.compute_entropy_loss(
            learner_outputs['policy_logits'])

        total_loss = pg_loss + baseline_loss + entropy_loss + rnd_loss
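        # mean_episode_return averages only over episodes that finished within this
        # batch (it is NaN when none did).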
        episode_returns = batch['episode_return'][batch['done']]
        stats = {
            'mean_episode_return': torch.mean(episode_returns).item(),
            'total_loss': total_loss.item(),
            'pg_loss': pg_loss.item(),
            'baseline_loss': baseline_loss.item(),
            'entropy_loss': entropy_loss.item(),
            'rnd_loss': rnd_loss.item(),
            'mean_rewards': torch.mean(rewards).item(),
            'mean_intrinsic_rewards': torch.mean(intrinsic_rewards).item(),
            'mean_total_rewards': torch.mean(total_rewards).item(),
        }
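        # Optimization step: advance the LR scheduler, backpropagate the joint loss,
        # clip gradients of both networks, step both optimizers, and sync the actor
        # model with the updated learner weights.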
        scheduler.step()
        optimizer.zero_grad()
        predictor_optimizer.zero_grad()
        total_loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), flags.max_grad_norm)
        nn.utils.clip_grad_norm_(predictor_network.parameters(), flags.max_grad_norm)
        optimizer.step()
        predictor_optimizer.step()

        actor_model.load_state_dict(model.state_dict())
        return stats