in src_code/learners/q_interactive_learner.py [0:0]
def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
    # Get the relevant quantities
    rewards = batch["reward"][:, :-1]
    actions = batch["actions"][:, :-1]
    terminated = batch["terminated"][:, :-1].float()
    mask = batch["filled"][:, :-1].float()
    mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
    avail_actions = batch["avail_actions"]
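    # Shape note (assuming the standard PyMARL EpisodeBatch layout):
    #   rewards / terminated / mask : [batch, T-1, 1]
    #   actions                     : [batch, T-1, n_agents, 1]
    #   avail_actions               : [batch, T, n_agents, n_actions]
    # The final timestep is dropped from rewards/actions because it only
    # supplies the bootstrap state for the targets.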
    # Calculate estimated Q-Values
    mac_out = []
    mac_out_interactive = []
    mac_out_interactive_ = []
    mac_out_alone = []
    self.mac.init_hidden(batch.batch_size)
    for t in range(batch.max_seq_length):
        agent_outs, agent_outs_interactive, agent_outs_interactive_, agent_outs_alone = self.mac.get_individual_q(batch, t=t)
        mac_out.append(agent_outs)
        mac_out_interactive.append(agent_outs_interactive)
        mac_out_interactive_.append(agent_outs_interactive_)
        mac_out_alone.append(agent_outs_alone)
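    # get_individual_q returns four per-agent Q streams. Judging by the names
    # (an assumption -- the exact decomposition lives in the MAC / agent code),
    # agent_outs is the full Q used for action selection, agent_outs_interactive
    # and agent_outs_interactive_ are interaction components, and
    # agent_outs_alone is the agent-acting-alone Q. In this method only mac_out
    # (TD loss) and mac_out_interactive_ (regularisation) are consumed; the
    # other two lists are collected but unused here.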
    # Calculate the Q-Values necessary for the target
    target_mac_out = []
    self.target_mac.init_hidden(batch.batch_size)
    for t in range(batch.max_seq_length):
        target_agent_outs = self.target_mac.forward(batch, t=t)
        target_mac_out.append(target_agent_outs)

    mac_out = th.stack(mac_out, dim=1)  # Concat over time
    mac_out_interactive = th.stack(mac_out_interactive, dim=1)  # Concat over time
    mac_out_interactive_ = th.stack(mac_out_interactive_, dim=1)  # Concat over time
    mac_out_alone = th.stack(mac_out_alone, dim=1)  # Concat over time
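    # Stacking over t gives each mac_out* tensor shape
    # [batch, T, n_agents, n_actions] (assuming per-step outputs of shape
    # [batch, n_agents, n_actions]).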
    # Pick the Q-Values for the actions taken by each agent
    chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3, index=actions).squeeze(3)  # Remove the last dim

    # We don't need the first timestep's Q-Value estimate for calculating targets
    target_mac_out = th.stack(target_mac_out[1:], dim=1)  # Concat across time

    # Mask out unavailable actions
    target_mac_out[avail_actions[:, 1:] == 0] = -9999999

    # Max over target Q-Values
    if self.args.double_q:
        # Get actions that maximise live Q (for double q-learning)
        mac_out_detach = mac_out.clone().detach()
        mac_out_detach[avail_actions == 0] = -9999999
        cur_max_actions = mac_out_detach[:, 1:].max(dim=3, keepdim=True)[1]
        target_max_qvals = th.gather(target_mac_out, 3, cur_max_actions).squeeze(3)
    else:
        target_max_qvals = target_mac_out.max(dim=3)[0]
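    # Double Q-learning decouples selection from evaluation: the live network
    # picks argmax_a Q(s_{t+1}, a) over available actions, and the target
    # network evaluates that action, reducing max-operator overestimation bias.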
    # Mix per-agent Q-values into a joint value conditioned on the global state
    if self.mixer is not None:
        chosen_action_qvals = self.mixer(chosen_action_qvals, batch["state"][:, :-1])
        target_max_qvals = self.target_mixer(target_max_qvals, batch["state"][:, 1:])

    # Calculate 1-step Q-Learning targets
    targets = rewards + self.args.gamma * (1 - terminated) * target_max_qvals
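    # i.e. y_t = r_t + gamma * (1 - terminated_t) * Q_target(s_{t+1}, a*),
    # where the (1 - terminated) factor stops bootstrapping past episode ends.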
    # TD error between chosen-action Q and the (detached) bootstrap target
    td_error = (chosen_action_qvals - targets.detach())

    mask = mask.expand_as(td_error)

    # 0-out the targets that came from padded data
    masked_td_error = td_error * mask

    # Normal L2 loss, take mean over actual data
    loss = (masked_td_error ** 2).sum() / mask.sum()
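    # Dividing by mask.sum() (not numel()) makes this a mean over valid,
    # non-padded transitions only, so padded timesteps neither contribute
    # error nor dilute the average.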
    # Optional regularisation on the interactive Q component
    if self.args.regulization == "all":
        # Push the interactive Q-values of all available actions towards 0
        min_q_interactive = mac_out_interactive_[:, :-1] * avail_actions[:, :-1] * mask.unsqueeze(-1)
        reg_loss = (min_q_interactive ** 2).sum() / mask.unsqueeze(-1).sum()
        loss += reg_loss
    elif self.args.regulization == "chosen_":
        # Push the interactive Q-values of the chosen actions towards 0
        chosen_action_qvals_interactive = th.gather(mac_out_interactive_[:, :-1], dim=3, index=actions).squeeze(3)  # Remove the last dim
        reg_loss = ((chosen_action_qvals_interactive * mask) ** 2).sum() / mask.sum()
        loss += reg_loss
    elif self.args.regulization == "all_":
        # Same as "all", but normalised by the number of available actions
        min_q_interactive = mac_out_interactive_[:, :-1] * avail_actions[:, :-1] * mask.unsqueeze(-1)
        reg_loss = (min_q_interactive ** 2).sum() / (mask.unsqueeze(-1) * avail_actions[:, :-1]).sum()
        loss += reg_loss
    else:
        reg_loss = th.zeros(1).sum()
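    # Regulariser variants: "all" and "all_" push the interactive component of
    # every available action towards zero (they differ only in normalisation:
    # per valid timestep vs. per available action), while "chosen_" penalises
    # only the actions actually taken. Any other setting disables it
    # (reg_loss = 0, still logged below).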
    # Optimise
    self.optimiser.zero_grad()
    loss.backward()
    grad_norm = th.nn.utils.clip_grad_norm_(self.params, self.args.grad_norm_clip)
    self.optimiser.step()

    if (episode_num - self.self_last_target_update_episode) / self.args.minus_target_update_interval >= 1.0:
        self.mac.update_targets()
        self.self_last_target_update_episode = episode_num

    if (episode_num - self.last_target_update_episode) / self.args.target_update_interval >= 1.0:
        self._update_targets()
        self.last_target_update_episode = episode_num
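    # Two separate hard-update schedules: self.mac.update_targets() refreshes
    # target parameters kept inside the MAC (every
    # args.minus_target_update_interval episodes), while _update_targets()
    # presumably copies the learner's MAC and mixer into target_mac /
    # target_mixer (every args.target_update_interval episodes).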
    if t_env - self.log_stats_t >= self.args.learner_log_interval:
        self.logger.log_stat("loss", loss.item(), t_env)
        self.logger.log_stat("reg_loss", reg_loss.item(), t_env)
        self.logger.log_stat("grad_norm", grad_norm, t_env)
        mask_elems = mask.sum().item()
        self.logger.log_stat("td_error_abs", (masked_td_error.abs().sum().item() / mask_elems), t_env)
        self.logger.log_stat("q_taken_mean", (chosen_action_qvals * mask).sum().item() / (mask_elems * self.args.n_agents), t_env)
        self.logger.log_stat("target_mean", (targets * mask).sum().item() / (mask_elems * self.args.n_agents), t_env)
        self.log_stats_t = t_env
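
    # Config fields read from self.args in this method: double_q, gamma,
    # regulization (spelling must match the config key), grad_norm_clip,
    # minus_target_update_interval, target_update_interval,
    # learner_log_interval, n_agents.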