rlalgos/deprecated/dqn/duelling_dqn.py [39:103]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
import torch

from rlstructures import DictTensor  # DictTensor / TemporalDictTensor come from rlstructures


class ReplayBuffer:
    """
    Fixed-size circular buffer of transitions. Trajectories are pushed as
    TemporalDictTensors of shape (B, T, ...) and flattened into B * T
    individual transitions.
    """

    def __init__(self, N):
        self.N = N          # maximum number of transitions kept
        self.buffer = None  # allocated lazily on the first push

    def _init_buffer(self, trajectories):
        # Allocate one flat (N, ...) tensor per key, dropping the (B, T) dims
        self.buffer = {}
        for k in trajectories.keys():
            dtype = trajectories[k].dtype
            size = trajectories[k].size()
            b_size = (self.N,) + size[2:]
            self.buffer[k] = torch.zeros(*b_size, dtype=dtype)
        self.pos = 0
        self.full = False

    def write(self, trajectories):
        new_pos = None
        n = 0
        for k in trajectories.keys():
            v = trajectories[k]
            size = v.size()
            # Flatten the (B, T, ...) trajectories into B * T transitions
            b_size = (size[0] * size[1],) + size[2:]
            v = v.reshape(*b_size)
            n = v.size()[0]
            if new_pos is None:
                # Circular write: indices running past the end wrap back to
                # the start (equivalent to (torch.arange(n) + self.pos) % self.N
                # as long as n <= N)
                new_pos = torch.arange(n) + self.pos
                mask = new_pos.ge(self.N).float()
                nidx = torch.arange(n) + self.pos - self.N
                new_pos = (new_pos * (1 - mask) + mask * nidx).long()
            self.buffer[k][new_pos] = v
        self.pos = self.pos + n
        if self.pos >= self.N:
            self.pos = self.pos - self.N
            self.full = True
        assert self.pos < self.N

    def size(self):
        if self.full:
            return self.N
        else:
            return self.pos

    def push(self, trajectories):
        """
        Add a batch of trajectories to the replay buffer. All trajectories
        must have the same length: variable-length episodes are not supported.
        """
        max_length = trajectories.lengths.max().item()
        assert trajectories.lengths.eq(max_length).all()
        if self.buffer is None:
            self._init_buffer(trajectories)
        self.write(trajectories)

    def sample(self, n=1):
        """
        Uniformly sample n transitions (with replacement) as a DictTensor.
        """
        limit = self.pos
        if self.full:
            limit = self.N
        transitions = torch.randint(0, high=limit, size=(n,))
        d = {k: self.buffer[k][transitions] for k in self.buffer}
        return DictTensor(d)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
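
For reference, a minimal usage sketch of the buffer above. The real trajectory
type is rlstructures' TemporalDictTensor; the _FakeTrajectories stand-in below
is hypothetical and only mimics the three members push/write actually touch
(keys(), item access, and a lengths tensor):

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
import torch

class _FakeTrajectories:
    """Hypothetical stand-in for the TemporalDictTensor interface used here."""

    def __init__(self, tensors, lengths):
        self._tensors = tensors  # dict of (B, T, ...) tensors
        self.lengths = lengths   # (B,) tensor of per-trajectory lengths

    def keys(self):
        return self._tensors.keys()

    def __getitem__(self, k):
        return self._tensors[k]

rb = ReplayBuffer(N=100)
trajs = _FakeTrajectories(
    {"obs": torch.randn(4, 5, 8), "reward": torch.zeros(4, 5)},  # B=4, T=5
    lengths=torch.full((4,), 5, dtype=torch.long),
)
rb.push(trajs)          # writes 4 * 5 = 20 flattened transitions
batch = rb.sample(n=8)  # DictTensor holding 8 uniformly sampled transitions
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -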



rlalgos/deprecated/sac/sac.py [26:90]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
import torch

from rlstructures import DictTensor  # DictTensor / TemporalDictTensor come from rlstructures


class ReplayBuffer:
    """
    Fixed-size circular buffer of transitions. Trajectories are pushed as
    TemporalDictTensors of shape (B, T, ...) and flattened into B * T
    individual transitions.
    """

    def __init__(self, N):
        self.N = N          # maximum number of transitions kept
        self.buffer = None  # allocated lazily on the first push

    def _init_buffer(self, trajectories):
        # Allocate one flat (N, ...) tensor per key, dropping the (B, T) dims
        self.buffer = {}
        for k in trajectories.keys():
            dtype = trajectories[k].dtype
            size = trajectories[k].size()
            b_size = (self.N,) + size[2:]
            self.buffer[k] = torch.zeros(*b_size, dtype=dtype)
        self.pos = 0
        self.full = False

    def write(self, trajectories):
        new_pos = None
        n = 0
        for k in trajectories.keys():
            v = trajectories[k]
            size = v.size()
            # Flatten the (B, T, ...) trajectories into B * T transitions
            b_size = (size[0] * size[1],) + size[2:]
            v = v.reshape(*b_size)
            n = v.size()[0]
            if new_pos is None:
                # Circular write: indices running past the end wrap back to
                # the start (equivalent to (torch.arange(n) + self.pos) % self.N
                # as long as n <= N)
                new_pos = torch.arange(n) + self.pos
                mask = new_pos.ge(self.N).float()
                nidx = torch.arange(n) + self.pos - self.N
                new_pos = (new_pos * (1 - mask) + mask * nidx).long()
            self.buffer[k][new_pos] = v
        self.pos = self.pos + n
        if self.pos >= self.N:
            self.pos = self.pos - self.N
            self.full = True
        assert self.pos < self.N

    def size(self):
        if self.full:
            return self.N
        else:
            return self.pos

    def push(self, trajectories):
        """
        Add a batch of trajectories to the replay buffer. All trajectories
        must have the same length: variable-length episodes are not supported.
        """
        max_length = trajectories.lengths.max().item()
        assert trajectories.lengths.eq(max_length).all()
        if self.buffer is None:
            self._init_buffer(trajectories)
        self.write(trajectories)

    def sample(self, n=1):
        """
        Uniformly sample n transitions (with replacement) as a DictTensor.
        """
        limit = self.pos
        if self.full:
            limit = self.N
        transitions = torch.randint(0, high=limit, size=(n,))
        d = {k: self.buffer[k][transitions] for k in self.buffer}
        return DictTensor(d)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
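
The masked index arithmetic in write() above is just a circular write. A quick
self-contained check (assuming a chunk never exceeds the buffer, i.e. n <= N,
which the mask construction also requires) that it matches a plain modulo:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
import torch

def masked_positions(pos, n, N):
    # The arithmetic used in write(): wrap indices that run past the end
    new_pos = torch.arange(n) + pos
    mask = new_pos.ge(N).float()
    nidx = torch.arange(n) + pos - N
    return (new_pos * (1 - mask) + mask * nidx).long()

def modulo_positions(pos, n, N):
    return (torch.arange(n) + pos) % N

for pos in range(10):
    assert torch.equal(masked_positions(pos, 4, 10), modulo_positions(pos, 4, 10))
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -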



