modules/SwissArmyTransformer/sat/model/official/chatglm2_model.py [93:141]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            self.add_mixin("mlp", SwiGLUMixin(args.num_layers, args.hidden_size, args.inner_hidden_size, bias=args.use_bias))

    def position_embedding_forward(self, position_ids, output_cross_layer, **kw_args):
        return None
    
    def get_masks(self, input_ids, past_key_values, padding_mask=None):
        batch_size, seq_length = input_ids.shape
        # Causal (lower-triangular) mask over the current chunk.
        full_attention_mask = torch.ones(batch_size, seq_length, seq_length, dtype=next(self.parameters()).dtype, device=input_ids.device)
        full_attention_mask.tril_()
        past_length = 0
        if past_key_values:
            past_length = past_key_values[0][0].shape[2]
        if past_length:
            # Every current query may attend to all cached positions.
            full_attention_mask = torch.cat((torch.ones(batch_size, seq_length, past_length, dtype=next(self.parameters()).dtype,
                                                        device=input_ids.device), full_attention_mask), dim=-1)
        if padding_mask is not None:
            # Hide padded key positions from every query.
            full_attention_mask = full_attention_mask * padding_mask.unsqueeze(1)
        if not past_length and padding_mask is not None:
            # Re-open all keys for queries that sit on padding, so no row ends up fully masked.
            full_attention_mask -= padding_mask.unsqueeze(-1) - 1
        # Invert the semantics (True now marks blocked positions) and add a broadcast head dim.
        full_attention_mask = (full_attention_mask < 0.5).bool()
        full_attention_mask.unsqueeze_(1)
        return full_attention_mask
    
    def get_position_ids(self, input_ids):
        batch_size, seq_length = input_ids.shape
        position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device).unsqueeze(0).repeat(batch_size, 1)
        return position_ids
    
    def forward(self, input_ids, position_ids=None, attention_mask=None, past_key_values=None, **kwargs):
        if position_ids is None:
            position_ids = self.get_position_ids(input_ids)
        if attention_mask is not None and attention_mask.ndim == 4:
            # A full (B, 1, L, L) mask was supplied by the caller; use it as-is.
            pass
        elif past_key_values is not None and input_ids.size(0) == 1:
            # Single-sequence decoding with a KV cache: the new token may attend to everything.
            attention_mask = torch.tensor([[1]], dtype=torch.long, device=input_ids.device)
        else:
            # Build the causal mask, treating any user-supplied 2-D mask as a padding mask.
            attention_mask = self.get_masks(input_ids, past_key_values, padding_mask=attention_mask)
        if attention_mask is not None and attention_mask.dtype is torch.bool:
            attention_mask = ~attention_mask
        # After the flip and cast, 1.0 means "may attend" and 0.0 means "blocked".
        attention_mask = attention_mask.to(next(self.parameters()).dtype)
        if past_key_values is not None:
            # With a cache, only the newest token (and its position / query row) is recomputed.
            input_ids = input_ids[:, -1:]
            position_ids = position_ids[..., -1:]
            if input_ids.size(0) != 1:
                attention_mask = attention_mask[:, :, -1:]
        return super().forward(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, **kwargs)
    
    @classmethod
    def add_model_specific_args(cls, parser):
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
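
For reference, the mask convention used by get_masks above can be reproduced in isolation. The sketch below is not repository code: the helper name build_chatglm_mask and the toy tensors are invented for illustration. It shows that the returned bool mask has shape (batch, 1, seq, past + seq) and that True marks positions a query must not attend to; forward() later flips this mask (~) and casts it, so the dense mask actually handed to the transformer uses 1.0 for attendable positions and 0.0 for blocked ones.

import torch

def build_chatglm_mask(seq_length, past_length=0, padding_mask=None, device="cpu"):
    # Same construction as get_masks, written as a free function for illustration.
    batch_size = 1 if padding_mask is None else padding_mask.shape[0]
    full = torch.ones(batch_size, seq_length, seq_length, device=device)
    full.tril_()                                     # causal part for the current chunk
    if past_length:
        # cached positions are visible to every current query
        full = torch.cat((torch.ones(batch_size, seq_length, past_length, device=device), full), dim=-1)
    if padding_mask is not None:
        full = full * padding_mask.unsqueeze(1)      # hide padded keys
    if not past_length and padding_mask is not None:
        full -= padding_mask.unsqueeze(-1) - 1       # keep padded query rows from being fully masked
    full = (full < 0.5).bool()                       # True == blocked
    return full.unsqueeze(1)                         # (B, 1, L, past + L)

# Toy check: batch of 2, 4 tokens, second sequence padded at position 0.
pad = torch.tensor([[1., 1., 1., 1.],
                    [0., 1., 1., 1.]])
mask = build_chatglm_mask(seq_length=4, padding_mask=pad)
print(mask.shape)         # torch.Size([2, 1, 4, 4])
print(mask[1, 0].int())   # 0/1 view of the blocked positions for the padded sequence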



modules/SwissArmyTransformer/sat/model/official/chatglm3_model.py [92:140]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        self.add_mixin("mlp", SwiGLUMixin(args.num_layers, args.hidden_size, args.inner_hidden_size, bias=args.use_bias))

    def position_embedding_forward(self, position_ids, output_cross_layer, **kw_args):
        return None
    
    def get_masks(self, input_ids, past_key_values, padding_mask=None):
        batch_size, seq_length = input_ids.shape
        full_attention_mask = torch.ones(batch_size, seq_length, seq_length, dtype=next(self.parameters()).dtype, device=input_ids.device)
        full_attention_mask.tril_()
        past_length = 0
        if past_key_values:
            past_length = past_key_values[0][0].shape[2]
        if past_length:
            full_attention_mask = torch.cat((torch.ones(batch_size, seq_length, past_length, dtype=next(self.parameters()).dtype,
                                                        device=input_ids.device), full_attention_mask), dim=-1)
        if padding_mask is not None:
            full_attention_mask = full_attention_mask * padding_mask.unsqueeze(1)
        if not past_length and padding_mask is not None:
            full_attention_mask -= padding_mask.unsqueeze(-1) - 1
        full_attention_mask = (full_attention_mask < 0.5).bool()
        full_attention_mask.unsqueeze_(1)
        return full_attention_mask
    
    def get_position_ids(self, input_ids):
        batch_size, seq_length = input_ids.shape
        position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device).unsqueeze(0).repeat(batch_size, 1)
        return position_ids
    
    def forward(self, input_ids, position_ids=None, attention_mask=None, past_key_values=None, **kwargs):
        if position_ids is None:
            position_ids = self.get_position_ids(input_ids)
        if attention_mask is not None and attention_mask.ndim == 4:
            pass
        elif past_key_values is not None and input_ids.size(0) == 1:
            attention_mask = torch.tensor([[1]], dtype=torch.long, device=input_ids.device)
        else:
            attention_mask = self.get_masks(input_ids, past_key_values, padding_mask=attention_mask)
        if attention_mask is not None and attention_mask.dtype is torch.bool:
            attention_mask = ~attention_mask
        attention_mask = attention_mask.to(next(self.parameters()).dtype)
        if past_key_values is not None:
            input_ids = input_ids[:, -1:]
            position_ids = position_ids[..., -1:]
            if input_ids.size(0) != 1:
                attention_mask = attention_mask[:, :, -1:]
        return super().forward(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, **kwargs)
    
    @classmethod
    def add_model_specific_args(cls, parser):
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
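
Both excerpts begin by registering SwiGLUMixin, which swaps the default feed-forward for a gated-SiLU (SwiGLU) MLP. As a rough, self-contained sketch of that computation (the class and parameter names below are hypothetical and are not taken from SwiGLUMixin itself): the hidden state is projected to twice the inner size, split into a gate half and a value half, and recombined as silu(gate) * value before the down projection.

import torch
import torch.nn as nn
import torch.nn.functional as F

class SwiGLUSketch(nn.Module):
    # Illustration only: a gated-SiLU feed-forward of the kind SwiGLUMixin installs.
    def __init__(self, hidden_size, inner_hidden_size, bias=False):
        super().__init__()
        # one up-projection produces both the gate half and the value half
        self.w_in = nn.Linear(hidden_size, 2 * inner_hidden_size, bias=bias)
        self.w_out = nn.Linear(inner_hidden_size, hidden_size, bias=bias)

    def forward(self, x):
        gate, value = self.w_in(x).chunk(2, dim=-1)
        return self.w_out(F.silu(gate) * value)

x = torch.randn(1, 4, 32)
print(SwiGLUSketch(hidden_size=32, inner_hidden_size=64)(x).shape)   # torch.Size([1, 4, 32])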



