| """ PyTorch GLM-4V model. """ |
| import math |
| import sys |
| import torch |
| import torch.utils.checkpoint |
| import torch.nn.functional as F |
| from torch import nn |
| from torch.nn import CrossEntropyLoss, LayerNorm, MSELoss, BCEWithLogitsLoss |
| from torch.nn.utils import skip_init |
| from typing import Optional, Tuple, Union, List, Dict, Any |
|
|
| from transformers.modeling_outputs import ( |
| BaseModelOutputWithPast, |
| CausalLMOutputWithPast, |
| SequenceClassifierOutputWithPast, |
| ) |
| from transformers.modeling_utils import PreTrainedModel |
| from transformers.utils import logging, is_torch_npu_available |
| from transformers.generation.logits_process import LogitsProcessor |
| from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationConfig, ModelOutput |
|
|
| from .visual import EVA2CLIPModel |
| from .configuration_chatglm import ChatGLMConfig |
|
|
| try: |
| from transformers.utils import is_flash_attn_greater_or_equal_2_10, is_flash_attn_2_available |
|
|
| if is_flash_attn_2_available(): |
| from flash_attn import flash_attn_func, flash_attn_varlen_func |
| from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input |
except Exception:
    pass
|
|
| |
|
|
| if sys.platform != 'darwin' and not is_torch_npu_available(): |
| torch._C._jit_set_profiling_mode(False) |
| torch._C._jit_set_profiling_executor(False) |
| torch._C._jit_override_can_fuse_on_cpu(True) |
| torch._C._jit_override_can_fuse_on_gpu(True) |
|
|
| logger = logging.get_logger(__name__) |
|
|
| LANGUAGE_TOKEN_TYPE = 0 |
| VISION_TOKEN_TYPE = 1 |
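# Token-type ids used to tag positions that hold ordinary text tokens vs. positions
# filled with vision (image patch) embeddings.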
|
|
| _CHECKPOINT_FOR_DOC = "THUDM/ChatGLM" |
| _CONFIG_FOR_DOC = "ChatGLMConfig" |
|
|
|
|
| def default_init(cls, *args, **kwargs): |
| return cls(*args, **kwargs) |
|
|
|
|
| class InvalidScoreLogitsProcessor(LogitsProcessor): |
| def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: |
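        # Guard against degenerate logits: if any score is NaN or Inf, zero everything
        # and force a single fixed token id (198) so sampling cannot crash.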
| if torch.isnan(scores).any() or torch.isinf(scores).any(): |
| scores.zero_() |
| scores[..., 198] = 5e4 |
| return scores |
|
|
|
|
| class PrefixEncoder(torch.nn.Module): |
| """ |
| The torch.nn model to encode the prefix |
| Input shape: (batch-size, prefix-length) |
| Output shape: (batch-size, prefix-length, 2*layers*hidden) |
| """ |
|
|
| def __init__(self, config: ChatGLMConfig): |
| super().__init__() |
| self.prefix_projection = config.prefix_projection |
| if self.prefix_projection: |
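            # P-Tuning v2 style prefix: embed the prefix ids, then re-project them through
            # a small two-layer MLP before they are reshaped into per-layer K/V tensors.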
| |
| kv_size = config.num_layers * config.kv_channels * config.multi_query_group_num * 2 |
| self.embedding = torch.nn.Embedding(config.pre_seq_len, kv_size) |
| self.trans = torch.nn.Sequential( |
| torch.nn.Linear(kv_size, config.hidden_size), |
| torch.nn.Tanh(), |
| torch.nn.Linear(config.hidden_size, kv_size) |
| ) |
| else: |
| self.embedding = torch.nn.Embedding(config.pre_seq_len, |
| config.num_layers * config.kv_channels * config.multi_query_group_num * 2) |
|
|
| def forward(self, prefix: torch.Tensor): |
| if self.prefix_projection: |
| prefix_tokens = self.embedding(prefix) |
| past_key_values = self.trans(prefix_tokens) |
| else: |
| past_key_values = self.embedding(prefix) |
| return past_key_values |
|
|
|
|
| def split_tensor_along_last_dim( |
| tensor: torch.Tensor, |
| num_partitions: int, |
| contiguous_split_chunks: bool = False, |
| ) -> List[torch.Tensor]: |
| """Split a tensor along its last dimension. |
| |
| Arguments: |
| tensor: input tensor. |
| num_partitions: number of partitions to split the tensor |
| contiguous_split_chunks: If True, make each chunk contiguous |
| in memory. |
| |
| Returns: |
| A list of Tensors |
| """ |
| |
| last_dim = tensor.dim() - 1 |
| last_dim_size = tensor.size()[last_dim] // num_partitions |
| |
| tensor_list = torch.split(tensor, last_dim_size, dim=last_dim) |
| |
| if contiguous_split_chunks: |
| return tuple(chunk.contiguous() for chunk in tensor_list) |
|
|
| return tensor_list |
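# Example use of split_tensor_along_last_dim (hypothetical shapes): splitting a fused
# projection of shape [batch, seq, 3 * hidden] into three [batch, seq, hidden] chunks:
#   q, k, v = split_tensor_along_last_dim(mixed_qkv, 3)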
|
|
|
|
| class RotaryEmbedding(nn.Module): |
| def __init__(self, dim, rope_ratio=1, original_impl=False, device=None, dtype=None): |
| super().__init__() |
| inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, device=device).to(dtype=dtype) / dim)) |
| self.register_buffer("inv_freq", inv_freq) |
| self.dim = dim |
| self.original_impl = original_impl |
| self.rope_ratio = rope_ratio |
|
|
    def impl(self, seq_length: int, dim: int, device: torch.device, dtype: torch.dtype):
        base = 10000 * self.rope_ratio
        inv_freq = 1.0 / (
            base ** (torch.arange(0, dim, 2, device=device, dtype=torch.float32) / dim))
        seq = torch.arange(seq_length, device=inv_freq.device, dtype=torch.float32)
        freqs = torch.outer(seq, inv_freq)

        # Return (cos, sin) pairs of shape [seq_length, dim // 2, 2] so this cache has the
        # same layout as forward_impl() and can be consumed by apply_rotary_pos_emb.
        cache = torch.stack([torch.cos(freqs), torch.sin(freqs)], dim=-1)
        if dtype in (torch.float16, torch.bfloat16, torch.int8):
            cache = cache.bfloat16() if dtype == torch.bfloat16 else cache.half()
        return cache
|
|
| def forward_impl( |
| self, seq_len: int, n_elem: int, dtype: torch.dtype, device: torch.device, base: int = 10000 |
| ): |
| """Enhanced Transformer with Rotary Position Embedding. |
| |
| Derived from: https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/ |
| transformers/rope/__init__.py. MIT License: |
| https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/license. |
| """ |
| |
| base = base * self.rope_ratio |
| theta = 1.0 / (base ** (torch.arange(0, n_elem, 2, dtype=torch.float, device=device) / n_elem)) |
|
|
| |
| seq_idx = torch.arange(seq_len, dtype=torch.float, device=device) |
|
|
| |
| idx_theta = torch.outer(seq_idx, theta).float() |
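        # Cache layout: [seq_len, n_elem // 2, 2] holding (cos, sin) pairs, the format
        # consumed by apply_rotary_pos_emb below.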
|
|
| cache = torch.stack([torch.cos(idx_theta), torch.sin(idx_theta)], dim=-1) |
|
|
| |
| if dtype in (torch.float16, torch.bfloat16, torch.int8): |
| cache = cache.bfloat16() if dtype == torch.bfloat16 else cache.half() |
| return cache |
|
|
| def forward(self, max_seq_len, offset=0): |
| if self.original_impl: |
| return self.forward_impl( |
| max_seq_len, self.dim, dtype=self.inv_freq.dtype, device=self.inv_freq.device |
| ) |
| else: |
| return self.impl(max_seq_len, self.dim, dtype=self.inv_freq.dtype, device=self.inv_freq.device) |
|
|
|
|
| @torch.jit.script |
| def apply_rotary_pos_emb(x: torch.Tensor, rope_cache: torch.Tensor) -> torch.Tensor: |
| |
| b, np, sq, hn = x.size(0), x.size(1), x.size(2), x.size(3) |
| rot_dim = rope_cache.shape[-2] * 2 |
| x, x_pass = x[..., :rot_dim], x[..., rot_dim:] |
| |
| rope_cache = rope_cache[:, :sq] |
| xshaped = x.reshape(b, np, sq, rot_dim // 2, 2) |
| rope_cache = rope_cache.view(-1, 1, sq, xshaped.size(3), 2) |
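    # Rotate each (x0, x1) pair by its cached angle:
    # (x0 * cos - x1 * sin, x1 * cos + x0 * sin).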
| x_out2 = torch.stack( |
| [ |
| xshaped[..., 0] * rope_cache[..., 0] - xshaped[..., 1] * rope_cache[..., 1], |
| xshaped[..., 1] * rope_cache[..., 0] + xshaped[..., 0] * rope_cache[..., 1], |
| ], |
| -1, |
| ) |
| x_out2 = x_out2.flatten(3) |
| return torch.cat((x_out2, x_pass), dim=-1) |
|
|
|
|
| class RMSNorm(torch.nn.Module): |
| def __init__(self, normalized_shape, eps=1e-5, device=None, dtype=None, **kwargs): |
| super().__init__() |
| self.weight = torch.nn.Parameter(torch.empty(normalized_shape, device=device, dtype=dtype)) |
| self.eps = eps |
|
|
| def forward(self, hidden_states: torch.Tensor): |
| input_dtype = hidden_states.dtype |
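        # RMSNorm: normalize by the root-mean-square of the activations, computed in
        # fp32 for stability, then rescale by the learned weight and cast back.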
| variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) |
| hidden_states = hidden_states * torch.rsqrt(variance + self.eps) |
|
|
| return (self.weight * hidden_states).to(input_dtype) |
|
|
|
|
|
|
| class CoreAttention(torch.nn.Module): |
| def __init__(self, config: ChatGLMConfig, layer_number): |
        super(CoreAttention, self).__init__()
        self.config = config
        # NOTE: the SdpaAttention / FlashAttention2 subclasses below read `self.config`
        # and `self.is_causal`; set them here so those backends work. `is_causal = True`
        # assumes decoder-style causal attention, matching how this model builds its masks.
        self.is_causal = True
|
|
| self.apply_query_key_layer_scaling = config.apply_query_key_layer_scaling |
| self.attention_softmax_in_fp32 = config.attention_softmax_in_fp32 |
| if self.apply_query_key_layer_scaling: |
| self.attention_softmax_in_fp32 = True |
| self.layer_number = max(1, layer_number) |
|
|
| projection_size = config.kv_channels * config.num_attention_heads |
|
|
| |
| self.hidden_size_per_partition = projection_size |
| self.hidden_size_per_attention_head = projection_size // config.num_attention_heads |
| self.num_attention_heads_per_partition = config.num_attention_heads |
|
|
| coeff = None |
| self.norm_factor = math.sqrt(self.hidden_size_per_attention_head) |
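        # Query/key layer scaling: divide attention scores by the layer index here and
        # multiply it back (via self.coeff) after the fp32 cast, improving fp16 stability.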
| if self.apply_query_key_layer_scaling: |
| coeff = self.layer_number |
| self.norm_factor *= coeff |
| self.coeff = coeff |
|
|
| self.attention_dropout = torch.nn.Dropout(config.attention_dropout) |
|
|
| def forward(self, query_layer, key_layer, value_layer, attention_mask): |
| pytorch_major_version = int(torch.__version__.split('.')[0]) |
| if pytorch_major_version >= 2: |
| if attention_mask is None and query_layer.shape[2] == key_layer.shape[2]: |
| context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer, |
| is_causal=True) |
| else: |
| if attention_mask is not None: |
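                    # get_masks() marks positions to *ignore* with True, while
                    # scaled_dot_product_attention expects True for positions to *keep*,
                    # hence the inversion.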
| attention_mask = ~attention_mask |
| context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer, |
| attention_mask) |
| context_layer = context_layer.transpose(1, 2).contiguous() |
| new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,) |
| context_layer = context_layer.reshape(*new_context_layer_shape) |
| else: |
| |
|
|
| |
| output_size = (query_layer.size(0), query_layer.size(1), query_layer.size(2), key_layer.size(2)) |
|
|
| |
| query_layer = query_layer.view(output_size[0] * output_size[1], output_size[2], -1) |
| |
| key_layer = key_layer.view(output_size[0] * output_size[1], output_size[3], -1) |
|
|
| |
| matmul_input_buffer = torch.empty( |
| output_size[0] * output_size[1], output_size[2], output_size[3], dtype=query_layer.dtype, |
| device=query_layer.device |
| ) |
|
|
| |
| matmul_result = torch.baddbmm( |
| matmul_input_buffer, |
| query_layer, |
| key_layer.transpose(1, 2), |
| beta=0.0, |
| alpha=(1.0 / self.norm_factor), |
| ) |
|
|
| |
| attention_scores = matmul_result.view(*output_size) |
|
|
| |
| |
| |
|
|
| |
| if self.attention_softmax_in_fp32: |
| attention_scores = attention_scores.float() |
| if self.coeff is not None: |
| attention_scores = attention_scores * self.coeff |
| if attention_mask is None and attention_scores.shape[2] == attention_scores.shape[3]: |
| attention_mask = torch.ones(output_size[0], 1, output_size[2], output_size[3], |
| device=attention_scores.device, dtype=torch.bool) |
| attention_mask.tril_() |
| attention_mask = ~attention_mask |
| if attention_mask is not None: |
| attention_scores = attention_scores.masked_fill(attention_mask, float("-inf")) |
| attention_probs = F.softmax(attention_scores, dim=-1) |
| attention_probs = attention_probs.type_as(value_layer) |
|
|
| |
| |
| attention_probs = self.attention_dropout(attention_probs) |
| |
| |
| |
|
|
| |
| |
|
|
| |
| output_size = (value_layer.size(1), value_layer.size(2), query_layer.size(0), value_layer.size(3)) |
| |
| value_layer = value_layer.view(output_size[0] * output_size[1], value_layer.size(2), -1) |
| |
| attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1) |
| |
| context_layer = torch.bmm(attention_probs, value_layer) |
| |
| context_layer = context_layer.view(*output_size) |
| |
| context_layer = context_layer.transpose(1, 2).contiguous() |
| |
| new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,) |
| context_layer = context_layer.reshape(*new_context_layer_shape) |
|
|
| return context_layer |
|
|
| class SdpaAttention(CoreAttention): |
| def forward(self, query_layer, key_layer, value_layer, attention_mask): |
| if attention_mask is None and query_layer.shape[2] == key_layer.shape[2]: |
| context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer, |
| is_causal=True, |
| dropout_p=self.config.attention_dropout if self.training else 0.0) |
| else: |
| if attention_mask is not None: |
| attention_mask = ~attention_mask |
| context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer, |
| attention_mask, |
| dropout_p=self.config.attention_dropout if self.training else 0.0) |
| context_layer = context_layer.transpose(1, 2).contiguous() |
| new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,) |
| context_layer = context_layer.reshape(*new_context_layer_shape) |
| return context_layer |
|
|
|
|
| def _get_unpad_data(attention_mask): |
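    # Given a [batch, seq] padding mask, return the flat indices of real tokens, the
    # cumulative sequence lengths (cu_seqlens, as expected by flash_attn_varlen_func),
    # and the longest sequence length in the batch.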
| seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) |
| indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() |
| max_seqlen_in_batch = seqlens_in_batch.max().item() |
| cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)) |
| return ( |
| indices, |
| cu_seqlens, |
| max_seqlen_in_batch, |
| ) |
|
|
|
|
| |
| class FlashAttention2(CoreAttention): |
| def __init__(self, *args, **kwargs): |
| super().__init__(*args, **kwargs) |
| self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() |
|
|
| def forward(self, query_states, key_states, value_states, attention_mask): |
| query_states = query_states.transpose(1, 2) |
| key_states = key_states.transpose(1, 2) |
| value_states = value_states.transpose(1, 2) |
| batch_size, query_length = query_states.shape[:2] |
| if not self._flash_attn_uses_top_left_mask: |
| causal = self.is_causal |
| else: |
| |
| causal = self.is_causal and query_length != 1 |
| dropout = self.config.attention_dropout if self.training else 0.0 |
| |
| if attention_mask is not None: |
| query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( |
| query_states, key_states, value_states, attention_mask, query_length |
| ) |
|
|
| cu_seqlens_q, cu_seqlens_k = cu_seq_lens |
| max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens |
|
|
| attn_output_unpad = flash_attn_varlen_func( |
| query_states, |
| key_states, |
| value_states, |
| cu_seqlens_q=cu_seqlens_q, |
| cu_seqlens_k=cu_seqlens_k, |
| max_seqlen_q=max_seqlen_in_batch_q, |
| max_seqlen_k=max_seqlen_in_batch_k, |
| dropout_p=dropout, |
| softmax_scale=None, |
| causal=causal, |
| ) |
|
|
| attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) |
| else: |
| attn_output = flash_attn_func( |
| query_states, key_states, value_states, dropout, softmax_scale=None, causal=causal |
| ) |
| attn_output = attn_output.reshape(batch_size, query_length, self.hidden_size_per_partition).contiguous() |
| return attn_output |
|
|
| def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): |
| indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) |
| batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape |
|
|
| key_layer = index_first_axis( |
| key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k |
| ) |
| value_layer = index_first_axis( |
| value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k |
| ) |
| if query_length == kv_seq_len: |
| query_layer = index_first_axis( |
| query_layer.reshape(batch_size * kv_seq_len, self.num_attention_heads_per_partition, head_dim), |
| indices_k |
| ) |
| cu_seqlens_q = cu_seqlens_k |
| max_seqlen_in_batch_q = max_seqlen_in_batch_k |
| indices_q = indices_k |
| elif query_length == 1: |
| max_seqlen_in_batch_q = 1 |
| cu_seqlens_q = torch.arange( |
| batch_size + 1, dtype=torch.int32, device=query_layer.device |
| ) |
| indices_q = cu_seqlens_q[:-1] |
| query_layer = query_layer.squeeze(1) |
| else: |
| |
| attention_mask = attention_mask[:, -query_length:] |
| query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) |
|
|
| return ( |
| query_layer, |
| key_layer, |
| value_layer, |
| indices_q, |
| (cu_seqlens_q, cu_seqlens_k), |
| (max_seqlen_in_batch_q, max_seqlen_in_batch_k), |
| ) |
|
|
|
|
| CORE_ATTENTION_CLASSES = { |
| "eager": CoreAttention, |
| "sdpa": SdpaAttention, |
| "flash_attention_2": FlashAttention2 |
| } |
|
|
| class SelfAttention(torch.nn.Module): |
| """Parallel self-attention layer abstract class. |
| |
| Self-attention layer takes input with size [s, b, h] |
| and returns output of the same size. |
| """ |
|
|
| def __init__(self, config: ChatGLMConfig, layer_number, device=None): |
| super(SelfAttention, self).__init__() |
| self.layer_number = max(1, layer_number) |
|
|
| self.projection_size = config.kv_channels * config.num_attention_heads |
|
|
| |
| self.hidden_size_per_attention_head = self.projection_size // config.num_attention_heads |
| self.num_attention_heads_per_partition = config.num_attention_heads |
|
|
| self.multi_query_attention = config.multi_query_attention |
| self.qkv_hidden_size = 3 * self.projection_size |
| self.original_rope = config.original_rope |
| if self.multi_query_attention: |
| self.num_multi_query_groups_per_partition = config.multi_query_group_num |
| self.qkv_hidden_size = ( |
| self.projection_size + 2 * self.hidden_size_per_attention_head * config.multi_query_group_num |
| ) |
| self.query_key_value = nn.Linear(config.hidden_size, self.qkv_hidden_size, |
| bias=config.add_bias_linear or config.add_qkv_bias, |
| device=device, **_config_to_kwargs(config) |
| ) |
|
|
        # Dispatch on the configured attention backend (eager / sdpa / flash_attention_2) so
        # the CORE_ATTENTION_CLASSES table and the _supports_sdpa / _supports_flash_attn_2
        # flags declared below take effect. Assumes `config._attn_implementation` is
        # populated by PreTrainedModel, as in recent transformers releases.
        self.core_attention = CORE_ATTENTION_CLASSES[config._attn_implementation](config, self.layer_number)
|
|
| |
| self.dense = nn.Linear(self.projection_size, config.hidden_size, bias=config.add_bias_linear, |
| device=device, **_config_to_kwargs(config) |
| ) |
|
|
| def _allocate_memory(self, inference_max_sequence_len, batch_size, device=None, dtype=None): |
| if self.multi_query_attention: |
| num_attention_heads = self.num_multi_query_groups_per_partition |
| else: |
| num_attention_heads = self.num_attention_heads_per_partition |
| return torch.empty( |
| inference_max_sequence_len, |
| batch_size, |
| num_attention_heads, |
| self.hidden_size_per_attention_head, |
| dtype=dtype, |
| device=device, |
| ) |
|
|
| def forward( |
| self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True |
| ): |
| |
|
|
| |
| |
| |
| |
| |
| |
|
|
| |
| mixed_x_layer = self.query_key_value(hidden_states) |
|
|
| if self.multi_query_attention: |
| (query_layer, key_layer, value_layer) = mixed_x_layer.split( |
| [ |
| self.num_attention_heads_per_partition * self.hidden_size_per_attention_head, |
| self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head, |
| self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head, |
| ], |
| dim=-1, |
| ) |
| query_layer = query_layer.view( |
| query_layer.size()[:-1] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head) |
| ) |
| key_layer = key_layer.view( |
| key_layer.size()[:-1] + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head) |
| ) |
| value_layer = value_layer.view( |
| value_layer.size()[:-1] |
| + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head) |
| ) |
| else: |
| new_tensor_shape = mixed_x_layer.size()[:-1] + \ |
| (self.num_attention_heads_per_partition, |
| 3 * self.hidden_size_per_attention_head) |
| mixed_x_layer = mixed_x_layer.view(*new_tensor_shape) |
|
|
| |
| (query_layer, key_layer, value_layer) = split_tensor_along_last_dim(mixed_x_layer, 3) |
|
|
| |
| query_layer, key_layer, value_layer = [k.transpose(1, 2) for k in [query_layer, key_layer, value_layer]] |
|
|
| |
| if rotary_pos_emb is not None: |
| query_layer = apply_rotary_pos_emb(query_layer, rotary_pos_emb) |
| key_layer = apply_rotary_pos_emb(key_layer, rotary_pos_emb) |
|
|
| |
| if kv_cache is not None: |
| cache_k, cache_v = kv_cache |
| key_layer = torch.cat((cache_k, key_layer), dim=2) |
| value_layer = torch.cat((cache_v, value_layer), dim=2) |
| if use_cache: |
| kv_cache = (key_layer, value_layer) |
| else: |
| kv_cache = None |
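        # Multi-query attention keeps one K/V head per group; expand each group across
        # its query heads so core attention sees full [b, n_heads, s, head_dim] tensors.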
|
|
| if self.multi_query_attention: |
| key_layer = key_layer.unsqueeze(2) |
| key_layer = key_layer.expand( |
| -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1, -1 |
| ) |
| key_layer = key_layer.contiguous().view( |
| key_layer.size()[:1] + (self.num_attention_heads_per_partition,) + key_layer.size()[3:] |
| ) |
| value_layer = value_layer.unsqueeze(2) |
| value_layer = value_layer.expand( |
| -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1, -1 |
| ) |
| value_layer = value_layer.contiguous().view( |
| value_layer.size()[:1] + (self.num_attention_heads_per_partition,) + value_layer.size()[3:] |
| ) |
|
|
| |
| |
| |
|
|
| context_layer = self.core_attention(query_layer, key_layer, value_layer, attention_mask) |
|
|
| |
| |
| |
|
|
| output = self.dense(context_layer) |
|
|
| return output, kv_cache |
|
|
|
|
| def _config_to_kwargs(args): |
| common_kwargs = { |
| "dtype": args.torch_dtype, |
| } |
| return common_kwargs |
|
|
|
|
| class MLP(torch.nn.Module): |
| """MLP. |
| |
| MLP will take the input with h hidden state, project it to 4*h |
| hidden dimension, perform nonlinear transformation, and project the |
| state back into h hidden dimension. |
| """ |
|
|
| def __init__(self, config: ChatGLMConfig, device=None): |
| super(MLP, self).__init__() |
|
|
| self.add_bias = config.add_bias_linear |
|
|
| |
| self.dense_h_to_4h = nn.Linear( |
| config.hidden_size, |
| config.ffn_hidden_size * 2, |
| bias=self.add_bias, |
| device=device, |
| **_config_to_kwargs(config) |
| ) |
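        # SwiGLU: the up-projection produces 2 * ffn_hidden_size features which are split
        # in half; one half (passed through SiLU) gates the other.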
|
|
| def swiglu(x): |
| x = torch.chunk(x, 2, dim=-1) |
| return F.silu(x[0]) * x[1] |
|
|
| self.activation_func = swiglu |
|
|
| |
| self.dense_4h_to_h = nn.Linear( |
| config.ffn_hidden_size, |
| config.hidden_size, |
| bias=self.add_bias, |
| device=device, |
| **_config_to_kwargs(config) |
| ) |
|
|
| def forward(self, hidden_states): |
| |
| intermediate_parallel = self.dense_h_to_4h(hidden_states) |
| intermediate_parallel = self.activation_func(intermediate_parallel) |
| |
| output = self.dense_4h_to_h(intermediate_parallel) |
| return output |
|
|
|
|
| class GLMBlock(torch.nn.Module): |
| """A single transformer layer. |
| |
| Transformer layer takes input with size [s, b, h] and returns an |
| output of the same size. |
| """ |
|
|
| def __init__(self, config: ChatGLMConfig, layer_number, device=None): |
| super(GLMBlock, self).__init__() |
| self.layer_number = layer_number |
|
|
| self.apply_residual_connection_post_layernorm = config.apply_residual_connection_post_layernorm |
|
|
| self.fp32_residual_connection = config.fp32_residual_connection |
|
|
| LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm |
| |
| self.input_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device, |
| dtype=config.torch_dtype) |
|
|
| |
| self.self_attention = SelfAttention(config, layer_number, device=device) |
| self.hidden_dropout = config.hidden_dropout |
|
|
| |
| self.post_attention_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device, |
| dtype=config.torch_dtype) |
|
|
| |
| self.mlp = MLP(config, device=device) |
|
|
| def forward( |
| self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True, |
| ): |
| |
|
|
| |
| layernorm_output = self.input_layernorm(hidden_states) |
| |
| attention_output, kv_cache = self.self_attention( |
| layernorm_output, |
| attention_mask, |
| rotary_pos_emb, |
| kv_cache=kv_cache, |
| use_cache=use_cache |
| ) |
|
|
| |
| if self.apply_residual_connection_post_layernorm: |
| residual = layernorm_output |
| else: |
| residual = hidden_states |
|
|
| layernorm_input = torch.nn.functional.dropout(attention_output, p=self.hidden_dropout, training=self.training) |
| layernorm_input = residual + layernorm_input |
|
|
| |
| layernorm_output = self.post_attention_layernorm(layernorm_input) |
|
|
| |
| mlp_output = self.mlp(layernorm_output) |
|
|
| |
| if self.apply_residual_connection_post_layernorm: |
| residual = layernorm_output |
| else: |
| residual = layernorm_input |
|
|
| output = torch.nn.functional.dropout(mlp_output, p=self.hidden_dropout, training=self.training) |
| output = residual + output |
|
|
| return output, kv_cache |
|
|
|
|
| class GLMTransformer(torch.nn.Module): |
| """Transformer class.""" |
|
|
| def __init__(self, config: ChatGLMConfig, device=None): |
| super(GLMTransformer, self).__init__() |
|
|
| self.fp32_residual_connection = config.fp32_residual_connection |
| self.post_layer_norm = config.post_layer_norm |
|
|
| |
| self.num_layers = config.num_layers |
|
|
| |
| def build_layer(layer_number): |
| return GLMBlock(config, layer_number, device=device) |
|
|
| self.layers = torch.nn.ModuleList([build_layer(i + 1) for i in range(self.num_layers)]) |
|
|
| if self.post_layer_norm: |
| LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm |
| |
| self.final_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device, |
| dtype=config.torch_dtype) |
|
|
| self.gradient_checkpointing = False |
|
|
| def _get_layer(self, layer_number): |
| return self.layers[layer_number] |
|
|
| def forward( |
| self, hidden_states, attention_mask, rotary_pos_emb, kv_caches=None, |
| use_cache: Optional[bool] = True, |
| output_hidden_states: Optional[bool] = False, |
| ): |
| if not kv_caches: |
| kv_caches = [None for _ in range(self.num_layers)] |
| presents = () if use_cache else None |
| if self.gradient_checkpointing and self.training: |
| if use_cache: |
| logger.warning_once( |
| "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." |
| ) |
| use_cache = False |
|
|
| all_self_attentions = None |
| all_hidden_states = () if output_hidden_states else None |
| for index in range(self.num_layers): |
| if output_hidden_states: |
| all_hidden_states = all_hidden_states + (hidden_states,) |
|
|
| layer = self._get_layer(index) |
| if self.gradient_checkpointing and self.training: |
| layer_ret = torch.utils.checkpoint.checkpoint( |
| layer, |
| hidden_states, |
| attention_mask, |
| rotary_pos_emb, |
| kv_caches[index], |
| use_cache, |
| use_reentrant=False |
| ) |
| else: |
| layer_ret = layer( |
| hidden_states, |
| attention_mask, |
| rotary_pos_emb, |
| kv_cache=kv_caches[index], |
| use_cache=use_cache |
| ) |
| hidden_states, kv_cache = layer_ret |
| if use_cache: |
| presents = presents + (kv_cache,) |
|
|
| if output_hidden_states: |
| all_hidden_states = all_hidden_states + (hidden_states,) |
|
|
| |
| if self.post_layer_norm: |
| hidden_states = self.final_layernorm(hidden_states) |
|
|
| return hidden_states, presents, all_hidden_states, all_self_attentions |
|
|
|
|
| class ChatGLMPreTrainedModel(PreTrainedModel): |
| """ |
| An abstract class to handle weights initialization and |
| a simple interface for downloading and loading pretrained models. |
| """ |
|
|
| is_parallelizable = False |
| supports_gradient_checkpointing = True |
| config_class = ChatGLMConfig |
| base_model_prefix = "transformer" |
| _no_split_modules = ["GLMBlock"] |
| _supports_flash_attn_2 = True |
| _supports_sdpa = True |
|
|
| def _init_weights(self, module: nn.Module): |
| """Initialize the weights.""" |
| return |
|
|
| def get_masks(self, input_embeds, past_key_values, padding_mask=None): |
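        # Build a [batch, 1, q_len, kv_len] boolean mask in which True marks positions that
        # must NOT be attended (the inverse of the usual HF convention); it is later
        # inverted or masked-filled inside CoreAttention.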
| batch_size, seq_length, embed_size = input_embeds.shape |
| full_attention_mask = torch.ones(batch_size, seq_length, seq_length, device=input_embeds.device) |
| full_attention_mask.tril_() |
| past_length = 0 |
| if past_key_values: |
| past_length = past_key_values[0][0].shape[2] |
| if past_length: |
| full_attention_mask = torch.cat((torch.ones(batch_size, seq_length, past_length, |
| device=input_embeds.device), full_attention_mask), dim=-1) |
| if padding_mask is not None: |
| full_attention_mask = full_attention_mask * padding_mask.unsqueeze(1) |
| if not past_length and padding_mask is not None: |
| full_attention_mask -= padding_mask.unsqueeze(-1) - 1 |
| full_attention_mask = (full_attention_mask < 0.5).bool() |
| full_attention_mask.unsqueeze_(1) |
| return full_attention_mask |
|
|
| def get_position_ids(self, input_ids, device): |
| batch_size, seq_length = input_ids.shape |
| position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1) |
| return position_ids |
|
|
    def get_multimodal_position_ids(self, input_ids, device):
        batch_size, seq_length = input_ids.shape
        position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1)
        return position_ids
|
|
| class Embedding(torch.nn.Module): |
| """Language model embeddings.""" |
|
|
| def __init__(self, config: ChatGLMConfig, device=None): |
| super(Embedding, self).__init__() |
|
|
| self.hidden_size = config.hidden_size |
| |
| self.word_embeddings = nn.Embedding( |
| config.padded_vocab_size, |
| self.hidden_size, |
| dtype=config.torch_dtype, |
| device=device |
| ) |
| self.fp32_residual_connection = config.fp32_residual_connection |
|
|
| def forward(self, input_ids): |
| |
| words_embeddings = self.word_embeddings(input_ids) |
| embeddings = words_embeddings |
| |
| if self.fp32_residual_connection: |
| embeddings = embeddings.float() |
| return embeddings |
|
|
|
|
| def is_empty(images_list: Optional[List[List[torch.Tensor]]]): |
| if images_list is None or len(images_list) == 0: |
| return True |
| for image_list in images_list: |
| if image_list is not None: |
| return False |
| return True |
|
|
|
|
| class ChatGLMModel(ChatGLMPreTrainedModel): |
| def __init__(self, config: ChatGLMConfig, device=None, empty_init=True): |
| super().__init__(config) |
| if empty_init: |
| init_method = skip_init |
| else: |
| init_method = default_init |
| init_kwargs = {} |
| if device is not None: |
| init_kwargs["device"] = device |
| self.embedding = init_method(Embedding, config, **init_kwargs) |
| self.num_layers = config.num_layers |
| self.multi_query_group_num = config.multi_query_group_num |
| self.kv_channels = config.kv_channels |
|
|
| |
| self.seq_length = config.seq_length |
| rotary_dim = ( |
| config.hidden_size // config.num_attention_heads if config.kv_channels is None else config.kv_channels |
| ) |
|
|
| self.rotary_pos_emb = RotaryEmbedding(rotary_dim // 2, rope_ratio=config.rope_ratio, |
| original_impl=config.original_rope, |
| device=device, dtype=config.torch_dtype) |
| self.encoder = init_method(GLMTransformer, config, **init_kwargs) |
| self.output_layer = init_method(nn.Linear, config.hidden_size, config.padded_vocab_size, bias=False, |
| dtype=config.torch_dtype, **init_kwargs) |
| self.pre_seq_len = config.pre_seq_len |
| self.prefix_projection = config.prefix_projection |
| if self.pre_seq_len is not None: |
| for param in self.parameters(): |
| param.requires_grad = False |
| self.prefix_tokens = torch.arange(self.pre_seq_len).long() |
| self.prefix_encoder = PrefixEncoder(config) |
| self.dropout = torch.nn.Dropout(0.1) |
|
|
| self.vision = EVA2CLIPModel(config) |
|
|
| def get_input_embeddings(self): |
| return self.embedding.word_embeddings |
|
|
| def set_input_embeddings(self, value): |
| self.embedding.word_embeddings = value |
|
|
| def get_prompt(self, batch_size, device, dtype=torch.half): |
| prefix_tokens = self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(device) |
| past_key_values = self.prefix_encoder(prefix_tokens).type(dtype) |
        past_key_values = past_key_values.view(
            batch_size,
            self.pre_seq_len,
            self.num_layers * 2,
            self.multi_query_group_num,
            self.kv_channels
        )
| |
| past_key_values = self.dropout(past_key_values) |
| past_key_values = past_key_values.permute([2, 1, 0, 3, 4]).split(2) |
| return past_key_values |
|
|
| def forward( |
| self, |
| input_ids: torch.LongTensor = None, |
| images: torch.Tensor = None, |
| position_ids: Optional[torch.Tensor] = None, |
| attention_mask: Optional[torch.BoolTensor] = None, |
| full_attention_mask: Optional[torch.BoolTensor] = None, |
| past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, |
| inputs_embeds: Optional[torch.Tensor] = None, |
| use_cache: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| ) -> Union[Tuple, BaseModelOutputWithPast]: |
| """take care of image_encode, position_ids and (attention_mask = None is fine)""" |
|
|
| |
| if past_key_values is None: |
| |
| assert input_ids is not None and inputs_embeds is None, f"{input_ids} {inputs_embeds}" |
| if not is_empty(images): |
| image_size: int = self.config.vision_config['image_size'] |
| patch_size: int = self.config.vision_config['patch_size'] |
| num_patches = (image_size // patch_size // 2) ** 2 |
| assert len(input_ids) == len(images), f"{len(input_ids)} {len(images)}" |
| inputs_embeds = self.embedding(input_ids) |
|
|
| images = images.to(dtype=inputs_embeds.dtype) |
| images_features = self.vision(images) |
|
|
| if position_ids is None: |
| position_ids = self.get_position_ids(input_ids, device=inputs_embeds.device) |
| new_input_embeds, new_position_ids = [], [] |
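                # Splice the image patch features between the BOI and EOI tokens of each
                # sample; the BOI position id is repeated for every patch so all vision
                # tokens share a single position.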
|
|
| for i in range(len(input_ids)): |
| input_id = input_ids[i].tolist() |
| boi_token_pos, eoi_token_pos = input_id.index(self.config.boi_token_id), input_id.index( |
| self.config.eoi_token_id) |
| assert eoi_token_pos - boi_token_pos == 2 |
| new_input_embeds.append(torch.cat( |
| (inputs_embeds[i, :boi_token_pos], images_features[i].to(inputs_embeds.device), |
| inputs_embeds[i, eoi_token_pos + 1:]))) |
| new_position_ids.append(torch.cat( |
| (position_ids[i, :boi_token_pos + 1], position_ids[i, boi_token_pos + 1].repeat(num_patches), |
| position_ids[i, eoi_token_pos:]) |
| )) |
| inputs_embeds = torch.stack(new_input_embeds, dim=0) |
| position_ids = torch.stack(new_position_ids, dim=0) |
|
|
| output_hidden_states = ( |
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
| ) |
| use_cache = use_cache if use_cache is not None else self.config.use_cache |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
| batch_size, seq_length = input_ids.shape |
|
|
| if inputs_embeds is None: |
| inputs_embeds = self.embedding(input_ids) |
|
|
| if self.pre_seq_len is not None: |
| if past_key_values is None: |
| past_key_values = self.get_prompt(batch_size=batch_size, device=input_ids.device, |
| dtype=inputs_embeds.dtype) |
| if attention_mask is not None: |
| attention_mask = torch.cat([attention_mask.new_ones((batch_size, self.pre_seq_len)), |
| attention_mask], dim=-1) |
|
|
| if full_attention_mask is None: |
| if (attention_mask is not None and not attention_mask.all()) or (past_key_values and seq_length != 1): |
| if self.training: |
| |
| new_input_ids, new_attention_mask = [], [] |
| for i in range(len(input_ids)): |
| input_id = input_ids[i].tolist() |
| boi_token_pos, eoi_token_pos = input_id.index(self.config.boi_token_id), input_id.index(self.config.eoi_token_id) |
| assert eoi_token_pos - boi_token_pos == 2 |
|
|
                        new_attention_mask.append(torch.cat(
                            (attention_mask[i, :boi_token_pos + 1], attention_mask.new_ones(num_patches),
                             attention_mask[i, eoi_token_pos:])))
|
|
| new_input_ids.append(torch.cat( |
| (input_ids[i, :boi_token_pos + 1], input_ids[i, -1].repeat(num_patches), |
| input_ids[i, eoi_token_pos:]))) |
|
|
| attention_mask = torch.stack(new_attention_mask, dim=0) |
| input_ids = torch.stack(new_input_ids, dim=0) |
| inputs_embeds = self.embedding(input_ids) |
|
|
| full_attention_mask = self.get_masks(inputs_embeds, past_key_values, padding_mask=attention_mask) |
|
|
| |
| rotary_pos_emb = self.rotary_pos_emb(self.seq_length) |
|
|
| if position_ids is not None: |
| rotary_pos_emb = rotary_pos_emb[position_ids] |
| else: |
| rotary_pos_emb = rotary_pos_emb[None, :seq_length] |
|
|
| |
| hidden_states, presents, all_hidden_states, all_self_attentions = self.encoder( |
| inputs_embeds, full_attention_mask, rotary_pos_emb=rotary_pos_emb, |
| kv_caches=past_key_values, use_cache=use_cache, output_hidden_states=output_hidden_states |
| ) |
|
|
| if not return_dict: |
| return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None) |
|
|
| return BaseModelOutputWithPast( |
| last_hidden_state=hidden_states, |
| past_key_values=presents, |
| hidden_states=all_hidden_states, |
| attentions=all_self_attentions, |
| ) |
|
|
|
|
| def _history_to_prompt(history, query): |
| prompt = '' |
| flag = False |
| for i, (old_query, response) in enumerate(history): |
| prompt += ('<|user|>' if flag else '') + old_query + "<|assistant|>" + response + "<|endoftext|>" |
| flag = True |
| prompt += '{}{}<|assistant|>'.format('<|user|>' if flag else '', query) |
| return prompt |
|
|
|
|
| class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel): |
| def __init__(self, config: ChatGLMConfig, empty_init=True, device=None): |
| super().__init__(config) |
|
|
| self.max_sequence_length = config.max_length |
| self.transformer = ChatGLMModel(config, empty_init=empty_init, device=device) |
| self.config = config |
|
|
| def _update_model_kwargs_for_generation( |
| self, |
| outputs: ModelOutput, |
| model_kwargs: Dict[str, Any], |
| is_encoder_decoder: bool = False, |
| ) -> Dict[str, Any]: |
| |
| cache_name, cache = self._extract_past_from_model_output(outputs) |
| model_kwargs[cache_name] = cache |
|
|
| |
| if "attention_mask" in model_kwargs: |
| attention_mask = model_kwargs["attention_mask"] |
| model_kwargs["attention_mask"] = torch.cat( |
| [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1 |
| ) |
|
|
| |
| if "position_ids" in model_kwargs: |
| position_ids = model_kwargs["position_ids"] |
| new_position_id = position_ids[..., -1:].clone() |
| new_position_id += 1 |
| model_kwargs["position_ids"] = torch.cat( |
| [position_ids, new_position_id], dim=-1 |
| ) |
|
|
| model_kwargs["is_first_forward"] = False |
| return model_kwargs |
|
|
| def prepare_inputs_for_generation( |
| self, |
| input_ids: torch.LongTensor, |
| images: Optional[torch.Tensor] = None, |
| past_key_values: Optional[torch.Tensor] = None, |
| attention_mask: Optional[torch.Tensor] = None, |
| position_ids: Optional[torch.Tensor] = None, |
| use_cache: Optional[bool] = None, |
| is_first_forward: bool = True, |
| **kwargs |
| ) -> dict: |
| |
| if position_ids is None: |
| position_ids = self.get_position_ids(input_ids, device=input_ids.device) |
| if attention_mask is not None: |
| image_size: int = self.config.vision_config['image_size'] |
| patch_size: int = self.config.vision_config['patch_size'] |
| num_patches = (image_size // patch_size // 2) ** 2 |
| new_attention_masks = [] |
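            # The fallback BOI/EOI positions below are only used when `images` is empty
            # (e.g. cached decoding steps); they are overwritten whenever the BOI/EOI
            # tokens are actually present in the prompt.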
|
|
| |
| eoi_token_pos = 6 |
| boi_token_pos = 4 |
|
|
| for i in range(len(input_ids)): |
| input_id = input_ids[i].tolist() |
| if not is_empty(images): |
| boi_token_pos, eoi_token_pos = input_id.index(self.config.boi_token_id), input_id.index( |
| self.config.eoi_token_id) |
| assert eoi_token_pos - boi_token_pos == 2 |
| new_attention_masks.append(torch.cat( |
| (attention_mask[i, :boi_token_pos + 1], attention_mask.new_ones(num_patches), |
| attention_mask[i, eoi_token_pos:]) |
| )) |
| attention_mask = torch.stack(new_attention_masks, dim=0) |
| if not is_first_forward: |
| if past_key_values is not None: |
| position_ids = position_ids[..., -1:] |
| input_ids = input_ids[:, -1:] |
| return { |
| "input_ids": input_ids, |
| "images": images, |
| "past_key_values": past_key_values, |
| "position_ids": position_ids, |
| "attention_mask": attention_mask, |
| "return_last_logit": True, |
| "use_cache": use_cache |
| } |
|
|
| def forward( |
| self, |
| input_ids: Optional[torch.Tensor] = None, |
| images: List[List[torch.Tensor]] = None, |
| position_ids: Optional[torch.Tensor] = None, |
| attention_mask: Optional[torch.Tensor] = None, |
| past_key_values: Optional[Tuple[torch.FloatTensor]] = None, |
| inputs_embeds: Optional[torch.Tensor] = None, |
| labels: Optional[torch.Tensor] = None, |
| use_cache: Optional[bool] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| return_last_logit: Optional[bool] = False, |
| ): |
| use_cache = use_cache if use_cache is not None else self.config.use_cache |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
| transformer_outputs = self.transformer( |
| input_ids=input_ids, |
| images=images, |
| position_ids=position_ids, |
| attention_mask=attention_mask, |
| past_key_values=past_key_values, |
| inputs_embeds=inputs_embeds, |
| use_cache=use_cache, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
|
|
| hidden_states = transformer_outputs[0] |
| if return_last_logit: |
| hidden_states = hidden_states[:, -1:] |
| lm_logits = self.transformer.output_layer(hidden_states) |
|
|
| loss = None |
| if labels is not None: |
| new_labels = [] |
| for i in range(len(input_ids)): |
| input_id = input_ids[i].tolist() |
| boi_token_pos, eoi_token_pos = input_id.index(self.config.boi_token_id), input_id.index( |
| self.config.eoi_token_id) |
| assert eoi_token_pos - boi_token_pos == 2 |
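                # Mask the image-patch span with -100 so it is ignored by the loss.
                # Note: the patch count (1600) is hard-coded; it appears to correspond to
                # (image_size // patch_size // 2) ** 2 for the default vision config.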
|
|
| new_labels.append(torch.cat( |
| ( |
| labels[i, :boi_token_pos + 1], |
| torch.tensor([-100]).to(labels.device).to(labels.dtype).repeat(1600), |
| labels[i, eoi_token_pos:]))) |
|
|
| labels = torch.stack(new_labels, dim=0) |
| lm_logits = lm_logits.to(torch.float32) |
| shift_logits = lm_logits[..., :-1, :].contiguous() |
| shift_labels = labels[..., 1:].contiguous() |
| loss_fct = CrossEntropyLoss(ignore_index=-100) |
| loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) |
|
|
| lm_logits = lm_logits.to(hidden_states.dtype) |
| loss = loss.to(hidden_states.dtype) |
|
|
| if not return_dict: |
| output = (lm_logits,) + transformer_outputs[1:] |
| return ((loss,) + output) if loss is not None else output |
|
|
| return CausalLMOutputWithPast( |
| loss=loss, |
| logits=lm_logits, |
| past_key_values=transformer_outputs.past_key_values, |
| hidden_states=transformer_outputs.hidden_states, |
| attentions=transformer_outputs.attentions, |
| ) |
|
|
| @staticmethod |
| def _reorder_cache( |
| past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor |
| ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]: |
| """ |
| This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or |
| [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct |
| beam_idx at every generation step. |
| |
| Output shares the same memory storage as `past`. |
| """ |
| return tuple( |
| ( |
| layer_past[0].index_select(0, beam_idx.to(layer_past[0].device)), |
| layer_past[1].index_select(0, beam_idx.to(layer_past[1].device)), |
| ) |
| for layer_past in past |
| ) |
|
|
| class ChatGLMForSequenceClassification(ChatGLMPreTrainedModel): |
| def __init__(self, config: ChatGLMConfig, empty_init=True, device=None): |
| super().__init__(config) |
|
|
| self.num_labels = config.num_labels |
| self.transformer = ChatGLMModel(config, empty_init=empty_init, device=device) |
|
|
| self.classifier_head = nn.Linear(config.hidden_size, config.num_labels, bias=True, dtype=torch.half) |
| if config.classifier_dropout is not None: |
| self.dropout = nn.Dropout(config.classifier_dropout) |
| else: |
| self.dropout = None |
| self.config = config |
|
|
| def forward( |
| self, |
| input_ids: Optional[torch.LongTensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| attention_mask: Optional[torch.Tensor] = None, |
| full_attention_mask: Optional[torch.Tensor] = None, |
| past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, |
| inputs_embeds: Optional[torch.LongTensor] = None, |
| labels: Optional[torch.LongTensor] = None, |
| use_cache: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| ) -> Union[Tuple[torch.Tensor, ...], SequenceClassifierOutputWithPast]: |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
| transformer_outputs = self.transformer( |
| input_ids=input_ids, |
| position_ids=position_ids, |
| attention_mask=attention_mask, |
| full_attention_mask=full_attention_mask, |
| past_key_values=past_key_values, |
| inputs_embeds=inputs_embeds, |
| use_cache=use_cache, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
|
|
| hidden_states = transformer_outputs[0] |
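        # Pool by taking the hidden state of the last token of each sequence
        # (hidden_states is [batch, seq, hidden]).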
        pooled_hidden_states = hidden_states[:, -1]
| if self.dropout is not None: |
| pooled_hidden_states = self.dropout(pooled_hidden_states) |
| logits = self.classifier_head(pooled_hidden_states) |
|
|
| loss = None |
| if labels is not None: |
| if self.config.problem_type is None: |
| if self.num_labels == 1: |
| self.config.problem_type = "regression" |
| elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): |
| self.config.problem_type = "single_label_classification" |
| else: |
| self.config.problem_type = "multi_label_classification" |
|
|
| if self.config.problem_type == "regression": |
| loss_fct = MSELoss() |
| if self.num_labels == 1: |
| loss = loss_fct(logits.squeeze().float(), labels.squeeze()) |
| else: |
| loss = loss_fct(logits.float(), labels) |
| elif self.config.problem_type == "single_label_classification": |
| loss_fct = CrossEntropyLoss() |
| loss = loss_fct(logits.view(-1, self.num_labels).float(), labels.view(-1)) |
| elif self.config.problem_type == "multi_label_classification": |
| loss_fct = BCEWithLogitsLoss() |
| loss = loss_fct(logits.float(), labels.view(-1, self.num_labels)) |
|
|
| if not return_dict: |
| output = (logits,) + transformer_outputs[1:] |
| return ((loss,) + output) if loss is not None else output |
|
|
| return SequenceClassifierOutputWithPast( |
| loss=loss, |
| logits=logits, |
| past_key_values=transformer_outputs.past_key_values, |
| hidden_states=transformer_outputs.hidden_states, |
| attentions=transformer_outputs.attentions, |
| ) |
|
|