# distil_bert_backbone.py
import keras

from keras_hub.src.api_export import keras_hub_export
from keras_hub.src.layers.modeling.token_and_position_embedding import (
    TokenAndPositionEmbedding,
)
from keras_hub.src.layers.modeling.transformer_encoder import (
    TransformerEncoder,
)
from keras_hub.src.models.backbone import Backbone
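

# The 0.02 truncated-normal standard deviation used below follows the
# BERT-family convention of initializing weights from a small-variance
# truncated normal distribution.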
def distilbert_kernel_initializer(stddev=0.02):
    return keras.initializers.TruncatedNormal(stddev=stddev)


@keras_hub_export("keras_hub.models.DistilBertBackbone")
class DistilBertBackbone(Backbone):
    """A DistilBERT encoder network.

    This network implements a bi-directional Transformer-based encoder as
    described in ["DistilBERT, a distilled version of BERT: smaller, faster,
    cheaper and lighter"](https://arxiv.org/abs/1910.01108). It includes the
    embedding lookups and transformer layers, but not the masked language
    model or classification task networks.

    The default constructor gives a fully customizable, randomly initialized
    DistilBERT encoder with any number of layers, heads, and embedding
    dimensions. To load preset architectures and weights, use the
    `from_preset()` constructor.

    Disclaimer: Pre-trained models are provided on an "as is" basis, without
    warranties or conditions of any kind. The underlying model is provided by
    a third party and subject to a separate license, available
    [here](https://github.com/huggingface/transformers).

    Args:
        vocabulary_size: int. The size of the token vocabulary.
        num_layers: int. The number of transformer layers.
        num_heads: int. The number of attention heads for each transformer.
            The hidden size must be divisible by the number of attention
            heads.
        hidden_dim: int. The size of the transformer encoding and pooler
            layers.
        intermediate_dim: int. The output dimension of the first Dense layer
            in a two-layer feedforward network for each transformer.
        dropout: float. Dropout probability for the Transformer encoder.
        max_sequence_length: int. The maximum sequence length that this
            encoder can consume. If `None`, `max_sequence_length` defaults to
            the length of the input sequence. This determines the variable
            shape for positional embeddings.
        dtype: string or `keras.mixed_precision.DTypePolicy`. The dtype to
            use for model computations and weights. Note that some
            computations, such as softmax and layer normalization, will
            always be done at float32 precision regardless of dtype.

    Examples:
    ```python
    input_data = {
        "token_ids": np.ones(shape=(1, 12), dtype="int32"),
        "padding_mask": np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]]),
    }

    # Pretrained DistilBERT encoder.
    model = keras_hub.models.DistilBertBackbone.from_preset(
        "distil_bert_base_en_uncased"
    )
    model(input_data)

    # Randomly initialized DistilBERT encoder with custom config.
    model = keras_hub.models.DistilBertBackbone(
        vocabulary_size=30552,
        num_layers=4,
        num_heads=4,
        hidden_dim=256,
        intermediate_dim=512,
        max_sequence_length=128,
    )
    model(input_data)
    ```
    """

    def __init__(
        self,
        vocabulary_size,
        num_layers,
        num_heads,
        hidden_dim,
        intermediate_dim,
        dropout=0.1,
        max_sequence_length=512,
        dtype=None,
        **kwargs,
    ):
        # === Layers ===
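        # Unlike BERT, DistilBERT has no segment (token-type) embeddings, so
        # the backbone only consumes `token_ids` and `padding_mask` inputs.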
        self.embeddings = TokenAndPositionEmbedding(
            vocabulary_size=vocabulary_size,
            sequence_length=max_sequence_length,
            embedding_dim=hidden_dim,
            embeddings_initializer=distilbert_kernel_initializer(),
            dtype=dtype,
            name="token_and_position_embedding",
        )
        # Keep the token_embedding property for consistency across models.
        self.token_embedding = self.embeddings.token_embedding
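        # The summed token and position embeddings are layer-normalized and
        # dropout-regularized before entering the transformer stack.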
        self.embeddings_layer_norm = keras.layers.LayerNormalization(
            axis=-1,
            epsilon=1e-12,
            dtype=dtype,
            name="embeddings_layer_norm",
        )
        self.embeddings_dropout = keras.layers.Dropout(
            dropout,
            dtype=dtype,
            name="embeddings_dropout",
        )
        self.transformer_layers = []
        for i in range(num_layers):
            layer = TransformerEncoder(
                num_heads=num_heads,
                intermediate_dim=intermediate_dim,
                activation="gelu",
                dropout=dropout,
                layer_norm_epsilon=1e-12,
                kernel_initializer=distilbert_kernel_initializer(),
                dtype=dtype,
                name=f"transformer_layer_{i}",
            )
            self.transformer_layers.append(layer)
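        # Each block is a standard transformer encoder layer with GELU
        # feedforward activation; DistilBERT keeps BERT's block design but
        # halves the number of layers in the base preset (6 vs. 12).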

        # === Functional Model ===
        token_id_input = keras.Input(
            shape=(None,), dtype="int32", name="token_ids"
        )
        padding_mask_input = keras.Input(
            shape=(None,), dtype="int32", name="padding_mask"
        )
        x = self.embeddings(token_id_input)
        x = self.embeddings_layer_norm(x)
        x = self.embeddings_dropout(x)
        for transformer_layer in self.transformer_layers:
            x = transformer_layer(x, padding_mask=padding_mask_input)
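        # The backbone output is the full sequence of final hidden states,
        # with shape (batch_size, sequence_length, hidden_dim); no pooling
        # is applied here.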
        super().__init__(
            inputs={
                "token_ids": token_id_input,
                "padding_mask": padding_mask_input,
            },
            outputs=x,
            dtype=dtype,
            **kwargs,
        )

        # === Config ===
        self.vocabulary_size = vocabulary_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.hidden_dim = hidden_dim
        self.intermediate_dim = intermediate_dim
        self.dropout = dropout
        self.max_sequence_length = max_sequence_length
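        # Downstream task models (e.g. text classifiers) typically pool the
        # hidden state at `cls_token_index`, the position of the [CLS] token.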
        self.cls_token_index = 0

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "vocabulary_size": self.vocabulary_size,
                "num_layers": self.num_layers,
                "num_heads": self.num_heads,
                "hidden_dim": self.hidden_dim,
                "intermediate_dim": self.intermediate_dim,
                "dropout": self.dropout,
                "max_sequence_length": self.max_sequence_length,
            }
        )
        return config
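

# A minimal usage sketch (illustrative only, not part of the upstream module):
# build a small, randomly initialized backbone, run a forward pass, and
# round-trip its configuration through `get_config()` / `from_config()`.
# The sizes below are arbitrary.
if __name__ == "__main__":
    import numpy as np

    backbone = DistilBertBackbone(
        vocabulary_size=1000,
        num_layers=2,
        num_heads=2,
        hidden_dim=64,
        intermediate_dim=128,
        max_sequence_length=32,
    )
    outputs = backbone(
        {
            "token_ids": np.ones((1, 8), dtype="int32"),
            "padding_mask": np.ones((1, 8), dtype="int32"),
        }
    )
    print(outputs.shape)  # (1, 8, 64): one hidden vector per input token.

    # `from_config(get_config())` rebuilds an architecturally identical
    # (but freshly initialized) backbone.
    clone = DistilBertBackbone.from_config(backbone.get_config())
    print(clone.num_layers)  # 2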