# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ..utils import is_torch_available


if is_torch_available():
    import torch


def replace_with_quanto_layers(
    model,
    quantization_config=None,
    modules_to_not_convert=None,
    current_key_name=None,
    has_been_replaced=False,
):
    """
    Public function that recursively replaces the Linear layers of the given model with Quanto quantized layers.
    Returns the converted model and a boolean that indicates whether the conversion was successful.

    Args:
        model (`torch.nn.Module`):
            The model to convert, can be any `torch.nn.Module` instance.
        quantization_config (`QuantoConfig`, defaults to `None`):
            The quantization config object that contains the quantization parameters.
        modules_to_not_convert (`list`, *optional*, defaults to `None`):
            A list of module names that should not be converted. If a module's name is in the list (e.g. `lm_head`),
            it will not be converted.
        current_key_name (`list`, *optional*, defaults to `None`):
            A list that contains the current key name. This is used for recursion and should not be passed by the
            user.
        has_been_replaced (`bool`, *optional*, defaults to `False`):
            A boolean that indicates if the conversion has been successful or not. This is used for recursion and
            should not be passed by the user.
    """
    from accelerate import init_empty_weights
    from quanto import QLayerNorm, QLinear, qfloat8, qint2, qint4, qint8

    # Map the string options of the config to the corresponding quanto dtypes.
    w_mapping = {"float8": qfloat8, "int8": qint8, "int4": qint4, "int2": qint2}
    a_mapping = {None: None, "float8": qfloat8, "int8": qint8}

    if modules_to_not_convert is None:
        modules_to_not_convert = []

    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        # Skip any module whose fully qualified name matches an entry of `modules_to_not_convert`.
        if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
            # Create the quantized layers on the meta device; the actual weights are loaded afterwards.
            with init_empty_weights():
                if isinstance(module, torch.nn.Linear):
                    model._modules[name] = QLinear(
                        in_features=module.in_features,
                        out_features=module.out_features,
                        bias=module.bias is not None,
                        dtype=module.weight.dtype,
                        weights=w_mapping[quantization_config.weights],
                        activations=a_mapping[quantization_config.activations],
                    )
                    # Quantized weights are frozen, so no gradients are needed.
                    model._modules[name].requires_grad_(False)
                    has_been_replaced = True
                elif isinstance(module, torch.nn.LayerNorm):
                    # LayerNorm is only replaced when activation quantization is enabled.
                    if quantization_config.activations is not None:
                        model._modules[name] = QLayerNorm(
                            module.normalized_shape,
                            module.eps,
                            module.elementwise_affine,
                            module.bias is not None,
                            activations=a_mapping[quantization_config.activations],
                        )
                        has_been_replaced = True
        if len(list(module.children())) > 0:
            # Recurse into submodules to convert nested Linear/LayerNorm layers.
            _, has_been_replaced = replace_with_quanto_layers(
                module,
                quantization_config=quantization_config,
                modules_to_not_convert=modules_to_not_convert,
                current_key_name=current_key_name,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
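
# Example (a minimal sketch, for illustration only): how this helper might be
# invoked by a quantizer. The `QuantoConfig` import and the checkpoint name are
# assumptions, not part of this module. Note that the new layers are created
# under `init_empty_weights`, so in practice this runs on a freshly initialized
# model before the checkpoint weights are loaded into it.
#
#     from transformers import AutoModelForCausalLM, QuantoConfig
#
#     config = QuantoConfig(weights="int8", activations=None)
#     model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
#     model, has_been_replaced = replace_with_quanto_layers(
#         model,
#         quantization_config=config,
#         modules_to_not_convert=["lm_head"],
#     )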