|
@@ -3,8 +3,6 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-
-import logging
 import math
 from collections.abc import Callable
 from functools import partial
|
@@ -22,6 +20,8 @@
 from torch import Tensor, nn
 from torch.distributed import _functional_collectives as funcol

+from llama_stack.log import get_logger
+
 from ..model import ModelArgs, RMSNorm, apply_rotary_emb, precompute_freqs_cis
 from .encoder_utils import (
     build_encoder_attention_mask,
|
@@ -34,9 +34,10 @@
 from .image_transform import VariableSizeImageTransform
 from .utils import get_negative_inf_value, to_2tuple

-logger = logging.getLogger(__name__)
 MP_SCALE = 8

+logger = get_logger(name=__name__, category="models")
+

 def reduce_from_tensor_model_parallel_region(input_):
     """All-reduce the input tensor across model parallel group."""
@@ -771,7 +772,7 @@ def load_hook( |
         if embed is not None:
             # reshape the weights to the correct shape
             nt_old, nt_old, _, w = embed.shape
-            logging.info(f"Resizing tile embedding from {nt_old}x{nt_old} to {self.num_tiles}x{self.num_tiles}")
+            logger.info(f"Resizing tile embedding from {nt_old}x{nt_old} to {self.num_tiles}x{self.num_tiles}")
             embed_new = TilePositionEmbedding._dynamic_resize(embed, self.num_tiles)
             # assign the weights to the module
             state_dict[prefix + "embedding"] = embed_new
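
The change throughout this diff is the logger setup: the stdlib `logging.getLogger(__name__)` module logger is replaced with llama_stack's `get_logger`, which also takes a log category. A minimal sketch of the resulting pattern, assuming `llama_stack` is importable; the `category="models"` value and the call style are taken from the diff itself, while the example values below are hypothetical:

from llama_stack.log import get_logger

# Module-level logger with an explicit category, as introduced by this diff.
logger = get_logger(name=__name__, category="models")

# Call sites keep the familiar logging API, e.g. the tile-embedding resize message:
nt_old, nt_new = 4, 8  # hypothetical values for illustration only
logger.info(f"Resizing tile embedding from {nt_old}x{nt_old} to {nt_new}x{nt_new}")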
|