Skip to content

Commit 29471df

Browse files
authored
Enhance load_empty_model import (#1930)
Signed-off-by: Kaihui-intel <[email protected]>
1 parent fd96851 commit 29471df

File tree

3 files changed

+3
-2
lines changed

3 files changed

+3
-2
lines changed

neural_compressor/torch/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -11,3 +11,4 @@
11 11
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 12
# See the License for the specific language governing permissions and
13 13
# limitations under the License.
14+
from .utils import load_empty_model

test/3x/torch/quantization/weight_only/test_gptq.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -183,7 +183,7 @@ def test_layer_wise(self):
183 183
model = convert(model)
184 184
q_label = model(self.example_inputs)[0]
185 185

186-
from neural_compressor.torch.utils import load_empty_model
186+
from neural_compressor.torch import load_empty_model
187 187

188 188
model = load_empty_model("hf-internal-testing/tiny-random-GPTJForCausalLM")
189 189

test/3x/torch/quantization/weight_only/test_rtn.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -167,7 +167,7 @@ def test_quant_lm_head(self):
167 167
), "The tied lm_head weight is not deep copied, please check!"
168 168

169 169
def test_layer_wise(self):
170-
from neural_compressor.torch.utils import load_empty_model
170+
from neural_compressor.torch import load_empty_model
171 171

172 172
model = load_empty_model("hf-internal-testing/tiny-random-GPTJForCausalLM")
173 173
quant_config = RTNConfig(

0 commit comments

Comments (0)