File tree Expand file tree Collapse file tree 3 files changed +3
-2
lines changed
test/3x/torch/quantization/weight_only Expand file tree Collapse file tree 3 files changed +3
-2
lines changed Original file line number Diff line number Diff line change 11
11
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
12
# See the License for the specific language governing permissions and
13
13
# limitations under the License.
14
+ from .utils import load_empty_model
Original file line number Diff line number Diff line change @@ -183,7 +183,7 @@ def test_layer_wise(self):
183
183
model = convert(model)
184
184
q_label = model(self.example_inputs)[0]
185
185
186
- from neural_compressor.torch.utils import load_empty_model
186
+ from neural_compressor.torch import load_empty_model
187
187
188
188
model = load_empty_model("hf-internal-testing/tiny-random-GPTJForCausalLM")
189
189
Original file line number Diff line number Diff line change @@ -167,7 +167,7 @@ def test_quant_lm_head(self):
167
167
), "The tied lm_head weight is not deep copied, please check!"
168
168
169
169
def test_layer_wise(self):
170
- from neural_compressor.torch.utils import load_empty_model
170
+ from neural_compressor.torch import load_empty_model
171
171
172
172
model = load_empty_model("hf-internal-testing/tiny-random-GPTJForCausalLM")
173
173
quant_config = RTNConfig(
You can’t perform that action at this time.
0 commit comments