@@ -348,47 +348,3 @@ TEST(Converters, ATenAnyDimNegIndexConvertsCorrectly) {
   auto in = at::randint(-2, 2, {2, 32}, at::kCUDA);
   test_body(graph, in);
 }
-
-TEST(Converters, ATenAllDimConvertsCorrectly) {
-  const auto graph = R"IR(
-      graph(%0 : Tensor):
-        %1 : int = prim::Constant[value=-1]()
-        %3 : bool = prim::Constant[value=0]()
-        %5 : Tensor = aten::all(%0, %1, %3)
-        return (%5))IR";
-  auto in = at::randint(0, 2, {64, 2}, at::kCUDA);
-  test_body(graph, in);
-}
-
-TEST(Converters, ATenAllDimKeepDimConvertsCorrectly) {
-  const auto graph = R"IR(
-      graph(%0 : Tensor):
-        %1 : int = prim::Constant[value=0]()
-        %3 : bool = prim::Constant[value=1]()
-        %5 : Tensor = aten::all(%0, %1, %3)
-        return (%5))IR";
-  auto in = at::randint(-2, 2, {2, 32}, at::kCUDA).to(torch::kBool);
-  test_body(graph, in);
-}
-
-TEST(Converters, ATenAllDimAllTrueConvertsCorrectly) {
-  const auto graph = R"IR(
-      graph(%0 : Tensor):
-        %1 : int = prim::Constant[value=1]()
-        %3 : bool = prim::Constant[value=0]()
-        %5 : Tensor = aten::all(%0, %1, %3)
-        return (%5))IR";
-  auto in = at::ones({2, 32}, at::kCUDA);
-  test_body(graph, in);
-}
-
-TEST(Converters, ATenAllDimDynamicConvertsCorrectly) {
-  const auto graph = R"IR(
-      graph(%0 : Tensor):
-        %1 : int = prim::Constant[value=-1]()
-        %3 : bool = prim::Constant[value=0]()
-        %5 : Tensor = aten::all(%0, %1, %3)
-        return (%5))IR";
-  auto in = at::randint(0, 2, {64, 2}, at::kCUDA).to(torch::kHalf);
-  test_body(graph, in, true);
-}