@@ -1025,7 +1025,7 @@ static void translateUnPackMask(CallInst &CI) {
   CI.replaceAllUsesWith(TransCI);
 }

-static bool translateVLoad(CallInst &CI, SmallPtrSet<Type *, 4> &GVTS) {
+static bool translateVLoad(CallInst &CI, SmallPtrSetImpl<Type *> &GVTS) {
   if (GVTS.find(CI.getType()) != GVTS.end())
     return false;
   IRBuilder<> Builder(&CI);
@@ -1035,7 +1035,7 @@ static bool translateVLoad(CallInst &CI, SmallPtrSet<Type *, 4> &GVTS) {
   return true;
 }

-static bool translateVStore(CallInst &CI, SmallPtrSet<Type *, 4> &GVTS) {
+static bool translateVStore(CallInst &CI, SmallPtrSetImpl<Type *> &GVTS) {
   if (GVTS.find(CI.getOperand(1)->getType()) != GVTS.end())
     return false;
   IRBuilder<> Builder(&CI);
@@ -1728,7 +1728,7 @@ SmallPtrSet<Type *, 4> collectGenXVolatileTypes(Module &M) {
 // of the simd object operations, but in some cases clang can implicitly
 // insert stores, such as after a write in inline assembly. To handle that
 // case, lower any stores of genx_volatiles into vstores.
-void lowerGlobalStores(Module &M, const SmallPtrSet<Type *, 4> &GVTS) {
+void lowerGlobalStores(Module &M, const SmallPtrSetImpl<Type *> &GVTS) {
   SmallVector<Instruction *, 4> ToErase;
   for (auto &F : M.functions()) {
     for (Instruction &I : instructions(F)) {
@@ -1781,7 +1781,7 @@ PreservedAnalyses SYCLLowerESIMDPass::run(Module &M, ModuleAnalysisManager &) {
 }

 size_t SYCLLowerESIMDPass::runOnFunction(Function &F,
-                                         SmallPtrSet<Type *, 4> &GVTS) {
+                                         SmallPtrSetImpl<Type *> &GVTS) {
   // There is a current limitation of GPU vector backend that requires kernel
   // functions to be inlined into the kernel itself. To overcome this
   // limitation, mark every function called from ESIMD kernel with
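
The change above follows the common LLVM convention of taking SmallPtrSetImpl<Type *> & in function signatures instead of SmallPtrSet<Type *, 4> &, so the callee does not hard-code the caller's inline element count. Below is a minimal sketch of the idiom, not part of this patch; isGenXVolatileType and example are hypothetical names used only for illustration.

// Sketch only: shows why SmallPtrSetImpl<T> & is preferred in signatures.
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/Type.h"

using namespace llvm;

// Hypothetical query helper. Every SmallPtrSet<T, N> derives from
// SmallPtrSetImpl<T>, so this accepts a set with any inline size N.
static bool isGenXVolatileType(Type *Ty, const SmallPtrSetImpl<Type *> &GVTS) {
  return GVTS.find(Ty) != GVTS.end();
}

static void example(Type *Ty) {
  SmallPtrSet<Type *, 4> SmallSet;  // inline capacity of 4
  SmallPtrSet<Type *, 32> BigSet;   // inline capacity of 32
  // Both bind to the same SmallPtrSetImpl<Type *> & parameter.
  (void)isGenXVolatileType(Ty, SmallSet);
  (void)isGenXVolatileType(Ty, BigSet);
}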