@@ -99,7 +99,7 @@ static Value getOffsetForBitwidth(Location loc, OpFoldResult srcIdx,
       affine::makeComposedFoldedAffineApply(builder, loc, offsetExpr, {srcIdx});
   Value bitOffset = getValueOrCreateConstantIndexOp(builder, loc, offsetVal);
   IntegerType dstType = builder.getIntegerType(targetBits);
-  return builder.create<arith::IndexCastOp>(loc, dstType, bitOffset);
+  return arith::IndexCastOp::create(builder, loc, dstType, bitOffset);
 }
 
 /// When writing a subbyte size, masked bitwise operations are used to only
@@ -112,14 +112,14 @@ static Value getSubByteWriteMask(Location loc, OpFoldResult linearizedIndices,
   auto dstIntegerType = builder.getIntegerType(dstBits);
   auto maskRightAlignedAttr =
       builder.getIntegerAttr(dstIntegerType, (1 << srcBits) - 1);
-  Value maskRightAligned = builder.create<arith::ConstantOp>(
-      loc, dstIntegerType, maskRightAlignedAttr);
+  Value maskRightAligned = arith::ConstantOp::create(
+      builder, loc, dstIntegerType, maskRightAlignedAttr);
   Value writeMaskInverse =
-      builder.create<arith::ShLIOp>(loc, maskRightAligned, bitwidthOffset);
+      arith::ShLIOp::create(builder, loc, maskRightAligned, bitwidthOffset);
   auto flipValAttr = builder.getIntegerAttr(dstIntegerType, -1);
   Value flipVal =
-      builder.create<arith::ConstantOp>(loc, dstIntegerType, flipValAttr);
-  return builder.create<arith::XOrIOp>(loc, writeMaskInverse, flipVal);
+      arith::ConstantOp::create(builder, loc, dstIntegerType, flipValAttr);
+  return arith::XOrIOp::create(builder, loc, writeMaskInverse, flipVal);
 }
 
 /// Returns the scaled linearized index based on the `srcBits` and `dstBits`
@@ -141,7 +141,7 @@ getLinearizedSrcIndices(OpBuilder &builder, Location loc, int64_t srcBits,
                         const SmallVector<OpFoldResult> &indices,
                         Value memref) {
   auto stridedMetadata =
-      builder.create<memref::ExtractStridedMetadataOp>(loc, memref);
+      memref::ExtractStridedMetadataOp::create(builder, loc, memref);
   OpFoldResult linearizedIndices;
   std::tie(std::ignore, linearizedIndices) =
       memref::getLinearizedMemRefOffsetAndSize(
@@ -298,24 +298,24 @@ struct ConvertMemRefLoad final : OpConversionPattern<memref::LoadOp> {
     // Special case 0-rank memref loads.
     Value bitsLoad;
     if (convertedType.getRank() == 0) {
-      bitsLoad = rewriter.create<memref::LoadOp>(loc, adaptor.getMemref(),
-                                                 ValueRange{});
+      bitsLoad = memref::LoadOp::create(rewriter, loc, adaptor.getMemref(),
+                                        ValueRange{});
     } else {
       // Linearize the indices of the original load instruction. Do not account
       // for the scaling yet. This will be accounted for later.
       OpFoldResult linearizedIndices = getLinearizedSrcIndices(
           rewriter, loc, srcBits, adaptor.getIndices(), op.getMemRef());
 
-      Value newLoad = rewriter.create<memref::LoadOp>(
-          loc, adaptor.getMemref(),
+      Value newLoad = memref::LoadOp::create(
+          rewriter, loc, adaptor.getMemref(),
           getIndicesForLoadOrStore(rewriter, loc, linearizedIndices, srcBits,
                                    dstBits));
 
       // Get the offset and shift the bits to the rightmost.
       // Note, currently only the big-endian is supported.
       Value bitwidthOffset = getOffsetForBitwidth(loc, linearizedIndices,
                                                   srcBits, dstBits, rewriter);
-      bitsLoad = rewriter.create<arith::ShRSIOp>(loc, newLoad, bitwidthOffset);
+      bitsLoad = arith::ShRSIOp::create(rewriter, loc, newLoad, bitwidthOffset);
     }
 
     // Get the corresponding bits. If the arith computation bitwidth equals
@@ -331,17 +331,17 @@ struct ConvertMemRefLoad final : OpConversionPattern<memref::LoadOp> {
             : IntegerType::get(rewriter.getContext(),
                                resultTy.getIntOrFloatBitWidth());
     if (conversionTy == convertedElementType) {
-      auto mask = rewriter.create<arith::ConstantOp>(
-          loc, convertedElementType,
+      auto mask = arith::ConstantOp::create(
+          rewriter, loc, convertedElementType,
           rewriter.getIntegerAttr(convertedElementType, (1 << srcBits) - 1));
 
-      result = rewriter.create<arith::AndIOp>(loc, bitsLoad, mask);
+      result = arith::AndIOp::create(rewriter, loc, bitsLoad, mask);
     } else {
-      result = rewriter.create<arith::TruncIOp>(loc, conversionTy, bitsLoad);
+      result = arith::TruncIOp::create(rewriter, loc, conversionTy, bitsLoad);
     }
 
     if (conversionTy != resultTy) {
-      result = rewriter.create<arith::BitcastOp>(loc, resultTy, result);
+      result = arith::BitcastOp::create(rewriter, loc, resultTy, result);
     }
 
     rewriter.replaceOp(op, result);
@@ -428,20 +428,20 @@ struct ConvertMemrefStore final : OpConversionPattern<memref::StoreOp> {
     // Pad the input value with 0s on the left.
     Value input = adaptor.getValue();
     if (!input.getType().isInteger()) {
-      input = rewriter.create<arith::BitcastOp>(
-          loc,
+      input = arith::BitcastOp::create(
+          rewriter, loc,
           IntegerType::get(rewriter.getContext(),
                            input.getType().getIntOrFloatBitWidth()),
           input);
     }
     Value extendedInput =
-        rewriter.create<arith::ExtUIOp>(loc, dstIntegerType, input);
+        arith::ExtUIOp::create(rewriter, loc, dstIntegerType, input);
 
     // Special case 0-rank memref stores. No need for masking.
     if (convertedType.getRank() == 0) {
-      rewriter.create<memref::AtomicRMWOp>(loc, arith::AtomicRMWKind::assign,
-                                           extendedInput, adaptor.getMemref(),
-                                           ValueRange{});
+      memref::AtomicRMWOp::create(rewriter, loc, arith::AtomicRMWKind::assign,
+                                  extendedInput, adaptor.getMemref(),
+                                  ValueRange{});
       rewriter.eraseOp(op);
       return success();
     }
@@ -456,16 +456,14 @@ struct ConvertMemrefStore final : OpConversionPattern<memref::StoreOp> {
                                           dstBits, bitwidthOffset, rewriter);
     // Align the value to write with the destination bits
     Value alignedVal =
-        rewriter.create<arith::ShLIOp>(loc, extendedInput, bitwidthOffset);
+        arith::ShLIOp::create(rewriter, loc, extendedInput, bitwidthOffset);
 
     // Clear destination bits
-    rewriter.create<memref::AtomicRMWOp>(loc, arith::AtomicRMWKind::andi,
-                                         writeMask, adaptor.getMemref(),
-                                         storeIndices);
+    memref::AtomicRMWOp::create(rewriter, loc, arith::AtomicRMWKind::andi,
+                                writeMask, adaptor.getMemref(), storeIndices);
     // Write srcs bits to destination
-    rewriter.create<memref::AtomicRMWOp>(loc, arith::AtomicRMWKind::ori,
-                                         alignedVal, adaptor.getMemref(),
-                                         storeIndices);
+    memref::AtomicRMWOp::create(rewriter, loc, arith::AtomicRMWKind::ori,
+                                alignedVal, adaptor.getMemref(), storeIndices);
     rewriter.eraseOp(op);
     return success();
   }
@@ -525,8 +523,8 @@ struct ConvertMemRefSubview final : OpConversionPattern<memref::SubViewOp> {
     }
 
     // Transform the offsets, sizes and strides according to the emulation.
-    auto stridedMetadata = rewriter.create<memref::ExtractStridedMetadataOp>(
-        loc, subViewOp.getViewSource());
+    auto stridedMetadata = memref::ExtractStridedMetadataOp::create(
+        rewriter, loc, subViewOp.getViewSource());
 
     OpFoldResult linearizedIndices;
     auto strides = stridedMetadata.getConstifiedMixedStrides();
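For context, a minimal sketch of the builder convention this patch switches to; it is not part of the patch itself, and arith::AddIOp, buildAddBothWays, and the header list are illustrative stand-ins for the ops and files touched above, assuming the static OpTy::create overloads used throughout the diff are available. Both calls build the same arith.addi operation; the only difference is that the new form makes the op type the entry point and passes the builder (or rewriter) as the first argument.

#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/Builders.h"

using namespace mlir;

// Emits the same addition twice, once per builder style; both produce identical IR.
static void buildAddBothWays(OpBuilder &b, Location loc, Value lhs, Value rhs) {
  // Old form: the OpBuilder drives creation; the op type is a template argument.
  Value oldStyle = b.create<arith::AddIOp>(loc, lhs, rhs);
  // New form used in this patch: the op's static create() takes the
  // builder/rewriter first, then the location and the usual operands,
  // e.g. arith::IndexCastOp::create(builder, loc, dstType, bitOffset) above.
  Value newStyle = arith::AddIOp::create(b, loc, lhs, rhs);
  (void)oldStyle;
  (void)newStyle;
}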