diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 3c85bb4b6b41d..f98b0555dc6b6 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -54,10 +54,9 @@ namespace direct {
 namespace {
 /// If the given type is a vector type, return the vector's element type.
 /// Otherwise return the given type unchanged.
-// TODO(cir): Return the vector element type once we have support for vectors
-// instead of the identity type.
 mlir::Type elementTypeIfVector(mlir::Type type) {
-  assert(!cir::MissingFeatures::vectorType());
+  if (const auto vecType = mlir::dyn_cast<cir::VectorType>(type))
+    return vecType.getElementType();
   return type;
 }
 } // namespace
@@ -1043,12 +1042,11 @@ mlir::LogicalResult CIRToLLVMUnaryOpLowering::matchAndRewrite(
     mlir::ConversionPatternRewriter &rewriter) const {
   assert(op.getType() == op.getInput().getType() &&
          "Unary operation's operand type and result type are different");
-  mlir::Type type = op.getType();
-  mlir::Type elementType = type;
-  bool isVector = false;
-  assert(!cir::MissingFeatures::vectorType());
-  mlir::Type llvmType = getTypeConverter()->convertType(type);
-  mlir::Location loc = op.getLoc();
+  const mlir::Type type = op.getType();
+  const mlir::Type elementType = elementTypeIfVector(type);
+  const bool isVector = mlir::isa<cir::VectorType>(type);
+  const mlir::Type llvmType = getTypeConverter()->convertType(type);
+  const mlir::Location loc = op.getLoc();
 
   // Integer unary operations: + - ~ ++ --
   if (mlir::isa<cir::IntType>(elementType)) {
@@ -1076,20 +1074,41 @@ mlir::LogicalResult CIRToLLVMUnaryOpLowering::matchAndRewrite(
       rewriter.replaceOp(op, adaptor.getInput());
       return mlir::success();
     case cir::UnaryOpKind::Minus: {
-      assert(!isVector &&
-             "Add vector handling when vector types are supported");
-      mlir::LLVM::ConstantOp zero = rewriter.create<mlir::LLVM::ConstantOp>(
-          loc, llvmType, mlir::IntegerAttr::get(llvmType, 0));
+      mlir::Value zero;
+      if (isVector)
+        zero = rewriter.create<mlir::LLVM::ZeroOp>(loc, llvmType);
+      else
+        zero = rewriter.create<mlir::LLVM::ConstantOp>(
+            loc, llvmType, mlir::IntegerAttr::get(llvmType, 0));
       rewriter.replaceOpWithNewOp<mlir::LLVM::SubOp>(
           op, llvmType, zero, adaptor.getInput(), maybeNSW);
       return mlir::success();
     }
     case cir::UnaryOpKind::Not: {
       // bit-wise complement operator, implemented as an XOR with -1.
-      assert(!isVector &&
-             "Add vector handling when vector types are supported");
-      mlir::LLVM::ConstantOp minusOne = rewriter.create<mlir::LLVM::ConstantOp>(
-          loc, llvmType, mlir::IntegerAttr::get(llvmType, -1));
+      mlir::Value minusOne;
+      if (isVector) {
+        // Creating a vector object with all -1 values is easier said than
+        // done. It requires a series of insertelement ops.
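+        // For a 4-element vector, the loop below emits roughly:
+        //   %c  = llvm.mlir.constant(-1 : i32) : i32
+        //   %v0 = llvm.mlir.undef : vector<4xi32>
+        //   %v1 = llvm.insertelement %c, %v0[%idx0 : i64] : vector<4xi32>
+        //   ... one insertelement per element ...
+        // Constant folding collapses the chain into a single splat (i32 -1)
+        // constant, which is what the LLVM test checks below match against.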
+        const mlir::Type llvmElementType =
+            getTypeConverter()->convertType(elementType);
+        const mlir::Value minusOneInt = rewriter.create<mlir::LLVM::ConstantOp>(
+            loc, llvmElementType, mlir::IntegerAttr::get(llvmElementType, -1));
+        minusOne = rewriter.create<mlir::LLVM::UndefOp>(loc, llvmType);
+
+        const uint64_t numElements =
+            mlir::dyn_cast<cir::VectorType>(type).getSize();
+        for (uint64_t i = 0; i < numElements; ++i) {
+          const mlir::Value indexValue =
+              rewriter.create<mlir::LLVM::ConstantOp>(loc,
+                                                      rewriter.getI64Type(), i);
+          minusOne = rewriter.create<mlir::LLVM::InsertElementOp>(
+              loc, minusOne, minusOneInt, indexValue);
+        }
+      } else {
+        minusOne = rewriter.create<mlir::LLVM::ConstantOp>(
+            loc, llvmType, mlir::IntegerAttr::get(llvmType, -1));
+      }
       rewriter.replaceOpWithNewOp<mlir::LLVM::XOrOp>(
           op, llvmType, adaptor.getInput(), minusOne);
       return mlir::success();
diff --git a/clang/test/CIR/CodeGen/vector-ext.cpp b/clang/test/CIR/CodeGen/vector-ext.cpp
index a16ef42f113df..504a13d9bb237 100644
--- a/clang/test/CIR/CodeGen/vector-ext.cpp
+++ b/clang/test/CIR/CodeGen/vector-ext.cpp
@@ -337,6 +337,63 @@ void foo7() {
 // OGCG: %[[NEW_VEC:.*]] = insertelement <4 x i32> %[[TMP2]], i32 %[[RES]], i32 2
 // OGCG: store <4 x i32> %[[NEW_VEC]], ptr %[[VEC]], align 16
 
+
+void foo8() {
+  vi4 a = { 1, 2, 3, 4 };
+  vi4 plus_res = +a;
+  vi4 minus_res = -a;
+  vi4 not_res = ~a;
+}
+
+// CIR: %[[VEC:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>, ["a", init]
+// CIR: %[[PLUS_RES:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>, ["plus_res", init]
+// CIR: %[[MINUS_RES:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>, ["minus_res", init]
+// CIR: %[[NOT_RES:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>, ["not_res", init]
+// CIR: %[[CONST_1:.*]] = cir.const #cir.int<1> : !s32i
+// CIR: %[[CONST_2:.*]] = cir.const #cir.int<2> : !s32i
+// CIR: %[[CONST_3:.*]] = cir.const #cir.int<3> : !s32i
+// CIR: %[[CONST_4:.*]] = cir.const #cir.int<4> : !s32i
+// CIR: %[[VEC_VAL:.*]] = cir.vec.create(%[[CONST_1]], %[[CONST_2]], %[[CONST_3]], %[[CONST_4]] :
+// CIR-SAME: !s32i, !s32i, !s32i, !s32i) : !cir.vector<4 x !s32i>
+// CIR: cir.store %[[VEC_VAL]], %[[VEC]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+// CIR: %[[TMP1:.*]] = cir.load %[[VEC]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
+// CIR: %[[PLUS:.*]] = cir.unary(plus, %[[TMP1]]) : !cir.vector<4 x !s32i>, !cir.vector<4 x !s32i>
+// CIR: cir.store %[[PLUS]], %[[PLUS_RES]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+// CIR: %[[TMP2:.*]] = cir.load %[[VEC]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
+// CIR: %[[MINUS:.*]] = cir.unary(minus, %[[TMP2]]) : !cir.vector<4 x !s32i>, !cir.vector<4 x !s32i>
+// CIR: cir.store %[[MINUS]], %[[MINUS_RES]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+// CIR: %[[TMP3:.*]] = cir.load %[[VEC]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
+// CIR: %[[NOT:.*]] = cir.unary(not, %[[TMP3]]) : !cir.vector<4 x !s32i>, !cir.vector<4 x !s32i>
+// CIR: cir.store %[[NOT]], %[[NOT_RES]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+
+// LLVM: %[[VEC:.*]] = alloca <4 x i32>, i64 1, align 16
+// LLVM: %[[PLUS_RES:.*]] = alloca <4 x i32>, i64 1, align 16
+// LLVM: %[[MINUS_RES:.*]] = alloca <4 x i32>, i64 1, align 16
+// LLVM: %[[NOT_RES:.*]] = alloca <4 x i32>, i64 1, align 16
+// LLVM: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr %[[VEC]], align 16
+// LLVM: %[[TMP1:.*]] = load <4 x i32>, ptr %[[VEC]], align 16
+// LLVM: store <4 x i32> %[[TMP1]], ptr %[[PLUS_RES]], align 16
+// LLVM: %[[TMP2:.*]] = load <4 x i32>, ptr %[[VEC]], align 16
+// LLVM: %[[SUB:.*]] = sub <4 x i32> zeroinitializer, %[[TMP2]]
+// LLVM: store <4 x i32> %[[SUB]], ptr %[[MINUS_RES]], align 16
+// LLVM: %[[TMP3:.*]] = load <4 x i32>, ptr %[[VEC]], align 16
+// LLVM: %[[NOT:.*]] = xor <4 x i32> %[[TMP3]], splat (i32 -1)
+// LLVM: store <4 x i32> %[[NOT]], ptr %[[NOT_RES]], align 16
+
+// OGCG: %[[VEC:.*]] = alloca <4 x i32>, align 16
+// OGCG: %[[PLUS_RES:.*]] = alloca <4 x i32>, align 16
+// OGCG: %[[MINUS_RES:.*]] = alloca <4 x i32>, align 16
+// OGCG: %[[NOT_RES:.*]] = alloca <4 x i32>, align 16
+// OGCG: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr %[[VEC]], align 16
+// OGCG: %[[TMP1:.*]] = load <4 x i32>, ptr %[[VEC]], align 16
+// OGCG: store <4 x i32> %[[TMP1]], ptr %[[PLUS_RES]], align 16
+// OGCG: %[[TMP2:.*]] = load <4 x i32>, ptr %[[VEC]], align 16
+// OGCG: %[[SUB:.*]] = sub <4 x i32> zeroinitializer, %[[TMP2]]
+// OGCG: store <4 x i32> %[[SUB]], ptr %[[MINUS_RES]], align 16
+// OGCG: %[[TMP3:.*]] = load <4 x i32>, ptr %[[VEC]], align 16
+// OGCG: %[[NOT:.*]] = xor <4 x i32> %[[TMP3]], splat (i32 -1)
+// OGCG: store <4 x i32> %[[NOT]], ptr %[[NOT_RES]], align 16
+
 void foo9() {
   vi4 a = {1, 2, 3, 4};
   vi4 b = {5, 6, 7, 8};
diff --git a/clang/test/CIR/CodeGen/vector.cpp b/clang/test/CIR/CodeGen/vector.cpp
index 4546215865095..ad5641121d852 100644
--- a/clang/test/CIR/CodeGen/vector.cpp
+++ b/clang/test/CIR/CodeGen/vector.cpp
@@ -325,6 +325,63 @@ void foo7() {
 // OGCG: %[[NEW_VEC:.*]] = insertelement <4 x i32> %[[TMP2]], i32 %[[RES]], i32 2
 // OGCG: store <4 x i32> %[[NEW_VEC]], ptr %[[VEC]], align 16
 
+
+void foo8() {
+  vi4 a = { 1, 2, 3, 4 };
+  vi4 plus_res = +a;
+  vi4 minus_res = -a;
+  vi4 not_res = ~a;
+}
+
+// CIR: %[[VEC:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>, ["a", init]
+// CIR: %[[PLUS_RES:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>, ["plus_res", init]
+// CIR: %[[MINUS_RES:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>, ["minus_res", init]
+// CIR: %[[NOT_RES:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>, ["not_res", init]
+// CIR: %[[CONST_1:.*]] = cir.const #cir.int<1> : !s32i
+// CIR: %[[CONST_2:.*]] = cir.const #cir.int<2> : !s32i
+// CIR: %[[CONST_3:.*]] = cir.const #cir.int<3> : !s32i
+// CIR: %[[CONST_4:.*]] = cir.const #cir.int<4> : !s32i
+// CIR: %[[VEC_VAL:.*]] = cir.vec.create(%[[CONST_1]], %[[CONST_2]], %[[CONST_3]], %[[CONST_4]] :
+// CIR-SAME: !s32i, !s32i, !s32i, !s32i) : !cir.vector<4 x !s32i>
+// CIR: cir.store %[[VEC_VAL]], %[[VEC]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+// CIR: %[[TMP1:.*]] = cir.load %[[VEC]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
+// CIR: %[[PLUS:.*]] = cir.unary(plus, %[[TMP1]]) : !cir.vector<4 x !s32i>, !cir.vector<4 x !s32i>
+// CIR: cir.store %[[PLUS]], %[[PLUS_RES]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+// CIR: %[[TMP2:.*]] = cir.load %[[VEC]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
+// CIR: %[[MINUS:.*]] = cir.unary(minus, %[[TMP2]]) : !cir.vector<4 x !s32i>, !cir.vector<4 x !s32i>
+// CIR: cir.store %[[MINUS]], %[[MINUS_RES]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+// CIR: %[[TMP3:.*]] = cir.load %[[VEC]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
+// CIR: %[[NOT:.*]] = cir.unary(not, %[[TMP3]]) : !cir.vector<4 x !s32i>, !cir.vector<4 x !s32i>
+// CIR: cir.store %[[NOT]], %[[NOT_RES]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+
+// LLVM: %[[VEC:.*]] = alloca <4 x i32>, i64 1, align 16
+// LLVM: %[[PLUS_RES:.*]] = alloca <4 x i32>, i64 1, align 16
+// LLVM: %[[MINUS_RES:.*]] = alloca <4 x i32>, i64 1, align 16
+// LLVM: %[[NOT_RES:.*]] = alloca <4 x i32>, i64 1, align 16
+// LLVM: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr %[[VEC]], align 16
+// LLVM: %[[TMP1:.*]] = load <4 x i32>, ptr %[[VEC]], align 16
+// LLVM: store <4 x i32> %[[TMP1]], ptr %[[PLUS_RES]], align 16
+// LLVM: %[[TMP2:.*]] = load <4 x i32>, ptr %[[VEC]], align 16
+// LLVM: %[[SUB:.*]] = sub <4 x i32> zeroinitializer, %[[TMP2]]
+// LLVM: store <4 x i32> %[[SUB]], ptr %[[MINUS_RES]], align 16
+// LLVM: %[[TMP3:.*]] = load <4 x i32>, ptr %[[VEC]], align 16
+// LLVM: %[[NOT:.*]] = xor <4 x i32> %[[TMP3]], splat (i32 -1)
+// LLVM: store <4 x i32> %[[NOT]], ptr %[[NOT_RES]], align 16
+
+// OGCG: %[[VEC:.*]] = alloca <4 x i32>, align 16
+// OGCG: %[[PLUS_RES:.*]] = alloca <4 x i32>, align 16
+// OGCG: %[[MINUS_RES:.*]] = alloca <4 x i32>, align 16
+// OGCG: %[[NOT_RES:.*]] = alloca <4 x i32>, align 16
+// OGCG: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr %[[VEC]], align 16
+// OGCG: %[[TMP1:.*]] = load <4 x i32>, ptr %[[VEC]], align 16
+// OGCG: store <4 x i32> %[[TMP1]], ptr %[[PLUS_RES]], align 16
+// OGCG: %[[TMP2:.*]] = load <4 x i32>, ptr %[[VEC]], align 16
+// OGCG: %[[SUB:.*]] = sub <4 x i32> zeroinitializer, %[[TMP2]]
+// OGCG: store <4 x i32> %[[SUB]], ptr %[[MINUS_RES]], align 16
+// OGCG: %[[TMP3:.*]] = load <4 x i32>, ptr %[[VEC]], align 16
+// OGCG: %[[NOT:.*]] = xor <4 x i32> %[[TMP3]], splat (i32 -1)
+// OGCG: store <4 x i32> %[[NOT]], ptr %[[NOT_RES]], align 16
+
 void foo9() {
   vi4 a = {1, 2, 3, 4};
   vi4 b = {5, 6, 7, 8};
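
Note on the Not lowering: the insertelement chain is folded away, which is why
the FileCheck lines above match a single splat (i32 -1) constant rather than a
chain of inserts. An alternative is to materialize the all-ones vector directly
as a dense splat constant. A minimal sketch of that approach, assuming the type
converter maps !cir.vector to a builtin MLIR vector type; the helper name and
signature are illustrative only, not part of this patch:

  // Hypothetical helper: build an all-ones vector as one splat constant
  // instead of an insertelement chain. Assumes llvmType is an MLIR vector
  // type, as produced by the type converter for !cir.vector.
  static mlir::Value buildAllOnesVector(mlir::ConversionPatternRewriter &rewriter,
                                        mlir::Location loc, mlir::Type llvmType) {
    auto vecTy = mlir::cast<mlir::VectorType>(llvmType);
    mlir::Attribute minusOne =
        rewriter.getIntegerAttr(vecTy.getElementType(), -1);
    // A single value splats across the whole shape.
    auto splat = mlir::DenseElementsAttr::get(vecTy, minusOne);
    return rewriter.create<mlir::LLVM::ConstantOp>(loc, llvmType, splat);
  }

Both forms should fold to the same LLVM IR constant, so the checks above would
be unaffected.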