[mlir][OpenMP] Add __atomic_store to AtomicInfo #121055
Conversation
@llvm/pr-subscribers-mlir-openmp @llvm/pr-subscribers-flang-openmp @llvm/pr-subscribers-mlir @llvm/pr-subscribers-mlir-llvm

Author: None (NimishMishra)

Changes: This PR adds functionality for the `__atomic_store` libcall in AtomicInfo. This allows supporting complex types in `atomic write`.

Fixes #113479

Full diff: https://github.com/llvm/llvm-project/pull/121055.diff

4 Files Affected:
diff --git a/llvm/include/llvm/Frontend/Atomic/Atomic.h b/llvm/include/llvm/Frontend/Atomic/Atomic.h
index 9f46fde6292a90..d9819db17cf645 100644
--- a/llvm/include/llvm/Frontend/Atomic/Atomic.h
+++ b/llvm/include/llvm/Frontend/Atomic/Atomic.h
@@ -96,6 +96,8 @@ class AtomicInfo {
bool IsVolatile, bool IsWeak);
std::pair<LoadInst *, AllocaInst *> EmitAtomicLoadLibcall(AtomicOrdering AO);
+
+ void EmitAtomicStoreLibcall(AtomicOrdering AO, Value *Source);
};
} // end namespace llvm
diff --git a/llvm/lib/Frontend/Atomic/Atomic.cpp b/llvm/lib/Frontend/Atomic/Atomic.cpp
index c9f9a9dcfb702a..31a8794b2b83a9 100644
--- a/llvm/lib/Frontend/Atomic/Atomic.cpp
+++ b/llvm/lib/Frontend/Atomic/Atomic.cpp
@@ -141,6 +141,39 @@ AtomicInfo::EmitAtomicLoadLibcall(AtomicOrdering AO) {
AllocaResult);
}
+void AtomicInfo::EmitAtomicStoreLibcall(AtomicOrdering AO, Value *Source) {
+ LLVMContext &Ctx = getLLVMContext();
+ SmallVector<Value *, 6> Args;
+ AttributeList Attr;
+ Module *M = Builder->GetInsertBlock()->getModule();
+ const DataLayout &DL = M->getDataLayout();
+ Args.push_back(
+ ConstantInt::get(DL.getIntPtrType(Ctx), this->getAtomicSizeInBits() / 8));
+
+ Value *PtrVal = getAtomicPointer();
+ PtrVal = Builder->CreateAddrSpaceCast(PtrVal, PointerType::getUnqual(Ctx));
+ Args.push_back(PtrVal);
+
+ Value *SourceAlloca = Builder->CreateAlloca(Source->getType());
+ Builder->CreateStore(Source, SourceAlloca);
+ SourceAlloca = Builder->CreatePointerBitCastOrAddrSpaceCast(
+ SourceAlloca, Builder->getPtrTy());
+ Args.push_back(SourceAlloca);
+
+ Constant *OrderingVal =
+ ConstantInt::get(Type::getInt32Ty(Ctx), (int)toCABI(AO));
+ Args.push_back(OrderingVal);
+
+ SmallVector<Type *, 6> ArgTys;
+ for (Value *Arg : Args)
+ ArgTys.push_back(Arg->getType());
+ FunctionType *FnType = FunctionType::get(Type::getVoidTy(Ctx), ArgTys, false);
+ FunctionCallee LibcallFn =
+ M->getOrInsertFunction("__atomic_store", FnType, Attr);
+ CallInst *Call = Builder->CreateCall(LibcallFn, Args);
+ Call->setAttributes(Attr);
+}
+
std::pair<Value *, Value *> AtomicInfo::EmitAtomicCompareExchange(
Value *ExpectedVal, Value *DesiredVal, AtomicOrdering Success,
AtomicOrdering Failure, bool IsVolatile, bool IsWeak) {
diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
index 0d8dbbe3a8a718..d47759a7bc9bd9 100644
--- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
+++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
@@ -8400,12 +8400,22 @@ OpenMPIRBuilder::createAtomicWrite(const LocationDescription &Loc,
"OMP Atomic expects a pointer to target memory");
Type *XElemTy = X.ElemTy;
assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||
- XElemTy->isPointerTy()) &&
+ XElemTy->isPointerTy() || XElemTy->isStructTy()) &&
"OMP atomic write expected a scalar type");
if (XElemTy->isIntegerTy()) {
StoreInst *XSt = Builder.CreateStore(Expr, X.Var, X.IsVolatile);
XSt->setAtomic(AO);
+ } else if (XElemTy->isStructTy()) {
+ LoadInst *OldVal = Builder.CreateLoad(XElemTy, X.Var, "omp.atomic.read");
+ const DataLayout &LoadDL = OldVal->getModule()->getDataLayout();
+ unsigned LoadSize =
+ LoadDL.getTypeStoreSize(OldVal->getPointerOperand()->getType());
+ OpenMPIRBuilder::AtomicInfo atomicInfo(
+ &Builder, XElemTy, LoadSize * 8, LoadSize * 8, OldVal->getAlign(),
+ OldVal->getAlign(), true /* UseLibcall */, X.Var);
+ atomicInfo.EmitAtomicStoreLibcall(AO, Expr);
+ OldVal->eraseFromParent();
} else {
// We need to bitcast and perform atomic op as integers
IntegerType *IntCastTy =
diff --git a/mlir/test/Target/LLVMIR/openmp-llvm.mlir b/mlir/test/Target/LLVMIR/openmp-llvm.mlir
index 5f8bdf8afdf783..1ae6218ec11004 100644
--- a/mlir/test/Target/LLVMIR/openmp-llvm.mlir
+++ b/mlir/test/Target/LLVMIR/openmp-llvm.mlir
@@ -1409,6 +1409,28 @@ llvm.func @omp_atomic_update(%x:!llvm.ptr, %expr: i32, %xbool: !llvm.ptr, %exprb
llvm.return
}
+// ----
+
+//CHECK-LABEL: @atomic_complex_write
+//CHECK: %[[V:.*]] = alloca { float, float }, i64 1, align 8
+//CHECK: %[[X:.*]] = alloca { float, float }, i64 1, align 8
+//CHECK: %[[LOAD:.*]] = load { float, float }, ptr %1, align 4
+//CHECK: %[[ALLOCA:.*]] = alloca { float, float }, align 8
+//CHECK: store { float, float } %[[LOAD]], ptr %[[ALLOCA]], align 4
+//CHECK: call void @__atomic_store(i64 8, ptr %[[X]], ptr %[[ALLOCA]], i32 0)
+
+llvm.func @atomic_complex_write() {
+ %0 = llvm.mlir.constant(1 : i64) : i64
+ %1 = llvm.alloca %0 x !llvm.struct<(f32, f32)> {bindc_name = "r"} : (i64) -> !llvm.ptr
+ %2 = llvm.mlir.constant(1 : i64) : i64
+ %3 = llvm.alloca %2 x !llvm.struct<(f32, f32)> {bindc_name = "l"} : (i64) -> !llvm.ptr
+ %4 = llvm.mlir.constant(1 : i64) : i64
+ %5 = llvm.mlir.constant(1 : i64) : i64
+ %6 = llvm.load %1 : !llvm.ptr -> !llvm.struct<(f32, f32)>
+ omp.atomic.write %3 = %6 : !llvm.ptr, !llvm.struct<(f32, f32)>
+ llvm.return
+}
+
// -----
//CHECK: %[[X_NEW_VAL:.*]] = alloca { float, float }, align 8
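For reference, a minimal sketch (not part of the patch) of the generic libatomic interface that `EmitAtomicStoreLibcall` targets. The four arguments it assembles — object size in bytes, destination pointer, pointer to a temporary holding the new value, and the C ABI memory-order constant — correspond to the documented generic prototype `void __atomic_store(size_t size, void *ptr, void *val, int memorder)`. The snippet below uses the GCC/Clang generic `__atomic_store` builtin on a two-float struct; for objects the target cannot store atomically inline, the compiler lowers this to that same libcall (link with -latomic). The names in it are made up for illustration.

```cpp
// Illustrative only: mirrors the { float, float } case exercised by the
// MLIR test above. Cmplx/shared/tmp are hypothetical names for this sketch.
struct Cmplx { float re, im; };

int main() {
  Cmplx shared{0.0f, 0.0f};
  Cmplx tmp{1.0f, 2.0f};  // source temporary, analogous to the alloca the patch creates
  // Generic builtin form; for sizes the target cannot handle inline this
  // becomes the libatomic call
  //   __atomic_store(sizeof(Cmplx), &shared, &tmp, __ATOMIC_RELAXED)
  __atomic_store(&shared, &tmp, __ATOMIC_RELAXED);
  return 0;
}
```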
Thanks for your work on this. I think there is some memory corruption going on for complex numbers of different kinds:
When I run this I hit the second stop "v2 corrupted at 2". The problem goes away when x and v have the same kind.
Thanks for the issue @tblah. I am working on a fix for this.
Hi @tblah, I have made a branch off this PR here: https://github.com/NimishMishra/f18-llvm-project/commits/atomic_allocated_type_size/. Could you check if you are happy with it? I can submit it for review as a separate PR then, since the issue is with all of atomic read/write/update.
Thanks for looking into this. I think we need to perform an actual cast at the Fortran level. For example, this segfaults when built with OpenMP because the write of
Building without OpenMP there is a
Hi @tblah, it seems that just adding a convert in Fortran is not sufficient. I added an explicit convert, and somewhere down the line it gets removed, and I can still reproduce the segfault. FIR:
Corresponding LLVM dialect:
I'm investigating where the issue is.
Hi @tblah, I have implemented a WIP solution here: NimishMishra@ae1196f#diff-a6c8db9d350ec59f4eb93f27f29468b01c9590426a11c9cb79e499bc96b28adc. Do you think this could be an acceptable way of going about this? One upside I see is that we no longer need to do the implicit cast while building LLVM IR, about which Michael also had reservations. If you are okay with the approach, I'll prepare a new PR for the same, combining casts for the different atomic operations. In that case, maybe we can let this current PR go through (since the fix is completely unrelated).
Thanks for the update. This looks good to me. We might need casts for other kinds of atomic operations too.
Thanks @Meinersbur. Indeed, that PR has a better approach. I am adding @tblah as a reviewer there, in case he would like to take a look. Do we have to wait for acceptance from the Clang community though? Last time, IIRC, there was some objection to the RFC.
@tblah / @Meinersbur The PR #134455 will take a while to land; I was following the discussion Michael had at EuroLLVM (which he has summarized on the PR). The two issues fixed by this PR are blockers for removing the experimental status on OpenMP: #110008. Is it okay if we go ahead with merging this PR before LLVM branches out this year?
Force-pushed from 1ef64f2 to 80bf85a
The test failure |
LGTM
"OMP atomic write expected a scalar type"); | ||
|
||
if (XElemTy->isIntegerTy()) { | ||
StoreInst *XSt = Builder.CreateStore(Expr, X.Var, X.IsVolatile); | ||
XSt->setAtomic(AO); | ||
} else if (XElemTy->isStructTy()) { | ||
|
[nit] empty line looks unnecessary here
Force-pushed from 80bf85a to 5cdbde1
Thanks Nimish
This PR adds functionality for the `__atomic_store` libcall in AtomicInfo. This allows supporting complex types in `atomic write`.

Fixes #113479
Fixes #115652