Diffstat (limited to 'clang/lib/CodeGen/CGStmt.cpp')
 clang/lib/CodeGen/CGStmt.cpp | 43 +++++++++++++++++++++++++++----------------
 1 file changed, 27 insertions(+), 16 deletions(-)
diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp
index aeb319ca1581..0a3a722fa653 100644
--- a/clang/lib/CodeGen/CGStmt.cpp
+++ b/clang/lib/CodeGen/CGStmt.cpp
@@ -2097,7 +2097,8 @@ CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
   } else {
     llvm::Type *Ty = ConvertType(InputType);
     uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
-    if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
+    if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
+        getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
       Ty = llvm::IntegerType::get(getLLVMContext(), Size);
       Ty = llvm::PointerType::getUnqual(Ty);
 
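The getTargetHooks().isScalarizableAsmOperand(*this, Ty) call added above is
the new extension point; its declaration is not part of this file. A minimal
sketch of the shape such a hook plausibly takes on TargetCodeGenInfo (the
surrounding declarations are assumptions for illustration, not lines from this
diff):

// Sketch only -- the real declaration lives elsewhere in clang/lib/CodeGen.
// The false default matters: targets that do not override the hook keep the
// old behavior of scalarizing only power-of-two sizes of 64 bits or fewer.
namespace llvm { class Type; }
namespace clang {
namespace CodeGen {
class CodeGenFunction;

class TargetCodeGenInfo {
public:
  /// Return true if an inline asm operand of type \p Ty may be passed or
  /// returned as a single integer of its bit-width even when that width is
  /// not a power of two of 64 bits or fewer.
  virtual bool isScalarizableAsmOperand(CodeGenFunction &CGF,
                                        llvm::Type *Ty) const {
    return false;
  }
};

} // namespace CodeGen
} // namespace clang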
@@ -2320,23 +2321,28 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
     // If this is a register output, then make the inline asm return it
     // by-value. If this is a memory result, return the value by-reference.
-    bool isScalarizableAggregate =
-        hasAggregateEvaluationKind(OutExpr->getType());
-    if (!Info.allowsMemory() && (hasScalarEvaluationKind(OutExpr->getType()) ||
-                                 isScalarizableAggregate)) {
+    QualType QTy = OutExpr->getType();
+    const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) ||
+                                     hasAggregateEvaluationKind(QTy);
+    if (!Info.allowsMemory() && IsScalarOrAggregate) {
+
       Constraints += "=" + OutputConstraint;
-      ResultRegQualTys.push_back(OutExpr->getType());
+      ResultRegQualTys.push_back(QTy);
       ResultRegDests.push_back(Dest);
-      ResultTruncRegTypes.push_back(ConvertTypeForMem(OutExpr->getType()));
-      if (Info.allowsRegister() && isScalarizableAggregate) {
-        ResultTypeRequiresCast.push_back(true);
-        unsigned Size = getContext().getTypeSize(OutExpr->getType());
-        llvm::Type *ConvTy = llvm::IntegerType::get(getLLVMContext(), Size);
-        ResultRegTypes.push_back(ConvTy);
-      } else {
-        ResultTypeRequiresCast.push_back(false);
-        ResultRegTypes.push_back(ResultTruncRegTypes.back());
+
+      llvm::Type *Ty = ConvertTypeForMem(QTy);
+      const bool RequiresCast = Info.allowsRegister() &&
+          (getTargetHooks().isScalarizableAsmOperand(*this, Ty) ||
+           Ty->isAggregateType());
+
+      ResultTruncRegTypes.push_back(Ty);
+      ResultTypeRequiresCast.push_back(RequiresCast);
+
+      if (RequiresCast) {
+        unsigned Size = getContext().getTypeSize(QTy);
+        Ty = llvm::IntegerType::get(getLLVMContext(), Size);
       }
+      ResultRegTypes.push_back(Ty);
 
       // If this output is tied to an input, and if the input is larger, then
       // we need to set the actual result type of the inline asm node to be the
       // same as the input type.
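To see the path this hunk rewrites, consider a hypothetical translation unit
(not from this commit) with an aggregate output bound to a register
constraint. Pair below is 64 bits on common targets, so RequiresCast is
satisfied through the Ty->isAggregateType() arm: the asm node returns an i64
by-value that is later cast back to the struct's memory type. The tied input
also exercises the EmitAsmInputLValue change in the first hunk:

// Hypothetical example: "=r" forces a register output even though Pair is an
// aggregate, so EmitAsmStmt records ResultTypeRequiresCast = true and makes
// the inline asm node return a 64-bit integer by-value.
struct Pair {
  int lo;
  int hi;
};

Pair copy_pair(Pair in) {
  Pair out;
  // "0" ties the aggregate input to the output register; the input side is
  // loaded as a single integer by EmitAsmInputLValue.
  __asm__("" : "=r"(out) : "0"(in));
  return out;
}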
@@ -2638,11 +2644,11 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
   assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
   for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
     llvm::Value *Tmp = RegResults[i];
+    llvm::Type *TruncTy = ResultTruncRegTypes[i];
 
     // If the result type of the LLVM IR asm doesn't match the result type of
     // the expression, do the conversion.
     if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
-      llvm::Type *TruncTy = ResultTruncRegTypes[i];
 
       // Truncate the integer result to the right size, note that TruncTy can be
       // a pointer.
@@ -2672,6 +2678,11 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
       unsigned Size = getContext().getTypeSize(ResultRegQualTys[i]);
       Address A = Builder.CreateBitCast(Dest.getAddress(*this),
                                         ResultRegTypes[i]->getPointerTo());
+      if (getTargetHooks().isScalarizableAsmOperand(*this, TruncTy)) {
+        Builder.CreateStore(Tmp, A);
+        continue;
+      }
+
       QualType Ty = getContext().getIntTypeForBitwidth(Size, /*Signed*/ false);
       if (Ty.isNull()) {
         const Expr *OutExpr = S.getOutputExpr(i);
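A target opts in by overriding the hook in its TargetCodeGenInfo subclass.
The target and predicate below are illustrative assumptions rather than code
from this commit:

#include "llvm/IR/Type.h" // for llvm::Type::isX86_FP80Ty()

// Hypothetical override: let 80-bit x87 long double (x86_fp80, whose 80-bit
// size is not a power of two) use the by-value register path. For such a
// type, the new store above (CreateStore followed by continue) writes the
// asm result directly, skipping the integer round-trip of the fallback path.
class MyTargetCodeGenInfo : public clang::CodeGen::TargetCodeGenInfo {
public:
  bool isScalarizableAsmOperand(clang::CodeGen::CodeGenFunction &CGF,
                                llvm::Type *Ty) const override {
    return Ty->isX86_FP80Ty();
  }
};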