author    Chandler Carruth <chandlerc@gmail.com>    2011-12-12 11:59:10 +0000
committer Chandler Carruth <chandlerc@gmail.com>    2011-12-12 11:59:10 +0000
commit    ddbc274169ed4ee0e0ac32ed194b925a180202fe (patch)
tree      a15e94ed378c7e90d4b6985af905b07f82ad8a36 /test/Transforms/InstCombine/intrinsics.ll
parent    2106badea341062643d4e11d6e9975b871fa61b9 (diff)
Manually upgrade the test suite to specify the flag to cttz and ctlz.
I followed three heuristics for deciding whether to set 'true' or 'false':

- Everything target independent got 'true', as that is the expected common output of the GCC builtins.
- If the target arch only has one way of implementing this operation, set the flag in the way that exercises the most codegen. For most architectures this is also the likely path from a GCC builtin, with 'true' being set. It will (eventually) require lowering away that difference, and then lowering to the architecture's operation.
- Otherwise, set the flag differently depending on which target operation should be tested.

Let me know if anyone has any issue with this pattern or would like specific tests of another form. This should allow the x86 codegen to just iteratively improve as I teach the backend how to differentiate between the two forms, and everything else should remain exactly the same.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@146370 91177308-0d34-0410-b5e6-96231b3b80d8
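As an aside for readers upgrading their own IR (this sketch is not part of the commit's diff, and the function names are hypothetical): the new i1 argument declares whether the intrinsic's result is undefined when the input is zero. A minimal, self-contained illustration of the two forms, using the same intrinsic signature the updated test declares:

declare i32 @llvm.cttz.i32(i32, i1) nounwind readnone

define i32 @cttz_zero_undef(i32 %x) {
  ; 'true': the result is undefined when %x == 0, which can map directly
  ; onto instructions such as x86's BSF without a zero guard.
  %r = call i32 @llvm.cttz.i32(i32 %x, i1 true)
  ret i32 %r
}

define i32 @cttz_zero_defined(i32 %x) {
  ; 'false': the intrinsic must return the bit width (32 here) when
  ; %x == 0, which may require extra lowering on targets whose native
  ; instruction leaves that case undefined.
  %r = call i32 @llvm.cttz.i32(i32 %x, i1 false)
  ret i32 %r
}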
Diffstat (limited to 'test/Transforms/InstCombine/intrinsics.ll')
-rw-r--r--    test/Transforms/InstCombine/intrinsics.ll | 16
1 file changed, 8 insertions, 8 deletions
diff --git a/test/Transforms/InstCombine/intrinsics.ll b/test/Transforms/InstCombine/intrinsics.ll
index fb57a190aab..e31bd7dfee0 100644
--- a/test/Transforms/InstCombine/intrinsics.ll
+++ b/test/Transforms/InstCombine/intrinsics.ll
@@ -5,10 +5,10 @@
declare %overflow.result @llvm.uadd.with.overflow.i8(i8, i8)
declare %overflow.result @llvm.umul.with.overflow.i8(i8, i8)
declare double @llvm.powi.f64(double, i32) nounwind readonly
-declare i32 @llvm.cttz.i32(i32) nounwind readnone
-declare i32 @llvm.ctlz.i32(i32) nounwind readnone
+declare i32 @llvm.cttz.i32(i32, i1) nounwind readnone
+declare i32 @llvm.ctlz.i32(i32, i1) nounwind readnone
declare i32 @llvm.ctpop.i32(i32) nounwind readnone
-declare i8 @llvm.ctlz.i8(i8) nounwind readnone
+declare i8 @llvm.ctlz.i8(i8, i1) nounwind readnone
define i8 @uaddtest1(i8 %A, i8 %B) {
%x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 %A, i8 %B)
@@ -161,7 +161,7 @@ define i32 @cttz(i32 %a) {
entry:
%or = or i32 %a, 8
%and = and i32 %or, -8
- %count = tail call i32 @llvm.cttz.i32(i32 %and) nounwind readnone
+ %count = tail call i32 @llvm.cttz.i32(i32 %and, i1 true) nounwind readnone
ret i32 %count
; CHECK: @cttz
; CHECK-NEXT: entry:
@@ -172,7 +172,7 @@ define i8 @ctlz(i8 %a) {
entry:
%or = or i8 %a, 32
%and = and i8 %or, 63
- %count = tail call i8 @llvm.ctlz.i8(i8 %and) nounwind readnone
+ %count = tail call i8 @llvm.ctlz.i8(i8 %and, i1 true) nounwind readnone
ret i8 %count
; CHECK: @ctlz
; CHECK-NEXT: entry:
@@ -181,10 +181,10 @@ entry:
define void @cmp.simplify(i32 %a, i32 %b, i1* %c) {
entry:
- %lz = tail call i32 @llvm.ctlz.i32(i32 %a) nounwind readnone
+ %lz = tail call i32 @llvm.ctlz.i32(i32 %a, i1 true) nounwind readnone
%lz.cmp = icmp eq i32 %lz, 32
store volatile i1 %lz.cmp, i1* %c
- %tz = tail call i32 @llvm.cttz.i32(i32 %a) nounwind readnone
+ %tz = tail call i32 @llvm.cttz.i32(i32 %a, i1 true) nounwind readnone
%tz.cmp = icmp ne i32 %tz, 32
store volatile i1 %tz.cmp, i1* %c
%pop = tail call i32 @llvm.ctpop.i32(i32 %b) nounwind readnone
@@ -203,7 +203,7 @@ entry:
define i32 @cttz_simplify1(i32 %x) nounwind readnone ssp {
- %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %x) ; <i32> [#uses=1]
+ %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %x, i1 true) ; <i32> [#uses=1]
%shr3 = lshr i32 %tmp1, 5 ; <i32> [#uses=1]
ret i32 %shr3