; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -fast-isel -mtriple=i686-unknown-unknown -O0 -mcpu=skx | FileCheck %s
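; The IR below exercises two chained logical-or expressions lowered to
; short-circuit control flow (%lor.rhs/%lor.end and %lor.rhs4/%lor.end5 with
; i1 phis); the CHECK lines pin down the i686 code fast-isel emits for this
; pattern at -O0. If codegen changes, the assertions can be regenerated with
; utils/update_llc_test_checks.py rather than edited by hand.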

define i32 @_Z3foov() {
; CHECK-LABEL: _Z3foov:
; CHECK:       # BB#0: # %entry
; CHECK-NEXT:    pushl %esi
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    subl $16, %esp
; CHECK-NEXT:    .cfi_def_cfa_offset 24
; CHECK-NEXT:    .cfi_offset %esi, -8
; CHECK-NEXT:    movb $1, %al
; CHECK-NEXT:    movw $10959, {{[0-9]+}}(%esp) # imm = 0x2ACF
; CHECK-NEXT:    movw $-15498, {{[0-9]+}}(%esp) # imm = 0xC376
; CHECK-NEXT:    movw $19417, {{[0-9]+}}(%esp) # imm = 0x4BD9
; CHECK-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT:    cmpw $0, {{[0-9]+}}(%esp)
; CHECK-NEXT:    movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; CHECK-NEXT:    movb %al, {{[0-9]+}}(%esp) # 1-byte Spill
; CHECK-NEXT:    jne .LBB0_2
; CHECK-NEXT:  # BB#1: # %lor.rhs
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    movb %al, %cl
; CHECK-NEXT:    movb %cl, {{[0-9]+}}(%esp) # 1-byte Spill
; CHECK-NEXT:    jmp .LBB0_2
; CHECK-NEXT:  .LBB0_2: # %lor.end
; CHECK-NEXT:    movb {{[0-9]+}}(%esp), %al # 1-byte Reload
; CHECK-NEXT:    movb $1, %cl
; CHECK-NEXT:    andb $1, %al
; CHECK-NEXT:    movzbl %al, %edx
; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
; CHECK-NEXT:    cmpl %edx, %esi
; CHECK-NEXT:    setl %al
; CHECK-NEXT:    andb $1, %al
; CHECK-NEXT:    movzbl %al, %edx
; CHECK-NEXT:    xorl $-1, %edx
; CHECK-NEXT:    cmpl $0, %edx
; CHECK-NEXT:    movb %cl, {{[0-9]+}}(%esp) # 1-byte Spill
; CHECK-NEXT:    jne .LBB0_4
; CHECK-NEXT:  # BB#3: # %lor.rhs4
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    movb %al, %cl
; CHECK-NEXT:    movb %cl, {{[0-9]+}}(%esp) # 1-byte Spill
; CHECK-NEXT:    jmp .LBB0_4
; CHECK-NEXT:  .LBB0_4: # %lor.end5
; CHECK-NEXT:    movb {{[0-9]+}}(%esp), %al # 1-byte Reload
; CHECK-NEXT:    andb $1, %al
; CHECK-NEXT:    movzbl %al, %ecx
; CHECK-NEXT:    movw %cx, %dx
; CHECK-NEXT:    movw %dx, {{[0-9]+}}(%esp)
; CHECK-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    addl $16, %esp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    popl %esi
; CHECK-NEXT:    .cfi_def_cfa_offset 4
; CHECK-NEXT:    retl
entry:
  %aa = alloca i16, align 2
  %bb = alloca i16, align 2
  %cc = alloca i16, align 2
  store i16 10959, i16* %aa, align 2
  store i16 -15498, i16* %bb, align 2
  store i16 19417, i16* %cc, align 2
  %0 = load i16, i16* %aa, align 2
  %conv = zext i16 %0 to i32
  %1 = load i16, i16* %cc, align 2
  %tobool = icmp ne i16 %1, 0
  br i1 %tobool, label %lor.end, label %lor.rhs

lor.rhs:                                          ; preds = %entry
  br label %lor.end

lor.end:                                          ; preds = %lor.rhs, %entry
  %2 = phi i1 [ true, %entry ], [ false, %lor.rhs ]
  %conv1 = zext i1 %2 to i32
  %cmp = icmp slt i32 %conv, %conv1
  %conv2 = zext i1 %cmp to i32
  %neg = xor i32 %conv2, -1
  %tobool3 = icmp ne i32 %neg, 0
  br i1 %tobool3, label %lor.end5, label %lor.rhs4

lor.rhs4:                                         ; preds = %lor.end
  br label %lor.end5

lor.end5:                                         ; preds = %lor.rhs4, %lor.end
  %3 = phi i1 [ true, %lor.end ], [ false, %lor.rhs4 ]
  %conv6 = zext i1 %3 to i16
  store i16 %conv6, i16* %bb, align 2
  %4 = load i16, i16* %bb, align 2
  %conv7 = zext i16 %4 to i32
  ret i32 %conv7
}