Diffstat (limited to 'test/CodeGen/X86/memcmp.ll')
-rw-r--r-- | test/CodeGen/X86/memcmp.ll | 827
1 file changed, 532 insertions(+), 295 deletions(-)
diff --git a/test/CodeGen/X86/memcmp.ll b/test/CodeGen/X86/memcmp.ll
index 0e09abf73c8c9..2e67827654624 100644
--- a/test/CodeGen/X86/memcmp.ll
+++ b/test/CodeGen/X86/memcmp.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86 --check-prefix=X86-NOSSE
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=cmov | FileCheck %s --check-prefix=X86 --check-prefix=X86-NOSSE
 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86 --check-prefix=X86-SSE2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64 --check-prefix=X64-SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=AVX2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX2
 ; This tests codegen time inlining/optimization of memcmp
 ; rdar://6480398
@@ -12,43 +12,21 @@ declare i32 @memcmp(i8*, i8*, i64)
 define i32 @length2(i8* %X, i8* %Y) nounwind {
-; X86-NOSSE-LABEL: length2:
-; X86-NOSSE: # BB#0:
-; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NOSSE-NEXT: movzwl (%ecx), %ecx
-; X86-NOSSE-NEXT: movzwl (%eax), %eax
-; X86-NOSSE-NEXT: rolw $8, %cx
-; X86-NOSSE-NEXT: rolw $8, %ax
-; X86-NOSSE-NEXT: cmpw %ax, %cx
-; X86-NOSSE-NEXT: movl $-1, %eax
-; X86-NOSSE-NEXT: jae .LBB0_1
-; X86-NOSSE-NEXT: # BB#2:
-; X86-NOSSE-NEXT: je .LBB0_3
-; X86-NOSSE-NEXT: .LBB0_4:
-; X86-NOSSE-NEXT: retl
-; X86-NOSSE-NEXT: .LBB0_1:
-; X86-NOSSE-NEXT: movl $1, %eax
-; X86-NOSSE-NEXT: jne .LBB0_4
-; X86-NOSSE-NEXT: .LBB0_3:
-; X86-NOSSE-NEXT: xorl %eax, %eax
-; X86-NOSSE-NEXT: retl
-;
-; X86-SSE2-LABEL: length2:
-; X86-SSE2: # BB#0:
-; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-SSE2-NEXT: movzwl (%ecx), %ecx
-; X86-SSE2-NEXT: movzwl (%eax), %eax
-; X86-SSE2-NEXT: rolw $8, %cx
-; X86-SSE2-NEXT: rolw $8, %ax
-; X86-SSE2-NEXT: xorl %edx, %edx
-; X86-SSE2-NEXT: cmpw %ax, %cx
-; X86-SSE2-NEXT: movl $-1, %ecx
-; X86-SSE2-NEXT: movl $1, %eax
-; X86-SSE2-NEXT: cmovbl %ecx, %eax
-; X86-SSE2-NEXT: cmovel %edx, %eax
-; X86-SSE2-NEXT: retl
+; X86-LABEL: length2:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzwl (%ecx), %ecx
+; X86-NEXT: movzwl (%eax), %eax
+; X86-NEXT: rolw $8, %cx
+; X86-NEXT: rolw $8, %ax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: cmpw %ax, %cx
+; X86-NEXT: movl $-1, %ecx
+; X86-NEXT: movl $1, %eax
+; X86-NEXT: cmovbl %ecx, %eax
+; X86-NEXT: cmovel %edx, %eax
+; X86-NEXT: retl
 ;
 ; X64-LABEL: length2:
 ; X64: # BB#0:
@@ -137,44 +115,90 @@ define i1 @length2_eq_nobuiltin_attr(i8* %X, i8* %Y) nounwind {
 define i32 @length3(i8* %X, i8* %Y) nounwind {
 ; X86-LABEL: length3:
-; X86: # BB#0:
-; X86-NEXT: pushl $0
-; X86-NEXT: pushl $3
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: calll memcmp
-; X86-NEXT: addl $16, %esp
+; X86: # BB#0: # %loadbb
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzwl (%eax), %edx
+; X86-NEXT: movzwl (%ecx), %esi
+; X86-NEXT: rolw $8, %dx
+; X86-NEXT: rolw $8, %si
+; X86-NEXT: movzwl %dx, %edx
+; X86-NEXT: movzwl %si, %esi
+; X86-NEXT: cmpl %esi, %edx
+; X86-NEXT: jne .LBB4_1
+; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: movzbl 2(%eax), %eax
+; X86-NEXT: movzbl 2(%ecx), %ecx
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+; X86-NEXT: .LBB4_1: # %res_block
+; X86-NEXT: movl $-1, %ecx
+; X86-NEXT: movl $1, %eax
+; X86-NEXT: cmovbl %ecx, %eax
+; X86-NEXT: popl %esi
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: length3:
-; X64: # BB#0:
-; X64-NEXT: movl $3, %edx
-; X64-NEXT: jmp memcmp # TAILCALL
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movzwl (%rdi), %eax
+; X64-NEXT: movzwl (%rsi), %ecx
+; X64-NEXT: rolw $8, %ax
+; X64-NEXT: rolw $8, %cx
+; X64-NEXT: movzwl %ax, %eax
+; X64-NEXT: movzwl %cx, %ecx
+; X64-NEXT: cmpq %rcx, %rax
+; X64-NEXT: jne .LBB4_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: movzbl 2(%rdi), %eax
+; X64-NEXT: movzbl 2(%rsi), %ecx
+; X64-NEXT: subl %ecx, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB4_1: # %res_block
+; X64-NEXT: movl $-1, %ecx
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: cmovbl %ecx, %eax
+; X64-NEXT: retq
 %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 3) nounwind
 ret i32 %m
 }
 define i1 @length3_eq(i8* %X, i8* %Y) nounwind {
 ; X86-LABEL: length3_eq:
-; X86: # BB#0:
-; X86-NEXT: pushl $0
-; X86-NEXT: pushl $3
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: calll memcmp
-; X86-NEXT: addl $16, %esp
+; X86: # BB#0: # %loadbb
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzwl (%eax), %edx
+; X86-NEXT: cmpw (%ecx), %dx
+; X86-NEXT: jne .LBB5_1
+; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: movb 2(%eax), %dl
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: cmpb 2(%ecx), %dl
+; X86-NEXT: je .LBB5_3
+; X86-NEXT: .LBB5_1: # %res_block
+; X86-NEXT: movl $1, %eax
+; X86-NEXT: .LBB5_3: # %endblock
 ; X86-NEXT: testl %eax, %eax
 ; X86-NEXT: setne %al
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: length3_eq:
-; X64: # BB#0:
-; X64-NEXT: pushq %rax
-; X64-NEXT: movl $3, %edx
-; X64-NEXT: callq memcmp
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movzwl (%rdi), %eax
+; X64-NEXT: cmpw (%rsi), %ax
+; X64-NEXT: jne .LBB5_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: movb 2(%rdi), %cl
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpb 2(%rsi), %cl
+; X64-NEXT: je .LBB5_3
+; X64-NEXT: .LBB5_1: # %res_block
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: .LBB5_3: # %endblock
 ; X64-NEXT: testl %eax, %eax
 ; X64-NEXT: setne %al
-; X64-NEXT: popq %rcx
 ; X64-NEXT: retq
 %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 3) nounwind
 %c = icmp ne i32 %m, 0
@@ -182,43 +206,21 @@ define i1 @length3_eq(i8* %X, i8* %Y) nounwind {
 }
 define i32 @length4(i8* %X, i8* %Y) nounwind {
-; X86-NOSSE-LABEL: length4:
-; X86-NOSSE: # BB#0:
-; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NOSSE-NEXT: movl (%ecx), %ecx
-; X86-NOSSE-NEXT: movl (%eax), %eax
-; X86-NOSSE-NEXT: bswapl %ecx
-; X86-NOSSE-NEXT: bswapl %eax
-; X86-NOSSE-NEXT: cmpl %eax, %ecx
-; X86-NOSSE-NEXT: movl $-1, %eax
-; X86-NOSSE-NEXT: jae .LBB6_1
-; X86-NOSSE-NEXT: # BB#2:
-; X86-NOSSE-NEXT: je .LBB6_3
-; X86-NOSSE-NEXT: .LBB6_4:
-; X86-NOSSE-NEXT: retl
-; X86-NOSSE-NEXT: .LBB6_1:
-; X86-NOSSE-NEXT: movl $1, %eax
-; X86-NOSSE-NEXT: jne .LBB6_4
-; X86-NOSSE-NEXT: .LBB6_3:
-; X86-NOSSE-NEXT: xorl %eax, %eax
-; X86-NOSSE-NEXT: retl
-;
-; X86-SSE2-LABEL: length4:
-; X86-SSE2: # BB#0:
-; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-SSE2-NEXT: movl (%ecx), %ecx
-; X86-SSE2-NEXT: movl (%eax), %eax
-; X86-SSE2-NEXT: bswapl %ecx
-; X86-SSE2-NEXT: bswapl %eax
-; X86-SSE2-NEXT: xorl %edx, %edx
-; X86-SSE2-NEXT: cmpl %eax, %ecx
-; X86-SSE2-NEXT: movl $-1, %ecx
-; X86-SSE2-NEXT: movl $1, %eax
-; X86-SSE2-NEXT: cmovbl %ecx, %eax
-; X86-SSE2-NEXT: cmovel %edx, %eax
-; X86-SSE2-NEXT: retl
+; X86-LABEL: length4:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl (%ecx), %ecx
+; X86-NEXT: movl (%eax), %eax
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: bswapl %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: cmpl %eax, %ecx
+; X86-NEXT: movl $-1, %ecx
+; X86-NEXT: movl $1, %eax
+; X86-NEXT: cmovbl %ecx, %eax
+; X86-NEXT: cmovel %edx, %eax
+; X86-NEXT: retl
 ;
 ; X64-LABEL: length4:
 ; X64: # BB#0:
@@ -278,44 +280,86 @@ define i1 @length4_eq_const(i8* %X) nounwind {
 define i32 @length5(i8* %X, i8* %Y) nounwind {
 ; X86-LABEL: length5:
-; X86: # BB#0:
-; X86-NEXT: pushl $0
-; X86-NEXT: pushl $5
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: calll memcmp
-; X86-NEXT: addl $16, %esp
+; X86: # BB#0: # %loadbb
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: movl (%ecx), %esi
+; X86-NEXT: bswapl %edx
+; X86-NEXT: bswapl %esi
+; X86-NEXT: cmpl %esi, %edx
+; X86-NEXT: jne .LBB9_1
+; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: movzbl 4(%eax), %eax
+; X86-NEXT: movzbl 4(%ecx), %ecx
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+; X86-NEXT: .LBB9_1: # %res_block
+; X86-NEXT: movl $-1, %ecx
+; X86-NEXT: movl $1, %eax
+; X86-NEXT: cmovbl %ecx, %eax
+; X86-NEXT: popl %esi
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: length5:
-; X64: # BB#0:
-; X64-NEXT: movl $5, %edx
-; X64-NEXT: jmp memcmp # TAILCALL
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movl (%rdi), %eax
+; X64-NEXT: movl (%rsi), %ecx
+; X64-NEXT: bswapl %eax
+; X64-NEXT: bswapl %ecx
+; X64-NEXT: cmpq %rcx, %rax
+; X64-NEXT: jne .LBB9_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: movzbl 4(%rdi), %eax
+; X64-NEXT: movzbl 4(%rsi), %ecx
+; X64-NEXT: subl %ecx, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB9_1: # %res_block
+; X64-NEXT: movl $-1, %ecx
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: cmovbl %ecx, %eax
+; X64-NEXT: retq
 %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 5) nounwind
 ret i32 %m
 }
 define i1 @length5_eq(i8* %X, i8* %Y) nounwind {
 ; X86-LABEL: length5_eq:
-; X86: # BB#0:
-; X86-NEXT: pushl $0
-; X86-NEXT: pushl $5
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: calll memcmp
-; X86-NEXT: addl $16, %esp
+; X86: # BB#0: # %loadbb
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: cmpl (%ecx), %edx
+; X86-NEXT: jne .LBB10_1
+; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: movb 4(%eax), %dl
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: cmpb 4(%ecx), %dl
+; X86-NEXT: je .LBB10_3
+; X86-NEXT: .LBB10_1: # %res_block
+; X86-NEXT: movl $1, %eax
+; X86-NEXT: .LBB10_3: # %endblock
 ; X86-NEXT: testl %eax, %eax
 ; X86-NEXT: setne %al
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: length5_eq:
-; X64: # BB#0:
-; X64-NEXT: pushq %rax
-; X64-NEXT: movl $5, %edx
-; X64-NEXT: callq memcmp
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movl (%rdi), %eax
+; X64-NEXT: cmpl (%rsi), %eax
+; X64-NEXT: jne .LBB10_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: movb 4(%rdi), %cl
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpb 4(%rsi), %cl
+; X64-NEXT: je .LBB10_3
+; X64-NEXT: .LBB10_1: # %res_block
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: .LBB10_3: # %endblock
 ; X64-NEXT: testl %eax, %eax
 ; X64-NEXT: setne %al
-; X64-NEXT: popq %rcx
 ; X64-NEXT: retq
 %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 5) nounwind
 %c = icmp ne i32 %m, 0
@@ -324,13 +368,33 @@ define i1 @length5_eq(i8* %X, i8* %Y) nounwind {
 define i32 @length8(i8* %X, i8* %Y) nounwind {
 ; X86-LABEL: length8:
-; X86: # BB#0:
-; X86-NEXT: pushl $0
-; X86-NEXT: pushl $8
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: calll memcmp
-; X86-NEXT: addl $16, %esp
+; X86: # BB#0: # %loadbb
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl (%esi), %ecx
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: bswapl %edx
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: jne .LBB11_1
+; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: movl 4(%esi), %ecx
+; X86-NEXT: movl 4(%eax), %edx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: bswapl %edx
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: jne .LBB11_1
+; X86-NEXT: # BB#3: # %endblock
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+; X86-NEXT: .LBB11_1: # %res_block
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: movl $-1, %ecx
+; X86-NEXT: movl $1, %eax
+; X86-NEXT: cmovbl %ecx, %eax
+; X86-NEXT: popl %esi
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: length8:
@@ -352,13 +416,20 @@ define i32 @length8(i8* %X, i8* %Y) nounwind {
 define i1 @length8_eq(i8* %X, i8* %Y) nounwind {
 ; X86-LABEL: length8_eq:
-; X86: # BB#0:
-; X86-NEXT: pushl $0
-; X86-NEXT: pushl $8
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: calll memcmp
-; X86-NEXT: addl $16, %esp
+; X86: # BB#0: # %loadbb
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: cmpl (%ecx), %edx
+; X86-NEXT: jne .LBB12_1
+; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: movl 4(%eax), %edx
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: cmpl 4(%ecx), %edx
+; X86-NEXT: je .LBB12_3
+; X86-NEXT: .LBB12_1: # %res_block
+; X86-NEXT: movl $1, %eax
+; X86-NEXT: .LBB12_3: # %endblock
 ; X86-NEXT: testl %eax, %eax
 ; X86-NEXT: sete %al
 ; X86-NEXT: retl
@@ -376,13 +447,17 @@ define i1 @length8_eq(i8* %X, i8* %Y) nounwind {
 define i1 @length8_eq_const(i8* %X) nounwind {
 ; X86-LABEL: length8_eq_const:
-; X86: # BB#0:
-; X86-NEXT: pushl $0
-; X86-NEXT: pushl $8
-; X86-NEXT: pushl $.L.str
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: calll memcmp
-; X86-NEXT: addl $16, %esp
+; X86: # BB#0: # %loadbb
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: cmpl $858927408, (%ecx) # imm = 0x33323130
+; X86-NEXT: jne .LBB13_1
+; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: cmpl $926299444, 4(%ecx) # imm = 0x37363534
+; X86-NEXT: je .LBB13_3
+; X86-NEXT: .LBB13_1: # %res_block
+; X86-NEXT: movl $1, %eax
+; X86-NEXT: .LBB13_3: # %endblock
 ; X86-NEXT: testl %eax, %eax
 ; X86-NEXT: setne %al
 ; X86-NEXT: retl
@@ -400,25 +475,43 @@ define i1 @length8_eq_const(i8* %X) nounwind {
 define i1 @length12_eq(i8* %X, i8* %Y) nounwind {
 ; X86-LABEL: length12_eq:
-; X86: # BB#0:
-; X86-NEXT: pushl $0
-; X86-NEXT: pushl $12
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: calll memcmp
-; X86-NEXT: addl $16, %esp
-; X86-NEXT: testl %eax, %eax
+; X86: # BB#0: # %loadbb
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl (%ecx), %edx
+; X86-NEXT: cmpl (%eax), %edx
+; X86-NEXT: jne .LBB14_1
+; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: movl 4(%ecx), %edx
+; X86-NEXT: cmpl 4(%eax), %edx
+; X86-NEXT: jne .LBB14_1
+; X86-NEXT: # BB#3: # %loadbb2
+; X86-NEXT: movl 8(%ecx), %edx
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: cmpl 8(%eax), %edx
+; X86-NEXT: je .LBB14_4
+; X86-NEXT: .LBB14_1: # %res_block
+; X86-NEXT: movl $1, %ecx
+; X86-NEXT: .LBB14_4: # %endblock
+; X86-NEXT: testl %ecx, %ecx
 ; X86-NEXT: setne %al
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: length12_eq:
-; X64: # BB#0:
-; X64-NEXT: pushq %rax
-; X64-NEXT: movl $12, %edx
-; X64-NEXT: callq memcmp
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movq (%rdi), %rax
+; X64-NEXT: cmpq (%rsi), %rax
+; X64-NEXT: jne .LBB14_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: movl 8(%rdi), %ecx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpl 8(%rsi), %ecx
+; X64-NEXT: je .LBB14_3
+; X64-NEXT: .LBB14_1: # %res_block
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: .LBB14_3: # %endblock
 ; X64-NEXT: testl %eax, %eax
 ; X64-NEXT: setne %al
-; X64-NEXT: popq %rcx
 ; X64-NEXT: retq
 %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 12) nounwind
 %c = icmp ne i32 %m, 0
@@ -427,19 +520,66 @@ define i1 @length12_eq(i8* %X, i8* %Y) nounwind {
 define i32 @length12(i8* %X, i8* %Y) nounwind {
 ; X86-LABEL: length12:
-; X86: # BB#0:
-; X86-NEXT: pushl $0
-; X86-NEXT: pushl $12
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: calll memcmp
-; X86-NEXT: addl $16, %esp
+; X86: # BB#0: # %loadbb
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl (%esi), %ecx
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: bswapl %edx
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: jne .LBB15_1
+; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: movl 4(%esi), %ecx
+; X86-NEXT: movl 4(%eax), %edx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: bswapl %edx
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: jne .LBB15_1
+; X86-NEXT: # BB#3: # %loadbb2
+; X86-NEXT: movl 8(%esi), %ecx
+; X86-NEXT: movl 8(%eax), %edx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: bswapl %edx
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: jne .LBB15_1
+; X86-NEXT: # BB#4: # %endblock
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+; X86-NEXT: .LBB15_1: # %res_block
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: movl $-1, %ecx
+; X86-NEXT: movl $1, %eax
+; X86-NEXT: cmovbl %ecx, %eax
+; X86-NEXT: popl %esi
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: length12:
-; X64: # BB#0:
-; X64-NEXT: movl $12, %edx
-; X64-NEXT: jmp memcmp # TAILCALL
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movq (%rdi), %rcx
+; X64-NEXT: movq (%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: jne .LBB15_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: movl 8(%rdi), %ecx
+; X64-NEXT: movl 8(%rsi), %edx
+; X64-NEXT: bswapl %ecx
+; X64-NEXT: bswapl %edx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: jne .LBB15_1
+; X64-NEXT: # BB#3: # %endblock
+; X64-NEXT: retq
+; X64-NEXT: .LBB15_1: # %res_block
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: movl $-1, %ecx
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: cmovbl %ecx, %eax
+; X64-NEXT: retq
 %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 12) nounwind
 ret i32 %m
 }
@@ -448,111 +588,165 @@ define i32 @length12(i8* %X, i8* %Y) nounwind {
 define i32 @length16(i8* %X, i8* %Y) nounwind {
 ; X86-LABEL: length16:
-; X86: # BB#0:
-; X86-NEXT: pushl $0
-; X86-NEXT: pushl $16
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: calll memcmp
-; X86-NEXT: addl $16, %esp
+; X86: # BB#0: # %loadbb
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl (%esi), %ecx
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: bswapl %edx
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: jne .LBB16_1
+; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: movl 4(%esi), %ecx
+; X86-NEXT: movl 4(%eax), %edx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: bswapl %edx
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: jne .LBB16_1
+; X86-NEXT: # BB#3: # %loadbb2
+; X86-NEXT: movl 8(%esi), %ecx
+; X86-NEXT: movl 8(%eax), %edx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: bswapl %edx
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: jne .LBB16_1
+; X86-NEXT: # BB#4: # %loadbb3
+; X86-NEXT: movl 12(%esi), %ecx
+; X86-NEXT: movl 12(%eax), %edx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: bswapl %edx
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: jne .LBB16_1
+; X86-NEXT: # BB#5: # %endblock
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+; X86-NEXT: .LBB16_1: # %res_block
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: movl $-1, %ecx
+; X86-NEXT: movl $1, %eax
+; X86-NEXT: cmovbl %ecx, %eax
+; X86-NEXT: popl %esi
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: length16:
-; X64: # BB#0:
-; X64-NEXT: movl $16, %edx
-; X64-NEXT: jmp memcmp # TAILCALL
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movq (%rdi), %rcx
+; X64-NEXT: movq (%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: jne .LBB16_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: movq 8(%rdi), %rcx
+; X64-NEXT: movq 8(%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: jne .LBB16_1
+; X64-NEXT: # BB#3: # %endblock
+; X64-NEXT: retq
+; X64-NEXT: .LBB16_1: # %res_block
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: movl $-1, %ecx
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: cmovbl %ecx, %eax
+; X64-NEXT: retq
 %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 16) nounwind
 ret i32 %m
 }
 define i1 @length16_eq(i8* %x, i8* %y) nounwind {
-; X86-NOSSE-LABEL: length16_eq:
-; X86-NOSSE: # BB#0:
-; X86-NOSSE-NEXT: pushl $0
-; X86-NOSSE-NEXT: pushl $16
-; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NOSSE-NEXT: calll memcmp
-; X86-NOSSE-NEXT: addl $16, %esp
-; X86-NOSSE-NEXT: testl %eax, %eax
-; X86-NOSSE-NEXT: setne %al
-; X86-NOSSE-NEXT: retl
-;
-; X86-SSE2-LABEL: length16_eq:
-; X86-SSE2: # BB#0:
-; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-SSE2-NEXT: movdqu (%ecx), %xmm0
-; X86-SSE2-NEXT: movdqu (%eax), %xmm1
-; X86-SSE2-NEXT: pcmpeqb %xmm0, %xmm1
-; X86-SSE2-NEXT: pmovmskb %xmm1, %eax
-; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
-; X86-SSE2-NEXT: setne %al
-; X86-SSE2-NEXT: retl
-;
-; X64-SSE2-LABEL: length16_eq:
-; X64-SSE2: # BB#0:
-; X64-SSE2-NEXT: movdqu (%rsi), %xmm0
-; X64-SSE2-NEXT: movdqu (%rdi), %xmm1
-; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm1
-; X64-SSE2-NEXT: pmovmskb %xmm1, %eax
-; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
-; X64-SSE2-NEXT: setne %al
-; X64-SSE2-NEXT: retq
+; X86-LABEL: length16_eq:
+; X86: # BB#0: # %loadbb
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl (%ecx), %edx
+; X86-NEXT: cmpl (%eax), %edx
+; X86-NEXT: jne .LBB17_1
+; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: movl 4(%ecx), %edx
+; X86-NEXT: cmpl 4(%eax), %edx
+; X86-NEXT: jne .LBB17_1
+; X86-NEXT: # BB#3: # %loadbb2
+; X86-NEXT: movl 8(%ecx), %edx
+; X86-NEXT: cmpl 8(%eax), %edx
+; X86-NEXT: jne .LBB17_1
+; X86-NEXT: # BB#4: # %loadbb3
+; X86-NEXT: movl 12(%ecx), %edx
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: cmpl 12(%eax), %edx
+; X86-NEXT: je .LBB17_5
+; X86-NEXT: .LBB17_1: # %res_block
+; X86-NEXT: movl $1, %ecx
+; X86-NEXT: .LBB17_5: # %endblock
+; X86-NEXT: testl %ecx, %ecx
+; X86-NEXT: setne %al
+; X86-NEXT: retl
 ;
-; X64-AVX2-LABEL: length16_eq:
-; X64-AVX2: # BB#0:
-; X64-AVX2-NEXT: vmovdqu (%rdi), %xmm0
-; X64-AVX2-NEXT: vpcmpeqb (%rsi), %xmm0, %xmm0
-; X64-AVX2-NEXT: vpmovmskb %xmm0, %eax
-; X64-AVX2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
-; X64-AVX2-NEXT: setne %al
-; X64-AVX2-NEXT: retq
+; X64-LABEL: length16_eq:
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movq (%rdi), %rax
+; X64-NEXT: cmpq (%rsi), %rax
+; X64-NEXT: jne .LBB17_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: movq 8(%rdi), %rcx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpq 8(%rsi), %rcx
+; X64-NEXT: je .LBB17_3
+; X64-NEXT: .LBB17_1: # %res_block
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: .LBB17_3: # %endblock
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: setne %al
+; X64-NEXT: retq
 %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 16) nounwind
 %cmp = icmp ne i32 %call, 0
 ret i1 %cmp
 }
 define i1 @length16_eq_const(i8* %X) nounwind {
-; X86-NOSSE-LABEL: length16_eq_const:
-; X86-NOSSE: # BB#0:
-; X86-NOSSE-NEXT: pushl $0
-; X86-NOSSE-NEXT: pushl $16
-; X86-NOSSE-NEXT: pushl $.L.str
-; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NOSSE-NEXT: calll memcmp
-; X86-NOSSE-NEXT: addl $16, %esp
-; X86-NOSSE-NEXT: testl %eax, %eax
-; X86-NOSSE-NEXT: sete %al
-; X86-NOSSE-NEXT: retl
-;
-; X86-SSE2-LABEL: length16_eq_const:
-; X86-SSE2: # BB#0:
-; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-SSE2-NEXT: movdqu (%eax), %xmm0
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
-; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
-; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
-; X86-SSE2-NEXT: sete %al
-; X86-SSE2-NEXT: retl
-;
-; X64-SSE2-LABEL: length16_eq_const:
-; X64-SSE2: # BB#0:
-; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
-; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm0
-; X64-SSE2-NEXT: pmovmskb %xmm0, %eax
-; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
-; X64-SSE2-NEXT: sete %al
-; X64-SSE2-NEXT: retq
+; X86-LABEL: length16_eq_const:
+; X86: # BB#0: # %loadbb
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cmpl $858927408, (%eax) # imm = 0x33323130
+; X86-NEXT: jne .LBB18_1
+; X86-NEXT: # BB#2: # %loadbb1
+; X86-NEXT: cmpl $926299444, 4(%eax) # imm = 0x37363534
+; X86-NEXT: jne .LBB18_1
+; X86-NEXT: # BB#3: # %loadbb2
+; X86-NEXT: cmpl $825243960, 8(%eax) # imm = 0x31303938
+; X86-NEXT: jne .LBB18_1
+; X86-NEXT: # BB#4: # %loadbb3
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: cmpl $892613426, 12(%eax) # imm = 0x35343332
+; X86-NEXT: je .LBB18_5
+; X86-NEXT: .LBB18_1: # %res_block
+; X86-NEXT: movl $1, %ecx
+; X86-NEXT: .LBB18_5: # %endblock
+; X86-NEXT: testl %ecx, %ecx
+; X86-NEXT: sete %al
+; X86-NEXT: retl
 ;
-; X64-AVX2-LABEL: length16_eq_const:
-; X64-AVX2: # BB#0:
-; X64-AVX2-NEXT: vmovdqu (%rdi), %xmm0
-; X64-AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %xmm0, %xmm0
-; X64-AVX2-NEXT: vpmovmskb %xmm0, %eax
-; X64-AVX2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
-; X64-AVX2-NEXT: sete %al
-; X64-AVX2-NEXT: retq
+; X64-LABEL: length16_eq_const:
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movabsq $3978425819141910832, %rax # imm = 0x3736353433323130
+; X64-NEXT: cmpq %rax, (%rdi)
+; X64-NEXT: jne .LBB18_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: movabsq $3833745473465760056, %rcx # imm = 0x3534333231303938
+; X64-NEXT: cmpq %rcx, 8(%rdi)
+; X64-NEXT: je .LBB18_3
+; X64-NEXT: .LBB18_1: # %res_block
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: .LBB18_3: # %endblock
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: sete %al
+; X64-NEXT: retq
 %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 16) nounwind
 %c = icmp eq i32 %m, 0
 ret i1 %c
@@ -570,9 +764,43 @@ define i32 @length32(i8* %X, i8* %Y) nounwind {
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: length32:
-; X64: # BB#0:
-; X64-NEXT: movl $32, %edx
-; X64-NEXT: jmp memcmp # TAILCALL
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movq (%rdi), %rcx
+; X64-NEXT: movq (%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: jne .LBB19_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: movq 8(%rdi), %rcx
+; X64-NEXT: movq 8(%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: jne .LBB19_1
+; X64-NEXT: # BB#3: # %loadbb2
+; X64-NEXT: movq 16(%rdi), %rcx
+; X64-NEXT: movq 16(%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: jne .LBB19_1
+; X64-NEXT: # BB#4: # %loadbb3
+; X64-NEXT: movq 24(%rdi), %rcx
+; X64-NEXT: movq 24(%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: jne .LBB19_1
+; X64-NEXT: # BB#5: # %endblock
+; X64-NEXT: retq
+; X64-NEXT: .LBB19_1: # %res_block
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: movl $-1, %ecx
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: cmovbl %ecx, %eax
+; X64-NEXT: retq
 %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 32) nounwind
 ret i32 %m
 }
@@ -592,25 +820,30 @@ define i1 @length32_eq(i8* %x, i8* %y) nounwind {
 ; X86-NEXT: sete %al
 ; X86-NEXT: retl
 ;
-; X64-SSE2-LABEL: length32_eq:
-; X64-SSE2: # BB#0:
-; X64-SSE2-NEXT: pushq %rax
-; X64-SSE2-NEXT: movl $32, %edx
-; X64-SSE2-NEXT: callq memcmp
-; X64-SSE2-NEXT: testl %eax, %eax
-; X64-SSE2-NEXT: sete %al
-; X64-SSE2-NEXT: popq %rcx
-; X64-SSE2-NEXT: retq
-;
-; X64-AVX2-LABEL: length32_eq:
-; X64-AVX2: # BB#0:
-; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
-; X64-AVX2-NEXT: vpcmpeqb (%rsi), %ymm0, %ymm0
-; X64-AVX2-NEXT: vpmovmskb %ymm0, %eax
-; X64-AVX2-NEXT: cmpl $-1, %eax
-; X64-AVX2-NEXT: sete %al
-; X64-AVX2-NEXT: vzeroupper
-; X64-AVX2-NEXT: retq
+; X64-LABEL: length32_eq:
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movq (%rdi), %rax
+; X64-NEXT: cmpq (%rsi), %rax
+; X64-NEXT: jne .LBB20_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: movq 8(%rdi), %rax
+; X64-NEXT: cmpq 8(%rsi), %rax
+; X64-NEXT: jne .LBB20_1
+; X64-NEXT: # BB#3: # %loadbb2
+; X64-NEXT: movq 16(%rdi), %rax
+; X64-NEXT: cmpq 16(%rsi), %rax
+; X64-NEXT: jne .LBB20_1
+; X64-NEXT: # BB#4: # %loadbb3
+; X64-NEXT: movq 24(%rdi), %rcx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpq 24(%rsi), %rcx
+; X64-NEXT: je .LBB20_5
+; X64-NEXT: .LBB20_1: # %res_block
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: .LBB20_5: # %endblock
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: sete %al
+; X64-NEXT: retq
 %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 32) nounwind
 %cmp = icmp eq i32 %call, 0
 ret i1 %cmp
@@ -629,26 +862,30 @@ define i1 @length32_eq_const(i8* %X) nounwind {
 ; X86-NEXT: setne %al
 ; X86-NEXT: retl
 ;
-; X64-SSE2-LABEL: length32_eq_const:
-; X64-SSE2: # BB#0:
-; X64-SSE2-NEXT: pushq %rax
-; X64-SSE2-NEXT: movl $.L.str, %esi
-; X64-SSE2-NEXT: movl $32, %edx
-; X64-SSE2-NEXT: callq memcmp
-; X64-SSE2-NEXT: testl %eax, %eax
-; X64-SSE2-NEXT: setne %al
-; X64-SSE2-NEXT: popq %rcx
-; X64-SSE2-NEXT: retq
-;
-; X64-AVX2-LABEL: length32_eq_const:
-; X64-AVX2: # BB#0:
-; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
-; X64-AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %ymm0, %ymm0
-; X64-AVX2-NEXT: vpmovmskb %ymm0, %eax
-; X64-AVX2-NEXT: cmpl $-1, %eax
-; X64-AVX2-NEXT: setne %al
-; X64-AVX2-NEXT: vzeroupper
-; X64-AVX2-NEXT: retq
+; X64-LABEL: length32_eq_const:
+; X64: # BB#0: # %loadbb
+; X64-NEXT: movabsq $3978425819141910832, %rax # imm = 0x3736353433323130
+; X64-NEXT: cmpq %rax, (%rdi)
+; X64-NEXT: jne .LBB21_1
+; X64-NEXT: # BB#2: # %loadbb1
+; X64-NEXT: movabsq $3833745473465760056, %rax # imm = 0x3534333231303938
+; X64-NEXT: cmpq %rax, 8(%rdi)
+; X64-NEXT: jne .LBB21_1
+; X64-NEXT: # BB#3: # %loadbb2
+; X64-NEXT: movabsq $3689065127958034230, %rax # imm = 0x3332313039383736
+; X64-NEXT: cmpq %rax, 16(%rdi)
+; X64-NEXT: jne .LBB21_1
+; X64-NEXT: # BB#4: # %loadbb3
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: movabsq $3544395820347831604, %rcx # imm = 0x3130393837363534
+; X64-NEXT: cmpq %rcx, 24(%rdi)
+; X64-NEXT: je .LBB21_5
+; X64-NEXT: .LBB21_1: # %res_block
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: .LBB21_5: # %endblock
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: setne %al
+; X64-NEXT: retq
 %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 32) nounwind
 %c = icmp ne i32 %m, 0
 ret i1 %c
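Editor's note (not part of the upstream commit): the new CHECK lines above all follow the same shape. The memcmp call is expanded into a chain of chunk comparisons: each %loadbb block loads one chunk from both buffers, byte-swaps when a three-way result is needed (memcmp's ordering is big-endian byte order), and branches to %res_block on the first mismatch, where -1 or 1 is selected with cmov; %endblock returns 0. The C sketch below models that strategy for the 16-byte case. It is illustrative only: the function names are made up, __builtin_bswap64 assumes GCC/Clang, and memcpy stands in for the unaligned movq loads the backend emits.

#include <stdint.h>
#include <string.h>

/* Big-endian 8-byte chunk load: movq + bswapq in the generated code. */
static uint64_t load_be64(const unsigned char *p) {
    uint64_t v;
    memcpy(&v, p, sizeof v);     /* unaligned load */
    return __builtin_bswap64(v);
}

/* Three-way compare, as in @length16: loadbb / loadbb1 / res_block. */
int memcmp16_expanded(const void *x, const void *y) {
    const unsigned char *a = x, *b = y;
    for (int off = 0; off < 16; off += 8) {       /* one iteration per %loadbb */
        uint64_t va = load_be64(a + off);
        uint64_t vb = load_be64(b + off);
        if (va != vb)                             /* %res_block */
            return va < vb ? -1 : 1;              /* cmovbl of -1 over 1 */
    }
    return 0;                                     /* %endblock */
}

/* Equality-only form, as in @length16_eq: no bswap is needed because
   only zero/nonzero is observed, not the ordering. */
int memcmp16_eq_expanded(const void *x, const void *y) {
    const unsigned char *a = x, *b = y;
    uint64_t va, vb;
    for (int off = 0; off < 16; off += 8) {
        memcpy(&va, a + off, 8);
        memcpy(&vb, b + off, 8);
        if (va != vb)
            return 1;    /* %res_block: any difference */
    }
    return 0;            /* %endblock: all chunks equal */
}

The -mattr=cmov change to the i686 RUN line fits the same picture: the three-way variant of this expansion relies on cmov to materialize -1/1 without extra branches.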