; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt < %s -instcombine -S | FileCheck %s target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" declare <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>, <4 x float>, <4 x float>, i8, i32) define <4 x float> @test_add_ss(<4 x float> %a, <4 x float> %b) { ; CHECK-LABEL: @test_add_ss( ; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0 ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i32 0 ; CHECK-NEXT: [[TMP3:%.*]] = fadd float [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x float> [[A]], float [[TMP3]], i64 0 ; CHECK-NEXT: ret <4 x float> [[TMP4]] ; %1 = insertelement <4 x float> %b, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = tail call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float> %a, <4 x float> %3, <4 x float> undef, i8 -1, i32 4) ret <4 x float> %4 } define <4 x float> @test_add_ss_round(<4 x float> %a, <4 x float> %b) { ; CHECK-LABEL: @test_add_ss_round( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> undef, i8 -1, i32 8) ; CHECK-NEXT: ret <4 x float> [[TMP1]] ; %1 = insertelement <4 x float> %b, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = tail call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float> %a, <4 x float> %3, <4 x float> undef, i8 -1, i32 8) ret <4 x float> %4 } define <4 x float> @test_add_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) { ; CHECK-LABEL: @test_add_ss_mask( ; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0 ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0 ; CHECK-NEXT: [[TMP3:%.*]] = fadd float [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = extractelement <8 x i1> [[TMP4]], i64 0 ; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x float> [[C:%.*]], i32 0 ; CHECK-NEXT: [[TMP7:%.*]] = select i1 [[TMP5]], float [[TMP3]], float [[TMP6]] ; CHECK-NEXT: [[TMP8:%.*]] = insertelement <4 x float> [[A]], float [[TMP7]], i64 0 ; CHECK-NEXT: ret <4 x float> [[TMP8]] ; %1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = tail call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float> %a, <4 x float> %b, <4 x float> %3, i8 %mask, i32 4) ret <4 x float> %4 } define <4 x float> @test_add_ss_mask_round(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) { ; CHECK-LABEL: @test_add_ss_mask_round( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]], i32 8) ; CHECK-NEXT: ret <4 x float> [[TMP1]] ; %1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = tail call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float> %a, <4 x float> %b, <4 x float> %3, i8 %mask, i32 8) ret <4 x float> %4 } define float @test_add_ss_1(float %a, float %b) { ; CHECK-LABEL: @test_add_ss_1( ; CHECK-NEXT: 
ret float 1.000000e+00 ; %1 = insertelement <4 x float> undef, float %a, i32 0 %2 = insertelement <4 x float> %1, float 1.000000e+00, i32 1 %3 = insertelement <4 x float> %2, float 2.000000e+00, i32 2 %4 = insertelement <4 x float> %3, float 3.000000e+00, i32 3 %5 = insertelement <4 x float> undef, float %b, i32 0 %6 = insertelement <4 x float> %5, float 4.000000e+00, i32 1 %7 = insertelement <4 x float> %6, float 5.000000e+00, i32 2 %8 = insertelement <4 x float> %7, float 6.000000e+00, i32 3 %9 = tail call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float> %4, <4 x float> %8, <4 x float> undef, i8 -1, i32 8) %10 = extractelement <4 x float> %9, i32 1 ret float %10 } declare <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>, <2 x double>, <2 x double>, i8, i32) define <2 x double> @test_add_sd(<2 x double> %a, <2 x double> %b) { ; CHECK-LABEL: @test_add_sd( ; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[A:%.*]], i64 0 ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0 ; CHECK-NEXT: [[TMP3:%.*]] = fadd double [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x double> [[A]], double [[TMP3]], i64 0 ; CHECK-NEXT: ret <2 x double> [[TMP4]] ; %1 = insertelement <2 x double> %b, double 1.000000e+00, i32 1 %2 = tail call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double> %a, <2 x double> %1, <2 x double> undef, i8 -1, i32 4) ret <2 x double> %2 } define <2 x double> @test_add_sd_round(<2 x double> %a, <2 x double> %b) { ; CHECK-LABEL: @test_add_sd_round( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> undef, i8 -1, i32 8) ; CHECK-NEXT: ret <2 x double> [[TMP1]] ; %1 = insertelement <2 x double> %b, double 1.000000e+00, i32 1 %2 = tail call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double> %a, <2 x double> %1, <2 x double> undef, i8 -1, i32 8) ret <2 x double> %2 } define <2 x double> @test_add_sd_mask(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) { ; CHECK-LABEL: @test_add_sd_mask( ; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[A:%.*]], i64 0 ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0 ; CHECK-NEXT: [[TMP3:%.*]] = fadd double [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = extractelement <8 x i1> [[TMP4]], i64 0 ; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0 ; CHECK-NEXT: [[TMP7:%.*]] = select i1 [[TMP5]], double [[TMP3]], double [[TMP6]] ; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x double> [[A]], double [[TMP7]], i64 0 ; CHECK-NEXT: ret <2 x double> [[TMP8]] ; %1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1 %2 = tail call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double> %a, <2 x double> %b, <2 x double> %1, i8 %mask, i32 4) ret <2 x double> %2 } define <2 x double> @test_add_sd_mask_round(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) { ; CHECK-LABEL: @test_add_sd_mask_round( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 8) ; CHECK-NEXT: ret <2 x double> [[TMP1]] ; %1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1 %2 = tail call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double> %a, <2 x double> %b, <2 x double> %1, i8 %mask, i32 8) ret <2 x double> %2 } define 
double @test_add_sd_1(double %a, double %b) { ; CHECK-LABEL: @test_add_sd_1( ; CHECK-NEXT: ret double 1.000000e+00 ; %1 = insertelement <2 x double> undef, double %a, i32 0 %2 = insertelement <2 x double> %1, double 1.000000e+00, i32 1 %3 = insertelement <2 x double> undef, double %b, i32 0 %4 = insertelement <2 x double> %3, double 2.000000e+00, i32 1 %5 = tail call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double> %2, <2 x double> %4, <2 x double> undef, i8 -1, i32 8) %6 = extractelement <2 x double> %5, i32 1 ret double %6 } declare <4 x float> @llvm.x86.avx512.mask.sub.ss.round(<4 x float>, <4 x float>, <4 x float>, i8, i32) define <4 x float> @test_sub_ss(<4 x float> %a, <4 x float> %b) { ; CHECK-LABEL: @test_sub_ss( ; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0 ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i32 0 ; CHECK-NEXT: [[TMP3:%.*]] = fsub float [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x float> [[A]], float [[TMP3]], i64 0 ; CHECK-NEXT: ret <4 x float> [[TMP4]] ; %1 = insertelement <4 x float> %b, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = tail call <4 x float> @llvm.x86.avx512.mask.sub.ss.round(<4 x float> %a, <4 x float> %3, <4 x float> undef, i8 -1, i32 4) ret <4 x float> %4 } define <4 x float> @test_sub_ss_round(<4 x float> %a, <4 x float> %b) { ; CHECK-LABEL: @test_sub_ss_round( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.sub.ss.round(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> undef, i8 -1, i32 8) ; CHECK-NEXT: ret <4 x float> [[TMP1]] ; %1 = insertelement <4 x float> %b, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = tail call <4 x float> @llvm.x86.avx512.mask.sub.ss.round(<4 x float> %a, <4 x float> %3, <4 x float> undef, i8 -1, i32 8) ret <4 x float> %4 } define <4 x float> @test_sub_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) { ; CHECK-LABEL: @test_sub_ss_mask( ; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0 ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0 ; CHECK-NEXT: [[TMP3:%.*]] = fsub float [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = extractelement <8 x i1> [[TMP4]], i64 0 ; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x float> [[C:%.*]], i32 0 ; CHECK-NEXT: [[TMP7:%.*]] = select i1 [[TMP5]], float [[TMP3]], float [[TMP6]] ; CHECK-NEXT: [[TMP8:%.*]] = insertelement <4 x float> [[A]], float [[TMP7]], i64 0 ; CHECK-NEXT: ret <4 x float> [[TMP8]] ; %1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = tail call <4 x float> @llvm.x86.avx512.mask.sub.ss.round(<4 x float> %a, <4 x float> %b, <4 x float> %3, i8 %mask, i32 4) ret <4 x float> %4 } define <4 x float> @test_sub_ss_mask_round(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) { ; CHECK-LABEL: @test_sub_ss_mask_round( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.sub.ss.round(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]], i32 8) ; CHECK-NEXT: ret <4 x float> [[TMP1]] ; %1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1 %2 
= insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = tail call <4 x float> @llvm.x86.avx512.mask.sub.ss.round(<4 x float> %a, <4 x float> %b, <4 x float> %3, i8 %mask, i32 8) ret <4 x float> %4 } define float @test_sub_ss_1(float %a, float %b) { ; CHECK-LABEL: @test_sub_ss_1( ; CHECK-NEXT: ret float 1.000000e+00 ; %1 = insertelement <4 x float> undef, float %a, i32 0 %2 = insertelement <4 x float> %1, float 1.000000e+00, i32 1 %3 = insertelement <4 x float> %2, float 2.000000e+00, i32 2 %4 = insertelement <4 x float> %3, float 3.000000e+00, i32 3 %5 = insertelement <4 x float> undef, float %b, i32 0 %6 = insertelement <4 x float> %5, float 4.000000e+00, i32 1 %7 = insertelement <4 x float> %6, float 5.000000e+00, i32 2 %8 = insertelement <4 x float> %7, float 6.000000e+00, i32 3 %9 = tail call <4 x float> @llvm.x86.avx512.mask.sub.ss.round(<4 x float> %4, <4 x float> %8, <4 x float> undef, i8 -1, i32 8) %10 = extractelement <4 x float> %9, i32 1 ret float %10 } declare <2 x double> @llvm.x86.avx512.mask.sub.sd.round(<2 x double>, <2 x double>, <2 x double>, i8, i32) define <2 x double> @test_sub_sd(<2 x double> %a, <2 x double> %b) { ; CHECK-LABEL: @test_sub_sd( ; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[A:%.*]], i64 0 ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0 ; CHECK-NEXT: [[TMP3:%.*]] = fsub double [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x double> [[A]], double [[TMP3]], i64 0 ; CHECK-NEXT: ret <2 x double> [[TMP4]] ; %1 = insertelement <2 x double> %b, double 1.000000e+00, i32 1 %2 = tail call <2 x double> @llvm.x86.avx512.mask.sub.sd.round(<2 x double> %a, <2 x double> %1, <2 x double> undef, i8 -1, i32 4) ret <2 x double> %2 } define <2 x double> @test_sub_sd_round(<2 x double> %a, <2 x double> %b) { ; CHECK-LABEL: @test_sub_sd_round( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.sub.sd.round(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> undef, i8 -1, i32 8) ; CHECK-NEXT: ret <2 x double> [[TMP1]] ; %1 = insertelement <2 x double> %b, double 1.000000e+00, i32 1 %2 = tail call <2 x double> @llvm.x86.avx512.mask.sub.sd.round(<2 x double> %a, <2 x double> %1, <2 x double> undef, i8 -1, i32 8) ret <2 x double> %2 } define <2 x double> @test_sub_sd_mask(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) { ; CHECK-LABEL: @test_sub_sd_mask( ; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[A:%.*]], i64 0 ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0 ; CHECK-NEXT: [[TMP3:%.*]] = fsub double [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = extractelement <8 x i1> [[TMP4]], i64 0 ; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0 ; CHECK-NEXT: [[TMP7:%.*]] = select i1 [[TMP5]], double [[TMP3]], double [[TMP6]] ; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x double> [[A]], double [[TMP7]], i64 0 ; CHECK-NEXT: ret <2 x double> [[TMP8]] ; %1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1 %2 = tail call <2 x double> @llvm.x86.avx512.mask.sub.sd.round(<2 x double> %a, <2 x double> %b, <2 x double> %1, i8 %mask, i32 4) ret <2 x double> %2 } define <2 x double> @test_sub_sd_mask_round(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) { ; CHECK-LABEL: @test_sub_sd_mask_round( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.sub.sd.round(<2 
x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 8) ; CHECK-NEXT: ret <2 x double> [[TMP1]] ; %1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1 %2 = tail call <2 x double> @llvm.x86.avx512.mask.sub.sd.round(<2 x double> %a, <2 x double> %b, <2 x double> %1, i8 %mask, i32 8) ret <2 x double> %2 } define double @test_sub_sd_1(double %a, double %b) { ; CHECK-LABEL: @test_sub_sd_1( ; CHECK-NEXT: ret double 1.000000e+00 ; %1 = insertelement <2 x double> undef, double %a, i32 0 %2 = insertelement <2 x double> %1, double 1.000000e+00, i32 1 %3 = insertelement <2 x double> undef, double %b, i32 0 %4 = insertelement <2 x double> %3, double 2.000000e+00, i32 1 %5 = tail call <2 x double> @llvm.x86.avx512.mask.sub.sd.round(<2 x double> %2, <2 x double> %4, <2 x double> undef, i8 -1, i32 8) %6 = extractelement <2 x double> %5, i32 1 ret double %6 } declare <4 x float> @llvm.x86.avx512.mask.mul.ss.round(<4 x float>, <4 x float>, <4 x float>, i8, i32) define <4 x float> @test_mul_ss(<4 x float> %a, <4 x float> %b) { ; CHECK-LABEL: @test_mul_ss( ; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0 ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i32 0 ; CHECK-NEXT: [[TMP3:%.*]] = fmul float [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x float> [[A]], float [[TMP3]], i64 0 ; CHECK-NEXT: ret <4 x float> [[TMP4]] ; %1 = insertelement <4 x float> %b, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = tail call <4 x float> @llvm.x86.avx512.mask.mul.ss.round(<4 x float> %a, <4 x float> %3, <4 x float> undef, i8 -1, i32 4) ret <4 x float> %4 } define <4 x float> @test_mul_ss_round(<4 x float> %a, <4 x float> %b) { ; CHECK-LABEL: @test_mul_ss_round( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.mul.ss.round(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> undef, i8 -1, i32 8) ; CHECK-NEXT: ret <4 x float> [[TMP1]] ; %1 = insertelement <4 x float> %b, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = tail call <4 x float> @llvm.x86.avx512.mask.mul.ss.round(<4 x float> %a, <4 x float> %3, <4 x float> undef, i8 -1, i32 8) ret <4 x float> %4 } define <4 x float> @test_mul_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) { ; CHECK-LABEL: @test_mul_ss_mask( ; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0 ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0 ; CHECK-NEXT: [[TMP3:%.*]] = fmul float [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = extractelement <8 x i1> [[TMP4]], i64 0 ; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x float> [[C:%.*]], i32 0 ; CHECK-NEXT: [[TMP7:%.*]] = select i1 [[TMP5]], float [[TMP3]], float [[TMP6]] ; CHECK-NEXT: [[TMP8:%.*]] = insertelement <4 x float> [[A]], float [[TMP7]], i64 0 ; CHECK-NEXT: ret <4 x float> [[TMP8]] ; %1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = tail call <4 x float> @llvm.x86.avx512.mask.mul.ss.round(<4 x float> %a, <4 x float> %b, <4 x float> %3, i8 %mask, i32 4) ret <4 x float> %4 } define <4 x float> @test_mul_ss_mask_round(<4 x float> %a, <4 x 
float> %b, <4 x float> %c, i8 %mask) { ; CHECK-LABEL: @test_mul_ss_mask_round( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.mul.ss.round(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]], i32 8) ; CHECK-NEXT: ret <4 x float> [[TMP1]] ; %1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = tail call <4 x float> @llvm.x86.avx512.mask.mul.ss.round(<4 x float> %a, <4 x float> %b, <4 x float> %3, i8 %mask, i32 8) ret <4 x float> %4 } define float @test_mul_ss_1(float %a, float %b) { ; CHECK-LABEL: @test_mul_ss_1( ; CHECK-NEXT: ret float 1.000000e+00 ; %1 = insertelement <4 x float> undef, float %a, i32 0 %2 = insertelement <4 x float> %1, float 1.000000e+00, i32 1 %3 = insertelement <4 x float> %2, float 2.000000e+00, i32 2 %4 = insertelement <4 x float> %3, float 3.000000e+00, i32 3 %5 = insertelement <4 x float> undef, float %b, i32 0 %6 = insertelement <4 x float> %5, float 4.000000e+00, i32 1 %7 = insertelement <4 x float> %6, float 5.000000e+00, i32 2 %8 = insertelement <4 x float> %7, float 6.000000e+00, i32 3 %9 = tail call <4 x float> @llvm.x86.avx512.mask.mul.ss.round(<4 x float> %4, <4 x float> %8, <4 x float> undef, i8 -1, i32 8) %10 = extractelement <4 x float> %9, i32 1 ret float %10 } declare <2 x double> @llvm.x86.avx512.mask.mul.sd.round(<2 x double>, <2 x double>, <2 x double>, i8, i32) define <2 x double> @test_mul_sd(<2 x double> %a, <2 x double> %b) { ; CHECK-LABEL: @test_mul_sd( ; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[A:%.*]], i64 0 ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0 ; CHECK-NEXT: [[TMP3:%.*]] = fmul double [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x double> [[A]], double [[TMP3]], i64 0 ; CHECK-NEXT: ret <2 x double> [[TMP4]] ; %1 = insertelement <2 x double> %b, double 1.000000e+00, i32 1 %2 = tail call <2 x double> @llvm.x86.avx512.mask.mul.sd.round(<2 x double> %a, <2 x double> %1, <2 x double> undef, i8 -1, i32 4) ret <2 x double> %2 } define <2 x double> @test_mul_sd_round(<2 x double> %a, <2 x double> %b) { ; CHECK-LABEL: @test_mul_sd_round( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.mul.sd.round(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> undef, i8 -1, i32 8) ; CHECK-NEXT: ret <2 x double> [[TMP1]] ; %1 = insertelement <2 x double> %b, double 1.000000e+00, i32 1 %2 = tail call <2 x double> @llvm.x86.avx512.mask.mul.sd.round(<2 x double> %a, <2 x double> %1, <2 x double> undef, i8 -1, i32 8) ret <2 x double> %2 } define <2 x double> @test_mul_sd_mask(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) { ; CHECK-LABEL: @test_mul_sd_mask( ; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[A:%.*]], i64 0 ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0 ; CHECK-NEXT: [[TMP3:%.*]] = fmul double [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = extractelement <8 x i1> [[TMP4]], i64 0 ; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0 ; CHECK-NEXT: [[TMP7:%.*]] = select i1 [[TMP5]], double [[TMP3]], double [[TMP6]] ; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x double> [[A]], double [[TMP7]], i64 0 ; CHECK-NEXT: ret <2 x double> [[TMP8]] ; %1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1 %2 = tail call <2 x 
double> @llvm.x86.avx512.mask.mul.sd.round(<2 x double> %a, <2 x double> %b, <2 x double> %1, i8 %mask, i32 4) ret <2 x double> %2 } define <2 x double> @test_mul_sd_mask_round(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) { ; CHECK-LABEL: @test_mul_sd_mask_round( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.mul.sd.round(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 8) ; CHECK-NEXT: ret <2 x double> [[TMP1]] ; %1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1 %2 = tail call <2 x double> @llvm.x86.avx512.mask.mul.sd.round(<2 x double> %a, <2 x double> %b, <2 x double> %1, i8 %mask, i32 8) ret <2 x double> %2 } define double @test_mul_sd_1(double %a, double %b) { ; CHECK-LABEL: @test_mul_sd_1( ; CHECK-NEXT: ret double 1.000000e+00 ; %1 = insertelement <2 x double> undef, double %a, i32 0 %2 = insertelement <2 x double> %1, double 1.000000e+00, i32 1 %3 = insertelement <2 x double> undef, double %b, i32 0 %4 = insertelement <2 x double> %3, double 2.000000e+00, i32 1 %5 = tail call <2 x double> @llvm.x86.avx512.mask.mul.sd.round(<2 x double> %2, <2 x double> %4, <2 x double> undef, i8 -1, i32 8) %6 = extractelement <2 x double> %5, i32 1 ret double %6 } declare <4 x float> @llvm.x86.avx512.mask.div.ss.round(<4 x float>, <4 x float>, <4 x float>, i8, i32) define <4 x float> @test_div_ss(<4 x float> %a, <4 x float> %b) { ; CHECK-LABEL: @test_div_ss( ; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0 ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i32 0 ; CHECK-NEXT: [[TMP3:%.*]] = fdiv float [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x float> [[A]], float [[TMP3]], i64 0 ; CHECK-NEXT: ret <4 x float> [[TMP4]] ; %1 = insertelement <4 x float> %b, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = tail call <4 x float> @llvm.x86.avx512.mask.div.ss.round(<4 x float> %a, <4 x float> %3, <4 x float> undef, i8 -1, i32 4) ret <4 x float> %4 } define <4 x float> @test_div_ss_round(<4 x float> %a, <4 x float> %b) { ; CHECK-LABEL: @test_div_ss_round( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.div.ss.round(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> undef, i8 -1, i32 8) ; CHECK-NEXT: ret <4 x float> [[TMP1]] ; %1 = insertelement <4 x float> %b, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = tail call <4 x float> @llvm.x86.avx512.mask.div.ss.round(<4 x float> %a, <4 x float> %3, <4 x float> undef, i8 -1, i32 8) ret <4 x float> %4 } define <4 x float> @test_div_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) { ; CHECK-LABEL: @test_div_ss_mask( ; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0 ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0 ; CHECK-NEXT: [[TMP3:%.*]] = fdiv float [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = extractelement <8 x i1> [[TMP4]], i64 0 ; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x float> [[C:%.*]], i32 0 ; CHECK-NEXT: [[TMP7:%.*]] = select i1 [[TMP5]], float [[TMP3]], float [[TMP6]] ; CHECK-NEXT: [[TMP8:%.*]] = insertelement <4 x float> [[A]], float [[TMP7]], i64 0 ; CHECK-NEXT: ret <4 x float> [[TMP8]] ; %1 = insertelement <4 x float> 
%c, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = tail call <4 x float> @llvm.x86.avx512.mask.div.ss.round(<4 x float> %a, <4 x float> %b, <4 x float> %3, i8 %mask, i32 4) ret <4 x float> %4 } define <4 x float> @test_div_ss_mask_round(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) { ; CHECK-LABEL: @test_div_ss_mask_round( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.div.ss.round(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]], i32 8) ; CHECK-NEXT: ret <4 x float> [[TMP1]] ; %1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = tail call <4 x float> @llvm.x86.avx512.mask.div.ss.round(<4 x float> %a, <4 x float> %b, <4 x float> %3, i8 %mask, i32 8) ret <4 x float> %4 } define float @test_div_ss_1(float %a, float %b) { ; CHECK-LABEL: @test_div_ss_1( ; CHECK-NEXT: ret float 1.000000e+00 ; %1 = insertelement <4 x float> undef, float %a, i32 0 %2 = insertelement <4 x float> %1, float 1.000000e+00, i32 1 %3 = insertelement <4 x float> %2, float 2.000000e+00, i32 2 %4 = insertelement <4 x float> %3, float 3.000000e+00, i32 3 %5 = insertelement <4 x float> undef, float %b, i32 0 %6 = insertelement <4 x float> %5, float 4.000000e+00, i32 1 %7 = insertelement <4 x float> %6, float 5.000000e+00, i32 2 %8 = insertelement <4 x float> %7, float 6.000000e+00, i32 3 %9 = tail call <4 x float> @llvm.x86.avx512.mask.div.ss.round(<4 x float> %4, <4 x float> %8, <4 x float> undef, i8 -1, i32 8) %10 = extractelement <4 x float> %9, i32 1 ret float %10 } declare <2 x double> @llvm.x86.avx512.mask.div.sd.round(<2 x double>, <2 x double>, <2 x double>, i8, i32) define <2 x double> @test_div_sd(<2 x double> %a, <2 x double> %b) { ; CHECK-LABEL: @test_div_sd( ; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[A:%.*]], i64 0 ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0 ; CHECK-NEXT: [[TMP3:%.*]] = fdiv double [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x double> [[A]], double [[TMP3]], i64 0 ; CHECK-NEXT: ret <2 x double> [[TMP4]] ; %1 = insertelement <2 x double> %b, double 1.000000e+00, i32 1 %2 = tail call <2 x double> @llvm.x86.avx512.mask.div.sd.round(<2 x double> %a, <2 x double> %1, <2 x double> undef, i8 -1, i32 4) ret <2 x double> %2 } define <2 x double> @test_div_sd_round(<2 x double> %a, <2 x double> %b) { ; CHECK-LABEL: @test_div_sd_round( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.div.sd.round(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> undef, i8 -1, i32 8) ; CHECK-NEXT: ret <2 x double> [[TMP1]] ; %1 = insertelement <2 x double> %b, double 1.000000e+00, i32 1 %2 = tail call <2 x double> @llvm.x86.avx512.mask.div.sd.round(<2 x double> %a, <2 x double> %1, <2 x double> undef, i8 -1, i32 8) ret <2 x double> %2 } define <2 x double> @test_div_sd_mask(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) { ; CHECK-LABEL: @test_div_sd_mask( ; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[A:%.*]], i64 0 ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0 ; CHECK-NEXT: [[TMP3:%.*]] = fdiv double [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = extractelement <8 x i1> [[TMP4]], i64 0 ; 
CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0 ; CHECK-NEXT: [[TMP7:%.*]] = select i1 [[TMP5]], double [[TMP3]], double [[TMP6]] ; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x double> [[A]], double [[TMP7]], i64 0 ; CHECK-NEXT: ret <2 x double> [[TMP8]] ; %1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1 %2 = tail call <2 x double> @llvm.x86.avx512.mask.div.sd.round(<2 x double> %a, <2 x double> %b, <2 x double> %1, i8 %mask, i32 4) ret <2 x double> %2 } define <2 x double> @test_div_sd_mask_round(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) { ; CHECK-LABEL: @test_div_sd_mask_round( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.div.sd.round(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 8) ; CHECK-NEXT: ret <2 x double> [[TMP1]] ; %1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1 %2 = tail call <2 x double> @llvm.x86.avx512.mask.div.sd.round(<2 x double> %a, <2 x double> %b, <2 x double> %1, i8 %mask, i32 8) ret <2 x double> %2 } define double @test_div_sd_1(double %a, double %b) { ; CHECK-LABEL: @test_div_sd_1( ; CHECK-NEXT: ret double 1.000000e+00 ; %1 = insertelement <2 x double> undef, double %a, i32 0 %2 = insertelement <2 x double> %1, double 1.000000e+00, i32 1 %3 = insertelement <2 x double> undef, double %b, i32 0 %4 = insertelement <2 x double> %3, double 2.000000e+00, i32 1 %5 = tail call <2 x double> @llvm.x86.avx512.mask.div.sd.round(<2 x double> %2, <2 x double> %4, <2 x double> undef, i8 -1, i32 8) %6 = extractelement <2 x double> %5, i32 1 ret double %6 } declare <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>, <4 x float>, <4 x float>, i8, i32) define <4 x float> @test_max_ss(<4 x float> %a, <4 x float> %b) { ; CHECK-LABEL: @test_max_ss( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> undef, i8 -1, i32 4) ; CHECK-NEXT: ret <4 x float> [[TMP1]] ; %1 = insertelement <4 x float> %b, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = tail call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float> %a, <4 x float> %3, <4 x float> undef, i8 -1, i32 4) ret <4 x float> %4 } define <4 x float> @test_max_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) { ; CHECK-LABEL: @test_max_ss_mask( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]], i32 4) ; CHECK-NEXT: ret <4 x float> [[TMP1]] ; %1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = tail call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float> %a, <4 x float> %b, <4 x float> %3, i8 %mask, i32 4) ret <4 x float> %4 } define float @test_max_ss_1(float %a, float %b) { ; CHECK-LABEL: @test_max_ss_1( ; CHECK-NEXT: ret float 1.000000e+00 ; %1 = insertelement <4 x float> undef, float %a, i32 0 %2 = insertelement <4 x float> %1, float 1.000000e+00, i32 1 %3 = insertelement <4 x float> %2, float 2.000000e+00, i32 2 %4 = insertelement <4 x float> %3, float 3.000000e+00, i32 3 %5 = insertelement <4 x float> undef, float %b, i32 0 %6 = insertelement <4 x float> %5, float 4.000000e+00, i32 1 %7 = insertelement <4 x 
float> %6, float 5.000000e+00, i32 2 %8 = insertelement <4 x float> %7, float 6.000000e+00, i32 3 %9 = tail call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float> %4, <4 x float> %8, <4 x float> undef, i8 -1, i32 8) %10 = extractelement <4 x float> %9, i32 1 ret float %10 } declare <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>, <2 x double>, <2 x double>, i8, i32) define <2 x double> @test_max_sd(<2 x double> %a, <2 x double> %b) { ; CHECK-LABEL: @test_max_sd( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> undef, i8 -1, i32 4) ; CHECK-NEXT: ret <2 x double> [[TMP1]] ; %1 = insertelement <2 x double> %b, double 1.000000e+00, i32 1 %2 = tail call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double> %a, <2 x double> %1, <2 x double> undef, i8 -1, i32 4) ret <2 x double> %2 } define <2 x double> @test_max_sd_mask(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) { ; CHECK-LABEL: @test_max_sd_mask( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 4) ; CHECK-NEXT: ret <2 x double> [[TMP1]] ; %1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1 %2 = tail call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double> %a, <2 x double> %b, <2 x double> %1, i8 %mask, i32 4) ret <2 x double> %2 } define double @test_max_sd_1(double %a, double %b) { ; CHECK-LABEL: @test_max_sd_1( ; CHECK-NEXT: ret double 1.000000e+00 ; %1 = insertelement <2 x double> undef, double %a, i32 0 %2 = insertelement <2 x double> %1, double 1.000000e+00, i32 1 %3 = insertelement <2 x double> undef, double %b, i32 0 %4 = insertelement <2 x double> %3, double 2.000000e+00, i32 1 %5 = tail call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double> %2, <2 x double> %4, <2 x double> undef, i8 -1, i32 8) %6 = extractelement <2 x double> %5, i32 1 ret double %6 } declare <4 x float> @llvm.x86.avx512.mask.min.ss.round(<4 x float>, <4 x float>, <4 x float>, i8, i32) define <4 x float> @test_min_ss(<4 x float> %a, <4 x float> %b) { ; CHECK-LABEL: @test_min_ss( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.min.ss.round(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> undef, i8 -1, i32 4) ; CHECK-NEXT: ret <4 x float> [[TMP1]] ; %1 = insertelement <4 x float> %b, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = tail call <4 x float> @llvm.x86.avx512.mask.min.ss.round(<4 x float> %a, <4 x float> %3, <4 x float> undef, i8 -1, i32 4) ret <4 x float> %4 } define <4 x float> @test_min_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) { ; CHECK-LABEL: @test_min_ss_mask( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.min.ss.round(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]], i32 4) ; CHECK-NEXT: ret <4 x float> [[TMP1]] ; %1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = tail call <4 x float> @llvm.x86.avx512.mask.min.ss.round(<4 x float> %a, <4 x float> %b, <4 x float> %3, i8 %mask, i32 4) ret <4 x float> %4 } define float @test_min_ss_1(float %a, float %b) { ; CHECK-LABEL: @test_min_ss_1( ; 
CHECK-NEXT: ret float 1.000000e+00 ; %1 = insertelement <4 x float> undef, float %a, i32 0 %2 = insertelement <4 x float> %1, float 1.000000e+00, i32 1 %3 = insertelement <4 x float> %2, float 2.000000e+00, i32 2 %4 = insertelement <4 x float> %3, float 3.000000e+00, i32 3 %5 = insertelement <4 x float> undef, float %b, i32 0 %6 = insertelement <4 x float> %5, float 4.000000e+00, i32 1 %7 = insertelement <4 x float> %6, float 5.000000e+00, i32 2 %8 = insertelement <4 x float> %7, float 6.000000e+00, i32 3 %9 = tail call <4 x float> @llvm.x86.avx512.mask.min.ss.round(<4 x float> %4, <4 x float> %8, <4 x float> undef, i8 -1, i32 8) %10 = extractelement <4 x float> %9, i32 1 ret float %10 } declare <2 x double> @llvm.x86.avx512.mask.min.sd.round(<2 x double>, <2 x double>, <2 x double>, i8, i32) define <2 x double> @test_min_sd(<2 x double> %a, <2 x double> %b) { ; CHECK-LABEL: @test_min_sd( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.min.sd.round(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> undef, i8 -1, i32 4) ; CHECK-NEXT: ret <2 x double> [[TMP1]] ; %1 = insertelement <2 x double> %b, double 1.000000e+00, i32 1 %2 = tail call <2 x double> @llvm.x86.avx512.mask.min.sd.round(<2 x double> %a, <2 x double> %1, <2 x double> undef, i8 -1, i32 4) ret <2 x double> %2 } define <2 x double> @test_min_sd_mask(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) { ; CHECK-LABEL: @test_min_sd_mask( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.min.sd.round(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 4) ; CHECK-NEXT: ret <2 x double> [[TMP1]] ; %1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1 %2 = tail call <2 x double> @llvm.x86.avx512.mask.min.sd.round(<2 x double> %a, <2 x double> %b, <2 x double> %1, i8 %mask, i32 4) ret <2 x double> %2 } define double @test_min_sd_1(double %a, double %b) { ; CHECK-LABEL: @test_min_sd_1( ; CHECK-NEXT: ret double 1.000000e+00 ; %1 = insertelement <2 x double> undef, double %a, i32 0 %2 = insertelement <2 x double> %1, double 1.000000e+00, i32 1 %3 = insertelement <2 x double> undef, double %b, i32 0 %4 = insertelement <2 x double> %3, double 2.000000e+00, i32 1 %5 = tail call <2 x double> @llvm.x86.avx512.mask.min.sd.round(<2 x double> %2, <2 x double> %4, <2 x double> undef, i8 -1, i32 8) %6 = extractelement <2 x double> %5, i32 1 ret double %6 } declare i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float>, <4 x float>, i32, i8, i32) define i8 @test_cmp_ss(<4 x float> %a, <4 x float> %b, i8 %mask) { ; CHECK-LABEL: @test_cmp_ss( ; CHECK-NEXT: [[TMP1:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], i32 3, i8 [[MASK:%.*]], i32 4) ; CHECK-NEXT: ret i8 [[TMP1]] ; %1 = insertelement <4 x float> %a, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = insertelement <4 x float> %b, float 4.000000e+00, i32 1 %5 = insertelement <4 x float> %4, float 5.000000e+00, i32 2 %6 = insertelement <4 x float> %5, float 6.000000e+00, i32 3 %7 = tail call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %3, <4 x float> %6, i32 3, i8 %mask, i32 4) ret i8 %7 } declare i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double>, <2 x double>, i32, i8, i32) define i8 @test_cmp_sd(<2 x double> %a, <2 x double> %b, i8 %mask) { ; CHECK-LABEL: @test_cmp_sd( ; CHECK-NEXT: [[TMP1:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.sd(<2 x 
double> [[A:%.*]], <2 x double> [[B:%.*]], i32 3, i8 [[MASK:%.*]], i32 4) ; CHECK-NEXT: ret i8 [[TMP1]] ; %1 = insertelement <2 x double> %a, double 1.000000e+00, i32 1 %2 = insertelement <2 x double> %b, double 2.000000e+00, i32 1 %3 = tail call i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double> %1, <2 x double> %2, i32 3, i8 %mask, i32 4) ret i8 %3 } define i64 @test(float %f, double %d) { ; CHECK-LABEL: @test( ; CHECK-NEXT: [[V03:%.*]] = insertelement <4 x float> undef, float [[F:%.*]], i32 0 ; CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.x86.avx512.vcvtss2si32(<4 x float> [[V03]], i32 4) ; CHECK-NEXT: [[V13:%.*]] = insertelement <4 x float> undef, float [[F]], i32 0 ; CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.x86.avx512.vcvtss2si64(<4 x float> [[V13]], i32 4) ; CHECK-NEXT: [[V23:%.*]] = insertelement <4 x float> undef, float [[F]], i32 0 ; CHECK-NEXT: [[TMP2:%.*]] = tail call i32 @llvm.x86.avx512.cvttss2si(<4 x float> [[V23]], i32 4) ; CHECK-NEXT: [[V33:%.*]] = insertelement <4 x float> undef, float [[F]], i32 0 ; CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.x86.avx512.cvttss2si64(<4 x float> [[V33]], i32 4) ; CHECK-NEXT: [[V41:%.*]] = insertelement <2 x double> undef, double [[D:%.*]], i32 0 ; CHECK-NEXT: [[TMP4:%.*]] = tail call i32 @llvm.x86.avx512.vcvtsd2si32(<2 x double> [[V41]], i32 4) ; CHECK-NEXT: [[V51:%.*]] = insertelement <2 x double> undef, double [[D]], i32 0 ; CHECK-NEXT: [[TMP5:%.*]] = tail call i64 @llvm.x86.avx512.vcvtsd2si64(<2 x double> [[V51]], i32 4) ; CHECK-NEXT: [[V61:%.*]] = insertelement <2 x double> undef, double [[D]], i32 0 ; CHECK-NEXT: [[TMP6:%.*]] = tail call i32 @llvm.x86.avx512.cvttsd2si(<2 x double> [[V61]], i32 4) ; CHECK-NEXT: [[V71:%.*]] = insertelement <2 x double> undef, double [[D]], i32 0 ; CHECK-NEXT: [[TMP7:%.*]] = tail call i64 @llvm.x86.avx512.cvttsd2si64(<2 x double> [[V71]], i32 4) ; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[TMP0]], [[TMP2]] ; CHECK-NEXT: [[TMP9:%.*]] = add i32 [[TMP4]], [[TMP6]] ; CHECK-NEXT: [[TMP10:%.*]] = add i32 [[TMP8]], [[TMP9]] ; CHECK-NEXT: [[TMP11:%.*]] = sext i32 [[TMP10]] to i64 ; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[TMP1]], [[TMP3]] ; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[TMP5]], [[TMP7]] ; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[TMP12]], [[TMP13]] ; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[TMP14]], [[TMP11]] ; CHECK-NEXT: ret i64 [[TMP15]] ; %v00 = insertelement <4 x float> undef, float %f, i32 0 %v01 = insertelement <4 x float> %v00, float 0.000000e+00, i32 1 %v02 = insertelement <4 x float> %v01, float 0.000000e+00, i32 2 %v03 = insertelement <4 x float> %v02, float 0.000000e+00, i32 3 %tmp0 = tail call i32 @llvm.x86.avx512.vcvtss2si32(<4 x float> %v03, i32 4) %v10 = insertelement <4 x float> undef, float %f, i32 0 %v11 = insertelement <4 x float> %v10, float 0.000000e+00, i32 1 %v12 = insertelement <4 x float> %v11, float 0.000000e+00, i32 2 %v13 = insertelement <4 x float> %v12, float 0.000000e+00, i32 3 %tmp1 = tail call i64 @llvm.x86.avx512.vcvtss2si64(<4 x float> %v13, i32 4) %v20 = insertelement <4 x float> undef, float %f, i32 0 %v21 = insertelement <4 x float> %v20, float 0.000000e+00, i32 1 %v22 = insertelement <4 x float> %v21, float 0.000000e+00, i32 2 %v23 = insertelement <4 x float> %v22, float 0.000000e+00, i32 3 %tmp2 = tail call i32 @llvm.x86.avx512.cvttss2si(<4 x float> %v23, i32 4) %v30 = insertelement <4 x float> undef, float %f, i32 0 %v31 = insertelement <4 x float> %v30, float 0.000000e+00, i32 1 %v32 = insertelement <4 x float> %v31, float 0.000000e+00, i32 2 %v33 = insertelement <4 x 
float> %v32, float 0.000000e+00, i32 3 %tmp3 = tail call i64 @llvm.x86.avx512.cvttss2si64(<4 x float> %v33, i32 4) %v40 = insertelement <2 x double> undef, double %d, i32 0 %v41 = insertelement <2 x double> %v40, double 0.000000e+00, i32 1 %tmp4 = tail call i32 @llvm.x86.avx512.vcvtsd2si32(<2 x double> %v41, i32 4) %v50 = insertelement <2 x double> undef, double %d, i32 0 %v51 = insertelement <2 x double> %v50, double 0.000000e+00, i32 1 %tmp5 = tail call i64 @llvm.x86.avx512.vcvtsd2si64(<2 x double> %v51, i32 4) %v60 = insertelement <2 x double> undef, double %d, i32 0 %v61 = insertelement <2 x double> %v60, double 0.000000e+00, i32 1 %tmp6 = tail call i32 @llvm.x86.avx512.cvttsd2si(<2 x double> %v61, i32 4) %v70 = insertelement <2 x double> undef, double %d, i32 0 %v71 = insertelement <2 x double> %v70, double 0.000000e+00, i32 1 %tmp7 = tail call i64 @llvm.x86.avx512.cvttsd2si64(<2 x double> %v71, i32 4) %tmp8 = add i32 %tmp0, %tmp2 %tmp9 = add i32 %tmp4, %tmp6 %tmp10 = add i32 %tmp8, %tmp9 %tmp11 = sext i32 %tmp10 to i64 %tmp12 = add i64 %tmp1, %tmp3 %tmp13 = add i64 %tmp5, %tmp7 %tmp14 = add i64 %tmp12, %tmp13 %tmp15 = add i64 %tmp11, %tmp14 ret i64 %tmp15 } declare i32 @llvm.x86.avx512.vcvtss2si32(<4 x float>, i32) declare i64 @llvm.x86.avx512.vcvtss2si64(<4 x float>, i32) declare i32 @llvm.x86.avx512.cvttss2si(<4 x float>, i32) declare i64 @llvm.x86.avx512.cvttss2si64(<4 x float>, i32) declare i32 @llvm.x86.avx512.vcvtsd2si32(<2 x double>, i32) declare i64 @llvm.x86.avx512.vcvtsd2si64(<2 x double>, i32) declare i32 @llvm.x86.avx512.cvttsd2si(<2 x double>, i32) declare i64 @llvm.x86.avx512.cvttsd2si64(<2 x double>, i32) define i64 @test2(float %f, double %d) { ; CHECK-LABEL: @test2( ; CHECK-NEXT: [[V03:%.*]] = insertelement <4 x float> undef, float [[F:%.*]], i32 0 ; CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.x86.avx512.vcvtss2usi32(<4 x float> [[V03]], i32 4) ; CHECK-NEXT: [[V13:%.*]] = insertelement <4 x float> undef, float [[F]], i32 0 ; CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.x86.avx512.vcvtss2usi64(<4 x float> [[V13]], i32 4) ; CHECK-NEXT: [[V23:%.*]] = insertelement <4 x float> undef, float [[F]], i32 0 ; CHECK-NEXT: [[TMP2:%.*]] = tail call i32 @llvm.x86.avx512.cvttss2usi(<4 x float> [[V23]], i32 4) ; CHECK-NEXT: [[V33:%.*]] = insertelement <4 x float> undef, float [[F]], i32 0 ; CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.x86.avx512.cvttss2usi64(<4 x float> [[V33]], i32 4) ; CHECK-NEXT: [[V41:%.*]] = insertelement <2 x double> undef, double [[D:%.*]], i32 0 ; CHECK-NEXT: [[TMP4:%.*]] = tail call i32 @llvm.x86.avx512.vcvtsd2usi32(<2 x double> [[V41]], i32 4) ; CHECK-NEXT: [[V51:%.*]] = insertelement <2 x double> undef, double [[D]], i32 0 ; CHECK-NEXT: [[TMP5:%.*]] = tail call i64 @llvm.x86.avx512.vcvtsd2usi64(<2 x double> [[V51]], i32 4) ; CHECK-NEXT: [[V61:%.*]] = insertelement <2 x double> undef, double [[D]], i32 0 ; CHECK-NEXT: [[TMP6:%.*]] = tail call i32 @llvm.x86.avx512.cvttsd2usi(<2 x double> [[V61]], i32 4) ; CHECK-NEXT: [[V71:%.*]] = insertelement <2 x double> undef, double [[D]], i32 0 ; CHECK-NEXT: [[TMP7:%.*]] = tail call i64 @llvm.x86.avx512.cvttsd2usi64(<2 x double> [[V71]], i32 4) ; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[TMP0]], [[TMP2]] ; CHECK-NEXT: [[TMP9:%.*]] = add i32 [[TMP4]], [[TMP6]] ; CHECK-NEXT: [[TMP10:%.*]] = add i32 [[TMP8]], [[TMP9]] ; CHECK-NEXT: [[TMP11:%.*]] = sext i32 [[TMP10]] to i64 ; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[TMP1]], [[TMP3]] ; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[TMP5]], [[TMP7]] ; CHECK-NEXT: [[TMP14:%.*]] = add 
i64 [[TMP12]], [[TMP13]] ; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[TMP14]], [[TMP11]] ; CHECK-NEXT: ret i64 [[TMP15]] ; %v00 = insertelement <4 x float> undef, float %f, i32 0 %v01 = insertelement <4 x float> %v00, float 0.000000e+00, i32 1 %v02 = insertelement <4 x float> %v01, float 0.000000e+00, i32 2 %v03 = insertelement <4 x float> %v02, float 0.000000e+00, i32 3 %tmp0 = tail call i32 @llvm.x86.avx512.vcvtss2usi32(<4 x float> %v03, i32 4) %v10 = insertelement <4 x float> undef, float %f, i32 0 %v11 = insertelement <4 x float> %v10, float 0.000000e+00, i32 1 %v12 = insertelement <4 x float> %v11, float 0.000000e+00, i32 2 %v13 = insertelement <4 x float> %v12, float 0.000000e+00, i32 3 %tmp1 = tail call i64 @llvm.x86.avx512.vcvtss2usi64(<4 x float> %v13, i32 4) %v20 = insertelement <4 x float> undef, float %f, i32 0 %v21 = insertelement <4 x float> %v20, float 0.000000e+00, i32 1 %v22 = insertelement <4 x float> %v21, float 0.000000e+00, i32 2 %v23 = insertelement <4 x float> %v22, float 0.000000e+00, i32 3 %tmp2 = tail call i32 @llvm.x86.avx512.cvttss2usi(<4 x float> %v23, i32 4) %v30 = insertelement <4 x float> undef, float %f, i32 0 %v31 = insertelement <4 x float> %v30, float 0.000000e+00, i32 1 %v32 = insertelement <4 x float> %v31, float 0.000000e+00, i32 2 %v33 = insertelement <4 x float> %v32, float 0.000000e+00, i32 3 %tmp3 = tail call i64 @llvm.x86.avx512.cvttss2usi64(<4 x float> %v33, i32 4) %v40 = insertelement <2 x double> undef, double %d, i32 0 %v41 = insertelement <2 x double> %v40, double 0.000000e+00, i32 1 %tmp4 = tail call i32 @llvm.x86.avx512.vcvtsd2usi32(<2 x double> %v41, i32 4) %v50 = insertelement <2 x double> undef, double %d, i32 0 %v51 = insertelement <2 x double> %v50, double 0.000000e+00, i32 1 %tmp5 = tail call i64 @llvm.x86.avx512.vcvtsd2usi64(<2 x double> %v51, i32 4) %v60 = insertelement <2 x double> undef, double %d, i32 0 %v61 = insertelement <2 x double> %v60, double 0.000000e+00, i32 1 %tmp6 = tail call i32 @llvm.x86.avx512.cvttsd2usi(<2 x double> %v61, i32 4) %v70 = insertelement <2 x double> undef, double %d, i32 0 %v71 = insertelement <2 x double> %v70, double 0.000000e+00, i32 1 %tmp7 = tail call i64 @llvm.x86.avx512.cvttsd2usi64(<2 x double> %v71, i32 4) %tmp8 = add i32 %tmp0, %tmp2 %tmp9 = add i32 %tmp4, %tmp6 %tmp10 = add i32 %tmp8, %tmp9 %tmp11 = sext i32 %tmp10 to i64 %tmp12 = add i64 %tmp1, %tmp3 %tmp13 = add i64 %tmp5, %tmp7 %tmp14 = add i64 %tmp12, %tmp13 %tmp15 = add i64 %tmp11, %tmp14 ret i64 %tmp15 } declare i32 @llvm.x86.avx512.vcvtss2usi32(<4 x float>, i32) declare i64 @llvm.x86.avx512.vcvtss2usi64(<4 x float>, i32) declare i32 @llvm.x86.avx512.cvttss2usi(<4 x float>, i32) declare i64 @llvm.x86.avx512.cvttss2usi64(<4 x float>, i32) declare i32 @llvm.x86.avx512.vcvtsd2usi32(<2 x double>, i32) declare i64 @llvm.x86.avx512.vcvtsd2usi64(<2 x double>, i32) declare i32 @llvm.x86.avx512.cvttsd2usi(<2 x double>, i32) declare i64 @llvm.x86.avx512.cvttsd2usi64(<2 x double>, i32) declare <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32) define <4 x float> @test_mask_vfmadd_ss(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) { ; CHECK-LABEL: @test_mask_vfmadd_ss( ; CHECK-NEXT: [[RES:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]], i32 4) ; CHECK-NEXT: ret <4 x float> [[RES]] ; %1 = insertelement <4 x float> %b, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 
%3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = insertelement <4 x float> %c, float 4.000000e+00, i32 1 %5 = insertelement <4 x float> %4, float 5.000000e+00, i32 2 %6 = insertelement <4 x float> %5, float 6.000000e+00, i32 3 %res = tail call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %a, <4 x float> %3, <4 x float> %6, i8 %mask, i32 4) ret <4 x float> %res } define float @test_mask_vfmadd_ss_0(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) { ; CHECK-LABEL: @test_mask_vfmadd_ss_0( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]], i32 4) ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[TMP1]], i32 0 ; CHECK-NEXT: ret float [[TMP2]] ; %1 = insertelement <4 x float> %a, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = tail call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %3, <4 x float> %b, <4 x float> %c, i8 %mask, i32 4) %5 = extractelement <4 x float> %4, i32 0 ret float %5 } define float @test_mask_vfmadd_ss_1(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) { ; CHECK-LABEL: @test_mask_vfmadd_ss_1( ; CHECK-NEXT: ret float 1.000000e+00 ; %1 = insertelement <4 x float> %a, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = tail call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %3, <4 x float> %b, <4 x float> %c, i8 %mask, i32 4) %5 = extractelement <4 x float> %4, i32 1 ret float %5 } declare <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double>, <2 x double>, <2 x double>, i8, i32) define <2 x double> @test_mask_vfmadd_sd(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) { ; CHECK-LABEL: @test_mask_vfmadd_sd( ; CHECK-NEXT: [[RES:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 4) ; CHECK-NEXT: ret <2 x double> [[RES]] ; %1 = insertelement <2 x double> %b, double 1.000000e+00, i32 1 %2 = insertelement <2 x double> %c, double 2.000000e+00, i32 1 %res = tail call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %a, <2 x double> %1, <2 x double> %2, i8 %mask, i32 4) ret <2 x double> %res } define double @test_mask_vfmadd_sd_0(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) { ; CHECK-LABEL: @test_mask_vfmadd_sd_0( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 4) ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[TMP1]], i32 0 ; CHECK-NEXT: ret double [[TMP2]] ; %1 = insertelement <2 x double> %a, double 1.000000e+00, i32 1 %2 = tail call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %1, <2 x double> %b, <2 x double> %c, i8 %mask, i32 4) %3 = extractelement <2 x double> %2, i32 0 ret double %3 } define double @test_mask_vfmadd_sd_1(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) { ; CHECK-LABEL: @test_mask_vfmadd_sd_1( ; CHECK-NEXT: ret double 1.000000e+00 ; %1 = insertelement <2 x double> %a, double 1.000000e+00, i32 1 %2 = tail call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %1, <2 x double> %b, <2 x double> %c, i8 %mask, i32 4) %3 = extractelement <2 x double> %2, i32 1 ret 
double %3 } declare <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32) define <4 x float> @test_maskz_vfmadd_ss(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) { ; CHECK-LABEL: @test_maskz_vfmadd_ss( ; CHECK-NEXT: [[RES:%.*]] = tail call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]], i32 4) ; CHECK-NEXT: ret <4 x float> [[RES]] ; %1 = insertelement <4 x float> %b, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = insertelement <4 x float> %c, float 4.000000e+00, i32 1 %5 = insertelement <4 x float> %4, float 5.000000e+00, i32 2 %6 = insertelement <4 x float> %5, float 6.000000e+00, i32 3 %res = tail call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %a, <4 x float> %3, <4 x float> %6, i8 %mask, i32 4) ret <4 x float> %res } define float @test_maskz_vfmadd_ss_0(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) { ; CHECK-LABEL: @test_maskz_vfmadd_ss_0( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]], i32 4) ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[TMP1]], i32 0 ; CHECK-NEXT: ret float [[TMP2]] ; %1 = insertelement <4 x float> %a, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = tail call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %3, <4 x float> %b, <4 x float> %c, i8 %mask, i32 4) %5 = extractelement <4 x float> %4, i32 0 ret float %5 } define float @test_maskz_vfmadd_ss_1(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) { ; CHECK-LABEL: @test_maskz_vfmadd_ss_1( ; CHECK-NEXT: ret float 1.000000e+00 ; %1 = insertelement <4 x float> %a, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = tail call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %3, <4 x float> %b, <4 x float> %c, i8 %mask, i32 4) %5 = extractelement <4 x float> %4, i32 1 ret float %5 } declare <2 x double> @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double>, <2 x double>, <2 x double>, i8, i32) define <2 x double> @test_maskz_vfmadd_sd(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) { ; CHECK-LABEL: @test_maskz_vfmadd_sd( ; CHECK-NEXT: [[RES:%.*]] = tail call <2 x double> @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 4) ; CHECK-NEXT: ret <2 x double> [[RES]] ; %1 = insertelement <2 x double> %b, double 1.000000e+00, i32 1 %2 = insertelement <2 x double> %c, double 2.000000e+00, i32 1 %res = tail call <2 x double> @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double> %a, <2 x double> %1, <2 x double> %2, i8 %mask, i32 4) ret <2 x double> %res } define double @test_maskz_vfmadd_sd_0(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) { ; CHECK-LABEL: @test_maskz_vfmadd_sd_0( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 4) ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[TMP1]], i32 0 ; CHECK-NEXT: ret double [[TMP2]] ; %1 = insertelement <2 x double> %a, double 1.000000e+00, i32 1 
%2 = tail call <2 x double> @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double> %1, <2 x double> %b, <2 x double> %c, i8 %mask, i32 4) %3 = extractelement <2 x double> %2, i32 0 ret double %3 } define double @test_maskz_vfmadd_sd_1(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) { ; CHECK-LABEL: @test_maskz_vfmadd_sd_1( ; CHECK-NEXT: ret double 1.000000e+00 ; %1 = insertelement <2 x double> %a, double 1.000000e+00, i32 1 %2 = tail call <2 x double> @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double> %1, <2 x double> %b, <2 x double> %c, i8 %mask, i32 4) %3 = extractelement <2 x double> %2, i32 1 ret double %3 } declare <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32) define <4 x float> @test_mask3_vfmadd_ss(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) { ; CHECK-LABEL: @test_mask3_vfmadd_ss( ; CHECK-NEXT: [[RES:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]], i32 4) ; CHECK-NEXT: ret <4 x float> [[RES]] ; %1 = insertelement <4 x float> %a, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = insertelement <4 x float> %b, float 4.000000e+00, i32 1 %5 = insertelement <4 x float> %4, float 5.000000e+00, i32 2 %6 = insertelement <4 x float> %5, float 6.000000e+00, i32 3 %res = tail call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float> %3, <4 x float> %6, <4 x float> %c, i8 %mask, i32 4) ret <4 x float> %res } define float @test_mask3_vfmadd_ss_0(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) { ; CHECK-LABEL: @test_mask3_vfmadd_ss_0( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]], i32 4) ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[TMP1]], i32 0 ; CHECK-NEXT: ret float [[TMP2]] ; %1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = tail call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float> %a, <4 x float> %b, <4 x float> %3, i8 %mask, i32 4) %5 = extractelement <4 x float> %4, i32 0 ret float %5 } define float @test_mask3_vfmadd_ss_1(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) { ; CHECK-LABEL: @test_mask3_vfmadd_ss_1( ; CHECK-NEXT: ret float 1.000000e+00 ; %1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = tail call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float> %a, <4 x float> %b, <4 x float> %3, i8 %mask, i32 4) %5 = extractelement <4 x float> %4, i32 1 ret float %5 } declare <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double>, <2 x double>, <2 x double>, i8, i32) define <2 x double> @test_mask3_vfmadd_sd(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) { ; CHECK-LABEL: @test_mask3_vfmadd_sd( ; CHECK-NEXT: [[RES:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 4) ; CHECK-NEXT: ret <2 x double> [[RES]] ; %1 = insertelement <2 x double> %a, double 1.000000e+00, i32 1 %2 = insertelement <2 x double> %b, double 2.000000e+00, i32 1 %res = tail call <2 x double> 
@llvm.x86.avx512.mask3.vfmadd.sd(<2 x double> %1, <2 x double> %2, <2 x double> %c, i8 %mask, i32 4) ret <2 x double> %res } define double @test_mask3_vfmadd_sd_0(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) { ; CHECK-LABEL: @test_mask3_vfmadd_sd_0( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 4) ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[TMP1]], i32 0 ; CHECK-NEXT: ret double [[TMP2]] ; %1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1 %2 = tail call <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double> %a, <2 x double> %b, <2 x double> %1, i8 %mask, i32 4) %3 = extractelement <2 x double> %2, i32 0 ret double %3 } define double @test_mask3_vfmadd_sd_1(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) { ; CHECK-LABEL: @test_mask3_vfmadd_sd_1( ; CHECK-NEXT: ret double 1.000000e+00 ; %1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1 %2 = tail call <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double> %a, <2 x double> %b, <2 x double> %1, i8 %mask, i32 4) %3 = extractelement <2 x double> %2, i32 1 ret double %3 } declare <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32) define <4 x float> @test_mask3_vfmsub_ss(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) { ; CHECK-LABEL: @test_mask3_vfmsub_ss( ; CHECK-NEXT: [[RES:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]], i32 4) ; CHECK-NEXT: ret <4 x float> [[RES]] ; %1 = insertelement <4 x float> %a, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = insertelement <4 x float> %b, float 4.000000e+00, i32 1 %5 = insertelement <4 x float> %4, float 5.000000e+00, i32 2 %6 = insertelement <4 x float> %5, float 6.000000e+00, i32 3 %res = tail call <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float> %3, <4 x float> %6, <4 x float> %c, i8 %mask, i32 4) ret <4 x float> %res } define float @test_mask3_vfmsub_ss_0(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) { ; CHECK-LABEL: @test_mask3_vfmsub_ss_0( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]], i32 4) ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[TMP1]], i32 0 ; CHECK-NEXT: ret float [[TMP2]] ; %1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = tail call <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float> %a, <4 x float> %b, <4 x float> %3, i8 %mask, i32 4) %5 = extractelement <4 x float> %4, i32 0 ret float %5 } define float @test_mask3_vfmsub_ss_1(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) { ; CHECK-LABEL: @test_mask3_vfmsub_ss_1( ; CHECK-NEXT: ret float 1.000000e+00 ; %1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = tail call <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float> %a, <4 x float> %b, <4 x float> %3, i8 %mask, i32 4) %5 = extractelement <4 x float> %4, i32 1 ret float %5 } declare <2 x 
double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double>, <2 x double>, <2 x double>, i8, i32) define <2 x double> @test_mask3_vfmsub_sd(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) { ; CHECK-LABEL: @test_mask3_vfmsub_sd( ; CHECK-NEXT: [[RES:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 4) ; CHECK-NEXT: ret <2 x double> [[RES]] ; %1 = insertelement <2 x double> %a, double 1.000000e+00, i32 1 %2 = insertelement <2 x double> %b, double 2.000000e+00, i32 1 %res = tail call <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double> %1, <2 x double> %2, <2 x double> %c, i8 %mask, i32 4) ret <2 x double> %res } define double @test_mask3_vfmsub_sd_0(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) { ; CHECK-LABEL: @test_mask3_vfmsub_sd_0( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 4) ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[TMP1]], i32 0 ; CHECK-NEXT: ret double [[TMP2]] ; %1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1 %2 = tail call <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double> %a, <2 x double> %b, <2 x double> %1, i8 %mask, i32 4) %3 = extractelement <2 x double> %2, i32 0 ret double %3 } define double @test_mask3_vfmsub_sd_1(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) { ; CHECK-LABEL: @test_mask3_vfmsub_sd_1( ; CHECK-NEXT: ret double 1.000000e+00 ; %1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1 %2 = tail call <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double> %a, <2 x double> %b, <2 x double> %1, i8 %mask, i32 4) %3 = extractelement <2 x double> %2, i32 1 ret double %3 } declare <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32) define <4 x float> @test_mask3_vfnmsub_ss(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) { ; CHECK-LABEL: @test_mask3_vfnmsub_ss( ; CHECK-NEXT: [[RES:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ss(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]], i32 4) ; CHECK-NEXT: ret <4 x float> [[RES]] ; %1 = insertelement <4 x float> %a, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = insertelement <4 x float> %b, float 4.000000e+00, i32 1 %5 = insertelement <4 x float> %4, float 5.000000e+00, i32 2 %6 = insertelement <4 x float> %5, float 6.000000e+00, i32 3 %res = tail call <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ss(<4 x float> %3, <4 x float> %6, <4 x float> %c, i8 %mask, i32 4) ret <4 x float> %res } define float @test_mask3_vfnmsub_ss_0(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) { ; CHECK-LABEL: @test_mask3_vfnmsub_ss_0( ; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ss(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]], i32 4) ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[TMP1]], i32 0 ; CHECK-NEXT: ret float [[TMP2]] ; %1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1 %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2 %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3 %4 = tail call <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ss(<4 x float> %a, <4 x float> %b, <4 x float> %3, i8 %mask, 
i32 4)
  %5 = extractelement <4 x float> %4, i32 0
  ret float %5
}

define float @test_mask3_vfnmsub_ss_1(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) {
; CHECK-LABEL: @test_mask3_vfnmsub_ss_1(
; CHECK-NEXT:    ret float 1.000000e+00
;
  %1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1
  %2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2
  %3 = insertelement <4 x float> %2, float 3.000000e+00, i32 3
  %4 = tail call <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ss(<4 x float> %a, <4 x float> %b, <4 x float> %3, i8 %mask, i32 4)
  %5 = extractelement <4 x float> %4, i32 1
  ret float %5
}

declare <2 x double> @llvm.x86.avx512.mask3.vfnmsub.sd(<2 x double>, <2 x double>, <2 x double>, i8, i32)

define <2 x double> @test_mask3_vfnmsub_sd(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_mask3_vfnmsub_sd(
; CHECK-NEXT:    [[RES:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask3.vfnmsub.sd(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 4)
; CHECK-NEXT:    ret <2 x double> [[RES]]
;
  %1 = insertelement <2 x double> %a, double 1.000000e+00, i32 1
  %2 = insertelement <2 x double> %b, double 2.000000e+00, i32 1
  %res = tail call <2 x double> @llvm.x86.avx512.mask3.vfnmsub.sd(<2 x double> %1, <2 x double> %2, <2 x double> %c, i8 %mask, i32 4)
  ret <2 x double> %res
}

define double @test_mask3_vfnmsub_sd_0(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_mask3_vfnmsub_sd_0(
; CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask3.vfnmsub.sd(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 4)
; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x double> [[TMP1]], i32 0
; CHECK-NEXT:    ret double [[TMP2]]
;
  %1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1
  %2 = tail call <2 x double> @llvm.x86.avx512.mask3.vfnmsub.sd(<2 x double> %a, <2 x double> %b, <2 x double> %1, i8 %mask, i32 4)
  %3 = extractelement <2 x double> %2, i32 0
  ret double %3
}

define double @test_mask3_vfnmsub_sd_1(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_mask3_vfnmsub_sd_1(
; CHECK-NEXT:    ret double 1.000000e+00
;
  %1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1
  %2 = tail call <2 x double> @llvm.x86.avx512.mask3.vfnmsub.sd(<2 x double> %a, <2 x double> %b, <2 x double> %1, i8 %mask, i32 4)
  %3 = extractelement <2 x double> %2, i32 1
  ret double %3
}

; Constant-index permvar calls should fold: identity indices fold to the input, zero indices become a splat shufflevector, and the masked variants additionally select against the passthru operand.
declare <8 x i32> @llvm.x86.avx512.mask.permvar.si.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)

define <8 x i32> @identity_test_permvar_si_256(<8 x i32> %a0) {
; CHECK-LABEL: @identity_test_permvar_si_256(
; CHECK-NEXT:    ret <8 x i32> [[A0:%.*]]
;
  %a = tail call <8 x i32> @llvm.x86.avx512.mask.permvar.si.256(<8 x i32> %a0, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>, <8 x i32> undef, i8 -1)
  ret <8 x i32> %a
}

define <8 x i32> @identity_test_permvar_si_256_mask(<8 x i32> %a0, <8 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: @identity_test_permvar_si_256_mask(
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
; CHECK-NEXT:    [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x i32> [[A0:%.*]], <8 x i32> [[PASSTHRU:%.*]]
; CHECK-NEXT:    ret <8 x i32> [[TMP2]]
;
  %a = tail call <8 x i32> @llvm.x86.avx512.mask.permvar.si.256(<8 x i32> %a0, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>, <8 x i32> %passthru, i8 %mask)
  ret <8 x i32> %a
}

define <8 x i32> @zero_test_permvar_si_256(<8 x i32> %a0) {
; CHECK-LABEL: @zero_test_permvar_si_256(
; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <8 x i32> [[A0:%.*]], <8 x i32> undef, <8 x i32> zeroinitializer
; CHECK-NEXT:    ret <8 x i32> [[TMP1]]
;
  %a = tail call <8 x i32> @llvm.x86.avx512.mask.permvar.si.256(<8 x i32> %a0, <8 x i32> zeroinitializer, <8 x i32> undef, i8 -1)
  ret <8 x i32> %a
}

define <8 x i32> @zero_test_permvar_si_256_mask(<8 x i32> %a0, <8 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: @zero_test_permvar_si_256_mask(
; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <8 x i32> [[A0:%.*]], <8 x i32> undef, <8 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
; CHECK-NEXT:    [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x i32> [[TMP1]], <8 x i32> [[PASSTHRU:%.*]]
; CHECK-NEXT:    ret <8 x i32> [[TMP3]]
;
  %a = tail call <8 x i32> @llvm.x86.avx512.mask.permvar.si.256(<8 x i32> %a0, <8 x i32> zeroinitializer, <8 x i32> %passthru, i8 %mask)
  ret <8 x i32> %a
}

define <8 x i32> @shuffle_test_permvar_si_256(<8 x i32> %a0) {
; CHECK-LABEL: @shuffle_test_permvar_si_256(
; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <8 x i32> [[A0:%.*]], <8 x i32> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT:    ret <8 x i32> [[TMP1]]
;
  %a = tail call <8 x i32> @llvm.x86.avx512.mask.permvar.si.256(<8 x i32> %a0, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>, <8 x i32> undef, i8 -1)
  ret <8 x i32> %a
}

define <8 x i32> @shuffle_test_permvar_si_256_mask(<8 x i32> %a0, <8 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: @shuffle_test_permvar_si_256_mask(
; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <8 x i32> [[A0:%.*]], <8 x i32> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
; CHECK-NEXT:    [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x i32> [[TMP1]], <8 x i32> [[PASSTHRU:%.*]]
; CHECK-NEXT:    ret <8 x i32> [[TMP3]]
;
  %a = tail call <8 x i32> @llvm.x86.avx512.mask.permvar.si.256(<8 x i32> %a0, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>, <8 x i32> %passthru, i8 %mask)
  ret <8 x i32> %a
}

define <8 x i32> @undef_test_permvar_si_256(<8 x i32> %a0) {
; CHECK-LABEL: @undef_test_permvar_si_256(
; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <8 x i32> [[A0:%.*]], <8 x i32> undef, <8 x i32> <i32 undef, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT:    ret <8 x i32> [[TMP1]]
;
  %a = tail call <8 x i32> @llvm.x86.avx512.mask.permvar.si.256(<8 x i32> %a0, <8 x i32> <i32 undef, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>, <8 x i32> undef, i8 -1)
  ret <8 x i32> %a
}

define <8 x i32> @undef_test_permvar_si_256_mask(<8 x i32> %a0, <8 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: @undef_test_permvar_si_256_mask(
; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <8 x i32> [[A0:%.*]], <8 x i32> undef, <8 x i32> <i32 undef, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
; CHECK-NEXT:    [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x i32> [[TMP1]], <8 x i32> [[PASSTHRU:%.*]]
; CHECK-NEXT:    ret <8 x i32> [[TMP3]]
;
  %a = tail call <8 x i32> @llvm.x86.avx512.mask.permvar.si.256(<8 x i32> %a0, <8 x i32> <i32 undef, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>, <8 x i32> %passthru, i8 %mask)
  ret <8 x i32> %a
}

declare <8 x float> @llvm.x86.avx512.mask.permvar.sf.256(<8 x float>, <8 x i32>, <8 x float>, i8)

define <8 x float> @identity_test_permvar_sf_256(<8 x float> %a0) {
; CHECK-LABEL: @identity_test_permvar_sf_256(
; CHECK-NEXT:    ret <8 x float> [[A0:%.*]]
;
  %a = tail call <8 x float> @llvm.x86.avx512.mask.permvar.sf.256(<8 x float> %a0, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>, <8 x float> undef, i8 -1)
  ret <8 x float> %a
}

define <8 x float> @identity_test_permvar_sf_256_mask(<8 x float> %a0, <8 x float> %passthru, i8 %mask) {
; CHECK-LABEL: @identity_test_permvar_sf_256_mask(
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
; CHECK-NEXT:    [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x float> [[A0:%.*]], <8 x float> [[PASSTHRU:%.*]]
; CHECK-NEXT:    ret <8 x float> [[TMP2]]
;
  %a = tail call <8 x float> @llvm.x86.avx512.mask.permvar.sf.256(<8 x
float> %a0, <8 x i32> , <8 x float> %passthru, i8 %mask) ret <8 x float> %a } define <8 x float> @zero_test_permvar_sf_256(<8 x float> %a0) { ; CHECK-LABEL: @zero_test_permvar_sf_256( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x float> [[A0:%.*]], <8 x float> undef, <8 x i32> zeroinitializer ; CHECK-NEXT: ret <8 x float> [[TMP1]] ; %a = tail call <8 x float> @llvm.x86.avx512.mask.permvar.sf.256(<8 x float> %a0, <8 x i32> zeroinitializer, <8 x float> undef, i8 -1) ret <8 x float> %a } define <8 x float> @zero_test_permvar_sf_256_mask(<8 x float> %a0, <8 x float> %passthru, i8 %mask) { ; CHECK-LABEL: @zero_test_permvar_sf_256_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x float> [[A0:%.*]], <8 x float> undef, <8 x i32> zeroinitializer ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> ; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x float> [[TMP1]], <8 x float> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <8 x float> [[TMP3]] ; %a = tail call <8 x float> @llvm.x86.avx512.mask.permvar.sf.256(<8 x float> %a0, <8 x i32> zeroinitializer, <8 x float> %passthru, i8 %mask) ret <8 x float> %a } define <8 x float> @shuffle_test_permvar_sf_256(<8 x float> %a0) { ; CHECK-LABEL: @shuffle_test_permvar_sf_256( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x float> [[A0:%.*]], <8 x float> undef, <8 x i32> ; CHECK-NEXT: ret <8 x float> [[TMP1]] ; %a = tail call <8 x float> @llvm.x86.avx512.mask.permvar.sf.256(<8 x float> %a0, <8 x i32> , <8 x float> undef, i8 -1) ret <8 x float> %a } define <8 x float> @shuffle_test_permvar_sf_256_mask(<8 x float> %a0, <8 x float> %passthru, i8 %mask) { ; CHECK-LABEL: @shuffle_test_permvar_sf_256_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x float> [[A0:%.*]], <8 x float> undef, <8 x i32> ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> ; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x float> [[TMP1]], <8 x float> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <8 x float> [[TMP3]] ; %a = tail call <8 x float> @llvm.x86.avx512.mask.permvar.sf.256(<8 x float> %a0, <8 x i32> , <8 x float> %passthru, i8 %mask) ret <8 x float> %a } define <8 x float> @undef_test_permvar_sf_256(<8 x float> %a0) { ; CHECK-LABEL: @undef_test_permvar_sf_256( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x float> [[A0:%.*]], <8 x float> undef, <8 x i32> ; CHECK-NEXT: ret <8 x float> [[TMP1]] ; %a = tail call <8 x float> @llvm.x86.avx512.mask.permvar.sf.256(<8 x float> %a0, <8 x i32> , <8 x float> undef, i8 -1) ret <8 x float> %a } define <8 x float> @undef_test_permvar_sf_256_mask(<8 x float> %a0, <8 x float> %passthru, i8 %mask) { ; CHECK-LABEL: @undef_test_permvar_sf_256_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x float> [[A0:%.*]], <8 x float> undef, <8 x i32> ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> ; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x float> [[TMP1]], <8 x float> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <8 x float> [[TMP3]] ; %a = tail call <8 x float> @llvm.x86.avx512.mask.permvar.sf.256(<8 x float> %a0, <8 x i32> , <8 x float> %passthru, i8 %mask) ret <8 x float> %a } declare <4 x i64> @llvm.x86.avx512.mask.permvar.di.256(<4 x i64>, <4 x i64>, <4 x i64>, i8) define <4 x i64> @identity_test_permvar_di_256(<4 x i64> %a0) { ; CHECK-LABEL: @identity_test_permvar_di_256( ; CHECK-NEXT: ret <4 x i64> [[A0:%.*]] ; %a = tail call <4 x i64> @llvm.x86.avx512.mask.permvar.di.256(<4 x i64> %a0, <4 x i64> , <4 x i64> undef, i8 -1) ret <4 x i64> %a } define <4 x i64> @identity_test_permvar_di_256_mask(<4 x i64> 
%a0, <4 x i64> %passthru, i8 %mask) { ; CHECK-LABEL: @identity_test_permvar_di_256_mask( ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> ; CHECK-NEXT: [[EXTRACT:%.*]] = shufflevector <8 x i1> [[TMP1]], <8 x i1> undef, <4 x i32> ; CHECK-NEXT: [[TMP2:%.*]] = select <4 x i1> [[EXTRACT]], <4 x i64> [[A0:%.*]], <4 x i64> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <4 x i64> [[TMP2]] ; %a = tail call <4 x i64> @llvm.x86.avx512.mask.permvar.di.256(<4 x i64> %a0, <4 x i64> , <4 x i64> %passthru, i8 %mask) ret <4 x i64> %a } define <4 x i64> @zero_test_permvar_di_256(<4 x i64> %a0) { ; CHECK-LABEL: @zero_test_permvar_di_256( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i64> [[A0:%.*]], <4 x i64> undef, <4 x i32> zeroinitializer ; CHECK-NEXT: ret <4 x i64> [[TMP1]] ; %a = tail call <4 x i64> @llvm.x86.avx512.mask.permvar.di.256(<4 x i64> %a0, <4 x i64> zeroinitializer, <4 x i64> undef, i8 -1) ret <4 x i64> %a } define <4 x i64> @zero_test_permvar_di_256_mask(<4 x i64> %a0, <4 x i64> %passthru, i8 %mask) { ; CHECK-LABEL: @zero_test_permvar_di_256_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i64> [[A0:%.*]], <4 x i64> undef, <4 x i32> zeroinitializer ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> ; CHECK-NEXT: [[EXTRACT:%.*]] = shufflevector <8 x i1> [[TMP2]], <8 x i1> undef, <4 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[EXTRACT]], <4 x i64> [[TMP1]], <4 x i64> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <4 x i64> [[TMP3]] ; %a = tail call <4 x i64> @llvm.x86.avx512.mask.permvar.di.256(<4 x i64> %a0, <4 x i64> zeroinitializer, <4 x i64> %passthru, i8 %mask) ret <4 x i64> %a } define <4 x i64> @shuffle_test_permvar_di_256(<4 x i64> %a0) { ; CHECK-LABEL: @shuffle_test_permvar_di_256( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i64> [[A0:%.*]], <4 x i64> undef, <4 x i32> ; CHECK-NEXT: ret <4 x i64> [[TMP1]] ; %a = tail call <4 x i64> @llvm.x86.avx512.mask.permvar.di.256(<4 x i64> %a0, <4 x i64> , <4 x i64> undef, i8 -1) ret <4 x i64> %a } define <4 x i64> @shuffle_test_permvar_di_256_mask(<4 x i64> %a0, <4 x i64> %passthru, i8 %mask) { ; CHECK-LABEL: @shuffle_test_permvar_di_256_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i64> [[A0:%.*]], <4 x i64> undef, <4 x i32> ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> ; CHECK-NEXT: [[EXTRACT:%.*]] = shufflevector <8 x i1> [[TMP2]], <8 x i1> undef, <4 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[EXTRACT]], <4 x i64> [[TMP1]], <4 x i64> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <4 x i64> [[TMP3]] ; %a = tail call <4 x i64> @llvm.x86.avx512.mask.permvar.di.256(<4 x i64> %a0, <4 x i64> , <4 x i64> %passthru, i8 %mask) ret <4 x i64> %a } define <4 x i64> @undef_test_permvar_di_256(<4 x i64> %a0) { ; CHECK-LABEL: @undef_test_permvar_di_256( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i64> [[A0:%.*]], <4 x i64> undef, <4 x i32> ; CHECK-NEXT: ret <4 x i64> [[TMP1]] ; %a = tail call <4 x i64> @llvm.x86.avx512.mask.permvar.di.256(<4 x i64> %a0, <4 x i64> , <4 x i64> undef, i8 -1) ret <4 x i64> %a } define <4 x i64> @undef_test_permvar_di_256_mask(<4 x i64> %a0, <4 x i64> %passthru, i8 %mask) { ; CHECK-LABEL: @undef_test_permvar_di_256_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i64> [[A0:%.*]], <4 x i64> undef, <4 x i32> ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> ; CHECK-NEXT: [[EXTRACT:%.*]] = shufflevector <8 x i1> [[TMP2]], <8 x i1> undef, <4 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[EXTRACT]], <4 x i64> [[TMP1]], <4 x i64> [[PASSTHRU:%.*]] ; 
CHECK-NEXT: ret <4 x i64> [[TMP3]] ; %a = tail call <4 x i64> @llvm.x86.avx512.mask.permvar.di.256(<4 x i64> %a0, <4 x i64> , <4 x i64> %passthru, i8 %mask) ret <4 x i64> %a } declare <4 x double> @llvm.x86.avx512.mask.permvar.df.256(<4 x double>, <4 x i64>, <4 x double>, i8) define <4 x double> @identity_test_permvar_df_256(<4 x double> %a0) { ; CHECK-LABEL: @identity_test_permvar_df_256( ; CHECK-NEXT: ret <4 x double> [[A0:%.*]] ; %a = tail call <4 x double> @llvm.x86.avx512.mask.permvar.df.256(<4 x double> %a0, <4 x i64> , <4 x double> undef, i8 -1) ret <4 x double> %a } define <4 x double> @identity_test_permvar_df_256_mask(<4 x double> %a0, <4 x double> %passthru, i8 %mask) { ; CHECK-LABEL: @identity_test_permvar_df_256_mask( ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> ; CHECK-NEXT: [[EXTRACT:%.*]] = shufflevector <8 x i1> [[TMP1]], <8 x i1> undef, <4 x i32> ; CHECK-NEXT: [[TMP2:%.*]] = select <4 x i1> [[EXTRACT]], <4 x double> [[A0:%.*]], <4 x double> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <4 x double> [[TMP2]] ; %a = tail call <4 x double> @llvm.x86.avx512.mask.permvar.df.256(<4 x double> %a0, <4 x i64> , <4 x double> %passthru, i8 %mask) ret <4 x double> %a } define <4 x double> @zero_test_permvar_df_256(<4 x double> %a0) { ; CHECK-LABEL: @zero_test_permvar_df_256( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> [[A0:%.*]], <4 x double> undef, <4 x i32> zeroinitializer ; CHECK-NEXT: ret <4 x double> [[TMP1]] ; %a = tail call <4 x double> @llvm.x86.avx512.mask.permvar.df.256(<4 x double> %a0, <4 x i64> zeroinitializer, <4 x double> undef, i8 -1) ret <4 x double> %a } define <4 x double> @zero_test_permvar_df_256_mask(<4 x double> %a0, <4 x double> %passthru, i8 %mask) { ; CHECK-LABEL: @zero_test_permvar_df_256_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> [[A0:%.*]], <4 x double> undef, <4 x i32> zeroinitializer ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> ; CHECK-NEXT: [[EXTRACT:%.*]] = shufflevector <8 x i1> [[TMP2]], <8 x i1> undef, <4 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[EXTRACT]], <4 x double> [[TMP1]], <4 x double> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <4 x double> [[TMP3]] ; %a = tail call <4 x double> @llvm.x86.avx512.mask.permvar.df.256(<4 x double> %a0, <4 x i64> zeroinitializer, <4 x double> %passthru, i8 %mask) ret <4 x double> %a } define <4 x double> @shuffle_test_permvar_df_256(<4 x double> %a0) { ; CHECK-LABEL: @shuffle_test_permvar_df_256( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> [[A0:%.*]], <4 x double> undef, <4 x i32> ; CHECK-NEXT: ret <4 x double> [[TMP1]] ; %a = tail call <4 x double> @llvm.x86.avx512.mask.permvar.df.256(<4 x double> %a0, <4 x i64> , <4 x double> undef, i8 -1) ret <4 x double> %a } define <4 x double> @shuffle_test_permvar_df_256_mask(<4 x double> %a0, <4 x double> %passthru, i8 %mask) { ; CHECK-LABEL: @shuffle_test_permvar_df_256_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> [[A0:%.*]], <4 x double> undef, <4 x i32> ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> ; CHECK-NEXT: [[EXTRACT:%.*]] = shufflevector <8 x i1> [[TMP2]], <8 x i1> undef, <4 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[EXTRACT]], <4 x double> [[TMP1]], <4 x double> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <4 x double> [[TMP3]] ; %a = tail call <4 x double> @llvm.x86.avx512.mask.permvar.df.256(<4 x double> %a0, <4 x i64> , <4 x double> %passthru, i8 %mask) ret <4 x double> %a } define <4 x double> @undef_test_permvar_df_256(<4 x double> %a0) { ; 
CHECK-LABEL: @undef_test_permvar_df_256( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> [[A0:%.*]], <4 x double> undef, <4 x i32> ; CHECK-NEXT: ret <4 x double> [[TMP1]] ; %a = tail call <4 x double> @llvm.x86.avx512.mask.permvar.df.256(<4 x double> %a0, <4 x i64> , <4 x double> undef, i8 -1) ret <4 x double> %a } define <4 x double> @undef_test_permvar_df_256_mask(<4 x double> %a0, <4 x double> %passthru, i8 %mask) { ; CHECK-LABEL: @undef_test_permvar_df_256_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> [[A0:%.*]], <4 x double> undef, <4 x i32> ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> ; CHECK-NEXT: [[EXTRACT:%.*]] = shufflevector <8 x i1> [[TMP2]], <8 x i1> undef, <4 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[EXTRACT]], <4 x double> [[TMP1]], <4 x double> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <4 x double> [[TMP3]] ; %a = tail call <4 x double> @llvm.x86.avx512.mask.permvar.df.256(<4 x double> %a0, <4 x i64> , <4 x double> %passthru, i8 %mask) ret <4 x double> %a } declare <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32>, <16 x i32>, <16 x i32>, i16) define <16 x i32> @identity_test_permvar_si_512(<16 x i32> %a0) { ; CHECK-LABEL: @identity_test_permvar_si_512( ; CHECK-NEXT: ret <16 x i32> [[A0:%.*]] ; %a = tail call <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32> %a0, <16 x i32> , <16 x i32> undef, i16 -1) ret <16 x i32> %a } define <16 x i32> @identity_test_permvar_si_512_mask(<16 x i32> %a0, <16 x i32> %passthru, i16 %mask) { ; CHECK-LABEL: @identity_test_permvar_si_512_mask( ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> ; CHECK-NEXT: [[TMP2:%.*]] = select <16 x i1> [[TMP1]], <16 x i32> [[A0:%.*]], <16 x i32> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <16 x i32> [[TMP2]] ; %a = tail call <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32> %a0, <16 x i32> , <16 x i32> %passthru, i16 %mask) ret <16 x i32> %a } define <16 x i32> @zero_test_permvar_si_512(<16 x i32> %a0) { ; CHECK-LABEL: @zero_test_permvar_si_512( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i32> [[A0:%.*]], <16 x i32> undef, <16 x i32> zeroinitializer ; CHECK-NEXT: ret <16 x i32> [[TMP1]] ; %a = tail call <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32> %a0, <16 x i32> zeroinitializer, <16 x i32> undef, i16 -1) ret <16 x i32> %a } define <16 x i32> @zero_test_permvar_si_512_mask(<16 x i32> %a0, <16 x i32> %passthru, i16 %mask) { ; CHECK-LABEL: @zero_test_permvar_si_512_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i32> [[A0:%.*]], <16 x i32> undef, <16 x i32> zeroinitializer ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> ; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x i32> [[TMP1]], <16 x i32> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <16 x i32> [[TMP3]] ; %a = tail call <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32> %a0, <16 x i32> zeroinitializer, <16 x i32> %passthru, i16 %mask) ret <16 x i32> %a } define <16 x i32> @shuffle_test_permvar_si_512(<16 x i32> %a0) { ; CHECK-LABEL: @shuffle_test_permvar_si_512( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i32> [[A0:%.*]], <16 x i32> undef, <16 x i32> ; CHECK-NEXT: ret <16 x i32> [[TMP1]] ; %a = tail call <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32> %a0, <16 x i32> , <16 x i32> undef, i16 -1) ret <16 x i32> %a } define <16 x i32> @shuffle_test_permvar_si_512_mask(<16 x i32> %a0, <16 x i32> %passthru, i16 %mask) { ; CHECK-LABEL: @shuffle_test_permvar_si_512_mask( ; CHECK-NEXT: [[TMP1:%.*]] = 
shufflevector <16 x i32> [[A0:%.*]], <16 x i32> undef, <16 x i32> ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> ; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x i32> [[TMP1]], <16 x i32> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <16 x i32> [[TMP3]] ; %a = tail call <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32> %a0, <16 x i32> , <16 x i32> %passthru, i16 %mask) ret <16 x i32> %a } define <16 x i32> @undef_test_permvar_si_512(<16 x i32> %a0) { ; CHECK-LABEL: @undef_test_permvar_si_512( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i32> [[A0:%.*]], <16 x i32> undef, <16 x i32> ; CHECK-NEXT: ret <16 x i32> [[TMP1]] ; %a = tail call <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32> %a0, <16 x i32> , <16 x i32> undef, i16 -1) ret <16 x i32> %a } define <16 x i32> @undef_test_permvar_si_512_mask(<16 x i32> %a0, <16 x i32> %passthru, i16 %mask) { ; CHECK-LABEL: @undef_test_permvar_si_512_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i32> [[A0:%.*]], <16 x i32> undef, <16 x i32> ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> ; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x i32> [[TMP1]], <16 x i32> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <16 x i32> [[TMP3]] ; %a = tail call <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32> %a0, <16 x i32> , <16 x i32> %passthru, i16 %mask) ret <16 x i32> %a } declare <16 x float> @llvm.x86.avx512.mask.permvar.sf.512(<16 x float>, <16 x i32>, <16 x float>, i16) define <16 x float> @identity_test_permvar_sf_512(<16 x float> %a0) { ; CHECK-LABEL: @identity_test_permvar_sf_512( ; CHECK-NEXT: ret <16 x float> [[A0:%.*]] ; %a = tail call <16 x float> @llvm.x86.avx512.mask.permvar.sf.512(<16 x float> %a0, <16 x i32> , <16 x float> undef, i16 -1) ret <16 x float> %a } define <16 x float> @identity_test_permvar_sf_512_mask(<16 x float> %a0, <16 x float> %passthru, i16 %mask) { ; CHECK-LABEL: @identity_test_permvar_sf_512_mask( ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> ; CHECK-NEXT: [[TMP2:%.*]] = select <16 x i1> [[TMP1]], <16 x float> [[A0:%.*]], <16 x float> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <16 x float> [[TMP2]] ; %a = tail call <16 x float> @llvm.x86.avx512.mask.permvar.sf.512(<16 x float> %a0, <16 x i32> , <16 x float> %passthru, i16 %mask) ret <16 x float> %a } define <16 x float> @zero_test_permvar_sf_512(<16 x float> %a0) { ; CHECK-LABEL: @zero_test_permvar_sf_512( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x float> [[A0:%.*]], <16 x float> undef, <16 x i32> zeroinitializer ; CHECK-NEXT: ret <16 x float> [[TMP1]] ; %a = tail call <16 x float> @llvm.x86.avx512.mask.permvar.sf.512(<16 x float> %a0, <16 x i32> zeroinitializer, <16 x float> undef, i16 -1) ret <16 x float> %a } define <16 x float> @zero_test_permvar_sf_512_mask(<16 x float> %a0, <16 x float> %passthru, i16 %mask) { ; CHECK-LABEL: @zero_test_permvar_sf_512_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x float> [[A0:%.*]], <16 x float> undef, <16 x i32> zeroinitializer ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> ; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x float> [[TMP1]], <16 x float> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <16 x float> [[TMP3]] ; %a = tail call <16 x float> @llvm.x86.avx512.mask.permvar.sf.512(<16 x float> %a0, <16 x i32> zeroinitializer, <16 x float> %passthru, i16 %mask) ret <16 x float> %a } define <16 x float> @shuffle_test_permvar_sf_512(<16 x float> %a0) { ; CHECK-LABEL: @shuffle_test_permvar_sf_512( ; CHECK-NEXT: 
[[TMP1:%.*]] = shufflevector <16 x float> [[A0:%.*]], <16 x float> undef, <16 x i32> ; CHECK-NEXT: ret <16 x float> [[TMP1]] ; %a = tail call <16 x float> @llvm.x86.avx512.mask.permvar.sf.512(<16 x float> %a0, <16 x i32> , <16 x float> undef, i16 -1) ret <16 x float> %a } define <16 x float> @shuffle_test_permvar_sf_512_mask(<16 x float> %a0, <16 x float> %passthru, i16 %mask) { ; CHECK-LABEL: @shuffle_test_permvar_sf_512_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x float> [[A0:%.*]], <16 x float> undef, <16 x i32> ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> ; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x float> [[TMP1]], <16 x float> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <16 x float> [[TMP3]] ; %a = tail call <16 x float> @llvm.x86.avx512.mask.permvar.sf.512(<16 x float> %a0, <16 x i32> , <16 x float> %passthru, i16 %mask) ret <16 x float> %a } define <16 x float> @undef_test_permvar_sf_512(<16 x float> %a0) { ; CHECK-LABEL: @undef_test_permvar_sf_512( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x float> [[A0:%.*]], <16 x float> undef, <16 x i32> ; CHECK-NEXT: ret <16 x float> [[TMP1]] ; %a = tail call <16 x float> @llvm.x86.avx512.mask.permvar.sf.512(<16 x float> %a0, <16 x i32> , <16 x float> undef, i16 -1) ret <16 x float> %a } define <16 x float> @undef_test_permvar_sf_512_mask(<16 x float> %a0, <16 x float> %passthru, i16 %mask) { ; CHECK-LABEL: @undef_test_permvar_sf_512_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x float> [[A0:%.*]], <16 x float> undef, <16 x i32> ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> ; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x float> [[TMP1]], <16 x float> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <16 x float> [[TMP3]] ; %a = tail call <16 x float> @llvm.x86.avx512.mask.permvar.sf.512(<16 x float> %a0, <16 x i32> , <16 x float> %passthru, i16 %mask) ret <16 x float> %a } declare <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64>, <8 x i64>, <8 x i64>, i8) define <8 x i64> @identity_test_permvar_di_512(<8 x i64> %a0) { ; CHECK-LABEL: @identity_test_permvar_di_512( ; CHECK-NEXT: ret <8 x i64> [[A0:%.*]] ; %a = tail call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %a0, <8 x i64> , <8 x i64> undef, i8 -1) ret <8 x i64> %a } define <8 x i64> @identity_test_permvar_di_512_mask(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask) { ; CHECK-LABEL: @identity_test_permvar_di_512_mask( ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> ; CHECK-NEXT: [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x i64> [[A0:%.*]], <8 x i64> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <8 x i64> [[TMP2]] ; %a = tail call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %a0, <8 x i64> , <8 x i64> %passthru, i8 %mask) ret <8 x i64> %a } define <8 x i64> @zero_test_permvar_di_512(<8 x i64> %a0) { ; CHECK-LABEL: @zero_test_permvar_di_512( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i64> [[A0:%.*]], <8 x i64> undef, <8 x i32> zeroinitializer ; CHECK-NEXT: ret <8 x i64> [[TMP1]] ; %a = tail call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %a0, <8 x i64> zeroinitializer, <8 x i64> undef, i8 -1) ret <8 x i64> %a } define <8 x i64> @zero_test_permvar_di_512_mask(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask) { ; CHECK-LABEL: @zero_test_permvar_di_512_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i64> [[A0:%.*]], <8 x i64> undef, <8 x i32> zeroinitializer ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> ; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> 
[[TMP2]], <8 x i64> [[TMP1]], <8 x i64> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <8 x i64> [[TMP3]] ; %a = tail call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %a0, <8 x i64> zeroinitializer, <8 x i64> %passthru, i8 %mask) ret <8 x i64> %a } define <8 x i64> @shuffle_test_permvar_di_512(<8 x i64> %a0) { ; CHECK-LABEL: @shuffle_test_permvar_di_512( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i64> [[A0:%.*]], <8 x i64> undef, <8 x i32> ; CHECK-NEXT: ret <8 x i64> [[TMP1]] ; %a = tail call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %a0, <8 x i64> , <8 x i64> undef, i8 -1) ret <8 x i64> %a } define <8 x i64> @shuffle_test_permvar_di_512_mask(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask) { ; CHECK-LABEL: @shuffle_test_permvar_di_512_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i64> [[A0:%.*]], <8 x i64> undef, <8 x i32> ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> ; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x i64> [[TMP1]], <8 x i64> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <8 x i64> [[TMP3]] ; %a = tail call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %a0, <8 x i64> , <8 x i64> %passthru, i8 %mask) ret <8 x i64> %a } define <8 x i64> @undef_test_permvar_di_512(<8 x i64> %a0) { ; CHECK-LABEL: @undef_test_permvar_di_512( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i64> [[A0:%.*]], <8 x i64> undef, <8 x i32> ; CHECK-NEXT: ret <8 x i64> [[TMP1]] ; %a = tail call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %a0, <8 x i64> , <8 x i64> undef, i8 -1) ret <8 x i64> %a } define <8 x i64> @undef_test_permvar_di_512_mask(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask) { ; CHECK-LABEL: @undef_test_permvar_di_512_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i64> [[A0:%.*]], <8 x i64> undef, <8 x i32> ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> ; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x i64> [[TMP1]], <8 x i64> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <8 x i64> [[TMP3]] ; %a = tail call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %a0, <8 x i64> , <8 x i64> %passthru, i8 %mask) ret <8 x i64> %a } declare <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double>, <8 x i64>, <8 x double>, i8) define <8 x double> @identity_test_permvar_df_512(<8 x double> %a0) { ; CHECK-LABEL: @identity_test_permvar_df_512( ; CHECK-NEXT: ret <8 x double> [[A0:%.*]] ; %a = tail call <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double> %a0, <8 x i64> , <8 x double> undef, i8 -1) ret <8 x double> %a } define <8 x double> @identity_test_permvar_df_512_mask(<8 x double> %a0, <8 x double> %passthru, i8 %mask) { ; CHECK-LABEL: @identity_test_permvar_df_512_mask( ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> ; CHECK-NEXT: [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x double> [[A0:%.*]], <8 x double> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <8 x double> [[TMP2]] ; %a = tail call <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double> %a0, <8 x i64> , <8 x double> %passthru, i8 %mask) ret <8 x double> %a } define <8 x double> @zero_test_permvar_df_512(<8 x double> %a0) { ; CHECK-LABEL: @zero_test_permvar_df_512( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x double> [[A0:%.*]], <8 x double> undef, <8 x i32> zeroinitializer ; CHECK-NEXT: ret <8 x double> [[TMP1]] ; %a = tail call <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double> %a0, <8 x i64> zeroinitializer, <8 x double> undef, i8 -1) ret <8 x double> %a } define <8 x double> 
@zero_test_permvar_df_512_mask(<8 x double> %a0, <8 x double> %passthru, i8 %mask) { ; CHECK-LABEL: @zero_test_permvar_df_512_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x double> [[A0:%.*]], <8 x double> undef, <8 x i32> zeroinitializer ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> ; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x double> [[TMP1]], <8 x double> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <8 x double> [[TMP3]] ; %a = tail call <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double> %a0, <8 x i64> zeroinitializer, <8 x double> %passthru, i8 %mask) ret <8 x double> %a } define <8 x double> @shuffle_test_permvar_df_512(<8 x double> %a0) { ; CHECK-LABEL: @shuffle_test_permvar_df_512( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x double> [[A0:%.*]], <8 x double> undef, <8 x i32> ; CHECK-NEXT: ret <8 x double> [[TMP1]] ; %a = tail call <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double> %a0, <8 x i64> , <8 x double> undef, i8 -1) ret <8 x double> %a } define <8 x double> @shuffle_test_permvar_df_512_mask(<8 x double> %a0, <8 x double> %passthru, i8 %mask) { ; CHECK-LABEL: @shuffle_test_permvar_df_512_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x double> [[A0:%.*]], <8 x double> undef, <8 x i32> ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> ; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x double> [[TMP1]], <8 x double> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <8 x double> [[TMP3]] ; %a = tail call <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double> %a0, <8 x i64> , <8 x double> %passthru, i8 %mask) ret <8 x double> %a } define <8 x double> @undef_test_permvar_df_512(<8 x double> %a0) { ; CHECK-LABEL: @undef_test_permvar_df_512( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x double> [[A0:%.*]], <8 x double> undef, <8 x i32> ; CHECK-NEXT: ret <8 x double> [[TMP1]] ; %a = tail call <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double> %a0, <8 x i64> , <8 x double> undef, i8 -1) ret <8 x double> %a } define <8 x double> @undef_test_permvar_df_512_mask(<8 x double> %a0, <8 x double> %passthru, i8 %mask) { ; CHECK-LABEL: @undef_test_permvar_df_512_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x double> [[A0:%.*]], <8 x double> undef, <8 x i32> ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> ; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x double> [[TMP1]], <8 x double> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <8 x double> [[TMP3]] ; %a = tail call <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double> %a0, <8 x i64> , <8 x double> %passthru, i8 %mask) ret <8 x double> %a } declare <8 x i16> @llvm.x86.avx512.mask.permvar.hi.128(<8 x i16>, <8 x i16>, <8 x i16>, i8) define <8 x i16> @identity_test_permvar_hi_128(<8 x i16> %a0) { ; CHECK-LABEL: @identity_test_permvar_hi_128( ; CHECK-NEXT: ret <8 x i16> [[A0:%.*]] ; %a = tail call <8 x i16> @llvm.x86.avx512.mask.permvar.hi.128(<8 x i16> %a0, <8 x i16> , <8 x i16> undef, i8 -1) ret <8 x i16> %a } define <8 x i16> @identity_test_permvar_hi_128_mask(<8 x i16> %a0, <8 x i16> %passthru, i8 %mask) { ; CHECK-LABEL: @identity_test_permvar_hi_128_mask( ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> ; CHECK-NEXT: [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x i16> [[A0:%.*]], <8 x i16> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <8 x i16> [[TMP2]] ; %a = tail call <8 x i16> @llvm.x86.avx512.mask.permvar.hi.128(<8 x i16> %a0, <8 x i16> , <8 x i16> %passthru, i8 %mask) ret <8 x i16> %a } define <8 x i16> 
@zero_test_permvar_hi_128(<8 x i16> %a0) { ; CHECK-LABEL: @zero_test_permvar_hi_128( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> [[A0:%.*]], <8 x i16> undef, <8 x i32> zeroinitializer ; CHECK-NEXT: ret <8 x i16> [[TMP1]] ; %a = tail call <8 x i16> @llvm.x86.avx512.mask.permvar.hi.128(<8 x i16> %a0, <8 x i16> zeroinitializer, <8 x i16> undef, i8 -1) ret <8 x i16> %a } define <8 x i16> @zero_test_permvar_hi_128_mask(<8 x i16> %a0, <8 x i16> %passthru, i8 %mask) { ; CHECK-LABEL: @zero_test_permvar_hi_128_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> [[A0:%.*]], <8 x i16> undef, <8 x i32> zeroinitializer ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> ; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x i16> [[TMP1]], <8 x i16> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <8 x i16> [[TMP3]] ; %a = tail call <8 x i16> @llvm.x86.avx512.mask.permvar.hi.128(<8 x i16> %a0, <8 x i16> zeroinitializer, <8 x i16> %passthru, i8 %mask) ret <8 x i16> %a } define <8 x i16> @shuffle_test_permvar_hi_128(<8 x i16> %a0) { ; CHECK-LABEL: @shuffle_test_permvar_hi_128( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> [[A0:%.*]], <8 x i16> undef, <8 x i32> ; CHECK-NEXT: ret <8 x i16> [[TMP1]] ; %a = tail call <8 x i16> @llvm.x86.avx512.mask.permvar.hi.128(<8 x i16> %a0, <8 x i16> , <8 x i16> undef, i8 -1) ret <8 x i16> %a } define <8 x i16> @shuffle_test_permvar_hi_128_mask(<8 x i16> %a0, <8 x i16> %passthru, i8 %mask) { ; CHECK-LABEL: @shuffle_test_permvar_hi_128_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> [[A0:%.*]], <8 x i16> undef, <8 x i32> ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> ; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x i16> [[TMP1]], <8 x i16> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <8 x i16> [[TMP3]] ; %a = tail call <8 x i16> @llvm.x86.avx512.mask.permvar.hi.128(<8 x i16> %a0, <8 x i16> , <8 x i16> %passthru, i8 %mask) ret <8 x i16> %a } define <8 x i16> @undef_test_permvar_hi_128(<8 x i16> %a0) { ; CHECK-LABEL: @undef_test_permvar_hi_128( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> [[A0:%.*]], <8 x i16> undef, <8 x i32> ; CHECK-NEXT: ret <8 x i16> [[TMP1]] ; %a = tail call <8 x i16> @llvm.x86.avx512.mask.permvar.hi.128(<8 x i16> %a0, <8 x i16> , <8 x i16> undef, i8 -1) ret <8 x i16> %a } define <8 x i16> @undef_test_permvar_hi_128_mask(<8 x i16> %a0, <8 x i16> %passthru, i8 %mask) { ; CHECK-LABEL: @undef_test_permvar_hi_128_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> [[A0:%.*]], <8 x i16> undef, <8 x i32> ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> ; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x i16> [[TMP1]], <8 x i16> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <8 x i16> [[TMP3]] ; %a = tail call <8 x i16> @llvm.x86.avx512.mask.permvar.hi.128(<8 x i16> %a0, <8 x i16> , <8 x i16> %passthru, i8 %mask) ret <8 x i16> %a } declare <16 x i16> @llvm.x86.avx512.mask.permvar.hi.256(<16 x i16>, <16 x i16>, <16 x i16>, i16) define <16 x i16> @identity_test_permvar_hi_256(<16 x i16> %a0) { ; CHECK-LABEL: @identity_test_permvar_hi_256( ; CHECK-NEXT: ret <16 x i16> [[A0:%.*]] ; %a = tail call <16 x i16> @llvm.x86.avx512.mask.permvar.hi.256(<16 x i16> %a0, <16 x i16> , <16 x i16> undef, i16 -1) ret <16 x i16> %a } define <16 x i16> @identity_test_permvar_hi_256_mask(<16 x i16> %a0, <16 x i16> %passthru, i16 %mask) { ; CHECK-LABEL: @identity_test_permvar_hi_256_mask( ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> ; CHECK-NEXT: [[TMP2:%.*]] = select <16 x i1> 
[[TMP1]], <16 x i16> [[A0:%.*]], <16 x i16> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <16 x i16> [[TMP2]] ; %a = tail call <16 x i16> @llvm.x86.avx512.mask.permvar.hi.256(<16 x i16> %a0, <16 x i16> , <16 x i16> %passthru, i16 %mask) ret <16 x i16> %a } define <16 x i16> @zero_test_permvar_hi_256(<16 x i16> %a0) { ; CHECK-LABEL: @zero_test_permvar_hi_256( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i16> [[A0:%.*]], <16 x i16> undef, <16 x i32> zeroinitializer ; CHECK-NEXT: ret <16 x i16> [[TMP1]] ; %a = tail call <16 x i16> @llvm.x86.avx512.mask.permvar.hi.256(<16 x i16> %a0, <16 x i16> zeroinitializer, <16 x i16> undef, i16 -1) ret <16 x i16> %a } define <16 x i16> @zero_test_permvar_hi_256_mask(<16 x i16> %a0, <16 x i16> %passthru, i16 %mask) { ; CHECK-LABEL: @zero_test_permvar_hi_256_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i16> [[A0:%.*]], <16 x i16> undef, <16 x i32> zeroinitializer ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> ; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x i16> [[TMP1]], <16 x i16> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <16 x i16> [[TMP3]] ; %a = tail call <16 x i16> @llvm.x86.avx512.mask.permvar.hi.256(<16 x i16> %a0, <16 x i16> zeroinitializer, <16 x i16> %passthru, i16 %mask) ret <16 x i16> %a } define <16 x i16> @shuffle_test_permvar_hi_256(<16 x i16> %a0) { ; CHECK-LABEL: @shuffle_test_permvar_hi_256( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i16> [[A0:%.*]], <16 x i16> undef, <16 x i32> ; CHECK-NEXT: ret <16 x i16> [[TMP1]] ; %a = tail call <16 x i16> @llvm.x86.avx512.mask.permvar.hi.256(<16 x i16> %a0, <16 x i16> , <16 x i16> undef, i16 -1) ret <16 x i16> %a } define <16 x i16> @shuffle_test_permvar_hi_256_mask(<16 x i16> %a0, <16 x i16> %passthru, i16 %mask) { ; CHECK-LABEL: @shuffle_test_permvar_hi_256_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i16> [[A0:%.*]], <16 x i16> undef, <16 x i32> ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> ; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x i16> [[TMP1]], <16 x i16> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <16 x i16> [[TMP3]] ; %a = tail call <16 x i16> @llvm.x86.avx512.mask.permvar.hi.256(<16 x i16> %a0, <16 x i16> , <16 x i16> %passthru, i16 %mask) ret <16 x i16> %a } define <16 x i16> @undef_test_permvar_hi_256(<16 x i16> %a0) { ; CHECK-LABEL: @undef_test_permvar_hi_256( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i16> [[A0:%.*]], <16 x i16> undef, <16 x i32> ; CHECK-NEXT: ret <16 x i16> [[TMP1]] ; %a = tail call <16 x i16> @llvm.x86.avx512.mask.permvar.hi.256(<16 x i16> %a0, <16 x i16> , <16 x i16> undef, i16 -1) ret <16 x i16> %a } define <16 x i16> @undef_test_permvar_hi_256_mask(<16 x i16> %a0, <16 x i16> %passthru, i16 %mask) { ; CHECK-LABEL: @undef_test_permvar_hi_256_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i16> [[A0:%.*]], <16 x i16> undef, <16 x i32> ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> ; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x i16> [[TMP1]], <16 x i16> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <16 x i16> [[TMP3]] ; %a = tail call <16 x i16> @llvm.x86.avx512.mask.permvar.hi.256(<16 x i16> %a0, <16 x i16> , <16 x i16> %passthru, i16 %mask) ret <16 x i16> %a } declare <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16>, <32 x i16>, <32 x i16>, i32) define <32 x i16> @identity_test_permvar_hi_512(<32 x i16> %a0) { ; CHECK-LABEL: @identity_test_permvar_hi_512( ; CHECK-NEXT: ret <32 x i16> [[A0:%.*]] ; %a = tail call <32 x i16> 
@llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %a0, <32 x i16> , <32 x i16> undef, i32 -1) ret <32 x i16> %a } define <32 x i16> @identity_test_permvar_hi_512_mask(<32 x i16> %a0, <32 x i16> %passthru, i32 %mask) { ; CHECK-LABEL: @identity_test_permvar_hi_512_mask( ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[MASK:%.*]] to <32 x i1> ; CHECK-NEXT: [[TMP2:%.*]] = select <32 x i1> [[TMP1]], <32 x i16> [[A0:%.*]], <32 x i16> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <32 x i16> [[TMP2]] ; %a = tail call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %a0, <32 x i16> , <32 x i16> %passthru, i32 %mask) ret <32 x i16> %a } define <32 x i16> @zero_test_permvar_hi_512(<32 x i16> %a0) { ; CHECK-LABEL: @zero_test_permvar_hi_512( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i16> [[A0:%.*]], <32 x i16> undef, <32 x i32> zeroinitializer ; CHECK-NEXT: ret <32 x i16> [[TMP1]] ; %a = tail call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %a0, <32 x i16> zeroinitializer, <32 x i16> undef, i32 -1) ret <32 x i16> %a } define <32 x i16> @zero_test_permvar_hi_512_mask(<32 x i16> %a0, <32 x i16> %passthru, i32 %mask) { ; CHECK-LABEL: @zero_test_permvar_hi_512_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i16> [[A0:%.*]], <32 x i16> undef, <32 x i32> zeroinitializer ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 [[MASK:%.*]] to <32 x i1> ; CHECK-NEXT: [[TMP3:%.*]] = select <32 x i1> [[TMP2]], <32 x i16> [[TMP1]], <32 x i16> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <32 x i16> [[TMP3]] ; %a = tail call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %a0, <32 x i16> zeroinitializer, <32 x i16> %passthru, i32 %mask) ret <32 x i16> %a } define <32 x i16> @shuffle_test_permvar_hi_512(<32 x i16> %a0) { ; CHECK-LABEL: @shuffle_test_permvar_hi_512( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i16> [[A0:%.*]], <32 x i16> undef, <32 x i32> ; CHECK-NEXT: ret <32 x i16> [[TMP1]] ; %a = tail call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %a0, <32 x i16> , <32 x i16> undef, i32 -1) ret <32 x i16> %a } define <32 x i16> @shuffle_test_permvar_hi_512_mask(<32 x i16> %a0, <32 x i16> %passthru, i32 %mask) { ; CHECK-LABEL: @shuffle_test_permvar_hi_512_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i16> [[A0:%.*]], <32 x i16> undef, <32 x i32> ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 [[MASK:%.*]] to <32 x i1> ; CHECK-NEXT: [[TMP3:%.*]] = select <32 x i1> [[TMP2]], <32 x i16> [[TMP1]], <32 x i16> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <32 x i16> [[TMP3]] ; %a = tail call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %a0, <32 x i16> , <32 x i16> %passthru, i32 %mask) ret <32 x i16> %a } define <32 x i16> @undef_test_permvar_hi_512(<32 x i16> %a0) { ; CHECK-LABEL: @undef_test_permvar_hi_512( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i16> [[A0:%.*]], <32 x i16> undef, <32 x i32> ; CHECK-NEXT: ret <32 x i16> [[TMP1]] ; %a = tail call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %a0, <32 x i16> , <32 x i16> undef, i32 -1) ret <32 x i16> %a } define <32 x i16> @undef_test_permvar_hi_512_mask(<32 x i16> %a0, <32 x i16> %passthru, i32 %mask) { ; CHECK-LABEL: @undef_test_permvar_hi_512_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i16> [[A0:%.*]], <32 x i16> undef, <32 x i32> ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 [[MASK:%.*]] to <32 x i1> ; CHECK-NEXT: [[TMP3:%.*]] = select <32 x i1> [[TMP2]], <32 x i16> [[TMP1]], <32 x i16> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <32 x i16> [[TMP3]] ; %a = tail call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 
x i16> %a0, <32 x i16> , <32 x i16> %passthru, i32 %mask) ret <32 x i16> %a } declare <16 x i8> @llvm.x86.avx512.mask.permvar.qi.128(<16 x i8>, <16 x i8>, <16 x i8>, i16) define <16 x i8> @identity_test_permvar_qi_128(<16 x i8> %a0) { ; CHECK-LABEL: @identity_test_permvar_qi_128( ; CHECK-NEXT: ret <16 x i8> [[A0:%.*]] ; %a = tail call <16 x i8> @llvm.x86.avx512.mask.permvar.qi.128(<16 x i8> %a0, <16 x i8> , <16 x i8> undef, i16 -1) ret <16 x i8> %a } define <16 x i8> @identity_test_permvar_qi_128_mask(<16 x i8> %a0, <16 x i8> %passthru, i16 %mask) { ; CHECK-LABEL: @identity_test_permvar_qi_128_mask( ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> ; CHECK-NEXT: [[TMP2:%.*]] = select <16 x i1> [[TMP1]], <16 x i8> [[A0:%.*]], <16 x i8> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <16 x i8> [[TMP2]] ; %a = tail call <16 x i8> @llvm.x86.avx512.mask.permvar.qi.128(<16 x i8> %a0, <16 x i8> , <16 x i8> %passthru, i16 %mask) ret <16 x i8> %a } define <16 x i8> @zero_test_permvar_qi_128(<16 x i8> %a0) { ; CHECK-LABEL: @zero_test_permvar_qi_128( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> [[A0:%.*]], <16 x i8> undef, <16 x i32> zeroinitializer ; CHECK-NEXT: ret <16 x i8> [[TMP1]] ; %a = tail call <16 x i8> @llvm.x86.avx512.mask.permvar.qi.128(<16 x i8> %a0, <16 x i8> zeroinitializer, <16 x i8> undef, i16 -1) ret <16 x i8> %a } define <16 x i8> @zero_test_permvar_qi_128_mask(<16 x i8> %a0, <16 x i8> %passthru, i16 %mask) { ; CHECK-LABEL: @zero_test_permvar_qi_128_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> [[A0:%.*]], <16 x i8> undef, <16 x i32> zeroinitializer ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> ; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x i8> [[TMP1]], <16 x i8> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <16 x i8> [[TMP3]] ; %a = tail call <16 x i8> @llvm.x86.avx512.mask.permvar.qi.128(<16 x i8> %a0, <16 x i8> zeroinitializer, <16 x i8> %passthru, i16 %mask) ret <16 x i8> %a } define <16 x i8> @shuffle_test_permvar_qi_128(<16 x i8> %a0) { ; CHECK-LABEL: @shuffle_test_permvar_qi_128( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> [[A0:%.*]], <16 x i8> undef, <16 x i32> ; CHECK-NEXT: ret <16 x i8> [[TMP1]] ; %a = tail call <16 x i8> @llvm.x86.avx512.mask.permvar.qi.128(<16 x i8> %a0, <16 x i8> , <16 x i8> undef, i16 -1) ret <16 x i8> %a } define <16 x i8> @shuffle_test_permvar_qi_128_mask(<16 x i8> %a0, <16 x i8> %passthru, i16 %mask) { ; CHECK-LABEL: @shuffle_test_permvar_qi_128_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> [[A0:%.*]], <16 x i8> undef, <16 x i32> ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> ; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x i8> [[TMP1]], <16 x i8> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <16 x i8> [[TMP3]] ; %a = tail call <16 x i8> @llvm.x86.avx512.mask.permvar.qi.128(<16 x i8> %a0, <16 x i8> , <16 x i8> %passthru, i16 %mask) ret <16 x i8> %a } define <16 x i8> @undef_test_permvar_qi_128(<16 x i8> %a0) { ; CHECK-LABEL: @undef_test_permvar_qi_128( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> [[A0:%.*]], <16 x i8> undef, <16 x i32> ; CHECK-NEXT: ret <16 x i8> [[TMP1]] ; %a = tail call <16 x i8> @llvm.x86.avx512.mask.permvar.qi.128(<16 x i8> %a0, <16 x i8> , <16 x i8> undef, i16 -1) ret <16 x i8> %a } define <16 x i8> @undef_test_permvar_qi_128_mask(<16 x i8> %a0, <16 x i8> %passthru, i16 %mask) { ; CHECK-LABEL: @undef_test_permvar_qi_128_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> [[A0:%.*]], <16 x i8> undef, <16 x i32> ; 
CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> ; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x i8> [[TMP1]], <16 x i8> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <16 x i8> [[TMP3]] ; %a = tail call <16 x i8> @llvm.x86.avx512.mask.permvar.qi.128(<16 x i8> %a0, <16 x i8> , <16 x i8> %passthru, i16 %mask) ret <16 x i8> %a } declare <32 x i8> @llvm.x86.avx512.mask.permvar.qi.256(<32 x i8>, <32 x i8>, <32 x i8>, i32) define <32 x i8> @identity_test_permvar_qi_256(<32 x i8> %a0) { ; CHECK-LABEL: @identity_test_permvar_qi_256( ; CHECK-NEXT: ret <32 x i8> [[A0:%.*]] ; %a = tail call <32 x i8> @llvm.x86.avx512.mask.permvar.qi.256(<32 x i8> %a0, <32 x i8> , <32 x i8> undef, i32 -1) ret <32 x i8> %a } define <32 x i8> @identity_test_permvar_qi_256_mask(<32 x i8> %a0, <32 x i8> %passthru, i32 %mask) { ; CHECK-LABEL: @identity_test_permvar_qi_256_mask( ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[MASK:%.*]] to <32 x i1> ; CHECK-NEXT: [[TMP2:%.*]] = select <32 x i1> [[TMP1]], <32 x i8> [[A0:%.*]], <32 x i8> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <32 x i8> [[TMP2]] ; %a = tail call <32 x i8> @llvm.x86.avx512.mask.permvar.qi.256(<32 x i8> %a0, <32 x i8> , <32 x i8> %passthru, i32 %mask) ret <32 x i8> %a } define <32 x i8> @zero_test_permvar_qi_256(<32 x i8> %a0) { ; CHECK-LABEL: @zero_test_permvar_qi_256( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i8> [[A0:%.*]], <32 x i8> undef, <32 x i32> zeroinitializer ; CHECK-NEXT: ret <32 x i8> [[TMP1]] ; %a = tail call <32 x i8> @llvm.x86.avx512.mask.permvar.qi.256(<32 x i8> %a0, <32 x i8> zeroinitializer, <32 x i8> undef, i32 -1) ret <32 x i8> %a } define <32 x i8> @zero_test_permvar_qi_256_mask(<32 x i8> %a0, <32 x i8> %passthru, i32 %mask) { ; CHECK-LABEL: @zero_test_permvar_qi_256_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i8> [[A0:%.*]], <32 x i8> undef, <32 x i32> zeroinitializer ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 [[MASK:%.*]] to <32 x i1> ; CHECK-NEXT: [[TMP3:%.*]] = select <32 x i1> [[TMP2]], <32 x i8> [[TMP1]], <32 x i8> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <32 x i8> [[TMP3]] ; %a = tail call <32 x i8> @llvm.x86.avx512.mask.permvar.qi.256(<32 x i8> %a0, <32 x i8> zeroinitializer, <32 x i8> %passthru, i32 %mask) ret <32 x i8> %a } define <32 x i8> @shuffle_test_permvar_qi_256(<32 x i8> %a0) { ; CHECK-LABEL: @shuffle_test_permvar_qi_256( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i8> [[A0:%.*]], <32 x i8> undef, <32 x i32> ; CHECK-NEXT: ret <32 x i8> [[TMP1]] ; %a = tail call <32 x i8> @llvm.x86.avx512.mask.permvar.qi.256(<32 x i8> %a0, <32 x i8> , <32 x i8> undef, i32 -1) ret <32 x i8> %a } define <32 x i8> @shuffle_test_permvar_qi_256_mask(<32 x i8> %a0, <32 x i8> %passthru, i32 %mask) { ; CHECK-LABEL: @shuffle_test_permvar_qi_256_mask( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i8> [[A0:%.*]], <32 x i8> undef, <32 x i32> ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 [[MASK:%.*]] to <32 x i1> ; CHECK-NEXT: [[TMP3:%.*]] = select <32 x i1> [[TMP2]], <32 x i8> [[TMP1]], <32 x i8> [[PASSTHRU:%.*]] ; CHECK-NEXT: ret <32 x i8> [[TMP3]] ; %a = tail call <32 x i8> @llvm.x86.avx512.mask.permvar.qi.256(<32 x i8> %a0, <32 x i8> , <32 x i8> %passthru, i32 %mask) ret <32 x i8> %a } define <32 x i8> @undef_test_permvar_qi_256(<32 x i8> %a0) { ; CHECK-LABEL: @undef_test_permvar_qi_256( ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i8> [[A0:%.*]], <32 x i8> undef, <32 x i32> ; CHECK-NEXT: ret <32 x i8> [[TMP1]] ; %a = tail call <32 x i8> @llvm.x86.avx512.mask.permvar.qi.256(<32 x i8> %a0, <32 x i8> , <32 x i8> undef, i32 
declare <64 x i8> @llvm.x86.avx512.mask.permvar.qi.512(<64 x i8>, <64 x i8>, <64 x i8>, i64)

define <64 x i8> @identity_test_permvar_qi_512(<64 x i8> %a0) {
; CHECK-LABEL: @identity_test_permvar_qi_512(
; CHECK-NEXT: ret <64 x i8> [[A0:%.*]]
;
  %a = tail call <64 x i8> @llvm.x86.avx512.mask.permvar.qi.512(<64 x i8> %a0, <64 x i8> , <64 x i8> undef, i64 -1)
  ret <64 x i8> %a
}

define <64 x i8> @identity_test_permvar_qi_512_mask(<64 x i8> %a0, <64 x i8> %passthru, i64 %mask) {
; CHECK-LABEL: @identity_test_permvar_qi_512_mask(
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i64 [[MASK:%.*]] to <64 x i1>
; CHECK-NEXT: [[TMP2:%.*]] = select <64 x i1> [[TMP1]], <64 x i8> [[A0:%.*]], <64 x i8> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <64 x i8> [[TMP2]]
;
  %a = tail call <64 x i8> @llvm.x86.avx512.mask.permvar.qi.512(<64 x i8> %a0, <64 x i8> , <64 x i8> %passthru, i64 %mask)
  ret <64 x i8> %a
}

define <64 x i8> @zero_test_permvar_qi_512(<64 x i8> %a0) {
; CHECK-LABEL: @zero_test_permvar_qi_512(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <64 x i8> [[A0:%.*]], <64 x i8> undef, <64 x i32> zeroinitializer
; CHECK-NEXT: ret <64 x i8> [[TMP1]]
;
  %a = tail call <64 x i8> @llvm.x86.avx512.mask.permvar.qi.512(<64 x i8> %a0, <64 x i8> zeroinitializer, <64 x i8> undef, i64 -1)
  ret <64 x i8> %a
}

define <64 x i8> @zero_test_permvar_qi_512_mask(<64 x i8> %a0, <64 x i8> %passthru, i64 %mask) {
; CHECK-LABEL: @zero_test_permvar_qi_512_mask(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <64 x i8> [[A0:%.*]], <64 x i8> undef, <64 x i32> zeroinitializer
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i64 [[MASK:%.*]] to <64 x i1>
; CHECK-NEXT: [[TMP3:%.*]] = select <64 x i1> [[TMP2]], <64 x i8> [[TMP1]], <64 x i8> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <64 x i8> [[TMP3]]
;
  %a = tail call <64 x i8> @llvm.x86.avx512.mask.permvar.qi.512(<64 x i8> %a0, <64 x i8> zeroinitializer, <64 x i8> %passthru, i64 %mask)
  ret <64 x i8> %a
}

define <64 x i8> @shuffle_test_permvar_qi_512(<64 x i8> %a0) {
; CHECK-LABEL: @shuffle_test_permvar_qi_512(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <64 x i8> [[A0:%.*]], <64 x i8> undef, <64 x i32>
; CHECK-NEXT: ret <64 x i8> [[TMP1]]
;
  %a = tail call <64 x i8> @llvm.x86.avx512.mask.permvar.qi.512(<64 x i8> %a0, <64 x i8> , <64 x i8> undef, i64 -1)
  ret <64 x i8> %a
}

define <64 x i8> @shuffle_test_permvar_qi_512_mask(<64 x i8> %a0, <64 x i8> %passthru, i64 %mask) {
; CHECK-LABEL: @shuffle_test_permvar_qi_512_mask(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <64 x i8> [[A0:%.*]], <64 x i8> undef, <64 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i64 [[MASK:%.*]] to <64 x i1>
; CHECK-NEXT: [[TMP3:%.*]] = select <64 x i1> [[TMP2]], <64 x i8> [[TMP1]], <64 x i8> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <64 x i8> [[TMP3]]
;
  %a = tail call <64 x i8> @llvm.x86.avx512.mask.permvar.qi.512(<64 x i8> %a0, <64 x i8> , <64 x i8> %passthru, i64 %mask)
  ret <64 x i8> %a
}

define <64 x i8> @undef_test_permvar_qi_512(<64 x i8> %a0) {
; CHECK-LABEL: @undef_test_permvar_qi_512(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <64 x i8> [[A0:%.*]], <64 x i8> undef, <64 x i32>
; CHECK-NEXT: ret <64 x i8> [[TMP1]]
;
  %a = tail call <64 x i8> @llvm.x86.avx512.mask.permvar.qi.512(<64 x i8> %a0, <64 x i8> , <64 x i8> undef, i64 -1)
  ret <64 x i8> %a
}

define <64 x i8> @undef_test_permvar_qi_512_mask(<64 x i8> %a0, <64 x i8> %passthru, i64 %mask) {
; CHECK-LABEL: @undef_test_permvar_qi_512_mask(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <64 x i8> [[A0:%.*]], <64 x i8> undef, <64 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i64 [[MASK:%.*]] to <64 x i1>
; CHECK-NEXT: [[TMP3:%.*]] = select <64 x i1> [[TMP2]], <64 x i8> [[TMP1]], <64 x i8> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <64 x i8> [[TMP3]]
;
  %a = tail call <64 x i8> @llvm.x86.avx512.mask.permvar.qi.512(<64 x i8> %a0, <64 x i8> , <64 x i8> %passthru, i64 %mask)
  ret <64 x i8> %a
}
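; The packed arithmetic tests below all follow the same pattern: with the
; default rounding argument (i32 4, i.e. CUR_DIRECTION) the masked intrinsic is
; expected to be replaced by a plain fadd/fsub/fmul/fdiv, plus a mask bitcast
; and select for the masked forms, while any other rounding argument (i32 8
; here) is expected to keep the intrinsic call intact.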
declare <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)

define <16 x float> @test_add_ps(<16 x float> %a, <16 x float> %b) {
; CHECK-LABEL: @test_add_ps(
; CHECK-NEXT: [[TMP1:%.*]] = fadd <16 x float> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: ret <16 x float> [[TMP1]]
;
  %1 = tail call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> undef, i16 -1, i32 4)
  ret <16 x float> %1
}

define <16 x float> @test_add_ps_round(<16 x float> %a, <16 x float> %b) {
; CHECK-LABEL: @test_add_ps_round(
; CHECK-NEXT: [[TMP1:%.*]] = tail call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> [[A:%.*]], <16 x float> [[B:%.*]], <16 x float> undef, i16 -1, i32 8)
; CHECK-NEXT: ret <16 x float> [[TMP1]]
;
  %1 = tail call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> undef, i16 -1, i32 8)
  ret <16 x float> %1
}

define <16 x float> @test_add_ps_mask(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask) {
; CHECK-LABEL: @test_add_ps_mask(
; CHECK-NEXT: [[TMP1:%.*]] = fadd <16 x float> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x float> [[TMP1]], <16 x float> [[C:%.*]]
; CHECK-NEXT: ret <16 x float> [[TMP3]]
;
  %1 = tail call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask, i32 4)
  ret <16 x float> %1
}

define <16 x float> @test_add_ps_mask_round(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask) {
; CHECK-LABEL: @test_add_ps_mask_round(
; CHECK-NEXT: [[TMP1:%.*]] = tail call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> [[A:%.*]], <16 x float> [[B:%.*]], <16 x float> [[C:%.*]], i16 [[MASK:%.*]], i32 8)
; CHECK-NEXT: ret <16 x float> [[TMP1]]
;
  %1 = tail call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask, i32 8)
  ret <16 x float> %1
}

declare <8 x double> @llvm.x86.avx512.mask.add.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32)

define <8 x double> @test_add_pd(<8 x double> %a, <8 x double> %b) {
; CHECK-LABEL: @test_add_pd(
; CHECK-NEXT: [[TMP1:%.*]] = fadd <8 x double> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: ret <8 x double> [[TMP1]]
;
  %1 = tail call <8 x double> @llvm.x86.avx512.mask.add.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> undef, i8 -1, i32 4)
  ret <8 x double> %1
}

define <8 x double> @test_add_pd_round(<8 x double> %a, <8 x double> %b) {
; CHECK-LABEL: @test_add_pd_round(
; CHECK-NEXT: [[TMP1:%.*]] = tail call <8 x double> @llvm.x86.avx512.mask.add.pd.512(<8 x double> [[A:%.*]], <8 x double> [[B:%.*]], <8 x double> undef, i8 -1, i32 8)
; CHECK-NEXT: ret <8 x double> [[TMP1]]
;
  %1 = tail call <8 x double> @llvm.x86.avx512.mask.add.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> undef, i8 -1, i32 8)
  ret <8 x double> %1
}

define <8 x double> @test_add_pd_mask(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_add_pd_mask(
; CHECK-NEXT: [[TMP1:%.*]] = fadd <8 x double> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x double> [[TMP1]], <8 x double> [[C:%.*]]
; CHECK-NEXT: ret <8 x double> [[TMP3]]
;
  %1 = tail call <8 x double> @llvm.x86.avx512.mask.add.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask, i32 4)
  ret <8 x double> %1
}

define <8 x double> @test_add_pd_mask_round(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_add_pd_mask_round(
; CHECK-NEXT: [[TMP1:%.*]] = tail call <8 x double> @llvm.x86.avx512.mask.add.pd.512(<8 x double> [[A:%.*]], <8 x double> [[B:%.*]], <8 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 8)
; CHECK-NEXT: ret <8 x double> [[TMP1]]
;
  %1 = tail call <8 x double> @llvm.x86.avx512.mask.add.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask, i32 8)
  ret <8 x double> %1
}

declare <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)

define <16 x float> @test_sub_ps(<16 x float> %a, <16 x float> %b) {
; CHECK-LABEL: @test_sub_ps(
; CHECK-NEXT: [[TMP1:%.*]] = fsub <16 x float> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: ret <16 x float> [[TMP1]]
;
  %1 = tail call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> undef, i16 -1, i32 4)
  ret <16 x float> %1
}

define <16 x float> @test_sub_ps_round(<16 x float> %a, <16 x float> %b) {
; CHECK-LABEL: @test_sub_ps_round(
; CHECK-NEXT: [[TMP1:%.*]] = tail call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> [[A:%.*]], <16 x float> [[B:%.*]], <16 x float> undef, i16 -1, i32 8)
; CHECK-NEXT: ret <16 x float> [[TMP1]]
;
  %1 = tail call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> undef, i16 -1, i32 8)
  ret <16 x float> %1
}

define <16 x float> @test_sub_ps_mask(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask) {
; CHECK-LABEL: @test_sub_ps_mask(
; CHECK-NEXT: [[TMP1:%.*]] = fsub <16 x float> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x float> [[TMP1]], <16 x float> [[C:%.*]]
; CHECK-NEXT: ret <16 x float> [[TMP3]]
;
  %1 = tail call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask, i32 4)
  ret <16 x float> %1
}

define <16 x float> @test_sub_ps_mask_round(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask) {
; CHECK-LABEL: @test_sub_ps_mask_round(
; CHECK-NEXT: [[TMP1:%.*]] = tail call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> [[A:%.*]], <16 x float> [[B:%.*]], <16 x float> [[C:%.*]], i16 [[MASK:%.*]], i32 8)
; CHECK-NEXT: ret <16 x float> [[TMP1]]
;
  %1 = tail call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask, i32 8)
  ret <16 x float> %1
}

declare <8 x double> @llvm.x86.avx512.mask.sub.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32)

define <8 x double> @test_sub_pd(<8 x double> %a, <8 x double> %b) {
; CHECK-LABEL: @test_sub_pd(
; CHECK-NEXT: [[TMP1:%.*]] = fsub <8 x double> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: ret <8 x double> [[TMP1]]
;
  %1 = tail call <8 x double> @llvm.x86.avx512.mask.sub.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> undef, i8 -1, i32 4)
  ret <8 x double> %1
}

define <8 x double> @test_sub_pd_round(<8 x double> %a, <8 x double> %b) {
; CHECK-LABEL: @test_sub_pd_round(
; CHECK-NEXT: [[TMP1:%.*]] = tail call <8 x double> @llvm.x86.avx512.mask.sub.pd.512(<8 x double> [[A:%.*]], <8 x double> [[B:%.*]], <8 x double> undef, i8 -1, i32 8)
; CHECK-NEXT: ret <8 x double> [[TMP1]]
;
  %1 = tail call <8 x double> @llvm.x86.avx512.mask.sub.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> undef, i8 -1, i32 8)
  ret <8 x double> %1
}

define <8 x double> @test_sub_pd_mask(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_sub_pd_mask(
; CHECK-NEXT: [[TMP1:%.*]] = fsub <8 x double> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x double> [[TMP1]], <8 x double> [[C:%.*]]
; CHECK-NEXT: ret <8 x double> [[TMP3]]
;
  %1 = tail call <8 x double> @llvm.x86.avx512.mask.sub.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask, i32 4)
  ret <8 x double> %1
}

define <8 x double> @test_sub_pd_mask_round(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_sub_pd_mask_round(
; CHECK-NEXT: [[TMP1:%.*]] = tail call <8 x double> @llvm.x86.avx512.mask.sub.pd.512(<8 x double> [[A:%.*]], <8 x double> [[B:%.*]], <8 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 8)
; CHECK-NEXT: ret <8 x double> [[TMP1]]
;
  %1 = tail call <8 x double> @llvm.x86.avx512.mask.sub.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask, i32 8)
  ret <8 x double> %1
}

declare <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)

define <16 x float> @test_mul_ps(<16 x float> %a, <16 x float> %b) {
; CHECK-LABEL: @test_mul_ps(
; CHECK-NEXT: [[TMP1:%.*]] = fmul <16 x float> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: ret <16 x float> [[TMP1]]
;
  %1 = tail call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> undef, i16 -1, i32 4)
  ret <16 x float> %1
}

define <16 x float> @test_mul_ps_round(<16 x float> %a, <16 x float> %b) {
; CHECK-LABEL: @test_mul_ps_round(
; CHECK-NEXT: [[TMP1:%.*]] = tail call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> [[A:%.*]], <16 x float> [[B:%.*]], <16 x float> undef, i16 -1, i32 8)
; CHECK-NEXT: ret <16 x float> [[TMP1]]
;
  %1 = tail call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> undef, i16 -1, i32 8)
  ret <16 x float> %1
}

define <16 x float> @test_mul_ps_mask(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask) {
; CHECK-LABEL: @test_mul_ps_mask(
; CHECK-NEXT: [[TMP1:%.*]] = fmul <16 x float> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x float> [[TMP1]], <16 x float> [[C:%.*]]
; CHECK-NEXT: ret <16 x float> [[TMP3]]
;
  %1 = tail call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask, i32 4)
  ret <16 x float> %1
}

define <16 x float> @test_mul_ps_mask_round(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask) {
; CHECK-LABEL: @test_mul_ps_mask_round(
; CHECK-NEXT: [[TMP1:%.*]] = tail call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> [[A:%.*]], <16 x float> [[B:%.*]], <16 x float> [[C:%.*]], i16 [[MASK:%.*]], i32 8)
; CHECK-NEXT: ret <16 x float> [[TMP1]]
;
  %1 = tail call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask, i32 8)
  ret <16 x float> %1
}

declare <8 x double> @llvm.x86.avx512.mask.mul.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32)

define <8 x double> @test_mul_pd(<8 x double> %a, <8 x double> %b) {
; CHECK-LABEL: @test_mul_pd(
; CHECK-NEXT: [[TMP1:%.*]] = fmul <8 x double> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: ret <8 x double> [[TMP1]]
;
  %1 = tail call <8 x double> @llvm.x86.avx512.mask.mul.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> undef, i8 -1, i32 4)
  ret <8 x double> %1
}

define <8 x double> @test_mul_pd_round(<8 x double> %a, <8 x double> %b) {
; CHECK-LABEL: @test_mul_pd_round(
; CHECK-NEXT: [[TMP1:%.*]] = tail call <8 x double> @llvm.x86.avx512.mask.mul.pd.512(<8 x double> [[A:%.*]], <8 x double> [[B:%.*]], <8 x double> undef, i8 -1, i32 8)
; CHECK-NEXT: ret <8 x double> [[TMP1]]
;
  %1 = tail call <8 x double> @llvm.x86.avx512.mask.mul.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> undef, i8 -1, i32 8)
  ret <8 x double> %1
}

define <8 x double> @test_mul_pd_mask(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_mul_pd_mask(
; CHECK-NEXT: [[TMP1:%.*]] = fmul <8 x double> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x double> [[TMP1]], <8 x double> [[C:%.*]]
; CHECK-NEXT: ret <8 x double> [[TMP3]]
;
  %1 = tail call <8 x double> @llvm.x86.avx512.mask.mul.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask, i32 4)
  ret <8 x double> %1
}

define <8 x double> @test_mul_pd_mask_round(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_mul_pd_mask_round(
; CHECK-NEXT: [[TMP1:%.*]] = tail call <8 x double> @llvm.x86.avx512.mask.mul.pd.512(<8 x double> [[A:%.*]], <8 x double> [[B:%.*]], <8 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 8)
; CHECK-NEXT: ret <8 x double> [[TMP1]]
;
  %1 = tail call <8 x double> @llvm.x86.avx512.mask.mul.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask, i32 8)
  ret <8 x double> %1
}

declare <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)

define <16 x float> @test_div_ps(<16 x float> %a, <16 x float> %b) {
; CHECK-LABEL: @test_div_ps(
; CHECK-NEXT: [[TMP1:%.*]] = fdiv <16 x float> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: ret <16 x float> [[TMP1]]
;
  %1 = tail call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> undef, i16 -1, i32 4)
  ret <16 x float> %1
}

define <16 x float> @test_div_ps_round(<16 x float> %a, <16 x float> %b) {
; CHECK-LABEL: @test_div_ps_round(
; CHECK-NEXT: [[TMP1:%.*]] = tail call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> [[A:%.*]], <16 x float> [[B:%.*]], <16 x float> undef, i16 -1, i32 8)
; CHECK-NEXT: ret <16 x float> [[TMP1]]
;
  %1 = tail call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> undef, i16 -1, i32 8)
  ret <16 x float> %1
}

define <16 x float> @test_div_ps_mask(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask) {
; CHECK-LABEL: @test_div_ps_mask(
; CHECK-NEXT: [[TMP1:%.*]] = fdiv <16 x float> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x float> [[TMP1]], <16 x float> [[C:%.*]]
; CHECK-NEXT: ret <16 x float> [[TMP3]]
;
  %1 = tail call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask, i32 4)
  ret <16 x float> %1
}

define <16 x float> @test_div_ps_mask_round(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask) {
; CHECK-LABEL: @test_div_ps_mask_round(
; CHECK-NEXT: [[TMP1:%.*]] = tail call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> [[A:%.*]], <16 x float> [[B:%.*]], <16 x float> [[C:%.*]], i16 [[MASK:%.*]], i32 8)
; CHECK-NEXT: ret <16 x float> [[TMP1]]
;
  %1 = tail call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask, i32 8)
  ret <16 x float> %1
}

declare <8 x double> @llvm.x86.avx512.mask.div.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32)

define <8 x double> @test_div_pd(<8 x double> %a, <8 x double> %b) {
; CHECK-LABEL: @test_div_pd(
; CHECK-NEXT: [[TMP1:%.*]] = fdiv <8 x double> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: ret <8 x double> [[TMP1]]
;
  %1 = tail call <8 x double> @llvm.x86.avx512.mask.div.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> undef, i8 -1, i32 4)
  ret <8 x double> %1
}

define <8 x double> @test_div_pd_round(<8 x double> %a, <8 x double> %b) {
; CHECK-LABEL: @test_div_pd_round(
; CHECK-NEXT: [[TMP1:%.*]] = tail call <8 x double> @llvm.x86.avx512.mask.div.pd.512(<8 x double> [[A:%.*]], <8 x double> [[B:%.*]], <8 x double> undef, i8 -1, i32 8)
; CHECK-NEXT: ret <8 x double> [[TMP1]]
;
  %1 = tail call <8 x double> @llvm.x86.avx512.mask.div.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> undef, i8 -1, i32 8)
  ret <8 x double> %1
}

define <8 x double> @test_div_pd_mask(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_div_pd_mask(
; CHECK-NEXT: [[TMP1:%.*]] = fdiv <8 x double> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x double> [[TMP1]], <8 x double> [[C:%.*]]
; CHECK-NEXT: ret <8 x double> [[TMP3]]
;
  %1 = tail call <8 x double> @llvm.x86.avx512.mask.div.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask, i32 4)
  ret <8 x double> %1
}

define <8 x double> @test_div_pd_mask_round(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_div_pd_mask_round(
; CHECK-NEXT: [[TMP1:%.*]] = tail call <8 x double> @llvm.x86.avx512.mask.div.pd.512(<8 x double> [[A:%.*]], <8 x double> [[B:%.*]], <8 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 8)
; CHECK-NEXT: ret <8 x double> [[TMP1]]
;
  %1 = tail call <8 x double> @llvm.x86.avx512.mask.div.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask, i32 8)
  ret <8 x double> %1
}
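; The comi tests below check that only lane 0 of each vector operand is read:
; the insertions of constants into the upper lanes are expected to be removed,
; while the comparison intrinsic itself is kept.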
declare i32 @llvm.x86.avx512.vcomi.ss(<4 x float>, <4 x float>, i32, i32)

define i32 @test_comi_ss_0(float %a, float %b) {
; CHECK-LABEL: @test_comi_ss_0(
; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x float> undef, float [[A:%.*]], i32 0
; CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x float> undef, float [[B:%.*]], i32 0
; CHECK-NEXT: [[TMP3:%.*]] = tail call i32 @llvm.x86.avx512.vcomi.ss(<4 x float> [[TMP1]], <4 x float> [[TMP2]], i32 0, i32 4)
; CHECK-NEXT: ret i32 [[TMP3]]
;
  %1 = insertelement <4 x float> undef, float %a, i32 0
  %2 = insertelement <4 x float> %1, float 1.000000e+00, i32 1
  %3 = insertelement <4 x float> %2, float 2.000000e+00, i32 2
  %4 = insertelement <4 x float> %3, float 3.000000e+00, i32 3
  %5 = insertelement <4 x float> undef, float %b, i32 0
  %6 = insertelement <4 x float> %5, float 4.000000e+00, i32 1
  %7 = insertelement <4 x float> %6, float 5.000000e+00, i32 2
  %8 = insertelement <4 x float> %7, float 6.000000e+00, i32 3
  %9 = tail call i32 @llvm.x86.avx512.vcomi.ss(<4 x float> %4, <4 x float> %8, i32 0, i32 4)
  ret i32 %9
}

declare i32 @llvm.x86.avx512.vcomi.sd(<2 x double>, <2 x double>, i32, i32)

define i32 @test_comi_sd_0(double %a, double %b) {
; CHECK-LABEL: @test_comi_sd_0(
; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x double> undef, double [[A:%.*]], i32 0
; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x double> undef, double [[B:%.*]], i32 0
; CHECK-NEXT: [[TMP3:%.*]] = tail call i32 @llvm.x86.avx512.vcomi.sd(<2 x double> [[TMP1]], <2 x double> [[TMP2]], i32 0, i32 4)
; CHECK-NEXT: ret i32 [[TMP3]]
;
  %1 = insertelement <2 x double> undef, double %a, i32 0
  %2 = insertelement <2 x double> %1, double 1.000000e+00, i32 1
  %3 = insertelement <2 x double> undef, double %b, i32 0
  %4 = insertelement <2 x double> %3, double 2.000000e+00, i32 1
  %5 = tail call i32 @llvm.x86.avx512.vcomi.sd(<2 x double> %2, <2 x double> %4, i32 0, i32 4)
  ret i32 %5
}