; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -instcombine -mtriple aarch64-linux-gnu -mattr=+sve -S < %s 2>%t | FileCheck %s
; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t

; If this check fails please read test/CodeGen/AArch64/README for instructions on how to resolve it.
; WARN-NOT: warning

; These tests check that InstCombine's alloca/bitcast folding is conservative
; when fixed-width and scalable (SVE) vector types are mixed: the bitcast may
; only be folded away when source and destination are both scalable with the
; same total size (see @scalable32i16_to_scalable16i32).

define void @fixed_array16i32_to_scalable4i32(<vscale x 4 x i32>* %out) {
; CHECK-LABEL: @fixed_array16i32_to_scalable4i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca [16 x i32], align 16
; CHECK-NEXT:    [[CAST:%.*]] = bitcast [16 x i32]* [[TMP]] to <vscale x 4 x i32>*
; CHECK-NEXT:    store volatile <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* [[CAST]], align 16
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 4 x i32>, <vscale x 4 x i32>* [[CAST]], align 16
; CHECK-NEXT:    store <vscale x 4 x i32> [[RELOAD]], <vscale x 4 x i32>* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca [16 x i32], align 16
  %cast = bitcast [16 x i32]* %tmp to <vscale x 4 x i32>*
  store volatile <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* %cast, align 16
  %reload = load volatile <vscale x 4 x i32>, <vscale x 4 x i32>* %cast, align 16
  store <vscale x 4 x i32> %reload, <vscale x 4 x i32>* %out, align 16
  ret void
}

define void @scalable4i32_to_fixed16i32(<16 x i32>* %out) {
; CHECK-LABEL: @scalable4i32_to_fixed16i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 4 x i32>, align 64
; CHECK-NEXT:    [[CAST:%.*]] = bitcast <vscale x 4 x i32>* [[TMP]] to <16 x i32>*
; CHECK-NEXT:    store <16 x i32> zeroinitializer, <16 x i32>* [[CAST]], align 64
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <16 x i32>, <16 x i32>* [[CAST]], align 64
; CHECK-NEXT:    store <16 x i32> [[RELOAD]], <16 x i32>* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <vscale x 4 x i32>, align 16
  %cast = bitcast <vscale x 4 x i32>* %tmp to <16 x i32>*
  store <16 x i32> zeroinitializer, <16 x i32>* %cast, align 16
  %reload = load volatile <16 x i32>, <16 x i32>* %cast, align 16
  store <16 x i32> %reload, <16 x i32>* %out, align 16
  ret void
}

define void @fixed16i32_to_scalable4i32(<vscale x 4 x i32>* %out) {
; CHECK-LABEL: @fixed16i32_to_scalable4i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <16 x i32>, align 16
; CHECK-NEXT:    [[CAST:%.*]] = bitcast <16 x i32>* [[TMP]] to <vscale x 4 x i32>*
; CHECK-NEXT:    store volatile <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* [[CAST]], align 16
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 4 x i32>, <vscale x 4 x i32>* [[CAST]], align 16
; CHECK-NEXT:    store <vscale x 4 x i32> [[RELOAD]], <vscale x 4 x i32>* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <16 x i32>, align 16
  %cast = bitcast <16 x i32>* %tmp to <vscale x 4 x i32>*
  store volatile <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* %cast, align 16
  %reload = load volatile <vscale x 4 x i32>, <vscale x 4 x i32>* %cast, align 16
  store <vscale x 4 x i32> %reload, <vscale x 4 x i32>* %out, align 16
  ret void
}

define void @scalable16i32_to_fixed16i32(<16 x i32>* %out) {
; CHECK-LABEL: @scalable16i32_to_fixed16i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 16 x i32>, align 64
; CHECK-NEXT:    [[CAST:%.*]] = bitcast <vscale x 16 x i32>* [[TMP]] to <16 x i32>*
; CHECK-NEXT:    store volatile <16 x i32> zeroinitializer, <16 x i32>* [[CAST]], align 64
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <16 x i32>, <16 x i32>* [[CAST]], align 64
; CHECK-NEXT:    store <16 x i32> [[RELOAD]], <16 x i32>* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <vscale x 16 x i32>, align 16
  %cast = bitcast <vscale x 16 x i32>* %tmp to <16 x i32>*
  store volatile <16 x i32> zeroinitializer, <16 x i32>* %cast, align 16
  %reload = load volatile <16 x i32>, <16 x i32>* %cast, align 16
  store <16 x i32> %reload, <16 x i32>* %out, align 16
  ret void
}

define void @scalable32i32_to_scalable16i32(<vscale x 16 x i32>* %out) {
; CHECK-LABEL: @scalable32i32_to_scalable16i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 32 x i32>, align 64
; CHECK-NEXT:    [[CAST:%.*]] = bitcast <vscale x 32 x i32>* [[TMP]] to <vscale x 16 x i32>*
; CHECK-NEXT:    store volatile <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32>* [[CAST]], align 64
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, <vscale x 16 x i32>* [[CAST]], align 64
; CHECK-NEXT:    store <vscale x 16 x i32> [[RELOAD]], <vscale x 16 x i32>* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <vscale x 32 x i32>, align 16
  %cast = bitcast <vscale x 32 x i32>* %tmp to <vscale x 16 x i32>*
  store volatile <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32>* %cast, align 16
  %reload = load volatile <vscale x 16 x i32>, <vscale x 16 x i32>* %cast, align 16
  store <vscale x 16 x i32> %reload, <vscale x 16 x i32>* %out, align 16
  ret void
}

define void @scalable32i16_to_scalable16i32(<vscale x 16 x i32>* %out) {
; CHECK-LABEL: @scalable32i16_to_scalable16i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 16 x i32>, align 64
; CHECK-NEXT:    store volatile <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32>* [[TMP]], align 64
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, <vscale x 16 x i32>* [[TMP]], align 64
; CHECK-NEXT:    store <vscale x 16 x i32> [[RELOAD]], <vscale x 16 x i32>* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <vscale x 32 x i16>, align 16
  %cast = bitcast <vscale x 32 x i16>* %tmp to <vscale x 16 x i32>*
  store volatile <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32>* %cast, align 16
  %reload = load volatile <vscale x 16 x i32>, <vscale x 16 x i32>* %cast, align 16
  store <vscale x 16 x i32> %reload, <vscale x 16 x i32>* %out, align 16
  ret void
}

define void @scalable32i16_to_scalable16i32_multiuse(<vscale x 16 x i32>* %out, <vscale x 32 x i16>* %out2) {
; CHECK-LABEL: @scalable32i16_to_scalable16i32_multiuse(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 32 x i16>, align 64
; CHECK-NEXT:    [[CAST:%.*]] = bitcast <vscale x 32 x i16>* [[TMP]] to <vscale x 16 x i32>*
; CHECK-NEXT:    store volatile <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32>* [[CAST]], align 64
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, <vscale x 16 x i32>* [[CAST]], align 64
; CHECK-NEXT:    store <vscale x 16 x i32> [[RELOAD]], <vscale x 16 x i32>* [[OUT:%.*]], align 16
; CHECK-NEXT:    [[RELOAD2:%.*]] = load volatile <vscale x 32 x i16>, <vscale x 32 x i16>* [[TMP]], align 64
; CHECK-NEXT:    store <vscale x 32 x i16> [[RELOAD2]], <vscale x 32 x i16>* [[OUT2:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <vscale x 32 x i16>, align 16
  %cast = bitcast <vscale x 32 x i16>* %tmp to <vscale x 16 x i32>*
  store volatile <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32>* %cast, align 16
  %reload = load volatile <vscale x 16 x i32>, <vscale x 16 x i32>* %cast, align 16
  store <vscale x 16 x i32> %reload, <vscale x 16 x i32>* %out, align 16
  %reload2 = load volatile <vscale x 32 x i16>, <vscale x 32 x i16>* %tmp, align 16
  store <vscale x 32 x i16> %reload2, <vscale x 32 x i16>* %out2, align 16
  ret void
}