Lower __builtin_neon_vshl_v
ghehg committed Nov 22, 2024
Commit 6f53fad (parent: 8176d88)
Showing 2 changed files with 107 additions and 66 deletions.
clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp (1 addition, 0 deletions)
@@ -2555,6 +2555,7 @@ mlir::Value CIRGenFunction::emitCommonNeonBuiltinExpr(
                                                      : "aarch64.neon.srhadd";
     break;
   }
+  case NEON::BI__builtin_neon_vshl_v:
   case NEON::BI__builtin_neon_vshlq_v: {
     intrincsName = (intrinicId != altLLVMIntrinsic) ? "aarch64.neon.ushl"
                                                     : "aarch64.neon.sshl";
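The new case reuses the common Neon path: the ternary picks "aarch64.neon.ushl" when the resolved intrinsic differs from the signed alternative, so unsigned vshl variants get ushl and signed ones get sshl, exactly as the updated tests below check. At the source level the pairing looks like this (a minimal sketch using standard arm_neon.h intrinsics; the function names are illustrative, not from the commit, and note that the shift-amount vector is signed even for the unsigned variant, since a negative lane shifts that lane right):

#include <arm_neon.h>

// Signed lanes: __builtin_neon_vshl_v lowers to @llvm.aarch64.neon.sshl.v8i8.
int8x8_t shl_signed(int8x8_t v, int8x8_t amt) {
  return vshl_s8(v, amt);
}

// Unsigned lanes: lowers to @llvm.aarch64.neon.ushl.v8i8. The amount
// vector stays int8x8_t; negative lanes shift right.
uint8x8_t shl_unsigned(uint8x8_t v, int8x8_t amt) {
  return vshl_u8(v, amt);
}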
clang/test/CIR/CodeGen/AArch64/neon.c (106 additions, 66 deletions)

@@ -3391,79 +3391,119 @@ int8x8_t test_vqadd_s8(int8x8_t a, int8x8_t b) {
 // return vqsubq_u64(a, b);
 // }

-// NYI-LABEL: @test_vshl_s8(
-// NYI: [[VSHL_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sshl.v8i8(<8 x i8> %a, <8 x i8> %b)
-// NYI: ret <8 x i8> [[VSHL_V_I]]
-// int8x8_t test_vshl_s8(int8x8_t a, int8x8_t b) {
-// return vshl_s8(a, b);
-// }
+int8x8_t test_vshl_s8(int8x8_t a, int8x8_t b) {
+  return vshl_s8(a, b);
+
+  // CIR-LABEL: vshl_s8
+  // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sshl" {{%.*}}, {{%.*}} :
+  // CIR-SAME: (!cir.vector<!s8i x 8>, !cir.vector<!s8i x 8>) -> !cir.vector<!s8i x 8>
+
+  // LLVM: {{.*}}test_vshl_s8(<8 x i8>{{.*}}[[A:%.*]], <8 x i8>{{.*}}[[B:%.*]])
+  // LLVM: [[VSHL_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sshl.v8i8(<8 x i8> [[A]], <8 x i8> [[B]])
+  // LLVM: ret <8 x i8> [[VSHL_V_I]]
+}

-// NYI-LABEL: @test_vshl_s16(
-// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
-// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8>
-// NYI: [[VSHL_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sshl.v4i16(<4 x i16> %a, <4 x i16> %b)
-// NYI: [[VSHL_V3_I:%.*]] = bitcast <4 x i16> [[VSHL_V2_I]] to <8 x i8>
-// NYI: ret <4 x i16> [[VSHL_V2_I]]
-// int16x4_t test_vshl_s16(int16x4_t a, int16x4_t b) {
-// return vshl_s16(a, b);
-// }
+int16x4_t test_vshl_s16(int16x4_t a, int16x4_t b) {
+  return vshl_s16(a, b);
+
+  // CIR-LABEL: vshl_s16
+  // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sshl" {{%.*}}, {{%.*}} :
+  // CIR-SAME: (!cir.vector<!s16i x 4>, !cir.vector<!s16i x 4>) -> !cir.vector<!s16i x 4>
+
+  // LLVM: {{.*}}test_vshl_s16(<4 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[B:%.*]])
+  // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
+  // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> [[B]] to <8 x i8>
+  // LLVM: [[VSHL_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sshl.v4i16(<4 x i16> [[A]], <4 x i16> [[B]])
+  // LLVM: [[VSHL_V3_I:%.*]] = bitcast <4 x i16> [[VSHL_V2_I]] to <8 x i8>
+  // LLVM: ret <4 x i16> [[VSHL_V2_I]]
+}

-// NYI-LABEL: @test_vshl_s32(
-// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
-// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8>
-// NYI: [[VSHL_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sshl.v2i32(<2 x i32> %a, <2 x i32> %b)
-// NYI: [[VSHL_V3_I:%.*]] = bitcast <2 x i32> [[VSHL_V2_I]] to <8 x i8>
-// NYI: ret <2 x i32> [[VSHL_V2_I]]
-// int32x2_t test_vshl_s32(int32x2_t a, int32x2_t b) {
-// return vshl_s32(a, b);
-// }
+int32x2_t test_vshl_s32(int32x2_t a, int32x2_t b) {
+  return vshl_s32(a, b);
+
+  // CIR-LABEL: vshl_s32
+  // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sshl" {{%.*}}, {{%.*}} :
+  // CIR-SAME: (!cir.vector<!s32i x 2>, !cir.vector<!s32i x 2>) -> !cir.vector<!s32i x 2>
+
+  // LLVM: {{.*}}test_vshl_s32(<2 x i32>{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[B:%.*]])
+  // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
+  // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> [[B]] to <8 x i8>
+  // LLVM: [[VSHL_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sshl.v2i32(<2 x i32> [[A]], <2 x i32> [[B]])
+  // LLVM: [[VSHL_V3_I:%.*]] = bitcast <2 x i32> [[VSHL_V2_I]] to <8 x i8>
+  // LLVM: ret <2 x i32> [[VSHL_V2_I]]
+}

-// NYI-LABEL: @test_vshl_s64(
-// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8>
-// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8>
-// NYI: [[VSHL_V2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.sshl.v1i64(<1 x i64> %a, <1 x i64> %b)
-// NYI: [[VSHL_V3_I:%.*]] = bitcast <1 x i64> [[VSHL_V2_I]] to <8 x i8>
-// NYI: ret <1 x i64> [[VSHL_V2_I]]
-// int64x1_t test_vshl_s64(int64x1_t a, int64x1_t b) {
-// return vshl_s64(a, b);
-// }
+int64x1_t test_vshl_s64(int64x1_t a, int64x1_t b) {
+  return vshl_s64(a, b);
+
+  // CIR-LABEL: vshl_s64
+  // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sshl" {{%.*}}, {{%.*}} :
+  // CIR-SAME: (!cir.vector<!s64i x 1>, !cir.vector<!s64i x 1>) -> !cir.vector<!s64i x 1>
+
+  // LLVM: {{.*}}test_vshl_s64(<1 x i64>{{.*}}[[A:%.*]], <1 x i64>{{.*}}[[B:%.*]])
+  // LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8>
+  // LLVM: [[TMP1:%.*]] = bitcast <1 x i64> [[B]] to <8 x i8>
+  // LLVM: [[VSHL_V2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.sshl.v1i64(<1 x i64> [[A]], <1 x i64> [[B]])
+  // LLVM: [[VSHL_V3_I:%.*]] = bitcast <1 x i64> [[VSHL_V2_I]] to <8 x i8>
+  // LLVM: ret <1 x i64> [[VSHL_V2_I]]
+}

-// NYI-LABEL: @test_vshl_u8(
-// NYI: [[VSHL_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.ushl.v8i8(<8 x i8> %a, <8 x i8> %b)
-// NYI: ret <8 x i8> [[VSHL_V_I]]
-// uint8x8_t test_vshl_u8(uint8x8_t a, int8x8_t b) {
-// return vshl_u8(a, b);
-// }
+uint8x8_t test_vshl_u8(uint8x8_t a, int8x8_t b) {
+  return vshl_u8(a, b);
+
+  // CIR-LABEL: vshl_u8
+  // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.ushl" {{%.*}}, {{%.*}} :
+  // CIR-SAME: (!cir.vector<!u8i x 8>, !cir.vector<!u8i x 8>) -> !cir.vector<!u8i x 8>
+
+  // LLVM: {{.*}}test_vshl_u8(<8 x i8>{{.*}}[[A:%.*]], <8 x i8>{{.*}}[[B:%.*]])
+  // LLVM: [[VSHL_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.ushl.v8i8(<8 x i8> [[A]], <8 x i8> [[B]])
+  // LLVM: ret <8 x i8> [[VSHL_V_I]]
+}

-// NYI-LABEL: @test_vshl_u16(
-// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
-// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8>
-// NYI: [[VSHL_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.ushl.v4i16(<4 x i16> %a, <4 x i16> %b)
-// NYI: [[VSHL_V3_I:%.*]] = bitcast <4 x i16> [[VSHL_V2_I]] to <8 x i8>
-// NYI: ret <4 x i16> [[VSHL_V2_I]]
-// uint16x4_t test_vshl_u16(uint16x4_t a, int16x4_t b) {
-// return vshl_u16(a, b);
-// }
+uint16x4_t test_vshl_u16(uint16x4_t a, int16x4_t b) {
+  return vshl_u16(a, b);
+
+  // CIR-LABEL: vshl_u16
+  // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.ushl" {{%.*}}, {{%.*}} :
+  // CIR-SAME: (!cir.vector<!u16i x 4>, !cir.vector<!u16i x 4>) -> !cir.vector<!u16i x 4>
+
+  // LLVM: {{.*}}test_vshl_u16(<4 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[B:%.*]])
+  // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
+  // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> [[B]] to <8 x i8>
+  // LLVM: [[VSHL_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.ushl.v4i16(<4 x i16> [[A]], <4 x i16> [[B]])
+  // LLVM: [[VSHL_V3_I:%.*]] = bitcast <4 x i16> [[VSHL_V2_I]] to <8 x i8>
+  // LLVM: ret <4 x i16> [[VSHL_V2_I]]
+}

-// NYI-LABEL: @test_vshl_u32(
-// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
-// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8>
-// NYI: [[VSHL_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.ushl.v2i32(<2 x i32> %a, <2 x i32> %b)
-// NYI: [[VSHL_V3_I:%.*]] = bitcast <2 x i32> [[VSHL_V2_I]] to <8 x i8>
-// NYI: ret <2 x i32> [[VSHL_V2_I]]
-// uint32x2_t test_vshl_u32(uint32x2_t a, int32x2_t b) {
-// return vshl_u32(a, b);
-// }
+uint32x2_t test_vshl_u32(uint32x2_t a, int32x2_t b) {
+  return vshl_u32(a, b);
+
+  // CIR-LABEL: vshl_u32
+  // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.ushl" {{%.*}}, {{%.*}} :
+  // CIR-SAME: (!cir.vector<!u32i x 2>, !cir.vector<!u32i x 2>) -> !cir.vector<!u32i x 2>
+
+  // LLVM: {{.*}}test_vshl_u32(<2 x i32>{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[B:%.*]])
+  // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
+  // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> [[B]] to <8 x i8>
+  // LLVM: [[VSHL_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.ushl.v2i32(<2 x i32> [[A]], <2 x i32> [[B]])
+  // LLVM: [[VSHL_V3_I:%.*]] = bitcast <2 x i32> [[VSHL_V2_I]] to <8 x i8>
+  // LLVM: ret <2 x i32> [[VSHL_V2_I]]
+}

-// NYI-LABEL: @test_vshl_u64(
-// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8>
-// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8>
-// NYI: [[VSHL_V2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.ushl.v1i64(<1 x i64> %a, <1 x i64> %b)
-// NYI: [[VSHL_V3_I:%.*]] = bitcast <1 x i64> [[VSHL_V2_I]] to <8 x i8>
-// NYI: ret <1 x i64> [[VSHL_V2_I]]
-// uint64x1_t test_vshl_u64(uint64x1_t a, int64x1_t b) {
-// return vshl_u64(a, b);
-// }
+uint64x1_t test_vshl_u64(uint64x1_t a, int64x1_t b) {
+  return vshl_u64(a, b);
+
+  // CIR-LABEL: vshl_u64
+  // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.ushl" {{%.*}}, {{%.*}} :
+  // CIR-SAME: (!cir.vector<!u64i x 1>, !cir.vector<!u64i x 1>) -> !cir.vector<!u64i x 1>
+
+  // LLVM: {{.*}}test_vshl_u64(<1 x i64>{{.*}}[[A:%.*]], <1 x i64>{{.*}}[[B:%.*]])
+  // LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8>
+  // LLVM: [[TMP1:%.*]] = bitcast <1 x i64> [[B]] to <8 x i8>
+  // LLVM: [[VSHL_V2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.ushl.v1i64(<1 x i64> [[A]], <1 x i64> [[B]])
+  // LLVM: [[VSHL_V3_I:%.*]] = bitcast <1 x i64> [[VSHL_V2_I]] to <8 x i8>
+  // LLVM: ret <1 x i64> [[VSHL_V2_I]]
+}

+int8x16_t test_vshlq_s8(int8x16_t a, int8x16_t b) {
+  return vshlq_s8(a, b);
…
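A note on reading the test hunk: the CIR lines check the ClangIR emitted for each function, and the LLVM lines check the IR after lowering; the checked bitcasts ([[TMP0]], [[VSHL_V3_I]] and friends) are not consumed by the ret and simply mirror the classic CodeGen output these NYI checks were ported from. The RUN lines that drive the CIR and LLVM prefixes sit at the top of neon.c, above the hunk shown; a rough sketch of the usual ClangIR test-driver pattern (the triple and exact flags here are assumptions, not copied from this file):

// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -target-feature +neon \
// RUN:   -fclangir -emit-cir -o %t.cir %s
// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s
// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -target-feature +neon \
// RUN:   -fclangir -emit-llvm -o %t.ll %s
// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s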
