
Commit c6ce536

arsenm authored and bcahoon committed
AMDGPU: Fix using wrong memory type for non-image resource intrinsics (llvm#94911)
An 8 x i16 raw load was incorrectly using a 64-bit memory type, which would assert in the MachineMemOperand constructor. This is preparation for a cleanup which will make the buffer intrinsics work for all legal types.

Change-Id: Ia916725b45151f3adac6d1c2a2fbc06302c4c131
1 parent 2ee7cfd commit c6ce536
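
For illustration, here is a minimal standalone C++ sketch of the width mismatch the commit message describes. memBitsForVectorReturn is a hypothetical helper (not the LLVM API); it only mimics the lane clamping that memVTFromLoadIntrReturn applies when Info.memVT is built. Under that assumption, the old code clamped every read-only resource intrinsic to 4 lanes, so an <8 x i16> raw buffer load was described as a 64-bit memory type even though the access is 128 bits wide; after this change the clamp applies only to image intrinsics.

// Illustrative sketch only, not LLVM code: memBitsForVectorReturn is a
// hypothetical stand-in for the lane clamping done when computing Info.memVT.
#include <algorithm>
#include <cstdio>
#include <limits>

// Memory width in bits for a vector return type: element width times the
// number of elements, clamped to MaxNumLanes.
static unsigned memBitsForVectorReturn(unsigned NumElts, unsigned EltBits,
                                       unsigned MaxNumLanes) {
  return std::min(NumElts, MaxNumLanes) * EltBits;
}

int main() {
  // Old behavior: a raw <8 x i16> buffer load was clamped to 4 lanes,
  // yielding a 64-bit memVT that did not match the 128-bit access
  // (the mismatch that asserted in the MachineMemOperand constructor).
  unsigned OldBits = memBitsForVectorReturn(8, 16, /*MaxNumLanes=*/4);
  // New behavior for non-image intrinsics: no clamp, full 128-bit type.
  unsigned NewBits =
      memBitsForVectorReturn(8, 16, std::numeric_limits<unsigned>::max());
  std::printf("old memVT: %u bits, new memVT: %u bits\n", OldBits, NewBits);
  return 0;
}

Compiled with any C++11 compiler, this prints 64 and 128, the old and new widths for the 8 x 16-bit case.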

File tree

3 files changed: +73 −4 lines changed

llvm/lib/Target/AMDGPU/BUFInstructions.td (+4)

@@ -1431,6 +1431,8 @@ defm : MUBUF_LoadIntrinsicPat<SIbuffer_load, v4f32, "BUFFER_LOAD_DWORDX4">;
 defm : MUBUF_LoadIntrinsicPat<SIbuffer_load, v4i32, "BUFFER_LOAD_DWORDX4">;
 defm : MUBUF_LoadIntrinsicPat<SIbuffer_load, v2i64, "BUFFER_LOAD_DWORDX4">;
 defm : MUBUF_LoadIntrinsicPat<SIbuffer_load, v2f64, "BUFFER_LOAD_DWORDX4">;
+defm : MUBUF_LoadIntrinsicPat<SIbuffer_load, v8f16, "BUFFER_LOAD_DWORDX4">;
+
 defm : MUBUF_LoadIntrinsicPat<SIbuffer_load_byte, i32, "BUFFER_LOAD_SBYTE">;
 defm : MUBUF_LoadIntrinsicPat<SIbuffer_load_short, i32, "BUFFER_LOAD_SSHORT">;
 defm : MUBUF_LoadIntrinsicPat<SIbuffer_load_ubyte, i32, "BUFFER_LOAD_UBYTE">;
@@ -1527,6 +1529,8 @@ defm : MUBUF_StoreIntrinsicPat<SIbuffer_store, v4f32, "BUFFER_STORE_DWORDX4">;
 defm : MUBUF_StoreIntrinsicPat<SIbuffer_store, v4i32, "BUFFER_STORE_DWORDX4">;
 defm : MUBUF_StoreIntrinsicPat<SIbuffer_store, v2i64, "BUFFER_STORE_DWORDX4">;
 defm : MUBUF_StoreIntrinsicPat<SIbuffer_store, v2f64, "BUFFER_STORE_DWORDX4">;
+defm : MUBUF_StoreIntrinsicPat<SIbuffer_store, v8f16, "BUFFER_STORE_DWORDX4">;
+
 defm : MUBUF_StoreIntrinsicPat<SIbuffer_store_byte, i32, "BUFFER_STORE_BYTE">;
 defm : MUBUF_StoreIntrinsicPat<SIbuffer_store_short, i32, "BUFFER_STORE_SHORT">;

llvm/lib/Target/AMDGPU/SIISelLowering.cpp (+7 −4)

@@ -1188,9 +1188,9 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
       Info.flags |= MachineMemOperand::MOVolatile;
     Info.flags |= MachineMemOperand::MODereferenceable;
     if (ME.onlyReadsMemory()) {
-      unsigned MaxNumLanes = 4;
-
       if (RsrcIntr->IsImage) {
+        unsigned MaxNumLanes = 4;
+
         const AMDGPU::ImageDimIntrinsicInfo *Intr
           = AMDGPU::getImageDimIntrinsicInfo(IntrID);
         const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
@@ -1203,9 +1203,12 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
             = cast<ConstantInt>(CI.getArgOperand(0))->getZExtValue();
           MaxNumLanes = DMask == 0 ? 1 : llvm::popcount(DMask);
         }
-      }

-      Info.memVT = memVTFromLoadIntrReturn(CI.getType(), MaxNumLanes);
+        Info.memVT = memVTFromLoadIntrReturn(CI.getType(), MaxNumLanes);
+      } else {
+        Info.memVT = memVTFromLoadIntrReturn(
+            CI.getType(), std::numeric_limits<unsigned>::max());
+      }

       // FIXME: What does alignment mean for an image?
       Info.opc = ISD::INTRINSIC_W_CHAIN;
New test file (+62)

@@ -0,0 +1,62 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -mcpu=gfx908 -stop-after=finalize-isel -o - %s | FileCheck -check-prefix=GCN %s
+
+define amdgpu_ps void @raw_ptr_buffer_load_v8f16(ptr addrspace(8) inreg %rsrc, ptr addrspace(3) %ptr) {
+; GCN-LABEL: name: raw_ptr_buffer_load_v8f16
+; GCN: bb.0 (%ir-block.0):
+; GCN-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0
+; GCN-NEXT: {{ $}}
+; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1
+; GCN-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub1
+; GCN-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub0
+; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY3]], %subreg.sub1
+; GCN-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE1]].sub1
+; GCN-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE1]].sub0
+; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE killed [[COPY8]], %subreg.sub0, killed [[COPY7]], %subreg.sub1, killed [[COPY6]], %subreg.sub2, killed [[COPY5]], %subreg.sub3
+; GCN-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET killed [[REG_SEQUENCE2]], killed [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s128) from %ir.rsrc, align 1, addrspace 8)
+; GCN-NEXT: DS_WRITE_B128_gfx9 [[COPY]], killed [[BUFFER_LOAD_DWORDX4_OFFSET]], 0, 0, implicit $exec :: (store (s128) into %ir.ptr, addrspace 3)
+; GCN-NEXT: S_ENDPGM 0
+  %val = call <8 x half> @llvm.amdgcn.raw.ptr.buffer.load.v8f16(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0)
+  store <8 x half> %val, ptr addrspace(3) %ptr
+  ret void
+}
+
+define amdgpu_ps void @buffer_store_v8f16(ptr addrspace(8) inreg %rsrc, <8 x half> %data, i32 %offset) {
+; GCN-LABEL: name: buffer_store_v8f16
+; GCN: bb.0 (%ir-block.0):
+; GCN-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+; GCN-NEXT: {{ $}}
+; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+; GCN-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+; GCN-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+; GCN-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+; GCN-NEXT: [[COPY8:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY5]], %subreg.sub1
+; GCN-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub1
+; GCN-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub0
+; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY7]], %subreg.sub1
+; GCN-NEXT: [[COPY11:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE1]].sub1
+; GCN-NEXT: [[COPY12:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE1]].sub0
+; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE killed [[COPY12]], %subreg.sub0, killed [[COPY11]], %subreg.sub1, killed [[COPY10]], %subreg.sub2, killed [[COPY9]], %subreg.sub3
+; GCN-NEXT: [[DEF:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+; GCN-NEXT: [[DEF1:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+; GCN-NEXT: [[DEF2:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+; GCN-NEXT: [[DEF3:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+; GCN-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY1]], %subreg.sub3
+; GCN-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+; GCN-NEXT: [[COPY13:%[0-9]+]]:vreg_128 = COPY [[REG_SEQUENCE3]]
+; GCN-NEXT: BUFFER_STORE_DWORDX4_OFFEN_exact killed [[COPY13]], [[COPY]], killed [[REG_SEQUENCE2]], killed [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable store (s128) into %ir.rsrc, align 1, addrspace 8)
+; GCN-NEXT: S_ENDPGM 0
+  call void @llvm.amdgcn.raw.ptr.buffer.store.v8f16(<8 x half> %data, ptr addrspace(8) %rsrc, i32 %offset, i32 0, i32 0)
+  ret void
+}
