// iommu_command_queue.c
// Copyright (c) 2022 by Rivos Inc.
// Licensed under the Apache License, Version 2.0, see LICENSE for details.
// SPDX-License-Identifier: Apache-2.0
// Author: ved@rivosinc.com
#include "iommu.h"
uint8_t g_command_queue_stall_for_itag = 0;
uint8_t g_ats_inv_req_timeout = 0;
uint8_t g_iofence_wait_pending_inv = 0;
uint8_t g_iofence_pending_PR, g_iofence_pending_PW, g_iofence_pending_AV, g_iofence_pending_WSI_BIT;
uint64_t g_iofence_pending_ADDR;
uint32_t g_iofence_pending_DATA;
uint8_t g_pending_inval_req_DSV;
uint8_t g_pending_inval_req_DSEG;
uint16_t g_pending_inval_req_RID;
uint8_t g_pending_inval_req_PV;
uint32_t g_pending_inval_req_PID;
uint64_t g_pending_inval_req_PAYLOAD;
void
process_commands(
void) {
uint8_t status, itag;
uint64_t a;
command_t command;
// Command queue is used by software to queue commands to be processed by
// the IOMMU. Each command is 16 bytes.
// The PPN of the base of this in-memory queue and the size of the queue
// are configured into a memory-mapped register called command-queue base (cqb).
// The tail of the command-queue resides in a software controlled read/write
// memory-mapped register called command-queue tail (cqt). The cqt is an index
// into the next command queue entry that software will write. Subsequent to
// writing the command(s), software advances the cqt by the count of the number
// of commands written. The head of the command-queue resides in a read-only
// memory-mapped IOMMU controlled register called command-queue head (cqh). The
// cqh is an index into the command queue that IOMMU should process next.
//
// If command-queue access leads to a memory fault then the
// command-queue-memory-fault cqmf bit is set to 1
// If the execution of a command leads to a timeout (e.g. a command to invalidate
// device ATC may time out waiting for a completion), then the command-queue
// sets the cmd_to bit.
// If an illegal or unsupported command is fetched and decoded by the
// command-queue then the command-queue sets the cmd_ill bit
// If any of these bits are set then the CQ stops processing from the
// command-queue.
// The command-queue is active if cqon is 1.
// Sometimes the command queue may stall due to unavailability of internal
// resources - e.g. ITAG trackers
if ( (g_reg_file.cqcsr.cqon == 0) ||
(g_reg_file.cqcsr.cqen == 0) ||
(g_reg_file.cqcsr.cqmf != 0) ||
(g_reg_file.cqcsr.cmd_ill != 0) ||
(g_reg_file.cqcsr.cmd_to != 0) ||
(g_command_queue_stall_for_itag != 0) ||
(g_iofence_wait_pending_inv != 0) )
return;
// If cqh == cqt, the command-queue is empty.
// If cqt == (cqh - 1), modulo the queue size, the command-queue is full
// (see the queue-geometry sketch after this function).
if ( g_reg_file.cqh.index == g_reg_file.cqt.index )
return;
a = (g_reg_file.cqb.ppn * PAGESIZE) | (g_reg_file.cqh.index * 16);
status = read_memory(a, 16, (char *)&command);
if ( status != 0 ) {
// If command-queue access leads to a memory fault then the
// command-queue-memory-fault bit is set to 1 and the command
// queue stalls until this bit is cleared. When cqmf is set to 1, an
// interrupt is generated if an interrupt is not already pending (i.e.,
// ipsr.cip == 1) and not masked (i.e. cqcsr.cie == 0). To re-enable
// command processing, software should clear this bit by writing 1.
if ( g_reg_file.cqcsr.cqmf == 0 ) {
g_reg_file.cqcsr.cqmf = 1;
generate_interrupt(COMMAND_QUEUE);
}
return;
}
// IOMMU commands are grouped into a major command group determined by the
// opcode and within each group the func3 field specifies the function invoked
// by that command. The opcode defines the format of the operand fields. One
// or more of those fields may be used by the specific function invoked.
// A command is determined to be illegal if it uses a reserved encoding or if a
// reserved bit is set to 1. A command is unsupported if it is defined but not
// implemented as determined by the IOMMU capabilities register.
switch ( command.any.opcode ) {
case IOTINVAL:
if ( command.iotinval.rsvd != 0 || command.iotinval.rsvd1 != 0 ||
command.iotinval.rsvd2 != 0 || command.iotinval.rsvd3 != 0 ||
command.iotinval.rsvd4 != 0 ) goto command_illegal;
switch ( command.any.func3 ) {
case VMA:
do_iotinval_vma(command.iotinval.gv, command.iotinval.av,
command.iotinval.pscv, command.iotinval.gscid,
command.iotinval.pscid, command.iotinval.addr_63_12);
break;
case GVMA:
// Setting PSCV to 1 with IOTINVAL.GVMA is illegal.
if ( command.iotinval.pscv != 0 ) goto command_illegal;
do_iotinval_gvma(command.iotinval.gv, command.iotinval.av,
command.iotinval.gscid, command.iotinval.addr_63_12);
break;
default: goto command_illegal;
}
break;
case IODIR:
if ( command.iodir.rsvd != 0 || command.iodir.rsvd1 != 0 ||
command.iodir.rsvd2 != 0 || command.iodir.rsvd3 != 0 )
goto command_illegal;
switch ( command.any.func3 ) {
case INVAL_DDT:
// The PID operand is reserved for the
// IODIR.INVAL_DDT command.
if ( command.iodir.pid != 0 ) goto command_illegal;
// When DV operand is 1, the value of the DID operand must not
// be wider than that supported by the ddtp.iommu_mode.
if ( command.iodir.dv &&
(command.iodir.did & ~g_max_devid_mask) ) {
goto command_illegal;
}
do_inval_ddt(command.iodir.dv, command.iodir.did);
break;
case INVAL_PDT:
// The DV operand must be 1 for IODIR.INVAL_PDT else
// the command is illegal. When DV operand is 1, the value of
// the DID operand must not be wider than that supported by
// the ddtp.iommu_mode.
if ( command.iodir.dv != 1 ) goto command_illegal;
// When DV operand is 1, the value of the DID operand must not
// be wider than that supported by the ddtp.iommu_mode.
if ( command.iodir.did & ~g_max_devid_mask )
goto command_illegal;
// The PID operand of IODIR.INVAL_PDT must not be wider than
// the width supported by the IOMMU (see Section 5.3)
if ( g_reg_file.capabilities.pd20 == 0 &&
command.iodir.pid > ((1UL << 17) - 1) ) {
goto command_illegal;
}
if ( g_reg_file.capabilities.pd20 == 0 &&
g_reg_file.capabilities.pd17 == 0 &&
command.iodir.pid > ((1UL << 8) - 1) ) {
goto command_illegal;
}
do_inval_pdt(command.iodir.did, command.iodir.pid);
break;
default: goto command_illegal;
}
break;
case IOFENCE:
if ( command.iofence.reserved != 0 || command.iofence.reserved1 != 0 )
goto command_illegal;
// The wired-signaled-interrupt (WSI) bit, when set to 1,
// causes a wired-interrupt from the command
// queue to be generated on completion of IOFENCE.C. This
// bit is reserved if the IOMMU is not configured to
// generate wired interrupts (fctl.wsi is 0).
if ( g_reg_file.fctl.wsi == 0 && command.iofence.wsi == 1 )
goto command_illegal;
switch ( command.any.func3 ) {
case IOFENCE_C:
if ( do_iofence_c(command.iofence.pr, command.iofence.pw,
command.iofence.av, command.iofence.wsi,
(command.iofence.addr_63_2 << 2UL), command.iofence.data) ) {
// If IOFENCE encountered a memory fault or timeout
// then do not advance the CQH
// If IOFENCE is waiting for invalidation requests
// to complete then do not advance the CQ head
return;
}
break;
default: goto command_illegal;
}
break;
case ATS:
if ( command.ats.rsvd != 0 || command.ats.rsvd1 != 0 ) goto command_illegal;
switch ( command.any.func3 ) {
case INVAL:
// Allocate an ITAG for the request
if ( allocate_itag(command.ats.dsv, command.ats.dseg,
command.ats.rid, &itag) ) {
// No ITAG available. This command stays pending,
// but since the reference implementation only
// has a one-deep pending command buffer the CQ
// now stalls until a completion or a timeout
// frees up pending ITAGs.
g_pending_inval_req_DSV = command.ats.dsv;
g_pending_inval_req_DSEG = command.ats.dseg;
g_pending_inval_req_RID = command.ats.rid;
g_pending_inval_req_PV = command.ats.pv;
g_pending_inval_req_PID = command.ats.pid;
g_pending_inval_req_PAYLOAD = command.ats.payload;
g_command_queue_stall_for_itag = 1;
} else {
// ITAG allocated successfully, send invalidate request
do_ats_msg(INVAL_REQ_MSG_CODE, itag, command.ats.dsv, command.ats.dseg,
command.ats.rid, command.ats.pv, command.ats.pid, command.ats.payload);
}
break;
case PRGR:
do_ats_msg(PRGR_MSG_CODE, 0, command.ats.dsv, command.ats.dseg,
command.ats.rid, command.ats.pv, command.ats.pid, command.ats.payload);
break;
default: goto command_illegal;
}
break;
default: goto command_illegal;
}
// The head of the command-queue resides in a read-only memory-mapped IOMMU
// controlled register called command-queue head (`cqh`). The `cqh` is an index
// into the command queue that IOMMU should process next. Subsequent to reading
// each command the IOMMU may advance the `cqh` by 1.
g_reg_file.cqh.index =
(g_reg_file.cqh.index + 1) & ((1UL << (g_reg_file.cqb.log2szm1 + 1)) - 1);
return;
command_illegal:
// If an illegal or unsupported command is fetched and decoded by
// the command-queue then the command-queue sets the cmd_ill
// bit and stops processing from the command-queue. When cmd_ill
// is set to 1, an interrupt is generated if not already pending (i.e.
// ipsr.cip == 1) and not masked (i.e. cqcsr.cie == 0). To re-enable
// command processing, software should clear this bit by writing 1.
if ( g_reg_file.cqcsr.cmd_ill == 0 ) {
g_reg_file.cqcsr.cmd_ill = 1;
generate_interrupt(COMMAND_QUEUE);
}
return;
}
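// Illustrative sketch, not part of the reference model: how software might
// derive the queue geometry described in the comments of process_commands()
// from the cqb/cqh/cqt register values. The helper names below are
// assumptions introduced for illustration only; each command slot is 16 bytes
// and the queue base is cqb.ppn * PAGESIZE.
static inline uint64_t
example_cq_slot_address(uint64_t cqb_ppn, uint32_t index) {
    // Physical address of command slot 'index': base of the queue page(s)
    // plus 16 bytes per preceding slot.
    return (cqb_ppn * PAGESIZE) + ((uint64_t)index * 16);
}
static inline uint32_t
example_cq_is_full(uint32_t cqt, uint32_t cqh, uint8_t log2szm1) {
    // The queue holds 2^(log2szm1 + 1) entries; it is full when advancing
    // cqt by one, modulo the queue size, would make it equal to cqh.
    uint32_t mask = (1U << (log2szm1 + 1)) - 1;
    return ((cqt + 1) & mask) == cqh;
}
// For example, with log2szm1 == 3 the queue has 16 slots, and slot 5 of a
// queue based at PPN 0x1234 lives at 0x1234000 + 80.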
void
do_inval_ddt(
uint8_t DV, uint32_t DID) {
uint8_t i;
// IOMMU operations cause implicit reads to DDT and/or PDT.
// To reduce latency of such reads, the IOMMU may cache entries from
// the DDT and/or PDT in IOMMU directory caches. These caches may not
// observe modifications performed by software to these data structures
// in memory.
// The IOMMU DDT cache invalidation command, `IODIR.INVAL_DDT`,
// synchronizes updates to the DDT with the operation of the IOMMU and
// flushes the matching cached entries.
// The `DV` operand indicates if the device ID (`DID`) operand is valid.
// `IODIR.INVAL_DDT` guarantees that any previous stores made by a RISC-V hart to
// the DDT are observed before all subsequent implicit reads from IOMMU to DDT.
// If `DV` is 0, then the command invalidates all DDT and PDT entries cached for
// all devices. If `DV` is 1, then the command invalidates cached leaf level DDT
// entry for the device identified by `DID` operand and all associated PDT entries.
// The `PID` operand is reserved for `IODIR.INVAL_DDT`.
for ( i = 0; i < DDT_CACHE_SIZE; i++ ) {
if ( ddt_cache[i].valid == 0 ) continue;
if ( DV == 0 ) ddt_cache[i].valid = 0;
if ( DV == 1 && (ddt_cache[i].DID == DID) )
ddt_cache[i].valid = 0;
}
return;
}
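// Illustrative sketch, an assumption rather than part of the reference model:
// how a driver might fill in an IODIR.INVAL_DDT command to drop the cached
// DDT entry (and associated PDT entries) for one device. The helper name is
// hypothetical; the field names match the command_t layout used above.
static inline command_t
example_iodir_inval_ddt(uint32_t did) {
    command_t cmd = {0};      // assumes zero-initialization clears the reserved fields
    cmd.any.opcode = IODIR;
    cmd.any.func3  = INVAL_DDT;
    cmd.iodir.dv   = 1;       // DID operand is valid
    cmd.iodir.did  = did;
    cmd.iodir.pid  = 0;       // PID is reserved for IODIR.INVAL_DDT
    return cmd;
}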
void
do_inval_pdt(
uint32_t DID, uint32_t PID) {
int i;
// IOMMU operations cause implicit reads to DDT and/or PDT.
// To reduce latency of such reads, the IOMMU may cache entries from
// the DDT and/or PDT in IOMMU directory caches. These caches may not
// observe modifications performed by software to these data structures
// in memory.
// The IOMMU PDT cache invalidation command, `IODIR.INVAL_PDT`, synchronizes
// updates to the PDT with the operation of the IOMMU and flushes the matching
// cached entries.
// The `DV` operand must be 1 for `IODIR.INVAL_PDT`.
// `IODIR.INVAL_PDT` guarantees that any previous stores made by a RISC-V hart to
// the PDT are observed before all subsequent implicit reads from IOMMU to PDT.
// The command invalidates cached leaf PDT entry for the specified `PID` and `DID`.
for ( i = 0; i < PDT_CACHE_SIZE; i++ )
if ( pdt_cache[i].DID == DID && pdt_cache[i].PID == PID && pdt_cache[i].valid == 1)
pdt_cache[i].valid = 0;
return;
}
void
do_iotinval_vma(
uint8_t GV, uint8_t AV, uint8_t PSCV, uint32_t GSCID, uint32_t PSCID, uint64_t ADDR_63_12) {
// IOMMU operations cause implicit reads to PDT, first-stage and second-stage
// page tables. To reduce latency of such reads, the IOMMU may cache entries
// from the first and/or second-stage page tables in the
// IOMMU-address-translation-cache (IOATC). These caches may not observe
// modifications performed by software to these data structures in memory.
// The IOMMU translation-table cache invalidation commands, IOTINVAL.VMA
// and IOTINVAL.GVMA synchronize updates to in-memory S/VS-stage and G-stage
// page table data structures with the operation of the IOMMU and invalidate
// the matching IOATC entries.
// The GV operand indicates if the Guest-Soft-Context ID (GSCID) operand is
// valid. The PSCV operand indicates if the Process Soft-Context ID (PSCID)
// operand is valid. Setting PSCV to 1 is allowed only for IOTINVAL.VMA. The
// AV operand indicates if the address (ADDR) operand is valid. When GV is 0,
// the translations associated with the host (i.e. those where the
// second-stage translation is not active) are operated on.
// IOTINVAL.VMA ensures that previous stores made to the first-stage page
// tables by the harts are observed by the IOMMU before all subsequent
// implicit reads from IOMMU to the corresponding first-stage page tables.
//
// .`IOTINVAL.VMA` operands and operations
// |`GV`|`AV`|`PSCV`| Operation
// |0 |0 |0 | Invalidates all address-translation cache entries, including
// those that contain global mappings, for all host address
// spaces.
// |0 |0 |1 | Invalidates all address-translation cache entries for the
// host address space identified by `PSCID` operand, except for
// entries containing global mappings.
// |0 |1 |0 | Invalidates all address-translation cache entries that
// contain leaf page table entries, including those that contain
// global mappings, corresponding to the IOVA in `ADDR` operand,
// for all host address spaces.
// |0 |1 |1 | Invalidates all address-translation cache entries that
// contain leaf page table entries corresponding to the IOVA in
// `ADDR` operand and that match the host address space
// identified by `PSCID` operand, except for entries containing
// global mappings.
// |1 |0 |0 | Invalidates all address-translation cache entries, including
// those that contain global mappings, for all VM address spaces
// associated with `GSCID` operand.
// |1 |0 |1 | Invalidates all address-translation cache entries for the
// VM address space identified by `PSCID` and `GSCID`
// operands, except for entries containing global mappings.
// |1 |1 |0 | Invalidates all address-translation cache entries that
// contain leaf page table entries, including those that contain
// global mappings, corresponding to the IOVA in `ADDR` operand,
// for all VM address spaces associated with the `GSCID` operand.
// |1 |1 |1 | Invalidates all address-translation cache entries that
// contain leaf page table entries corresponding to the IOVA in
// `ADDR` operand, for the VM address space identified by `PSCID`
// and `GSCID` operands, except for entries containing global
// mappings.
uint8_t i, gscid_match, pscid_match, addr_match, global_match;
for ( i = 0; i < TLB_SIZE; i++ ) {
gscid_match = pscid_match = addr_match = global_match = 0;
if ( (GV == 0 && tlb[i].GV == 0 ) ||
(GV == 1 && tlb[i].GV == 1 && tlb[i].GSCID == GSCID) )
gscid_match = 1;
if ( (PSCV == 0) ||
(PSCV == 1 && tlb[i].PSCV == 1 && tlb[i].PSCID == PSCID) )
pscid_match = 1;
if ( (PSCV == 0) ||
(PSCV == 1 && tlb[i].G == 0) )
global_match = 1;
if ( (AV == 0) ||
(AV == 1 && match_address_range(ADDR_63_12, tlb[i].vpn, tlb[i].S)) )
addr_match = 1;
if ( gscid_match && pscid_match && addr_match && global_match )
tlb[i].valid = 0;
}
return;
}
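// Illustrative sketch, an assumption rather than part of the reference model:
// an IOTINVAL.VMA command for the GV=0, AV=1, PSCV=1 row of the table above,
// i.e. invalidate leaf entries for a single IOVA page in one host address
// space. The helper name is hypothetical; field names follow command_t.
static inline command_t
example_iotinval_vma_one_page(uint32_t pscid, uint64_t iova) {
    command_t cmd = {0};      // assumes zero-initialization clears the reserved fields
    cmd.any.opcode = IOTINVAL;
    cmd.any.func3  = VMA;
    cmd.iotinval.gv    = 0;   // host address space (no G-stage)
    cmd.iotinval.av    = 1;   // ADDR operand is valid
    cmd.iotinval.pscv  = 1;   // PSCID operand is valid
    cmd.iotinval.pscid = pscid;
    cmd.iotinval.addr_63_12 = iova >> 12;  // 4 KiB page containing the IOVA
    return cmd;
}
// Software would typically write such a command at cqt, advance cqt, and
// follow with an IOFENCE.C to learn when the invalidation has completed.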
void
do_iotinval_gvma(
uint8_t GV, uint8_t AV, uint32_t GSCID, uint64_t ADDR_63_12) {
uint8_t i, gscid_match, addr_match;
// Conceptually, an implementation might contain two address-translation
// caches: one that maps guest virtual addresses to guest physical addresses,
// and another that maps guest physical addresses to supervisor physical
// addresses. IOTINVAL.GVMA need not flush the former cache, but it must
// flush entries from the latter cache that match the IOTINVAL.GVMA’s
// address and GSCID arguments.
// More commonly, implementations contain address-translation caches
// that map guest virtual addresses directly to supervisor physical
// addresses, removing a level of indirection. For such implementations,
// any entry whose guest virtual address maps to a guest physical address that
// matches the IOTINVAL.GVMA’s address and GSCID arguments must be flushed.
// Selectively flushing entries in this fashion requires tagging them with
// the guest physical address, which is costly, and so a common technique
// is to flush all entries that match the IOTINVAL.GVMA’s GSCID argument,
// regardless of the address argument.
// IOTINVAL.GVMA ensures that previous stores made to the G-stage page
// tables are observed before all subsequent implicit reads from IOMMU
// to the corresponding G-stage page tables. Setting PSCV to 1 with
// IOTINVAL.GVMA is illegal.
// .`IOTINVAL.GVMA` operands and operations
// | `GV` | `AV` | Operation
// | 0 | n/a | Invalidates information cached from any level of the
// G-stage page table, for all VM address spaces.
// | 1 | 0 | Invalidates information cached from any level of the
// G-stage page tables, but only for VM address spaces
// identified by the `GSCID` operand.
// | 1 | 1 | Invalidates information cached from leaf G-stage page
// table entries corresponding to the guest-physical-address in
// `ADDR` operand, for only the VM address spaces identified by the
// `GSCID` operand.
for ( i = 0; i < TLB_SIZE; i++ ) {
if ( tlb[i].valid == 0 ) continue;
gscid_match = addr_match = 0;
if ( (GV == 0 && tlb[i].GV == 1) ||
(GV == 1 && tlb[i].GV == 1 && tlb[i].GSCID == GSCID) )
gscid_match = 1;
// If the cache entry holds a VA -> SPA translation (i.e., PSCV == 1) then
// invalidate it. If PSCV is 0 then the entry is tagged with a GPA; if AV is 0
// all such entries are eligible, otherwise match the address.
if ( (tlb[i].PSCV == 1) || (AV == 0) ||
(tlb[i].PSCV == 0 && AV == 1 && match_address_range(ADDR_63_12, tlb[i].vpn, tlb[i].S)) )
addr_match = 1;
if ( gscid_match && addr_match )
tlb[i].valid = 0;
}
return;
}
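// Illustrative sketch, an assumption rather than part of the reference model:
// the common GSCID-only flush described in the note above (GV=1, AV=0), which
// drops every matching IOATC entry for one guest regardless of address.
// The helper name is hypothetical; field names follow command_t.
static inline command_t
example_iotinval_gvma_flush_guest(uint32_t gscid) {
    command_t cmd = {0};      // assumes zero-initialization clears the reserved fields
    cmd.any.opcode = IOTINVAL;
    cmd.any.func3  = GVMA;
    cmd.iotinval.gv    = 1;   // GSCID operand is valid
    cmd.iotinval.av    = 0;   // no address filter: flush the whole guest
    cmd.iotinval.pscv  = 0;   // PSCV must be 0 for IOTINVAL.GVMA
    cmd.iotinval.gscid = gscid;
    return cmd;
}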
void
do_ats_msg(
uint8_t MSGCODE, uint8_t TAG, uint8_t DSV, uint8_t DSEG, uint16_t RID,
uint8_t PV, uint32_t PID, uint64_t PAYLOAD) {
ats_msg_t msg;
// The ATS.INVAL command instructs the IOMMU to send an “Invalidation Request” message
// to the PCIe device function identified by RID. An “Invalidation Request” message
// is used to clear a specific subset of the address range from the address translation
// cache in a device function. The ATS.INVAL command completes when an “Invalidation
// Completion” response message is received from the device or a protocol-defined
// timeout occurs while waiting for a response. The IOMMU may advance the cqh and fetch
// more commands from the CQ while a response is awaited.
// The ATS.PRGR command instructs the IOMMU to send a “Page Request Group Response”
// message to the PCIe device function identified by the RID. The “Page Request Group
// Response” message is used by system hardware and/or software to communicate with the
// device function's page-request interface to signal completion of a “Page Request”, or
// the catastrophic failure of the interface. If the PV operand is set to 1, the message
// is generated with a PASID, with the PASID field set to the PID operand. The PAYLOAD
// operand of the command is used to form the message body.
// If the DSV operand is 1, then a valid destination segment number is specified by
// the DSEG operand.
msg.MSGCODE = MSGCODE;
msg.TAG = TAG;
msg.RID = RID;
msg.DSV = DSV;
msg.DSEG = DSEG;
msg.PV = PV;
msg.PID = PID;
msg.PAYLOAD = PAYLOAD;
msg.PRIV = 0;
msg.EXEC_REQ= 0;
send_msg_iommu_to_hb(&msg);
return;
}
uint8_t
do_iofence_c(
uint8_t PR, uint8_t PW, uint8_t AV, uint8_t WSI_BIT, uint64_t ADDR, uint32_t DATA) {
uint8_t status;
// The IOMMU fetches commands from the CQ in order but the IOMMU may execute the fetched
// commands out of order. The IOMMU advancing cqh is not a guarantee that the commands
// fetched by the IOMMU have been executed or committed. An IOFENCE.C command guarantees
// that all previous commands fetched from the CQ have been completed and committed.
g_iofence_wait_pending_inv = 1;
if ( any_ats_invalidation_requests_pending() ) {
// if all previous ATS invalidation requests
// have not completed then IOFENCE waits for
// them to complete - or timeout
g_iofence_pending_PR = PR;
g_iofence_pending_PW = PW;
g_iofence_pending_AV = AV;
g_iofence_pending_WSI_BIT = WSI_BIT;
g_iofence_pending_ADDR = ADDR;
g_iofence_pending_DATA = DATA;
return 1;
}
// All previous pending invalidation requests completed or timed out
g_iofence_wait_pending_inv = 0;
// If any ATC invalidation requests timed out then set command timeout
if ( g_ats_inv_req_timeout == 1 ) {
if ( g_reg_file.cqcsr.cmd_to == 0 ) {
g_reg_file.cqcsr.cmd_to = 1;
generate_interrupt(COMMAND_QUEUE);
}
g_ats_inv_req_timeout = 0;
return 1;
}
// The commands may be used to order memory accesses from I/O devices connected to the IOMMU
// as viewed by the IOMMU, other RISC-V harts, and external devices or co-processors. The
// PR and PW bits can be used to request that the IOMMU ensure that all previous requests
// from devices that have already been processed by the IOMMU be committed to a global
// ordering point such that they can be observed by all RISC-V harts and IOMMUs in the machine.
if ( PR == 1 || PW == 1 )
iommu_to_hb_do_global_observability_sync(PR, PW);
// The AV command operand indicates whether the ADDR[63:2] and DATA operands are valid.
// If AV=1, the IOMMU writes DATA to memory at a 4-byte aligned address ADDR[63:2] * 4 as
// a 4-byte store.
if ( AV == 1 ) {
status = write_memory((char *)&DATA, ADDR, 4);
if ( status != 0 ) {
if ( g_reg_file.cqcsr.cqmf == 0 ) {
g_reg_file.cqcsr.cqmf = 1;
generate_interrupt(COMMAND_QUEUE);
}
return 1;
}
}
// The wired-signaled-interrupt (WSI) bit, when set to 1, causes a wired-interrupt from the command
// queue to be generated on completion of IOFENCE.C. This bit is reserved if the IOMMU is not
// configured to generate wired interrupts (fctl.wsi is 0).
if ( g_reg_file.cqcsr.fence_w_ip == 0 && WSI_BIT == 1 ) {
g_reg_file.cqcsr.fence_w_ip = 1;
generate_interrupt(COMMAND_QUEUE);
}
return 0;
}
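// Illustrative sketch, an assumption rather than part of the reference model:
// how software might use IOFENCE.C with AV=1 as a completion marker, per the
// behaviour implemented in do_iofence_c(). 'fence_flag_pa' is a hypothetical
// 4-byte-aligned physical address polled by the driver, and the helper name
// is also hypothetical; field names follow command_t.
static inline command_t
example_iofence_c_completion_marker(uint64_t fence_flag_pa) {
    command_t cmd = {0};      // assumes zero-initialization clears the reserved fields
    cmd.any.opcode = IOFENCE;
    cmd.any.func3  = IOFENCE_C;
    cmd.iofence.pr  = 1;      // order previously processed device reads globally
    cmd.iofence.pw  = 1;      // order previously processed device writes globally
    cmd.iofence.av  = 1;      // ADDR/DATA operands are valid
    cmd.iofence.wsi = 0;      // no wired interrupt on completion
    cmd.iofence.addr_63_2 = fence_flag_pa >> 2;  // 4-byte aligned address
    cmd.iofence.data = 1;     // value stored when all prior commands complete
    return cmd;
}
// Software would write this command at cqt, advance cqt, and poll the 4-byte
// location at fence_flag_pa until it reads 1.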
// Retry a pending IOFENCE once all outstanding invalidations have completed or timed out
void
do_pending_iofence() {
if ( do_iofence_c(g_iofence_pending_PR, g_iofence_pending_PW, g_iofence_pending_AV,
g_iofence_pending_WSI_BIT, g_iofence_pending_ADDR, g_iofence_pending_DATA) == 0 ) {
// If not still pending then advance the CQH
g_reg_file.cqh.index =
(g_reg_file.cqh.index + 1) & ((1UL << (g_reg_file.cqb.log2szm1 + 1)) - 1);
}
// If IOFENCE is not pending and CQ was requested to be
// turned off then turn it off now
if ( g_iofence_wait_pending_inv == 0 ) {
g_reg_file.cqcsr.cqon = g_reg_file.cqcsr.cqen;
g_reg_file.cqcsr.busy = 0;
}
return;
}
void
queue_any_blocked_ats_inval_req() {
uint8_t itag;
if ( g_command_queue_stall_for_itag == 1 ) {
// Allocate an ITAG for the request
if ( allocate_itag(g_pending_inval_req_DSV, g_pending_inval_req_DSEG,
g_pending_inval_req_RID, &itag) )
return;
// ITAG allocated successfully, send invalidate request
do_ats_msg(INVAL_REQ_MSG_CODE, itag, g_pending_inval_req_DSV,
g_pending_inval_req_DSEG, g_pending_inval_req_RID,
g_pending_inval_req_PV, g_pending_inval_req_PID,
g_pending_inval_req_PAYLOAD);
// Remove the command queue stall
g_command_queue_stall_for_itag = 0;
}
return;
}