From cbffb824b1933b9a8cad748d4db7cea6d252e449 Mon Sep 17 00:00:00 2001 From: Emmanuel Blot Date: Wed, 10 Apr 2024 17:52:20 +0200 Subject: [PATCH 01/27] [ot] scripts/opentitan: log.py: remove duplicate handlers Ensure there could not be more than one handler assigned to each logger, to avoid duplication of messages. Signed-off-by: Emmanuel Blot --- scripts/opentitan/ot/util/log.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/scripts/opentitan/ot/util/log.py b/scripts/opentitan/ot/util/log.py index 6189bf92e71ed..df41fe0b39316 100644 --- a/scripts/opentitan/ot/util/log.py +++ b/scripts/opentitan/ot/util/log.py @@ -1,14 +1,16 @@ -"""Logging helpers. -""" - # Copyright (c) 2023-2024 Rivos, Inc. # SPDX-License-Identifier: Apache2 +"""Logging helpers. + + :author: Emmanuel Blot +""" + from logging import (Formatter, Logger, StreamHandler, CRITICAL, DEBUG, INFO, ERROR, WARNING, getLogger) from os import isatty from sys import stderr -from typing import List +from typing import List, Tuple class ColorLogFormatter(Formatter): @@ -82,12 +84,18 @@ def configure_loggers(level: int, *lognames: List[str], **kwargs) \ logh = StreamHandler(stderr) logh.setFormatter(formatter) loggers: List[Logger] = [] + logdefs: List[Tuple[List[str], Logger]] = [] for logdef in lognames: if isinstance(logdef, int): loglevel += -10 * logdef continue log = getLogger(logdef) log.setLevel(max(DEBUG, loglevel)) - log.addHandler(logh) loggers.append(log) + logdefs.append((logdef.split('.'), log)) + logdefs.sort(key=lambda p: len(p[0])) + # ensure there is only one handler per logger subtree + for _, log in logdefs: + if not log.hasHandlers(): + log.addHandler(logh) return loggers From ad2c6fe7f9a14970d7ba9de55022927dfc9ae16f Mon Sep 17 00:00:00 2001 From: Emmanuel Blot Date: Wed, 10 Apr 2024 19:23:35 +0200 Subject: [PATCH 02/27] [ot] scripts/opentitan: move SPDX license identifier to the first lines. Signed-off-by: Emmanuel Blot --- scripts/opentitan/ot/lc_ctrl/__init__.py | 3 +-- scripts/opentitan/ot/mailbox/__init__.py | 4 ++-- scripts/opentitan/ot/mailbox/sysmbox.py | 3 +++ scripts/opentitan/ot/util/__init__.py | 4 ++-- 4 files changed, 8 insertions(+), 6 deletions(-) diff --git a/scripts/opentitan/ot/lc_ctrl/__init__.py b/scripts/opentitan/ot/lc_ctrl/__init__.py index c7a9e406c9215..eab046554bd88 100644 --- a/scripts/opentitan/ot/lc_ctrl/__init__.py +++ b/scripts/opentitan/ot/lc_ctrl/__init__.py @@ -1,8 +1,7 @@ -"""LifeCycle tools.""" - # Copyright (c) 2024 Rivos, Inc. # SPDX-License-Identifier: Apache2 +"""LifeCycle tools.""" class LifeCycleError(RuntimeError): """Life Cycle Error""" diff --git a/scripts/opentitan/ot/mailbox/__init__.py b/scripts/opentitan/ot/mailbox/__init__.py index 7fbe72811a0d3..f777a2d21e612 100644 --- a/scripts/opentitan/ot/mailbox/__init__.py +++ b/scripts/opentitan/ot/mailbox/__init__.py @@ -1,4 +1,4 @@ -"""Mailbox tools.""" - # Copyright (c) 2023-2024 Rivos, Inc. # SPDX-License-Identifier: Apache2 + +"""Mailbox tools.""" diff --git a/scripts/opentitan/ot/mailbox/sysmbox.py b/scripts/opentitan/ot/mailbox/sysmbox.py index b45d0b3c735af..cfe6b034fa21f 100644 --- a/scripts/opentitan/ot/mailbox/sysmbox.py +++ b/scripts/opentitan/ot/mailbox/sysmbox.py @@ -1,3 +1,6 @@ +# Copyright (c) 2024 Rivos, Inc. +# SPDX-License-Identifier: Apache2 + """System Mailbox. 
:author: Emmanuel Blot diff --git a/scripts/opentitan/ot/util/__init__.py b/scripts/opentitan/ot/util/__init__.py index 890797d26e79c..06db848f9d4b2 100644 --- a/scripts/opentitan/ot/util/__init__.py +++ b/scripts/opentitan/ot/util/__init__.py @@ -1,4 +1,4 @@ -"""Utilities.""" - # Copyright (c) 2024 Rivos, Inc. # SPDX-License-Identifier: Apache2 + +"""Utilities.""" From 392e796229ecbb551b63cd1e955b4c8a004d9768 Mon Sep 17 00:00:00 2001 From: Emmanuel Blot Date: Thu, 11 Apr 2024 17:04:44 +0200 Subject: [PATCH 03/27] [ot] scripts/opentitan: pyot.py: detect and report no test to execute. Signed-off-by: Emmanuel Blot --- docs/opentitan/pyot.md | 5 ++++- scripts/opentitan/pyot.py | 10 ++++++++-- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/docs/opentitan/pyot.md b/docs/opentitan/pyot.md index 9f8ab10cc0ed2..b253b51fa428b 100644 --- a/docs/opentitan/pyot.md +++ b/docs/opentitan/pyot.md @@ -51,6 +51,7 @@ Files: -f RAW, --flash RAW embedded Flash image file -x file, --exec file rom extension or application -b file, --boot file bootloader 0 file + -Z, --zero do not error if no test can be executed Execution: -R, --summary show a result summary @@ -144,7 +145,9 @@ This tool may be used in two ways, which can be combined: * `-b` / ` --boot` specify a bootloader 0 file that can be added to the flash image file when a ROM extension file is specified with the `-x` option. This option is mutually exclusive with the `-f` option. - +* `-Z`, `--zero` do not report an error if no test can be executed with the specified filters and + detected test applications. Default behavior is to report an error should such a condition arise, + as it likely comes from a misconfiguration or build issue. ## Configuration file diff --git a/scripts/opentitan/pyot.py b/scripts/opentitan/pyot.py index 3d171c31bacba..fb7fed7a667d3 100755 --- a/scripts/opentitan/pyot.py +++ b/scripts/opentitan/pyot.py @@ -954,7 +954,7 @@ def build(self) -> None: raise ValueError('Invalid suffixes sub-section') self._suffixes.extend(suffixes) - def run(self, debug: bool) -> int: + def run(self, debug: bool, allow_no_test: bool) -> int: """Execute all requested tests. 
:return: success or the code of the first encountered error @@ -989,6 +989,9 @@ def run(self, debug: bool) -> int: tests = self._build_test_list() tcount = len(tests) self._log.info('Found %d tests to execute', tcount) + if not tcount and not allow_no_test: + self._log.error('No test can be run') + return 1 for tpos, test in enumerate(tests, start=1): self._log.info('[TEST %s] (%d/%d)', self.get_test_radix(test), tpos, tcount) @@ -1435,6 +1438,9 @@ def main(): metavar='file', help='rom extension or application') files.add_argument('-b', '--boot', metavar='file', help='bootloader 0 file') + files.add_argument('-Z', '--zero', action='store_true', + default=False, + help='do not error if no test can be executed') exe = argparser.add_argument_group(title='Execution') exe.add_argument('-R', '--summary', action='store_true', help='show a result summary') @@ -1537,7 +1543,7 @@ def main(): if debug: print(format_exc(chain=False), file=stderr) argparser.error(str(exc)) - ret = qexc.run(args.debug) + ret = qexc.run(args.debug, args.zero) if args.summary: rfmt = ResultFormatter() rfmt.load(args.result) From 7347976158841117d6cb3aabb83d2ad0e154e8fb Mon Sep 17 00:00:00 2001 From: Emmanuel Blot Date: Tue, 9 Apr 2024 14:59:53 +0200 Subject: [PATCH 04/27] [ot] scripts/opentitan: jtag.py: use caches to speed up transitions - use a cache to track transitions for TMS sequences - use a cache to update current state from new transitions Signed-off-by: Emmanuel Blot --- scripts/jtag/bitbang.py | 2 +- scripts/jtag/jtag.py | 34 ++++++++++++++++++++++++++-------- 2 files changed, 27 insertions(+), 9 deletions(-) diff --git a/scripts/jtag/bitbang.py b/scripts/jtag/bitbang.py index 1c13f03b3f633..7a0d8d9197eae 100644 --- a/scripts/jtag/bitbang.py +++ b/scripts/jtag/bitbang.py @@ -156,7 +156,7 @@ def tdi(self) -> bool: @tdi.setter def tdi(self, value: bool): self._tdi = bool(value) - self._log.info('SET TDI %u', self._tdi) + self._log.debug('set TDI %u', self._tdi) @property def tms(self) -> bool: diff --git a/scripts/jtag/jtag.py b/scripts/jtag/jtag.py index ae72aab5038fd..a8432f5209a68 100644 --- a/scripts/jtag/jtag.py +++ b/scripts/jtag/jtag.py @@ -12,7 +12,7 @@ # pylint: enable=missing-function-docstring from logging import getLogger -from typing import List, Tuple, Union +from typing import Dict, List, Tuple, Union from .bits import BitSequence @@ -101,6 +101,10 @@ def __init__(self): self['exit_2_ir'].setx(self['shift_ir'], self['update_ir']) self['update_ir'].setx(self['run_test_idle'], self['select_dr_scan']) self._current = self['test_logic_reset'] + self._tr_cache: Dict[Tuple[str, # current state name + int, # event length + int], # event value + JtagState] = {} # new state def __getitem__(self, name: str) -> JtagState: return self.states[name] @@ -176,8 +180,13 @@ def handle_events(self, events: BitSequence) -> None: :param events: a sequence of boolean events to advance the FSM. 
""" + transit = (self._current.name, len(events), int(events)) + if transit in self._tr_cache: + self._current = self._tr_cache[transit] + return for event in events: self._current = self._current.getx(event) + self._tr_cache[transit] = self._current class JtagController: @@ -256,6 +265,9 @@ def __init__(self, ctrl: 'JtagController'): self._ctrl = ctrl self._log = getLogger('jtag.eng') self._fsm = JtagStateMachine() + self._tr_cache: Dict[Tuple[str, # from state + str], # to state + BitSequence] = {} # TMS sequence self._seq = bytearray() @property @@ -279,16 +291,22 @@ def get_available_statenames(self): def change_state(self, statename) -> None: """Advance the TAP controller to the defined state""" - # find the state machine path to move to the new instruction - path = self._fsm.find_path(statename) - self._log.debug('path: %s', - ', '.join((str(s).upper() for s in path[1:]))) - # convert the path into an event sequence - events = self._fsm.get_events(path) + transition = (self._fsm.state, statename) + if transition not in self._tr_cache: + # find the state machine path to move to the new instruction + path = self._fsm.find_path(statename) + self._log.debug('new path: %s', + ', '.join((str(s).upper() for s in path[1:]))) + # convert the path into an event sequence + events = self._fsm.get_events(path) + self._tr_cache[transition] = events + else: + # transition already in cache + events = self._tr_cache[transition] # update the remote device tap controller (write TMS consumes the seq) self._ctrl.write_tms(events.copy()) # update the current state machine's state - self._fsm.handle_events(events) + self._fsm.handle_events(events.copy()) def go_idle(self) -> None: """Change the current TAP controller to the IDLE state""" From 6b5ea53798689bccf8cbe8d95c5e422c83bacb95 Mon Sep 17 00:00:00 2001 From: Emmanuel Blot Date: Thu, 11 Apr 2024 17:05:45 +0200 Subject: [PATCH 05/27] [ot] scripts/jtag: bitbang.py optimize comm management - reduce log evaluation for low level comm traces - use local bytearray to build TCP packet - use local variables for JTAG signal management Signed-off-by: Emmanuel Blot --- scripts/jtag/bitbang.py | 79 ++++++++++++++++++++++++++++------------- scripts/jtag/jtag.py | 2 -- 2 files changed, 55 insertions(+), 26 deletions(-) diff --git a/scripts/jtag/bitbang.py b/scripts/jtag/bitbang.py index 7a0d8d9197eae..5b87eef6b58c5 100644 --- a/scripts/jtag/bitbang.py +++ b/scripts/jtag/bitbang.py @@ -37,7 +37,11 @@ class JtagBitbangController(JtagController): - """JTAG master for Remote Bitbang connection.""" + """JTAG master for Remote Bitbang connection. + + :param sock: communication socket with remote QEMU JTAG server + :param link_log: whether to emit link log messages + """ DEFAULT_PORT = 3335 """Default TCP port.""" @@ -50,15 +54,16 @@ class JtagBitbangController(JtagController): controller. 
""" - READ = 'R'.encode() + READ = b'R' """JTAG bitbang code to receive data from TDO.""" - QUIT = 'Q'.encode() + QUIT = b'Q' """JTAG bitbang code to quit.""" - def __init__(self, sock: socket): + def __init__(self, sock: socket, link_log: bool = False): self._log = getLogger('jtag.ctrl') self._sock = sock + self._link_log = link_log self._last: Optional[bool] = None # Last deferred TDO bit self._outbuf = bytearray() self._tck = False @@ -96,14 +101,21 @@ def write_tms(self, modesel: BitSequence) -> None: if self._last is not None: self._tdi = self._last self._last = None - self._log.debug('write TMS [%d] %s', len(modesel), modesel) + if self._link_log: + self._log.debug('write TMS [%d] %s', len(modesel), modesel) + tck = self._tck + tdi = self._tdi + code = self._bus_code + stream = bytearray() while modesel: tms = modesel.pop_left_bit() - self._write(self._bus_code(self._tck, tms, self._tdi)) - self._tck = not self._tck - self._write(self._bus_code(self._tck, tms, self._tdi)) - self._tck = not self._tck + stream.append(code(tck, tms, tdi)) + tck = not tck + stream.append(code(tck, tms, tdi)) + tck = not tck + self._sock.send(stream) self._tms = tms + self._tck = tck def write(self, out: BitSequence, use_last: bool = True): if not isinstance(out, BitSequence): @@ -113,14 +125,22 @@ def write(self, out: BitSequence, use_last: bool = True): # TODO: check if this case needs to be handled raise NotImplementedError('Last is lost') self._last = out.pop_left_bit() - self._log.debug('write TDI [%d] %s', len(out), out) + if self._link_log: + self._log.debug('write TDI [%d] %s', len(out), out) + tms = self._tms + tck = self._tck + code = self._bus_code + stream = bytearray() while out: tdi = out.pop_right_bit() - self._write(self._bus_code(self._tck, self._tms, tdi)) - self._tck = not self._tck - self._write(self._bus_code(self._tck, self._tms, tdi)) - self._tck = not self._tck + stream.append(code(tck, tms, tdi)) + tck = not tck + stream.append(code(tck, tms, tdi)) + tck = not tck + self._sock.send(stream) self._tdi = tdi + self._tms = tms + self._tck = tck def read(self, length: int) -> BitSequence: if length == 0: @@ -128,13 +148,21 @@ def read(self, length: int) -> BitSequence: bseq = BitSequence() rem = length timeout = now() + self.RECV_TIMEOUT - self._log.debug('read %d bits, TMS: %d', length, self._tms) + if self._link_log: + self._log.debug('read %d bits, TMS: %d', length, self._tms) + tms = self._tms + tck = self._tck + tdi = self._tdi + read = ord(self.READ) + code = self._bus_code + stream = bytearray() for _ in range(length): - self._write(self._bus_code(self._tck, self._tms, self._tdi)) - self._tck = not self._tck - self._write(self._bus_code(self._tck, self._tms, self._tdi)) - self._tck = not self._tck - self._sock.send(self.READ) + stream.append(code(tck, tms, tdi)) + tck = not tck + stream.append(code(tck, tms, tdi)) + tck = not tck + stream.append(read) + self._sock.send(stream) while rem: try: data = self._sock.recv(length) @@ -146,7 +174,8 @@ def read(self, length: int) -> BitSequence: bseq.push_right(data) timeout = now() + self.RECV_TIMEOUT bseq.reverse() - self._log.debug('read TDI [%d] %s', len(bseq), bseq) + if self._link_log: + self._log.debug('read TDI [%d] %s', len(bseq), bseq) return bseq @property @@ -156,7 +185,8 @@ def tdi(self) -> bool: @tdi.setter def tdi(self, value: bool): self._tdi = bool(value) - self._log.debug('set TDI %u', self._tdi) + if self._link_log: + self._log.debug('set TDI %u', self._tdi) @property def tms(self) -> bool: @@ -175,6 +205,7 @@ def 
_reset_code(cls, trst: bool, srst: bool) -> int: return ord('r') + ((int(trst) << 1) | srst) def _write(self, code: int): - self._log.debug('_write 0x%02x %s (%s)', code, f'{code-0x30:03b}', - chr(code)) + if self._link_log: + self._log.debug('_write 0x%02x %s (%s)', code, f'{code-0x30:03b}', + chr(code)) self._sock.send(bytes([code])) diff --git a/scripts/jtag/jtag.py b/scripts/jtag/jtag.py index a8432f5209a68..f1f29f3d6ec19 100644 --- a/scripts/jtag/jtag.py +++ b/scripts/jtag/jtag.py @@ -9,8 +9,6 @@ Based on JTAG support for FTDI from PyFtdi module """ -# pylint: enable=missing-function-docstring - from logging import getLogger from typing import Dict, List, Tuple, Union From 3bbab877ad622b0e4493a1a30342f2618a6309cf Mon Sep 17 00:00:00 2001 From: Emmanuel Blot Date: Wed, 10 Apr 2024 16:46:18 +0200 Subject: [PATCH 06/27] [ot] scripts/opentitan: lcdmi.py: remove an invalid line Signed-off-by: Emmanuel Blot --- scripts/opentitan/ot/lc_ctrl/lcdmi.py | 1 - 1 file changed, 1 deletion(-) diff --git a/scripts/opentitan/ot/lc_ctrl/lcdmi.py b/scripts/opentitan/ot/lc_ctrl/lcdmi.py index d47c3baffb257..3552bcb40d900 100644 --- a/scripts/opentitan/ot/lc_ctrl/lcdmi.py +++ b/scripts/opentitan/ot/lc_ctrl/lcdmi.py @@ -80,7 +80,6 @@ class LifeCycleController: TOKEN_FORMAT = '<4I' def __init__(self, dtm: DebugTransportModule, address: int): - super().__init__() self._log = getLogger('dtm.lcctrl') self._dtm = dtm self._dmi = dtm['dmi'] From 34fbdf8d88b1a76589c61ca11555f9045c474598 Mon Sep 17 00:00:00 2001 From: Emmanuel Blot Date: Wed, 10 Apr 2024 19:19:39 +0200 Subject: [PATCH 07/27] [ot] scripts/opentitan: move HexInt into a dedicated module. Signed-off-by: Emmanuel Blot --- scripts/opentitan/flashgen.py | 7 ++----- scripts/opentitan/gpiodev.py | 7 ++----- scripts/opentitan/ot/util/misc.py | 18 ++++++++++++++++++ scripts/opentitan/otptool.py | 13 +------------ 4 files changed, 23 insertions(+), 22 deletions(-) create mode 100644 scripts/opentitan/ot/util/misc.py diff --git a/scripts/opentitan/flashgen.py b/scripts/opentitan/flashgen.py index ad62590d0562e..34abd0ad57f11 100755 --- a/scripts/opentitan/flashgen.py +++ b/scripts/opentitan/flashgen.py @@ -24,6 +24,7 @@ Tuple, Union) from ot.util.log import configure_loggers +from ot.util.misc import HexInt try: # note: pyelftools package is an OpenTitan toolchain requirement, see @@ -781,10 +782,6 @@ def _log_manifest(self, manifest): self._log.debug('%s: (%d) %s', item, len(value), value) -def hexint(val: str) -> int: - return int(val, val.startswith('0x') and 16 or 10) - - def main(): """Main routine""" debug = True @@ -800,7 +797,7 @@ def main(): img.add_argument('-a', '--bank', type=int, choices=banks, default=banks[0], help=f'flash bank for data (default: {banks[0]})') - img.add_argument('-s', '--offset', type=hexint, + img.add_argument('-s', '--offset', type=HexInt.parse, default=FlashGen.CHIP_ROM_EXT_SIZE_MAX, help=f'offset of the BL0 file (default: ' f'0x{FlashGen.CHIP_ROM_EXT_SIZE_MAX:x})') diff --git a/scripts/opentitan/gpiodev.py b/scripts/opentitan/gpiodev.py index a3ef2729b9715..5cca77c9dbed5 100755 --- a/scripts/opentitan/gpiodev.py +++ b/scripts/opentitan/gpiodev.py @@ -18,6 +18,7 @@ from typing import Optional, TextIO, Tuple from ot.util.log import configure_loggers +from ot.util.misc import HexInt # pylint: disable-msg=missing-function-docstring,missing-class-docstring # pylint: disable-msg=too-few-public-methods @@ -262,10 +263,6 @@ def _build_reply(cls, **kwargs) -> str: return ''.join(lines) -def hexint(val: str) -> int: - return int(val, 
val.startswith('0x') and 16 or 10) - - def main(): """Main routine. """ @@ -279,7 +276,7 @@ def main(): help='input file to check command sequence') argparser.add_argument('-r', '--record', type=FileType('wt'), help='output file to record command sequence') - argparser.add_argument('-e', '--end', type=hexint, + argparser.add_argument('-e', '--end', type=HexInt.parse, help='emit the specified value to trigger ' 'remote exit on last received command') argparser.add_argument('-q', '--quit-on-error', action='store_true', diff --git a/scripts/opentitan/ot/util/misc.py b/scripts/opentitan/ot/util/misc.py new file mode 100644 index 0000000000000..b60df27051ae5 --- /dev/null +++ b/scripts/opentitan/ot/util/misc.py @@ -0,0 +1,18 @@ +# Copyright (c) 2024 Rivos, Inc. +# SPDX-License-Identifier: Apache2 + +"""Miscellaneous helpers. + + :author: Emmanuel Blot +""" + +class HexInt(int): + """Simple wrapper to always represent an integer in hexadecimal format.""" + + def __repr__(self) -> str: + return f'0x{self:x}' + + @staticmethod + def parse(val: str) -> int: + """Simple helper to support hexadecimal integer in argument parser.""" + return int(val, val.startswith('0x') and 16 or 10) diff --git a/scripts/opentitan/otptool.py b/scripts/opentitan/otptool.py index eb66c5c37fe45..c0605114ceb81 100755 --- a/scripts/opentitan/otptool.py +++ b/scripts/opentitan/otptool.py @@ -22,6 +22,7 @@ Tuple, Union) from ot.util.log import configure_loggers +from ot.util.misc import HexInt try: # try to load HJSON if available @@ -45,18 +46,6 @@ def round_up(value: int, rnd: int) -> int: return (value + rnd - 1) & -rnd -class HexInt(int): - """Simple wrapper to always represent an integer in hexadecimal format.""" - - def __repr__(self) -> str: - return f'0x{self:x}' - - @staticmethod - def parse(val: str) -> int: - """Simple helper to support hexadecimal integer in argument parser.""" - return int(val, val.startswith('0x') and 16 or 10) - - class classproperty(property): """Getter property decorator for a class""" # pylint: disable=invalid-name From 3c55b14a18b4014585fcafb7ffc0b4e4f1c5b6cd Mon Sep 17 00:00:00 2001 From: Emmanuel Blot Date: Thu, 11 Apr 2024 17:06:48 +0200 Subject: [PATCH 08/27] [ot] scripts/opentitan: misc.py: add a dump buffer debug function. 
Output is similar to `hexdump -C` Signed-off-by: Emmanuel Blot --- scripts/opentitan/ot/util/misc.py | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/scripts/opentitan/ot/util/misc.py b/scripts/opentitan/ot/util/misc.py index b60df27051ae5..7d39514694616 100644 --- a/scripts/opentitan/ot/util/misc.py +++ b/scripts/opentitan/ot/util/misc.py @@ -6,6 +6,15 @@ :author: Emmanuel Blot """ +from typing import Optional + +try: + # only available from Python 3.12+ + from collections.abc import Buffer +except ImportError: + Buffer = [bytes | bytearray | memoryview] + + class HexInt(int): """Simple wrapper to always represent an integer in hexadecimal format.""" @@ -13,6 +22,20 @@ def __repr__(self) -> str: return f'0x{self:x}' @staticmethod - def parse(val: str) -> int: + def parse(val: Optional[str]) -> Optional[int]: """Simple helper to support hexadecimal integer in argument parser.""" + if val is None: + return None return int(val, val.startswith('0x') and 16 or 10) + + +def dump_buffer(buffer: Buffer, addr: int) -> None: + """Dump a binary buffer, same format as hexdump -C.""" + view = buffer.getbuffer() + size = len(view) + for pos in range(0, size, 16): + chunks = view[pos:pos+8], view[pos+8:pos+16] + buf = ' '.join(' '.join(f'{x:02x}' for x in c) for c in chunks) + text = ''.join(chr(c) if 0x20 <= c < 0x7f else '.' + for c in view[pos:pos+16]) + print(f'{addr+pos:08x} {buf} |{text}|') From fb2578a1fca83ade8290bb119280debdc2c10e09 Mon Sep 17 00:00:00 2001 From: Emmanuel Blot Date: Wed, 10 Apr 2024 15:07:58 +0200 Subject: [PATCH 09/27] [ot] scripts/opentitan: factorize ElfBlob utility into a module Signed-off-by: Emmanuel Blot --- scripts/opentitan/flashgen.py | 207 ++----------------------------- scripts/opentitan/gdbreplay.py | 160 +----------------------- scripts/opentitan/ot/util/elf.py | 201 ++++++++++++++++++++++++++++++ 3 files changed, 216 insertions(+), 352 deletions(-) create mode 100644 scripts/opentitan/ot/util/elf.py diff --git a/scripts/opentitan/flashgen.py b/scripts/opentitan/flashgen.py index 34abd0ad57f11..0d83d45afc0b3 100755 --- a/scripts/opentitan/flashgen.py +++ b/scripts/opentitan/flashgen.py @@ -12,7 +12,6 @@ from binascii import hexlify from hashlib import sha256 from itertools import repeat -from io import BytesIO from logging import getLogger from os import SEEK_END, SEEK_SET, rename, stat from os.path import abspath, basename, exists, isfile @@ -20,23 +19,12 @@ from struct import calcsize as scalc, pack as spack, unpack as sunpack from sys import exit as sysexit, modules, stderr, version_info from traceback import format_exc -from typing import (Any, BinaryIO, Dict, Iterator, List, NamedTuple, Optional, - Tuple, Union) +from typing import Any, BinaryIO, Dict, List, NamedTuple, Optional, Tuple +from ot.util.elf import ElfBlob from ot.util.log import configure_loggers from ot.util.misc import HexInt -try: - # note: pyelftools package is an OpenTitan toolchain requirement, see - # python-requirements.txt file from OT top directory. 
- from elftools.common.exceptions import ELFError - from elftools.elf.constants import SH_FLAGS - from elftools.elf.elffile import ELFFile - from elftools.elf.sections import Section - from elftools.elf.segments import Segment -except ImportError: - ELFError = BaseException - ELFFile = None # pylint: disable=missing-function-docstring @@ -49,186 +37,12 @@ class BootLocation(NamedTuple): seq: int -class ElfBlob: - """Load ELF application.""" - - def __init__(self): - self._log = getLogger('flashgen.elf') - self._elf: Optional[ELFFile] = None - self._payload_address: int = 0 - self._payload_size: int = 0 - self._payload: bytes = b'' - - def load(self, efp: BinaryIO) -> None: - """Load the content of an ELF file. - - The ELF file stream is no longer accessed once this method - completes. - - :param efp: a File-like (binary read access) - """ - # use a copy of the stream to release the file pointer. - try: - self._elf = ELFFile(BytesIO(efp.read())) - except ELFError as exc: - raise ValueError(f'Invalid ELF file: {exc}') from exc - if self._elf['e_machine'] != 'EM_RISCV': - raise ValueError('Not an RISC-V ELF file') - if self._elf['e_type'] != 'ET_EXEC': - raise ValueError('Not an executable ELF file') - self._log.debug('entry point: 0x%X', self.entry_point) - self._log.debug('data size: %d', self.raw_size) - - @property - def address_size(self) -> int: - """Provide the width of address value used in the ELFFile. - - :return: the address width in bits (not bytes!) - """ - return self._elf.elfclass if self._elf else 0 - - @property - def entry_point(self) -> Optional[int]: - """Provide the entry point of the application, if any. - - :return: the entry point address - """ - return self._elf and self._elf.header.get('e_entry', None) - - @property - def raw_size(self) -> int: - """Provide the size of the Secure Boot Header section, if any. - - :return: the data/payload size in bytes - """ - if not self._payload_size: - self._payload_address, self._payload_size = self._parse_segments() - return self._payload_size - - @property - def load_address(self) -> int: - """Provide the first destination address on target to copy the - application blob. - - :return: the load address - """ - if not self._payload_address: - self._payload_address, self._payload_size = self._parse_segments() - return self._payload_address - - @property - def blob(self) -> bytes: - """Provide the application blob, i.e. the whole loadable binary. - - :return: the raw application binary. - """ - if not self._payload: - self._payload = self._build_payload() - if len(self._payload) != self.raw_size: - raise RuntimeError('Internal error: size mismatch') - return self._payload - - @property - def code_span(self) -> Tuple[int, int]: - """Report the extent of the executable portion of the ELF file. 
- - :return: (start address, end address) - """ - loadable_segments = list(self._loadable_segments()) - base_addr = None - last_addr = None - for section in self._elf.iter_sections(): - if not self.is_section_executable(section): - continue - for segment in loadable_segments: - if segment.section_in_segment(section): - break - else: - continue - addr = section.header['sh_addr'] - size = section.header['sh_size'] - if base_addr is None or base_addr > addr: - base_addr = addr - last = addr + size - if last_addr is None or last_addr < last: - last_addr = last - self._log.debug('Code section @ 0x%08x 0x%08x bytes', addr, size) - return base_addr, last_addr - - def is_section_executable(self, section: 'Section') -> bool: - """Report whether the section is flagged as executable. - - :return: True is section is executable - """ - return bool(section.header['sh_flags'] & SH_FLAGS.SHF_EXECINSTR) - - def _loadable_segments(self) -> Iterator['Segment']: - """Provide an iterator on segments that should be loaded into the final - binary. - """ - if not self._elf: - raise RuntimeError('No ELF file loaded') - for segment in sorted(self._elf.iter_segments(), - key=lambda seg: seg['p_paddr']): - if segment['p_type'] not in ('PT_LOAD', ): - continue - if not segment['p_filesz']: - continue - yield segment - - def _parse_segments(self) -> Tuple[int, int]: - """Parse ELF segments and extract physical location and size. - - :return: the location of the first byte and the overall payload size - in bytes - """ - size = 0 - phy_start = None - for segment in self._loadable_segments(): - seg_size = segment['p_filesz'] - if not seg_size: - continue - phy_addr = segment['p_paddr'] - if phy_start is None: - phy_start = phy_addr - else: - if phy_addr > phy_start+size: - self._log.debug('fill gap with previous segment') - size = phy_addr-phy_start - size += seg_size - if phy_start is None: - raise ValueError('No loadable segment found') - return phy_start, size - - def _build_payload(self) -> bytes: - """Extract the loadable payload from the ELF file and generate a - unique, contiguous binary buffer. - - :return: the payload to store as the application blob - """ - buf = BytesIO() - phy_start = None - for segment in self._loadable_segments(): - phy_addr = segment['p_paddr'] - if phy_start is None: - phy_start = phy_addr - else: - current_addr = phy_start+buf.tell() - if phy_addr > current_addr: - fill_size = phy_addr-current_addr - buf.write(bytes(fill_size)) - buf.write(segment.data()) - data = buf.getvalue() - buf.close() - return data - - class RuntimeDescriptor(NamedTuple): """Description of an executable binary. 
""" code_start: int code_end: int - raw_size: int + size: int entry_point: int @@ -398,7 +212,7 @@ def info_part_size(cls) -> int: return sum(cls.INFOS) * cls.BYTES_PER_PAGE def read_boot_info(self) -> Dict[BootLocation, - Dict[str, Union[int, bytes]]]: + Dict[str, [int | bytes]]]: size = self._boot_header_size fmt = ''.join(self.BOOT_HEADER_FORMAT.values()) boot_entries = {} @@ -578,7 +392,7 @@ def store_ot_files(self, otdescs: List[str]) -> None: def _compare_bin_elf(self, bindesc: RuntimeDescriptor, elfpath: str) \ -> Optional[bool]: - if ELFFile is None: + if not ElfBlob.LOADED: return None with open(elfpath, 'rb') as efp: elfdesc = self._load_elf_info(efp) @@ -593,7 +407,7 @@ def _compare_bin_elf(self, bindesc: RuntimeDescriptor, elfpath: str) \ self._log.debug('ELF base offset 0x%08x', offset) relfdesc = RuntimeDescriptor(elfdesc.code_start - offset, elfdesc.code_end - offset, - elfdesc.raw_size, + elfdesc.size, elfdesc.entry_point - offset) match = bindesc == relfdesc logfunc = self._log.debug if match else self._log.warning @@ -602,7 +416,7 @@ def _compare_bin_elf(self, bindesc: RuntimeDescriptor, elfpath: str) \ logfunc('end bin %08x / elf %08x', bindesc.code_end, relfdesc.code_end) logfunc('size bin %08x / elf %08x', - bindesc.raw_size, relfdesc.raw_size) + bindesc.size, relfdesc.size) logfunc('entry bin %08x / elf %08x', bindesc.entry_point, relfdesc.entry_point) return match @@ -683,7 +497,7 @@ def _get_elf_filename(self, filename: str) -> str: def _load_elf_info(self, efp: BinaryIO) \ -> Optional[RuntimeDescriptor]: - if not ELFFile: + if not ElfBlob.LOADED: # ELF tools are not available self._log.warning('ELF file cannot be verified') return None @@ -692,8 +506,7 @@ def _load_elf_info(self, efp: BinaryIO) \ if elf.address_size != 32: raise ValueError('Spefified ELF file {} is not an ELF32 file') elfstart, elfend = elf.code_span - return RuntimeDescriptor(elfstart, elfend, elf.raw_size, - elf.entry_point) + return RuntimeDescriptor(elfstart, elfend, elf.size, elf.entry_point) def _store_debug_info(self, entryname: str, filename: Optional[str]) \ -> None: @@ -827,7 +640,7 @@ def main(): args = argparser.parse_args() debug = args.debug - configure_loggers(args.verbose, 'flashgen') + configure_loggers(args.verbose, 'flashgen', 'elf') use_bl0 = bool(args.boot) or len(args.otdesc) > 1 gen = FlashGen(args.offset if use_bl0 else 0, bool(args.unsafe_elf), diff --git a/scripts/opentitan/gdbreplay.py b/scripts/opentitan/gdbreplay.py index 3dfefa6814a16..5ec90bda34fae 100755 --- a/scripts/opentitan/gdbreplay.py +++ b/scripts/opentitan/gdbreplay.py @@ -10,7 +10,6 @@ from argparse import ArgumentParser, FileType, Namespace from binascii import hexlify -from io import BytesIO from logging import getLogger from os import linesep from os.path import dirname, isfile, join as joinpath, normpath @@ -20,160 +19,11 @@ from string import ascii_uppercase from sys import exit as sysexit, modules, stderr from traceback import format_exc -from typing import (BinaryIO, Dict, Iterator, List, Optional, TextIO, Tuple, - Union) +from typing import BinaryIO, Dict, List, Optional, TextIO, Tuple +from ot.util.elf import ElfBlob from ot.util.log import configure_loggers -try: - from elftools.common.exceptions import ELFError - from elftools.elf.elffile import ELFFile - from elftools.elf.segments import Segment -except ImportError: - ELFError = None - ELFFile = None - Segment = None - - -class ElfBlob: - """Load ELF application.""" - - def __init__(self): - self._log = getLogger('gdbrp.elf') - self._elf: 
Optional[ELFFile] = None - self._payload_address: int = 0 - self._payload_size: int = 0 - self._payload: bytes = b'' - - def load(self, efp: BinaryIO) -> None: - """Load the content of an ELF file. - - The ELF file stream is no longer accessed once this method - completes. - - :param efp: a File-like (binary read access) - """ - # use a copy of the stream to release the file pointer. - try: - self._elf = ELFFile(BytesIO(efp.read())) - except ELFError as exc: - raise ValueError(f'Invalid ELF file: {exc}') from exc - if self._elf['e_machine'] != 'EM_RISCV': - raise ValueError('Not a RISC-V ELF file') - if self._elf['e_type'] != 'ET_EXEC': - raise ValueError('Not an executable ELF file') - self._log.debug('entry point: 0x%X', self.entry_point) - self._log.debug('data size: %d', self.raw_size) - - @property - def address_size(self) -> int: - """Provide the width of address value used in the ELFFile. - - :return: the address width in bits (not bytes!) - """ - return self._elf.elfclass if self._elf else 0 - - @property - def entry_point(self) -> Optional[int]: - """Provide the entry point of the application, if any. - - :return: the entry point address - """ - return self._elf and self._elf.header.get('e_entry', None) - - @property - def raw_size(self) -> int: - """Provide the size of the payload section, if any. - - :return: the data/payload size in bytes - """ - if not self._payload_size: - self._payload_address, self._payload_size = self._parse_segments() - return self._payload_size - - @property - def load_address(self) -> int: - """Provide the first destination address on target to copy the - application blob. - - :return: the load address - """ - if not self._payload_address: - self._payload_address, self._payload_size = self._parse_segments() - return self._payload_address - - @property - def blob(self) -> bytes: - """Provide the application blob, i.e. the whole loadable binary. - - :return: the raw application binary. - """ - if not self._payload: - self._payload = self._build_payload() - if len(self._payload) != self.raw_size: - raise RuntimeError('Internal error: size mismatch') - return self._payload - - def _loadable_segments(self) -> Iterator[Segment]: - """Provide an iterator on segments that should be loaded into the final - binary. - """ - if not self._elf: - raise RuntimeError('No ELF file loaded') - for segment in sorted(self._elf.iter_segments(), - key=lambda seg: seg['p_paddr']): - if segment['p_type'] not in ('PT_LOAD', ): - continue - if not segment['p_filesz']: - continue - yield segment - - def _parse_segments(self) -> Tuple[int, int]: - """Parse ELF segments and extract physical location and size. - - :return: the location of the first byte and the overall payload size - in bytes - """ - size = 0 - phy_start = None - for segment in self._loadable_segments(): - seg_size = segment['p_filesz'] - if not seg_size: - continue - phy_addr = segment['p_paddr'] - if phy_start is None: - phy_start = phy_addr - else: - if phy_addr > phy_start+size: - self._log.debug('fill gap with previous segment') - size = phy_addr-phy_start - size += seg_size - if phy_start is None: - raise ValueError('No loadable segment found') - return phy_start, size - - def _build_payload(self) -> bytes: - """Extract the loadable payload from the ELF file and generate a - unique, contiguous binary buffer. 
- - :return: the payload to store as the application blob - """ - buf = BytesIO() - phy_start = None - for segment in self._loadable_segments(): - phy_addr = segment['p_paddr'] - if phy_start is None: - phy_start = phy_addr - else: - current_addr = phy_start+buf.tell() - if phy_addr > current_addr: - fill_size = phy_addr-current_addr - buf.write(bytes(fill_size)) - buf.write(segment.data()) - data = buf.getvalue() - buf.close() - return data - class QEMUMemoryController: """Memory controller. @@ -606,7 +456,7 @@ def _send(self, payload: str): self._log.info('Reply: "%s"', payload) self._send_bytes(payload.encode()) - def _send_bytes(self, payload: Union[bytes, bytearray]): + def _send_bytes(self, payload: [bytes | bytearray]): """Send a reply to the remote GDB client. :param payload: the byte sequence to send @@ -825,7 +675,7 @@ def main(): args = argparser.parse_args() debug = args.debug - configure_loggers(args.verbose, 'gdbrp') + configure_loggers(args.verbose, 'gdbrp', 'elf') acount = len(args.address or []) bcount = len(args.bin or []) @@ -834,7 +684,7 @@ def main(): gdbr = QEMUGDBReplay() if args.elf: - if ELFFile is None: + if not ElfBlob.LOADED: argparser.error('Please install PyElfTools package') for elf in args.elf: gdbr.load_elf(elf) diff --git a/scripts/opentitan/ot/util/elf.py b/scripts/opentitan/ot/util/elf.py new file mode 100644 index 0000000000000..59e9e427c30f3 --- /dev/null +++ b/scripts/opentitan/ot/util/elf.py @@ -0,0 +1,201 @@ +# Copyright (c) 2023-2024 Rivos, Inc. +# SPDX-License-Identifier: Apache2 + +"""ELF helpers. + + :author: Emmanuel Blot +""" + +from io import BytesIO +from logging import getLogger +from typing import BinaryIO, Iterator, Optional, Tuple + +try: + # note: pyelftools package is an OpenTitan toolchain requirement, see + # python-requirements.txt file from OT top directory. + from elftools.common.exceptions import ELFError + from elftools.elf.constants import SH_FLAGS + from elftools.elf.elffile import ELFFile + from elftools.elf.sections import Section + from elftools.elf.segments import Segment +except ImportError: + ELFFile = None + + +class ElfBlob: + """Load ELF application.""" + + LOADED = ELFFile is not None + """Report whether ELF tools have been loaded.""" + + def __init__(self): + if not self.LOADED: + raise ImportError('pyelftools package not available') + self._log = getLogger('elf') + self._elf: Optional[ELFFile] = None + self._payload_address: int = 0 + self._payload_size: int = 0 + self._payload: bytes = b'' + + def load(self, efp: BinaryIO) -> None: + """Load the content of an ELF file. + + The ELF file stream is no longer accessed once this method + completes. + + :param efp: a File-like (binary read access) + """ + # use a copy of the stream to release the file pointer. + try: + self._elf = ELFFile(BytesIO(efp.read())) + except ELFError as exc: + raise ValueError(f'Invalid ELF file: {exc}') from exc + if self._elf['e_machine'] != 'EM_RISCV': + raise ValueError('Not an RISC-V ELF file') + if self._elf['e_type'] != 'ET_EXEC': + raise ValueError('Not an executable ELF file') + self._log.debug('entry point: 0x%X', self.entry_point) + self._log.debug('data size: %d', self.size) + + @property + def address_size(self) -> int: + """Provide the width of address value used in the ELFFile. + + :return: the address width in bits (not bytes!) + """ + return self._elf.elfclass if self._elf else 0 + + @property + def entry_point(self) -> Optional[int]: + """Provide the entry point of the application, if any. 
+ + :return: the entry point address + """ + return self._elf and self._elf.header.get('e_entry', None) + + @property + def size(self) -> int: + """Provide the size of the payload section, if any. + + :return: the data/payload size in bytes + """ + if not self._payload_size: + self._payload_address, self._payload_size = self._parse_segments() + return self._payload_size + + @property + def load_address(self) -> int: + """Provide the first destination address on target to copy the + application blob. + + :return: the load address + """ + if not self._payload_address: + self._payload_address, self._payload_size = self._parse_segments() + return self._payload_address + + @property + def blob(self) -> bytes: + """Provide the application blob, i.e. the whole loadable binary. + + :return: the raw application binary. + """ + if not self._payload: + self._payload = self._build_payload() + if len(self._payload) != self.size: + raise RuntimeError('Internal error: size mismatch') + return self._payload + + @property + def code_span(self) -> Tuple[int, int]: + """Report the extent of the executable portion of the ELF file. + + :return: (start address, end address) + """ + loadable_segments = list(self._loadable_segments()) + base_addr = None + last_addr = None + for section in self._elf.iter_sections(): + if not self.is_section_executable(section): + continue + for segment in loadable_segments: + if segment.section_in_segment(section): + break + else: + continue + addr = section.header['sh_addr'] + size = section.header['sh_size'] + if base_addr is None or base_addr > addr: + base_addr = addr + last = addr + size + if last_addr is None or last_addr < last: + last_addr = last + self._log.debug('Code section @ 0x%08x 0x%08x bytes', addr, size) + return base_addr, last_addr + + def is_section_executable(self, section: 'Section') -> bool: + """Report whether the section is flagged as executable. + + :return: True is section is executable + """ + return bool(section.header['sh_flags'] & SH_FLAGS.SHF_EXECINSTR) + + def _loadable_segments(self) -> Iterator['Segment']: + """Provide an iterator on segments that should be loaded into the final + binary. + """ + if not self._elf: + raise RuntimeError('No ELF file loaded') + for segment in sorted(self._elf.iter_segments(), + key=lambda seg: seg['p_paddr']): + if segment['p_type'] not in ('PT_LOAD', ): + continue + if not segment['p_filesz']: + continue + yield segment + + def _parse_segments(self) -> Tuple[int, int]: + """Parse ELF segments and extract physical location and size. + + :return: the location of the first byte and the overall payload size + in bytes + """ + size = 0 + phy_start = None + for segment in self._loadable_segments(): + seg_size = segment['p_filesz'] + if not seg_size: + continue + phy_addr = segment['p_paddr'] + if phy_start is None: + phy_start = phy_addr + else: + if phy_addr > phy_start+size: + self._log.debug('fill gap with previous segment') + size = phy_addr-phy_start + size += seg_size + if phy_start is None: + raise ValueError('No loadable segment found') + return phy_start, size + + def _build_payload(self) -> bytes: + """Extract the loadable payload from the ELF file and generate a + unique, contiguous binary buffer. 
+ + :return: the payload to store as the application blob + """ + buf = BytesIO() + phy_start = None + for segment in self._loadable_segments(): + phy_addr = segment['p_paddr'] + if phy_start is None: + phy_start = phy_addr + else: + current_addr = phy_start+buf.tell() + if phy_addr > current_addr: + fill_size = phy_addr-current_addr + buf.write(bytes(fill_size)) + buf.write(segment.data()) + data = buf.getvalue() + buf.close() + return data From a8ce0e125fce7dfa1859dbcdd8d12b5d21dd514d Mon Sep 17 00:00:00 2001 From: Emmanuel Blot Date: Wed, 10 Apr 2024 16:20:03 +0200 Subject: [PATCH 10/27] [ot] scripts/opentitan: fix argument parser description message. Should only report the first line of the module documentation Signed-off-by: Emmanuel Blot --- scripts/opentitan/gdbreplay.py | 3 ++- scripts/opentitan/otphelp.py | 3 ++- scripts/opentitan/pyot.py | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/scripts/opentitan/gdbreplay.py b/scripts/opentitan/gdbreplay.py index 5ec90bda34fae..8ba2ff483ac3c 100755 --- a/scripts/opentitan/gdbreplay.py +++ b/scripts/opentitan/gdbreplay.py @@ -648,7 +648,8 @@ def main(): qemu_path = None try: args: Optional[Namespace] = None - argparser = ArgumentParser(description=modules[__name__].__doc__) + desc = modules[__name__].__doc__.split('.', 1)[0].strip() + argparser = ArgumentParser(description=f'{desc}.') argparser.add_argument('-t', '--trace', metavar='LOG', type=FileType('rt'), help='QEMU execution trace log') diff --git a/scripts/opentitan/otphelp.py b/scripts/opentitan/otphelp.py index 97a23578e1137..32c02d5ba98dd 100755 --- a/scripts/opentitan/otphelp.py +++ b/scripts/opentitan/otphelp.py @@ -126,7 +126,8 @@ def main(): """Main routine""" debug = True try: - argparser = ArgumentParser(description=modules[__name__].__doc__) + desc = modules[__name__].__doc__.split('.', 1)[0].strip() + argparser = ArgumentParser(description=f'{desc}.') argparser.add_argument('-c', '--config', metavar='JSON', type=FileType('rt', encoding='utf-8'), help='path to configuration file') diff --git a/scripts/opentitan/pyot.py b/scripts/opentitan/pyot.py index fb7fed7a667d3..dd31dbd463540 100755 --- a/scripts/opentitan/pyot.py +++ b/scripts/opentitan/pyot.py @@ -1384,7 +1384,8 @@ def main(): tmp_result: Optional[str] = None try: args: Optional[Namespace] = None - argparser = ArgumentParser(description=modules[__name__].__doc__) + desc = modules[__name__].__doc__.split('.', 1)[0].strip() + argparser = ArgumentParser(description=f'{desc}.') qvm = argparser.add_argument_group(title='Virtual machine') rel_qemu_path = relpath(qemu_path) if qemu_path else '?' 
qvm.add_argument('-q', '--qemu', From bb80f0a730f061934728cc9c6c9e526b484830d7 Mon Sep 17 00:00:00 2001 From: Emmanuel Blot Date: Tue, 9 Apr 2024 17:41:38 +0200 Subject: [PATCH 11/27] [ot] scripts/opentitan: create a small Debug Module to test RISC-V DM Basic features, limited to RV32 and 32-bit operations: - read/write DM registers - read/write CSR and registers or RISC-V core - read/write memory - load and execute ELF file Signed-off-by: Emmanuel Blot --- scripts/opentitan/ot/bitfield.py | 97 ++++++ scripts/opentitan/ot/dm/__init__.py | 9 + scripts/opentitan/ot/dm/dm.py | 488 ++++++++++++++++++++++++++++ scripts/opentitan/ot/dm/regs.py | 442 +++++++++++++++++++++++++ 4 files changed, 1036 insertions(+) create mode 100644 scripts/opentitan/ot/bitfield.py create mode 100644 scripts/opentitan/ot/dm/__init__.py create mode 100644 scripts/opentitan/ot/dm/dm.py create mode 100644 scripts/opentitan/ot/dm/regs.py diff --git a/scripts/opentitan/ot/bitfield.py b/scripts/opentitan/ot/bitfield.py new file mode 100644 index 0000000000000..0ac03a82771fa --- /dev/null +++ b/scripts/opentitan/ot/bitfield.py @@ -0,0 +1,97 @@ +# Copyright (c) 2024 Rivos, Inc. +# SPDX-License-Identifier: Apache2 + +"""Simple BitField container. + + :author: Emmanuel Blot +""" + +from typing import Any, Dict + +from .util.misc import HexInt + + +class BitField: + """BitField container + """ + + def __init__(self, *args, **kwargs): + self._bits = {} + self._named_bits = {} + for bits in args: + self._bits.update(bits) + for name, bits in kwargs.items(): + self._named_bits[name] = {} + self._named_bits[name].update(bits) + self._selector = None + sels = {k: v for k, v in self._bits.items() if len(v) > 3 and v[3]} + if sels: + if len(sels) > 1: + raise ValueError(f'Too many selectors: {", ".join(sels)}') + name, desc = sels.popitem() + enum_ = desc[2] + if name != enum_.__name__: + raise ValueError(f'Invalid selector name: {enum_.__name__}') + self._selector = enum_ + + def decode(self, value: int) -> Dict[str, Any]: + """Decode a value into a dictionary.""" + bits = dict(self._bits) + if self._selector: + offset, length, enum_ = self._bits[self._selector.__name__][:3] + mask = ((1 << length) - 1) + val = (value >> offset) & mask + try: + sel = enum_(val).name + except ValueError: + sel = None + if sel: + bits.update(self._named_bits[sel]) + values = {} + for name, code in bits.items(): + offset, length = code[:2] + enum = code[2] if len(code) > 2 else None + mask = ((1 << length) - 1) + val = (value >> offset) & mask + if enum: + values[name] = enum(val) + else: + if length == 1: + values[name] = bool(val) + else: + values[name] = HexInt(val) + return values + + def encode(self, *init, **values: Dict[str, Any]) -> HexInt: + """Encode a dictionary into a value.""" + if init: + value = init[0] + if len(init) > 1: + raise ValueError('Unknown argument') + else: + value = 0 + values = dict(values) # duplicate as entries are removed + bits = dict(self._bits) # duplicate as selector may be folded into + if self._selector: + selname = self._selector.__name__ + sel = values.get(selname) + if sel: + bits.update(self._named_bits[sel]) + for name, code in bits.items(): + if name not in values: + continue + val = values[name] + del values[name] + offset, length = code[:2] + enum = code[2] if len(code) > 2 else None + if enum and isinstance(val, str): + val = enum[val] + if length == 1 and isinstance(val, bool): + val = int(val) + mask = ((1 << length) - 1) + val &= mask + value &= ~(mask << offset) + value |= val << offset + if values: + 
raise ValueError(f'Unknown field {", ".join(values)}') + return HexInt(value) diff --git a/scripts/opentitan/ot/dm/__init__.py b/scripts/opentitan/ot/dm/__init__.py new file mode 100644 index 0000000000000..7dc6f558a1619 --- /dev/null +++ b/scripts/opentitan/ot/dm/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) 2024 Rivos, Inc. +# SPDX-License-Identifier: Apache2 + +"""RISC-V Debug Module tools. + + :author: Emmanuel Blot +""" + +from .dm import DebugModule diff --git a/scripts/opentitan/ot/dm/dm.py b/scripts/opentitan/ot/dm/dm.py new file mode 100644 index 0000000000000..b08168015096a --- /dev/null +++ b/scripts/opentitan/ot/dm/dm.py @@ -0,0 +1,488 @@ +# Copyright (c) 2024 Rivos, Inc. +# SPDX-License-Identifier: Apache2 + +"""RISC-V Debug Module tools. + + :author: Emmanuel Blot +""" + +from enum import IntEnum +from io import SEEK_END +from logging import getLogger +from time import sleep, time as now +from typing import Any, BinaryIO, Dict, Optional + +from .regs import CSRS, GPRS +from ..bitfield import BitField +from ..dtm import DebugTransportModule + + +class DebugModule: + """RISC-V Debug Module. + + Only support a single hart for now + """ + + # pylint: disable=attribute-defined-outside-init + + DM_VERSION = (0, 13, 2) + """Supported version.""" + + REGISTERS = { + 'data0': 0x04, + 'data1': 0x05, + 'data2': 0x06, + 'data3': 0x07, + 'data4': 0x08, + 'data5': 0x09, + 'data6': 0x0a, + 'data7': 0x0b, + 'data8': 0x0c, + 'data9': 0x0d, + 'data10': 0x0e, + 'data11': 0x0f, + 'dmcontrol': 0x10, + 'dmstatus': 0x11, + 'hartinfo': 0x12, + 'abstractcs': 0x16, + 'command': 0x17, + 'abstractauto': 0x18, + 'nextdm': 0x1d, + 'progbuf0': 0x20, + 'progbuf1': 0x21, + 'progbuf2': 0x22, + 'progbuf3': 0x23, + 'progbuf4': 0x24, + 'progbuf5': 0x25, + 'progbuf6': 0x26, + 'progbuf7': 0x27, + 'progbuf8': 0x28, + 'progbuf9': 0x29, + 'progbuf10': 0x2a, + 'progbuf11': 0x2b, + 'progbuf12': 0x2c, + 'progbuf13': 0x2d, + 'progbuf14': 0x2e, + 'progbuf15': 0x2f, + 'sbcs': 0x38, + 'sbaddress0': 0x39, + 'sbaddress1': 0x3a, + 'sbdata0': 0x3c, + 'sbdata1': 0x3d, + 'haltsum0': 0x40, + } + """Supported registers""" + + CMDERR = IntEnum('cmderr', + ['none', 'busy', 'notsup', 'exc', 'halt', 'bus', 'rsv', + 'other'], start=0) + """Command error.""" + + VERSION = IntEnum('version', ['nodebug', 'v0.11', 'v0.13', 'v1.0'], start=0) + """DMSTATUS version.""" + + SBERROR = IntEnum('sberror', ['none', 'timeout', 'badaddr', 'badalign', + 'badsize', 'rsv5', 'rsv6', 'other'], start=0) + """SBCS sberror.""" + + SBVERSION = IntEnum('sbversion', ['legacy', 'v1.0'], start=0) + """SBCS sbversion.""" + + BITFIELDS = dict( + DMCONTROL=BitField({ + 'dmactive': (0, 1), + 'ndmreset': (1, 1), + 'clrresethaltreq': (2, 1), + 'setresethaltreq': (3, 1), + 'hartselhi': (6, 10), + 'hartsello': (16, 10), + 'hasel': (26, 1), + 'ackhavereset': (28, 1), + 'hartreset': (29, 1), + 'resumereq': (30, 1), + 'haltreq': (31, 1)}), + DMSTATUS=BitField({ + 'version': (0, 4, VERSION), + 'confstrptrvalid': (4, 1), + 'hasresethaltreq': (5, 1), + 'authbusy': (6, 1), + 'authenticated': (7, 1), + 'anyhalted': (8, 1), + 'allhalted': (9, 1), + 'anyrunning': (10, 1), + 'allrunning': (11, 1), + 'anyunavail': (12, 1), + 'allunavail': (13, 1), + 'anynonexistent': (14, 1), + 'allnonexistent': (15, 1), + 'anyresumeack': (16, 1), + 'allresumeack': (17, 1), + 'anyhavereset': (18, 1), + 'allhavereset': (19, 1), + 'impebreak': (22, 1)}), + HARTINFO=BitField({ + 'dataaddr': (0, 12), + 'datasize': (12, 4), + 'dataaccess': (16, 1), + 'nscratch': (20, 4)}), + ABSTRACTCS=BitField({ + 
'datacount': (0, 4), + 'cmderr': (8, 3, CMDERR), + 'busy': (12, 1), + 'progbufsize': (24, 5)}), + COMMAND=BitField({ + 'control': (0, 24), + 'cmdtype': (24, 8, IntEnum('cmdtype', ['reg', 'quick', 'mem'], + start=0), True), + 'write': (16, 1), + }, reg={ + 'regno': (0, 16), + 'transfer': (17, 1), + 'postexec': (18, 1), + 'aarpostincrement': (19, 1), + 'aarsize': (20, 3, IntEnum('aarsize', + ['b8', 'b16', 'b32', 'b64', 'b128'], + start=0)), + }, mem={ + 'aampostincrement': (19, 1), + 'aamsize': (20, 3), + 'aamvirtual': (23, 1)}), + ABSTRACTAUTO=BitField({ + 'autoexecdata': (0, 12), + 'autoexecprogbuf': (16, 16)}), + SBCS=BitField({ + 'sbaccess8': (0, 1), + 'sbaccess16': (1, 1), + 'sbaccess32': (2, 1), + 'sbaccess64': (3, 1), + 'sbaccess128': (4, 1), + 'sbasize': (5, 7), + 'sberror': (12, 3, SBERROR), + 'sbreadondata': (15, 1), + 'sbautoincrement': (16, 1), + 'sbaccess': (17, 3), + 'sbreadonaddr': (20, 1), + 'sbbusy': (21, 1), + 'sbbusyerror': (22, 1), + 'sbversion': (29, 3, SBVERSION) + }) + ) + + def __init__(self, dtm: DebugTransportModule, address: int): + self._log = getLogger('dtm.rvdm') + self._dtm = dtm + self._dmi = dtm['dmi'] + self._address = address + self._hart: int = 0 # currently selected hart + self._cache: Dict[str, int] = {} + + def restart_system(self) -> None: + """Restart the remote machine.""" + self._dtm.engine.controller.system_reset() + + @classmethod + def decode(cls, name: str, value: int) -> Dict[str, Any]: + """Decode a bitfield register.""" + bitfield = cls.BITFIELDS.get(f'{name.upper()}') + if not bitfield: + raise ValueError('Cannot decode {name} register') + return bitfield.decode(value) + + def initialize(self) -> None: + """Initialize the debug module.""" + btf = self.BITFIELDS['DMCONTROL'] + self.dmcontrol = 0 + enable = btf.encode(dmactive=True) + self.dmcontrol = enable + allharts = btf.encode(dmactive=True, hartsello=-1, hartselhi=-1, + hasel=True) + self.dmcontrol = allharts + self._hart = 0 + select = btf.encode(dmactive=True, hasel=False, hartsello=self._hart) + self.dmcontrol = select + dmcontrol = btf.decode(self.dmcontrol) + assert dmcontrol['dmactive'] + btf = self.BITFIELDS['DMSTATUS'] + version = btf.decode(self.dmstatus)['version'] + if version == self.VERSION['v0.11']: + raise RuntimeError(f'Detected incompatible DM version {version!r}') + if version != self.VERSION['v0.13']: + self._log.warning('Detected incompatible DM version %r', version) + + @property + def status(self) -> Dict[str, int]: + """Report debug module status.""" + btf = self.BITFIELDS['DMSTATUS'] + # TODO would need to check if another hart needs to be selected first + return btf.decode(self.dmstatus) + + @property + def hart_info(self) -> Dict[str, int]: + """Report current hart information.""" + btf = self.BITFIELDS['HARTINFO'] + # TODO would need to check if another hart needs to be selected first + return btf.decode(self.hartinfo) + + @property + def system_bus_info(self) -> Dict[str, int]: + """Report system bus capabilities.""" + btf = self.BITFIELDS['SBCS'] + return btf.decode(self.sbcs) + + @property + def is_halted(self) -> bool: + """Report whether the currently selected hart is halted.""" + btf = self.BITFIELDS['DMSTATUS'] + val = self.dmstatus + status = btf.decode(val) + return status['allhalted'] + + def halt(self, hart: int = 0) -> None: + """Halt execution the selected hart.""" + btf = self.BITFIELDS['DMCONTROL'] + halt = btf.encode(dmactive=True, haltreq=True, + hasel=False, hartsello=hart) + self.dmcontrol = halt + self._hart = hart + timeout = now() + 1.0 + 
btf = self.BITFIELDS['DMSTATUS'] + while now() < timeout: + val = self.dmstatus + status = btf.decode(val) + if status['allhalted']: + self._log.info('Hart %d halted', hart) + break + sleep(0.001) + else: + self._log.error('Status %s', status) + raise TimeoutError(f'Cannot halt hart {self._hart}') + + def resume(self, hart: int = 0) -> None: + """Resume execution of the selected hart.""" + btf = self.BITFIELDS['DMCONTROL'] + resume = btf.encode(dmactive=True, resumereq=True, + hasel=False, hartsello=hart) + self.dmcontrol = resume + self._hart = hart + timeout = now() + 1.0 + btf = self.BITFIELDS['DMSTATUS'] + while now() < timeout: + val = self.dmstatus + status = btf.decode(val) + if status['anyresumeack']: + self._log.info('Hart %d resumed', hart) + break + sleep(0.001) + else: + self._log.error('Status %s', status) + raise TimeoutError(f'Cannot resume hart {self._hart}') + + def read_csr(self, reg: [str | int]) -> int: + """Read the value of a CSR.""" + ireg = self._get_register_index(reg) + btf = self.BITFIELDS['COMMAND'] + command = btf.encode(cmdtype='reg', regno=ireg, aarsize='b32', + write=False, transfer=True) + self.command = command + try: + self._wait_abtract_command() + except (RuntimeError, TimeoutError, ValueError) as exc: + raise exc.__class__(f'{exc} while reading register {reg}') from exc + value = self.data0 + self._log.info('read %s = %08x', reg, value) + return value + + def write_csr(self, reg: [str | int], value: int) -> None: + """Write a value to a CSR.""" + ireg = self._get_register_index(reg) + btf = self.BITFIELDS['COMMAND'] + self.data0 = value + command = btf.encode(cmdtype='reg', regno=ireg, aarsize='b32', + write=True, transfer=True) + self.command = command + try: + self._wait_abtract_command() + except (RuntimeError, TimeoutError, ValueError) as exc: + raise exc.__class__(f'{exc} while writing register {reg}') from exc + self._log.info('write %s = %08x', reg, value) + return value + + def memory_copy(self, mfp: BinaryIO, mop: str, addr: int, + size: Optional[int]) -> None: + """Handle memory operations. + + Only support 32-bit transfers (address and size should be aligned) + for now. 
+ + :param mfp: I/O stream to read data from or write data to, depending + on the selected operation + :param mop: the operation to perform (read, write) + :param addr: start address + :param size: count of bytes to write + """ + read = mop == 'read' + write = mop == 'write' + if not (read or write): + raise ValueError(f'Unsupported memcopy operation {mop}') + if addr & 0x3 != 0: + raise ValueError('Invalid address') + if (size and size & 0x3 != 0) or (read and not size): + raise ValueError('Invalid size') + start = now() + btf = self.BITFIELDS['SBCS'] + val = self._wait_sb_idle(check=True) + val = btf.encode(val, + sbreadonaddr=read, + sbreadondata=read, + sbautoincrement=True, + sbaccess=2) # 32-bit access + self.sbcs = val + # trigger first read (sbreadonaddr) in read mode + self.sbaddress0 = addr + if read: + to_go = size + # pylint: disable=access-member-before-definition + while to_go > 0: + self._log.debug('reading mem from 0x%08x', addr) + self._wait_sb_idle() + # trigger next read (sbreadondata), inc addr (sbautoincrement) + data = self.sbdata0 + mfp.write(data.to_bytes(4, 'little')) + to_go -= 4 + addr += 4 + elif write: + if not size: + # mfp needs to be seekable + pos = mfp.tell() + mfp.seek(0, SEEK_END) + end = mfp.tell() + size = (end - pos) & ~0x3 + mfp.seek(pos) + to_go = size + while to_go > 0: + buf = mfp.read(4) + self._log.debug('writing mem to 0x%08x %d', addr, len(buf)) + assert len(buf) == 4 + data = int.from_bytes(buf, 'little') + # inc addr (sbautoincrement) + self.sbdata0 = data + self._wait_sb_idle() + to_go -= 4 + addr += 4 + lap = now() - start + rate = size / (lap * 1024) + self._log.info('copied %d KB @ %.1f KB/s', size//1024, rate) + + def set_pc(self, addr: int) -> None: + """Set the next Program Counter address.""" + if not self.is_halted: + raise RuntimeError('Cannot update PC while running') + self.write_csr('dpc', addr) + + def __getattr__(self, name) -> int: + name = name.lower() + regaddr = self.REGISTERS.get(name, None) + if regaddr is None: + raise AttributeError('No such attribute {name}') + return self._read_reg(regaddr) + + def __setattr__(self, name, value): + name = name.lower() + regaddr = self.REGISTERS.get(name, None) + if regaddr is not None: + self._write_reg(regaddr, value) + else: + super().__setattr__(name, value) + + def _write_reg(self, reg: int, value: int) -> None: + if not isinstance(value, int): + raise TypeError(f'Invalid type {type(value)}') + if value >= (1 << 32): + raise ValueError('Invalid value') + self._log.debug('write %02x: 0x%08x', reg, value) + self._dmi.write(self._address + reg, value) + self._cache[reg] = value + + def _read_reg(self, reg: str) -> int: + self._log.debug('read %02x', reg) + value = self._dmi.read(self._address + reg) + self._cache[reg] = value + self._log.debug('read 0x%08x', value) + return value + + def _get_register_index(self, reg: [str | int]) -> int: + if isinstance(reg, str): + # Not supported: FPR, Vector, etc. 
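+            # resolve the name against the CSR map first, then the GPR map;
+            # GPR abstract register numbers are offset into the 0x1000+ range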
+ ireg = CSRS.get(reg.lower()) + if ireg is None: + ireg = GPRS.get(reg.lower()) + if ireg is None: + raise ValueError(f"No such CSR '{reg}'") + ireg += 0x1000 + return ireg + return reg + + def _wait_abtract_command(self) -> None: + """Wait for the completion of an abstract command.""" + timeout = now() + 1.0 + btf = self.BITFIELDS['ABSTRACTCS'] + exc = None + error = 0 + while now() < timeout: + # pylint: disable=access-member-before-definition + val = self.abstractcs + cmd = btf.decode(val) + error = cmd['cmderr'] + if error != self.CMDERR.none: + exc = RuntimeError(f'DM in error: {error!r}') + break + if not cmd['busy']: + break + sleep(0.001) + else: + # need a recovery feature, see hung command handling in spec. + raise TimeoutError() + if exc: + clear_err = btf.encode(cmderr=error) + self.abstractcs = clear_err + raise exc + + def _wait_sb_idle(self, check: bool = False) -> int: + """Wait for the completion of a system bus access. + + :param check: whether to check the access is supported + """ + btf = self.BITFIELDS['SBCS'] + timeout = now() + 1.0 + while now() < timeout: + # pylint: disable=access-member-before-definition + val = self.sbcs + sbcs = btf.decode(val) + if check: + # check supported version + assert sbcs['sbversion'] == self.SBVERSION['v1.0'] + # check System Bus access is supported + assert sbcs['sbasize'] != 0 + # for now, only use 32-bit access + assert sbcs['sbaccess32'] + check = False + error = sbcs['sberror'] + if sbcs['sberror'] != self.SBERROR['none']: + # clear the error + val = btf.encode(val, sberror=True) + self.sbcs = val + # then raise the error + self._log.error('sbcs 0x%08x %s', val, btf.decode(val)) + raise RuntimeError(f'SBCS in error {error!r}') + if sbcs['sbbusyerror']: + # clear the error + val = btf.encode(val, sbbusyerror=True) + self.sbcs = val + # then raise the error + self._log.error('sbcs 0x%08x %s', val, btf.decode(val)) + raise RuntimeError('SBCS in busy error') + if not sbcs['sbbusy']: + return val + sleep(0.001) + # need a recovery feature, see hung command handling in spec. + raise TimeoutError('System Bus stalled') diff --git a/scripts/opentitan/ot/dm/regs.py b/scripts/opentitan/ot/dm/regs.py new file mode 100644 index 0000000000000..608eb4718aa9e --- /dev/null +++ b/scripts/opentitan/ot/dm/regs.py @@ -0,0 +1,442 @@ +# Copyright (c) 2024 Rivos, Inc. 
+# SPDX-License-Identifier: Apache2 + +"""RISC-V CSRs + + :author: Emmanuel Blot +""" + +GPRS = { + 'x0': 0x00, + 'x1': 0x01, + 'x2': 0x02, + 'x3': 0x03, + 'x4': 0x04, + 'x5': 0x05, + 'x6': 0x06, + 'x7': 0x07, + 'x8': 0x08, + 'x9': 0x09, + 'x10': 0x0a, + 'x11': 0x0b, + 'x12': 0x0c, + 'x13': 0x0d, + 'x14': 0x0e, + 'x15': 0x0f, + 'x16': 0x10, + 'x17': 0x11, + 'x18': 0x12, + 'x19': 0x13, + 'x20': 0x14, + 'x21': 0x15, + 'x22': 0x16, + 'x23': 0x17, + 'x24': 0x18, + 'x25': 0x19, + 'x26': 0x1a, + 'x27': 0x1b, + 'x28': 0x1c, + 'x29': 0x1d, + 'x30': 0x1e, + 'x31': 0x1f, + 'zero': 0x00, + 'ra': 0x01, + 'sp': 0x02, + 'gp': 0x03, + 'tp': 0x04, + 't0': 0x05, + 't1': 0x06, + 't2': 0x07, + 's0': 0x08, + 'fp': 0x08, + 's1': 0x09, + 'a0': 0x0a, + 'a1': 0x0b, + 'a2': 0x0c, + 'a3': 0x0d, + 'a4': 0x0e, + 'a5': 0x0f, + 'a6': 0x10, + 'a7': 0x11, + 's2': 0x12, + 's3': 0x13, + 's4': 0x14, + 's5': 0x15, + 's6': 0x16, + 's7': 0x17, + 's8': 0x18, + 's9': 0x19, + 's10': 0x1a, + 's11': 0x1b, + 't3': 0x1c, + 't4': 0x1d, + 't5': 0x1e, + 't6': 0x1f, +} + +CSRS = { + 'ustatus': 0x000, + 'uie': 0x004, + 'utvec': 0x005, + 'uscratch': 0x040, + 'uepc': 0x041, + 'ucause': 0x042, + 'utval': 0x043, + 'uip': 0x044, + 'fflags': 0x001, + 'frm': 0x002, + 'fcsr': 0x003, + 'vstart': 0x008, + 'vxsat': 0x009, + 'vxrm': 0x00a, + 'vcsr': 0x00f, + 'vl': 0xc20, + 'vtype': 0xc21, + 'vlenb': 0xc22, + 'cycle': 0xc00, + 'time': 0xc01, + 'instret': 0xc02, + 'hpmcounter3': 0xc03, + 'hpmcounter4': 0xc04, + 'hpmcounter5': 0xc05, + 'hpmcounter6': 0xc06, + 'hpmcounter7': 0xc07, + 'hpmcounter8': 0xc08, + 'hpmcounter9': 0xc09, + 'hpmcounter10': 0xc0a, + 'hpmcounter11': 0xc0b, + 'hpmcounter12': 0xc0c, + 'hpmcounter13': 0xc0d, + 'hpmcounter14': 0xc0e, + 'hpmcounter15': 0xc0f, + 'hpmcounter16': 0xc10, + 'hpmcounter17': 0xc11, + 'hpmcounter18': 0xc12, + 'hpmcounter19': 0xc13, + 'hpmcounter20': 0xc14, + 'hpmcounter21': 0xc15, + 'hpmcounter22': 0xc16, + 'hpmcounter23': 0xc17, + 'hpmcounter24': 0xc18, + 'hpmcounter25': 0xc19, + 'hpmcounter26': 0xc1a, + 'hpmcounter27': 0xc1b, + 'hpmcounter28': 0xc1c, + 'hpmcounter29': 0xc1d, + 'hpmcounter30': 0xc1e, + 'hpmcounter31': 0xc1f, + 'cycleh': 0xc80, + 'timeh': 0xc81, + 'instreth': 0xc82, + 'hpmcounter3h': 0xc83, + 'hpmcounter4h': 0xc84, + 'hpmcounter5h': 0xc85, + 'hpmcounter6h': 0xc86, + 'hpmcounter7h': 0xc87, + 'hpmcounter8h': 0xc88, + 'hpmcounter9h': 0xc89, + 'hpmcounter10h': 0xc8a, + 'hpmcounter11h': 0xc8b, + 'hpmcounter12h': 0xc8c, + 'hpmcounter13h': 0xc8d, + 'hpmcounter14h': 0xc8e, + 'hpmcounter15h': 0xc8f, + 'hpmcounter16h': 0xc90, + 'hpmcounter17h': 0xc91, + 'hpmcounter18h': 0xc92, + 'hpmcounter19h': 0xc93, + 'hpmcounter20h': 0xc94, + 'hpmcounter21h': 0xc95, + 'hpmcounter22h': 0xc96, + 'hpmcounter23h': 0xc97, + 'hpmcounter24h': 0xc98, + 'hpmcounter25h': 0xc99, + 'hpmcounter26h': 0xc9a, + 'hpmcounter27h': 0xc9b, + 'hpmcounter28h': 0xc9c, + 'hpmcounter29h': 0xc9d, + 'hpmcounter30h': 0xc9e, + 'hpmcounter31h': 0xc9f, + 'mcycle': 0xb00, + 'minstret': 0xb02, + 'mcycleh': 0xb80, + 'minstreth': 0xb82, + 'mvendorid': 0xf11, + 'marchid': 0xf12, + 'mimpid': 0xf13, + 'mhartid': 0xf14, + 'mconfigptr': 0xf15, + 'mstatus': 0x300, + 'misa': 0x301, + 'medeleg': 0x302, + 'mideleg': 0x303, + 'mie': 0x304, + 'mtvec': 0x305, + 'mcounteren': 0x306, + 'mstatush': 0x310, + 'mscratch': 0x340, + 'mepc': 0x341, + 'mcause': 0x342, + 'mtval': 0x343, + 'mip': 0x344, + 'miselect': 0x350, + 'mireg': 0x351, + 'mtopei': 0x35c, + 'mtopi': 0xfb0, + 'mvien': 0x308, + 'mvip': 0x309, + 'midelegh': 0x313, + 'mieh': 0x314, + 'mvienh': 0x318, + 
'mviph': 0x319, + 'miph': 0x354, + 'sstatus': 0x100, + 'sie': 0x104, + 'stvec': 0x105, + 'scounteren': 0x106, + 'senvcfg': 0x10a, + 'sstateen0': 0x10c, + 'sstateen1': 0x10d, + 'sstateen2': 0x10e, + 'sstateen3': 0x10f, + 'sscratch': 0x140, + 'sepc': 0x141, + 'scause': 0x142, + 'stval': 0x143, + 'sip': 0x144, + 'stimecmp': 0x14d, + 'stimecmph': 0x15d, + 'sptbr': 0x180, + 'satp': 0x180, + 'siselect': 0x150, + 'sireg': 0x151, + 'stopei': 0x15c, + 'stopi': 0xdb0, + 'sieh': 0x114, + 'siph': 0x154, + 'hstatus': 0x600, + 'hedeleg': 0x602, + 'hideleg': 0x603, + 'hie': 0x604, + 'hcounteren': 0x606, + 'hgeie': 0x607, + 'htval': 0x643, + 'hvip': 0x645, + 'hip': 0x644, + 'htinst': 0x64a, + 'hgeip': 0xe12, + 'hgatp': 0x680, + 'htimedelta': 0x605, + 'htimedeltah': 0x615, + 'henvcfg': 0x60a, + 'henvcfgh': 0x61a, + 'hstateen0': 0x60c, + 'hstateen0h': 0x61c, + 'hstateen1': 0x60d, + 'hstateen1h': 0x61d, + 'hstateen2': 0x60e, + 'hstateen2h': 0x61e, + 'hstateen3': 0x60f, + 'hstateen3h': 0x61f, + 'vsstatus': 0x200, + 'vsie': 0x204, + 'vstvec': 0x205, + 'vsscratch': 0x240, + 'vsepc': 0x241, + 'vscause': 0x242, + 'vstval': 0x243, + 'vsip': 0x244, + 'vsatp': 0x280, + 'vstimecmp': 0x24d, + 'vstimecmph': 0x25d, + 'mtinst': 0x34a, + 'mtval2': 0x34b, + 'hvien': 0x608, + 'hvictl': 0x609, + 'hviprio1': 0x646, + 'hviprio2': 0x647, + 'vsiselect': 0x250, + 'vsireg': 0x251, + 'vstopei': 0x25c, + 'vstopi': 0xeb0, + 'hidelegh': 0x613, + 'hvienh': 0x618, + 'hviph': 0x655, + 'hviprio1h': 0x656, + 'hviprio2h': 0x657, + 'vsieh': 0x214, + 'vsiph': 0x254, + 'menvcfg': 0x30a, + 'menvcfgh': 0x31a, + 'mstateen0': 0x30c, + 'mstateen0h': 0x31c, + 'mstateen1': 0x30d, + 'mstateen1h': 0x31d, + 'mstateen2': 0x30e, + 'mstateen2h': 0x31e, + 'mstateen3': 0x30f, + 'mstateen3h': 0x31f, + 'mseccfg': 0x747, + 'mseccfgh': 0x757, + 'pmpcfg0': 0x3a0, + 'pmpcfg1': 0x3a1, + 'pmpcfg2': 0x3a2, + 'pmpcfg3': 0x3a3, + 'pmpaddr0': 0x3b0, + 'pmpaddr1': 0x3b1, + 'pmpaddr2': 0x3b2, + 'pmpaddr3': 0x3b3, + 'pmpaddr4': 0x3b4, + 'pmpaddr5': 0x3b5, + 'pmpaddr6': 0x3b6, + 'pmpaddr7': 0x3b7, + 'pmpaddr8': 0x3b8, + 'pmpaddr9': 0x3b9, + 'pmpaddr10': 0x3ba, + 'pmpaddr11': 0x3bb, + 'pmpaddr12': 0x3bc, + 'pmpaddr13': 0x3bd, + 'pmpaddr14': 0x3be, + 'pmpaddr15': 0x3bf, + 'tselect': 0x7a0, + 'tdata1': 0x7a1, + 'tdata2': 0x7a2, + 'tdata3': 0x7a3, + 'tinfo': 0x7a4, + 'dcsr': 0x7b0, + 'dpc': 0x7b1, + 'dscratch0': 0x7b2, + 'dscratch1': 0x7b3, + 'mhpmcounter3': 0xb03, + 'mhpmcounter4': 0xb04, + 'mhpmcounter5': 0xb05, + 'mhpmcounter6': 0xb06, + 'mhpmcounter7': 0xb07, + 'mhpmcounter8': 0xb08, + 'mhpmcounter9': 0xb09, + 'mhpmcounter10': 0xb0a, + 'mhpmcounter11': 0xb0b, + 'mhpmcounter12': 0xb0c, + 'mhpmcounter13': 0xb0d, + 'mhpmcounter14': 0xb0e, + 'mhpmcounter15': 0xb0f, + 'mhpmcounter16': 0xb10, + 'mhpmcounter17': 0xb11, + 'mhpmcounter18': 0xb12, + 'mhpmcounter19': 0xb13, + 'mhpmcounter20': 0xb14, + 'mhpmcounter21': 0xb15, + 'mhpmcounter22': 0xb16, + 'mhpmcounter23': 0xb17, + 'mhpmcounter24': 0xb18, + 'mhpmcounter25': 0xb19, + 'mhpmcounter26': 0xb1a, + 'mhpmcounter27': 0xb1b, + 'mhpmcounter28': 0xb1c, + 'mhpmcounter29': 0xb1d, + 'mhpmcounter30': 0xb1e, + 'mhpmcounter31': 0xb1f, + 'mcountinhibit': 0x320, + 'mhpmevent3': 0x323, + 'mhpmevent4': 0x324, + 'mhpmevent5': 0x325, + 'mhpmevent6': 0x326, + 'mhpmevent7': 0x327, + 'mhpmevent8': 0x328, + 'mhpmevent9': 0x329, + 'mhpmevent10': 0x32a, + 'mhpmevent11': 0x32b, + 'mhpmevent12': 0x32c, + 'mhpmevent13': 0x32d, + 'mhpmevent14': 0x32e, + 'mhpmevent15': 0x32f, + 'mhpmevent16': 0x330, + 'mhpmevent17': 0x331, + 'mhpmevent18': 0x332, + 
'mhpmevent19': 0x333, + 'mhpmevent20': 0x334, + 'mhpmevent21': 0x335, + 'mhpmevent22': 0x336, + 'mhpmevent23': 0x337, + 'mhpmevent24': 0x338, + 'mhpmevent25': 0x339, + 'mhpmevent26': 0x33a, + 'mhpmevent27': 0x33b, + 'mhpmevent28': 0x33c, + 'mhpmevent29': 0x33d, + 'mhpmevent30': 0x33e, + 'mhpmevent31': 0x33f, + 'mhpmevent3h': 0x723, + 'mhpmevent4h': 0x724, + 'mhpmevent5h': 0x725, + 'mhpmevent6h': 0x726, + 'mhpmevent7h': 0x727, + 'mhpmevent8h': 0x728, + 'mhpmevent9h': 0x729, + 'mhpmevent10h': 0x72a, + 'mhpmevent11h': 0x72b, + 'mhpmevent12h': 0x72c, + 'mhpmevent13h': 0x72d, + 'mhpmevent14h': 0x72e, + 'mhpmevent15h': 0x72f, + 'mhpmevent16h': 0x730, + 'mhpmevent17h': 0x731, + 'mhpmevent18h': 0x732, + 'mhpmevent19h': 0x733, + 'mhpmevent20h': 0x734, + 'mhpmevent21h': 0x735, + 'mhpmevent22h': 0x736, + 'mhpmevent23h': 0x737, + 'mhpmevent24h': 0x738, + 'mhpmevent25h': 0x739, + 'mhpmevent26h': 0x73a, + 'mhpmevent27h': 0x73b, + 'mhpmevent28h': 0x73c, + 'mhpmevent29h': 0x73d, + 'mhpmevent30h': 0x73e, + 'mhpmevent31h': 0x73f, + 'mhpmcounter3h': 0xb83, + 'mhpmcounter4h': 0xb84, + 'mhpmcounter5h': 0xb85, + 'mhpmcounter6h': 0xb86, + 'mhpmcounter7h': 0xb87, + 'mhpmcounter8h': 0xb88, + 'mhpmcounter9h': 0xb89, + 'mhpmcounter10h': 0xb8a, + 'mhpmcounter11h': 0xb8b, + 'mhpmcounter12h': 0xb8c, + 'mhpmcounter13h': 0xb8d, + 'mhpmcounter14h': 0xb8e, + 'mhpmcounter15h': 0xb8f, + 'mhpmcounter16h': 0xb90, + 'mhpmcounter17h': 0xb91, + 'mhpmcounter18h': 0xb92, + 'mhpmcounter19h': 0xb93, + 'mhpmcounter20h': 0xb94, + 'mhpmcounter21h': 0xb95, + 'mhpmcounter22h': 0xb96, + 'mhpmcounter23h': 0xb97, + 'mhpmcounter24h': 0xb98, + 'mhpmcounter25h': 0xb99, + 'mhpmcounter26h': 0xb9a, + 'mhpmcounter27h': 0xb9b, + 'mhpmcounter28h': 0xb9c, + 'mhpmcounter29h': 0xb9d, + 'mhpmcounter30h': 0xb9e, + 'mhpmcounter31h': 0xb9f, + 'umte': 0x4c0, + 'upmmask': 0x4c1, + 'upmbase': 0x4c2, + 'mmte': 0x3c0, + 'mpmmask': 0x3c1, + 'mpmbase': 0x3c2, + 'smte': 0x1c0, + 'spmmask': 0x1c1, + 'spmbase': 0x1c2, + 'vsmte': 0x2c0, + 'vspmmask': 0x2c1, + 'vspmbase': 0x2c2, + 'scountovf': 0xda0, + 'seed': 0x015, + 'jvt': 0x017, +} From 05bb7261d44fe5e0d2157acc400569e4d2333a0b Mon Sep 17 00:00:00 2001 From: Emmanuel Blot Date: Thu, 11 Apr 2024 17:11:17 +0200 Subject: [PATCH 12/27] [ot] scripts/opentitan: add a Python module & script to exercise RV DM. Signed-off-by: Emmanuel Blot --- docs/opentitan/dtm.md | 129 +++++++++++++++++++++++++++++ docs/opentitan/jtag-dm.md | 14 +++- docs/opentitan/tools.md | 2 + scripts/opentitan/dtm.py | 142 ++++++++++++++++++++++++++++---- scripts/opentitan/ot/dtm/dtm.py | 6 +- 5 files changed, 272 insertions(+), 21 deletions(-) create mode 100644 docs/opentitan/dtm.md diff --git a/docs/opentitan/dtm.md b/docs/opentitan/dtm.md new file mode 100644 index 0000000000000..5eedd734271ce --- /dev/null +++ b/docs/opentitan/dtm.md @@ -0,0 +1,129 @@ +# `dtm.py` + +`dtm.py` checks that the JTAG/DTM/DM stack is up and running and demonstrates how to use the +Debug Module to access the Ibex core. 
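+
+The `ot.dtm` and `ot.dm` modules it relies on can also be driven directly from Python. The snippet
+below is a minimal, illustrative sketch rather than part of the tool itself: it assumes a QEMU VM
+started with `-jtag tcp::3335`, an already connected `JtagEngine` instance named `eng` (see the
+script source for the connection setup), a 5-bit instruction register and a DMI base address of 0.
+
+````python
+from ot.dm import DebugModule
+from ot.dtm import DebugTransportModule
+
+# 'eng' is assumed to be a jtag.jtag.JtagEngine connected to the QEMU JTAG server
+dtm = DebugTransportModule(eng, 5)  # 5-bit IR length
+rvdm = DebugModule(dtm, 0x0)        # DMI base address of the RISC-V Debug Module
+rvdm.initialize()
+rvdm.halt()
+print(f"misa: 0x{rvdm.read_csr('misa'):08x}")
+rvdm.resume()
+````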
+ +## Usage + +````text +usage: dtm.py [-h] [-H HOST] [-P PORT] [-Q] [-l IR_LENGTH] [-b BASE] [-I] [-c] + [-C MISA_CHECK] [-x] [-X] [-a ADDRESS] [-m {read,write}] + [-s SIZE] [-f FILE] [-e ELF] [-v] [-d] + +Debug Transport Module tiny demo + +options: + -h, --help show this help message and exit + +Virtual machine: + -H HOST, --host HOST JTAG host (default: localhost) + -P PORT, --port PORT JTAG port, default: 3335 + -Q, --no-quit do not ask the QEMU to quit on exit + +DMI: + -l IR_LENGTH, --ir-length IR_LENGTH + bit length of the IR register + -b BASE, --base BASE define DMI base address + +Info: + -I, --info report JTAG ID code and DTM configuration + -c CSR, --csr CSR read CSR value from hart + -C CSR_CHECK, --csr-check CSR_CHECK + check CSR value matches + +Actions: + -x, --execute update the PC from a loaded ELF file + -X, --no-exec does not resume hart execution + +Memory: + -a ADDRESS, --address ADDRESS + address of the first byte to access + -m {read,write}, --mem {read,write} + access memory using System Bus + -s SIZE, --size SIZE size in bytes of memory to access + -f FILE, --file FILE file to read/write data for memory access + -e ELF, --elf ELF load ELF file into memory + +Extras: + -v, --verbose increase verbosity + -d, --debug enable debug mode +```` + +### Arguments + +* `-a` specify the memory address where data is loaded or stored. Useful with the `--mem` option. + See also the `--size` option. Note that only 32-bit aligned addresses are supported for now. + +* `-b` specify the DMI base address for the RISC-V Debug Module + +* `-C` compare a CSR value to the specified value. Requires option `--csr`. + +* `-c` read and report a CSR from the Ibex core. + +* `-d` only useful to debug the script, reports any Python traceback to the standard error stream. + +* `-e` specify an ELF32 application file to upload into memory. See also the `--exec` option. + +* `-H` specify the address of the QEMU VM. + +* `-I` report the JTAG ID code and the DTM configuration. + +* `-l` specify the length of the TAP instruction register length. + +* `-m ` specify a memory operation to perform. See also `--address`, `--size` and + `--file` options. With `read` operation, if no `--file` is specified, the content of the selected + memory segment is dumped to stdout, with a similar format as the output of `hexdump -C`. If a file + is supplied, the content of the segment is written as binary data into this file. With `write` + operation, `--file` argument is mandatory. The content of the binary file is copied into the + memory, starting at the `--address`. See also the `--elf` option for uploading applications. + +* `-P` specify the TCP port of the JTAG server in the QEMU VM, should match the port part of `-jtag` + option for invoking QEMU. + +* `-Q` to not send QEMU a request for termination when this script exits. + +* `-s` specify the number of bytes to read from or write to memory. Useful with the `--mem` option. + See also the `--address` option. This option may be omitted for the `write` memory operation, in + which case the size of the specified file is used. Note that only sizes multiple of 4-byte are + supported for now. + +* `-v` can be repeated to increase verbosity of the script, mostly for debug purpose. + +* `-X` do not attempt to resume normal execution of the hart once DTM operation have been completed. 
+ This can be useful for example when the QEMU VM is started with `-S` and no application code has + been loaded in memory: once the DTM operations are completed, the default behavior is to resume + the hart execution, would start execution code from the current PC and cause an immediate + exception. + +* `-x` execute the loaded ELF application from its entry point. Requires the `--elf` option + +### Examples + +Running QEMU VM with the `-jtag tcp::3335` option: + +* Retrieve JTAG/DTM/DM information and `mtvec` CSR value + ````sh + ./scripts/opentitan/dtm.py -I -c mtvec + ```` + +* Check that the MISA CSR matches the expected Ibex core value: + ````sh + ./scripts/opentitan/dtm.py -c misa -C 0x401411ad + ```` + +* Load and execute an application + ````sh + ./scripts/opentitan/dtm.py -e .../helloworld -x + ```` + +* Dump a memory segment to stdout + ````sh + ./scripts/opentitan/dtm.py -m read -a 0x1000_0080 -s 0x100 + ```` + +* Upload a file into memory and leave the Ibex core halted + ````sh + ./scripts/opentitan/dtm.py -m write -a 0x1000_0000 -f file.dat -X + + ```` + diff --git a/docs/opentitan/jtag-dm.md b/docs/opentitan/jtag-dm.md index ba836d36bac5b..26990d51d518d 100644 --- a/docs/opentitan/jtag-dm.md +++ b/docs/opentitan/jtag-dm.md @@ -22,9 +22,9 @@ dispatches requests to Debug Module depending on the received DMI address. See also [JTAG mailbox](jtagmbx.md) and [Life Controller](lc_ctrl_dmi.md) for other Debug Modules. ``` -+----------------+ -| Host (OpenOCD) | -+----------------+ ++--------------------------+ +| Host (OpenOCD or Python) | ++--------------------------+ | | TCP connection ("bitbang mode") | @@ -118,3 +118,11 @@ A basic `$HOME/.gdbinit` as the following should connect GDB to the running Open ``` target remote :3333 ``` + +## Communicating with JTAG server using Python + +`scripts/opentitan/ot` directory contains Python modules that provide several APIs to test the +JTAG/DTM/DM stack. + +A demo application is available from [`scripts/opentitan/dtm.py`](dtm.md) that can report basic +information about this stack and demonstrate how to use the Debug Module to access the Ibex core. diff --git a/docs/opentitan/tools.md b/docs/opentitan/tools.md index 578a8e4645dec..5cefc1b7dbd7f 100644 --- a/docs/opentitan/tools.md +++ b/docs/opentitan/tools.md @@ -31,6 +31,8 @@ directory to help with these tasks. develop the machine itself. * `devproxy.py` is a Python module that provides an API to remote drive the [DevProxy](devproxy.md) communication interface. +* [`dtm.py`](dtm.md) is a tiny Python script that can be used to check the JTAG/DTM/DM stack is + up and running and demonstrate how to use the Debug Module to access the Ibex core. * [`gdbreplay.py`](gdbreplay.md) is a basic GDB server that can be used to replay Ibex execution stream from a QEMU execution trace. * [`gpiodev.py`](gpiodev.md) is a tiny script to run regression tests with GPIO device. 
diff --git a/scripts/opentitan/dtm.py b/scripts/opentitan/dtm.py index 14c86900e5839..8ccc2af335ac0 100755 --- a/scripts/opentitan/dtm.py +++ b/scripts/opentitan/dtm.py @@ -8,7 +8,8 @@ :author: Emmanuel Blot """ -from argparse import ArgumentParser, Namespace +from argparse import ArgumentParser, Namespace, FileType +from io import BytesIO from os import linesep from os.path import dirname, join as joinpath, normpath from socket import create_connection @@ -23,8 +24,11 @@ # JTAG module is available from the scripts/ directory sys.path.append(joinpath(normpath(dirname(dirname(sys.argv[0]))))) +from ot.util.elf import ElfBlob # noqa: E402 from ot.util.log import configure_loggers # noqa: E402 +from ot.util.misc import HexInt, dump_buffer # noqa: E402 from ot.dtm import DebugTransportModule # noqa: E402 +from ot.dm import DebugModule # noqa: E402 from jtag.bits import BitSequence # noqa: E402 from jtag.bitbang import JtagBitbangController # noqa: E402 from jtag.jtag import JtagEngine # noqa: E402 @@ -44,7 +48,8 @@ def main(): debug = True try: args: Optional[Namespace] = None - argparser = ArgumentParser(description=sys.modules[__name__].__doc__) + argparser = ArgumentParser( + description=sys.modules[__name__].__doc__.split('.')[0]) qvm = argparser.add_argument_group(title='Virtual machine') qvm.add_argument('-H', '--host', default='127.0.0.1', @@ -53,10 +58,36 @@ def main(): default=JtagBitbangController.DEFAULT_PORT, help=f'JTAG port, ' f'default: {JtagBitbangController.DEFAULT_PORT}') - qvm.add_argument('-I', '--info', action='store_true', - help='Report JTAG ID code and DTM configuration') - qvm.add_argument('-l', '--ir-length', type=int, default=5, + qvm.add_argument('-Q', '--no-quit', action='store_true', default=False, + help='do not ask the QEMU to quit on exit') + dmi = argparser.add_argument_group(title='DMI') + dmi.add_argument('-l', '--ir-length', type=int, default=5, help='bit length of the IR register') + dmi.add_argument('-b', '--base', type=HexInt.parse, default=0, + help='define DMI base address') + info = argparser.add_argument_group(title='Info') + info.add_argument('-I', '--info', action='store_true', + help='report JTAG ID code and DTM configuration') + info.add_argument('-c', '--csr', + help='read CSR value from hart') + info.add_argument('-C', '--csr-check', type=HexInt.parse, default=None, + help='check CSR value matches') + act = argparser.add_argument_group(title='Actions') + act.add_argument('-x', '--execute', action='store_true', + help='update the PC from a loaded ELF file') + act.add_argument('-X', '--no-exec', action='store_true', default=False, + help='does not resume hart execution') + mem = argparser.add_argument_group(title='Memory') + mem.add_argument('-a', '--address', type=HexInt.parse, + help='address of the first byte to access') + mem.add_argument('-m', '--mem', choices=('read', 'write'), + help='access memory using System Bus') + mem.add_argument('-s', '--size', type=HexInt.parse, + help='size in bytes of memory to access') + mem.add_argument('-f', '--file', + help='file to read/write data for memory access') + mem.add_argument('-e', '--elf', type=FileType('rb'), + help='load ELF file into memory') extra = argparser.add_argument_group(title='Extras') extra.add_argument('-v', '--verbose', action='count', help='increase verbosity') @@ -66,7 +97,7 @@ def main(): args = argparser.parse_args() debug = args.debug - configure_loggers(args.verbose, 'dtm', 'jtag') + configure_loggers(args.verbose, 'dtm.rvdm', -1, 'dtm', 'jtag') sock = 
create_connection((args.host, args.port), timeout=0.5) sock.settimeout(0.1) @@ -75,15 +106,96 @@ def main(): ctrl.tap_reset(True) ir_length = args.ir_length dtm = DebugTransportModule(eng, ir_length) - if args.info: - code = idcode(eng, ir_length) - print(f'IDCODE: 0x{code:x}') - sys.exit(0) - version = dtm['dtmcs'].dmi_version - abits = dtm['dtmcs'].abits - print(f'DTM: v{version[0]}.{version[1]}, {abits} bits') - dtm['dtmcs'].check() - dtm['dtmcs'].dmireset() + rvdm = None + try: + if args.info: + code = idcode(eng, ir_length) + print(f'IDCODE: 0x{code:x}') + version = dtm['dtmcs'].dmi_version + abits = dtm['dtmcs'].abits + print(f'DTM: v{version[0]}.{version[1]}, {abits} bits') + dtm['dtmcs'].check() + dtm['dtmcs'].dmireset() + if args.csr_check is None and not args.csr: + argparser.error('CSR check requires CSR option') + if args.csr: + if not rvdm: + rvdm = DebugModule(dtm, args.base) + rvdm.initialize() + rvdm.halt() + dmver = rvdm.status['version'] + if args.info: + print(f'DM: {dmver.name}') + sbver = rvdm.system_bus_info['sbversion'] + if args.info: + print(f'SYSBUS: {sbver.name}') + if not args.csr: + csr = 'misa' + else: + try: + csr = HexInt.parse(args.csr) + except ValueError: + csr = args.csr + csr_val = rvdm.read_csr(csr) + rvdm.resume() + if args.csr_check is not None: + if csr_val != args.csr_check: + raise RuntimeError(f'CSR {args.csr} check failed: ' + f'0x{csr_val:08x} != ' + f'0x{args.csr_check:08x}') + else: + pad = ' ' * (10 - len(args.csr)) + print(f'{args.csr}:{pad}0x{csr_val:08x}') + if args.mem: + if args.address is None: + argparser.error('no address specified for memory operation') + if args.mem == 'write' and not args.file: + argparser.error('no file specified for mem write operation') + if args.mem == 'read' and not args.size: + argparser.error('no size specified for mem read operation') + if not rvdm: + rvdm = DebugModule(dtm, args.base) + rvdm.initialize() + try: + rvdm.halt() + if args.file: + mode = 'rb' if args.mem == 'write' else 'wb' + with open(args.file, mode) as mfp: + rvdm.memory_copy(mfp, args.mem, args.address, + args.size) + else: + mfp = BytesIO() + rvdm.memory_copy(mfp, args.mem, args.address, args.size) + dump_buffer(mfp, args.address) + finally: + if not args.no_exec: + rvdm.resume() + if args.elf: + if not ElfBlob.LOADED: + argparser.error('pyelftools module not available') + elf = ElfBlob() + elf.load(args.elf) + args.elf.close() + if elf.address_size != 32: + argparser.error('Only ELF32 files are supported') + if not rvdm: + rvdm = DebugModule(dtm, args.base) + rvdm.initialize() + try: + rvdm.halt() + mfp = BytesIO(elf.blob) + rvdm.memory_copy(mfp, 'write', elf.load_address, args.size) + if args.execute: + rvdm.set_pc(elf.entry_point) + finally: + if not args.no_exec: + rvdm.resume() + else: + if args.execute: + argparser.error('Cannot execute without loaded an ELF file') + finally: + if not args.no_quit: + ctrl.quit() # pylint: disable=broad-except except Exception as exc: diff --git a/scripts/opentitan/ot/dtm/dtm.py b/scripts/opentitan/ot/dtm/dtm.py index 54a64b4d15fb0..d7191c61fe28d 100644 --- a/scripts/opentitan/ot/dtm/dtm.py +++ b/scripts/opentitan/ot/dtm/dtm.py @@ -174,7 +174,7 @@ def write(self, address: int, value: int) -> None: dmi |= value << 2 dmi |= self.OPS['write'] wbseq = BitSequence(dmi, self.length) - self._log.info('write: 0x%08x', value) + self._log.debug('write: 0x%08x', value) self._write(wbseq) rbseq = self._read(self.length) res = int(rbseq) & 0b11 @@ -187,7 +187,7 @@ def write(self, address: int, value: int) -> 
None: def read(self, address: int) -> int: """Read a 32-bit value from the specified address """ - self._log.info('read @ 0x%x', address) + self._log.debug('read @ 0x%x', address) dmi = self._build_dmi(address) dmi |= self.OPS['read'] wbseq = BitSequence(dmi, self.length) @@ -202,7 +202,7 @@ def read(self, address: int) -> int: raise err value >>= 2 value &= 0xffff_ffff - self._log.info('read: 0x%08x', value) + self._log.debug('read: 0x%08x', value) return value def _build_dmi(self, address: int) -> int: From 1531c240bae524d92f55e2e0342398b3ec73f6a0 Mon Sep 17 00:00:00 2001 From: Emmanuel Blot Date: Wed, 10 Apr 2024 17:52:59 +0200 Subject: [PATCH 13/27] [ot] scripts/opentitan: dm.py: add word memory accessors Signed-off-by: Emmanuel Blot --- scripts/opentitan/ot/dm/dm.py | 77 +++++++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) diff --git a/scripts/opentitan/ot/dm/dm.py b/scripts/opentitan/ot/dm/dm.py index b08168015096a..65008e1cd3fe7 100644 --- a/scripts/opentitan/ot/dm/dm.py +++ b/scripts/opentitan/ot/dm/dm.py @@ -373,6 +373,83 @@ def memory_copy(self, mfp: BinaryIO, mop: str, addr: int, rate = size / (lap * 1024) self._log.info('copied %d KB @ %.1f KB/s', size//1024, rate) + def read32(self, addr: int) -> int: + """Read a single word from memory.""" + if addr & 0x3 != 0: + raise ValueError('Invalid address') + btf = self.BITFIELDS['SBCS'] + val = self._wait_sb_idle(check=True) + val = btf.encode(val, + sbreadonaddr=True, + sbreadondata=False, + sbautoincrement=False, + sbaccess=2) # 32-bit access + self.sbcs = val + # trigger first read (sbreadonaddr) in read mode + self._log.debug('reading mem from 0x%08x', addr) + self.sbaddress0 = addr + self._wait_sb_idle() + value = self.sbdata0 + return value + + def write32(self, addr: int, value: int) -> None: + """Write a single word to memory.""" + if addr & 0x3 != 0: + raise ValueError('Invalid address') + btf = self.BITFIELDS['SBCS'] + val = self._wait_sb_idle(check=True) + val = btf.encode(val, + sbreadonaddr=False, + sbreadondata=False, + sbautoincrement=False, + sbaccess=2) # 32-bit access + self.sbcs = val + self._log.debug('writing mem to 0x%08x', addr) + self.sbaddress0 = addr + self.sbdata0 = value + self._wait_sb_idle() + + def read64(self, addr: int) -> int: + """Read two words from memory.""" + if addr & 0x3 != 0: + raise ValueError('Invalid address') + btf = self.BITFIELDS['SBCS'] + val = self._wait_sb_idle(check=True) + val = btf.encode(val, + sbreadonaddr=True, + sbreadondata=False, + sbautoincrement=False, + sbaccess=2) # 32-bit access + self.sbcs = val + self._log.debug('reading mem from 0x%08x', addr) + self.sbaddress0 = addr + self._wait_sb_idle() + value = self.sbdata0 + self.sbaddress0 = addr + 4 + self._wait_sb_idle() + value |= self.sbdata0 << 32 + return value + + def write64(self, addr: int, value: int) -> None: + """Write two words to memory.""" + if addr & 0x3 != 0: + raise ValueError('Invalid address') + btf = self.BITFIELDS['SBCS'] + val = self._wait_sb_idle(check=True) + val = btf.encode(val, + sbreadonaddr=False, + sbreadondata=False, + sbautoincrement=True, + sbaccess=2) # 32-bit access + self.sbcs = val + self._log.debug('writing mem to 0x%08x', addr) + self.sbaddress0 = addr + self.sbdata0 = value & 0xffff_ffff + self._wait_sb_idle() + value >>= 32 + self.sbdata0 = value & 0xffff_ffff + self._wait_sb_idle() + def set_pc(self, addr: int) -> None: """Set the next Program Counter address.""" if not self.is_halted: From f696692d51d09e420a5e0c74c114d93125d533ee Mon Sep 17 00:00:00 2001 From: 
Emmanuel Blot Date: Thu, 11 Apr 2024 12:05:43 +0200 Subject: [PATCH 14/27] [ot] scripts/opentitan: otptool.py: move utility functions to ot.util.misc Signed-off-by: Emmanuel Blot --- scripts/opentitan/ot/util/misc.py | 14 +++++++++++++- scripts/opentitan/otptool.py | 14 +------------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/scripts/opentitan/ot/util/misc.py b/scripts/opentitan/ot/util/misc.py index 7d39514694616..1d9edea05d766 100644 --- a/scripts/opentitan/ot/util/misc.py +++ b/scripts/opentitan/ot/util/misc.py @@ -6,7 +6,7 @@ :author: Emmanuel Blot """ -from typing import Optional +from typing import Any, Optional try: # only available from Python 3.12+ @@ -15,6 +15,13 @@ Buffer = [bytes | bytearray | memoryview] +class classproperty(property): + """Getter property decorator for a class""" + # pylint: disable=invalid-name + def __get__(self, obj: Any, objtype=None) -> Any: + return super().__get__(objtype) + + class HexInt(int): """Simple wrapper to always represent an integer in hexadecimal format.""" @@ -39,3 +46,8 @@ def dump_buffer(buffer: Buffer, addr: int) -> None: text = ''.join(chr(c) if 0x20 <= c < 0x7f else '.' for c in view[pos:pos+16]) print(f'{addr+pos:08x} {buf} |{text}|') + + +def round_up(value: int, rnd: int) -> int: + """Round up a integer value.""" + return (value + rnd - 1) & -rnd diff --git a/scripts/opentitan/otptool.py b/scripts/opentitan/otptool.py index c0605114ceb81..4d775fb25a6ba 100755 --- a/scripts/opentitan/otptool.py +++ b/scripts/opentitan/otptool.py @@ -22,7 +22,7 @@ Tuple, Union) from ot.util.log import configure_loggers -from ot.util.misc import HexInt +from ot.util.misc import HexInt, classproperty, round_up try: # try to load HJSON if available @@ -41,18 +41,6 @@ raise RuntimeError('Unsupported Python version') -def round_up(value: int, rnd: int) -> int: - """Round up a integer value.""" - return (value + rnd - 1) & -rnd - - -class classproperty(property): - """Getter property decorator for a class""" - # pylint: disable=invalid-name - def __get__(self, obj: Any, objtype=None) -> Any: - return super().__get__(objtype) - - class OtpPartitionDecoder: """Custom partition value decoder.""" From 71b6c2ad6f362006a3a6d1a399ad7b09415b1127 Mon Sep 17 00:00:00 2001 From: Emmanuel Blot Date: Thu, 11 Apr 2024 20:37:48 +0200 Subject: [PATCH 15/27] [ot] scripts/opentitan: misc.py: update and fix dump_buffer Signed-off-by: Emmanuel Blot --- scripts/opentitan/ot/util/misc.py | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/scripts/opentitan/ot/util/misc.py b/scripts/opentitan/ot/util/misc.py index 1d9edea05d766..3d2b20cc10d66 100644 --- a/scripts/opentitan/ot/util/misc.py +++ b/scripts/opentitan/ot/util/misc.py @@ -6,7 +6,8 @@ :author: Emmanuel Blot """ -from typing import Any, Optional +from sys import stdout +from typing import Any, Optional, TextIO try: # only available from Python 3.12+ @@ -36,16 +37,26 @@ def parse(val: Optional[str]) -> Optional[int]: return int(val, val.startswith('0x') and 16 or 10) -def dump_buffer(buffer: Buffer, addr: int) -> None: +def dump_buffer(buffer: Buffer, addr: int = 0, file: Optional[TextIO] = None) \ + -> None: """Dump a binary buffer, same format as hexdump -C.""" - view = buffer.getbuffer() + if isinstance(buffer, memoryview): + view = buffer.getbuffer() + else: + view = buffer size = len(view) + if not file: + file = stdout for pos in range(0, size, 16): chunks = view[pos:pos+8], view[pos+8:pos+16] buf = ' '.join(' '.join(f'{x:02x}' for x in c) for c in chunks) 
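+        # pad the hex and ASCII columns so that a short trailing line keeps
+        # the 'hexdump -C' alignment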
- text = ''.join(chr(c) if 0x20 <= c < 0x7f else '.' - for c in view[pos:pos+16]) - print(f'{addr+pos:08x} {buf} |{text}|') + if len(buf) < 48: + buf = f'{buf}{" " * (48 - len(buf))}' + chunk = view[pos:pos+16] + text = ''.join(chr(c) if 0x20 <= c < 0x7f else '.' for c in chunk) + if len(text) < 16: + text = f'{text}{" " * (16-len(text))}' + print(f'{addr+pos:08x} {buf} |{text}|', file=file) def round_up(value: int, rnd: int) -> int: From d81e9bc515d8cbe9f2e4b1fdcb77040969d8b196 Mon Sep 17 00:00:00 2001 From: Emmanuel Blot Date: Thu, 11 Apr 2024 13:35:43 +0200 Subject: [PATCH 16/27] [ot] scripts/opentitan: otptool.py: split into front-end and modules Signed-off-by: Emmanuel Blot --- scripts/opentitan/ot/otp/__init__.py | 9 + scripts/opentitan/ot/otp/descriptor.py | 165 +++++ scripts/opentitan/ot/otp/image.py | 318 +++++++++ scripts/opentitan/ot/otp/map.py | 197 ++++++ scripts/opentitan/ot/otp/partition.py | 280 ++++++++ scripts/opentitan/otptool.py | 910 +------------------------ 6 files changed, 977 insertions(+), 902 deletions(-) create mode 100644 scripts/opentitan/ot/otp/__init__.py create mode 100644 scripts/opentitan/ot/otp/descriptor.py create mode 100644 scripts/opentitan/ot/otp/image.py create mode 100644 scripts/opentitan/ot/otp/map.py create mode 100644 scripts/opentitan/ot/otp/partition.py diff --git a/scripts/opentitan/ot/otp/__init__.py b/scripts/opentitan/ot/otp/__init__.py new file mode 100644 index 0000000000000..8f820e1f88e3d --- /dev/null +++ b/scripts/opentitan/ot/otp/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) 2024 Rivos, Inc. +# SPDX-License-Identifier: Apache2 + +"""One-Time Programmable controller.""" + +from .descriptor import OTPPartitionDesc, OTPRegisterDef # noqa: F401 +from .image import OtpImage # noqa: F401 +from .map import OtpMap # noqa: F401 +from .partition import OtpLifecycleExtension, OtpPartition # noqa: F401 diff --git a/scripts/opentitan/ot/otp/descriptor.py b/scripts/opentitan/ot/otp/descriptor.py new file mode 100644 index 0000000000000..6cd24480a02d1 --- /dev/null +++ b/scripts/opentitan/ot/otp/descriptor.py @@ -0,0 +1,165 @@ +# Copyright (c) 2023-2024 Rivos, Inc. +# SPDX-License-Identifier: Apache2 + +"""OTP descriptors. 
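+
+   Generators that emit C definitions (partition descriptors and register
+   definitions) from an OTP map.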
+ + :author: Emmanuel Blot +""" + +from logging import getLogger +from typing import TYPE_CHECKING, List, TextIO, Tuple + +if TYPE_CHECKING: + from .map import OtpMap + + +class OTPPartitionDesc: + """OTP Partition descriptor generator.""" + + ATTRS = dict( + size=None, + offset=None, + digest_offset=None, + hw_digest='', + sw_digest='', + secret='', + variant='buffer', + write_lock='wlock', + read_lock='rlock', + integrity='', + iskeymgr='', + iskeymgr_creator='', + iskeymgr_owner='', + wide='' + ) + + def __init__(self, otpmap: 'OtpMap'): + self._log = getLogger('otptool.partdesc') + self._otpmap = otpmap + + def save(self, hjname: str, scriptname: str, cfp: TextIO) -> None: + """Generate a C file with a static description for the partitions.""" + # pylint: disable=f-string-without-interpolation + attrs = {n: getattr(self, f'_convert_to_{k}') if k else lambda x: x + for n, k in self.ATTRS.items() if k is not None} + print(f'/* Generated from {hjname} with {scriptname} */', file=cfp) + print(file=cfp) + print('/* clang-format off */', file=cfp) + print('/* NOLINTBEGIN */', file=cfp) + print('static const OtOTPPartDesc OtOTPPartDescs[] = {', file=cfp) + for part in self._otpmap.enumerate_partitions(): + print(f' [OTP_PART_{part.name}] = {{', file=cfp) + print(f' .size = {part.size}u,', file=cfp) + print(f' .offset = {part.offset}u,', file=cfp) + if part.digest_offset is not None: + print(f' .digest_offset = {part.digest_offset}u,', + file=cfp) + else: + print(f' .digest_offset = UINT16_MAX,', # noqa: F541 + file=cfp) + for attr in attrs: + value = getattr(part, attr, None) + if value is None: + continue + convs = attrs[attr](value) + if not isinstance(convs, list): + convs = [convs] + for conv in convs: + if isinstance(conv, tuple): + attr_name = conv[0] + attr_val = conv[1] + else: + attr_name = attr + attr_val = conv + if isinstance(attr_val, bool): + attr_val = str(attr_val).lower() + print(f' .{attr_name} = {attr_val},', file=cfp) + print(f' }},', file=cfp) # noqa: F541 + print('};', file=cfp) + print('', file=cfp) + print('#define OTP_PART_COUNT ARRAY_SIZE(OtOTPPartDescs)', file=cfp) + print(file=cfp) + print('/* NOLINTEND */', file=cfp) + print('/* clang-format on */', file=cfp) + # pylint: enable=f-string-without-interpolation + + @classmethod + def _convert_to_bool(cls, value) -> str: + return str(value).lower() + + @classmethod + def _convert_to_buffer(cls, value) -> Tuple[str, bool]: + return { + 'unbuffered': ('buffered', False), + 'buffered': ('buffered', True), + 'lifecycle': ('buffered', True), + }[value.lower()] + + @classmethod + def _convert_to_wlock(cls, value) -> bool: + return value == 'digest' + + @classmethod + def _convert_to_rlock(cls, value) -> List[Tuple[str, bool]]: + value = value.lower() + if value == 'csr': + return [('read_lock_csr', True), ('read_lock', True)] + if value == 'digest': + return 'read_lock', True + if value == 'none': + return 'read_lock', False + assert False, 'Unknown RLOCK type' + + +class OTPRegisterDef: + """OTP Partition register generator.""" + + def __init__(self, otpmap: 'OtpMap'): + self._log = getLogger('otptool.reg') + self._otpmap = otpmap + + def save(self, hjname: str, scriptname: str, cfp: TextIO) -> None: + """Generate a C file with register definition for the partitions.""" + reg_offsets = [] + reg_sizes = [] + part_names = [] + for part in self._otpmap.enumerate_partitions(): + part_names.append(f'OTP_PART_{part.name}') + offset = part.offset + reg_sizes.append((f'{part.name}_SIZE', part.size)) + for itname, itdict in 
part.items.items(): + size = itdict['size'] + if not itname.startswith(f'{part.name}_'): + name = f'{part.name}_{itname}'.upper() + else: + name = itname + reg_offsets.append((name, offset)) + reg_sizes.append((f'{name}_SIZE', size)) + offset += size + print(f'/* Generated from {hjname} with {scriptname} */') + print(file=cfp) + print('/* clang-format off */', file=cfp) + for reg, off in reg_offsets: + print(f'REG32({reg}, {off}u)', file=cfp) + print(file=cfp) + regwidth = max(len(r[0]) for r in reg_sizes) + for reg, size in reg_sizes: + print(f'#define {reg:{regwidth}s} {size}u', file=cfp) + print(file=cfp) + pcount = len(part_names) + part_names.extend(( + '_OTP_PART_COUNT', + 'OTP_ENTRY_DAI = _OTP_PART_COUNT', + 'OTP_ENTRY_KDI', + '_OTP_ENTRY_COUNT')) + print('typedef enum {', file=cfp) + for pname in part_names: + print(f' {pname},', file=cfp) + print('} OtOTPPartitionType;', file=cfp) + print(file=cfp) + print('static const char *PART_NAMES[] = {', file=cfp) + for pname in part_names[:pcount]: + print(f' OTP_NAME_ENTRY({pname}),', file=cfp) + print('};', file=cfp) + print('/* clang-format on */', file=cfp) + print(file=cfp) diff --git a/scripts/opentitan/ot/otp/image.py b/scripts/opentitan/ot/otp/image.py new file mode 100644 index 0000000000000..b539083171df4 --- /dev/null +++ b/scripts/opentitan/ot/otp/image.py @@ -0,0 +1,318 @@ +# Copyright (c) 2023-2024 Rivos, Inc. +# SPDX-License-Identifier: Apache2 + +"""OTP QEMU image. + + :author: Emmanuel Blot +""" + +from binascii import unhexlify +from io import BytesIO +from logging import getLogger +from re import match as re_match, sub as re_sub +from struct import calcsize as scalc, pack as spack, unpack as sunpack +from typing import Any, BinaryIO, Dict, List, Optional, Set, TextIO + +from .map import OtpMap +from .partition import OtpPartition, OtpLifecycleExtension +from ..util.misc import HexInt, classproperty + + +class OtpImage: + """QEMU 'RAW' OTP image.""" + + HEADER_FORMAT = { + 'magic': '4s', # "vOTP" + 'hlength': 'I', # count of header bytes after this point + 'version': 'I', # version of the header + 'eccbits': 'H', # count of ECC bits for each ECC granule + 'eccgran': 'H', # size in bytes of ECC granule + 'dlength': 'I', # count of data bytes (padded to 64-bit entries) + 'elength': 'I', # count of ecc bytes (padded to 64-bit entries) + } + + HEADER_FORMAT_V2_EXT = { + 'digiv': '8s', # Present digest scrambler IV + 'digfc': '16s', # Present digest scrambler finalization constant + } + + KINDS = { + 'OTP MEM': 'otp', + 'FUSEMAP': 'fuz', + } + + RE_VMEMLOC = r'(?i)^@((?:[0-9a-f]{2})+)\s((?:[0-9a-f]{2})+)$' + RE_VMEMDESC = r'(?i)^//\s?([\w\s]+) file with (\d+)[^\d]*(\d+)\s?bit layout' + + DEFAULT_ECC_BITS = 6 + + def __init__(self, ecc_bits: Optional[int] = None): + self._log = getLogger('otptool.img') + self._header: Dict[str, Any] = {} + self._magic = b'' + self._data = b'' + self._ecc = b'' + if ecc_bits is None: + ecc_bits = self.DEFAULT_ECC_BITS + self._ecc_bits = ecc_bits + self._ecc_bytes = (ecc_bits + 7) // 8 + self._ecc_granule = 0 + self._digest_iv: Optional[int] = None + self._digest_constant: Optional[int] = None + self._partitions: List[OtpPartition] = [] + + @property + def version(self) -> int: + """Provide the version of the RAW image.""" + return self._header.get('version', 0) + + @property + def loaded(self) -> int: + """Report whether data have been loaded into the image.""" + return len(self._data) > 0 + + @property + def is_opentitan(self) -> bool: + """Report whether the current image contains OpenTitan 
OTP data.""" + return self._magic == b'vOTP' + + @classproperty + def vmem_kinds(cls) -> List[str]: + """Reports the supported content kinds of VMEM files.""" + # pylint: disable=no-self-argument + return ['auto'] + list(cls.KINDS.values()) + + @classproperty + def logger(self): + """Return logger instance.""" + # pylint: disable=no-self-argument + return getLogger('otptool') + + def load_raw(self, rfp: BinaryIO) -> None: + """Load OTP image from a QEMU 'RAW' image stream.""" + header = self._load_header(rfp) + self._header = header + self._data = rfp.read(header['dlength']) + self._ecc = rfp.read(header['elength']) + if header['version'] > 1: + self._digest_iv = header['digiv'] + self._digest_constant = header['digfc'] + + def save_raw(self, rfp: BinaryIO) -> None: + """Save OTP image as a QEMU 'RAW' image stream.""" + header = self._build_header() + rfp.write(header) + self._pad(rfp) + rfp.write(self._data) + self._pad(rfp) + rfp.write(self._ecc) + self._pad(rfp, 4096) + + def load_vmem(self, vfp: TextIO, vmem_kind: Optional[str] = None, + swap: bool = True): + """Parse a VMEM '24' text stream.""" + data_buf: List[bytes] = [] + ecc_buf: List[bytes] = [] + last_addr = 0 + granule_sizes: Set[int] = set() + vkind: Optional[str] = None + row_count = 0 + byte_count = 0 + line_count = 0 + if vmem_kind: + vmem_kind = vmem_kind.lower() + if vmem_kind == 'auto': + vmem_kind = None + if vmem_kind and vmem_kind not in self.KINDS.values(): + raise ValueError(f"Unknown VMEM file kind '{vmem_kind}'") + for lno, line in enumerate(vfp, start=1): + if vkind is None: + kmo = re_match(self.RE_VMEMDESC, line) + if kmo: + vkind = kmo.group(1) + row_count = int(kmo.group(2)) + bits = int(kmo.group(3)) + byte_count = bits // 8 + continue + line = re_sub(r'//.*', '', line) + line = line.strip() + if not line: + continue + lmo = re_match(self.RE_VMEMLOC, line) + if not lmo: + self._log.error('Unexpected line @ %d: %s', lno, line) + continue + line_count += 1 + saddr, sdata = lmo.groups() + addr = int(saddr, 16) + if last_addr < addr: + self._log.info('Padding addr from 0x%04x to 0x%04x', + last_addr, addr) + data_buf.append(bytes(addr-last_addr)) + rdata = unhexlify(sdata) + if byte_count != len(rdata): + self._log.warning('Expected %d bytes @ line %s, found %d', + byte_count, lno, len(sdata)) + ecc, data = rdata[:self._ecc_bytes], rdata[self._ecc_bytes:] + if swap: + data = bytes(reversed(data)) + data_buf.append(data) + ecc_buf.append(ecc) + dlen = len(data) + granule_sizes.add(dlen) + last_addr = addr+dlen # ECC is not accounted for in address + self._data = b''.join(data_buf) + self._ecc = b''.join(ecc_buf) + if granule_sizes: + if len(granule_sizes) != 1: + raise ValueError('Variable data size') + self._ecc_granule = granule_sizes.pop() + if row_count and row_count != line_count: + self._log.error('Should have parsed %d lines, found %d', + row_count, line_count) + if not vkind: + if vmem_kind: + vkind = vmem_kind + else: + vkind = self.KINDS.get(vkind.upper(), None) + if vmem_kind: + if vkind and vkind != vmem_kind: + self._log.warning("Detected VMEM kind '%s' differs from " + "'%s'", vkind, vmem_kind) + # use user provided type, even if it is not the one detected + vkind = vmem_kind + if not vkind: + raise ValueError('Unable to detect VMEM find, please specify') + self._magic = f'v{vkind[:3].upper()}'.encode() + + def load_lifecycle(self, lcext: OtpLifecycleExtension) -> None: + """Load lifecyle values.""" + for part in self._partitions: + if part.name == 'LIFE_CYCLE': + part.set_decoder(lcext) + + # 
pylint: disable=invalid-name + def set_digest_iv(self, iv: int) -> None: + """Set the Present digest initialization 64-bit vector.""" + if iv >> 64: + raise ValueError('Invalid digest initialization vector') + self._digest_iv = iv + + def set_digest_constant(self, constant: int) -> None: + """Set the Present digest finalization 128-bit constant.""" + if constant >> 128: + raise ValueError('Invalid digest finalization constant') + self._digest_constant = constant + + @property + def has_present_constants(self) -> bool: + """Reports whether the Present scrambler constants are known/defined.""" + return self._digest_iv is not None and self._digest_constant is not None + + def dispatch(self, cfg: OtpMap) -> None: + """Dispatch RAW image data into the partitions.""" + bfp = BytesIO(self._data) + for part in cfg.enumerate_partitions(): + self._log.debug('%s %d', part.name, bfp.tell()) + part.load(bfp) + self._partitions.append(part) + # all data bytes should have been dispatched into the partitions + assert bfp.tell() == len(self._data), 'Unexpected remaining data bytes' + if self._header: + data_size = self._header.get('dlength', 0) + assert bfp.tell() == data_size, 'Unexpected remaining data bytes' + + def verify(self, show: bool = False) -> bool: + """Verify the partition digests, if any.""" + if any(c is None for c in (self._digest_iv, self._digest_constant)): + raise RuntimeError('Missing Present constants') + results: Dict[str, Optional[bool]] = {} + for part in self._partitions: + if not part.hw_digest: + continue + results[part.name] = part.verify(self._digest_iv, + self._digest_constant) + if show: + print('HW digests:') + width = max(len(x) for x in results) + for name, result in results.items(): + if result is None: + status = 'No digest' + elif result: + status = 'OK' + else: + status = 'Failed' + print(f' * {name:{width}s}: {status}') + # any partition with a defined digest should be valid + return not any(r is False for r in results.values()) + + def decode(self, decode: bool = True, wide: int = 0, + ofp: Optional[TextIO] = None) -> None: + """Decode the content of the image, one partition at a time.""" + version = self.version + if version: + print(f'OTP image v{version}') + if version > 1: + print(f' * present iv {self._digest_iv:016x}') + print(f' * present constant {self._digest_constant:032x}') + for part in self._partitions: + part.decode(decode, wide, ofp) + + def _load_header(self, bfp: BinaryIO) -> Dict[str, Any]: + hfmt = self.HEADER_FORMAT + fhfmt = ''.join(hfmt.values()) + # hlength is the length of header minus the two first items (T, L) + fhsize = scalc(fhfmt) + hdata = bfp.read(fhsize) + parts = sunpack(f'<{fhfmt}', hdata) + header = dict(zip(hfmt.keys(), parts)) + magics = set(f'v{k.upper()}'.encode() for k in self.KINDS.values()) + if header['magic'] not in magics: + raise ValueError(f'{bfp.name} is not a QEMU OTP RAW image') + self._magic = header['magic'] + version = header['version'] + if version > 2: + raise ValueError(f'{bfp.name} is not a valid QEMU OTP RAW image') + if version > 1: + hfmt = self.HEADER_FORMAT_V2_EXT + fhfmt = ''.join(hfmt.values()) + fhsize = scalc(fhfmt) + hdata = bfp.read(fhsize) + parts = sunpack(f'<{fhfmt}', hdata) + headerv2 = dict(zip(hfmt.keys(), + (HexInt(int.from_bytes(v, 'little')) + for v in parts))) + header.update(headerv2) + return header + + def _build_header(self) -> bytes: + assert self._magic, "File kind unknown" + hfmt = self.HEADER_FORMAT + # use V2 image format if Present scrambling constants are available, + # otherwise 
use V1 + use_v2 = bool(self._digest_iv) or bool(self._digest_constant) + if use_v2: + hfmt.update(self.HEADER_FORMAT_V2_EXT) + fhfmt = ''.join(hfmt.values()) + shfmt = ''.join(hfmt[k] for k in list(hfmt)[:2]) + # hlength is the length of header minus the two first items (T, L) + hlen = scalc(fhfmt)-scalc(shfmt) + dlen = (len(self._data)+7) & ~0x7 + elen = (len(self._ecc)+7) & ~0x7 + values = dict(magic=self._magic, hlength=hlen, version=1+int(use_v2), + eccbits=self._ecc_bits, eccgran=self._ecc_granule, + dlength=dlen, elength=elen) + if use_v2: + values['digiv'] = self._digest_iv.to_bytes(8, byteorder='little') + values['digfc'] = self._digest_constant.to_bytes(16, + byteorder='little') + args = [values[k] for k in hfmt] + header = spack(f'<{fhfmt}', *args) + return header + + def _pad(self, bfp: BinaryIO, padsize: Optional[int] = None): + if padsize is None: + padsize = OtpMap.BLOCK_SIZE + tail = bfp.tell() % padsize + if tail: + bfp.write(bytes(padsize-tail)) diff --git a/scripts/opentitan/ot/otp/map.py b/scripts/opentitan/ot/otp/map.py new file mode 100644 index 0000000000000..a12bfb06dbabe --- /dev/null +++ b/scripts/opentitan/ot/otp/map.py @@ -0,0 +1,197 @@ +# Copyright (c) 2023-2024 Rivos, Inc. +# SPDX-License-Identifier: Apache2 + +"""OTP map. + + :author: Emmanuel Blot +""" + +from logging import getLogger +from typing import Any, Dict, Iterator, List, Optional, TextIO, Tuple + +try: + # try to load HJSON if available + from hjson import load as hjload +except ImportError: + hjload = None + +from ot.util.misc import round_up + + +class OtpMap: + """OTP configuration. + + Assume partition file does not contain any error or missing information, + it should have been validated by OT tools first. + """ + BLOCK_SIZE = 8 + + HARDENED_BOOLEANS = { + 0x739: True, + 0x1d4: False + } + + MUBI8_BOOLEANS = { + 0x96: False, + 0x69: True, + 0x00: None + } + + def __init__(self): + self._log = getLogger('otptool.map') + self._map: Dict = {} + self._otp_size = 0 + self._partitions: List[OtpPartition] = [] + + def load(self, hfp: TextIO) -> None: + """Parse a HJSON configuration file, typically otp_ctrl_mmap.hjson + """ + if hjload is None: + raise ImportError('HJSON module is required') + self._map = hjload(hfp, object_pairs_hook=dict) + otp = self._map['otp'] + self._otp_size = int(otp['width']) * int(otp['depth']) + self._generate_partitions() + self._compute_locations() + + @property + def partitions(self) -> Dict[str, Any]: + """Return the partitions (in any)""" + return {p['name']: p for p in self._map.get('partitions', [])} + + @classmethod + def part_offset(cls, part: Dict[str, Any]) -> int: + """Get the offset of a partition.""" + # expect a KeyError if missing + return int(part['offset']) + + def enumerate_partitions(self) -> Iterator['OtpPartition']: + """Enumerate the partitions in their address order.""" + return iter(self._partitions) + + def _generate_partitions(self) -> None: + parts = self._map.get('partitions', []) + have_offset = all('offset' in p for p in parts) + if not have_offset: + # either all or no partition should have an offset definition + if any('offset' in p for p in parts): + raise RuntimeError('Incoherent offset use in partitions') + if have_offset: + # if offset are defined, first create a shallow copy of the + # partition in sorted order + parts = list(sorted(parts, key=OtpMap.part_offset)) + self._partitions = [] + for part in parts: + # shallow copy of the partition + part = dict(part) + name = part['name'] + # remove the name from the dict + del part['name'] + 
desc = part.get('desc', '').replace('\n', ' ') + # remove description from partition + if desc: + del part['desc'] + # remove descriptions from items + items = {} + for item in part.get('items', []): + assert isinstance(item, dict) + # shallow copy + item = dict(item) + if 'desc' in item: + del item['desc'] + # assume name & size are always defined for each item + item_name = item['name'] + del item['name'] + item_size = int(item['size']) + # handle very weird case where the size define the number of + # a multibit bool but not its size in bytes + item_size = round_up(item_size, 4) + item['size'] = item_size + assert item_name not in items + items[item_name] = item + part['items'] = items + # size are always encoded as strings, not integers + items_size = sum(int(i.get('size')) for i in items.values()) + # some partitions define their overall size, most don't + # if the size is defined, it takes precedence over the sum of its + # items + part_size = int(part.get('size', '0')) + has_digest = any(part.get(f'{k}w_digest') for k in 'sh') + if has_digest: + items_size += OtpPartition.DIGEST_SIZE + if part_size: + assert items_size <= part_size + else: + part_size = round_up(items_size, self.BLOCK_SIZE) + # update the partition with is actual size + part['size'] = part_size + # special ugly case as configuration file defines is_keymgr per item + # but RTL defines it per partition for some reason + kmm = self._check_keymgr_materials(name, part['items']) + if kmm: + part[kmm[0]] = kmm[1] + prefix = name.title().replace('_', '') + partname = f'{prefix}Part' + newpart = type(partname, (OtpPartition,), + dict(name=name, __doc__=desc)) + self._partitions.append(newpart(part)) + + def _check_keymgr_materials(self, partname: str, items: Dict[str, Dict]) \ + -> Optional[Tuple[str, bool]]: + """Check partition for key manager material fields.""" + kms: Dict[str, bool] = {} + kmprefix = 'iskeymgr' + for props in items.values(): + for prop, value in props.items(): + if prop.startswith(kmprefix): + kind = prop[len(kmprefix):] + if kind not in kms: + kms[kind] = set() + kms[kind].add(value) + kind_count = len(kms) + if not kind_count: + return None + if kind_count > 1: + raise ValueError(f'Incoherent key manager material definition in ' + f'{partname} partition') + kind = set(kms).pop() + enable = any(kms[kind]) + return f'{kmprefix}{kind}', enable + + def _compute_locations(self) -> None: + """Update partitions with their location within the OTP map.""" + absorb_parts = [p for p in self._partitions + if getattr(p, 'absorb', False)] + total_size = sum(p.size for p in self._partitions) + rem_size = self._otp_size - total_size + rem_blocks = rem_size // self.BLOCK_SIZE + absorb_count = len(absorb_parts) + blk_per_part = rem_blocks // absorb_count + extra_blocks = rem_blocks % absorb_count + self._log.info("%d bytes (%d blocks) to absorb into %d partition%s", + rem_size, rem_blocks, absorb_count, + 's' if absorb_count > 1 else '') + for part in absorb_parts: + psize = part.size + part.size += self.BLOCK_SIZE * blk_per_part + if extra_blocks: + part.size += self.BLOCK_SIZE + extra_blocks -= 1 + self._log.info('Partition %s size augmented from %u to %u', + part.name, psize, part.size) + for part in self._partitions: + part_offset = 0 + for part in self._partitions: + if part.sw_digest or part.hw_digest: + digest_offset = part_offset + part.size - 8 + else: + digest_offset = None + setattr(part, 'offset', part_offset) + setattr(part, 'digest_offset', digest_offset) + part_offset += part.size + assert part_offset == 
self._otp_size, "Unexpected partition offset" + + +# imported here to avoid Python circular dependency issue +# pylint: disable=wrong-import-position +from .partition import OtpPartition # noqa: E402 diff --git a/scripts/opentitan/ot/otp/partition.py b/scripts/opentitan/ot/otp/partition.py new file mode 100644 index 0000000000000..afcfb7b559eac --- /dev/null +++ b/scripts/opentitan/ot/otp/partition.py @@ -0,0 +1,280 @@ +# Copyright (c) 2023-2024 Rivos, Inc. +# SPDX-License-Identifier: Apache2 + +"""OTP partitions. + + :author: Emmanuel Blot +""" + +from binascii import hexlify, unhexlify +from io import BytesIO, StringIO +from logging import getLogger +from os.path import basename +from re import match as re_match +from textwrap import fill +from typing import BinaryIO, Dict, List, Optional, TextIO + +try: + # try to load Present if available + from present import Present +except ImportError: + Present = None + + +class OtpPartitionDecoder: + """Custom partition value decoder.""" + + def decode(self, category: str, seq: str) -> Optional[str | int]: + """Decode a value (if possible).""" + raise NotImplementedError('abstract base class') + + +class OtpPartition: + """Partition abstract base class. + + :param params: initial partition attributes. + """ + # pylint: disable=no-member + + DIGEST_SIZE = 8 # bytes + + MAX_DATA_WIDTH = 20 + + def __init__(self, params): + self.__dict__.update(params) + self._decoder = None + self._log = getLogger('otptool.part') + self._data = b'' + self._digest_bytes: Optional[bytes] = None + + @property + def has_digest(self) -> bool: + """Check if the partition supports any kind of digest (SW or HW).""" + return any(getattr(self, f'{k}w_digest', False) for k in 'sh') + + @property + def is_locked(self) -> bool: + """Check if the partition is locked, based on its digest.""" + return (self.has_digest and self._digest_bytes and + self._digest_bytes != bytes(self.DIGEST_SIZE)) + + def __repr__(self) -> str: + return repr(self.__dict__) + + def load(self, bfp: BinaryIO) -> None: + """Load the content of the partition from a binary stream.""" + data = bfp.read(self.size) + if len(data) != self.size: + raise IOError(f'{self.name} Cannot load {self.size} from stream') + if self.has_digest: + data, digest = data[:-self.DIGEST_SIZE], data[-self.DIGEST_SIZE:] + self._digest_bytes = digest + self._data = data + + def verify(self, digest_iv: int, digest_constant: int) -> Optional[bool]: + """Verify if the digest matches the content of the partition, if any. + """ + self._log.debug('Verify %s', self.name) + if not self.is_locked: + self._log.info('%s has no stored digest', self.name) + return None + return self.check_digest(digest_iv, digest_constant) + + def check_digest(self, digest_iv: int, digest_constant: int) \ + -> Optional[bool]: + """Verify if the digest matches the content of the partition.""" + # don't ask about the byte order. 
Something is inverted somewhere, and + # this is all that matters for now + assert self._digest_bytes is not None + idigest = int.from_bytes(self._digest_bytes, byteorder='little') + if idigest == 0: + self._log.warning('Partition %s digest empty', self.name) + return None + lidigest = self.compute_digest(self._data, digest_iv, digest_constant) + if lidigest != idigest: + self._log.error('Partition %s digest mismatch (%016x/%016x)', + self.name, lidigest, idigest) + return False + self._log.info('Partition %s digest match (%016x)', self.name, lidigest) + return True + + @classmethod + def compute_digest(cls, data: bytes, digest_iv: int, digest_constant: int) \ + -> int: + """Compute the HW digest of the partition.""" + if Present is None: + raise RuntimeError('Cannot check digest, Present module not found') + block_sz = OtpMap.BLOCK_SIZE + assert block_sz == 8 # should be 64 bits for Present to work + if len(data) % block_sz != 0: + # this case is valid but not yet impplemented (paddding) + raise RuntimeError('Invalid partition size') + block_count = len(data) // block_sz + if block_count & 1: + data = b''.join((data, data[-block_sz:])) + state = digest_iv + for offset in range(0, len(data), 16): + chunk = data[offset:offset+16] + b128 = int.from_bytes(chunk, byteorder='little') + present = Present(b128) + tmp = present.encrypt(state) + state ^= tmp + present = Present(digest_constant) + state ^= present.encrypt(state) + return state + + def set_decoder(self, decoder: OtpPartitionDecoder) -> None: + """Assign a custom value decoder.""" + self._decoder = decoder + + def decode(self, decode: bool = True, wide: int = 0, + ofp: Optional[TextIO] = None) -> None: + """Decode the content of the partition.""" + buf = BytesIO(self._data) + if ofp: + def emit(fmt, *args): + print(fmt % args, file=ofp) + else: + emit = self._log.info + pname = self.name + for itname, itdef in self.items.items(): + itsize = itdef['size'] + itvalue = buf.read(itsize) + if itname.startswith(f'{pname}_'): + name = f'{pname}:{itname[len(pname)+1:]}' + else: + name = f'{pname}:{itname}' + if itsize > 8: + rvalue = bytes(reversed(itvalue)) + sval = hexlify(rvalue).decode() + if decode and self._decoder: + dval = self._decoder.decode(itname, sval) + if dval is not None: + emit('%-46s (decoded) %s', name, dval) + continue + if not sum(itvalue) and wide < 2: + emit('%-46s [%d] 0...', name, itsize) + else: + if not wide and itsize > self.MAX_DATA_WIDTH: + sval = f'{sval[:self.MAX_DATA_WIDTH*2]}...' + emit('%-46s [%d] %s', name, itsize, sval) + else: + ival = int.from_bytes(itvalue, 'little') + if decode: + if itdef.get('ismubi'): + emit('%-46s (decoded) %s', + name, str(OtpMap.MUBI8_BOOLEANS.get(ival, ival))) + elif itsize == 4 and ival in OtpMap.HARDENED_BOOLEANS: + emit('%-46s (decoded) %s', + name, str(OtpMap.HARDENED_BOOLEANS[ival])) + else: + emit('%-46s %x', name, ival) + + +class OtpLifecycleExtension(OtpPartitionDecoder): + """Decoder for Lifecyle bytes sequences. + """ + + EXTRA_SLOTS = { + 'lc_state': { + 'post_transition': None, + 'escalate': None, + 'invalid': None, + } + } + + def __init__(self): + self._log = getLogger('otptool.lc') + self._tables: Dict[str, Dict[str, str]] = {} + + def decode(self, category: str, seq: str) -> Optional[str | int]: + return self._tables.get(category, {}).get(seq, None) + + def load(self, svp: TextIO): + """Decode LifeCycle information. + + :param svp: System Verilog stream with OTP definitions. 
+ """ + ab_re = (r"\s*parameter\s+logic\s+\[\d+:\d+\]\s+" + r"([ABCD]\d+|ZRO)\s+=\s+\d+'(b(?:[01]+)|h(?:[0-9a-fA-F]+));") + tbl_re = r"\s*Lc(St|Cnt)(\w+)\s+=\s+\{([^\}]+)\}\s*,?" + codes: Dict[str, int] = {} + sequences: Dict[str, List[str]] = {} + for line in svp: + cmt = line.find('//') + if cmt >= 0: + line = line[:cmt] + line = line.strip() + abmo = re_match(ab_re, line) + if not sequences and abmo: + name = abmo.group(1) + sval = abmo.group(2) + val = int(sval[1:], 2 if sval.startswith('b') else 16) + if name in codes: + self._log.error('Redefinition of %s', name) + continue + codes[name] = val + continue + smo = re_match(tbl_re, line) + if smo: + kind = smo.group(1).lower() + name = smo.group(2) + seq = smo.group(3) + items = [x.strip() for x in seq.split(',')] + inv = [it for it in items if it not in codes] + if inv: + self._log.error('Unknown state seq: %s', ', '.join(inv)) + if kind not in sequences: + sequences[kind] = {} + sequences[kind][name] = items + continue + for kind, seqs in sequences.items(): + mkind, conv = dict(st=('LC_STATE', str), + cnt=('LC_TRANSITION_CNT', int))[kind] + self._tables[mkind] = {} + for ref, seq in seqs.items(): + seq = ''.join((f'{x:04x}'for x in map(codes.get, seq))) + self._tables[mkind][seq] = conv(ref) + + def save(self, cfp: TextIO): + """Save OTP life cycle definitions as a C file. + + :param cfp: output text stream + """ + print(f'/* Section auto-generated with {basename(__file__)} ' + f'script */', file=cfp) + for kind, table in self._tables.items(): + enum_io = StringIO() + array_io = StringIO() + count = len(table) + length = max(len(x) for x in table.keys())//2 + print(f'static const char {kind.lower()}s[{count}u][{length}u]' + f' = {{', file=array_io) + pad = ' ' * 8 + for seq, ref in table.items(): + if isinstance(ref, str): + slot = f'{kind}_{ref}'.upper() + print(f' {slot},', file=enum_io) + else: + slot = f'{ref}u' + seqstr = ', '.join((f'0x{b:02x}u' for b in + reversed(unhexlify(seq)))) + defstr = fill(seqstr, width=80, initial_indent=pad, + subsequent_indent=pad) + print(f' [{slot}] = {{\n{defstr}\n }},', + file=array_io) + print('};', file=array_io) + for extra in self.EXTRA_SLOTS.get(kind.lower(), {}): + slot = f'{kind}_{extra}'.upper() + print(f' {slot},', file=enum_io) + enum_str = enum_io.getvalue() + if enum_str: + # likely to be moved to a header file + print(f'enum {kind.lower()} {{\n{enum_str}}};\n', file=cfp) + print(f'{array_io.getvalue()}', file=cfp) + print('/* End of auto-generated section */', file=cfp) + + +# imported here to avoid Python circular dependency issue +# pylint: disable=wrong-import-position +from .map import OtpMap # noqa: E402 diff --git a/scripts/opentitan/otptool.py b/scripts/opentitan/otptool.py index 4d775fb25a6ba..19a1e82135298 100755 --- a/scripts/opentitan/otptool.py +++ b/scripts/opentitan/otptool.py @@ -9,917 +9,22 @@ """ from argparse import ArgumentParser, FileType -from binascii import hexlify, unhexlify -from io import BytesIO, StringIO -from logging import getLogger from os.path import basename -from re import match as re_match, sub as re_sub -from struct import calcsize as scalc, pack as spack, unpack as sunpack from sys import argv, exit as sysexit, modules, stderr, stdout, version_info -from textwrap import fill from traceback import format_exc -from typing import (Any, BinaryIO, Dict, Iterator, List, Optional, Set, TextIO, - Tuple, Union) +from typing import Optional from ot.util.log import configure_loggers -from ot.util.misc import HexInt, classproperty, round_up +from ot.util.misc 
import HexInt +from ot.otp import (OtpImage, OtpLifecycleExtension, OtpMap, OTPPartitionDesc, + OTPRegisterDef) -try: - # try to load HJSON if available - from hjson import load as hjload -except ImportError: - hjload = None - -try: - # try to load Present if available - from present import Present -except ImportError: - Present = None # requirement: Python 3.7+: dict entries are kept in creation order if version_info[:2] < (3, 7): raise RuntimeError('Unsupported Python version') -class OtpPartitionDecoder: - """Custom partition value decoder.""" - - def decode(self, category: str, seq: str) -> Union[str, int, None]: - """Decode a value (if possible).""" - raise NotImplementedError('abstract base class') - - -class OtpPartition: - """Partition abstract base class. - - :param params: initial partition attributes. - """ - # pylint: disable=no-member - - DIGEST_SIZE = 8 # bytes - - MAX_DATA_WIDTH = 20 - - def __init__(self, params): - self.__dict__.update(params) - self._decoder = None - self._log = getLogger('otptool.part') - self._data = b'' - self._digest_bytes: Optional[bytes] = None - - @property - def has_digest(self) -> bool: - """Check if the partition supports any kind of digest (SW or HW).""" - return any(getattr(self, f'{k}w_digest', False) for k in 'sh') - - @property - def is_locked(self) -> bool: - """Check if the partition is locked, based on its digest.""" - return (self.has_digest and self._digest_bytes and - self._digest_bytes != bytes(self.DIGEST_SIZE)) - - def load(self, bfp: BinaryIO) -> None: - """Load the content of the partition from a binary stream.""" - data = bfp.read(self.size) - if len(data) != self.size: - raise IOError(f'{self.name} Cannot load {self.size} from stream') - if self.has_digest: - data, digest = data[:-self.DIGEST_SIZE], data[-self.DIGEST_SIZE:] - self._digest_bytes = digest - self._data = data - - def verify(self, digest_iv: int, digest_constant: int) -> Optional[bool]: - """Verify if the digest matches the content of the partition, if any. - """ - self._log.debug('Verify %s', self.name) - if not self.is_locked: - self._log.info('%s has no stored digest', self.name) - return None - return self.check_digest(digest_iv, digest_constant) - - def check_digest(self, digest_iv: int, digest_constant: int) \ - -> Optional[bool]: - """Verify if the digest matches the content of the partition.""" - # don't ask about the byte order. 
Something is inverted somewhere, and - # this is all that matters for now - assert self._digest_bytes is not None - idigest = int.from_bytes(self._digest_bytes, byteorder='little') - if idigest == 0: - self._log.warning('Partition %s digest empty', self.name) - return None - lidigest = self.compute_digest(self._data, digest_iv, digest_constant) - if lidigest != idigest: - self._log.error('Partition %s digest mismatch (%016x/%016x)', - self.name, lidigest, idigest) - return False - self._log.info('Partition %s digest match (%016x)', self.name, lidigest) - return True - - @classmethod - def compute_digest(cls, data: bytes, digest_iv: int, digest_constant: int) \ - -> int: - """Compute the HW digest of the partition.""" - if Present is None: - raise RuntimeError('Cannot check digest, Present module not found') - block_sz = OtpMap.BLOCK_SIZE - assert block_sz == 8 # should be 64 bits for Present to work - if len(data) % block_sz != 0: - # this case is valid but not yet impplemented (paddding) - raise RuntimeError('Invalid partition size') - block_count = len(data) // block_sz - if block_count & 1: - data = b''.join((data, data[-block_sz:])) - state = digest_iv - for offset in range(0, len(data), 16): - chunk = data[offset:offset+16] - b128 = int.from_bytes(chunk, byteorder='little') - present = Present(b128) - tmp = present.encrypt(state) - state ^= tmp - present = Present(digest_constant) - state ^= present.encrypt(state) - return state - - def set_decoder(self, decoder: OtpPartitionDecoder) -> None: - """Assign a custom value decoder.""" - self._decoder = decoder - - def decode(self, decode: bool = True, wide: int = 0, - ofp: Optional[TextIO] = None) -> None: - """Decode the content of the partition.""" - buf = BytesIO(self._data) - if ofp: - def emit(fmt, *args): - print(fmt % args, file=ofp) - else: - emit = self._log.info - pname = self.name - for itname, itdef in self.items.items(): - itsize = itdef['size'] - itvalue = buf.read(itsize) - if itname.startswith(f'{pname}_'): - name = f'{pname}:{itname[len(pname)+1:]}' - else: - name = f'{pname}:{itname}' - if itsize > 8: - rvalue = bytes(reversed(itvalue)) - sval = hexlify(rvalue).decode() - if decode and self._decoder: - dval = self._decoder.decode(itname, sval) - if dval is not None: - emit('%-46s (decoded) %s', name, dval) - continue - if not sum(itvalue) and wide < 2: - emit('%-46s [%d] 0...', name, itsize) - else: - if not wide and itsize > self.MAX_DATA_WIDTH: - sval = f'{sval[:self.MAX_DATA_WIDTH*2]}...' - emit('%-46s [%d] %s', name, itsize, sval) - else: - ival = int.from_bytes(itvalue, 'little') - if decode: - if itdef.get('ismubi'): - emit('%-46s (decoded) %s', - name, str(OtpMap.MUBI8_BOOLEANS.get(ival, ival))) - elif itsize == 4 and ival in OtpMap.HARDENED_BOOLEANS: - emit('%-46s (decoded) %s', - name, str(OtpMap.HARDENED_BOOLEANS[ival])) - else: - emit('%-46s %x', name, ival) - - -class OtpLifecycleExtension(OtpPartitionDecoder): - """Decoder for Lifecyle bytes sequences. - """ - - EXTRA_SLOTS = { - 'lc_state': { - 'post_transition': None, - 'escalate': None, - 'invalid': None, - } - } - - def __init__(self): - self._log = getLogger('otptool.lc') - self._tables: Dict[str, Dict[str, str]] = {} - - def decode(self, category: str, seq: str) -> Union[str, int, None]: - return self._tables.get(category, {}).get(seq, None) - - def load(self, svp: TextIO): - """Decode LifeCycle information. - - :param svp: System Verilog stream with OTP definitions. 
- """ - ab_re = (r"\s*parameter\s+logic\s+\[\d+:\d+\]\s+" - r"([ABCD]\d+|ZRO)\s+=\s+\d+'(b(?:[01]+)|h(?:[0-9a-fA-F]+));") - tbl_re = r"\s*Lc(St|Cnt)(\w+)\s+=\s+\{([^\}]+)\}\s*,?" - codes: Dict[str, int] = {} - sequences: Dict[str, List[str]] = {} - for line in svp: - cmt = line.find('//') - if cmt >= 0: - line = line[:cmt] - line = line.strip() - abmo = re_match(ab_re, line) - if not sequences and abmo: - name = abmo.group(1) - sval = abmo.group(2) - val = int(sval[1:], 2 if sval.startswith('b') else 16) - if name in codes: - self._log.error('Redefinition of %s', name) - continue - codes[name] = val - continue - smo = re_match(tbl_re, line) - if smo: - kind = smo.group(1).lower() - name = smo.group(2) - seq = smo.group(3) - items = [x.strip() for x in seq.split(',')] - inv = [it for it in items if it not in codes] - if inv: - self._log.error('Unknown state seq: %s', ', '.join(inv)) - if kind not in sequences: - sequences[kind] = {} - sequences[kind][name] = items - continue - for kind, seqs in sequences.items(): - mkind, conv = dict(st=('LC_STATE', str), - cnt=('LC_TRANSITION_CNT', int))[kind] - self._tables[mkind] = {} - for ref, seq in seqs.items(): - seq = ''.join((f'{x:04x}'for x in map(codes.get, seq))) - self._tables[mkind][seq] = conv(ref) - - def save(self, cfp: TextIO): - """Save OTP life cycle definitions as a C file. - - :param cfp: output text stream - """ - print(f'/* Section auto-generated with {basename(__file__)} ' - f'script */', file=cfp) - for kind, table in self._tables.items(): - enum_io = StringIO() - array_io = StringIO() - count = len(table) - length = max(len(x) for x in table.keys())//2 - print(f'static const char {kind.lower()}s[{count}u][{length}u]' - f' = {{', file=array_io) - pad = ' ' * 8 - for seq, ref in table.items(): - if isinstance(ref, str): - slot = f'{kind}_{ref}'.upper() - print(f' {slot},', file=enum_io) - else: - slot = f'{ref}u' - seqstr = ', '.join((f'0x{b:02x}u' for b in - reversed(unhexlify(seq)))) - defstr = fill(seqstr, width=80, initial_indent=pad, - subsequent_indent=pad) - print(f' [{slot}] = {{\n{defstr}\n }},', - file=array_io) - print('};', file=array_io) - for extra in self.EXTRA_SLOTS.get(kind.lower(), {}): - slot = f'{kind}_{extra}'.upper() - print(f' {slot},', file=enum_io) - enum_str = enum_io.getvalue() - if enum_str: - # likely to be moved to a header file - print(f'enum {kind.lower()} {{\n{enum_str}}};\n', file=cfp) - print(f'{array_io.getvalue()}', file=cfp) - print('/* End of auto-generated section */', file=cfp) - - -class OtpMap: - """OTP configuration. - - Assume partition file does not contain any error or missing information, - it should have been validated by OT tools first. 
- """ - BLOCK_SIZE = 8 - - HARDENED_BOOLEANS = { - 0x739: True, - 0x1d4: False - } - - MUBI8_BOOLEANS = { - 0x96: False, - 0x69: True, - 0x00: None - } - - def __init__(self): - self._log = getLogger('otptool.map') - self._map: Dict = {} - self._otp_size = 0 - self._partitions: List[OtpPartition] = [] - - def load(self, hfp: TextIO) -> None: - """Parse a HJSON configuration file, typically otp_ctrl_mmap.hjson - """ - if hjload is None: - raise ImportError('HJSON module is required') - self._map = hjload(hfp, object_pairs_hook=dict) - otp = self._map['otp'] - self._otp_size = int(otp['width']) * int(otp['depth']) - self._generate_partitions() - self._compute_locations() - - @property - def partitions(self) -> Dict[str, Any]: - """Return the partitions (in any)""" - return self._map.get('partitions', {}) - - @classmethod - def part_offset(cls, part: Dict[str, Any]) -> int: - """Get the offset of a partition.""" - # expect a KeyError if missing - return int(part['offset']) - - def enumerate_partitions(self) -> Iterator[OtpPartition]: - """Enumerate the partitions in their address order.""" - return iter(self._partitions) - - def _generate_partitions(self) -> None: - parts = self.partitions - have_offset = all('offset' in p for p in parts) - if not have_offset: - # either all or no partition should have an offset definition - if any('offset' in p for p in parts): - raise RuntimeError('Incoherent offset use in partitions') - if have_offset: - # if offset are defined, first create a shallow copy of the - # partition in sorted order - parts = list(sorted(parts, key=OtpMap.part_offset)) - self._partitions = [] - for part in parts: - # shallow copy of the partition - part = dict(part) - name = part['name'] - # remove the name from the dict - del part['name'] - desc = part.get('desc', '').replace('\n', ' ') - # remove description from partition - if desc: - del part['desc'] - # remove descriptions from items - items = {} - for item in part.get('items', []): - assert isinstance(item, dict) - # shallow copy - item = dict(item) - if 'desc' in item: - del item['desc'] - # assume name & size are always defined for each item - item_name = item['name'] - del item['name'] - item_size = int(item['size']) - # handle very weird case where the size define the number of - # a multibit bool but not its size in bytes - item_size = round_up(item_size, 4) - item['size'] = item_size - assert item_name not in items - items[item_name] = item - part['items'] = items - # size are always encoded as strings, not integers - items_size = sum(int(i.get('size')) for i in items.values()) - # some partitions define their overall size, most don't - # if the size is defined, it takes precedence over the sum of its - # items - part_size = int(part.get('size', '0')) - has_digest = any(part.get(f'{k}w_digest') for k in 'sh') - if has_digest: - items_size += OtpPartition.DIGEST_SIZE - if part_size: - assert items_size <= part_size - else: - part_size = round_up(items_size, self.BLOCK_SIZE) - # update the partition with is actual size - part['size'] = part_size - # special ugly case as configuration file defines is_keymgr per item - # but RTL defines it per partition for some reason - kmm = self._check_keymgr_materials(name, part['items']) - if kmm: - part[kmm[0]] = kmm[1] - prefix = name.title().replace('_', '') - partname = f'{prefix}Part' - newpart = type(partname, (OtpPartition,), - dict(name=name, __doc__=desc)) - self._partitions.append(newpart(part)) - - def _check_keymgr_materials(self, partname: str, items: Dict[str, Dict]) \ 
- -> Optional[Tuple[str, bool]]: - """Check partition for key manager material fields.""" - kms: Dict[str, bool] = {} - kmprefix = 'iskeymgr' - for props in items.values(): - for prop, value in props.items(): - if prop.startswith(kmprefix): - kind = prop[len(kmprefix):] - if kind not in kms: - kms[kind] = set() - kms[kind].add(value) - kind_count = len(kms) - if not kind_count: - return None - if kind_count > 1: - raise ValueError(f'Incoherent key manager material definition in ' - f'{partname} partition') - kind = set(kms).pop() - enable = any(kms[kind]) - return f'{kmprefix}{kind}', enable - - def _compute_locations(self) -> None: - """Update partitions with their location within the OTP map.""" - absorb_parts = [p for p in self._partitions - if getattr(p, 'absorb', False)] - total_size = sum(p.size for p in self._partitions) - rem_size = self._otp_size - total_size - rem_blocks = rem_size // self.BLOCK_SIZE - absorb_count = len(absorb_parts) - blk_per_part = rem_blocks // absorb_count - extra_blocks = rem_blocks % absorb_count - self._log.info("%d bytes (%d blocks) to absorb into %d partition%s", - rem_size, rem_blocks, absorb_count, - 's' if absorb_count > 1 else '') - for part in absorb_parts: - psize = part.size - part.size += self.BLOCK_SIZE * blk_per_part - if extra_blocks: - part.size += self.BLOCK_SIZE - extra_blocks -= 1 - self._log.info('Partition %s size augmented from %u to %u', - part.name, psize, part.size) - for part in self._partitions: - part_offset = 0 - for part in self._partitions: - if part.sw_digest or part.hw_digest: - digest_offset = part_offset + part.size - 8 - else: - digest_offset = None - setattr(part, 'offset', part_offset) - setattr(part, 'digest_offset', digest_offset) - part_offset += part.size - assert part_offset == self._otp_size, "Unexpected partition offset" - - -class OTPPartitionDesc: - """OTP Partition descriptor generator.""" - - ATTRS = dict( - size=None, - offset=None, - digest_offset=None, - hw_digest='', - sw_digest='', - secret='', - variant='buffer', - write_lock='wlock', - read_lock='rlock', - integrity='', - iskeymgr='', - iskeymgr_creator='', - iskeymgr_owner='', - wide='' - ) - - def __init__(self, otpmap: OtpMap): - self._log = getLogger('otptool.partdesc') - self._otpmap = otpmap - - def save(self, hjname: str, cfp: TextIO) -> None: - """Generate a C file with a static description for the partitions.""" - # pylint: disable=f-string-without-interpolation - attrs = {n: getattr(self, f'_convert_to_{k}') if k else lambda x: x - for n, k in self.ATTRS.items() if k is not None} - scriptname = basename(argv[0]) - print(f'/* Generated from {hjname} with {scriptname} */', file=cfp) - print(file=cfp) - print('/* clang-format off */', file=cfp) - print('/* NOLINTBEGIN */', file=cfp) - print('static const OtOTPPartDesc OtOTPPartDescs[] = {', file=cfp) - for part in self._otpmap.enumerate_partitions(): - print(f' [OTP_PART_{part.name}] = {{', file=cfp) - print(f' .size = {part.size}u,', file=cfp) - print(f' .offset = {part.offset}u,', file=cfp) - if part.digest_offset is not None: - print(f' .digest_offset = {part.digest_offset}u,', - file=cfp) - else: - print(f' .digest_offset = UINT16_MAX,', # noqa: F541 - file=cfp) - for attr in attrs: - value = getattr(part, attr, None) - if value is None: - continue - convs = attrs[attr](value) - if not isinstance(convs, list): - convs = [convs] - for conv in convs: - if isinstance(conv, tuple): - attr_name = conv[0] - attr_val = conv[1] - else: - attr_name = attr - attr_val = conv - if isinstance(attr_val, 
bool): - attr_val = str(attr_val).lower() - print(f' .{attr_name} = {attr_val},', file=cfp) - print(f' }},', file=cfp) # noqa: F541 - print('};', file=cfp) - print('', file=cfp) - print('#define OTP_PART_COUNT ARRAY_SIZE(OtOTPPartDescs)', file=cfp) - print(file=cfp) - print('/* NOLINTEND */', file=cfp) - print('/* clang-format on */', file=cfp) - # pylint: enable=f-string-without-interpolation - - @classmethod - def _convert_to_bool(cls, value) -> str: - return str(value).lower() - - @classmethod - def _convert_to_buffer(cls, value) -> Tuple[str, bool]: - return { - 'unbuffered': ('buffered', False), - 'buffered': ('buffered', True), - 'lifecycle': ('buffered', True), - }[value.lower()] - - @classmethod - def _convert_to_wlock(cls, value) -> bool: - return value == 'digest' - - @classmethod - def _convert_to_rlock(cls, value) -> List[Tuple[str, bool]]: - value = value.lower() - if value == 'csr': - return [('read_lock_csr', True), ('read_lock', True)] - if value == 'digest': - return 'read_lock', True - if value == 'none': - return 'read_lock', False - assert False, 'Unknown RLOCK type' - - -class OTPRegisterDef: - """OTP Partition register generator.""" - - def __init__(self, otpmap: OtpMap): - self._log = getLogger('otptool.reg') - self._otpmap = otpmap - - def save(self, hjname: str, cfp: TextIO) -> None: - """Generate a C file with register definition for the partitions.""" - reg_offsets = [] - reg_sizes = [] - part_names = [] - for part in self._otpmap.enumerate_partitions(): - part_names.append(f'OTP_PART_{part.name}') - offset = part.offset - reg_sizes.append((f'{part.name}_SIZE', part.size)) - for itname, itdict in part.items.items(): - size = itdict['size'] - if not itname.startswith(f'{part.name}_'): - name = f'{part.name}_{itname}'.upper() - else: - name = itname - reg_offsets.append((name, offset)) - reg_sizes.append((f'{name}_SIZE', size)) - offset += size - scriptname = basename(argv[0]) - print(f'/* Generated from {hjname} with {scriptname} */') - print(file=cfp) - print('/* clang-format off */', file=cfp) - for reg, off in reg_offsets: - print(f'REG32({reg}, {off}u)', file=cfp) - print(file=cfp) - regwidth = max(len(r[0]) for r in reg_sizes) - for reg, size in reg_sizes: - print(f'#define {reg:{regwidth}s} {size}u', file=cfp) - print(file=cfp) - pcount = len(part_names) - part_names.extend(( - '_OTP_PART_COUNT', - 'OTP_ENTRY_DAI = _OTP_PART_COUNT', - 'OTP_ENTRY_KDI', - '_OTP_ENTRY_COUNT')) - print('typedef enum {', file=cfp) - for pname in part_names: - print(f' {pname},', file=cfp) - print('} OtOTPPartitionType;', file=cfp) - print(file=cfp) - print('static const char *PART_NAMES[] = {', file=cfp) - for pname in part_names[:pcount]: - print(f' OTP_NAME_ENTRY({pname}),', file=cfp) - print('};', file=cfp) - print('/* clang-format on */', file=cfp) - print(file=cfp) - - -class OtpImage: - """QEMU 'RAW' OTP image.""" - - HEADER_FORMAT = { - 'magic': '4s', # "vOTP" - 'hlength': 'I', # count of header bytes after this point - 'version': 'I', # version of the header - 'eccbits': 'H', # count of ECC bits for each ECC granule - 'eccgran': 'H', # size in bytes of ECC granule - 'dlength': 'I', # count of data bytes (padded to 64-bit entries) - 'elength': 'I', # count of ecc bytes (padded to 64-bit entries) - } - - HEADER_FORMAT_V2_EXT = { - 'digiv': '8s', # Present digest scrambler IV - 'digfc': '16s', # Present digest scrambler finalization constant - } - - KINDS = { - 'OTP MEM': 'otp', - 'FUSEMAP': 'fuz', - } - - RE_VMEMLOC = r'(?i)^@((?:[0-9a-f]{2})+)\s((?:[0-9a-f]{2})+)$' - 
RE_VMEMDESC = r'(?i)^//\s?([\w\s]+) file with (\d+)[^\d]*(\d+)\s?bit layout' - - DEFAULT_ECC_BITS = 6 - - def __init__(self, ecc_bits: Optional[int] = None): - self._log = getLogger('otptool.img') - self._header: Dict[str, Any] = {} - self._magic = b'' - self._data = b'' - self._ecc = b'' - if ecc_bits is None: - ecc_bits = self.DEFAULT_ECC_BITS - self._ecc_bits = ecc_bits - self._ecc_bytes = (ecc_bits + 7) // 8 - self._ecc_granule = 0 - self._digest_iv: Optional[int] = None - self._digest_constant: Optional[int] = None - self._partitions: List[OtpPartition] = [] - - @property - def version(self) -> int: - """Provide the version of the RAW image.""" - return self._header.get('version', 0) - - @property - def loaded(self) -> int: - """Report whether data have been loaded into the image.""" - return len(self._data) > 0 - - @property - def is_opentitan(self) -> bool: - """Report whether the current image contains OpenTitan OTP data.""" - return self._magic == b'vOTP' - - @classproperty - def vmem_kinds(cls) -> List[str]: - """Reports the supported content kinds of VMEM files.""" - # pylint: disable=no-self-argument - return ['auto'] + list(cls.KINDS.values()) - - @classproperty - def logger(self): - """Return logger instance.""" - # pylint: disable=no-self-argument - return getLogger('otptool') - - def load_raw(self, rfp: BinaryIO) -> None: - """Load OTP image from a QEMU 'RAW' image stream.""" - header = self._load_header(rfp) - self._header = header - self._data = rfp.read(header['dlength']) - self._ecc = rfp.read(header['elength']) - if header['version'] > 1: - self._digest_iv = header['digiv'] - self._digest_constant = header['digfc'] - - def save_raw(self, rfp: BinaryIO) -> None: - """Save OTP image as a QEMU 'RAW' image stream.""" - header = self._build_header() - rfp.write(header) - self._pad(rfp) - rfp.write(self._data) - self._pad(rfp) - rfp.write(self._ecc) - self._pad(rfp, 4096) - - def load_vmem(self, vfp: TextIO, vmem_kind: Optional[str] = None, - swap: bool = True): - """Parse a VMEM '24' text stream.""" - data_buf: List[bytes] = [] - ecc_buf: List[bytes] = [] - last_addr = 0 - granule_sizes: Set[int] = set() - vkind: Optional[str] = None - row_count = 0 - byte_count = 0 - line_count = 0 - if vmem_kind: - vmem_kind = vmem_kind.lower() - if vmem_kind == 'auto': - vmem_kind = None - if vmem_kind and vmem_kind not in self.KINDS.values(): - raise ValueError(f"Unknown VMEM file kind '{vmem_kind}'") - for lno, line in enumerate(vfp, start=1): - if vkind is None: - kmo = re_match(self.RE_VMEMDESC, line) - if kmo: - vkind = kmo.group(1) - row_count = int(kmo.group(2)) - bits = int(kmo.group(3)) - byte_count = bits // 8 - continue - line = re_sub(r'//.*', '', line) - line = line.strip() - if not line: - continue - lmo = re_match(self.RE_VMEMLOC, line) - if not lmo: - self._log.error('Unexpected line @ %d: %s', lno, line) - continue - line_count += 1 - saddr, sdata = lmo.groups() - addr = int(saddr, 16) - if last_addr < addr: - self._log.info('Padding addr from 0x%04x to 0x%04x', - last_addr, addr) - data_buf.append(bytes(addr-last_addr)) - rdata = unhexlify(sdata) - if byte_count != len(rdata): - self._log.warning('Expected %d bytes @ line %s, found %d', - byte_count, lno, len(sdata)) - ecc, data = rdata[:self._ecc_bytes], rdata[self._ecc_bytes:] - if swap: - data = bytes(reversed(data)) - data_buf.append(data) - ecc_buf.append(ecc) - dlen = len(data) - granule_sizes.add(dlen) - last_addr = addr+dlen # ECC is not accounted for in address - self._data = b''.join(data_buf) - self._ecc = 
b''.join(ecc_buf) - if granule_sizes: - if len(granule_sizes) != 1: - raise ValueError('Variable data size') - self._ecc_granule = granule_sizes.pop() - if row_count and row_count != line_count: - self._log.error('Should have parsed %d lines, found %d', - row_count, line_count) - if not vkind: - if vmem_kind: - vkind = vmem_kind - else: - vkind = self.KINDS.get(vkind.upper(), None) - if vmem_kind: - if vkind and vkind != vmem_kind: - self._log.warning("Detected VMEM kind '%s' differs from " - "'%s'", vkind, vmem_kind) - # use user provided type, even if it is not the one detected - vkind = vmem_kind - if not vkind: - raise ValueError('Unable to detect VMEM find, please specify') - self._magic = f'v{vkind[:3].upper()}'.encode() - - def load_lifecycle(self, lcext: OtpLifecycleExtension) -> None: - """Load lifecyle values.""" - for part in self._partitions: - if part.name == 'LIFE_CYCLE': - part.set_decoder(lcext) - - # pylint: disable=invalid-name - def set_digest_iv(self, iv: int) -> None: - """Set the Present digest initialization 64-bit vector.""" - if iv >> 64: - raise ValueError('Invalid digest initialization vector') - self._digest_iv = iv - - def set_digest_constant(self, constant: int) -> None: - """Set the Present digest finalization 128-bit constant.""" - if constant >> 128: - raise ValueError('Invalid digest finalization constant') - self._digest_constant = constant - - @property - def has_present_constants(self) -> bool: - """Reports whether the Present scrambler constants are known/defined.""" - return self._digest_iv is not None and self._digest_constant is not None - - def dispatch(self, cfg: OtpMap) -> None: - """Dispatch RAW image data into the partitions.""" - bfp = BytesIO(self._data) - for part in cfg.enumerate_partitions(): - self._log.debug('%s %d', part.name, bfp.tell()) - part.load(bfp) - self._partitions.append(part) - # all data bytes should have been dispatched into the partitions - assert bfp.tell() == len(self._data), 'Unexpected remaining data bytes' - if self._header: - data_size = self._header.get('dlength', 0) - assert bfp.tell() == data_size, 'Unexpected remaining data bytes' - - def verify(self, show: bool = False) -> bool: - """Verify the partition digests, if any.""" - if any(c is None for c in (self._digest_iv, self._digest_constant)): - raise RuntimeError('Missing Present constants') - results: Dict[str, Optional[bool]] = {} - for part in self._partitions: - if not part.hw_digest: - continue - results[part.name] = part.verify(self._digest_iv, - self._digest_constant) - if show: - print('HW digests:') - width = max(len(x) for x in results) - for name, result in results.items(): - if result is None: - status = 'No digest' - elif result: - status = 'OK' - else: - status = 'Failed' - print(f' * {name:{width}s}: {status}') - # any partition with a defined digest should be valid - return not any(r is False for r in results.values()) - - def decode(self, decode: bool = True, wide: int = 0, - ofp: Optional[TextIO] = None) -> None: - """Decode the content of the image, one partition at a time.""" - version = self.version - if version: - print(f'OTP image v{version}') - if version > 1: - print(f' * present iv {self._digest_iv:016x}') - print(f' * present constant {self._digest_constant:032x}') - for part in self._partitions: - part.decode(decode, wide, ofp) - - def _load_header(self, bfp: BinaryIO) -> Dict[str, Any]: - hfmt = self.HEADER_FORMAT - fhfmt = ''.join(hfmt.values()) - # hlength is the length of header minus the two first items (T, L) - fhsize = 
scalc(fhfmt) - hdata = bfp.read(fhsize) - parts = sunpack(f'<{fhfmt}', hdata) - header = dict(zip(hfmt.keys(), parts)) - magics = set(f'v{k.upper()}'.encode() for k in self.KINDS.values()) - if header['magic'] not in magics: - raise ValueError(f'{bfp.name} is not a QEMU OTP RAW image') - self._magic = header['magic'] - version = header['version'] - if version > 2: - raise ValueError(f'{bfp.name} is not a valid QEMU OTP RAW image') - if version > 1: - hfmt = self.HEADER_FORMAT_V2_EXT - fhfmt = ''.join(hfmt.values()) - fhsize = scalc(fhfmt) - hdata = bfp.read(fhsize) - parts = sunpack(f'<{fhfmt}', hdata) - headerv2 = dict(zip(hfmt.keys(), - (HexInt(int.from_bytes(v, 'little')) - for v in parts))) - header.update(headerv2) - return header - - def _build_header(self) -> bytes: - assert self._magic, "File kind unknown" - hfmt = self.HEADER_FORMAT - # use V2 image format if Present scrambling constants are available, - # otherwise use V1 - use_v2 = bool(self._digest_iv) or bool(self._digest_constant) - if use_v2: - hfmt.update(self.HEADER_FORMAT_V2_EXT) - fhfmt = ''.join(hfmt.values()) - shfmt = ''.join(hfmt[k] for k in list(hfmt)[:2]) - # hlength is the length of header minus the two first items (T, L) - hlen = scalc(fhfmt)-scalc(shfmt) - dlen = (len(self._data)+7) & ~0x7 - elen = (len(self._ecc)+7) & ~0x7 - values = dict(magic=self._magic, hlength=hlen, version=1+int(use_v2), - eccbits=self._ecc_bits, eccgran=self._ecc_granule, - dlength=dlen, elength=elen) - if use_v2: - values['digiv'] = self._digest_iv.to_bytes(8, byteorder='little') - values['digfc'] = self._digest_constant.to_bytes(16, - byteorder='little') - args = [values[k] for k in hfmt] - header = spack(f'<{fhfmt}', *args) - return header - - def _pad(self, bfp: BinaryIO, padsize: Optional[int] = None): - if padsize is None: - padsize = OtpMap.BLOCK_SIZE - tail = bfp.tell() % padsize - if tail: - bfp.write(bytes(padsize-tail)) - - def main(): """Main routine""" debug = True @@ -995,7 +100,7 @@ def main(): if not args.otp_map: if args.generate_parts or args.generate_regs: - argparser.error('Generator require an OTP map') + argparser.error('Generator requires an OTP map') if args.show: argparser.error('Cannot decode OTP values without an OTP map') if args.digest: @@ -1012,11 +117,12 @@ def main(): if args.generate_parts: partdesc = OTPPartitionDesc(otpmap) - partdesc.save(basename(args.otp_map.name), output) + partdesc.save(basename(args.otp_map.name), basename(argv[0]), + output) if args.generate_regs: regdef = OTPRegisterDef(otpmap) - regdef.save(basename(args.otp_map.name), output) + regdef.save(basename(args.otp_map.name), basename(argv[0]), output) if args.generate_lc: if not lcext: From 57bee6f80f34551f3260b884d43cf98b12dd079e Mon Sep 17 00:00:00 2001 From: Emmanuel Blot Date: Thu, 11 Apr 2024 20:40:18 +0200 Subject: [PATCH 17/27] [ot] scripts/opentitan: create a new tool to access OTP controller over DM Signed-off-by: Emmanuel Blot --- docs/opentitan/dtm.md | 2 +- docs/opentitan/jtag-dm.md | 3 + docs/opentitan/otpdm.md | 135 +++++++++++ docs/opentitan/tools.md | 2 + scripts/opentitan/ot/dm/__init__.py | 2 +- scripts/opentitan/ot/dm/otp.py | 354 ++++++++++++++++++++++++++++ scripts/opentitan/otpdm.py | 205 ++++++++++++++++ 7 files changed, 701 insertions(+), 2 deletions(-) create mode 100644 docs/opentitan/otpdm.md create mode 100644 scripts/opentitan/ot/dm/otp.py create mode 100755 scripts/opentitan/otpdm.py diff --git a/docs/opentitan/dtm.md b/docs/opentitan/dtm.md index 5eedd734271ce..b1612956a7e44 100644 --- 
a/docs/opentitan/dtm.md
+++ b/docs/opentitan/dtm.md
@@ -80,7 +80,7 @@ Extras:
 
 * `-P` specify the TCP port of the JTAG server in the QEMU VM, should match the port part of
   `-jtag` option for invoking QEMU.
-* `-Q` to not send QEMU a request for termination when this script exits.
+* `-Q` do not send QEMU a request for termination when this script exits.
 * `-s` specify the number of bytes to read from or write to memory. Useful with the `--mem` option.
   See also the `--address` option. This option may be omitted for the `write` memory operation, in
diff --git a/docs/opentitan/jtag-dm.md b/docs/opentitan/jtag-dm.md
index 26990d51d518d..f2f94676dac5e 100644
--- a/docs/opentitan/jtag-dm.md
+++ b/docs/opentitan/jtag-dm.md
@@ -126,3 +126,6 @@ JTAG/DTM/DM stack.
 
 A demo application is available from [`scripts/opentitan/dtm.py`](dtm.md) that can report basic
 information about this stack and demonstrate how to use the Debug Module to access the Ibex core.
+
+The [`scripts/opentitan/otpdm.py`](otpdm.md) also uses the same stack to access the cells of the OTP
+controller.
diff --git a/docs/opentitan/otpdm.md b/docs/opentitan/otpdm.md
new file mode 100644
index 0000000000000..9d0502172e485
--- /dev/null
+++ b/docs/opentitan/otpdm.md
@@ -0,0 +1,135 @@
+# `otpdm.py`
+
+`otpdm.py` gives access to the OTP Controller through the JTAG/DTM/DM interface.
+
+## Usage
+
+````text
+usage: otpdm.py [-h] [-H HOST] [-P PORT] [-Q] [-l IR_LENGTH] [-b BASE] [-j HJSON] [-a ADDRESS]
+                [-p PARTITION] [-i ITEM] [-L] [-r] [-w WRITE] [-D] [-v] [-d]
+
+OTP controller access through the RISC-V Debug Module
+
+options:
+  -h, --help            show this help message and exit
+
+Virtual machine:
+  -H HOST, --host HOST  JTAG host (default: localhost)
+  -P PORT, --port PORT  JTAG port, default: 3335
+  -Q, --no-quit         do not ask the QEMU to quit on exit
+
+DMI:
+  -l IR_LENGTH, --ir-length IR_LENGTH
+                        bit length of the IR register
+  -b BASE, --base BASE  define DMI base address
+
+OTP:
+  -j HJSON, --otp-map HJSON
+                        input OTP controller memory map file
+  -a ADDRESS, --address ADDRESS
+                        base address the OTP controller, default: 0x30130000
+  -p PARTITION, --partition PARTITION
+                        select a partition
+  -i ITEM, --item ITEM  select a partition item
+  -L, --list            list the partitions and/or the items
+  -r, --read            read the value of the selected item
+  -w WRITE, --write WRITE
+                        write the value to the selected item
+  -D, --digest          show the OTP HW partition digests
+
+Extras:
+  -v, --verbose         increase verbosity
+  -d, --debug           enable debug mode
+````
+
+### Arguments
+
+* `-a` specify an alternative address for the OTP controller on the OT bus.
+
+* `-b` specify the DMI base address for the RISC-V Debug Module.
+
+* `-D` show partition digest(s). If no partition is selected (see option `-p`), the digests of all
+  partitions are shown; otherwise the digest of the selected partition is shown. Requires
+  option `-j`.
+
+* `-d` only useful to debug the script, reports any Python traceback to the standard error stream.
+
+* `-H` specify the address of the QEMU VM.
+
+* `-i` select a specific item from a partition. See option `-L` to get a list of valid item names
+  for the currently selected partition. Requires the `-p` option.
+
+* `-j` specify the path to the OpenTitan OTP controller map, _e.g._ `otp_ctrl_mmap.hjson`.
+
+* `-L` list the names of the partitions if no partition is selected (see option `-p`). If option
+  `-p` is used, list the names of the selected partition items if no item is selected (see
+  option `-i`). If option `-i` is used, show all the properties of the selected item.
+
+* `-l` specify the bit length of the TAP instruction register.
+
+* `-P` specify the TCP port of the JTAG server in the QEMU VM, should match the port part of `-jtag`
+  option for invoking QEMU.
+
+* `-p` select a partition using its name. See option `-L` to get a list of valid partition names.
+  Requires option `-j`.
+
+* `-Q` do not send QEMU a request for termination when this script exits.
+
+* `-r` load and show the value of the selected item. Requires options `-p` and `-i`.
+
+* `-v` can be repeated to increase verbosity of the script, mostly for debug purposes.
+
+* `-w` store the value into the selected item. Requires options `-p` and `-i`. The value should be
+  specified as a hexadecimal or decimal integer for items whose size is less than or equal to 8
+  bytes. For larger items, the value should be specified as a sequence of hex-encoded bytes.
+
+### Examples
+
+Running QEMU VM with the `-jtag tcp::3335` option:
+
+* List all supported partitions
+  ````sh
+  ./scripts/opentitan/otpdm.py -j .../otp_ctrl_mmap.hjson -L
+  ````
+
+* List all supported items of a partition
+  ````sh
+  ./scripts/opentitan/otpdm.py -j .../otp_ctrl_mmap.hjson -p HW_CFG1 -L
+  ````
+
+* List all properties of an item
+  ````sh
+  ./scripts/opentitan/otpdm.py -j .../otp_ctrl_mmap.hjson -p HW_CFG1 -i EN_SRAM_IFETCH -L
+  ````
+
+* Show all digests
+  ````sh
+  ./scripts/opentitan/otpdm.py -j .../otp_ctrl_mmap.hjson -D
+  ````
+
+* Show the digest of a single partition (parsable output)
+  ````sh
+  ./scripts/opentitan/otpdm.py -j .../otp_ctrl_mmap.hjson -p HW_CFG1 -D
+  ````
+
+* Read the value of an item along with the item properties
+  ````sh
+  ./scripts/opentitan/otpdm.py -j .../otp_ctrl_mmap.hjson -p HW_CFG1 -i EN_SRAM_IFETCH -r
+  ````
+
+* Read the value of an item (parsable output)
+  ````sh
+  ./scripts/opentitan/otpdm.py -j .../otp_ctrl_mmap.hjson -p HW_CFG1 -i EN_SRAM_IFETCH -r
+  ````
+
+* Write the value of an integer item
+  ````sh
+  ./scripts/opentitan/otpdm.py -j .../otp_ctrl_mmap.hjson -p HW_CFG1 -i EN_SRAM_IFETCH -w 0xff
+  ````
+
+* Write the value of a long item
+  ````sh
+  ./scripts/opentitan/otpdm.py -j .../otp_ctrl_mmap.hjson -p HW_CFG0 -i DEVICE_ID \
+    -w 4c6f72656d20697073756d20646f6c6f722073697420616d65742c20636f6e73
+  ````
+
diff --git a/docs/opentitan/tools.md b/docs/opentitan/tools.md
index 5cefc1b7dbd7f..b2ecdfadc3b72 100644
--- a/docs/opentitan/tools.md
+++ b/docs/opentitan/tools.md
@@ -17,6 +17,8 @@ directory to help with these tasks.
 
 ## Companion file management
 
+* [`otpdm.py`](otpdm.md) can be used to access the OTP Controller over a JTAG/DTM/DM link. It reads
+  out partition item values and can update them.
 * [`otptool.py`](otptool.md) can be used to generate an OTP image from a OTP VMEM file and can be
   used to decode (some of the) encoded data in the OTP image.
 * [`flashgen.py`](flashgen.md) can be used to generate a flash image with either a ROM_EXT and BL0
diff --git a/scripts/opentitan/ot/dm/__init__.py b/scripts/opentitan/ot/dm/__init__.py
index 7dc6f558a1619..04979ed7b3437 100644
--- a/scripts/opentitan/ot/dm/__init__.py
+++ b/scripts/opentitan/ot/dm/__init__.py
@@ -6,4 +6,4 @@
    :author: Emmanuel Blot
 """
 
-from .dm import DebugModule
+from .dm import DebugModule  # noqa: F401
diff --git a/scripts/opentitan/ot/dm/otp.py b/scripts/opentitan/ot/dm/otp.py
new file mode 100644
index 0000000000000..b9339e2c56cec
--- /dev/null
+++ b/scripts/opentitan/ot/dm/otp.py
@@ -0,0 +1,354 @@
+# Copyright (c) 2024, Rivos, Inc.
+# SPDX-License-Identifier: Apache2 + +"""One-Time Programmable controller. + + :author: Emmanuel Blot +""" + +from binascii import unhexlify +from enum import IntEnum +from logging import getLogger +from time import sleep, time as now +from typing import Dict, Optional, Tuple + +from .dm import DebugModule +from ..bitfield import BitField +from ..otp import OtpMap, OtpPartition +from ..util.misc import HexInt + +# pylint: disable=missing-function-docstring + + +class OTPException(Exception): + """Base exception.""" + + +class OTPBusyError(OTPException): + """OTP controller is busy.""" + + +class OTPError(OTPException): + """OTP controller error.""" + + def __init__(self, err_code: 'OTPController.ERR_CODE'): + super().__init__(f'{err_code!r}') + self.err_code = err_code + + +class OTPController: + """ + Only support Darjeeling variant of the controller + Only support the Direct Access Interface for now + """ + + TOKENS = [ + 'invalid', + 'zero', + 'raw_unlock', + 'test_unlock', + 'test_exit', + 'rma', + ] + + REGISTERS = { + 'status': 0x10, + 'err_code': 0x14, + 'regwen': 0x74, + 'cmd': 0x78, + 'address': 0x7c, + 'wdata_0': 0x80, + 'wdata_1': 0x84, + 'rdata_0': 0x88, + 'rdata_1': 0x8c, + } + """Darjeeling registers""" + + ERR_CODE = IntEnum('err_code', + ['none', 'macro', 'macro_ecc_corr', 'macro_ecc_uncorr', + 'macro_write_blank', 'access', 'check_fail', + 'fsm_state'], start=0) + + ERROR_COUNT = 24 + + BITFIELDS = dict( + STATUS=BitField({ + 'vendor_test_error': (0, 1), + 'creator_sw_cfg_error': (1, 1), + 'owner_sw_cfg_error': (2, 1), + 'ownership_slot_state_error': (3, 1), + 'rot_creator_auth_error': (4, 1), + 'rot_owner_auth_slot0_error': (5, 1), + 'rot_owner_auth_slot1_error': (6, 1), + 'plat_integ_auth_slot0_error': (7, 1), + 'plat_integ_auth_slot1_error': (8, 1), + 'plat_owner_auth_slot0_error': (9, 1), + 'plat_owner_auth_slot1_error': (10, 1), + 'plat_owner_auth_slot2_error': (11, 1), + 'plat_owner_auth_slot3_error': (12, 1), + 'ext_nvm_error': (13, 1), + 'rom_patch_error': (14, 1), + 'hw_cfg0_error': (15, 1), + 'hw_cfg1_error': (16, 1), + 'secret0_error': (17, 1), + 'secret1_error': (18, 1), + 'secret2_error': (19, 1), + 'secret3_error': (20, 1), + 'life_cycle_error': (21, 1), + 'dai_error': (22, 1), + 'lci_error': (23, 1), + 'timeout_error': (24, 1), + 'lfsr_fsm_error': (25, 1), + 'scrambling_fsm_error': (26, 1), + 'key_deriv_fsm_error': (27, 1), + 'bus_integ_error': (28, 1), + 'dai_idle': (29, 1), + 'check_pending': (30, 1), + }), + REGWEN=BitField({ + 'en': (0, 1), + }), + CMD=BitField({ + 'rd': (0, 1), + 'wr': (1, 1), + 'digest': (2, 1) + }), + ADDRESS=BitField({ + 'address': (0, 14), + }), + ERR_CODE=BitField({ + 'err_code': (0, 3, ERR_CODE), + }) + ) + + def __init__(self, dbgmod: DebugModule, base: int): + self._log = getLogger('dtm.otp') + self._dm = dbgmod + self._base = base + self._max_addr = self.BITFIELDS['ADDRESS'].encode(address=-1) + self._map: Optional[OtpMap] = None + self._partitions: Dict[str, OtpPartition] = {} + self._item_offsets: Dict[str, # partition name + Dict[str, # item name + Tuple[int, # offset + int]]] = {} # size + + def set_map(self, otpmap: 'OtpMap'): + self._map = otpmap + self._partitions = {p.name: p for p in self._map.enumerate_partitions()} + self._fill_item_offsets() + + def get_hw_partition_digest(self, partname: str) -> int: + if not self._map: + raise RuntimeError('Partition map not loaded') + try: + part = self._partitions[partname.upper()] + except KeyError as exc: + raise ValueError(f"No such partition '{partname}'") from exc + if not 
part.hw_digest: + raise ValueError(f"No HW digest in partition '{partname}'") + self._dai_prepare(part.digest_offset) + self._dai_execute_command('read') + digest = self._dai_read64() + self._log.info('%s HW digest %016x', partname, digest) + return digest + + def get_hw_partition_digests(self) -> Dict[str, int]: + digests: Dict[str, int] = {} + for name, part in self._partitions.items(): + if not part.hw_digest: + continue + digests[name] = self.get_hw_partition_digest(name) + return digests + + @classmethod + def is_wide_granule(cls, partition: OtpPartition, offset: int) -> bool: + return partition.secret or (partition.digest_offset == offset & ~0b111) + + def read_partition_item(self, partname: str, itemname: str) \ + -> [int | bytes]: + pname = partname.upper() + try: + part = self._partitions[pname] + items = self._item_offsets[pname] + except KeyError as exc: + raise ValueError(f"No such partition '{partname}'") from exc + try: + ioffset, size = items[itemname.upper()] + except KeyError as exc: + raise ValueError(f"No such item '{itemname}' in partition " + f"'{partname}'") from exc + if size == 4: + self._dai_execute_read(part.offset + ioffset) + return self._dai_read32() + wide_granule = self.is_wide_granule(part, ioffset) + if wide_granule and size == 8: + self._dai_execute_read(part.offset + ioffset) + return self._dai_read64() + buffer = bytearray() + pos = 0 + while size > 0: + self._dai_execute_read(part.offset + ioffset + pos) + if wide_granule and size >= 8: + val = self._dai_read64() + buffer.extend(val.to_bytes(8, 'little')) + pos += 8 + size -= 8 + continue + if size >= 4: + val = self._dai_read32() + buffer.extend(val.to_bytes(4, 'little')) + pos += 4 + size -= 4 + continue + raise RuntimeError('Invalid item size') + return bytes(buffer) + + def write_partition_item(self, partname: str, itemname: str, + value: [int | bytes | bytearray | str]) -> None: + pname = partname.upper() + try: + part = self._partitions[pname] + items = self._item_offsets[pname] + except KeyError as exc: + raise ValueError(f"No such partition '{partname}'") from exc + try: + ioffset, size = items[itemname.upper()] + except KeyError as exc: + raise ValueError(f"No such item '{itemname}' in partition " + f"'{partname}'") from exc + if isinstance(value, int): + if size not in (4, 8): + raise ValueError(f'{itemname} expects a {size}-byte long value') + elif size in (4, 8): + value = HexInt.parse(value) + else: + if isinstance(value, str): + try: + value = unhexlify(value) + except ValueError as exc: + raise ValueError(f'Invalid hexa string: {exc}') from exc + if isinstance(value, (bytes, bytearray)): + vlen = len(value) + if size != vlen: + raise ValueError(f'{itemname} expects a {size}-byte long ' + f'value, value is {vlen}-byte long') + else: + raise TypeError(f'Invalid value type for {itemname}') + if size == 4: + self._dai_prepare(part.offset + ioffset) + self._dai_write32(value) + self._dai_execute_write() + return + wide_granule = self.is_wide_granule(part, ioffset) + if wide_granule and size == 8: + self._dai_prepare(part.offset + ioffset) + self._dai_write64(value) + self._dai_execute_write() + return + pos = 0 + while size > 0: + self._dai_prepare(part.offset + ioffset + pos) + if wide_granule and size >= 8: + val = int.from_bytes(value[pos:pos+8], 'little') + self._dai_write64(val) + self._dai_execute_write() + pos += 8 + size -= 8 + continue + if size >= 4: + val = int.from_bytes(value[pos:pos+4], 'little') + self._dai_write32(val) + self._dai_execute_write() + pos += 4 + size -= 4 + 
continue + raise RuntimeError('Invalid item size') + + def _dai_execute_read(self, offset: int) -> int: + self._dai_prepare(offset) + self._dai_execute_command('read') + self._dai_wait_completion() + + def _dai_execute_write(self) -> int: + self._dai_execute_command('write') + self._dai_wait_completion() + + def _dai_prepare(self, offset: int) -> int: + self._expect_dai_idle() + assert not self._is_dai_busy(), "DAI busy" + self._dai_set_address(offset) + + def _dai_wait_completion(self): + self._expect_dai_idle() + assert not self._is_dai_busy(), "DAI busy" + dai_error = self._get_dai_error() + if dai_error != self.ERR_CODE.none: + raise OTPError(dai_error) + if not self._is_dai_regwen(): + raise OTPBusyError() + + def _dai_execute_command(self, command: str) -> None: + if not self._is_dai_regwen(): + raise OTPBusyError() + try: + cmd = {'read': 'rd', 'write': 'wr', 'digest': 'digest'}[command] + except KeyError as exc: + raise ValueError(f'Unsupported command {command}') from exc + cmdargs = {cmd: True} + val = self.BITFIELDS['CMD'].encode(**cmdargs) + self._dm.write32(self._base + self.REGISTERS['cmd'], val) + + def _dai_set_address(self, address: int) -> None: + if address > self._max_addr: + raise ValueError(f'Invalid OTP address {address}') + if not self._is_dai_regwen(): + raise OTPBusyError() + self._dm.write32(self._base + self.REGISTERS['address'], address) + + def _dai_read32(self) -> int: + return self._dm.read32(self._base + self.REGISTERS['rdata_0']) + + def _dai_read64(self) -> int: + return self._dm.read64(self._base + self.REGISTERS['rdata_0']) + + def _dai_write32(self, val: int) -> None: + self._dm.write32(self._base + self.REGISTERS['wdata_0'], val) + + def _dai_write64(self, val: int) -> None: + self._dm.write64(self._base + self.REGISTERS['wdata_0'], val) + + def _is_dai_regwen(self) -> bool: + val = self._dm.read32(self._base + self.REGISTERS['regwen']) + return self.BITFIELDS['REGWEN'].decode(val)['en'] + + def _is_dai_busy(self) -> bool: + val = self._dm.read32(self._base + self.REGISTERS['status']) + return not self.BITFIELDS['STATUS'].decode(val)['dai_idle'] + + def _get_dai_error(self) -> 'OTPController.ERR_CODE': + return self._get_error_code(self.ERROR_COUNT-2) + + def _get_error_code(self, slot: int) -> 'OTPController.ERR_CODE': + if slot >= self.ERROR_COUNT: + raise ValueError(f'Invalid slot {slot}') + val = self._dm.read32(self._base + self.REGISTERS['err_code'] + + 4 * slot) + return self.BITFIELDS['ERR_CODE'].decode(val)['err_code'] + + def _expect_dai_idle(self, timeout: float = 0.5): + timeout += now() + while now() < timeout: + if self._is_dai_regwen(): + return + sleep(0.05) + raise TimeoutError('DAI stalled') + + def _fill_item_offsets(self) -> None: + for pname, part in self._partitions.items(): + items = self._item_offsets[pname] = {} + offset = 0 + for iname, item in part.items.items(): + size = item['size'] + items[iname] = offset, size + offset += size diff --git a/scripts/opentitan/otpdm.py b/scripts/opentitan/otpdm.py new file mode 100755 index 0000000000000..d790806f39455 --- /dev/null +++ b/scripts/opentitan/otpdm.py @@ -0,0 +1,205 @@ +#!/usr/bin/env python3 + +# Copyright (c) 2024, Rivos, Inc. +# SPDX-License-Identifier: Apache2 + +"""OTP controller access through the RISC-V Debug Module. 
+ + :author: Emmanuel Blot +""" + +from argparse import ArgumentParser, Namespace, FileType +from binascii import hexlify +from io import StringIO +from os import linesep +from os.path import dirname, join as joinpath, normpath +from socket import create_connection +from traceback import format_exc +from typing import Optional +import sys + +# pylint: disable=wrong-import-position +# pylint: disable=wrong-import-order +# pylint: disable=import-error + +# JTAG module is available from the scripts/ directory +sys.path.append(joinpath(normpath(dirname(dirname(sys.argv[0]))))) + +from ot.util.log import configure_loggers # noqa: E402 +from ot.util.misc import HexInt, dump_buffer # noqa: E402 +from ot.dtm import DebugTransportModule # noqa: E402 +from ot.dm import DebugModule # noqa: E402 +from ot.dm.otp import OTPController # noqa: E402 +from ot.otp import OtpMap # noqa: E402 +from jtag.bitbang import JtagBitbangController # noqa: E402 +from jtag.jtag import JtagEngine # noqa: E402 + + +DEFAULT_OTP_BASE_ADDRESS = 0x30130000 +"""Default base address of the OTP controller on the OT local crossbar.""" + +def main(): + """Entry point.""" + debug = True + try: + args: Optional[Namespace] = None + argparser = ArgumentParser( + description=sys.modules[__name__].__doc__.split('.')[0]) + qvm = argparser.add_argument_group(title='Virtual machine') + qvm.add_argument('-H', '--host', default='127.0.0.1', + help='JTAG host (default: localhost)') + qvm.add_argument('-P', '--port', type=int, + default=JtagBitbangController.DEFAULT_PORT, + help=f'JTAG port, ' + f'default: {JtagBitbangController.DEFAULT_PORT}') + qvm.add_argument('-Q', '--no-quit', action='store_true', default=False, + help='do not ask the QEMU to quit on exit') + dmi = argparser.add_argument_group(title='DMI') + dmi.add_argument('-l', '--ir-length', type=int, default=5, + help='bit length of the IR register') + dmi.add_argument('-b', '--base', type=HexInt.parse, default=0, + help='define DMI base address') + otp = argparser.add_argument_group(title='OTP') + otp.add_argument('-j', '--otp-map', type=FileType('rt'), + metavar='HJSON', + help='input OTP controller memory map file') + otp.add_argument('-a', '--address', type=HexInt.parse, + default=DEFAULT_OTP_BASE_ADDRESS, + help=f'base address the OTP controller, default: ' + f'0x{DEFAULT_OTP_BASE_ADDRESS:08x}') + otp.add_argument('-p', '--partition', + help='select a partition') + otp.add_argument('-i', '--item', + help='select a partition item') + otp.add_argument('-L', '--list', action='store_true', + help='list the partitions and/or the items') + otp.add_argument('-r', '--read', action='store_true', + help='read the value of the selected item') + otp.add_argument('-w', '--write', + help='write the value to the selected item') + otp.add_argument('-D', '--digest', action='store_true', + help='show the OTP HW partition digests') + extra = argparser.add_argument_group(title='Extras') + extra.add_argument('-v', '--verbose', action='count', + help='increase verbosity') + extra.add_argument('-d', '--debug', action='store_true', + help='enable debug mode') + + args = argparser.parse_args() + debug = args.debug + + configure_loggers(args.verbose, 'dtm.rvdm', 'dtm.otp', -1, 'dtm', + 'jtag') + + sock = create_connection((args.host, args.port), timeout=0.5) + sock.settimeout(0.1) + + if args.otp_map: + otpmap = OtpMap() + otpmap.load(args.otp_map) + args.otp_map.close() + else: + if args.digest: + argparser.error('Digest feature requires an OTP map') + otpmap = None + ctrl = JtagBitbangController(sock) 
+ eng = JtagEngine(ctrl) + ctrl.tap_reset(True) + ir_length = args.ir_length + dtm = DebugTransportModule(eng, ir_length) + rvdm = DebugModule(dtm, args.base) + try: + rvdm.initialize() + rvdm.halt() + otp = OTPController(rvdm, base=0x30130000) + partition = None + item = None + if otpmap: + otp.set_map(otpmap) + elif any((getattr(args, a) for a in + ('partition', 'item', 'list', 'read', 'write', 'digest'))): + argparser.error('OTP map is required for this operation') + if args.partition: + partname = args.partition.upper() + try: + partition = otpmap.partitions[partname] + except KeyError: + argparser.error(f"Unknown partition '{args.partition}'") + else: + partition = None + partname = None + if args.item: + if not partition: + argparser.error('Missing partition for selecting item') + item_name = args.item.upper() + item = None + for entry in partition['items']: + if entry.get('name', '') == item_name: + item = entry + break + else: + argparser.error(f"Unknown item '{args.item}") + else: + item = None + if args.digest: + if partition is None: + print('HW digests:') + for name, digest in otp.get_hw_partition_digests().items(): + print(f' * {name:10} 0x{digest:016x}') + else: + digest = otp.get_hw_partition_digest(partname) + print(f'0x{digest:016x}') + if args.list: + if partition: + if not item: + print('Items:') + for item in partition.get('items', []): + name = item['name'] + print(f' * {name}') + else: + print(item['name']) + for name, value in item.items(): + if name == 'name': + continue + print(f' * {name}: {value}') + else: + print('Partitions:') + for part in otpmap.enumerate_partitions(): + print(f' * {part.name}') + if args.read or args.write: + if not partition or not partname: + argparser.error('Partition is required for this operation') + if not item: + argparser.error('Item is required for this operation') + if args.read: + val = otp.read_partition_item(partname, item['name']) + if isinstance(val, int): + pad = ' * value: ' if args.list else '' + print(f'{pad}0x{val:x}') + else: + if args.list: + out = StringIO() + dump_buffer(val, file=out) + print(' * value: |') + for line in out.getvalue().split('\n'): + print(f' {line}') + else: + print(hexlify(val).decode()) + if args.write: + otp.write_partition_item(partname, item['name'], args.write) + finally: + if not args.no_quit: + ctrl.quit() + + # pylint: disable=broad-except + except Exception as exc: + print(f'{linesep}Error: {exc}', file=sys.stderr) + if debug: + print(format_exc(chain=False), file=sys.stderr) + sys.exit(1) + except KeyboardInterrupt: + sys.exit(2) + + +if __name__ == '__main__': + main() From e23d08fd0ab6575258a9c5e5792f40c663108f57 Mon Sep 17 00:00:00 2001 From: Emmanuel Blot Date: Mon, 15 Apr 2024 15:57:09 +0200 Subject: [PATCH 18/27] [ot] hw/opentitan: ot_rom_ctrl: add a trace when no ROM image is defined Signed-off-by: Emmanuel Blot --- hw/opentitan/ot_rom_ctrl.c | 1 + hw/opentitan/trace-events | 1 + 2 files changed, 2 insertions(+) diff --git a/hw/opentitan/ot_rom_ctrl.c b/hw/opentitan/ot_rom_ctrl.c index d5e76832096af..3d429e357752b 100644 --- a/hw/opentitan/ot_rom_ctrl.c +++ b/hw/opentitan/ot_rom_ctrl.c @@ -225,6 +225,7 @@ static void ot_rom_ctrl_load_rom(OtRomCtrlState *s) /* try to find our ROM image object */ obj = object_resolve_path_component(object_get_objects_root(), s->ot_id); if (!obj) { + trace_ot_rom_ctrl_load_rom_no_image(s->ot_id); return; } rom_img = (OtRomImg *)object_dynamic_cast(obj, TYPE_OT_ROM_IMG); diff --git a/hw/opentitan/trace-events b/hw/opentitan/trace-events index 
c9bb92e8e6485..6fc6cab4b3e4b 100644 --- a/hw/opentitan/trace-events +++ b/hw/opentitan/trace-events @@ -304,6 +304,7 @@ ot_rom_ctrl_mem_read_out(const char *id, uint32_t addr, uint32_t val, uint32_t p ot_rom_ctrl_mem_write(const char *id, uint32_t addr, uint32_t val, uint32_t pc) "%s: addr=0x%04x, val=0x%x, pc=0x%x" ot_rom_ctrl_mem_accepts(const char *id, uint32_t addr, bool is_write, uint32_t pc) "%s: addr=0x%04x, is_write=%u, pc=0x%x" ot_rom_ctrl_reset(const char *id, const char *phase) "%s: %s" +ot_rom_ctrl_load_rom_no_image(const char *id) "%s: ROM image not defined" # ot_rstmgr.c From 2390d7912592f95c651bdfb9ff1457a19c08a8e2 Mon Sep 17 00:00:00 2001 From: Emmanuel Blot Date: Tue, 16 Apr 2024 09:51:20 +0200 Subject: [PATCH 19/27] [ot] hw/opentitan: ot_rom_ctrl: reduce trace messages for valid requests Signed-off-by: Emmanuel Blot --- hw/opentitan/ot_rom_ctrl.c | 10 +++++++--- hw/opentitan/trace-events | 2 +- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/hw/opentitan/ot_rom_ctrl.c b/hw/opentitan/ot_rom_ctrl.c index 3d429e357752b..74d22ab4a4f64 100644 --- a/hw/opentitan/ot_rom_ctrl.c +++ b/hw/opentitan/ot_rom_ctrl.c @@ -399,8 +399,6 @@ static bool ot_rom_ctrl_mem_accepts(void *opaque, hwaddr addr, unsigned size, (void)attrs; uint32_t pc = ibex_get_current_pc(); - trace_ot_rom_ctrl_mem_accepts(s->ot_id, (uint32_t)addr, is_write, pc); - if (!is_write) { /* * only allow reads during first reset (after complete check, MR gets @@ -409,7 +407,13 @@ static bool ot_rom_ctrl_mem_accepts(void *opaque, hwaddr addr, unsigned size, return s->first_reset; } - return ((addr + size) <= s->size && s->first_reset); + bool accept = ((addr + size) <= s->size && s->first_reset); + + if (!accept) { + trace_ot_rom_ctrl_mem_rejects(s->ot_id, (uint32_t)addr, is_write, pc); + } + + return accept; } static void ot_rom_ctrl_send_kmac_req(OtRomCtrlState *s) diff --git a/hw/opentitan/trace-events b/hw/opentitan/trace-events index 6fc6cab4b3e4b..eaec0056201c3 100644 --- a/hw/opentitan/trace-events +++ b/hw/opentitan/trace-events @@ -302,7 +302,7 @@ ot_rom_ctrl_io_read_out(const char *id, uint32_t addr, const char * regname, uin ot_rom_ctrl_io_write(const char *id, uint32_t addr, const char * regname, uint32_t val, uint32_t pc) "%s: addr=0x%02x (%s), val=0x%x, pc=0x%x" ot_rom_ctrl_mem_read_out(const char *id, uint32_t addr, uint32_t val, uint32_t pc) "%s: addr=0x%04x, val=0x%x, pc=0x%x" ot_rom_ctrl_mem_write(const char *id, uint32_t addr, uint32_t val, uint32_t pc) "%s: addr=0x%04x, val=0x%x, pc=0x%x" -ot_rom_ctrl_mem_accepts(const char *id, uint32_t addr, bool is_write, uint32_t pc) "%s: addr=0x%04x, is_write=%u, pc=0x%x" +ot_rom_ctrl_mem_rejects(const char *id, uint32_t addr, bool is_write, uint32_t pc) "%s: addr=0x%04x, is_write=%u, pc=0x%x" ot_rom_ctrl_reset(const char *id, const char *phase) "%s: %s" ot_rom_ctrl_load_rom_no_image(const char *id) "%s: ROM image not defined" From dd976f527fdace2e7db5b68de6b1e471e597f949 Mon Sep 17 00:00:00 2001 From: Emmanuel Blot Date: Tue, 16 Apr 2024 09:51:55 +0200 Subject: [PATCH 20/27] [ot] hw/opentitan: ot_aon_timer: add a trace when setting watchdog Signed-off-by: Emmanuel Blot --- hw/opentitan/ot_aon_timer.c | 1 + hw/opentitan/trace-events | 1 + 2 files changed, 2 insertions(+) diff --git a/hw/opentitan/ot_aon_timer.c b/hw/opentitan/ot_aon_timer.c index 32c42127c4b8f..c4ba28e82c75c 100644 --- a/hw/opentitan/ot_aon_timer.c +++ b/hw/opentitan/ot_aon_timer.c @@ -275,6 +275,7 @@ static void ot_aon_timer_rearm_wdog(OtAonTimerState *s, bool reset_origin) } else { 
int64_t delta = ot_aon_timer_ticks_to_ns(s, 0u, threshold - count); int64_t next = ot_aon_timer_compute_next_timeout(s, now, delta); + trace_ot_aon_timer_set_wdog(now, next); timer_mod(s->wdog_timer, next); } diff --git a/hw/opentitan/trace-events b/hw/opentitan/trace-events index eaec0056201c3..a7fa8d9b6f676 100644 --- a/hw/opentitan/trace-events +++ b/hw/opentitan/trace-events @@ -23,6 +23,7 @@ ot_alert_io_write(uint32_t addr, uint32_t val, uint32_t pc) "addr=0x%02x, val=0x ot_aon_timer_irqs(bool wakeup, bool bark, bool bite) "wkup:%u bark:%u bite:%u" ot_aon_timer_read_out(uint32_t addr, const char * regname, uint32_t val, uint32_t pc) "addr=0x%02x (%s), val=0x%x, pc=0x%x" +ot_aon_timer_set_wdog(int64_t now, int64_t next) "now %" PRId64 ", next %" PRId64 ot_aon_timer_write(uint32_t addr, const char * regname, uint32_t val, uint32_t pc) "addr=0x%02x (%s), val=0x%x, pc=0x%x" # ot_ast.c From 0bb0f1e0a69d3bb46d1ff538eaf2d698cf19faf9 Mon Sep 17 00:00:00 2001 From: Emmanuel Blot Date: Mon, 15 Apr 2024 20:16:41 +0200 Subject: [PATCH 21/27] [ot] hw/opentitan: ot_ast_dj: fix virtual clock type Signed-off-by: Emmanuel Blot --- hw/opentitan/ot_ast_dj.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hw/opentitan/ot_ast_dj.c b/hw/opentitan/ot_ast_dj.c index 5f067e6332c51..86d43cfd86aa6 100644 --- a/hw/opentitan/ot_ast_dj.c +++ b/hw/opentitan/ot_ast_dj.c @@ -181,7 +181,7 @@ static int ot_ast_dj_get_random(OtRandomSrcIf *dev, int genid, } else { /* computed delay fits into a 31-bit value */ wait_ns = (int)(timer_expire_time_ns(s->random.timer) - - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT)); + qemu_clock_get_ns(OT_VIRTUAL_CLOCK)); } return wait_ns; } From 54f8fbc2537d930bea1ac1d838c09ae4f4145164 Mon Sep 17 00:00:00 2001 From: Emmanuel Blot Date: Tue, 16 Apr 2024 09:54:58 +0200 Subject: [PATCH 22/27] [ot] hw/opentitan: ot_common: use VIRTUAL timer, not VIRTUAL_RT VIRTUAL timer is paced with icount setting; VIRTUAL_RT is not. This means that as the higher icount, the less instruction executed in a given time slot, VIRTUAL_RT makes HW timer relatively much faster from the guest standpoint. 
Signed-off-by: Emmanuel Blot --- include/hw/opentitan/ot_common.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/hw/opentitan/ot_common.h b/include/hw/opentitan/ot_common.h index 3f62043359375..ec00a88442c88 100644 --- a/include/hw/opentitan/ot_common.h +++ b/include/hw/opentitan/ot_common.h @@ -32,7 +32,7 @@ /* ------------------------------------------------------------------------ */ /* QEMU virtual timer to use for OpenTitan devices */ -#define OT_VIRTUAL_CLOCK QEMU_CLOCK_VIRTUAL_RT +#define OT_VIRTUAL_CLOCK QEMU_CLOCK_VIRTUAL /* ------------------------------------------------------------------------ */ /* Multi-bit boolean values */ From b36e0ebdb63685a6feec05a43c55a1201626f800 Mon Sep 17 00:00:00 2001 From: Emmanuel Blot Date: Tue, 16 Apr 2024 16:36:44 +0200 Subject: [PATCH 23/27] [ot] hw/opentitan: ot_timer: add OT identifier and update trace name Signed-off-by: Emmanuel Blot --- hw/opentitan/ot_timer.c | 22 +++++++++++++--------- hw/opentitan/trace-events | 5 +++-- 2 files changed, 16 insertions(+), 11 deletions(-) diff --git a/hw/opentitan/ot_timer.c b/hw/opentitan/ot_timer.c index ae6a98cc4f20f..76ab0242642ab 100644 --- a/hw/opentitan/ot_timer.c +++ b/hw/opentitan/ot_timer.c @@ -82,18 +82,17 @@ static const char *REG_NAMES[REGS_COUNT] = { struct OtTimerState { SysBusDevice parent_obj; - QEMUTimer *timer; - MemoryRegion mmio; + IbexIRQ m_timer_irq; + IbexIRQ irq; + IbexIRQ alert; + QEMUTimer *timer; uint32_t regs[REGS_COUNT]; - uint32_t pclk; - int64_t origin_ns; - IbexIRQ m_timer_irq; - IbexIRQ irq; - IbexIRQ alert; + char *ot_id; + uint32_t pclk; }; static uint64_t ot_timer_ns_to_ticks(OtTimerState *s, int64_t ns) @@ -155,6 +154,9 @@ static void ot_timer_update_irqs(OtTimerState *s) { bool level = s->regs[R_INTR_STATE0] & s->regs[R_INTR_ENABLE0] & INTR_CMP0_MASK; + if (level != (bool)ibex_irq_get_level(&s->m_timer_irq)) { + trace_ot_timer_update_irq(s->ot_id, level); + } ibex_irq_set(&s->m_timer_irq, level); ibex_irq_set(&s->irq, level); } @@ -239,7 +241,8 @@ static uint64_t ot_timer_read(void *opaque, hwaddr addr, unsigned size) } uint32_t pc = ibex_get_current_pc(); - trace_ot_timer_read_out((uint32_t)addr, REG_NAME(reg), val32, pc); + trace_ot_timer_io_read_out(s->ot_id, (uint32_t)addr, REG_NAME(reg), val32, + pc); return (uint32_t)val32; } @@ -254,7 +257,7 @@ static void ot_timer_write(void *opaque, hwaddr addr, uint64_t value, hwaddr reg = R32_OFF(addr); uint32_t pc = ibex_get_current_pc(); - trace_ot_timer_write((uint32_t)addr, REG_NAME(reg), val32, pc); + trace_ot_timer_io_write(s->ot_id, (uint32_t)addr, REG_NAME(reg), val32, pc); switch (reg) { case R_ALERT_TEST: @@ -342,6 +345,7 @@ static const MemoryRegionOps ot_timer_ops = { }; static Property ot_timer_properties[] = { + DEFINE_PROP_STRING("ot_id", OtTimerState, ot_id), DEFINE_PROP_UINT32("pclk", OtTimerState, pclk, 0u), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/opentitan/trace-events b/hw/opentitan/trace-events index a7fa8d9b6f676..6a2b186e65f36 100644 --- a/hw/opentitan/trace-events +++ b/hw/opentitan/trace-events @@ -383,8 +383,9 @@ ot_sram_ctrl_io_write(const char *id, uint32_t addr, const char * regname, uint3 # ot_timer.c -ot_timer_read_out(uint32_t addr, const char * regname, uint32_t val, uint32_t pc) "addr=0x%02x (%s), val=0x%x, pc=0x%x" -ot_timer_write(uint32_t addr, const char * regname, uint32_t val, uint32_t pc) "addr=0x%02x (%s), val=0x%x, pc=0x%x" +ot_timer_io_read_out(const char *id, uint32_t addr, const char * regname, uint32_t val, uint32_t pc) "%s: addr=0x%02x (%s), 
val=0x%x, pc=0x%x" +ot_timer_update_irq(const char *id, bool level) "%s: %d" +ot_timer_io_write(const char *id, uint32_t addr, const char * regname, uint32_t val, uint32_t pc) "%s: addr=0x%02x (%s), val=0x%x, pc=0x%x" # ot_uart.c From 9aad698ade62df6189fc16c2fe0526674c1c6dda Mon Sep 17 00:00:00 2001 From: Emmanuel Blot Date: Mon, 15 Apr 2024 10:46:22 +0200 Subject: [PATCH 24/27] [ot] scripts/opentitan: improve RV-DM implementation and fix a parser bug - CSR value is not required when no CSR check is requested - remove useless update of TDI signal - implement System Bus "fast mode" transfer Signed-off-by: Emmanuel Blot --- docs/opentitan/dtm.md | 15 +++++++++++---- scripts/opentitan/dtm.py | 21 ++++++++++++++------- scripts/opentitan/ot/dm/dm.py | 13 ++++++++++--- scripts/opentitan/ot/dtm/dtm.py | 1 - 4 files changed, 35 insertions(+), 15 deletions(-) diff --git a/docs/opentitan/dtm.md b/docs/opentitan/dtm.md index b1612956a7e44..85d43dd29a58b 100644 --- a/docs/opentitan/dtm.md +++ b/docs/opentitan/dtm.md @@ -43,6 +43,7 @@ Memory: -s SIZE, --size SIZE size in bytes of memory to access -f FILE, --file FILE file to read/write data for memory access -e ELF, --elf ELF load ELF file into memory + -F, --fast-mode do not check system bus status while transfering Extras: -v, --verbose increase verbosity @@ -64,6 +65,10 @@ Extras: * `-e` specify an ELF32 application file to upload into memory. See also the `--exec` option. +* `-F` assume System Bus can cope with received data pace. This feature increases transfer data + rate by bypassing SB status check. However it may cause the transfer to fail in case System Bus + becomes busy while data are transfered. + * `-H` specify the address of the QEMU VM. * `-I` report the JTAG ID code and the DTM configuration. @@ -93,9 +98,11 @@ Extras: This can be useful for example when the QEMU VM is started with `-S` and no application code has been loaded in memory: once the DTM operations are completed, the default behavior is to resume the hart execution, would start execution code from the current PC and cause an immediate - exception. + exception. The `-x` option can nevertheless be executed, as it is the last action that the script + performs. -* `-x` execute the loaded ELF application from its entry point. Requires the `--elf` option +* `-x` execute the loaded ELF application from its entry point. Requires the `--elf` option. + Application is executed even with `-X` is defined. 
### Examples @@ -111,9 +118,9 @@ Running QEMU VM with the `-jtag tcp::3335` option: ./scripts/opentitan/dtm.py -c misa -C 0x401411ad ```` -* Load and execute an application +* Load (fast mode) and execute an application ````sh - ./scripts/opentitan/dtm.py -e .../helloworld -x + ./scripts/opentitan/dtm.py -e .../helloworld -x -F ```` * Dump a memory segment to stdout diff --git a/scripts/opentitan/dtm.py b/scripts/opentitan/dtm.py index 8ccc2af335ac0..ca3771e9c620a 100755 --- a/scripts/opentitan/dtm.py +++ b/scripts/opentitan/dtm.py @@ -22,7 +22,7 @@ # pylint: disable=import-error # JTAG module is available from the scripts/ directory -sys.path.append(joinpath(normpath(dirname(dirname(sys.argv[0]))))) +sys.path.append(normpath(dirname(dirname(sys.argv[0])))) from ot.util.elf import ElfBlob # noqa: E402 from ot.util.log import configure_loggers # noqa: E402 @@ -88,6 +88,10 @@ def main(): help='file to read/write data for memory access') mem.add_argument('-e', '--elf', type=FileType('rb'), help='load ELF file into memory') + mem.add_argument('-F', '--fast-mode', default=False, + action='store_true', + help='do not check system bus status while ' + 'transfering') extra = argparser.add_argument_group(title='Extras') extra.add_argument('-v', '--verbose', action='count', help='increase verbosity') @@ -116,7 +120,7 @@ def main(): print(f'DTM: v{version[0]}.{version[1]}, {abits} bits') dtm['dtmcs'].check() dtm['dtmcs'].dmireset() - if args.csr_check is None and not args.csr: + if args.csr_check is not None and not args.csr: argparser.error('CSR check requires CSR option') if args.csr: if not rvdm: @@ -137,7 +141,8 @@ def main(): except ValueError: csr = args.csr csr_val = rvdm.read_csr(csr) - rvdm.resume() + if not args.no_exec: + rvdm.resume() if args.csr_check is not None: if csr_val != args.csr_check: raise RuntimeError(f'CSR {args.csr} check failed: ' @@ -162,10 +167,11 @@ def main(): mode = 'rb' if args.mem == 'write' else 'wb' with open(args.file, mode) as mfp: rvdm.memory_copy(mfp, args.mem, args.address, - args.size) + args.size, no_check=args.fast_mode) else: mfp = BytesIO() - rvdm.memory_copy(mfp, args.mem, args.address, args.size) + rvdm.memory_copy(mfp, args.mem, args.address, args.size, + no_check=args.fast_mode) dump_buffer(mfp, args.address) finally: if not args.no_exec: @@ -184,11 +190,12 @@ def main(): try: rvdm.halt() mfp = BytesIO(elf.blob) - rvdm.memory_copy(mfp, 'write', elf.load_address, args.size) + rvdm.memory_copy(mfp, 'write', elf.load_address, args.size, + no_check=args.fast_mode) if args.execute: rvdm.set_pc(elf.entry_point) finally: - if not args.no_exec: + if args.execute or not args.no_exec: rvdm.resume() else: if args.execute: diff --git a/scripts/opentitan/ot/dm/dm.py b/scripts/opentitan/ot/dm/dm.py index 65008e1cd3fe7..5895efd834c04 100644 --- a/scripts/opentitan/ot/dm/dm.py +++ b/scripts/opentitan/ot/dm/dm.py @@ -308,7 +308,7 @@ def write_csr(self, reg: [str | int], value: int) -> None: return value def memory_copy(self, mfp: BinaryIO, mop: str, addr: int, - size: Optional[int]) -> None: + size: Optional[int], no_check: bool = False) -> None: """Handle memory operations. Only support 32-bit transfers (address and size should be aligned) @@ -319,6 +319,9 @@ def memory_copy(self, mfp: BinaryIO, mop: str, addr: int, :param mop: the operation to perform (read, write) :param addr: start address :param size: count of bytes to write + :param no_check: assume remote peer always accepts incoming data: + SBCS status is not checked during transfer if this + option is set. 
""" read = mop == 'read' write = mop == 'write' @@ -344,7 +347,8 @@ def memory_copy(self, mfp: BinaryIO, mop: str, addr: int, # pylint: disable=access-member-before-definition while to_go > 0: self._log.debug('reading mem from 0x%08x', addr) - self._wait_sb_idle() + if not no_check: + self._wait_sb_idle() # trigger next read (sbreadondata), inc addr (sbautoincrement) data = self.sbdata0 mfp.write(data.to_bytes(4, 'little')) @@ -366,9 +370,12 @@ def memory_copy(self, mfp: BinaryIO, mop: str, addr: int, data = int.from_bytes(buf, 'little') # inc addr (sbautoincrement) self.sbdata0 = data - self._wait_sb_idle() + if not no_check: + self._wait_sb_idle() to_go -= 4 addr += 4 + if no_check: + self._wait_sb_idle(check=True) lap = now() - start rate = size / (lap * 1024) self._log.info('copied %d KB @ %.1f KB/s', size//1024, rate) diff --git a/scripts/opentitan/ot/dtm/dtm.py b/scripts/opentitan/ot/dtm/dtm.py index d7191c61fe28d..5cf8f66fb5096 100644 --- a/scripts/opentitan/ot/dtm/dtm.py +++ b/scripts/opentitan/ot/dtm/dtm.py @@ -263,7 +263,6 @@ def build_error(cls, code: int) -> Optional[DMIError]: def read(self, address: int, length: int) -> BitSequence: """Read a bit sequence value.""" self._engine.write_ir(BitSequence(address, self._ir_length)) - self._engine.set_tdi(False) return self._engine.read_dr(length) def write(self, address: int, bseq: BitSequence) -> None: From 425e14c779983a1953296bd5b7989ff13db8e4ba Mon Sep 17 00:00:00 2001 From: Emmanuel Blot Date: Mon, 15 Apr 2024 11:27:04 +0200 Subject: [PATCH 25/27] [ot] scripts/jtag: remove useless APIs - remove useless setter/getter for TMS and TDI - remove useless TMS update, already performed by state selection Signed-off-by: Emmanuel Blot --- scripts/jtag/bitbang.py | 29 +++++------------------- scripts/jtag/jtag.py | 49 +---------------------------------------- 2 files changed, 6 insertions(+), 72 deletions(-) diff --git a/scripts/jtag/bitbang.py b/scripts/jtag/bitbang.py index 5b87eef6b58c5..24a38371bdc38 100644 --- a/scripts/jtag/bitbang.py +++ b/scripts/jtag/bitbang.py @@ -117,14 +117,13 @@ def write_tms(self, modesel: BitSequence) -> None: self._tms = tms self._tck = tck - def write(self, out: BitSequence, use_last: bool = True): + def write(self, out: BitSequence): if not isinstance(out, BitSequence): raise ValueError('out is not a BitSequence') - if use_last: - if self._last is not None: - # TODO: check if this case needs to be handled - raise NotImplementedError('Last is lost') - self._last = out.pop_left_bit() + if self._last is not None: + # TODO: check if this case needs to be handled + raise NotImplementedError('Last is lost') + self._last = out.pop_left_bit() if self._link_log: self._log.debug('write TDI [%d] %s', len(out), out) tms = self._tms @@ -178,24 +177,6 @@ def read(self, length: int) -> BitSequence: self._log.debug('read TDI [%d] %s', len(bseq), bseq) return bseq - @property - def tdi(self) -> bool: - return self._tdi - - @tdi.setter - def tdi(self, value: bool): - self._tdi = bool(value) - if self._link_log: - self._log.debug('set TDI %u', self._tdi) - - @property - def tms(self) -> bool: - return self._tms - - @tms.setter - def tms(self, value: bool): - self._tms = bool(value) - @classmethod def _bus_code(cls, tclk: bool, tms: bool, tdi: bool) -> int: return 0x30 + ((int(tclk) << 2) | (int(tms) << 1) | tdi) diff --git a/scripts/jtag/jtag.py b/scripts/jtag/jtag.py index f1f29f3d6ec19..6163a6a885b4a 100644 --- a/scripts/jtag/jtag.py +++ b/scripts/jtag/jtag.py @@ -217,13 +217,11 @@ def write_tms(self, modesel: 
BitSequence) -> None: """ raise NotImplementedError('ABC') - def write(self, out: BitSequence, use_last: bool = True): + def write(self, out: BitSequence): """Write a sequence of bits to TDI. :note: out content may be consumed, i.e. emptied :param out: the bot sequence of TDI bits to clock in - :param use_last: whether to clock in the stored TMS bits on first - clock cycle """ raise NotImplementedError('ABC') @@ -235,26 +233,6 @@ def read(self, length: int) -> BitSequence: """ raise NotImplementedError('ABC') - @property - def tdi(self) -> bool: - """Get current TDI value.""" - raise NotImplementedError('ABC') - - @tdi.setter - def tdi(self, value: bool): - """Set TDI value, to be clocked out on next operation.""" - raise NotImplementedError('ABC') - - @property - def tms(self) -> bool: - """Get current TMS value.""" - raise NotImplementedError('ABC') - - @tms.setter - def tms(self, value: bool): - """Set TMS value, to be clocked out on next operation.""" - raise NotImplementedError('ABC') - class JtagEngine: """High-level JTAG engine controller""" @@ -321,12 +299,8 @@ def capture_ir(self) -> None: def write_ir(self, instruction) -> None: """Change the current instruction of the TAP controller""" self.change_state('shift_ir') - ilength = len(instruction) # write consumes the instruction self._ctrl.write(instruction) self.change_state('update_ir') - # flush IR output - self._ctrl.tms = False - self._ctrl.read(ilength) def capture_dr(self) -> None: """Capture the current data register from the TAP controller""" @@ -341,27 +315,6 @@ def write_dr(self, data) -> None: def read_dr(self, length: int) -> BitSequence: """Read the data register from the TAP controller""" self.change_state('shift_dr') - self._ctrl.tms = False data = self._ctrl.read(length) self.change_state('update_dr') return data - - def write_tms(self, out) -> None: - """Change the TAP controller state""" - self._ctrl.write_tms(out) - - def write(self, out, use_last=False) -> None: - """Write a sequence of bits to TDI""" - self._ctrl.write(out, use_last) - - def read(self, length): - """Read out a sequence of bits from TDO""" - return self._ctrl.read(length) - - def set_tdi(self, value: bool): - """Force default TDI value, clocked out on each cycle.""" - self._ctrl.tdi = value - - def set_tms(self, value: bool): - """Force default TMS value clocked out on each cycle.""" - self._ctrl.tms = value From 5637af32f071fcc32c99cc3e848ff376ebc39fad Mon Sep 17 00:00:00 2001 From: Emmanuel Blot Date: Mon, 15 Apr 2024 20:41:01 +0200 Subject: [PATCH 26/27] [ot] scripts/opentitan: dtm.py, otpdm.py: report default settings * TAP IR length * DMI base address Signed-off-by: Emmanuel Blot --- scripts/opentitan/dtm.py | 19 +++++++++++++++---- scripts/opentitan/otpdm.py | 19 +++++++++++++++---- 2 files changed, 30 insertions(+), 8 deletions(-) diff --git a/scripts/opentitan/dtm.py b/scripts/opentitan/dtm.py index ca3771e9c620a..2fb5d8053c975 100755 --- a/scripts/opentitan/dtm.py +++ b/scripts/opentitan/dtm.py @@ -34,6 +34,13 @@ from jtag.jtag import JtagEngine # noqa: E402 +DEFAULT_IR_LENGTH = 5 +"""Default TAP Instruction Register length.""" + +DEFAULT_DMI_ADDRESS = 0x0 +"""Default DMI address of the DM.""" + + def idcode(engine: JtagEngine, ir_length: int) -> None: """Retrieve ID code.""" code = JtagBitbangController.INSTRUCTIONS['idcode'] @@ -61,10 +68,14 @@ def main(): qvm.add_argument('-Q', '--no-quit', action='store_true', default=False, help='do not ask the QEMU to quit on exit') dmi = argparser.add_argument_group(title='DMI') - 
dmi.add_argument('-l', '--ir-length', type=int, default=5, - help='bit length of the IR register') - dmi.add_argument('-b', '--base', type=HexInt.parse, default=0, - help='define DMI base address') + dmi.add_argument('-l', '--ir-length', type=int, + default=DEFAULT_IR_LENGTH, + help=f'bit length of the IR register ' + f'(default: {DEFAULT_IR_LENGTH})') + dmi.add_argument('-b', '--base', type=HexInt.parse, + default=DEFAULT_DMI_ADDRESS, + help=f'define DMI base address ' + f'(default: 0x{DEFAULT_DMI_ADDRESS:x})') info = argparser.add_argument_group(title='Info') info.add_argument('-I', '--info', action='store_true', help='report JTAG ID code and DTM configuration') diff --git a/scripts/opentitan/otpdm.py b/scripts/opentitan/otpdm.py index d790806f39455..2b7c73bee8b70 100755 --- a/scripts/opentitan/otpdm.py +++ b/scripts/opentitan/otpdm.py @@ -35,9 +35,16 @@ from jtag.jtag import JtagEngine # noqa: E402 +DEFAULT_IR_LENGTH = 5 +"""Default TAP Instruction Register length.""" + +DEFAULT_DMI_ADDRESS = 0x0 +"""Default DMI address of the DM.""" + DEFAULT_OTP_BASE_ADDRESS = 0x30130000 """Default base address of the OTP controller on the OT local crossbar.""" + def main(): """Entry point.""" debug = True @@ -55,10 +62,14 @@ def main(): qvm.add_argument('-Q', '--no-quit', action='store_true', default=False, help='do not ask the QEMU to quit on exit') dmi = argparser.add_argument_group(title='DMI') - dmi.add_argument('-l', '--ir-length', type=int, default=5, - help='bit length of the IR register') - dmi.add_argument('-b', '--base', type=HexInt.parse, default=0, - help='define DMI base address') + dmi.add_argument('-l', '--ir-length', type=int, + default=DEFAULT_IR_LENGTH, + help=f'bit length of the IR register ' + f'(default: {DEFAULT_IR_LENGTH})') + dmi.add_argument('-b', '--base', type=HexInt.parse, + default=DEFAULT_DMI_ADDRESS, + help=f'define DMI base address ' + f'(default: 0x{DEFAULT_DMI_ADDRESS:x})') otp = argparser.add_argument_group(title='OTP') otp.add_argument('-j', '--otp-map', type=FileType('rt'), metavar='HJSON', From 637ee7bfb7df8bddf4c8c27201df2ad9aa4dfbfc Mon Sep 17 00:00:00 2001 From: Emmanuel Blot Date: Tue, 16 Apr 2024 17:33:27 +0200 Subject: [PATCH 27/27] [ot] docs/opentitan: add a JTAG troubleshooting section. Signed-off-by: Emmanuel Blot --- docs/opentitan/dtm.md | 2 +- docs/opentitan/jtag-dm.md | 34 ++++++++++++++++++++++++++++++++++ docs/opentitan/jtagmbx.md | 4 ++++ docs/opentitan/lc_ctrl_dmi.md | 4 ++++ docs/opentitan/otpdm.md | 4 ++++ 5 files changed, 47 insertions(+), 1 deletion(-) diff --git a/docs/opentitan/dtm.md b/docs/opentitan/dtm.md index 85d43dd29a58b..af1b26e0d7e17 100644 --- a/docs/opentitan/dtm.md +++ b/docs/opentitan/dtm.md @@ -102,7 +102,7 @@ Extras: performs. * `-x` execute the loaded ELF application from its entry point. Requires the `--elf` option. - Application is executed even with `-X` is defined. + Application is executed even with `-X` defined. ### Examples diff --git a/docs/opentitan/jtag-dm.md b/docs/opentitan/jtag-dm.md index f2f94676dac5e..b1bd5f9951b92 100644 --- a/docs/opentitan/jtag-dm.md +++ b/docs/opentitan/jtag-dm.md @@ -129,3 +129,37 @@ information about this stack and demonstrate how to use the Debug Module to acce The [`scripts/opentitan/otpdm.py`](otpdm.md) also use the same stack to access the cells of the OTP controller. + +### Troubleshooting [#troubleshooting] + +A common issue with initial JTAG configuration is to use the wrong Instruction Register length. 
+The IR length depends on the actual implementation of the TAP controller and therefore may be
+different across HW implementations, in this case across OpenTitan instantiations.
+
+Unfortunately, there is no reliable way to automatically detect the IR length; it is a setting
+that must be provided to the JTAG tool. Scripts in `scripts/opentitan` use the `-l` /
+`--ir-length` option to specify the IR length. The default value may be obtained with the `-h`
+option; the IR length is 5 bits for the default EarlGrey and Darjeeling machines.
+
+Whenever the wrong IR length is specified, or the default IR length is used with a HW/VM machine
+that uses a non-default length, the first instruction that is stored in the TAP instruction register
+is misinterpreted, which may cause different errors with the same root cause: a wrong IR length
+setting.
+
+It is recommended to query the DMI address bits with, for example, [`scripts/opentitan/dtm.py`](dtm.md),
+a basic command that needs a valid IR length setting to complete. Use `dtm.py -I` to query
+some low-level information from the remote peer.
+
+It should report something like:
+````
+IDCODE: 0x11cdf
+DTM: v0.13, 12 bits
+````
+
+* If the DTM bit count is stuck at 0, the IR length is likely wrong.
+* If an error message such as `Invalid reported address bits` is returned, the IR length is likely
+  wrong.
+
+Another common issue is to use a machine with multiple Debug Modules without specifying the base
+address of the intended one. The default DMI base address is always `0x0`; use the `-b` / `--base`
+option to select the proper Debug Module.
diff --git a/docs/opentitan/jtagmbx.md b/docs/opentitan/jtagmbx.md
index d477db1266992..12b87aa0a26cf 100644
--- a/docs/opentitan/jtagmbx.md
+++ b/docs/opentitan/jtagmbx.md
@@ -246,3 +246,7 @@ Note: `devproxy.py` needs to be found within the Python path, using for example
 ```sh
 exprot PYTHONPATH=${QEMU_SOURCE_PATH}/scripts/opentitan
 ```
+
+### Troubleshooting
+
+See the [Troubleshooting](jtag-dm.md#troubleshooting) section for details.
diff --git a/docs/opentitan/lc_ctrl_dmi.md b/docs/opentitan/lc_ctrl_dmi.md
index ef4c2c4c32dec..f6bcb235ffe73 100644
--- a/docs/opentitan/lc_ctrl_dmi.md
+++ b/docs/opentitan/lc_ctrl_dmi.md
@@ -65,3 +65,7 @@ lc_ctrl = LifeCycleController(dtm, 0x3000 >> 2)
 
 # See LifeCycleController for LC controller communication API
 ````
+
+### Troubleshooting
+
+See the [Troubleshooting](jtag-dm.md#troubleshooting) section for details.
diff --git a/docs/opentitan/otpdm.md b/docs/opentitan/otpdm.md
index 9d0502172e485..26502cf0f3970 100644
--- a/docs/opentitan/otpdm.md
+++ b/docs/opentitan/otpdm.md
@@ -133,3 +133,7 @@ Running QEMU VM with the `-jtag tcp::3335` option:
 -w 4c6f72656d20697073756d20646f6c6f722073697420616d65742c20636f6e73
 ````
 
+### Troubleshooting
+
+See the [Troubleshooting](jtag-dm.md#troubleshooting) section for details.
+
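
As a companion to the troubleshooting notes above, the snippet below is a minimal sketch, for
illustration only, of how the JTAG / DTM / Debug Module / OTP stack introduced in this series can
be driven directly from Python, in the spirit of the `otpdm.py` tool. The host and port, the OTP
map file name and the partition/item names are placeholders; the IR length (5 bits), DMI base
address (`0x0`) and OTP controller address (`0x30130000`) are simply the defaults used by the
series' tools. It also assumes that both `scripts/opentitan` and `scripts/` are on the Python path
so that the `ot` and `jtag` packages can be imported.

```python
#!/usr/bin/env python3
"""Illustrative sketch: read an OTP item over JTAG with the series' Python stack.

   Assumes QEMU runs with `-jtag tcp::3335` and that both scripts/opentitan
   and scripts/ are on the Python path. The OTP map file, partition and item
   names below are placeholders.
"""

from socket import create_connection

from ot.dm import DebugModule
from ot.dm.otp import OTPController
from ot.dtm import DebugTransportModule
from ot.otp import OtpMap
from jtag.bitbang import JtagBitbangController
from jtag.jtag import JtagEngine

# connect to the QEMU JTAG bitbang server
sock = create_connection(('127.0.0.1', 3335), timeout=0.5)
sock.settimeout(0.1)

# JTAG bitbang controller + TAP engine, then the RISC-V DTM and Debug Module
ctrl = JtagBitbangController(sock)
eng = JtagEngine(ctrl)
ctrl.tap_reset(True)
dtm = DebugTransportModule(eng, 5)  # 5-bit IR is the default for these machines
rvdm = DebugModule(dtm, 0x0)        # default DMI base address
try:
    rvdm.initialize()
    rvdm.halt()

    # the OTP controller sits on the OT local crossbar at its default address
    otp = OTPController(rvdm, base=0x30130000)
    otpmap = OtpMap()
    with open('otp_ctrl_mmap.hjson', 'rt') as hfp:  # placeholder map file
        otpmap.load(hfp)
    otp.set_map(otpmap)

    # read a single item, then the HW digests of the buffered partitions
    value = otp.read_partition_item('HW_CFG', 'DEVICE_ID')  # placeholder names
    print(value.hex() if isinstance(value, bytes) else f'0x{value:x}')
    for name, digest in otp.get_hw_partition_digests().items():
        print(f'{name}: 0x{digest:016x}')
finally:
    ctrl.quit()
```

The same `DebugModule` instance is what `dtm.py` relies on for memory transfers; its new `-F`
option reuses `DebugModule.memory_copy()` and merely skips the System Bus status poll between
words.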