Skip to content

amd

zeus.device.gpu.amd

AMD GPUs.

MockAMDSMI

Mock class for AMD SMI library.

Source code in zeus/device/gpu/amd.py
22
23
24
25
26
27
28
29
30
31
class MockAMDSMI:
    """Stand-in for the AMD SMI library when `amdsmi` cannot be imported.

    Any attribute access fails immediately: if control ever reaches this mock,
    the surrounding availability checks have a bug.
    """

    def __getattr__(self, name):
        """Raise an error on any attribute access.

        Since this class is only used when `amdsmi` is not available,
        something has gone wrong if any method is called.
        """
        message = f"amdsmi is not available and amdsmi.{name} shouldn't have been called. This is a bug."
        raise RuntimeError(message)

__getattr__

__getattr__(name)

Raise an error if any method is called.

Since this class is only used when amdsmi is not available, something has gone wrong if any method is called.

Source code in zeus/device/gpu/amd.py
25
26
27
28
29
30
31
def __getattr__(self, name):
    """Raise an error if any method is called.

    Since this class is only used when `amdsmi` is not available,
    something has gone wrong if any method is called.
    """
    message = f"amdsmi is not available and amdsmi.{name} shouldn't have been called. This is a bug."
    raise RuntimeError(message)

AMDGPU

Bases: GPU

Implementation of GPU for AMD GPUs.

Source code in zeus/device/gpu/amd.py
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
class AMDGPU(gpu_common.GPU):
    """Implementation of `GPU` for AMD GPUs.

    Every amdsmi call is wrapped with `_handle_amdsmi_errors`, which translates
    amdsmi status codes into the Zeus exception hierarchy via `_exception_map`.
    """

    def __init__(self, gpu_index: int) -> None:
        """Initialize the GPU object.

        Args:
            gpu_index: HIP index of the GPU (the index the HIP runtime and
                PyTorch see), translated to an amdsmi handle in `_get_handle`.
        """
        super().__init__(gpu_index)
        self._get_handle()

        # These values are updated in AMDGPUs constructor
        self._supports_get_total_energy_consumption = True
        self._supports_instant_power_usage = True
        self._supports_average_power_usage = True
        self._is_dual_die_odd_chiplet = False  # Set for MI250/MI250X odd-indexed GPUs

    # Maps amdsmi status codes to Zeus exception classes.
    _exception_map = {
        1: gpu_common.ZeusGPUInvalidArgError,  # amdsmi.amdsmi_wrapper.AMDSMI_STATUS_INVAL
        2: gpu_common.ZeusGPUNotSupportedError,  # amdsmi.amdsmi_wrapper.AMDSMI_STATUS_NOT_SUPPORTED
        8: gpu_common.ZeusGPUTimeoutError,  # amdsmi.amdsmi_wrapper.AMDSMI_STATUS_TIMEOUT
        10: gpu_common.ZeusGPUNoPermissionError,  # amdsmi.amdsmi_wrapper.AMDSMI_STATUS_NO_PERM
        15: gpu_common.ZeusGPUMemoryError,  # amdsmi.amdsmi_wrapper.AMDSMI_STATUS_OUT_OF_RESOURCES
        18: gpu_common.ZeusGPUInitError,  # amdsmi.amdsmi_wrapper.AMDSMI_STATUS_INIT_ERROR
        31: gpu_common.ZeusGPUNotFoundError,  # amdsmi.amdsmi_wrapper.AMDSMI_STATUS_NOT_FOUND
        32: gpu_common.ZeusGPUInitError,  # amdsmi.amdsmi_wrapper.AMDSMI_STATUS_NOT_INIT
        34: gpu_common.ZeusGPUDriverNotLoadedError,  # amdsmi.amdsmi_wrapper.AMDSMI_STATUS_DRIVER_NOT_LOADED
        41: gpu_common.ZeusGPUInsufficientSizeError,  # amdsmi.amdsmi_wrapper.AMDSMI_STATUS_INSUFFICIENT_SIZE
        45: gpu_common.ZeusGPUDriverNotLoadedError,  # amdsmi.amdsmi_wrapper.AMDSMI_NO_ENERGY_DRV
        46: gpu_common.ZeusGPUDriverNotLoadedError,  # amdsmi.amdsmi_wrapper.AMDSMI_NO_MSR_DRV
        47: gpu_common.ZeusGPUDriverNotLoadedError,  # amdsmi.amdsmi_wrapper.AMDSMI_NO_HSMP_DRV
        48: gpu_common.ZeusGPUNotSupportedError,  # amdsmi.amdsmi_wrapper.AMDSMI_NO_HSMP_SUP
        49: gpu_common.ZeusGPUNotSupportedError,  # amdsmi.amdsmi_wrapper.AMDSMI_NO_HSMP_MSG_SUP
        50: gpu_common.ZeusGPUTimeoutError,  # amdsmi.amdsmi_wrapper.AMDSMI_HSMP_TIMEOUT
        51: gpu_common.ZeusGPUDriverNotLoadedError,  # amdsmi.amdsmi_wrapper.AMDSMI_NO_DRV
        52: gpu_common.ZeusGPULibraryNotFoundError,  # amdsmi.amdsmi_wrapper.AMDSMI_FILE_NOT_FOUND
        53: gpu_common.ZeusGPUInvalidArgError,  # amdsmi.amdsmi_wrapper.AMDSMI_ARG_PTR_NULL
        4294967295: gpu_common.ZeusGPUUnknownError,  # amdsmi.amdsmi_wrapper.AMDSMI_STATUS_UNKNOWN_ERROR
    }

    @_handle_amdsmi_errors
    def _get_handle(self):
        """Resolve `self.gpu_index` (a HIP index) to an amdsmi processor handle."""
        # `self.gpu_index` is a HIP index (what PyTorch sees), but
        # `amdsmi_get_processor_handles()` returns handles ordered by amd-smi's
        # own GPU index (BDF-sorted). On some nodes (e.g., MI350X) these two
        # index spaces differ. Map HIP index -> handle via
        # `amdsmi_get_gpu_enumeration_info`, the same source `amd-smi monitor`
        # uses to print its HIP-ID column.
        hip_to_handle = _hip_to_amdsmi_handle()
        if self.gpu_index not in hip_to_handle:
            raise gpu_common.ZeusGPUNotFoundError(
                f"GPU with HIP index {self.gpu_index} not found. Found HIP indices: {sorted(hip_to_handle)}."
            )
        self.handle = hip_to_handle[self.gpu_index]

    @_handle_amdsmi_errors
    def get_name(self) -> str:
        """Return the name of the GPU model."""
        # `market_name` is the marketing name of the GPU from the ASIC info dict.
        info = amdsmi.amdsmi_get_gpu_asic_info(self.handle)
        return info["market_name"]

    @property
    def supports_nonblocking_setters(self) -> bool:
        """Return True if the GPU object supports non-blocking configuration setters."""
        return False

    @_handle_amdsmi_errors
    def get_power_management_limit_constraints(self) -> tuple[int, int]:
        """Return the minimum and maximum power management limits. Units: mW."""
        info = amdsmi.amdsmi_get_power_cap_info(self.handle)  # Returns in uW
        # divmod converts uW -> mW while detecting (and warning about) any
        # sub-milliwatt remainder that would be silently truncated.
        min_power_cap, res = divmod(info["min_power_cap"], 1000)
        if res != 0:
            logger.warning(
                "Minimum power cap for GPU %d is not a multiple of 1000 uW: %d uW",
                self.gpu_index,
                info["min_power_cap"],
            )
        max_power_cap, res = divmod(info["max_power_cap"], 1000)
        if res != 0:
            logger.warning(
                "Maximum power cap for GPU %d is not a multiple of 1000 uW: %d uW",
                self.gpu_index,
                info["max_power_cap"],
            )
        return (int(min_power_cap), int(max_power_cap))

    @_handle_amdsmi_errors
    def get_power_management_limit(self) -> int:
        """Return the current power management limit. Units: mW."""
        info = amdsmi.amdsmi_get_power_cap_info(self.handle)  # Returns in uW
        power_cap, res = divmod(info["power_cap"], 1000)
        if res != 0:
            logger.warning(
                "Current power cap for GPU %d is not a multiple of 1000 uW: %d uW",
                self.gpu_index,
                info["power_cap"],
            )
        return int(power_cap)

    @_handle_amdsmi_errors
    def set_power_management_limit(self, power_limit_mw: int, block: bool = True) -> None:
        """Set the GPU's power management limit. Unit: mW."""
        # Skip the privileged set call if the requested limit is already in effect.
        current_limit = self.get_power_management_limit()
        if current_limit == power_limit_mw:
            return

        self._warn_sys_admin()
        # Units for set_power_cap is uW
        amdsmi.amdsmi_set_power_cap(self.handle, 0, int(power_limit_mw * 1000))

    @_handle_amdsmi_errors
    def reset_power_management_limit(self, block: bool = True) -> None:
        """Reset the GPU's power management limit to the default value."""
        info = amdsmi.amdsmi_get_power_cap_info(self.handle)  # Returns in uW
        default_power_cap_uw = int(info["default_power_cap"])
        # Skip the privileged set call if the default is already in effect.
        current_limit_mw = self.get_power_management_limit()
        if current_limit_mw * 1000 == default_power_cap_uw:
            return

        self._warn_sys_admin()
        # Units for set_power_cap is uW
        amdsmi.amdsmi_set_power_cap(self.handle, 0, cap=default_power_cap_uw)

    @_handle_amdsmi_errors
    def set_persistence_mode(self, enabled: bool, block: bool = True) -> None:
        """Set persistence mode.

        Raises:
            ZeusGPUNotSupportedError: Always, since AMD GPUs do not have persistence mode.
        """
        raise gpu_common.ZeusGPUNotSupportedError("Persistence mode is not supported on AMD GPUs.")

    @_handle_amdsmi_errors
    def get_supported_memory_clocks(self) -> list[int]:
        """Return a list of supported memory clock frequencies. Units: MHz."""
        # Only the [max, min] bounds are exposed, not every discrete clock step.
        info = amdsmi.amdsmi_get_clock_info(self.handle, amdsmi.AmdSmiClkType.MEM)  # returns MHz
        return [info["max_clk"], info["min_clk"]]

    @_handle_amdsmi_errors
    def set_memory_locked_clocks(self, min_clock_mhz: int, max_clock_mhz: int, block: bool = True) -> None:
        """Lock the memory clock to a specified range. Units: MHz."""
        self._warn_sys_admin()
        amdsmi.amdsmi_set_gpu_clk_range(
            self.handle,
            min_clock_mhz,
            max_clock_mhz,
            clk_type=amdsmi.AmdSmiClkType.MEM,
        )

    @_handle_amdsmi_errors
    def reset_memory_locked_clocks(self, block: bool = True) -> None:
        """Reset the locked memory clocks to the default."""
        # Get default MEM clock values
        info = amdsmi.amdsmi_get_clock_info(self.handle, amdsmi.AmdSmiClkType.MEM)  # returns MHz

        self._warn_sys_admin()
        # Restoring the full [min, max] range effectively unlocks the clock.
        amdsmi.amdsmi_set_gpu_clk_range(
            self.handle,
            info["min_clk"],
            info["max_clk"],
            clk_type=amdsmi.AmdSmiClkType.MEM,
        )  # expects MHz

    @_handle_amdsmi_errors
    def get_supported_graphics_clocks(self, memory_clock_mhz: int | None = None) -> list[int]:
        """Return a list of supported graphics clock frequencies. Units: MHz.

        Args:
            memory_clock_mhz: Memory clock frequency to use. Some GPUs have
                different supported graphics clocks depending on the memory clock.
                Currently unused, since amdsmi only exposes the overall
                [max, min] GFX clock bounds.
        """
        info = amdsmi.amdsmi_get_clock_info(self.handle, amdsmi.AmdSmiClkType.GFX)  # returns MHz
        return [info["max_clk"], info["min_clk"]]

    @_handle_amdsmi_errors
    def set_gpu_locked_clocks(self, min_clock_mhz: int, max_clock_mhz: int, block: bool = True) -> None:
        """Lock the GPU clock to a specified range. Units: MHz."""
        self._warn_sys_admin()
        amdsmi.amdsmi_set_gpu_clk_range(
            self.handle,
            min_clock_mhz,
            max_clock_mhz,
            clk_type=amdsmi.AmdSmiClkType.GFX,
        )

    @_handle_amdsmi_errors
    def reset_gpu_locked_clocks(self, block: bool = True) -> None:
        """Reset the locked GPU clocks to the default."""
        # Get default GPU clock values
        info = amdsmi.amdsmi_get_clock_info(self.handle, amdsmi.AmdSmiClkType.GFX)  # returns MHz

        self._warn_sys_admin()
        # Restoring the full [min, max] range effectively unlocks the clock.
        amdsmi.amdsmi_set_gpu_clk_range(
            self.handle,
            info["min_clk"],
            info["max_clk"],
            clk_type=amdsmi.AmdSmiClkType.GFX,
        )  # expects MHz

    def _ensure_not_dual_die_odd_chiplet(self) -> None:
        """Raise an error if the GPU is a chiplet of a dual-die AMD Instinct MI250/MI250X GPU.

        Raises:
            ZeusGPUNotSupportedError: If this GPU is the odd-indexed chiplet,
                whose power is only reported as part of the even-indexed
                chiplet's combined reading.
        """
        if self._is_dual_die_odd_chiplet:
            raise gpu_common.ZeusGPUNotSupportedError(
                f"GPU {self.gpu_index} is a chiplet of a dual-die AMD Instinct MI250/MI250X GPU "
                f"that does not support individual power monitoring. AMD's driver only reports power "
                f"for GPU {self.gpu_index - 1}, which represents the COMBINED power draw of BOTH chiplets "
                f"(GPU {self.gpu_index - 1} and GPU {self.gpu_index}).\n\n"
                f"To measure power/energy for workloads on this GPU:\n"
                f"  1. Use GPU {self.gpu_index - 1} for measurements (e.g., PowerMonitor(gpu_indices=[{self.gpu_index - 1}]))\n"
                f"  2. Be aware that measurements include BOTH chiplets and cannot be separated\n"
                f"  3. If you run workloads on GPU {self.gpu_index}, its power consumption will be "
                f"included in GPU {self.gpu_index - 1}'s readings"
            )

    @_handle_amdsmi_errors
    def get_average_power_usage(self) -> int:
        """Return the average power draw of the GPU. Units: mW.

        Raises:
            ZeusGPUNotSupportedError: If this GPU is an MI250/MI250X odd chiplet,
                if average power is known to be unsupported, or if amdsmi returns
                a non-integer 'average_socket_power'.
        """
        self._ensure_not_dual_die_odd_chiplet()

        if not self._supports_average_power_usage:
            raise gpu_common.ZeusGPUNotSupportedError(
                "Average power usage is not supported on this AMD GPU. "
                "This is because amdsmi.amdsmi_get_power_info does not return a valid 'average_socket_power'. "
                "Please use `get_instant_power_usage` instead."
            )
        # returns in W, convert to mW
        power_info = amdsmi.amdsmi_get_power_info(self.handle)
        avg_power = power_info["average_socket_power"]
        if not isinstance(avg_power, int):
            raise gpu_common.ZeusGPUNotSupportedError(
                f"Average power usage is not supported on this AMD GPU. "
                f"amdsmi.amdsmi_get_power_info returned '{avg_power}' for 'average_socket_power'. "
                f"Please use `get_instant_power_usage` instead."
            )
        return avg_power * 1000

    @_handle_amdsmi_errors
    def get_instant_power_usage(self) -> int:
        """Return the current power draw of the GPU. Units: mW.

        Raises:
            ZeusGPUNotSupportedError: If this GPU is an MI250/MI250X odd chiplet
                or instant power is known to be unsupported.
        """
        self._ensure_not_dual_die_odd_chiplet()

        if not self._supports_instant_power_usage:
            raise gpu_common.ZeusGPUNotSupportedError(
                "Instant power usage is not supported on this AMD GPU. "
                "This is because amdsmi.amdsmi_get_power_info does not return a valid 'current_socket_power'. "
                "Please use `get_average_power_usage` instead."
            )
        # returns in W, convert to mW
        return int(amdsmi.amdsmi_get_power_info(self.handle)["current_socket_power"]) * 1000

    @_handle_amdsmi_errors
    def get_average_memory_power_usage(self) -> int:
        """Return the average power usage of the GPU's memory. Units: mW.

        Raises:
            ZeusGPUNotSupportedError: Always; amdsmi does not expose memory power.
        """
        raise gpu_common.ZeusGPUNotSupportedError("Average memory power usage is not supported on AMD GPUs.")

    @_handle_amdsmi_errors
    def supports_get_total_energy_consumption(self) -> bool:
        """Return whether the GPU supports retrieving total energy consumption."""
        # Capability flag initialized in `__init__` and updated by the AMDGPUs manager.
        return self._supports_get_total_energy_consumption

    @_handle_amdsmi_errors
    def get_total_energy_consumption(self) -> int:
        """Return the total energy consumption of the GPU since driver load. Units: mJ."""
        if not self._supports_get_total_energy_consumption:
            raise gpu_common.ZeusGPUNotSupportedError(
                "Total energy consumption is not supported on this AMD GPU. "
                "This is because the result of `amdsmi.amdsmi_get_energy_count` is not accurate. "
                "Please use `get_average_power_usage` or `get_instant_power_usage` to calculate energy usage."
            )
        energy_dict = amdsmi.amdsmi_get_energy_count(self.handle)
        if "energy_accumulator" in energy_dict:  # Changed since amdsmi 6.2.1
            energy = energy_dict["energy_accumulator"] * energy_dict["counter_resolution"]
        else:
            # Old API: assume has key "power". If not, exception will be handled by _handle_amdsmi_errors.
            energy = energy_dict["power"] * energy_dict["counter_resolution"]

        return int(energy / 1e3)  # energy is in microjoules; convert to millijoules

    @_handle_amdsmi_errors
    def get_gpu_temperature(self) -> int:
        """Return the current GPU temperature. Units: Celsius.

        We use the hotspot temperature (as opposed to edge) as we believe it to be more representative
        of the GPU core's temperature under load.
        """
        # amdsmi_get_temp_metric returns millidegrees Celsius, convert to Celsius
        temp_millidegrees = amdsmi.amdsmi_get_temp_metric(
            self.handle,
            amdsmi.AmdSmiTemperatureType.HOTSPOT,
            amdsmi.AmdSmiTemperatureMetric.CURRENT,
        )
        return temp_millidegrees // 1000

supports_nonblocking_setters property

supports_nonblocking_setters

Return True if the GPU object supports non-blocking configuration setters.

__init__

__init__(gpu_index)
Source code in zeus/device/gpu/amd.py
100
101
102
103
104
105
106
107
108
109
def __init__(self, gpu_index: int) -> None:
    """Initialize the GPU object.

    Args:
        gpu_index: HIP index of the GPU (the index the HIP runtime and
            PyTorch see), translated to an amdsmi handle in `_get_handle`.
    """
    super().__init__(gpu_index)
    self._get_handle()

    # These values are updated in AMDGPUs constructor
    self._supports_get_total_energy_consumption = True
    self._supports_instant_power_usage = True
    self._supports_average_power_usage = True
    self._is_dual_die_odd_chiplet = False  # Set for MI250/MI250X odd-indexed GPUs

get_name

get_name()

Return the name of the GPU model.

Source code in zeus/device/gpu/amd.py
149
150
151
152
153
@_handle_amdsmi_errors
def get_name(self) -> str:
    """Return the name of the GPU model."""
    # `market_name` is the marketing name of the GPU from the ASIC info dict.
    info = amdsmi.amdsmi_get_gpu_asic_info(self.handle)
    return info["market_name"]

get_power_management_limit_constraints

get_power_management_limit_constraints()

Return the minimum and maximum power management limits. Units: mW.

Source code in zeus/device/gpu/amd.py
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
@_handle_amdsmi_errors
def get_power_management_limit_constraints(self) -> tuple[int, int]:
    """Return the minimum and maximum power management limits. Units: mW."""
    info = amdsmi.amdsmi_get_power_cap_info(self.handle)  # Returns in uW
    # divmod converts uW -> mW while detecting (and warning about) any
    # sub-milliwatt remainder that would be silently truncated.
    min_power_cap, res = divmod(info["min_power_cap"], 1000)
    if res != 0:
        logger.warning(
            "Minimum power cap for GPU %d is not a multiple of 1000 uW: %d uW",
            self.gpu_index,
            info["min_power_cap"],
        )
    max_power_cap, res = divmod(info["max_power_cap"], 1000)
    if res != 0:
        logger.warning(
            "Maximum power cap for GPU %d is not a multiple of 1000 uW: %d uW",
            self.gpu_index,
            info["max_power_cap"],
        )
    return (int(min_power_cap), int(max_power_cap))

get_power_management_limit

get_power_management_limit()

Return the current power management limit. Units: mW.

Source code in zeus/device/gpu/amd.py
180
181
182
183
184
185
186
187
188
189
190
191
@_handle_amdsmi_errors
def get_power_management_limit(self) -> int:
    """Return the current power management limit. Units: mW."""
    info = amdsmi.amdsmi_get_power_cap_info(self.handle)  # Returns in uW
    # divmod converts uW -> mW while flagging any sub-milliwatt remainder.
    power_cap, res = divmod(info["power_cap"], 1000)
    if res != 0:
        logger.warning(
            "Current power cap for GPU %d is not a multiple of 1000 uW: %d uW",
            self.gpu_index,
            info["power_cap"],
        )
    return int(power_cap)

set_power_management_limit

set_power_management_limit(power_limit_mw, block=True)

Set the GPU's power management limit. Unit: mW.

Source code in zeus/device/gpu/amd.py
193
194
195
196
197
198
199
200
201
202
@_handle_amdsmi_errors
def set_power_management_limit(self, power_limit_mw: int, block: bool = True) -> None:
    """Set the GPU's power management limit. Unit: mW."""
    # Skip the privileged set call if the requested limit is already in effect.
    current_limit = self.get_power_management_limit()
    if current_limit == power_limit_mw:
        return

    self._warn_sys_admin()
    # Units for set_power_cap is uW
    amdsmi.amdsmi_set_power_cap(self.handle, 0, int(power_limit_mw * 1000))

reset_power_management_limit

reset_power_management_limit(block=True)

Reset the GPU's power management limit to the default value.

Source code in zeus/device/gpu/amd.py
204
205
206
207
208
209
210
211
212
213
214
215
@_handle_amdsmi_errors
def reset_power_management_limit(self, block: bool = True) -> None:
    """Reset the GPU's power management limit to the default value."""
    info = amdsmi.amdsmi_get_power_cap_info(self.handle)  # Returns in uW
    default_power_cap_uw = int(info["default_power_cap"])
    # Skip the privileged set call if the default is already in effect.
    current_limit_mw = self.get_power_management_limit()
    if current_limit_mw * 1000 == default_power_cap_uw:
        return

    self._warn_sys_admin()
    # Units for set_power_cap is uW
    amdsmi.amdsmi_set_power_cap(self.handle, 0, cap=default_power_cap_uw)

set_persistence_mode

set_persistence_mode(enabled, block=True)

Set persistence mode.

Source code in zeus/device/gpu/amd.py
217
218
219
220
@_handle_amdsmi_errors
def set_persistence_mode(self, enabled: bool, block: bool = True) -> None:
    """Set persistence mode.

    Raises:
        ZeusGPUNotSupportedError: Always, since AMD GPUs do not have persistence mode.
    """
    raise gpu_common.ZeusGPUNotSupportedError("Persistence mode is not supported on AMD GPUs.")

get_supported_memory_clocks

get_supported_memory_clocks()

Return a list of supported memory clock frequencies. Units: MHz.

Source code in zeus/device/gpu/amd.py
222
223
224
225
226
@_handle_amdsmi_errors
def get_supported_memory_clocks(self) -> list[int]:
    """Return a list of supported memory clock frequencies. Units: MHz."""
    # Only the [max, min] bounds are exposed, not every discrete clock step.
    info = amdsmi.amdsmi_get_clock_info(self.handle, amdsmi.AmdSmiClkType.MEM)  # returns MHz
    return [info["max_clk"], info["min_clk"]]

set_memory_locked_clocks

set_memory_locked_clocks(min_clock_mhz, max_clock_mhz, block=True)

Lock the memory clock to a specified range. Units: MHz.

Source code in zeus/device/gpu/amd.py
228
229
230
231
232
233
234
235
236
237
@_handle_amdsmi_errors
def set_memory_locked_clocks(self, min_clock_mhz: int, max_clock_mhz: int, block: bool = True) -> None:
    """Lock the memory clock to a specified range. Units: MHz."""
    # All privileged amdsmi setters call `_warn_sys_admin` first.
    self._warn_sys_admin()
    amdsmi.amdsmi_set_gpu_clk_range(
        self.handle,
        min_clock_mhz,
        max_clock_mhz,
        clk_type=amdsmi.AmdSmiClkType.MEM,
    )

reset_memory_locked_clocks

reset_memory_locked_clocks(block=True)

Reset the locked memory clocks to the default.

Source code in zeus/device/gpu/amd.py
239
240
241
242
243
244
245
246
247
248
249
250
251
@_handle_amdsmi_errors
def reset_memory_locked_clocks(self, block: bool = True) -> None:
    """Reset the locked memory clocks to the default."""
    # Get default MEM clock values
    info = amdsmi.amdsmi_get_clock_info(self.handle, amdsmi.AmdSmiClkType.MEM)  # returns MHz

    self._warn_sys_admin()
    # Restoring the full [min, max] range effectively unlocks the clock.
    amdsmi.amdsmi_set_gpu_clk_range(
        self.handle,
        info["min_clk"],
        info["max_clk"],
        clk_type=amdsmi.AmdSmiClkType.MEM,
    )  # expects MHz

get_supported_graphics_clocks

get_supported_graphics_clocks(memory_clock_mhz=None)

Return a list of supported graphics clock frequencies. Units: MHz.

Parameters:

Name Type Description Default
memory_clock_mhz int | None

Memory clock frequency to use. Some GPUs have different supported graphics clocks depending on the memory clock.

None
Source code in zeus/device/gpu/amd.py
253
254
255
256
257
258
259
260
261
262
263
@_handle_amdsmi_errors
def get_supported_graphics_clocks(self, memory_clock_mhz: int | None = None) -> list[int]:
    """Return a list of supported graphics clock frequencies. Units: MHz.

    Args:
        memory_clock_mhz: Memory clock frequency to use. Some GPUs have
            different supported graphics clocks depending on the memory clock.
            Currently unused, since amdsmi only exposes the overall
            [max, min] GFX clock bounds.
    """
    info = amdsmi.amdsmi_get_clock_info(self.handle, amdsmi.AmdSmiClkType.GFX)  # returns MHz
    return [info["max_clk"], info["min_clk"]]

set_gpu_locked_clocks

set_gpu_locked_clocks(min_clock_mhz, max_clock_mhz, block=True)

Lock the GPU clock to a specified range. Units: MHz.

Source code in zeus/device/gpu/amd.py
265
266
267
268
269
270
271
272
273
274
@_handle_amdsmi_errors
def set_gpu_locked_clocks(self, min_clock_mhz: int, max_clock_mhz: int, block: bool = True) -> None:
    """Lock the GPU clock to a specified range. Units: MHz."""
    # All privileged amdsmi setters call `_warn_sys_admin` first.
    self._warn_sys_admin()
    amdsmi.amdsmi_set_gpu_clk_range(
        self.handle,
        min_clock_mhz,
        max_clock_mhz,
        clk_type=amdsmi.AmdSmiClkType.GFX,
    )

reset_gpu_locked_clocks

reset_gpu_locked_clocks(block=True)

Reset the locked GPU clocks to the default.

Source code in zeus/device/gpu/amd.py
276
277
278
279
280
281
282
283
284
285
286
287
288
@_handle_amdsmi_errors
def reset_gpu_locked_clocks(self, block: bool = True) -> None:
    """Reset the locked GPU clocks to the default."""
    # Get default GPU clock values
    info = amdsmi.amdsmi_get_clock_info(self.handle, amdsmi.AmdSmiClkType.GFX)  # returns MHz

    self._warn_sys_admin()
    # Restoring the full [min, max] range effectively unlocks the clock.
    amdsmi.amdsmi_set_gpu_clk_range(
        self.handle,
        info["min_clk"],
        info["max_clk"],
        clk_type=amdsmi.AmdSmiClkType.GFX,
    )  # expects MHz

_ensure_not_dual_die_odd_chiplet

_ensure_not_dual_die_odd_chiplet()

Raise an error if the GPU is a chiplet of a dual-die AMD Instinct MI250/MI250X GPU.

Source code in zeus/device/gpu/amd.py
290
291
292
293
294
295
296
297
298
299
300
301
302
303
def _ensure_not_dual_die_odd_chiplet(self) -> None:
    """Raise an error if the GPU is a chiplet of a dual-die AMD Instinct MI250/MI250X GPU.

    Raises:
        ZeusGPUNotSupportedError: If `self._is_dual_die_odd_chiplet` is set,
            i.e., this is the odd-indexed chiplet whose power is only reported
            as part of the even-indexed chiplet's combined reading.
    """
    if self._is_dual_die_odd_chiplet:
        raise gpu_common.ZeusGPUNotSupportedError(
            f"GPU {self.gpu_index} is a chiplet of a dual-die AMD Instinct MI250/MI250X GPU "
            f"that does not support individual power monitoring. AMD's driver only reports power "
            f"for GPU {self.gpu_index - 1}, which represents the COMBINED power draw of BOTH chiplets "
            f"(GPU {self.gpu_index - 1} and GPU {self.gpu_index}).\n\n"
            f"To measure power/energy for workloads on this GPU:\n"
            f"  1. Use GPU {self.gpu_index - 1} for measurements (e.g., PowerMonitor(gpu_indices=[{self.gpu_index - 1}]))\n"
            f"  2. Be aware that measurements include BOTH chiplets and cannot be separated\n"
            f"  3. If you run workloads on GPU {self.gpu_index}, its power consumption will be "
            f"included in GPU {self.gpu_index - 1}'s readings"
        )

get_average_power_usage

get_average_power_usage()

Return the average power draw of the GPU. Units: mW.

Source code in zeus/device/gpu/amd.py
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
@_handle_amdsmi_errors
def get_average_power_usage(self) -> int:
    """Return the average power draw of the GPU. Units: mW.

    Raises:
        ZeusGPUNotSupportedError: If this GPU is an MI250/MI250X odd chiplet,
            if average power is known to be unsupported, or if amdsmi returns
            a non-integer 'average_socket_power'.
    """
    self._ensure_not_dual_die_odd_chiplet()

    if not self._supports_average_power_usage:
        raise gpu_common.ZeusGPUNotSupportedError(
            "Average power usage is not supported on this AMD GPU. "
            "This is because amdsmi.amdsmi_get_power_info does not return a valid 'average_socket_power'. "
            "Please use `get_instant_power_usage` instead."
        )
    # returns in W, convert to mW
    power_info = amdsmi.amdsmi_get_power_info(self.handle)
    avg_power = power_info["average_socket_power"]
    if not isinstance(avg_power, int):
        raise gpu_common.ZeusGPUNotSupportedError(
            f"Average power usage is not supported on this AMD GPU. "
            f"amdsmi.amdsmi_get_power_info returned '{avg_power}' for 'average_socket_power'. "
            f"Please use `get_instant_power_usage` instead."
        )
    return avg_power * 1000

get_instant_power_usage

get_instant_power_usage()

Return the current power draw of the GPU. Units: mW.

Source code in zeus/device/gpu/amd.py
327
328
329
330
331
332
333
334
335
336
337
338
339
@_handle_amdsmi_errors
def get_instant_power_usage(self) -> int:
    """Return the current power draw of the GPU. Units: mW.

    Raises:
        ZeusGPUNotSupportedError: If this GPU is an MI250/MI250X odd chiplet
            or instant power is known to be unsupported.
    """
    self._ensure_not_dual_die_odd_chiplet()

    if not self._supports_instant_power_usage:
        raise gpu_common.ZeusGPUNotSupportedError(
            "Instant power usage is not supported on this AMD GPU. "
            "This is because amdsmi.amdsmi_get_power_info does not return a valid 'current_socket_power'. "
            "Please use `get_average_power_usage` instead."
        )
    # returns in W, convert to mW
    return int(amdsmi.amdsmi_get_power_info(self.handle)["current_socket_power"]) * 1000

get_average_memory_power_usage

get_average_memory_power_usage()

Return the average power usage of the GPU's memory. Units: mW.

Source code in zeus/device/gpu/amd.py
341
342
343
344
@_handle_amdsmi_errors
def get_average_memory_power_usage(self) -> int:
    """Return the average power usage of the GPU's memory. Units: mW.

    Raises:
        ZeusGPUNotSupportedError: Always; amdsmi does not expose memory power.
    """
    raise gpu_common.ZeusGPUNotSupportedError("Average memory power usage is not supported on AMD GPUs.")

supports_get_total_energy_consumption

supports_get_total_energy_consumption()

Check if the GPU supports retrieving total energy consumption. Returns a boolean capability flag.

Source code in zeus/device/gpu/amd.py
346
347
348
349
@_handle_amdsmi_errors
def supports_get_total_energy_consumption(self) -> bool:
    """Return whether the GPU supports retrieving total energy consumption."""
    # Capability flag initialized in `__init__` and updated by the AMDGPUs manager.
    return self._supports_get_total_energy_consumption

get_total_energy_consumption

get_total_energy_consumption()

Return the total energy consumption of the GPU since driver load. Units: mJ.

Source code in zeus/device/gpu/amd.py
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
@_handle_amdsmi_errors
def get_total_energy_consumption(self) -> int:
    """Return the total energy consumption of the GPU since driver load. Units: mJ."""
    if not self._supports_get_total_energy_consumption:
        raise gpu_common.ZeusGPUNotSupportedError(
            "Total energy consumption is not supported on this AMD GPU. "
            "This is because the result of `amdsmi.amdsmi_get_energy_count` is not accurate. "
            "Please use `get_average_power_usage` or `get_instant_power_usage` to calculate energy usage."
        )
    energy_dict = amdsmi.amdsmi_get_energy_count(self.handle)
    if "energy_accumulator" in energy_dict:  # Changed since amdsmi 6.2.1
        energy = energy_dict["energy_accumulator"] * energy_dict["counter_resolution"]
    else:
        # Old API: assume has key "power". If not, exception will be handled by _handle_amdsmi_errors.
        energy = energy_dict["power"] * energy_dict["counter_resolution"]

    return int(energy / 1e3)  # energy is in microjoules; convert to millijoules

get_gpu_temperature

get_gpu_temperature()

Return the current GPU temperature. Units: Celsius.

We use the hotspot temperature (as opposed to edge) as we believe it to be more representative of the GPU core's temperature under load.

Source code in zeus/device/gpu/amd.py
369
370
371
372
373
374
375
376
377
378
379
380
381
382
@_handle_amdsmi_errors
def get_gpu_temperature(self) -> int:
    """Return the current GPU temperature. Units: Celsius.

    We use the hotspot temperature (as opposed to edge) as we believe it to be more representative
    of the GPU core's temperature under load.
    """
    # amdsmi_get_temp_metric returns millidegrees Celsius, convert to Celsius
    temp_millidegrees = amdsmi.amdsmi_get_temp_metric(
        self.handle,
        amdsmi.AmdSmiTemperatureType.HOTSPOT,
        amdsmi.AmdSmiTemperatureMetric.CURRENT,
    )
    return temp_millidegrees // 1000

AMDGPUs

Bases: GPUs

AMD GPU Manager object, containing individual AMDGPU objects, abstracting amdsmi calls and handling related exceptions.

Important

Currently only ROCm >= 6.2 is supported.

Index resolution

AMD systems simultaneously expose several index spaces for the same physical GPU, and they do not all agree:

  • HIP index — what the HIP runtime hands out. This is what PyTorch sees as cuda:N, what torch.cuda.current_device() returns, and what HIP_VISIBLE_DEVICES / CUDA_VISIBLE_DEVICES refer to.
  • amd-smi GPU index — the slot number amd-smi and rocm-smi use (ordered by PCI BDF). This is what appears in amd-smi monitor's GPU column, what amdsmi_get_processor_handles() is ordered by, and what node-local admin scripts like set_powercap.sh -gpu N expect.
  • OAM-ID — physical OAM tray position; not used by Zeus but shown by amd-smi monitor.

On most nodes these orderings happen to coincide, but on some (e.g., MI350X in SPX/NPS1), the HIP runtime enumerates GPUs in a different order than PCI BDF — so HIP index 0 may be amd-smi GPU 3, and so on. Mixing up the two spaces results in silently operating on the wrong physical GPU.

Zeus sits at the application layer, so all indices passed to Zeus are HIP indices (matching PyTorch). Internally, AMDGPU._get_handle uses amdsmi_get_gpu_enumeration_info(handle)["hip_id"] to translate each HIP index to the correct amdsmi processor handle before any query or set — same data source amd-smi monitor uses to populate its HIP-ID column.

HIP_VISIBLE_DEVICES / CUDA_VISIBLE_DEVICES

HIP_VISIBLE_DEVICES is respected exactly as HIP itself interprets it: a comma-separated list of HIP indices to expose, in order. The remaining GPUs are hidden; the exposed GPUs are re-numbered densely starting at 0 within the process (matching what PyTorch shows as cuda:0, cuda:1, …).

The index you pass to AMDGPUs / GPUs methods is this dense, post-masking index — the same one PyTorch uses. Example: on a 4-GPU node with HIP_VISIBLE_DEVICES=0,2, Zeus tracks two GPUs, and the call gpus.get_power_management_limit(1) hits the physical GPU whose HIP index (as seen by the driver before masking) was 2, i.e., PyTorch's cuda:1.

When HIP_VISIBLE_DEVICES is not set but CUDA_VISIBLE_DEVICES is, Zeus honors CUDA_VISIBLE_DEVICES as if it were HIP_VISIBLE_DEVICES (this mirrors how ROCm PyTorch builds behave).

Source code in zeus/device/gpu/amd.py
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
class AMDGPUs(gpu_common.GPUs):
    """AMD GPU Manager object, containing individual AMDGPU objects, abstracting amdsmi calls and handling related exceptions.

    !!! Important
        Currently only ROCm >= 6.2 is supported.

    ## Index resolution

    AMD systems simultaneously expose several index spaces for the same
    physical GPU, and they do not all agree:

    - **HIP index** — what the HIP runtime hands out. This is what PyTorch
      sees as `cuda:N`, what `torch.cuda.current_device()` returns, and what
      `HIP_VISIBLE_DEVICES` / `CUDA_VISIBLE_DEVICES` refer to.
    - **amd-smi GPU index** — the slot number `amd-smi` and `rocm-smi` use
      (ordered by PCI BDF). This is what appears in `amd-smi monitor`'s
      `GPU` column, what `amdsmi_get_processor_handles()` is ordered by, and
      what node-local admin scripts like `set_powercap.sh -gpu N` expect.
    - **OAM-ID** — physical OAM tray position; not used by Zeus but shown
      by `amd-smi monitor`.

    On most nodes these orderings happen to coincide, but on some (e.g.,
    MI350X in SPX/NPS1), the HIP runtime enumerates GPUs in a different
    order than PCI BDF — so HIP index 0 may be amd-smi GPU 3, and so on.
    Mixing up the two spaces results in silently operating on the wrong
    physical GPU.

    Zeus sits at the application layer, so **all indices passed to Zeus are
    HIP indices** (matching PyTorch). Internally, `AMDGPU._get_handle` uses
    `amdsmi_get_gpu_enumeration_info(handle)["hip_id"]` to translate each
    HIP index to the correct `amdsmi` processor handle before any query or
    set — same data source `amd-smi monitor` uses to populate its `HIP-ID`
    column.

    ## HIP_VISIBLE_DEVICES / CUDA_VISIBLE_DEVICES

    `HIP_VISIBLE_DEVICES` is respected exactly as HIP itself interprets it:
    a comma-separated list of HIP indices to expose, in order. The remaining
    GPUs are hidden; the exposed GPUs are re-numbered densely starting at 0
    within the process (matching what PyTorch shows as `cuda:0`, `cuda:1`, …).

    The index you pass to `AMDGPUs` / `GPUs` methods is this dense,
    post-masking index — the same one PyTorch uses. Example: on a 4-GPU
    node with `HIP_VISIBLE_DEVICES=0,2`, Zeus tracks two GPUs, and the
    call `gpus.get_power_management_limit(1)` hits the physical GPU whose
    HIP index (as seen by the driver before masking) was 2, i.e.,
    PyTorch's `cuda:1`.

    When `HIP_VISIBLE_DEVICES` is not set but `CUDA_VISIBLE_DEVICES` is,
    Zeus honors `CUDA_VISIBLE_DEVICES` as if it were `HIP_VISIBLE_DEVICES`
    (this mirrors how ROCm PyTorch builds behave).
    """

    def __init__(self, ensure_homogeneous: bool = False) -> None:
        """Initialize AMDSMI and set up the GPUs.

        Args:
            ensure_homogeneous (bool): If True, ensures that all tracked GPUs have the same name.

        Raises:
            A subclass of `ZeusBaseGPUError`: Translated from
                `amdsmi.AmdSmiLibraryException` via `AMDGPU._exception_map`
                if initialization or GPU discovery fails.
        """
        try:
            amdsmi.amdsmi_init(amdsmi.AmdSmiInitFlags.INIT_AMD_GPUS)
            self._init_gpus()
            if ensure_homogeneous:
                self._ensure_homogeneous()
        except amdsmi.AmdSmiLibraryException as e:
            # Map the raw amdsmi error code onto Zeus's exception hierarchy.
            exception_class = AMDGPU._exception_map.get(e.get_error_code(), gpu_common.ZeusBaseGPUError)
            raise exception_class(e.get_error_info()) from e

    @property
    def gpus(self) -> Sequence[AMDGPU]:
        """Return a list of AMDGPU objects being tracked."""
        return self._gpus

    def _init_gpus(self) -> None:
        """Discover visible GPUs and probe each one's measurement capabilities.

        Sets `_supports_instant_power_usage`, `_supports_average_power_usage`,
        and `_supports_get_total_energy_consumption` on every tracked GPU by
        querying amdsmi and validating the hardware energy counter against a
        short timed power measurement.
        """
        # Must respect `HIP_VISIBLE_DEVICES` (or `CUDA_VISIBLE_DEVICES`) if set
        if (visible_device := os.environ.get("HIP_VISIBLE_DEVICES")) is not None or (
            visible_device := os.environ.get("CUDA_VISIBLE_DEVICES")
        ) is not None:
            if not visible_device:
                raise gpu_common.ZeusGPUInitError(
                    "HIP_VISIBLE_DEVICES or CUDA_VISIBLE_DEVICES is set but empty. "
                    "You can use either one for AMD GPUs, but it should either be unset "
                    "or a comma-separated list of GPU indices."
                )
            visible_indices = [int(idx) for idx in visible_device.split(",")]
        else:
            visible_indices = list(range(len(amdsmi.amdsmi_get_processor_handles())))

        # create the number of visible GPUs
        self._gpus = [AMDGPU(gpu_num) for gpu_num in visible_indices]

        # set _supports_instant_power_usage for all GPUs
        # amdsmi.amdsmi_get_power_info(handle)["current_socket_power"] returns "N/A" if not supported
        for gpu in self._gpus:
            gpu._supports_instant_power_usage = isinstance(
                amdsmi.amdsmi_get_power_info(gpu.handle)["current_socket_power"],
                int,
            )

        # set _supports_average_power_usage for all GPUs
        # amdsmi.amdsmi_get_power_info(handle)["average_socket_power"] returns "N/A" if not supported
        for gpu in self._gpus:
            gpu._supports_average_power_usage = isinstance(
                amdsmi.amdsmi_get_power_info(gpu.handle)["average_socket_power"],
                int,
            )

        # set _supports_get_total_energy_consumption for all GPUs
        # Validate the hardware energy counter: compare the energy it reports
        # over `wait_time` seconds against power * time from the power API.
        wait_time = 0.5  # seconds
        # Try to get power for energy validation, fallback if needed
        powers = []
        for gpu in self._gpus:
            try:
                power = gpu.get_average_power_usage()
            except gpu_common.ZeusGPUNotSupportedError:
                try:
                    power = gpu.get_instant_power_usage()
                except gpu_common.ZeusGPUNotSupportedError:
                    # Neither average nor instant power available, use 0
                    power = 0
            powers.append(power)
        # NOTE(review): these calls assume `_supports_get_total_energy_consumption`
        # starts out truthy on AMDGPU so the counter can be read here — confirm
        # against AMDGPU's initializer.
        initial_energies = [gpu.get_total_energy_consumption() for gpu in self._gpus]
        time.sleep(wait_time)
        final_energies = [gpu.get_total_energy_consumption() for gpu in self._gpus]
        measured_energies = [final - initial for final, initial in zip(final_energies, initial_energies)]
        expected_energies = [power * wait_time for power in powers]  # energy = power * time

        for gpu, measured_energy, expected_energy in zip(self._gpus, measured_energies, expected_energies):
            # Check for MI250/MI250X dual-die GPUs and warn users about combined power reporting
            # NOTE(review): the parity checks below use the dense post-masking
            # index; with HIP_VISIBLE_DEVICES hiding GPUs, parity may not match
            # the physical chiplet pairing — confirm on masked configurations.
            gpu_name = gpu.get_name()
            if "MI250" in gpu_name:
                if gpu.gpu_index % 2 == 1 and expected_energy < 0.001:
                    # This is an odd-indexed MI250/MI250X GPU (chiplet without power reporting)
                    gpu._is_dual_die_odd_chiplet = True
                    gpu._supports_get_total_energy_consumption = False
                    gpu._supports_instant_power_usage = False
                    gpu._supports_average_power_usage = False
                    logger.warning(
                        "GPU %d is a chiplet of a dual-die AMD Instinct MI250/MI250X GPU. "
                        "AMD's driver only reports power for GPU %d, which represents the COMBINED "
                        "power draw of BOTH chiplets (GPU %d and GPU %d). "
                        "Power and energy measurements are not available for this GPU individually.",
                        gpu.gpu_index,
                        gpu.gpu_index - 1,
                        gpu.gpu_index - 1,
                        gpu.gpu_index,
                    )
                elif gpu.gpu_index % 2 == 0 and expected_energy >= 0.001:
                    # This is an even-indexed MI250/MI250X GPU (reports combined power)
                    logger.warning(
                        "GPU %d is a dual-die AMD Instinct MI250/MI250X GPU. "
                        "Power and energy measurements for this GPU represent the COMBINED consumption "
                        "of BOTH chiplets (GPU %d and GPU %d). Workloads running on either chiplet "
                        "will be included in these measurements and cannot be separated.",
                        gpu.gpu_index,
                        gpu.gpu_index,
                        gpu.gpu_index + 1,
                    )

            # Check for zero or very small expected_energy to avoid division by zero
            if expected_energy < 0.001:
                # Skip if already handled as MI250 odd chiplet above
                if not gpu._is_dual_die_odd_chiplet:
                    # Generic case: GPU reports zero power (idle or unsupported)
                    gpu._supports_get_total_energy_consumption = False
                    logger.info(
                        "Disabling `get_total_energy_consumption` for device %d. "
                        "Power reading is zero or negligible (expected energy: %.3f mJ), "
                        "so energy counter validation cannot be performed. "
                        "You can still measure energy by polling either `get_instant_power_usage` or `get_average_power_usage` and integrating over time.",
                        gpu.gpu_index,
                        expected_energy,
                    )
            # Loose bound to rule out very obvious counter problems
            elif 0.1 < measured_energy / expected_energy < 10:
                gpu._supports_get_total_energy_consumption = True
            else:
                gpu._supports_get_total_energy_consumption = False
                logger.info(
                    "Disabling `get_total_energy_consumption` for device %d. The result of `amdsmi.amdsmi_get_energy_count` is not accurate. Expected energy: %d mJ, Measured energy: %d mJ. "
                    "This is a known issue with some AMD GPUs, please see https://github.com/ROCm/amdsmi/issues/38 for more information. "
                    "You can still measure energy by polling either `get_instant_power_usage` or `get_average_power_usage` and integrating over time.",
                    gpu.gpu_index,
                    expected_energy,
                    measured_energy,
                )

    def __del__(self) -> None:
        """Shut down AMDSMI."""
        with contextlib.suppress(amdsmi.AmdSmiException):
            amdsmi.amdsmi_shut_down()  # Ignore error on shutdown. Necessary for proper cleanup and test functionality

gpus property

gpus

Return a list of AMDGPU objects being tracked.

__init__

__init__(ensure_homogeneous=False)

Parameters:

Name Type Description Default
ensure_homogeneous bool

If True, ensures that all tracked GPUs have the same name.

False
Source code in zeus/device/gpu/amd.py
438
439
440
441
442
443
444
445
446
447
448
449
450
451
def __init__(self, ensure_homogeneous: bool = False) -> None:
    """Initialize AMDSMI and set up the GPUs.

    Args:
        ensure_homogeneous (bool): If True, ensures that all tracked GPUs have the same name.
    """
    try:
        amdsmi.amdsmi_init(amdsmi.AmdSmiInitFlags.INIT_AMD_GPUS)
        self._init_gpus()
        if ensure_homogeneous:
            self._ensure_homogeneous()
    except amdsmi.AmdSmiLibraryException as err:
        # Translate the raw amdsmi error code into Zeus's exception hierarchy.
        zeus_exc = AMDGPU._exception_map.get(err.get_error_code(), gpu_common.ZeusBaseGPUError)
        raise zeus_exc(err.get_error_info()) from err

__del__

__del__()

Shut down AMDSMI.

Source code in zeus/device/gpu/amd.py
572
573
574
575
def __del__(self) -> None:
    """Shut down AMDSMI."""
    # Ignore errors on shutdown. Necessary for proper cleanup and test functionality.
    with contextlib.suppress(amdsmi.AmdSmiException):
        amdsmi.amdsmi_shut_down()

amdsmi_is_available cached

amdsmi_is_available()

Check if amdsmi is available.

Source code in zeus/device/gpu/amd.py
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
@lru_cache(maxsize=1)
def amdsmi_is_available() -> bool:
    """Check if amdsmi is available."""
    try:
        # Importing `amdsmi` writes to stdout when libamd_smi.so is missing;
        # silence that noise during the probe.
        with contextlib.redirect_stdout(None):
            import amdsmi
    except ImportError:
        logger.info("amdsmi is not available.")
        return False
    except OSError:
        # Usually raised when amdsmi cannot locate libamd_smi.so.
        if os.getenv("ROCM_PATH") is None:
            logger.warning("`ROCM_PATH` is not set. Do you have ROCm installed?")
        return False
    except AttributeError:
        # Usually raised when the amdsmi and ROCm versions are incompatible.
        logger.warning(
            "Failed to import amdsmi. Ensure amdsmi's version is at least as high as the current ROCm version."
        )
        return False
    except KeyError as e:
        logger.warning(
            "Failed to import amdsmi due to a key error on: [%s]. Ensure that amdsmi is installed on your system.",
            e,
        )
        return False

    # The import succeeded; verify the library can actually talk to the driver.
    try:
        amdsmi.amdsmi_init(amdsmi.AmdSmiInitFlags.INIT_AMD_GPUS)
    except amdsmi.AmdSmiLibraryException as e:
        logger.info("amdsmi is available but could not initialize: %s", e)
        return False
    logger.info("amdsmi is available and initialized")
    return True

_hip_to_amdsmi_handle cached

_hip_to_amdsmi_handle()

Map each HIP index to its amdsmi processor handle.

The mapping is invariant for the life of the process, so it's cached.

Source code in zeus/device/gpu/amd.py
76
77
78
79
80
81
82
@lru_cache(maxsize=1)
def _hip_to_amdsmi_handle() -> dict[int, c_void_p]:
    """Map each HIP index to its amdsmi processor handle.

    The mapping is invariant for the life of the process, so it's cached.
    """
    mapping: dict[int, c_void_p] = {}
    for handle in amdsmi.amdsmi_get_processor_handles():
        hip_id = amdsmi.amdsmi_get_gpu_enumeration_info(handle)["hip_id"]
        mapping[hip_id] = handle
    return mapping